diff --git a/.codegen/model.go.tmpl b/.codegen/model.go.tmpl index 52a478f71..40eae0d68 100644 --- a/.codegen/model.go.tmpl +++ b/.codegen/model.go.tmpl @@ -31,7 +31,7 @@ type {{.PascalName}} struct { {{end}} {{- define "field-tag" -}} - {{if .IsJson}}tfsdk:"{{if and (ne .Entity.Terraform nil) (ne .Entity.Terraform.Alias "") }}{{.Entity.Terraform.Alias}}{{else}}{{.Name}}{{end}}" tf:"{{if not .Required}}optional{{end}}"{{else}}tfsdk:"-"{{end -}} + {{if .IsJson}}tfsdk:"{{if and (ne .Entity.Terraform nil) (ne .Entity.Terraform.Alias "") }}{{.Entity.Terraform.Alias}}{{else}}{{.Name}}{{end}}" tf:"{{- $first := true -}}{{- if not .Required -}}{{- if not $first -}},{{end}}optional{{- $first = false -}}{{- end -}}{{- if .Entity.IsObject -}}{{- if not $first -}},{{end}}object{{- $first = false -}}{{- end -}}"{{else}}tfsdk:"-"{{end -}} {{- end -}} {{- define "type" -}} diff --git a/go.mod b/go.mod index e01145f07..1fc08e53b 100644 --- a/go.mod +++ b/go.mod @@ -52,6 +52,7 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect + github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect diff --git a/go.sum b/go.sum index dfd13d335..c323c71e4 100644 --- a/go.sum +++ b/go.sum @@ -134,6 +134,8 @@ github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7 github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= github.com/hashicorp/terraform-plugin-framework v1.11.0 h1:M7+9zBArexHFXDx/pKTxjE6n/2UCXY6b8FIq9ZYhwfE= github.com/hashicorp/terraform-plugin-framework v1.11.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0/go.mod h1:wGeI02gEhj9nPANU62F2jCaHjXulejm/X+af4PdZaNo= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= diff --git a/internal/providers/pluginfw/tfschema/customizable_schema_test.go b/internal/providers/pluginfw/tfschema/customizable_schema_test.go index d86be910b..ff949d9da 100644 --- a/internal/providers/pluginfw/tfschema/customizable_schema_test.go +++ b/internal/providers/pluginfw/tfschema/customizable_schema_test.go @@ -13,9 +13,10 @@ import ( ) type TestTfSdk struct { - Description types.String `tfsdk:"description" tf:""` - Nested *NestedTfSdk `tfsdk:"nested" tf:"optional"` - Map map[string]types.String `tfsdk:"map" tf:"optional"` + Description types.String `tfsdk:"description" tf:""` + Nested *NestedTfSdk `tfsdk:"nested" tf:"optional"` + NestedSliceObject []NestedTfSdk `tfsdk:"nested_slice_object" tf:"optional,object"` + Map map[string]types.String `tfsdk:"map" tf:"optional"` } type NestedTfSdk struct { @@ -121,3 +122,11 @@ func TestCustomizeSchemaAddPlanModifier(t *testing.T) { assert.True(t, len(scm.Attributes["description"].(schema.StringAttribute).PlanModifiers) == 1) } + +func TestCustomizeSchemaObjectTypeValidatorAdded(t *testing.T) { + scm := ResourceStructToSchema(TestTfSdk{}, 
func(c CustomizableSchema) CustomizableSchema { + return c + }) + + assert.True(t, len(scm.Blocks["nested_slice_object"].(schema.ListNestedBlock).Validators) == 1) +} diff --git a/internal/providers/pluginfw/tfschema/struct_to_schema.go b/internal/providers/pluginfw/tfschema/struct_to_schema.go index aa473e301..206af67fb 100644 --- a/internal/providers/pluginfw/tfschema/struct_to_schema.go +++ b/internal/providers/pluginfw/tfschema/struct_to_schema.go @@ -7,11 +7,19 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/internal/tfreflect" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" ) +type structTag struct { + optional bool + computed bool + singleObject bool +} + func typeToSchema(v reflect.Value) NestedBlockObject { scmAttr := map[string]AttributeBuilder{} scmBlock := map[string]BlockBuilder{} @@ -30,8 +38,7 @@ func typeToSchema(v reflect.Value) NestedBlockObject { if fieldName == "-" { continue } - isOptional := fieldIsOptional(typeField) - isComputed := fieldIsComputed(typeField) + structTag := getStructTag(typeField) kind := typeField.Type.Kind() value := field.Value typeFieldType := typeField.Type @@ -52,42 +59,47 @@ func typeToSchema(v reflect.Value) NestedBlockObject { case reflect.TypeOf(types.Bool{}): scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.BoolType, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.Int64{}): scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.Int64Type, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.Float64{}): scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.Float64Type, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.String{}): scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.StringType, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } default: // Nested struct nestedScm := typeToSchema(reflect.New(elemType).Elem()) + var validators []validator.List + if structTag.singleObject { + validators = append(validators, listvalidator.SizeAtMost(1)) + } scmBlock[fieldName] = ListNestedBlockBuilder{ NestedObject: NestedBlockObject{ Attributes: nestedScm.Attributes, Blocks: nestedScm.Blocks, }, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, + Validators: validators, } } } else if kind == reflect.Map { @@ -102,30 +114,30 @@ func typeToSchema(v reflect.Value) NestedBlockObject { case reflect.TypeOf(types.Bool{}): scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.BoolType, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: 
structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.Int64{}): scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.Int64Type, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.Float64{}): scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.Float64Type, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.String{}): scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.StringType, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } default: // Nested struct @@ -134,36 +146,36 @@ func typeToSchema(v reflect.Value) NestedBlockObject { NestedObject: NestedAttributeObject{ Attributes: nestedScm.Attributes, }, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } } } else if kind == reflect.Struct { switch value.Interface().(type) { case types.Bool: scmAttr[fieldName] = BoolAttributeBuilder{ - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case types.Int64: scmAttr[fieldName] = Int64AttributeBuilder{ - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case types.Float64: scmAttr[fieldName] = Float64AttributeBuilder{ - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case types.String: scmAttr[fieldName] = StringAttributeBuilder{ - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case types.List: panic(fmt.Errorf("types.List should never be used in tfsdk structs. %s", common.TerraformBugErrorMessage)) @@ -176,9 +188,9 @@ func typeToSchema(v reflect.Value) NestedBlockObject { nestedScm := typeToSchema(sv) scmBlock[fieldName] = ListNestedBlockBuilder{ NestedObject: nestedScm, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } } } else { @@ -188,14 +200,13 @@ func typeToSchema(v reflect.Value) NestedBlockObject { return NestedBlockObject{Attributes: scmAttr, Blocks: scmBlock} } -func fieldIsComputed(field reflect.StructField) bool { +func getStructTag(field reflect.StructField) structTag { tagValue := field.Tag.Get("tf") - return strings.Contains(tagValue, "computed") -} - -func fieldIsOptional(field reflect.StructField) bool { - tagValue := field.Tag.Get("tf") - return strings.Contains(tagValue, "optional") + return structTag{ optional: strings.Contains(tagValue, "optional"), computed: strings.Contains(tagValue, "computed"), singleObject: strings.Contains(tagValue, "object"), } } // ResourceStructToSchema builds a resource schema from a tfsdk struct, with customizations applied.
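Note on the change above: any slice field whose `tf` tag contains `object` still renders as a list block, but it now also carries a `listvalidator.SizeAtMost(1)` validator, so at most one nested block can be configured. Below is a minimal, self-contained sketch of that flow, reusing the `getStructTag` logic from this diff; the `demo` struct and `main` are illustrative assumptions, not provider code.

package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
)

// structTag and getStructTag mirror the diff above.
type structTag struct {
	optional     bool
	computed     bool
	singleObject bool
}

func getStructTag(field reflect.StructField) structTag {
	tagValue := field.Tag.Get("tf")
	return structTag{
		optional:     strings.Contains(tagValue, "optional"),
		computed:     strings.Contains(tagValue, "computed"),
		singleObject: strings.Contains(tagValue, "object"),
	}
}

// demo is hypothetical: the generated models emit a single nested message as
// a slice, and the "object" tag marks it as an at-most-one-element block.
type demo struct {
	Filter []struct{} `tfsdk:"filter" tf:"optional,object"`
}

func main() {
	f, _ := reflect.TypeOf(demo{}).FieldByName("Filter")
	tag := getStructTag(f)
	var validators []validator.List
	if tag.singleObject {
		// The same call typeToSchema now makes in its nested-struct case.
		validators = append(validators, listvalidator.SizeAtMost(1))
	}
	// Prints: optional=true object=true validators=1
	fmt.Printf("optional=%v object=%v validators=%d\n", tag.optional, tag.singleObject, len(validators))
}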
diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index ab9b6220d..0b4c6101e 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -16,11 +16,11 @@ import ( type App struct { // The active deployment of the app. - ActiveDeployment []AppDeployment `tfsdk:"active_deployment" tf:"optional"` + ActiveDeployment []AppDeployment `tfsdk:"active_deployment" tf:"optional,object"` - AppStatus []ApplicationStatus `tfsdk:"app_status" tf:"optional"` + AppStatus []ApplicationStatus `tfsdk:"app_status" tf:"optional,object"` - ComputeStatus []ComputeStatus `tfsdk:"compute_status" tf:"optional"` + ComputeStatus []ComputeStatus `tfsdk:"compute_status" tf:"optional,object"` // The creation time of the app. Formatted timestamp in ISO 8601. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The email of the user that created the app. @@ -35,7 +35,7 @@ type App struct { // characters and hyphens. It must be unique within the workspace. Name types.String `tfsdk:"name" tf:""` // The pending deployment of the app. - PendingDeployment []AppDeployment `tfsdk:"pending_deployment" tf:"optional"` + PendingDeployment []AppDeployment `tfsdk:"pending_deployment" tf:"optional,object"` // Resources for the app. Resources []AppResource `tfsdk:"resources" tf:"optional"` @@ -80,7 +80,7 @@ type AppDeployment struct { // The email of the user that created the deployment. Creator types.String `tfsdk:"creator" tf:"optional"` // The deployment artifacts for an app. - DeploymentArtifacts []AppDeploymentArtifacts `tfsdk:"deployment_artifacts" tf:"optional"` + DeploymentArtifacts []AppDeploymentArtifacts `tfsdk:"deployment_artifacts" tf:"optional,object"` // The unique id of the deployment. DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"` // The mode in which the deployment will manage the source code. @@ -94,7 +94,7 @@ type AppDeployment struct { // the deployment. SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` // Status and status message of the deployment - Status []AppDeploymentStatus `tfsdk:"status" tf:"optional"` + Status []AppDeploymentStatus `tfsdk:"status" tf:"optional,object"` // The update time of the deployment. Formatted timestamp in ISO 8601. UpdateTime types.String `tfsdk:"update_time" tf:"optional"` } @@ -144,15 +144,15 @@ type AppResource struct { // Description of the App Resource. Description types.String `tfsdk:"description" tf:"optional"` - Job []AppResourceJob `tfsdk:"job" tf:"optional"` + Job []AppResourceJob `tfsdk:"job" tf:"optional,object"` // Name of the App Resource. Name types.String `tfsdk:"name" tf:""` - Secret []AppResourceSecret `tfsdk:"secret" tf:"optional"` + Secret []AppResourceSecret `tfsdk:"secret" tf:"optional,object"` - ServingEndpoint []AppResourceServingEndpoint `tfsdk:"serving_endpoint" tf:"optional"` + ServingEndpoint []AppResourceServingEndpoint `tfsdk:"serving_endpoint" tf:"optional,object"` - SqlWarehouse []AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional"` + SqlWarehouse []AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional,object"` } type AppResourceJob struct { diff --git a/internal/service/billing_tf/model.go b/internal/service/billing_tf/model.go index 8eba23e7e..f2a63fde2 100755 --- a/internal/service/billing_tf/model.go +++ b/internal/service/billing_tf/model.go @@ -60,7 +60,7 @@ type BudgetConfiguration struct { // usage to limit the scope of what is considered for this budget. Leave // empty to include all usage for this account.
All provided filters must be // matched for usage to be included. - Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional,object"` // Update time of this budget configuration. UpdateTime types.Int64 `tfsdk:"update_time" tf:"optional"` } @@ -71,7 +71,7 @@ type BudgetConfigurationFilter struct { // be entered exactly as they appear in your usage data. Tags []BudgetConfigurationFilterTagClause `tfsdk:"tags" tf:"optional"` // If provided, usage must match with the provided Databricks workspace IDs. - WorkspaceId []BudgetConfigurationFilterWorkspaceIdClause `tfsdk:"workspace_id" tf:"optional"` + WorkspaceId []BudgetConfigurationFilterWorkspaceIdClause `tfsdk:"workspace_id" tf:"optional,object"` } type BudgetConfigurationFilterClause struct { @@ -83,7 +83,7 @@ type BudgetConfigurationFilterClause struct { type BudgetConfigurationFilterTagClause struct { Key types.String `tfsdk:"key" tf:"optional"` - Value []BudgetConfigurationFilterClause `tfsdk:"value" tf:"optional"` + Value []BudgetConfigurationFilterClause `tfsdk:"value" tf:"optional,object"` } type BudgetConfigurationFilterWorkspaceIdClause struct { @@ -119,7 +119,7 @@ type CreateBudgetConfigurationBudget struct { // usage to limit the scope of what is considered for this budget. Leave // empty to include all usage for this account. All provided filters must be // matched for usage to be included. - Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional,object"` } type CreateBudgetConfigurationBudgetActionConfigurations struct { @@ -148,12 +148,12 @@ type CreateBudgetConfigurationBudgetAlertConfigurations struct { type CreateBudgetConfigurationRequest struct { // Properties of the new budget configuration. - Budget []CreateBudgetConfigurationBudget `tfsdk:"budget" tf:""` + Budget []CreateBudgetConfigurationBudget `tfsdk:"budget" tf:"object"` } type CreateBudgetConfigurationResponse struct { // The created budget configuration. - Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } type CreateLogDeliveryConfigurationParams struct { @@ -280,7 +280,7 @@ type GetBudgetConfigurationRequest struct { } type GetBudgetConfigurationResponse struct { - Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } // Get log delivery configuration @@ -342,7 +342,7 @@ type LogDeliveryConfiguration struct { // available for usage before March 2019 (`2019-03`). DeliveryStartTime types.String `tfsdk:"delivery_start_time" tf:"optional"` // Databricks log delivery status. - LogDeliveryStatus []LogDeliveryStatus `tfsdk:"log_delivery_status" tf:"optional"` + LogDeliveryStatus []LogDeliveryStatus `tfsdk:"log_delivery_status" tf:"optional,object"` // Log delivery type. Supported values are: // // * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the @@ -438,20 +438,20 @@ type UpdateBudgetConfigurationBudget struct { // usage to limit the scope of what is considered for this budget. Leave // empty to include all usage for this account. All provided filters must be // matched for usage to be included. - Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional,object"` } type UpdateBudgetConfigurationRequest struct { // The updated budget. 
This will overwrite the budget specified by the // budget ID. - Budget []UpdateBudgetConfigurationBudget `tfsdk:"budget" tf:""` + Budget []UpdateBudgetConfigurationBudget `tfsdk:"budget" tf:"object"` // The Databricks budget configuration ID. BudgetId types.String `tfsdk:"-"` } type UpdateBudgetConfigurationResponse struct { // The updated budget. - Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } type UpdateLogDeliveryConfigurationStatusRequest struct { @@ -466,11 +466,11 @@ type UpdateLogDeliveryConfigurationStatusRequest struct { } type WrappedCreateLogDeliveryConfiguration struct { - LogDeliveryConfiguration []CreateLogDeliveryConfigurationParams `tfsdk:"log_delivery_configuration" tf:"optional"` + LogDeliveryConfiguration []CreateLogDeliveryConfigurationParams `tfsdk:"log_delivery_configuration" tf:"optional,object"` } type WrappedLogDeliveryConfiguration struct { - LogDeliveryConfiguration []LogDeliveryConfiguration `tfsdk:"log_delivery_configuration" tf:"optional"` + LogDeliveryConfiguration []LogDeliveryConfiguration `tfsdk:"log_delivery_configuration" tf:"optional,object"` } type WrappedLogDeliveryConfigurations struct { diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index dea0528df..25fa29c01 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -15,11 +15,11 @@ import ( ) type AccountsCreateMetastore struct { - MetastoreInfo []CreateMetastore `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []CreateMetastore `tfsdk:"metastore_info" tf:"optional,object"` } type AccountsCreateMetastoreAssignment struct { - MetastoreAssignment []CreateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []CreateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Workspace ID. 
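Each `optional,object` retag in these generated models reduces to the same runtime check: a configuration that supplies two copies of the block now fails plan-time validation. A standalone sketch of what `listvalidator.SizeAtMost(1)` enforces; the attribute path is made up for illustration, and only the v0.13.0 validator API added in go.mod is assumed.

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
	"github.com/hashicorp/terraform-plugin-framework/path"
	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	ctx := context.Background()
	// Two elements where the schema now allows at most one.
	two, _ := types.ListValueFrom(ctx, types.StringType, []string{"a", "b"})
	req := validator.ListRequest{
		Path:        path.Root("metastore_assignment"), // illustrative path
		ConfigValue: two,
	}
	resp := &validator.ListResponse{}
	listvalidator.SizeAtMost(1).ValidateList(ctx, req, resp)
	// Prints: true (the two-element list is rejected).
	fmt.Println(resp.Diagnostics.HasError())
}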
@@ -27,32 +27,32 @@ type AccountsCreateMetastoreAssignment struct { } type AccountsCreateStorageCredential struct { - CredentialInfo []CreateStorageCredential `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []CreateStorageCredential `tfsdk:"credential_info" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` } type AccountsMetastoreAssignment struct { - MetastoreAssignment []MetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []MetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` } type AccountsMetastoreInfo struct { - MetastoreInfo []MetastoreInfo `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []MetastoreInfo `tfsdk:"metastore_info" tf:"optional,object"` } type AccountsStorageCredentialInfo struct { - CredentialInfo []StorageCredentialInfo `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []StorageCredentialInfo `tfsdk:"credential_info" tf:"optional,object"` } type AccountsUpdateMetastore struct { // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` - MetastoreInfo []UpdateMetastore `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []UpdateMetastore `tfsdk:"metastore_info" tf:"optional,object"` } type AccountsUpdateMetastoreAssignment struct { - MetastoreAssignment []UpdateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []UpdateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Workspace ID. @@ -60,7 +60,7 @@ type AccountsUpdateMetastoreAssignment struct { } type AccountsUpdateStorageCredential struct { - CredentialInfo []UpdateStorageCredential `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []UpdateStorageCredential `tfsdk:"credential_info" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Name of the storage credential. @@ -194,7 +194,7 @@ type CatalogInfo struct { // Username of catalog creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` - EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional,object"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` @@ -219,7 +219,7 @@ type CatalogInfo struct { // remote sharing server. ProviderName types.String `tfsdk:"provider_name" tf:"optional"` // Status of an asynchronously provisioned resource. - ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional"` + ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional,object"` // Kind of catalog securable. SecurableKind types.String `tfsdk:"securable_kind" tf:"optional"` @@ -249,7 +249,7 @@ type ColumnInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` - Mask []ColumnMask `tfsdk:"mask" tf:"optional"` + Mask []ColumnMask `tfsdk:"mask" tf:"optional,object"` // Name of Column. Name types.String `tfsdk:"name" tf:"optional"` // Whether field may be Null (default: true). @@ -309,7 +309,7 @@ type ConnectionInfo struct { // connection. 
Properties map[string]types.String `tfsdk:"properties" tf:"optional"` // Status of an asynchronously provisioned resource. - ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional"` + ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional,object"` // If the connection is read only. ReadOnly types.Bool `tfsdk:"read_only" tf:"optional"` // Kind of connection securable. @@ -328,7 +328,7 @@ type ConnectionInfo struct { // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. type ContinuousUpdateStatus struct { // Progress of the initial data synchronization. - InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional"` + InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional,object"` // The last source table Delta version that was synced to the online table. // Note that this Delta version may not be completely synced to the online // table yet. @@ -384,7 +384,7 @@ type CreateExternalLocation struct { // Name of the storage credential used with this location. CredentialName types.String `tfsdk:"credential_name" tf:""` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. @@ -414,7 +414,7 @@ type CreateFunction struct { // Pretty printed function data type. FullDataType types.String `tfsdk:"full_data_type" tf:""` - InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:""` + InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:"object"` // Whether the function is deterministic. IsDeterministic types.Bool `tfsdk:"is_deterministic" tf:""` // Function null call. @@ -426,7 +426,7 @@ type CreateFunction struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties types.String `tfsdk:"properties" tf:"optional"` // Table function return parameters. - ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` + ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional,object"` // Function language. When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -436,7 +436,7 @@ type CreateFunction struct { // Function body. RoutineDefinition types.String `tfsdk:"routine_definition" tf:""` // Function dependencies. - RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional"` + RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional,object"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:""` // Function security type. @@ -451,7 +451,7 @@ type CreateFunction struct { type CreateFunctionRequest struct { // Partial __FunctionInfo__ specifying the function to be created. - FunctionInfo []CreateFunction `tfsdk:"function_info" tf:""` + FunctionInfo []CreateFunction `tfsdk:"function_info" tf:"object"` } type CreateMetastore struct { @@ -489,15 +489,15 @@ type CreateMonitor struct { // drift metrics (comparing metrics across time windows). 
CustomMetrics []MonitorMetric `tfsdk:"custom_metrics" tf:"optional"` // The data classification config for the monitor. - DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional,object"` // Configuration for monitoring inference logs. - InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional,object"` // The notification settings for the monitor. - Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional,object"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional,object"` // Whether to skip creating a default dashboard summarizing data quality // metrics. SkipBuiltinDashboard types.Bool `tfsdk:"skip_builtin_dashboard" tf:"optional"` @@ -508,11 +508,11 @@ type CreateMonitor struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional,object"` // Full name of the table. TableName types.String `tfsdk:"-"` // Configuration for monitoring time series tables. - TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional,object"` // Optional argument to specify the warehouse for dashboard creation. If not // specified, the first running warehouse will be used. WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` @@ -523,7 +523,7 @@ type CreateOnlineTableRequest struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"name" tf:"optional"` // Specification of the online table. - Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional"` + Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` } type CreateRegisteredModelRequest struct { @@ -558,17 +558,17 @@ type CreateSchema struct { type CreateStorageCredential struct { // The AWS IAM role configuration. - AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. - AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional,object"` // The Azure service principal configuration. - AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // The Cloudflare API token configuration. - CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional,object"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` // The Databricks managed GCP service account configuration. 
- DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // The credential name. The name must be unique within the metastore. Name types.String `tfsdk:"name" tf:""` // Whether the storage credential is only usable for read operations. @@ -582,7 +582,7 @@ type CreateTableConstraint struct { // A table constraint, as defined by *one* of the following fields being // set: __primary_key_constraint__, __foreign_key_constraint__, // __named_table_constraint__. - Constraint []TableConstraint `tfsdk:"constraint" tf:""` + Constraint []TableConstraint `tfsdk:"constraint" tf:"object"` // The full name of the table referenced by the constraint. FullNameArg types.String `tfsdk:"full_name_arg" tf:""` } @@ -778,9 +778,9 @@ type DeltaRuntimePropertiesKvPairs struct { // field must be defined. type Dependency struct { // A function that is dependent on a SQL object. - Function []FunctionDependency `tfsdk:"function" tf:"optional"` + Function []FunctionDependency `tfsdk:"function" tf:"optional,object"` // A table that is dependent on a SQL object. - Table []TableDependency `tfsdk:"table" tf:"optional"` + Table []TableDependency `tfsdk:"table" tf:"optional,object"` } // A list of dependencies. @@ -853,7 +853,7 @@ type EnableResponse struct { // Encryption options that apply to clients connecting to cloud storage. type EncryptionDetails struct { // Server-Side Encryption properties for clients communicating with AWS s3. - SseEncryptionDetails []SseEncryptionDetails `tfsdk:"sse_encryption_details" tf:"optional"` + SseEncryptionDetails []SseEncryptionDetails `tfsdk:"sse_encryption_details" tf:"optional,object"` } // Get boolean reflecting if table exists @@ -880,7 +880,7 @@ type ExternalLocationInfo struct { // Name of the storage credential used with this location. CredentialName types.String `tfsdk:"credential_name" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. @@ -964,7 +964,7 @@ type FunctionInfo struct { // Id of Function, relative to parent schema. FunctionId types.String `tfsdk:"function_id" tf:"optional"` - InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:"optional"` + InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:"optional,object"` // Whether the function is deterministic. IsDeterministic types.Bool `tfsdk:"is_deterministic" tf:"optional"` // Function null call. @@ -980,7 +980,7 @@ type FunctionInfo struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties types.String `tfsdk:"properties" tf:"optional"` // Table function return parameters. - ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` + ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional,object"` // Function language. 
When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -990,7 +990,7 @@ type FunctionInfo struct { // Function body. RoutineDefinition types.String `tfsdk:"routine_definition" tf:"optional"` // Function dependencies. - RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional"` + RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional,object"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:"optional"` // Function security type. @@ -1058,19 +1058,19 @@ type GenerateTemporaryTableCredentialRequest struct { type GenerateTemporaryTableCredentialResponse struct { // AWS temporary credentials for API authentication. Read more at // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. - AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional"` + AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional,object"` // Azure temporary credentials for API authentication. Read more at // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas - AzureUserDelegationSas []AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional"` + AzureUserDelegationSas []AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional,object"` // Server time when the credential will expire, in epoch milliseconds. The // API client is advised to cache the credential given this expiration time. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // GCP temporary credentials for API authentication. Read more at // https://developers.google.com/identity/protocols/oauth2/service-account - GcpOauthToken []GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional"` + GcpOauthToken []GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional,object"` // R2 temporary credentials for API authentication. Read more at // https://developers.cloudflare.com/r2/api/s3/tokens/. - R2TempCredentials []R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional"` + R2TempCredentials []R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional,object"` // The URL of the storage path accessible by the temporary credential. Url types.String `tfsdk:"url" tf:"optional"` } @@ -1276,7 +1276,7 @@ type GetQuotaRequest struct { type GetQuotaResponse struct { // The returned QuotaInfo. - QuotaInfo []QuotaInfo `tfsdk:"quota_info" tf:"optional"` + QuotaInfo []QuotaInfo `tfsdk:"quota_info" tf:"optional,object"` } // Get refresh @@ -1811,7 +1811,7 @@ type ModelVersionInfo struct { // parent schema ModelName types.String `tfsdk:"model_name" tf:"optional"` // Model version dependencies, for feature-store packaged models - ModelVersionDependencies []DependencyList `tfsdk:"model_version_dependencies" tf:"optional"` + ModelVersionDependencies []DependencyList `tfsdk:"model_version_dependencies" tf:"optional,object"` // MLflow run ID used when creating the model version, if ``source`` was // generated by an experiment run stored in an MLflow tracking server RunId types.String `tfsdk:"run_id" tf:"optional"` @@ -1910,26 +1910,26 @@ type MonitorInfo struct { // if the monitor is in PENDING state. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` // The data classification config for the monitor. 
- DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional,object"` // The full name of the drift metrics table. Format: // __catalog_name__.__schema_name__.__table_name__. DriftMetricsTableName types.String `tfsdk:"drift_metrics_table_name" tf:""` // Configuration for monitoring inference logs. - InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional,object"` // The latest failure message of the monitor (if any). LatestMonitorFailureMsg types.String `tfsdk:"latest_monitor_failure_msg" tf:"optional"` // The version of the monitor config (e.g. 1,2,3). If negative, the monitor // may be corrupted. MonitorVersion types.String `tfsdk:"monitor_version" tf:""` // The notification settings for the monitor. - Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional,object"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:"optional"` // The full name of the profile metrics table. Format: // __catalog_name__.__schema_name__.__table_name__. ProfileMetricsTableName types.String `tfsdk:"profile_metrics_table_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional,object"` // List of column expressions to slice data with for targeted analysis. The // data is grouped by each expression independently, resulting in a separate // slice for each predicate and its complements. For high-cardinality @@ -1937,14 +1937,14 @@ type MonitorInfo struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional,object"` // The status of the monitor. Status types.String `tfsdk:"status" tf:""` // The full name of the table to monitor. Format: // __catalog_name__.__schema_name__.__table_name__. TableName types.String `tfsdk:"table_name" tf:""` // Configuration for monitoring time series tables. - TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional,object"` } type MonitorMetric struct { @@ -1976,10 +1976,10 @@ type MonitorMetric struct { type MonitorNotifications struct { // Who to send notifications to on monitor failure. - OnFailure []MonitorDestination `tfsdk:"on_failure" tf:"optional"` + OnFailure []MonitorDestination `tfsdk:"on_failure" tf:"optional,object"` // Who to send notifications to when new data classification tags are // detected. - OnNewClassificationTagDetected []MonitorDestination `tfsdk:"on_new_classification_tag_detected" tf:"optional"` + OnNewClassificationTagDetected []MonitorDestination `tfsdk:"on_new_classification_tag_detected" tf:"optional,object"` } type MonitorRefreshInfo struct { @@ -2033,9 +2033,9 @@ type OnlineTable struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"name" tf:"optional"` // Specification of the online table. 
- Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional"` + Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` // Online Table status - Status []OnlineTableStatus `tfsdk:"status" tf:"optional"` + Status []OnlineTableStatus `tfsdk:"status" tf:"optional,object"` // Data serving REST API URL for this table TableServingUrl types.String `tfsdk:"table_serving_url" tf:"optional"` } @@ -2056,10 +2056,10 @@ type OnlineTableSpec struct { // Primary Key columns to be used for data insert/update in the destination. PrimaryKeyColumns []types.String `tfsdk:"primary_key_columns" tf:"optional"` // Pipeline runs continuously after generating the initial data. - RunContinuously []OnlineTableSpecContinuousSchedulingPolicy `tfsdk:"run_continuously" tf:"optional"` + RunContinuously []OnlineTableSpecContinuousSchedulingPolicy `tfsdk:"run_continuously" tf:"optional,object"` // Pipeline stops after generating the initial data and can be triggered // later (manually, through a cron job or through data triggers) - RunTriggered []OnlineTableSpecTriggeredSchedulingPolicy `tfsdk:"run_triggered" tf:"optional"` + RunTriggered []OnlineTableSpecTriggeredSchedulingPolicy `tfsdk:"run_triggered" tf:"optional,object"` // Three-part (catalog, schema, table) name of the source Delta table. SourceTableFullName types.String `tfsdk:"source_table_full_name" tf:"optional"` // Time series key to deduplicate (tie-break) rows with the same primary @@ -2077,21 +2077,21 @@ type OnlineTableSpecTriggeredSchedulingPolicy struct { type OnlineTableStatus struct { // Detailed status of an online table. Shown if the online table is in the // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. - ContinuousUpdateStatus []ContinuousUpdateStatus `tfsdk:"continuous_update_status" tf:"optional"` + ContinuousUpdateStatus []ContinuousUpdateStatus `tfsdk:"continuous_update_status" tf:"optional,object"` // The state of the online table. DetailedState types.String `tfsdk:"detailed_state" tf:"optional"` // Detailed status of an online table. Shown if the online table is in the // OFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state. - FailedStatus []FailedStatus `tfsdk:"failed_status" tf:"optional"` + FailedStatus []FailedStatus `tfsdk:"failed_status" tf:"optional,object"` // A text description of the current state of the online table. Message types.String `tfsdk:"message" tf:"optional"` // Detailed status of an online table. Shown if the online table is in the // PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT // state. - ProvisioningStatus []ProvisioningStatus `tfsdk:"provisioning_status" tf:"optional"` + ProvisioningStatus []ProvisioningStatus `tfsdk:"provisioning_status" tf:"optional,object"` // Detailed status of an online table. Shown if the online table is in the // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. - TriggeredUpdateStatus []TriggeredUpdateStatus `tfsdk:"triggered_update_status" tf:"optional"` + TriggeredUpdateStatus []TriggeredUpdateStatus `tfsdk:"triggered_update_status" tf:"optional,object"` } type PermissionsChange struct { @@ -2148,7 +2148,7 @@ type ProvisioningInfo struct { type ProvisioningStatus struct { // Details about initial data synchronization. Only populated when in the // PROVISIONING_INITIAL_SNAPSHOT state. 
- InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional"` + InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional,object"` } type QuotaInfo struct { @@ -2268,7 +2268,7 @@ type SchemaInfo struct { // Username of schema creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` - EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional,object"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` @@ -2321,13 +2321,13 @@ type SseEncryptionDetails struct { type StorageCredentialInfo struct { // The AWS IAM role configuration. - AwsIamRole []AwsIamRoleResponse `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleResponse `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. - AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional,object"` // The Azure service principal configuration. - AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // The Cloudflare API token configuration. - CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional,object"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` // Time at which this Credential was created, in epoch milliseconds. @@ -2335,7 +2335,7 @@ type StorageCredentialInfo struct { // Username of credential creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` // The Databricks managed GCP service account configuration. - DatabricksGcpServiceAccount []DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // The unique identifier of the credential. Id types.String `tfsdk:"id" tf:"optional"` // Whether the current securable is accessible from all workspaces or a @@ -2370,11 +2370,11 @@ type SystemSchemaInfo struct { // __primary_key_constraint__, __foreign_key_constraint__, // __named_table_constraint__. type TableConstraint struct { - ForeignKeyConstraint []ForeignKeyConstraint `tfsdk:"foreign_key_constraint" tf:"optional"` + ForeignKeyConstraint []ForeignKeyConstraint `tfsdk:"foreign_key_constraint" tf:"optional,object"` - NamedTableConstraint []NamedTableConstraint `tfsdk:"named_table_constraint" tf:"optional"` + NamedTableConstraint []NamedTableConstraint `tfsdk:"named_table_constraint" tf:"optional,object"` - PrimaryKeyConstraint []PrimaryKeyConstraint `tfsdk:"primary_key_constraint" tf:"optional"` + PrimaryKeyConstraint []PrimaryKeyConstraint `tfsdk:"primary_key_constraint" tf:"optional,object"` } // A table that is dependent on a SQL object. @@ -2414,14 +2414,14 @@ type TableInfo struct { // omitted if table is not deleted. 
DeletedAt types.Int64 `tfsdk:"deleted_at" tf:"optional"` // Information pertaining to current state of the delta table. - DeltaRuntimePropertiesKvpairs []DeltaRuntimePropertiesKvPairs `tfsdk:"delta_runtime_properties_kvpairs" tf:"optional"` + DeltaRuntimePropertiesKvpairs []DeltaRuntimePropertiesKvPairs `tfsdk:"delta_runtime_properties_kvpairs" tf:"optional,object"` - EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional,object"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // Full name of table, in form of // __catalog_name__.__schema_name__.__table_name__ FullName types.String `tfsdk:"full_name" tf:"optional"` @@ -2437,7 +2437,7 @@ type TableInfo struct { // A map of key-value properties attached to the securable. Properties map[string]types.String `tfsdk:"properties" tf:"optional"` - RowFilter []TableRowFilter `tfsdk:"row_filter" tf:"optional"` + RowFilter []TableRowFilter `tfsdk:"row_filter" tf:"optional,object"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:"optional"` // List of schemes whose objects can be referenced without qualification. @@ -2466,7 +2466,7 @@ type TableInfo struct { // provided; - when DependencyList is an empty list, the dependency is // provided but is empty; - when DependencyList is not an empty list, // dependencies are provided and recorded. - ViewDependencies []DependencyList `tfsdk:"view_dependencies" tf:"optional"` + ViewDependencies []DependencyList `tfsdk:"view_dependencies" tf:"optional,object"` } type TableRowFilter struct { @@ -2496,7 +2496,7 @@ type TriggeredUpdateStatus struct { // table to the online table. Timestamp types.String `tfsdk:"timestamp" tf:"optional"` // Progress of the active data synchronization pipeline. - TriggeredUpdateProgress []PipelineProgress `tfsdk:"triggered_update_progress" tf:"optional"` + TriggeredUpdateProgress []PipelineProgress `tfsdk:"triggered_update_progress" tf:"optional,object"` } // Delete an assignment @@ -2551,7 +2551,7 @@ type UpdateExternalLocation struct { // Name of the storage credential used with this location. CredentialName types.String `tfsdk:"credential_name" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. @@ -2639,15 +2639,15 @@ type UpdateMonitor struct { // if the monitor is in PENDING state. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` // The data classification config for the monitor. 
- DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional,object"` // Configuration for monitoring inference logs. - InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional,object"` // The notification settings for the monitor. - Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional,object"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional,object"` // List of column expressions to slice data with for targeted analysis. The // data is grouped by each expression independently, resulting in a separate // slice for each predicate and its complements. For high-cardinality @@ -2655,11 +2655,11 @@ type UpdateMonitor struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional,object"` // Full name of the table. TableName types.String `tfsdk:"-"` // Configuration for monitoring time series tables. - TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional,object"` } type UpdatePermissions struct { @@ -2703,17 +2703,17 @@ type UpdateSchema struct { type UpdateStorageCredential struct { // The AWS IAM role configuration. - AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. - AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional,object"` // The Azure service principal configuration. - AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // The Cloudflare API token configuration. - CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional,object"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` // The Databricks managed GCP service account configuration. - DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // Force update even if there are dependent external locations or external // tables. Force types.Bool `tfsdk:"force" tf:"optional"` @@ -2774,15 +2774,15 @@ type UpdateWorkspaceBindingsParameters struct { type ValidateStorageCredential struct { // The AWS IAM role configuration. 
- AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. - AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional,object"` // The Azure service principal configuration. - AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // The Cloudflare API token configuration. - CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional,object"` // The Databricks created GCP service account configuration. - DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // The name of an existing external location to validate. ExternalLocationName types.String `tfsdk:"external_location_name" tf:"optional"` // Whether the storage credential is only usable for read operations. @@ -2825,7 +2825,7 @@ type VolumeInfo struct { // The identifier of the user who created the volume CreatedBy types.String `tfsdk:"created_by" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // The three-level (fully qualified) name of the volume FullName types.String `tfsdk:"full_name" tf:"optional"` // The unique identifier of the metastore diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index 91eef81ce..dcc16fd50 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -166,7 +166,7 @@ type AzureAttributes struct { // mutated over the lifetime of a cluster. FirstOnDemand types.Int64 `tfsdk:"first_on_demand" tf:"optional"` // Defines values necessary to configure and run Azure Log Analytics agent - LogAnalyticsInfo []LogAnalyticsInfo `tfsdk:"log_analytics_info" tf:"optional"` + LogAnalyticsInfo []LogAnalyticsInfo `tfsdk:"log_analytics_info" tf:"optional,object"` // The max bid price to be used for Azure spot instances. The Max price for // the bid cannot be higher than the on-demand price of the instance. If not // specified, the default value is -1, which specifies that the instance @@ -245,17 +245,17 @@ type ClusterAttributes struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. 
-	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// The configuration for delivering spark logs to a long-term storage
 	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
 	// one destination can be specified for one cluster. If the conf is given,
 	// the logs will be delivered to the destination every `5 mins`. The
 	// destination of driver logs is `$destination/$clusterId/driver`, while the
 	// destination of executor logs is `$destination/$clusterId/executor`.
-	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"`
+	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"`
 	// Cluster name requested by the user. This doesn't have to be unique. If
 	// not specified at creation, the cluster name will be an empty string.
 	ClusterName types.String `tfsdk:"cluster_name" tf:"optional"`
@@ -292,7 +292,7 @@ type ClusterAttributes struct {
 	// mode provides a way that doesn’t have UC nor passthrough enabled.
 	DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"`
 
-	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"`
+	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"`
 	// The optional ID of the instance pool for the driver of the cluster
 	// belongs. The pool cluster uses the instance pool with id
 	// (instance_pool_id) if the driver pool is not assigned.
@@ -310,7 +310,7 @@ type ClusterAttributes struct {
 	EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"`
 	// Attributes related to clusters running on Google Cloud Platform. If not
 	// specified at cluster creation, a set of default values will be used.
-	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// The configuration for storing init scripts. Any number of destinations
 	// can be specified. The scripts are executed sequentially in the order
 	// provided. If `cluster_log_conf` is specified, init script logs are sent
@@ -366,7 +366,7 @@ type ClusterAttributes struct {
 	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
 	SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"`
 
-	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"`
+	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"`
 }
 
 type ClusterCompliance struct {
@@ -386,7 +386,7 @@ type ClusterDetails struct {
 	// Parameters needed in order to automatically scale clusters up and down
 	// based on load. Note: autoscaling works best with DB runtime versions 3.0
 	// or later.
-	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"`
+	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"`
 	// Automatically terminates the cluster after it is inactive for this time
 	// in minutes. If not set, this cluster will not be automatically
 	// terminated. If specified, the threshold must be between 10 and 10000
@@ -395,10 +395,10 @@ type ClusterDetails struct {
 	AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"`
 	// Attributes related to clusters running on Amazon Web Services. If not
 	// specified at cluster creation, a set of default values will be used.
-	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
 	// Attributes related to clusters running on Microsoft Azure. If not
 	// specified at cluster creation, a set of default values will be used.
-	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// Number of CPU cores available for this cluster. Note that this can be
 	// fractional, e.g. 7.5 cores, since certain node types are configured to
 	// share cores between Spark nodes on the same instance.
@@ -412,9 +412,9 @@ type ClusterDetails struct {
 	// the logs will be delivered to the destination every `5 mins`. The
 	// destination of driver logs is `$destination/$clusterId/driver`, while the
 	// destination of executor logs is `$destination/$clusterId/executor`.
-	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"`
+	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"`
 	// Cluster log delivery status.
-	ClusterLogStatus []LogSyncStatus `tfsdk:"cluster_log_status" tf:"optional"`
+	ClusterLogStatus []LogSyncStatus `tfsdk:"cluster_log_status" tf:"optional,object"`
 	// Total amount of cluster memory, in megabytes
 	ClusterMemoryMb types.Int64 `tfsdk:"cluster_memory_mb" tf:"optional"`
 	// Cluster name requested by the user. This doesn't have to be unique. If
@@ -473,11 +473,11 @@ type ClusterDetails struct {
 	// - Name:
 	DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"`
 
-	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"`
+	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"`
 	// Node on which the Spark driver resides. The driver node contains the
 	// Spark master and the Databricks application that manages the per-notebook
 	// Spark REPLs.
-	Driver []SparkNode `tfsdk:"driver" tf:"optional"`
+	Driver []SparkNode `tfsdk:"driver" tf:"optional,object"`
 	// The optional ID of the instance pool for the driver of the cluster
 	// belongs. The pool cluster uses the instance pool with id
 	// (instance_pool_id) if the driver pool is not assigned.
@@ -497,7 +497,7 @@ type ClusterDetails struct {
 	Executors []SparkNode `tfsdk:"executors" tf:"optional"`
 	// Attributes related to clusters running on Google Cloud Platform. If not
 	// specified at cluster creation, a set of default values will be used.
-	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// The configuration for storing init scripts. Any number of destinations
 	// can be specified. The scripts are executed sequentially in the order
 	// provided. If `cluster_log_conf` is specified, init script logs are sent
@@ -575,7 +575,7 @@ type ClusterDetails struct {
 	// or edit this cluster. The contents of `spec` can be used in the body of a
 	// create cluster request. This field might not be populated for older
 	// clusters. Note: not included in the response of the ListClusters API.
-	Spec []ClusterSpec `tfsdk:"spec" tf:"optional"`
+	Spec []ClusterSpec `tfsdk:"spec" tf:"optional,object"`
 	// SSH public key contents that will be added to each Spark node in this
 	// cluster. The corresponding private keys can be used to login with the
 	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
@@ -593,18 +593,18 @@ type ClusterDetails struct {
 	TerminatedTime types.Int64 `tfsdk:"terminated_time" tf:"optional"`
 	// Information about why the cluster was terminated. This field only appears
 	// when the cluster is in a `TERMINATING` or `TERMINATED` state.
-	TerminationReason []TerminationReason `tfsdk:"termination_reason" tf:"optional"`
+	TerminationReason []TerminationReason `tfsdk:"termination_reason" tf:"optional,object"`
 
-	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"`
+	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"`
 }
 
 type ClusterEvent struct {
 	//
 	ClusterId types.String `tfsdk:"cluster_id" tf:""`
 	//
-	DataPlaneEventDetails []DataPlaneEventDetails `tfsdk:"data_plane_event_details" tf:"optional"`
+	DataPlaneEventDetails []DataPlaneEventDetails `tfsdk:"data_plane_event_details" tf:"optional,object"`
 	//
-	Details []EventDetails `tfsdk:"details" tf:"optional"`
+	Details []EventDetails `tfsdk:"details" tf:"optional,object"`
 	// The timestamp when the event occurred, stored as the number of
 	// milliseconds since the Unix epoch. If not provided, this will be assigned
 	// by the Timeline service.
@@ -623,13 +623,13 @@ type ClusterLibraryStatuses struct {
 type ClusterLogConf struct {
 	// destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
 	// "dbfs:/home/cluster_log" } }`
-	Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional"`
+	Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional,object"`
 	// destination and either the region or endpoint need to be provided. e.g.
 	// `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" :
 	// "us-west-2" } }` Cluster iam role is used to access s3, please make sure
 	// the cluster iam role in `instance_profile_arn` has permission to write
 	// data to the s3 destination.
-	S3 []S3StorageInfo `tfsdk:"s3" tf:"optional"`
+	S3 []S3StorageInfo `tfsdk:"s3" tf:"optional,object"`
 }
 
 type ClusterPermission struct {
@@ -733,7 +733,7 @@ type ClusterSize struct {
 	// Parameters needed in order to automatically scale clusters up and down
 	// based on load. Note: autoscaling works best with DB runtime versions 3.0
 	// or later.
-	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"`
+	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"`
 	// Number of worker nodes that this cluster should have. A cluster has one
 	// Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
 	// Spark nodes.
@@ -755,7 +755,7 @@ type ClusterSpec struct {
 	// Parameters needed in order to automatically scale clusters up and down
 	// based on load. Note: autoscaling works best with DB runtime versions 3.0
 	// or later.
-	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"`
+	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"`
 	// Automatically terminates the cluster after it is inactive for this time
 	// in minutes. If not set, this cluster will not be automatically
 	// terminated. If specified, the threshold must be between 10 and 10000
@@ -764,17 +764,17 @@ type ClusterSpec struct {
 	AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"`
 	// Attributes related to clusters running on Amazon Web Services. If not
 	// specified at cluster creation, a set of default values will be used.
-	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
 	// Attributes related to clusters running on Microsoft Azure. If not
 	// specified at cluster creation, a set of default values will be used.
-	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// The configuration for delivering spark logs to a long-term storage
 	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
 	// one destination can be specified for one cluster. If the conf is given,
 	// the logs will be delivered to the destination every `5 mins`. The
 	// destination of driver logs is `$destination/$clusterId/driver`, while the
 	// destination of executor logs is `$destination/$clusterId/executor`.
-	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"`
+	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"`
 	// Cluster name requested by the user. This doesn't have to be unique. If
 	// not specified at creation, the cluster name will be an empty string.
 	ClusterName types.String `tfsdk:"cluster_name" tf:"optional"`
@@ -811,7 +811,7 @@ type ClusterSpec struct {
 	// mode provides a way that doesn’t have UC nor passthrough enabled.
 	DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"`
 
-	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"`
+	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"`
 	// The optional ID of the instance pool for the driver of the cluster
 	// belongs. The pool cluster uses the instance pool with id
 	// (instance_pool_id) if the driver pool is not assigned.
@@ -829,7 +829,7 @@ type ClusterSpec struct {
 	EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"`
 	// Attributes related to clusters running on Google Cloud Platform. If not
 	// specified at cluster creation, a set of default values will be used.
-	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// The configuration for storing init scripts. Any number of destinations
 	// can be specified. The scripts are executed sequentially in the order
 	// provided. If `cluster_log_conf` is specified, init script logs are sent
@@ -896,7 +896,7 @@ type ClusterSpec struct {
 	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
 	SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"`
 
-	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"`
+	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"`
 }
 
 // Get status
@@ -928,7 +928,7 @@ type CommandStatusRequest struct {
 type CommandStatusResponse struct {
 	Id types.String `tfsdk:"id" tf:"optional"`
 
-	Results []Results `tfsdk:"results" tf:"optional"`
+	Results []Results `tfsdk:"results" tf:"optional,object"`
 
 	Status types.String `tfsdk:"status" tf:"optional"`
 }
@@ -954,7 +954,7 @@ type CreateCluster struct {
 	// Parameters needed in order to automatically scale clusters up and down
 	// based on load. Note: autoscaling works best with DB runtime versions 3.0
 	// or later.
-	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"`
+	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"`
 	// Automatically terminates the cluster after it is inactive for this time
 	// in minutes. If not set, this cluster will not be automatically
 	// terminated. If specified, the threshold must be between 10 and 10000
@@ -963,20 +963,20 @@ type CreateCluster struct {
 	AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"`
 	// Attributes related to clusters running on Amazon Web Services. If not
 	// specified at cluster creation, a set of default values will be used.
-	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
 	// Attributes related to clusters running on Microsoft Azure. If not
 	// specified at cluster creation, a set of default values will be used.
-	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// When specified, this clones libraries from a source cluster during the
 	// creation of a new cluster.
-	CloneFrom []CloneCluster `tfsdk:"clone_from" tf:"optional"`
+	CloneFrom []CloneCluster `tfsdk:"clone_from" tf:"optional,object"`
 	// The configuration for delivering spark logs to a long-term storage
 	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
 	// one destination can be specified for one cluster. If the conf is given,
 	// the logs will be delivered to the destination every `5 mins`. The
 	// destination of driver logs is `$destination/$clusterId/driver`, while the
 	// destination of executor logs is `$destination/$clusterId/executor`.
-	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"`
+	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"`
 	// Cluster name requested by the user. This doesn't have to be unique. If
 	// not specified at creation, the cluster name will be an empty string.
 	ClusterName types.String `tfsdk:"cluster_name" tf:"optional"`
@@ -1013,7 +1013,7 @@ type CreateCluster struct {
 	// mode provides a way that doesn’t have UC nor passthrough enabled.
 	DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"`
 
-	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"`
+	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"`
 	// The optional ID of the instance pool for the driver of the cluster
 	// belongs. The pool cluster uses the instance pool with id
 	// (instance_pool_id) if the driver pool is not assigned.
@@ -1031,7 +1031,7 @@ type CreateCluster struct {
 	EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"`
 	// Attributes related to clusters running on Google Cloud Platform. If not
 	// specified at cluster creation, a set of default values will be used.
-	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// The configuration for storing init scripts. Any number of destinations
 	// can be specified. The scripts are executed sequentially in the order
 	// provided. If `cluster_log_conf` is specified, init script logs are sent
@@ -1098,7 +1098,7 @@ type CreateCluster struct {
 	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
 	SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"`
 
-	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"`
+	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"`
 }
 
 type CreateClusterResponse struct {
@@ -1115,10 +1115,10 @@ type CreateContext struct {
 type CreateInstancePool struct {
 	// Attributes related to instance pools running on Amazon Web Services. If
 	// not specified at pool creation, a set of default values will be used.
-	AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
 	// Attributes related to instance pools running on Azure. If not specified
 	// at pool creation, a set of default values will be used.
-	AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// Additional tags for pool resources. Databricks will tag all pool
 	// resources (e.g., AWS instances and EBS volumes) with these tags in
 	// addition to `default_tags`. Notes:
@@ -1127,7 +1127,7 @@ type CreateInstancePool struct {
 	CustomTags map[string]types.String `tfsdk:"custom_tags" tf:"optional"`
 	// Defines the specification of the disks that will be attached to all spark
 	// containers.
-	DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"`
+	DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional,object"`
 	// Autoscaling Local Storage: when enabled, this instances in this pool will
 	// dynamically acquire additional disk space when its Spark workers are
 	// running low on disk space. In AWS, this feature requires specific AWS
@@ -1136,7 +1136,7 @@ type CreateInstancePool struct {
 	EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"`
 	// Attributes related to instance pools running on Google Cloud Platform. If
 	// not specified at pool creation, a set of default values will be used.
-	GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// Automatically terminates the extra instances in the pool cache after they
 	// are inactive for this time in minutes if min_idle_instances requirement
 	// is already met. If not set, the extra pool instances will be
@@ -1314,7 +1314,7 @@ type DiskSpec struct {
 	DiskThroughput types.Int64 `tfsdk:"disk_throughput" tf:"optional"`
 
 	// The type of disks that will be launched with this cluster.
-	DiskType []DiskType `tfsdk:"disk_type" tf:"optional"`
+	DiskType []DiskType `tfsdk:"disk_type" tf:"optional,object"`
 }
 
 type DiskType struct {
@@ -1331,7 +1331,7 @@ type DockerBasicAuth struct {
 }
 
 type DockerImage struct {
-	BasicAuth []DockerBasicAuth `tfsdk:"basic_auth" tf:"optional"`
+	BasicAuth []DockerBasicAuth `tfsdk:"basic_auth" tf:"optional,object"`
 	// URL of the docker image.
 	Url types.String `tfsdk:"url" tf:"optional"`
 }
@@ -1344,7 +1344,7 @@ type EditCluster struct {
 	// Parameters needed in order to automatically scale clusters up and down
 	// based on load. Note: autoscaling works best with DB runtime versions 3.0
 	// or later.
-	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"`
+	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"`
 	// Automatically terminates the cluster after it is inactive for this time
 	// in minutes. If not set, this cluster will not be automatically
 	// terminated. If specified, the threshold must be between 10 and 10000
@@ -1353,10 +1353,10 @@ type EditCluster struct {
 	AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"`
 	// Attributes related to clusters running on Amazon Web Services. If not
 	// specified at cluster creation, a set of default values will be used.
-	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
 	// Attributes related to clusters running on Microsoft Azure. If not
 	// specified at cluster creation, a set of default values will be used.
-	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// ID of the cluser
 	ClusterId types.String `tfsdk:"cluster_id" tf:""`
 	// The configuration for delivering spark logs to a long-term storage
@@ -1365,7 +1365,7 @@ type EditCluster struct {
 	// the logs will be delivered to the destination every `5 mins`. The
 	// destination of driver logs is `$destination/$clusterId/driver`, while the
 	// destination of executor logs is `$destination/$clusterId/executor`.
-	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"`
+	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"`
 	// Cluster name requested by the user. This doesn't have to be unique. If
 	// not specified at creation, the cluster name will be an empty string.
 	ClusterName types.String `tfsdk:"cluster_name" tf:"optional"`
@@ -1402,7 +1402,7 @@ type EditCluster struct {
 	// mode provides a way that doesn’t have UC nor passthrough enabled.
 	DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"`
 
-	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"`
+	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"`
 	// The optional ID of the instance pool for the driver of the cluster
 	// belongs. The pool cluster uses the instance pool with id
 	// (instance_pool_id) if the driver pool is not assigned.
@@ -1420,7 +1420,7 @@ type EditCluster struct {
 	EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"`
 	// Attributes related to clusters running on Google Cloud Platform. If not
 	// specified at cluster creation, a set of default values will be used.
-	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// The configuration for storing init scripts. Any number of destinations
 	// can be specified. The scripts are executed sequentially in the order
 	// provided. If `cluster_log_conf` is specified, init script logs are sent
@@ -1487,7 +1487,7 @@ type EditCluster struct {
 	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
 	SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"`
 
-	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"`
+	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"`
 }
 
 type EditClusterResponse struct {
@@ -1612,11 +1612,11 @@ type Environment struct {
 type EventDetails struct {
 	// * For created clusters, the attributes of the cluster. * For edited
 	// clusters, the new attributes of the cluster.
-	Attributes []ClusterAttributes `tfsdk:"attributes" tf:"optional"`
+	Attributes []ClusterAttributes `tfsdk:"attributes" tf:"optional,object"`
 	// The cause of a change in target size.
 	Cause types.String `tfsdk:"cause" tf:"optional"`
 	// The actual cluster size that was set in the cluster creation or edit.
-	ClusterSize []ClusterSize `tfsdk:"cluster_size" tf:"optional"`
+	ClusterSize []ClusterSize `tfsdk:"cluster_size" tf:"optional,object"`
 	// The current number of vCPUs in the cluster.
 	CurrentNumVcpus types.Int64 `tfsdk:"current_num_vcpus" tf:"optional"`
 	// The current number of nodes in the cluster.
@@ -1634,7 +1634,7 @@ type EventDetails struct {
 	FreeSpace types.Int64 `tfsdk:"free_space" tf:"optional"`
 	// List of global and cluster init scripts associated with this cluster
 	// event.
-	InitScripts []InitScriptEventDetails `tfsdk:"init_scripts" tf:"optional"`
+	InitScripts []InitScriptEventDetails `tfsdk:"init_scripts" tf:"optional,object"`
 	// Instance Id where the event originated from
 	InstanceId types.String `tfsdk:"instance_id" tf:"optional"`
 	// Unique identifier of the specific job run associated with this cluster
@@ -1642,15 +1642,15 @@ type EventDetails struct {
 	// cluster name
 	JobRunName types.String `tfsdk:"job_run_name" tf:"optional"`
 	// The cluster attributes before a cluster was edited.
-	PreviousAttributes []ClusterAttributes `tfsdk:"previous_attributes" tf:"optional"`
+	PreviousAttributes []ClusterAttributes `tfsdk:"previous_attributes" tf:"optional,object"`
 	// The size of the cluster before an edit or resize.
-	PreviousClusterSize []ClusterSize `tfsdk:"previous_cluster_size" tf:"optional"`
+	PreviousClusterSize []ClusterSize `tfsdk:"previous_cluster_size" tf:"optional,object"`
 	// Previous disk size in bytes
 	PreviousDiskSize types.Int64 `tfsdk:"previous_disk_size" tf:"optional"`
 	// A termination reason: * On a TERMINATED event, this is the reason of the
 	// termination. * On a RESIZE_COMPLETE event, this indicates the reason that
 	// we failed to acquire some nodes.
-	Reason []TerminationReason `tfsdk:"reason" tf:"optional"`
+	Reason []TerminationReason `tfsdk:"reason" tf:"optional,object"`
 	// The targeted number of vCPUs in the cluster.
 	TargetNumVcpus types.Int64 `tfsdk:"target_num_vcpus" tf:"optional"`
 	// The targeted number of nodes in the cluster.
@@ -1791,7 +1791,7 @@ type GetEventsResponse struct {
 	Events []ClusterEvent `tfsdk:"events" tf:"optional"`
 	// The parameters required to retrieve the next page of events. Omitted if
 	// there are no more events to read.
-	NextPage []GetEvents `tfsdk:"next_page" tf:"optional"`
+	NextPage []GetEvents `tfsdk:"next_page" tf:"optional,object"`
 	// The total number of events filtered by the start_time, end_time, and
 	// event_types.
 	TotalCount types.Int64 `tfsdk:"total_count" tf:"optional"`
@@ -1806,10 +1806,10 @@ type GetGlobalInitScriptRequest struct {
 type GetInstancePool struct {
 	// Attributes related to instance pools running on Amazon Web Services. If
 	// not specified at pool creation, a set of default values will be used.
-	AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
 	// Attributes related to instance pools running on Azure. If not specified
 	// at pool creation, a set of default values will be used.
-	AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// Additional tags for pool resources. Databricks will tag all pool
 	// resources (e.g., AWS instances and EBS volumes) with these tags in
 	// addition to `default_tags`. Notes:
@@ -1829,7 +1829,7 @@ type GetInstancePool struct {
 	DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"`
 	// Defines the specification of the disks that will be attached to all spark
 	// containers.
-	DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"`
+	DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional,object"`
 	// Autoscaling Local Storage: when enabled, this instances in this pool will
 	// dynamically acquire additional disk space when its Spark workers are
 	// running low on disk space. In AWS, this feature requires specific AWS
@@ -1838,7 +1838,7 @@ type GetInstancePool struct {
 	EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"`
 	// Attributes related to instance pools running on Google Cloud Platform. If
 	// not specified at pool creation, a set of default values will be used.
-	GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// Automatically terminates the extra instances in the pool cache after they
 	// are inactive for this time in minutes if min_idle_instances requirement
 	// is already met. If not set, the extra pool instances will be
@@ -1874,9 +1874,9 @@ type GetInstancePool struct {
 	// Current state of the instance pool.
 	State types.String `tfsdk:"state" tf:"optional"`
 	// Usage statistics about the instance pool.
-	Stats []InstancePoolStats `tfsdk:"stats" tf:"optional"`
+	Stats []InstancePoolStats `tfsdk:"stats" tf:"optional,object"`
 	// Status of failed pending instances in the pool.
-	Status []InstancePoolStatus `tfsdk:"status" tf:"optional"`
+	Status []InstancePoolStatus `tfsdk:"status" tf:"optional,object"`
 }
 
 // Get instance pool permission levels
@@ -2029,35 +2029,35 @@ type InitScriptInfo struct {
 	// destination needs to be provided. e.g. `{ "abfss" : { "destination" :
 	// "abfss://@.dfs.core.windows.net/"
 	// } }
-	Abfss []Adlsgen2Info `tfsdk:"abfss" tf:"optional"`
+	Abfss []Adlsgen2Info `tfsdk:"abfss" tf:"optional,object"`
 	// destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
 	// "dbfs:/home/cluster_log" } }`
-	Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional"`
+	Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional,object"`
 	// destination needs to be provided. e.g. `{ "file" : { "destination" :
 	// "file:/my/local/file.sh" } }`
-	File []LocalFileInfo `tfsdk:"file" tf:"optional"`
+	File []LocalFileInfo `tfsdk:"file" tf:"optional,object"`
 	// destination needs to be provided. e.g. `{ "gcs": { "destination":
 	// "gs://my-bucket/file.sh" } }`
-	Gcs []GcsStorageInfo `tfsdk:"gcs" tf:"optional"`
+	Gcs []GcsStorageInfo `tfsdk:"gcs" tf:"optional,object"`
 	// destination and either the region or endpoint need to be provided. e.g.
 	// `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" :
 	// "us-west-2" } }` Cluster iam role is used to access s3, please make sure
 	// the cluster iam role in `instance_profile_arn` has permission to write
 	// data to the s3 destination.
-	S3 []S3StorageInfo `tfsdk:"s3" tf:"optional"`
+	S3 []S3StorageInfo `tfsdk:"s3" tf:"optional,object"`
 	// destination needs to be provided. e.g. `{ "volumes" : { "destination" :
 	// "/Volumes/my-init.sh" } }`
-	Volumes []VolumesStorageInfo `tfsdk:"volumes" tf:"optional"`
+	Volumes []VolumesStorageInfo `tfsdk:"volumes" tf:"optional,object"`
 	// destination needs to be provided. e.g. `{ "workspace" : { "destination" :
 	// "/Users/user1@databricks.com/my-init.sh" } }`
-	Workspace []WorkspaceStorageInfo `tfsdk:"workspace" tf:"optional"`
+	Workspace []WorkspaceStorageInfo `tfsdk:"workspace" tf:"optional,object"`
 }
 
 type InitScriptInfoAndExecutionDetails struct {
 	// Details about the script
-	ExecutionDetails []InitScriptExecutionDetails `tfsdk:"execution_details" tf:"optional"`
+	ExecutionDetails []InitScriptExecutionDetails `tfsdk:"execution_details" tf:"optional,object"`
 	// The script
-	Script []InitScriptInfo `tfsdk:"script" tf:"optional"`
+	Script []InitScriptInfo `tfsdk:"script" tf:"optional,object"`
 }
 
 type InstallLibraries struct {
@@ -2097,10 +2097,10 @@ type InstancePoolAccessControlResponse struct {
 type InstancePoolAndStats struct {
 	// Attributes related to instance pools running on Amazon Web Services. If
 	// not specified at pool creation, a set of default values will be used.
-	AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
 	// Attributes related to instance pools running on Azure. If not specified
 	// at pool creation, a set of default values will be used.
-	AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// Additional tags for pool resources. Databricks will tag all pool
 	// resources (e.g., AWS instances and EBS volumes) with these tags in
 	// addition to `default_tags`. Notes:
@@ -2120,7 +2120,7 @@ type InstancePoolAndStats struct {
 	DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"`
 	// Defines the specification of the disks that will be attached to all spark
 	// containers.
-	DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"`
+	DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional,object"`
 	// Autoscaling Local Storage: when enabled, this instances in this pool will
 	// dynamically acquire additional disk space when its Spark workers are
 	// running low on disk space. In AWS, this feature requires specific AWS
@@ -2129,7 +2129,7 @@ type InstancePoolAndStats struct {
 	EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"`
 	// Attributes related to instance pools running on Google Cloud Platform. If
 	// not specified at pool creation, a set of default values will be used.
-	GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// Automatically terminates the extra instances in the pool cache after they
 	// are inactive for this time in minutes if min_idle_instances requirement
 	// is already met. If not set, the extra pool instances will be
@@ -2165,9 +2165,9 @@ type InstancePoolAndStats struct {
 	// Current state of the instance pool.
 	State types.String `tfsdk:"state" tf:"optional"`
 	// Usage statistics about the instance pool.
-	Stats []InstancePoolStats `tfsdk:"stats" tf:"optional"`
+	Stats []InstancePoolStats `tfsdk:"stats" tf:"optional,object"`
 	// Status of failed pending instances in the pool.
-	Status []InstancePoolStatus `tfsdk:"status" tf:"optional"`
+	Status []InstancePoolStatus `tfsdk:"status" tf:"optional,object"`
 }
 
 type InstancePoolAwsAttributes struct {
@@ -2314,7 +2314,7 @@ type InstanceProfile struct {
 type Library struct {
 	// Specification of a CRAN library to be installed as part of the library
-	Cran []RCranLibrary `tfsdk:"cran" tf:"optional"`
+	Cran []RCranLibrary `tfsdk:"cran" tf:"optional,object"`
 	// Deprecated. URI of the egg library to install. Installing Python egg
 	// files is deprecated and is not supported in Databricks Runtime 14.0 and
 	// above.
@@ -2329,10 +2329,10 @@ type Library struct {
 	Jar types.String `tfsdk:"jar" tf:"optional"`
 	// Specification of a maven library to be installed. For example: `{
 	// "coordinates": "org.jsoup:jsoup:1.7.2" }`
-	Maven []MavenLibrary `tfsdk:"maven" tf:"optional"`
+	Maven []MavenLibrary `tfsdk:"maven" tf:"optional,object"`
 	// Specification of a PyPi library to be installed. For example: `{
 	// "package": "simplejson" }`
-	Pypi []PythonPyPiLibrary `tfsdk:"pypi" tf:"optional"`
+	Pypi []PythonPyPiLibrary `tfsdk:"pypi" tf:"optional,object"`
 	// URI of the requirements.txt file to install. Only Workspace paths and
 	// Unity Catalog Volumes paths are supported. For example: `{
 	// "requirements": "/Workspace/path/to/requirements.txt" }` or `{
@@ -2354,7 +2354,7 @@ type LibraryFullStatus struct {
 	// libraries UI.
 	IsLibraryForAllClusters types.Bool `tfsdk:"is_library_for_all_clusters" tf:"optional"`
 	// Unique identifier for the library.
-	Library []Library `tfsdk:"library" tf:"optional"`
+	Library []Library `tfsdk:"library" tf:"optional,object"`
 	// All the info and warning messages that have occurred so far for this
 	// library.
 	Messages []types.String `tfsdk:"messages" tf:"optional"`
@@ -2568,9 +2568,9 @@ type NodeType struct {
 	// Memory (in MB) available for this node type.
 	MemoryMb types.Int64 `tfsdk:"memory_mb" tf:""`
 
-	NodeInfo []CloudProviderNodeInfo `tfsdk:"node_info" tf:"optional"`
+	NodeInfo []CloudProviderNodeInfo `tfsdk:"node_info" tf:"optional,object"`
 
-	NodeInstanceType []NodeInstanceType `tfsdk:"node_instance_type" tf:"optional"`
+	NodeInstanceType []NodeInstanceType `tfsdk:"node_instance_type" tf:"optional,object"`
 	// Unique identifier for this node type.
 	NodeTypeId types.String `tfsdk:"node_type_id" tf:""`
 	// Number of CPU cores available for this node type. Note that this can be
@@ -2709,7 +2709,7 @@ type ResizeCluster struct {
 	// Parameters needed in order to automatically scale clusters up and down
 	// based on load. Note: autoscaling works best with DB runtime versions 3.0
 	// or later.
-	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"`
+	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"`
 	// The cluster to be resized.
 	ClusterId types.String `tfsdk:"cluster_id" tf:""`
 	// Number of worker nodes that this cluster should have. A cluster has one
@@ -2801,7 +2801,7 @@ type SparkNode struct {
 	// Globally unique identifier for the host instance from the cloud provider.
 	InstanceId types.String `tfsdk:"instance_id" tf:"optional"`
 	// Attributes specific to AWS for a Spark node.
-	NodeAwsAttributes []SparkNodeAwsAttributes `tfsdk:"node_aws_attributes" tf:"optional"`
+	NodeAwsAttributes []SparkNodeAwsAttributes `tfsdk:"node_aws_attributes" tf:"optional,object"`
 	// Globally unique identifier for this node.
 	NodeId types.String `tfsdk:"node_id" tf:"optional"`
 	// Private IP address (typically a 10.x.x.x address) of the Spark node. Note
@@ -2877,7 +2877,7 @@ type UnpinClusterResponse struct {
 type UpdateCluster struct {
 	// The cluster to be updated.
-	Cluster []UpdateClusterResource `tfsdk:"cluster" tf:"optional"`
+	Cluster []UpdateClusterResource `tfsdk:"cluster" tf:"optional,object"`
 	// ID of the cluster.
 	ClusterId types.String `tfsdk:"cluster_id" tf:""`
 	// Specifies which fields of the cluster will be updated. This is required
@@ -2892,7 +2892,7 @@ type UpdateClusterResource struct {
 	// Parameters needed in order to automatically scale clusters up and down
 	// based on load. Note: autoscaling works best with DB runtime versions 3.0
 	// or later.
-	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"`
+	Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"`
 	// Automatically terminates the cluster after it is inactive for this time
 	// in minutes. If not set, this cluster will not be automatically
 	// terminated. If specified, the threshold must be between 10 and 10000
@@ -2901,17 +2901,17 @@ type UpdateClusterResource struct {
 	AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"`
 	// Attributes related to clusters running on Amazon Web Services. If not
 	// specified at cluster creation, a set of default values will be used.
-	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
 	// Attributes related to clusters running on Microsoft Azure. If not
 	// specified at cluster creation, a set of default values will be used.
-	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
 	// The configuration for delivering spark logs to a long-term storage
 	// destination. Two kinds of destinations (dbfs and s3) are supported. Only
 	// one destination can be specified for one cluster. If the conf is given,
 	// the logs will be delivered to the destination every `5 mins`. The
 	// destination of driver logs is `$destination/$clusterId/driver`, while the
 	// destination of executor logs is `$destination/$clusterId/executor`.
-	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"`
+	ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"`
 	// Cluster name requested by the user. This doesn't have to be unique. If
 	// not specified at creation, the cluster name will be an empty string.
 	ClusterName types.String `tfsdk:"cluster_name" tf:"optional"`
@@ -2948,7 +2948,7 @@ type UpdateClusterResource struct {
 	// mode provides a way that doesn’t have UC nor passthrough enabled.
 	DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"`
 
-	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"`
+	DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"`
 	// The optional ID of the instance pool for the driver of the cluster
 	// belongs. The pool cluster uses the instance pool with id
 	// (instance_pool_id) if the driver pool is not assigned.
@@ -2966,7 +2966,7 @@ type UpdateClusterResource struct {
 	EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"`
 	// Attributes related to clusters running on Google Cloud Platform. If not
 	// specified at cluster creation, a set of default values will be used.
-	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
 	// The configuration for storing init scripts. Any number of destinations
 	// can be specified. The scripts are executed sequentially in the order
 	// provided. If `cluster_log_conf` is specified, init script logs are sent
@@ -3033,7 +3033,7 @@ type UpdateClusterResource struct {
 	// user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
 	SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"`
 
-	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"`
+	WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"`
 }
 
 type UpdateClusterResponse struct {
@@ -3049,7 +3049,7 @@ type VolumesStorageInfo struct {
 
 type WorkloadType struct {
 	// defined what type of clients can use the cluster. E.g. Notebooks, Jobs
-	Clients []ClientsTypes `tfsdk:"clients" tf:""`
+	Clients []ClientsTypes `tfsdk:"clients" tf:"object"`
 }
 
 type WorkspaceStorageInfo struct {
diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go
index 876787f5d..d0035a99d 100755
--- a/internal/service/dashboards_tf/model.go
+++ b/internal/service/dashboards_tf/model.go
@@ -37,7 +37,7 @@ type CreateDashboardRequest struct {
 type CreateScheduleRequest struct {
 	// The cron expression describing the frequency of the periodic refresh for
 	// this schedule.
-	CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""`
+	CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"`
 	// UUID identifying the dashboard to which the schedule belongs.
 	DashboardId types.String `tfsdk:"-"`
 	// The display name for schedule.
@@ -53,7 +53,7 @@ type CreateSubscriptionRequest struct {
 	ScheduleId types.String `tfsdk:"-"`
 	// Subscriber details for users and destinations to be added as subscribers
 	// to the schedule.
-	Subscriber []Subscriber `tfsdk:"subscriber" tf:""`
+	Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"`
 }
 
 type CronSchedule struct {
@@ -147,9 +147,9 @@ type ExecuteMessageQueryRequest struct {
 // Genie AI Response
 type GenieAttachment struct {
-	Query []QueryAttachment `tfsdk:"query" tf:"optional"`
+	Query []QueryAttachment `tfsdk:"query" tf:"optional,object"`
 
-	Text []TextAttachment `tfsdk:"text" tf:"optional"`
+	Text []TextAttachment `tfsdk:"text" tf:"optional,object"`
 }
 
 type GenieConversation struct {
@@ -201,7 +201,7 @@ type GenieGetMessageQueryResultRequest struct {
 type GenieGetMessageQueryResultResponse struct {
 	// SQL Statement Execution response. See [Get status, manifest, and result
 	// first chunk](:method:statementexecution/getstatement) for more details.
-	StatementResponse sql.StatementResponse `tfsdk:"statement_response" tf:"optional"`
+	StatementResponse sql.StatementResponse `tfsdk:"statement_response" tf:"optional,object"`
 }
 
 type GenieMessage struct {
@@ -214,13 +214,13 @@ type GenieMessage struct {
 	// Timestamp when the message was created
 	CreatedTimestamp types.Int64 `tfsdk:"created_timestamp" tf:"optional"`
 	// Error message if AI failed to respond to the message
-	Error []MessageError `tfsdk:"error" tf:"optional"`
+	Error []MessageError `tfsdk:"error" tf:"optional,object"`
 	// Message ID
 	Id types.String `tfsdk:"id" tf:""`
 	// Timestamp when the message was last updated
 	LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp" tf:"optional"`
 	// The result of SQL query if the message has a query attachment
-	QueryResult []Result `tfsdk:"query_result" tf:"optional"`
+	QueryResult []Result `tfsdk:"query_result" tf:"optional,object"`
 	// Genie space ID
 	SpaceId types.String `tfsdk:"space_id" tf:""`
 	// MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching
@@ -253,11 +253,11 @@ type GenieStartConversationMessageRequest struct {
 }
 
 type GenieStartConversationResponse struct {
-	Conversation []GenieConversation `tfsdk:"conversation" tf:"optional"`
+	Conversation []GenieConversation `tfsdk:"conversation" tf:"optional,object"`
 	// Conversation ID
 	ConversationId types.String `tfsdk:"conversation_id" tf:""`
 
-	Message []GenieMessage `tfsdk:"message" tf:"optional"`
+	Message []GenieMessage `tfsdk:"message" tf:"optional,object"`
 	// Message ID
 	MessageId types.String `tfsdk:"message_id" tf:""`
 }
@@ -427,7 +427,7 @@ type Schedule struct {
 	CreateTime types.String `tfsdk:"create_time" tf:"optional"`
 	// The cron expression describing the frequency of the periodic refresh for
 	// this schedule.
-	CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""`
+	CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"`
 	// UUID identifying the dashboard to which the schedule belongs.
 	DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"`
 	// The display name for schedule.
@@ -447,10 +447,10 @@ type Schedule struct {
 type Subscriber struct {
 	// The destination to receive the subscription email. This parameter is
 	// mutually exclusive with `user_subscriber`.
-	DestinationSubscriber []SubscriptionSubscriberDestination `tfsdk:"destination_subscriber" tf:"optional"`
+	DestinationSubscriber []SubscriptionSubscriberDestination `tfsdk:"destination_subscriber" tf:"optional,object"`
 	// The user to receive the subscription email. This parameter is mutually
 	// exclusive with `destination_subscriber`.
-	UserSubscriber []SubscriptionSubscriberUser `tfsdk:"user_subscriber" tf:"optional"`
+	UserSubscriber []SubscriptionSubscriberUser `tfsdk:"user_subscriber" tf:"optional,object"`
 }
 
 type Subscription struct {
@@ -469,7 +469,7 @@ type Subscription struct {
 	ScheduleId types.String `tfsdk:"schedule_id" tf:"optional"`
 	// Subscriber details for users and destinations to be added as subscribers
 	// to the schedule.
-	Subscriber []Subscriber `tfsdk:"subscriber" tf:""`
+	Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"`
 	// UUID identifying the subscription.
 	SubscriptionId types.String `tfsdk:"subscription_id" tf:"optional"`
 	// A timestamp indicating when the subscription was last updated.
@@ -536,7 +536,7 @@ type UpdateDashboardRequest struct {
 type UpdateScheduleRequest struct {
 	// The cron expression describing the frequency of the periodic refresh for
 	// this schedule.
-	CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""`
+	CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"`
 	// UUID identifying the dashboard to which the schedule belongs.
 	DashboardId types.String `tfsdk:"-"`
 	// The display name for schedule.
diff --git a/internal/service/iam_tf/model.go b/internal/service/iam_tf/model.go
index a590d0562..7eee54840 100755
--- a/internal/service/iam_tf/model.go
+++ b/internal/service/iam_tf/model.go
@@ -263,7 +263,7 @@ type Group struct {
 	Members []ComplexValue `tfsdk:"members" tf:"optional"`
 	// Container for the group identifier. Workspace local versus account.
-	Meta []ResourceMeta `tfsdk:"meta" tf:"optional"`
+	Meta []ResourceMeta `tfsdk:"meta" tf:"optional,object"`
 	// Corresponds to AWS instance profile/arn role.
 	Roles []ComplexValue `tfsdk:"roles" tf:"optional"`
 	// The schema of the group.
@@ -584,7 +584,7 @@ type PermissionAssignment struct {
 	// The permissions level of the principal.
 	Permissions []types.String `tfsdk:"permissions" tf:"optional"`
 	// Information about the principal assigned to the workspace.
-	Principal []PrincipalOutput `tfsdk:"principal" tf:"optional"`
+	Principal []PrincipalOutput `tfsdk:"principal" tf:"optional,object"`
 }
 
 type PermissionAssignments struct {
@@ -694,7 +694,7 @@ type UpdateRuleSetRequest struct {
 	// Name of the rule set.
 	Name types.String `tfsdk:"name" tf:""`
 
-	RuleSet []RuleSetUpdateRequest `tfsdk:"rule_set" tf:""`
+	RuleSet []RuleSetUpdateRequest `tfsdk:"rule_set" tf:"object"`
 }
 
 type UpdateWorkspaceAssignments struct {
@@ -736,7 +736,7 @@ type User struct {
 	// provided by the client will be ignored.
 	Id types.String `tfsdk:"id" tf:"optional"`
 
-	Name []Name `tfsdk:"name" tf:"optional"`
+	Name []Name `tfsdk:"name" tf:"optional,object"`
 	// Corresponds to AWS instance profile/arn role.
 	Roles []ComplexValue `tfsdk:"roles" tf:"optional"`
 	// The schema of the user.
diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go
index 71c3096a4..d2544ac0d 100755
--- a/internal/service/jobs_tf/model.go
+++ b/internal/service/jobs_tf/model.go
@@ -26,7 +26,7 @@ type BaseJob struct {
 	JobId types.Int64 `tfsdk:"job_id" tf:"optional"`
 	// Settings for this job and all of its runs. These settings can be updated
 	// using the `resetJob` method.
-	Settings []JobSettings `tfsdk:"settings" tf:"optional"`
+	Settings []JobSettings `tfsdk:"settings" tf:"optional,object"`
 }
 
 type BaseRun struct {
@@ -47,10 +47,10 @@ type BaseRun struct {
 	// The cluster used for this run. If the run is specified to use a new
 	// cluster, this field is set once the Jobs service has requested a cluster
 	// for the run.
-	ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"`
+	ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"`
 	// A snapshot of the job’s cluster specification when this run was
 	// created.
-	ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional"`
+	ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional,object"`
 	// The creator user name. This field won’t be included in the response if
 	// the user has already been deleted.
 	CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"`
@@ -77,7 +77,7 @@ type BaseRun struct {
 	//
 	// Note: dbt and SQL File tasks support only version-controlled sources. If
 	// dbt or SQL File tasks are used, `git_source` must be defined on the job.
-	GitSource []GitSource `tfsdk:"git_source" tf:"optional"`
+	GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"`
 	// A list of job cluster specifications that can be shared and reused by
 	// tasks of this job. Libraries cannot be declared in a shared job cluster.
 	// You must declare dependent libraries in task settings.
@@ -98,7 +98,7 @@ type BaseRun struct {
 	// run_id of the original attempt; otherwise, it is the same as the run_id.
 	OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"`
 	// The parameters used for this run.
-	OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional"`
+	OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional,object"`
 	// The time in milliseconds that the run has spent in the queue.
 	QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"`
 	// The repair history of the run.
@@ -123,7 +123,7 @@ type BaseRun struct {
 	RunType types.String `tfsdk:"run_type" tf:"optional"`
 	// The cron schedule that triggered this run if it was triggered by the
 	// periodic scheduler.
-	Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"`
+	Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"`
 	// The time in milliseconds it took to set up the cluster. For runs that run
 	// on new clusters this is the cluster creation time, for runs that run on
 	// existing clusters this time should be very short. The duration of a task
@@ -138,9 +138,9 @@ type BaseRun struct {
 	// new cluster, this is the time the cluster creation call is issued.
 	StartTime types.Int64 `tfsdk:"start_time" tf:"optional"`
 	// Deprecated. Please use the `status` field instead.
-	State []RunState `tfsdk:"state" tf:"optional"`
+	State []RunState `tfsdk:"state" tf:"optional,object"`
 	// The current status of the run
-	Status []RunStatus `tfsdk:"status" tf:"optional"`
+	Status []RunStatus `tfsdk:"status" tf:"optional,object"`
 	// The list of tasks performed by the run. Each task has its own `run_id`
 	// which you can use to call `JobsGetOutput` to retrieve the run resutls.
 	Tasks []RunTask `tfsdk:"tasks" tf:"optional"`
@@ -156,7 +156,7 @@ type BaseRun struct {
 	// arrival. * `TABLE`: Indicates a run that is triggered by a table update.
 	Trigger types.String `tfsdk:"trigger" tf:"optional"`
 	// Additional details about what triggered the run
-	TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional"`
+	TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional,object"`
 }
 
 type CancelAllRuns struct {
@@ -213,7 +213,7 @@ type ClusterSpec struct {
 	Libraries compute.Library `tfsdk:"library" tf:"optional"`
 	// If new_cluster, a description of a new cluster that is created for each
 	// run.
-	NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"`
+	NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"`
 }
 
 type ConditionTask struct {
@@ -248,9 +248,9 @@ type CreateJob struct {
 	// An optional continuous property for this job. The continuous property
 	// will ensure that there is always one run executing. Only one of
 	// `schedule` and `continuous` can be used.
-	Continuous []Continuous `tfsdk:"continuous" tf:"optional"`
+	Continuous []Continuous `tfsdk:"continuous" tf:"optional,object"`
 	// Deployment information for jobs managed by external sources.
-	Deployment []JobDeployment `tfsdk:"deployment" tf:"optional"`
+	Deployment []JobDeployment `tfsdk:"deployment" tf:"optional,object"`
 	// An optional description for the job. The maximum length is 27700
 	// characters in UTF-8 encoding.
 	Description types.String `tfsdk:"description" tf:"optional"`
@@ -261,7 +261,7 @@ type CreateJob struct {
 	EditMode types.String `tfsdk:"edit_mode" tf:"optional"`
 	// An optional set of email addresses that is notified when runs of this job
 	// begin or complete as well as when this job is deleted.
-	EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"`
+	EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"`
 	// A list of task execution environment specifications that can be
 	// referenced by serverless tasks of this job. An environment is required to
 	// be present for serverless tasks. For serverless notebook tasks, the
@@ -283,9 +283,9 @@ type CreateJob struct {
 	//
 	// Note: dbt and SQL File tasks support only version-controlled sources. If
 	// dbt or SQL File tasks are used, `git_source` must be defined on the job.
-	GitSource []GitSource `tfsdk:"git_source" tf:"optional"`
+	GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"`
 	// An optional set of health rules that can be defined for this job.
-	Health []JobsHealthRules `tfsdk:"health" tf:"optional"`
+	Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"`
 	// A list of job cluster specifications that can be shared and reused by
 	// tasks of this job. Libraries cannot be declared in a shared job cluster.
 	// You must declare dependent libraries in task settings.
@@ -308,22 +308,22 @@ type CreateJob struct {
 	// Optional notification settings that are used when sending notifications
 	// to each of the `email_notifications` and `webhook_notifications` for this
 	// job.
-	NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"`
+	NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"`
 	// Job-level parameter definitions
 	Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"`
 	// The queue settings of the job.
-	Queue []QueueSettings `tfsdk:"queue" tf:"optional"`
+	Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"`
 	// Write-only setting. Specifies the user, service principal or group that
 	// the job/pipeline runs as. If not specified, the job/pipeline runs as the
 	// user who created the job/pipeline.
 	//
 	// Exactly one of `user_name`, `service_principal_name`, `group_name` should
 	// be specified. If not, an error is thrown.
-	RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"`
+	RunAs []JobRunAs `tfsdk:"run_as" tf:"optional,object"`
 	// An optional periodic schedule for this job. The default behavior is that
 	// the job only runs when triggered by clicking “Run Now” in the Jobs UI
 	// or sending an API request to `runNow`.
-	Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"`
+	Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"`
 	// A map of tags associated with the job. These are forwarded to the cluster
 	// as cluster tags for jobs clusters, and are subject to the same
 	// limitations as cluster tags. A maximum of 25 tags can be added to the
@@ -337,10 +337,10 @@ type CreateJob struct {
 	// A configuration to trigger a run when certain conditions are met. The
 	// default behavior is that the job runs only when triggered by clicking
 	// “Run Now” in the Jobs UI or sending an API request to `runNow`.
-	Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional"`
+	Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional,object"`
 	// A collection of system notification IDs to notify when runs of this job
 	// begin or complete.
-	WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"`
+	WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"`
 }
 
 // Job was created successfully
@@ -467,7 +467,7 @@ type EnforcePolicyComplianceResponse struct {
 	// clusters. Updated job settings are derived by applying policy default
 	// values to the existing job clusters in order to satisfy policy
 	// requirements.
-	Settings []JobSettings `tfsdk:"settings" tf:"optional"`
+	Settings []JobSettings `tfsdk:"settings" tf:"optional,object"`
 }
 
 // Run was exported successfully.
@@ -507,7 +507,7 @@ type ForEachStats struct {
 	// Sample of 3 most common error messages occurred during the iteration.
 	ErrorMessageStats []ForEachTaskErrorMessageStats `tfsdk:"error_message_stats" tf:"optional"`
 	// Describes stats of the iteration. Only latest retries are considered.
- TaskRunStats []ForEachTaskTaskRunStats `tfsdk:"task_run_stats" tf:"optional"` + TaskRunStats []ForEachTaskTaskRunStats `tfsdk:"task_run_stats" tf:"optional,object"` } type ForEachTask struct { @@ -519,7 +519,7 @@ type ForEachTask struct { // an array parameter. Inputs types.String `tfsdk:"inputs" tf:""` // Configuration for the task that will be run for each element in the array - Task []Task `tfsdk:"task" tf:""` + Task []Task `tfsdk:"task" tf:"object"` } type ForEachTaskErrorMessageStats struct { @@ -644,7 +644,7 @@ type GitSource struct { GitProvider types.String `tfsdk:"git_provider" tf:""` // Read-only state of the remote repository at the time the job was run. // This field is only included on job runs. - GitSnapshot []GitSnapshot `tfsdk:"git_snapshot" tf:"optional"` + GitSnapshot []GitSnapshot `tfsdk:"git_snapshot" tf:"optional,object"` // Name of the tag to be checked out and used by this job. This field cannot // be specified in conjunction with git_branch or git_commit. GitTag types.String `tfsdk:"tag" tf:"optional"` @@ -652,7 +652,7 @@ type GitSource struct { GitUrl types.String `tfsdk:"url" tf:""` // The source of the job specification in the remote repository when the job // is source controlled. - JobSource []JobSource `tfsdk:"job_source" tf:"optional"` + JobSource []JobSource `tfsdk:"job_source" tf:"optional,object"` } // Job was retrieved successfully. @@ -675,7 +675,7 @@ type Job struct { RunAsUserName types.String `tfsdk:"run_as_user_name" tf:"optional"` // Settings for this job and all of its runs. These settings can be updated // using the `resetJob` method. - Settings []JobSettings `tfsdk:"settings" tf:"optional"` + Settings []JobSettings `tfsdk:"settings" tf:"optional,object"` } type JobAccessControlRequest struct { @@ -708,7 +708,7 @@ type JobCluster struct { // determine which cluster to launch for the task execution. JobClusterKey types.String `tfsdk:"job_cluster_key" tf:""` // If new_cluster, a description of a cluster that is created for each task. - NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:""` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"object"` } type JobCompliance struct { @@ -775,7 +775,7 @@ type JobEnvironment struct { // The environment entity used to preserve serverless environment side panel // and jobs' environment for non-notebook task. In this minimal environment // spec, only pip dependencies are supported. - Spec compute.Environment `tfsdk:"spec" tf:"optional"` + Spec compute.Environment `tfsdk:"spec" tf:"optional,object"` } type JobNotificationSettings struct { @@ -851,9 +851,9 @@ type JobSettings struct { // An optional continuous property for this job. The continuous property // will ensure that there is always one run executing. Only one of // `schedule` and `continuous` can be used. - Continuous []Continuous `tfsdk:"continuous" tf:"optional"` + Continuous []Continuous `tfsdk:"continuous" tf:"optional,object"` // Deployment information for jobs managed by external sources. - Deployment []JobDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []JobDeployment `tfsdk:"deployment" tf:"optional,object"` // An optional description for the job. The maximum length is 27700 // characters in UTF-8 encoding. Description types.String `tfsdk:"description" tf:"optional"` @@ -864,7 +864,7 @@ type JobSettings struct { EditMode types.String `tfsdk:"edit_mode" tf:"optional"` // An optional set of email addresses that is notified when runs of this job // begin or complete as well as when this job is deleted. 
- EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // A list of task execution environment specifications that can be // referenced by serverless tasks of this job. An environment is required to // be present for serverless tasks. For serverless notebook tasks, the @@ -886,9 +886,9 @@ type JobSettings struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. @@ -911,22 +911,22 @@ type JobSettings struct { // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // job. - NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // Job-level parameter definitions Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. - Queue []QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` // Write-only setting. Specifies the user, service principal or group that // the job/pipeline runs as. If not specified, the job/pipeline runs as the // user who created the job/pipeline. // // Exactly one of `user_name`, `service_principal_name`, `group_name` should // be specified. If not, an error is thrown. - RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"` + RunAs []JobRunAs `tfsdk:"run_as" tf:"optional,object"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI // or sending an API request to `runNow`. - Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"` // A map of tags associated with the job. These are forwarded to the cluster // as cluster tags for jobs clusters, and are subject to the same // limitations as cluster tags. A maximum of 25 tags can be added to the @@ -940,10 +940,10 @@ type JobSettings struct { // A configuration to trigger a run when certain conditions are met. The // default behavior is that the job runs only when triggered by clicking // “Run Now” in the Jobs UI or sending an API request to `runNow`. - Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional"` + Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional,object"` // A collection of system notification IDs to notify when runs of this job // begin or complete. 
- WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } // The source of the job specification in the remote repository when the job is @@ -1221,9 +1221,9 @@ type RepairHistoryItem struct { // The start time of the (repaired) run. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State []RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional,object"` // The current status of the run - Status []RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional,object"` // The run IDs of the task runs that ran as part of this repair history // item. TaskRunIds []types.Int64 `tfsdk:"task_run_ids" tf:"optional"` @@ -1276,7 +1276,7 @@ type RepairRun struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1348,7 +1348,7 @@ type ResetJob struct { // // Changes to the field `JobBaseSettings.timeout_seconds` are applied to // active runs. Changes to other fields are applied to future runs only. - NewSettings []JobSettings `tfsdk:"new_settings" tf:""` + NewSettings []JobSettings `tfsdk:"new_settings" tf:"object"` } type ResetResponse struct { @@ -1389,25 +1389,25 @@ type ResolvedStringParamsValues struct { } type ResolvedValues struct { - ConditionTask []ResolvedConditionTaskValues `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ResolvedConditionTaskValues `tfsdk:"condition_task" tf:"optional,object"` - DbtTask []ResolvedDbtTaskValues `tfsdk:"dbt_task" tf:"optional"` + DbtTask []ResolvedDbtTaskValues `tfsdk:"dbt_task" tf:"optional,object"` - NotebookTask []ResolvedNotebookTaskValues `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []ResolvedNotebookTaskValues `tfsdk:"notebook_task" tf:"optional,object"` - PythonWheelTask []ResolvedPythonWheelTaskValues `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []ResolvedPythonWheelTaskValues `tfsdk:"python_wheel_task" tf:"optional,object"` - RunJobTask []ResolvedRunJobTaskValues `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []ResolvedRunJobTaskValues `tfsdk:"run_job_task" tf:"optional,object"` - SimulationTask []ResolvedParamPairValues `tfsdk:"simulation_task" tf:"optional"` + SimulationTask []ResolvedParamPairValues `tfsdk:"simulation_task" tf:"optional,object"` - SparkJarTask []ResolvedStringParamsValues `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []ResolvedStringParamsValues `tfsdk:"spark_jar_task" tf:"optional,object"` - SparkPythonTask []ResolvedStringParamsValues `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []ResolvedStringParamsValues `tfsdk:"spark_python_task" tf:"optional,object"` - SparkSubmitTask []ResolvedStringParamsValues `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []ResolvedStringParamsValues `tfsdk:"spark_submit_task" tf:"optional,object"` - SqlTask []ResolvedParamPairValues `tfsdk:"sql_task" tf:"optional"` + SqlTask []ResolvedParamPairValues `tfsdk:"sql_task" tf:"optional,object"` } // Run was retrieved successfully @@ -1429,10 +1429,10 @@ type Run struct { // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. - ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` + ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"` // A snapshot of the job’s cluster specification when this run was // created. - ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional"` + ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional,object"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` @@ -1459,7 +1459,7 @@ type Run struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // Only populated by for-each iterations. The parent for-each task is // located in tasks array. Iterations []RunTask `tfsdk:"iterations" tf:"optional"` @@ -1485,7 +1485,7 @@ type Run struct { // run_id of the original attempt; otherwise, it is the same as the run_id. OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"` // The parameters used for this run. - OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional"` + OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional,object"` // A token that can be used to list the previous page of sub-resources. PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` // The time in milliseconds that the run has spent in the queue. @@ -1512,7 +1512,7 @@ type Run struct { RunType types.String `tfsdk:"run_type" tf:"optional"` // The cron schedule that triggered this run if it was triggered by the // periodic scheduler. - Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task @@ -1527,9 +1527,9 @@ type Run struct { // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State []RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional,object"` // The current status of the run - Status []RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional,object"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run results. Tasks []RunTask `tfsdk:"tasks" tf:"optional"` @@ -1545,7 +1545,7 @@ type Run struct { // arrival. * `TABLE`: Indicates a run that is triggered by a table update.
Trigger types.String `tfsdk:"trigger" tf:"optional"` // Additional details about what triggered the run - TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional"` + TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional,object"` } type RunConditionTask struct { @@ -1581,9 +1581,9 @@ type RunForEachTask struct { Inputs types.String `tfsdk:"inputs" tf:""` // Read only field. Populated for GetRun and ListRuns RPC calls and stores // the execution stats of a For each task - Stats []ForEachStats `tfsdk:"stats" tf:"optional"` + Stats []ForEachStats `tfsdk:"stats" tf:"optional,object"` // Configuration for the task that will be run for each element in the array - Task []Task `tfsdk:"task" tf:""` + Task []Task `tfsdk:"task" tf:"object"` } type RunJobOutput struct { @@ -1632,7 +1632,7 @@ type RunJobTask struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1735,7 +1735,7 @@ type RunNow struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1757,7 +1757,7 @@ type RunNow struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables PythonParams []types.String `tfsdk:"python_params" tf:"optional"` // The queue settings of the run. - Queue []QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` // A list of parameters for jobs with spark submit task, for example // `"spark_submit_params": ["--class", // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to @@ -1795,7 +1795,7 @@ type RunNowResponse struct { // Run output was retrieved successfully. type RunOutput struct { // The output of a dbt task, if available. - DbtOutput []DbtOutput `tfsdk:"dbt_output" tf:"optional"` + DbtOutput []DbtOutput `tfsdk:"dbt_output" tf:"optional,object"` // An error message indicating why a task failed or why output is not // available. The message is unstructured, and its exact format is subject // to change. @@ -1816,7 +1816,7 @@ type RunOutput struct { // Whether the logs are truncated. LogsTruncated types.Bool `tfsdk:"logs_truncated" tf:"optional"` // All details of the run except for its output. - Metadata []Run `tfsdk:"metadata" tf:"optional"` + Metadata []Run `tfsdk:"metadata" tf:"optional,object"` // The output of a notebook task, if available. A notebook task that // terminates (either successfully or with a failure) without calling // `dbutils.notebook.exit()` is considered to have an empty output. This @@ -1825,11 +1825,11 @@ type RunOutput struct { // the [ClusterLogConf] field to configure log storage for the job cluster.
// // [ClusterLogConf]: https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlogconf - NotebookOutput []NotebookOutput `tfsdk:"notebook_output" tf:"optional"` + NotebookOutput []NotebookOutput `tfsdk:"notebook_output" tf:"optional,object"` // The output of a run job task, if available - RunJobOutput []RunJobOutput `tfsdk:"run_job_output" tf:"optional"` + RunJobOutput []RunJobOutput `tfsdk:"run_job_output" tf:"optional,object"` // The output of a SQL task, if available. - SqlOutput []SqlOutput `tfsdk:"sql_output" tf:"optional"` + SqlOutput []SqlOutput `tfsdk:"sql_output" tf:"optional,object"` } type RunParameters struct { @@ -1869,7 +1869,7 @@ type RunParameters struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1936,12 +1936,12 @@ type RunState struct { // The current status of the run type RunStatus struct { // If the run was queued, details about the reason for queuing the run. - QueueDetails []QueueDetails `tfsdk:"queue_details" tf:"optional"` + QueueDetails []QueueDetails `tfsdk:"queue_details" tf:"optional,object"` // The current state of the run. State types.String `tfsdk:"state" tf:"optional"` // If the run is in a TERMINATING or TERMINATED state, details about the // reason for terminating the run. - TerminationDetails []TerminationDetails `tfsdk:"termination_details" tf:"optional"` + TerminationDetails []TerminationDetails `tfsdk:"termination_details" tf:"optional,object"` } // Used when outputting a child run, in GetRun or ListRuns. @@ -1963,15 +1963,15 @@ type RunTask struct { // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. - ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` + ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"` // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. - ConditionTask []RunConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []RunConditionTask `tfsdk:"condition_task" tf:"optional,object"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name @@ -1981,7 +1981,7 @@ type RunTask struct { Description types.String `tfsdk:"description" tf:"optional"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. 
- EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` @@ -2004,7 +2004,7 @@ type RunTask struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask []RunForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []RunForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. If `git_source` is set, @@ -2013,7 +2013,7 @@ type RunTask struct { // `WORKSPACE` on the task. Note: dbt and SQL File tasks support only // version-controlled sources. If dbt or SQL File tasks are used, // `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey types.String `tfsdk:"job_cluster_key" tf:"optional"` @@ -2022,22 +2022,22 @@ type RunTask struct { Libraries compute.Library `tfsdk:"library" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. - NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. - NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // If pipeline_task, indicates that this task must execute a Pipeline. - PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` // Parameter values including resolved references - ResolvedValues []ResolvedValues `tfsdk:"resolved_values" tf:"optional"` + ResolvedValues []ResolvedValues `tfsdk:"resolved_values" tf:"optional,object"` // The time in milliseconds it took the job run and all of its repairs to // finish. RunDuration types.Int64 `tfsdk:"run_duration" tf:"optional"` @@ -2049,7 +2049,7 @@ type RunTask struct { // possible values. 
RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` RunPageUrl types.String `tfsdk:"run_page_url" tf:"optional"` // The time in milliseconds it took to set up the cluster. For runs that run @@ -2061,9 +2061,9 @@ type RunTask struct { // `run_duration` field. SetupDuration types.Int64 `tfsdk:"setup_duration" tf:"optional"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2081,18 +2081,18 @@ type RunTask struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State []RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional,object"` // The current status of the run - Status []RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2104,7 +2104,7 @@ type RunTask struct { // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. // Task webhooks respect the task notification settings. - WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } type SparkJarTask struct { @@ -2191,7 +2191,7 @@ type SqlDashboardWidgetOutput struct { // Time (in epoch milliseconds) when execution of the SQL widget ends. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` // The information about the error when execution fails. - Error []SqlOutputError `tfsdk:"error" tf:"optional"` + Error []SqlOutputError `tfsdk:"error" tf:"optional,object"` // The link to find the output results. OutputLink types.String `tfsdk:"output_link" tf:"optional"` // Time (in epoch milliseconds) when execution of the SQL widget starts. 
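
The jobs hunks above apply one mechanical rewrite: every slice-typed field that models a single nested API object gains an `object` marker in its `tf` tag, while genuinely repeated collections (`tasks`, `parameter`, `environments`, `job_clusters`) keep a plain `optional`. A minimal sketch of the resulting convention; the type definitions below are trimmed stand-ins for the generated models, not code from this change:

```go
package example

import "github.com/hashicorp/terraform-plugin-framework/types"

// Trimmed stand-in for the generated CronSchedule model.
type CronSchedule struct {
	QuartzCronExpression types.String `tfsdk:"quartz_cron_expression" tf:""`
	TimezoneId           types.String `tfsdk:"timezone_id" tf:""`
}

// Trimmed stand-in for the generated JobParameterDefinition model.
type JobParameterDefinition struct {
	Name    types.String `tfsdk:"name" tf:""`
	Default types.String `tfsdk:"default" tf:""`
}

type JobSettings struct {
	Name types.String `tfsdk:"name" tf:"optional"`
	// A single nested object: still slice-typed so it renders as a
	// Terraform block, but the ",object" marker flags it as holding at
	// most one element.
	Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"`
	// A genuinely repeated nested block keeps the plain "optional" tag.
	Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"`
}
```
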
@@ -2206,11 +2206,11 @@ type SqlDashboardWidgetOutput struct { type SqlOutput struct { // The output of a SQL alert task, if available. - AlertOutput []SqlAlertOutput `tfsdk:"alert_output" tf:"optional"` + AlertOutput []SqlAlertOutput `tfsdk:"alert_output" tf:"optional,object"` // The output of a SQL dashboard task, if available. - DashboardOutput []SqlDashboardOutput `tfsdk:"dashboard_output" tf:"optional"` + DashboardOutput []SqlDashboardOutput `tfsdk:"dashboard_output" tf:"optional,object"` // The output of a SQL query task, if available. - QueryOutput []SqlQueryOutput `tfsdk:"query_output" tf:"optional"` + QueryOutput []SqlQueryOutput `tfsdk:"query_output" tf:"optional,object"` } type SqlOutputError struct { @@ -2238,17 +2238,17 @@ type SqlStatementOutput struct { type SqlTask struct { // If alert, indicates that this job must refresh a SQL alert. - Alert []SqlTaskAlert `tfsdk:"alert" tf:"optional"` + Alert []SqlTaskAlert `tfsdk:"alert" tf:"optional,object"` // If dashboard, indicates that this job must refresh a SQL dashboard. - Dashboard []SqlTaskDashboard `tfsdk:"dashboard" tf:"optional"` + Dashboard []SqlTaskDashboard `tfsdk:"dashboard" tf:"optional,object"` // If file, indicates that this job runs a SQL file in a remote Git // repository. - File []SqlTaskFile `tfsdk:"file" tf:"optional"` + File []SqlTaskFile `tfsdk:"file" tf:"optional,object"` // Parameters to be used for each run of this job. The SQL alert task does // not support custom parameters. Parameters map[string]types.String `tfsdk:"parameters" tf:"optional"` // If query, indicates that this job must execute a SQL query. - Query []SqlTaskQuery `tfsdk:"query" tf:"optional"` + Query []SqlTaskQuery `tfsdk:"query" tf:"optional,object"` // The canonical identifier of the SQL warehouse. Recommended to use with // serverless or pro SQL warehouses. Classic SQL warehouses are only // supported for SQL alert, dashboard and query tasks and are limited to @@ -2314,7 +2314,7 @@ type SubmitRun struct { AccessControlList []JobAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // An optional set of email addresses notified when the run begins or // completes. - EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // A list of task execution environment specifications that can be // referenced by tasks of this run. Environments []JobEnvironment `tfsdk:"environments" tf:"optional"` @@ -2328,9 +2328,9 @@ type SubmitRun struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // An optional token that can be used to guarantee the idempotency of job // run requests. If a run with the provided token already exists, the // request does not create a new run but returns the ID of the existing run @@ -2350,12 +2350,12 @@ type SubmitRun struct { // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // run. 
- NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // The queue settings of the one-time run. - Queue []QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` // Specifies the user or service principal that the job runs as. If not // specified, the job runs as the user who submits the request. - RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"` + RunAs []JobRunAs `tfsdk:"run_as" tf:"optional,object"` // An optional name for the run. The default value is `Untitled`. RunName types.String `tfsdk:"run_name" tf:"optional"` @@ -2365,7 +2365,7 @@ type SubmitRun struct { TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds" tf:"optional"` // A collection of system notification IDs to notify when the run begins or // completes. - WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } // Run was created and started successfully. @@ -2378,11 +2378,11 @@ type SubmitTask struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. - ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name @@ -2392,7 +2392,7 @@ type SubmitTask struct { Description types.String `tfsdk:"description" tf:"optional"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. - EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // The key that references an environment spec in a job. This field is // required for Python script, Python wheel and dbt tasks when using // serverless compute. @@ -2404,37 +2404,37 @@ type SubmitTask struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. Libraries compute.Library `tfsdk:"library" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. 
- NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. - NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. - NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // If pipeline_task, indicates that this task must execute a Pipeline. - PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2452,9 +2452,9 @@ type SubmitTask struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2466,7 +2466,7 @@ type SubmitTask struct { // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. // Task webhooks respect the task notification settings. 
- WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } type TableUpdateTriggerConfiguration struct { @@ -2490,11 +2490,11 @@ type Task struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. - ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete before executing this // task. The task will run only if the `run_if` condition is true. The key @@ -2507,7 +2507,7 @@ type Task struct { // An optional set of email addresses that is notified when runs of this // task begin or complete as well as when this task is deleted. The default // behavior is to not send any emails. - EmailNotifications []TaskEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []TaskEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // The key that references an environment spec in a job. This field is // required for Python script, Python wheel and dbt tasks when using // serverless compute. @@ -2519,9 +2519,9 @@ type Task struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey types.String `tfsdk:"job_cluster_key" tf:"optional"` @@ -2539,18 +2539,18 @@ type Task struct { MinRetryIntervalMillis types.Int64 `tfsdk:"min_retry_interval_millis" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. - NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task. - NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // If pipeline_task, indicates that this task must execute a Pipeline. 
- PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional policy to specify whether to retry a job when it times out. // The default behavior is to not retry on timeout. RetryOnTimeout types.Bool `tfsdk:"retry_on_timeout" tf:"optional"` @@ -2565,11 +2565,11 @@ type Task struct { // dependencies have failed RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2587,9 +2587,9 @@ type Task struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2601,7 +2601,7 @@ type Task struct { // A collection of system notification IDs to notify when runs of this task // begin or complete. The default behavior is to not send any system // notifications. - WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } type TaskDependency struct { @@ -2730,15 +2730,15 @@ type TriggerInfo struct { type TriggerSettings struct { // File arrival trigger settings. - FileArrival []FileArrivalTriggerConfiguration `tfsdk:"file_arrival" tf:"optional"` + FileArrival []FileArrivalTriggerConfiguration `tfsdk:"file_arrival" tf:"optional,object"` // Whether this trigger is paused or not. PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` // Periodic trigger settings. - Periodic []PeriodicTriggerConfiguration `tfsdk:"periodic" tf:"optional"` + Periodic []PeriodicTriggerConfiguration `tfsdk:"periodic" tf:"optional,object"` // Old table trigger settings name. Deprecated in favor of `table_update`. 
- Table []TableUpdateTriggerConfiguration `tfsdk:"table" tf:"optional"` + Table []TableUpdateTriggerConfiguration `tfsdk:"table" tf:"optional,object"` - TableUpdate []TableUpdateTriggerConfiguration `tfsdk:"table_update" tf:"optional"` + TableUpdate []TableUpdateTriggerConfiguration `tfsdk:"table_update" tf:"optional,object"` } type UpdateJob struct { @@ -2759,7 +2759,7 @@ type UpdateJob struct { // // Changes to the field `JobSettings.timeout_seconds` are applied to active // runs. Changes to other fields are applied to future runs only. - NewSettings []JobSettings `tfsdk:"new_settings" tf:"optional"` + NewSettings []JobSettings `tfsdk:"new_settings" tf:"optional,object"` } type UpdateResponse struct { diff --git a/internal/service/marketplace_tf/model.go b/internal/service/marketplace_tf/model.go index 692e1c80d..40648fd8c 100755 --- a/internal/service/marketplace_tf/model.go +++ b/internal/service/marketplace_tf/model.go @@ -21,7 +21,7 @@ type AddExchangeForListingRequest struct { } type AddExchangeForListingResponse struct { - ExchangeForListing []ExchangeListing `tfsdk:"exchange_for_listing" tf:"optional"` + ExchangeForListing []ExchangeListing `tfsdk:"exchange_for_listing" tf:"optional,object"` } // Get one batch of listings. One may specify up to 50 IDs per request. @@ -59,7 +59,7 @@ type ContactInfo struct { } type CreateExchangeFilterRequest struct { - Filter []ExchangeFilter `tfsdk:"filter" tf:""` + Filter []ExchangeFilter `tfsdk:"filter" tf:"object"` } type CreateExchangeFilterResponse struct { @@ -67,7 +67,7 @@ type CreateExchangeFilterResponse struct { } type CreateExchangeRequest struct { - Exchange []Exchange `tfsdk:"exchange" tf:""` + Exchange []Exchange `tfsdk:"exchange" tf:"object"` } type CreateExchangeResponse struct { @@ -77,7 +77,7 @@ type CreateExchangeResponse struct { type CreateFileRequest struct { DisplayName types.String `tfsdk:"display_name" tf:"optional"` - FileParent []FileParent `tfsdk:"file_parent" tf:""` + FileParent []FileParent `tfsdk:"file_parent" tf:"object"` MarketplaceFileType types.String `tfsdk:"marketplace_file_type" tf:""` @@ -85,13 +85,13 @@ type CreateFileRequest struct { } type CreateFileResponse struct { - FileInfo []FileInfo `tfsdk:"file_info" tf:"optional"` + FileInfo []FileInfo `tfsdk:"file_info" tf:"optional,object"` // Pre-signed POST URL to blob storage UploadUrl types.String `tfsdk:"upload_url" tf:"optional"` } type CreateInstallationRequest struct { - AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"optional"` + AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"optional,object"` CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` @@ -99,13 +99,13 @@ type CreateInstallationRequest struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` // for git repo installations - RepoDetail []RepoInstallation `tfsdk:"repo_detail" tf:"optional"` + RepoDetail []RepoInstallation `tfsdk:"repo_detail" tf:"optional,object"` ShareName types.String `tfsdk:"share_name" tf:"optional"` } type CreateListingRequest struct { - Listing []Listing `tfsdk:"listing" tf:""` + Listing []Listing `tfsdk:"listing" tf:"object"` } type CreateListingResponse struct { @@ -114,7 +114,7 @@ type CreateListingResponse struct { // Data request messages also creates a lead (maybe) type CreatePersonalizationRequest struct { - AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:""` + AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"object"` Comment types.String `tfsdk:"comment" tf:"optional"` @@ -138,7 +138,7 @@ type CreatePersonalizationRequestResponse struct { } type CreateProviderRequest struct { - Provider []ProviderInfo `tfsdk:"provider" tf:""` + Provider []ProviderInfo `tfsdk:"provider" tf:"object"` } type CreateProviderResponse struct { @@ -264,7 +264,7 @@ type FileInfo struct { DownloadLink types.String `tfsdk:"download_link" tf:"optional"` - FileParent []FileParent `tfsdk:"file_parent" tf:"optional"` + FileParent []FileParent `tfsdk:"file_parent" tf:"optional,object"` Id types.String `tfsdk:"id" tf:"optional"` @@ -292,7 +292,7 @@ type GetExchangeRequest struct { } type GetExchangeResponse struct { - Exchange []Exchange `tfsdk:"exchange" tf:"optional"` + Exchange []Exchange `tfsdk:"exchange" tf:"optional,object"` } // Get a file @@ -301,7 +301,7 @@ type GetFileRequest struct { } type GetFileResponse struct { - FileInfo []FileInfo `tfsdk:"file_info" tf:"optional"` + FileInfo []FileInfo `tfsdk:"file_info" tf:"optional,object"` } type GetLatestVersionProviderAnalyticsDashboardResponse struct { @@ -330,7 +330,7 @@ type GetListingRequest struct { } type GetListingResponse struct { - Listing []Listing `tfsdk:"listing" tf:"optional"` + Listing []Listing `tfsdk:"listing" tf:"optional,object"` } // List listings @@ -361,11 +361,11 @@ type GetProviderRequest struct { } type GetProviderResponse struct { - Provider []ProviderInfo `tfsdk:"provider" tf:"optional"` + Provider []ProviderInfo `tfsdk:"provider" tf:"optional,object"` } type Installation struct { - Installation []InstallationDetail `tfsdk:"installation" tf:"optional"` + Installation []InstallationDetail `tfsdk:"installation" tf:"optional,object"` } type InstallationDetail struct { @@ -391,7 +391,7 @@ type InstallationDetail struct { Status types.String `tfsdk:"status" tf:"optional"` - TokenDetail []TokenDetail `tfsdk:"token_detail" tf:"optional"` + TokenDetail []TokenDetail `tfsdk:"token_detail" tf:"optional,object"` Tokens []TokenInfo `tfsdk:"tokens" tf:"optional"` } @@ -578,11 +578,11 @@ type ListProvidersResponse struct { } type Listing struct { - Detail []ListingDetail `tfsdk:"detail" tf:"optional"` + Detail []ListingDetail `tfsdk:"detail" tf:"optional,object"` Id types.String `tfsdk:"id" tf:"optional"` // Next Number: 26 - Summary []ListingSummary `tfsdk:"summary" tf:""` + Summary []ListingSummary `tfsdk:"summary" tf:"object"` } type ListingDetail struct { @@ -594,7 +594,7 @@ type ListingDetail struct { // The starting date timestamp for when the data spans CollectionDateStart types.Int64 `tfsdk:"collection_date_start" tf:"optional"` // Smallest unit of time in the dataset - CollectionGranularity []DataRefreshInfo `tfsdk:"collection_granularity" tf:"optional"` + CollectionGranularity []DataRefreshInfo `tfsdk:"collection_granularity" tf:"optional,object"` // Whether the dataset is free or paid Cost types.String `tfsdk:"cost" tf:"optional"` // Where/how the data is sourced @@ -633,7 +633,7 @@ type ListingDetail struct { TermsOfService types.String `tfsdk:"terms_of_service" tf:"optional"` // How often data is updated - UpdateFrequency []DataRefreshInfo `tfsdk:"update_frequency" tf:"optional"` + UpdateFrequency []DataRefreshInfo `tfsdk:"update_frequency" tf:"optional,object"` } type ListingFulfillment struct { @@ -643,9 +643,9 @@ type ListingFulfillment struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` - RepoInfo []RepoInfo `tfsdk:"repo_info" tf:"optional"` + RepoInfo []RepoInfo `tfsdk:"repo_info" tf:"optional,object"` - ShareInfo []ShareInfo `tfsdk:"share_info" tf:"optional"` + ShareInfo []ShareInfo `tfsdk:"share_info" tf:"optional,object"` } type ListingSetting struct { @@ -665,7 +665,7 @@ type ListingSummary struct { ExchangeIds []types.String `tfsdk:"exchange_ids" tf:"optional"` // if a git repo is being created, a listing will be initialized with this // field as opposed to a share - GitRepo []RepoInfo `tfsdk:"git_repo" tf:"optional"` + GitRepo []RepoInfo `tfsdk:"git_repo" tf:"optional,object"` ListingType types.String `tfsdk:"listingType" tf:""` @@ -673,15 +673,15 @@ type ListingSummary struct { ProviderId types.String `tfsdk:"provider_id" tf:"optional"` - ProviderRegion []RegionInfo `tfsdk:"provider_region" tf:"optional"` + ProviderRegion []RegionInfo `tfsdk:"provider_region" tf:"optional,object"` PublishedAt types.Int64 `tfsdk:"published_at" tf:"optional"` PublishedBy types.String `tfsdk:"published_by" tf:"optional"` - Setting []ListingSetting `tfsdk:"setting" tf:"optional"` + Setting []ListingSetting `tfsdk:"setting" tf:"optional,object"` - Share []ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional,object"` // Enums Status types.String `tfsdk:"status" tf:"optional"` @@ -705,10 +705,10 @@ type ListingTag struct { type PersonalizationRequest struct { Comment types.String `tfsdk:"comment" tf:"optional"` - ConsumerRegion []RegionInfo `tfsdk:"consumer_region" tf:""` + ConsumerRegion []RegionInfo `tfsdk:"consumer_region" tf:"object"` // contact info for the consumer requesting data or performing a listing // installation - ContactInfo []ContactInfo `tfsdk:"contact_info" tf:"optional"` + ContactInfo []ContactInfo `tfsdk:"contact_info" tf:"optional,object"` CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` @@ -728,7 +728,7 @@ type PersonalizationRequest struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` - Share []ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional,object"` Status types.String `tfsdk:"status" tf:"optional"` @@ -870,27 +870,27 @@ type TokenInfo struct { } type UpdateExchangeFilterRequest struct { - Filter []ExchangeFilter `tfsdk:"filter" tf:""` + Filter []ExchangeFilter `tfsdk:"filter" tf:"object"` Id types.String `tfsdk:"-"` } type UpdateExchangeFilterResponse struct { - Filter []ExchangeFilter `tfsdk:"filter" tf:"optional"` + Filter []ExchangeFilter `tfsdk:"filter" tf:"optional,object"` } type UpdateExchangeRequest struct { - Exchange []Exchange `tfsdk:"exchange" tf:""` + Exchange []Exchange `tfsdk:"exchange" tf:"object"` Id types.String `tfsdk:"-"` } type UpdateExchangeResponse struct { - Exchange []Exchange `tfsdk:"exchange" tf:"optional"` + Exchange []Exchange `tfsdk:"exchange" tf:"optional,object"` } type UpdateInstallationRequest struct { - Installation []InstallationDetail `tfsdk:"installation" tf:""` + Installation []InstallationDetail `tfsdk:"installation" tf:"object"` InstallationId types.String `tfsdk:"-"` @@ -900,17 +900,17 @@ type UpdateInstallationRequest struct { } type UpdateInstallationResponse struct { - Installation []InstallationDetail `tfsdk:"installation" tf:"optional"` + Installation []InstallationDetail `tfsdk:"installation" tf:"optional,object"` } type UpdateListingRequest struct { Id types.String `tfsdk:"-"` - Listing []Listing `tfsdk:"listing" tf:""` + Listing []Listing `tfsdk:"listing" tf:"object"` } type UpdateListingResponse struct { - Listing []Listing `tfsdk:"listing" tf:"optional"` + Listing []Listing `tfsdk:"listing" tf:"optional,object"` } type UpdatePersonalizationRequestRequest struct { @@ -920,13 +920,13 @@ type UpdatePersonalizationRequestRequest struct { RequestId types.String `tfsdk:"-"` - Share []ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional,object"` Status types.String `tfsdk:"status" tf:""` } type UpdatePersonalizationRequestResponse struct { - Request []PersonalizationRequest `tfsdk:"request" tf:"optional"` + Request []PersonalizationRequest `tfsdk:"request" tf:"optional,object"` } type UpdateProviderAnalyticsDashboardRequest struct { @@ -950,9 +950,9 @@ type UpdateProviderAnalyticsDashboardResponse struct { type UpdateProviderRequest struct { Id types.String `tfsdk:"-"` - Provider []ProviderInfo `tfsdk:"provider" tf:""` + Provider []ProviderInfo `tfsdk:"provider" tf:"object"` } type UpdateProviderResponse struct { - Provider []ProviderInfo `tfsdk:"provider" tf:"optional"` + Provider []ProviderInfo `tfsdk:"provider" tf:"optional,object"` } diff --git a/internal/service/ml_tf/model.go b/internal/service/ml_tf/model.go index 71e0ecbc1..e3e52c78a 100755 --- a/internal/service/ml_tf/model.go +++ b/internal/service/ml_tf/model.go @@ -94,7 +94,7 @@ type ApproveTransitionRequest struct { type ApproveTransitionRequestResponse struct { // Activity recorded for the action. - Activity []Activity `tfsdk:"activity" tf:"optional"` + Activity []Activity `tfsdk:"activity" tf:"optional,object"` } // Comment details. @@ -124,7 +124,7 @@ type CreateComment struct { type CreateCommentResponse struct { // Comment details. - Comment []CommentObject `tfsdk:"comment" tf:"optional"` + Comment []CommentObject `tfsdk:"comment" tf:"optional,object"` } type CreateExperiment struct { @@ -156,7 +156,7 @@ type CreateModelRequest struct { } type CreateModelResponse struct { - RegisteredModel []Model `tfsdk:"registered_model" tf:"optional"` + RegisteredModel []Model `tfsdk:"registered_model" tf:"optional,object"` } type CreateModelVersionRequest struct { @@ -178,7 +178,7 @@ type CreateModelVersionRequest struct { type CreateModelVersionResponse struct { // Return new version number generated for this model in registry. - ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional"` + ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional,object"` } type CreateRegistryWebhook struct { @@ -219,9 +219,9 @@ type CreateRegistryWebhook struct { // version be archived. Events []types.String `tfsdk:"events" tf:""` - HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"` + HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional,object"` - JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional"` + JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional,object"` // Name of the model whose events would trigger this webhook. ModelName types.String `tfsdk:"model_name" tf:"optional"` // Enable or disable triggering the webhook, or put the webhook into test @@ -250,7 +250,7 @@ type CreateRun struct { type CreateRunResponse struct { // The newly created run. - Run []Run `tfsdk:"run" tf:"optional"` + Run []Run `tfsdk:"run" tf:"optional,object"` } type CreateTransitionRequest struct { @@ -274,11 +274,11 @@ type CreateTransitionRequest struct { type CreateTransitionRequestResponse struct { // Transition request details.
diff --git a/internal/service/ml_tf/model.go b/internal/service/ml_tf/model.go
index 71e0ecbc1..e3e52c78a 100755
--- a/internal/service/ml_tf/model.go
+++ b/internal/service/ml_tf/model.go
@@ -94,7 +94,7 @@ type ApproveTransitionRequest struct {

type ApproveTransitionRequestResponse struct {
	// Activity recorded for the action.
-	Activity []Activity `tfsdk:"activity" tf:"optional"`
+	Activity []Activity `tfsdk:"activity" tf:"optional,object"`
}

// Comment details.
@@ -124,7 +124,7 @@ type CreateComment struct {

type CreateCommentResponse struct {
	// Comment details.
-	Comment []CommentObject `tfsdk:"comment" tf:"optional"`
+	Comment []CommentObject `tfsdk:"comment" tf:"optional,object"`
}

type CreateExperiment struct {
@@ -156,7 +156,7 @@ type CreateModelRequest struct {
}

type CreateModelResponse struct {
-	RegisteredModel []Model `tfsdk:"registered_model" tf:"optional"`
+	RegisteredModel []Model `tfsdk:"registered_model" tf:"optional,object"`
}

type CreateModelVersionRequest struct {
@@ -178,7 +178,7 @@ type CreateModelVersionRequest struct {

type CreateModelVersionResponse struct {
	// Return new version number generated for this model in registry.
-	ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional"`
+	ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional,object"`
}

type CreateRegistryWebhook struct {
@@ -219,9 +219,9 @@ type CreateRegistryWebhook struct {
	// version be archived.
	Events []types.String `tfsdk:"events" tf:""`

-	HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"`
+	HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional,object"`

-	JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional"`
+	JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional,object"`
	// Name of the model whose events would trigger this webhook.
	ModelName types.String `tfsdk:"model_name" tf:"optional"`
	// Enable or disable triggering the webhook, or put the webhook into test
@@ -250,7 +250,7 @@ type CreateRun struct {

type CreateRunResponse struct {
	// The newly created run.
-	Run []Run `tfsdk:"run" tf:"optional"`
+	Run []Run `tfsdk:"run" tf:"optional,object"`
}

type CreateTransitionRequest struct {
@@ -274,11 +274,11 @@ type CreateTransitionRequestResponse struct {
	// Transition request details.
-	Request []TransitionRequest `tfsdk:"request" tf:"optional"`
+	Request []TransitionRequest `tfsdk:"request" tf:"optional,object"`
}

type CreateWebhookResponse struct {
-	Webhook []RegistryWebhook `tfsdk:"webhook" tf:"optional"`
+	Webhook []RegistryWebhook `tfsdk:"webhook" tf:"optional,object"`
}

type Dataset struct {
@@ -306,7 +306,7 @@ type Dataset struct {

type DatasetInput struct {
	// The dataset being used as a Run input.
-	Dataset []Dataset `tfsdk:"dataset" tf:"optional"`
+	Dataset []Dataset `tfsdk:"dataset" tf:"optional,object"`
	// A list of tags for the dataset input, e.g. a “context” tag with value
	// “training”
	Tags []InputTag `tfsdk:"tags" tf:"optional"`
@@ -562,7 +562,7 @@ type GetExperimentRequest struct {

type GetExperimentResponse struct {
	// Experiment details.
-	Experiment []Experiment `tfsdk:"experiment" tf:"optional"`
+	Experiment []Experiment `tfsdk:"experiment" tf:"optional,object"`
}

// Get history of a given metric within a run
@@ -611,7 +611,7 @@ type GetModelRequest struct {
}

type GetModelResponse struct {
-	RegisteredModelDatabricks []ModelDatabricks `tfsdk:"registered_model_databricks" tf:"optional"`
+	RegisteredModelDatabricks []ModelDatabricks `tfsdk:"registered_model_databricks" tf:"optional,object"`
}

// Get a model version URI
@@ -636,7 +636,7 @@ type GetModelVersionRequest struct {
}

type GetModelVersionResponse struct {
-	ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional"`
+	ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional,object"`
}

// Get registered model permission levels
@@ -668,7 +668,7 @@ type GetRunRequest struct {

type GetRunResponse struct {
	// Run metadata (name, start time, etc) and data (metrics, params, and
	// tags).
-	Run []Run `tfsdk:"run" tf:"optional"`
+	Run []Run `tfsdk:"run" tf:"optional,object"`
}

type HttpUrlSpec struct {
@@ -1152,11 +1152,11 @@ type RegistryWebhook struct {
	// version be archived.
	Events []types.String `tfsdk:"events" tf:"optional"`

-	HttpUrlSpec []HttpUrlSpecWithoutSecret `tfsdk:"http_url_spec" tf:"optional"`
+	HttpUrlSpec []HttpUrlSpecWithoutSecret `tfsdk:"http_url_spec" tf:"optional,object"`
	// Webhook ID
	Id types.String `tfsdk:"id" tf:"optional"`

-	JobSpec []JobSpecWithoutSecret `tfsdk:"job_spec" tf:"optional"`
+	JobSpec []JobSpecWithoutSecret `tfsdk:"job_spec" tf:"optional,object"`
	// Time of the object at last update, as a Unix timestamp in milliseconds.
	LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp" tf:"optional"`
	// Name of the model whose events would trigger this webhook.
@@ -1193,7 +1193,7 @@ type RejectTransitionRequest struct {

type RejectTransitionRequestResponse struct {
	// Activity recorded for the action.
-	Activity []Activity `tfsdk:"activity" tf:"optional"`
+	Activity []Activity `tfsdk:"activity" tf:"optional,object"`
}

type RenameModelRequest struct {
@@ -1204,7 +1204,7 @@ type RenameModelRequest struct {
}

type RenameModelResponse struct {
-	RegisteredModel []Model `tfsdk:"registered_model" tf:"optional"`
+	RegisteredModel []Model `tfsdk:"registered_model" tf:"optional,object"`
}

type RestoreExperiment struct {
@@ -1242,11 +1242,11 @@ type RestoreRunsResponse struct {

type Run struct {
	// Run data.
-	Data []RunData `tfsdk:"data" tf:"optional"`
+	Data []RunData `tfsdk:"data" tf:"optional,object"`
	// Run metadata.
-	Info []RunInfo `tfsdk:"info" tf:"optional"`
+	Info []RunInfo `tfsdk:"info" tf:"optional,object"`
	// Run inputs.
-	Inputs []RunInputs `tfsdk:"inputs" tf:"optional"`
+	Inputs []RunInputs `tfsdk:"inputs" tf:"optional,object"`
}

type RunData struct {
@@ -1497,7 +1497,7 @@ type TestRegistryWebhookRequest struct {

type TestRegistryWebhookResponse struct {
	// Test webhook response object.
-	Webhook []TestRegistryWebhook `tfsdk:"webhook" tf:"optional"`
+	Webhook []TestRegistryWebhook `tfsdk:"webhook" tf:"optional,object"`
}

type TransitionModelVersionStageDatabricks struct {
@@ -1546,7 +1546,7 @@ type TransitionRequest struct {
}

type TransitionStageResponse struct {
-	ModelVersion []ModelVersionDatabricks `tfsdk:"model_version" tf:"optional"`
+	ModelVersion []ModelVersionDatabricks `tfsdk:"model_version" tf:"optional,object"`
}

type UpdateComment struct {
@@ -1558,7 +1558,7 @@ type UpdateComment struct {

type UpdateCommentResponse struct {
	// Comment details.
-	Comment []CommentObject `tfsdk:"comment" tf:"optional"`
+	Comment []CommentObject `tfsdk:"comment" tf:"optional,object"`
}

type UpdateExperiment struct {
@@ -1632,11 +1632,11 @@ type UpdateRegistryWebhook struct {
	// version be archived.
	Events []types.String `tfsdk:"events" tf:"optional"`

-	HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"`
+	HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional,object"`
	// Webhook ID
	Id types.String `tfsdk:"id" tf:""`

-	JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional"`
+	JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional,object"`
	// Enable or disable triggering the webhook, or put the webhook into test
	// mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an
	// associated event happens.
@@ -1662,7 +1662,7 @@ type UpdateRun struct {

type UpdateRunResponse struct {
	// Updated metadata of the run.
-	RunInfo []RunInfo `tfsdk:"run_info" tf:"optional"`
+	RunInfo []RunInfo `tfsdk:"run_info" tf:"optional,object"`
}

type UpdateWebhookResponse struct {
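The Go-side representation is unchanged by this diff: a field tagged `optional,object` is still a slice, so code that builds or inspects these ml_tf models keeps working with one-element slices, where an empty slice means the object is unset. A hedged sketch with trimmed stand-in types (the real generated structs carry the full field set):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/terraform-plugin-framework/types"
    )

    // Stand-in shapes for illustration only; RunId is a hypothetical field.
    type RunInfo struct {
    	RunId types.String `tfsdk:"run_id" tf:"optional"`
    }

    type Run struct {
    	// Single nested object, still modeled as a one-element slice.
    	Info []RunInfo `tfsdk:"info" tf:"optional,object"`
    }

    func main() {
    	// Writing: wrap the single nested object in a one-element slice.
    	run := Run{Info: []RunInfo{{RunId: types.StringValue("r-123")}}}

    	// Reading: an empty slice means the object is unset.
    	if len(run.Info) > 0 {
    		fmt.Println("run id:", run.Info[0].RunId.ValueString())
    	}
    }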
diff --git a/internal/service/oauth2_tf/model.go b/internal/service/oauth2_tf/model.go
index 3efb4b2b4..f7959bfcf 100755
--- a/internal/service/oauth2_tf/model.go
+++ b/internal/service/oauth2_tf/model.go
@@ -26,7 +26,7 @@ type CreateCustomAppIntegration struct {
	// offline_access, openid, profile, email.
	Scopes []types.String `tfsdk:"scopes" tf:"optional"`
	// Token access policy
-	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"`
+	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"`
}

type CreateCustomAppIntegrationOutput struct {
@@ -44,7 +44,7 @@ type CreatePublishedAppIntegration struct {
	// tableau-deskop
	AppId types.String `tfsdk:"app_id" tf:"optional"`
	// Token access policy
-	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"`
+	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"`
}

type CreatePublishedAppIntegrationOutput struct {
@@ -128,7 +128,7 @@ type GetCustomAppIntegrationOutput struct {
	Scopes []types.String `tfsdk:"scopes" tf:"optional"`
	// Token access policy
-	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"`
+	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"`
}

// Get OAuth Custom App Integration
@@ -155,7 +155,7 @@ type GetPublishedAppIntegrationOutput struct {
	// Display name of the published OAuth app
	Name types.String `tfsdk:"name" tf:"optional"`
	// Token access policy
-	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"`
+	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"`
}

// Get OAuth Published App Integration
@@ -258,7 +258,7 @@ type UpdateCustomAppIntegration struct {
	// integration
	RedirectUrls []types.String `tfsdk:"redirect_urls" tf:"optional"`
	// Token access policy to be updated in the custom OAuth app integration
-	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"`
+	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"`
}

type UpdateCustomAppIntegrationOutput struct {
@@ -267,7 +267,7 @@ type UpdateCustomAppIntegrationOutput struct {
type UpdatePublishedAppIntegration struct {
	IntegrationId types.String `tfsdk:"-"`
	// Token access policy to be updated in the published OAuth app integration
-	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"`
+	TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"`
}

type UpdatePublishedAppIntegrationOutput struct {
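At the schema layer, a field like `TokenAccessPolicy` tagged `optional,object` corresponds to the list-backed nested-block pattern the plugin framework expresses directly, with a size validator capping the list at one element. A sketch using the plain framework API, independent of this provider's builder wrappers (the attribute name `access_token_ttl_in_minutes` is an assumption for illustration, not taken from this diff):

    package main

    import (
    	"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
    	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
    	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
    )

    func main() {
    	// An optional single-object block: present at most once.
    	block := schema.ListNestedBlock{
    		NestedObject: schema.NestedBlockObject{
    			Attributes: map[string]schema.Attribute{
    				// Assumed attribute, for illustration only.
    				"access_token_ttl_in_minutes": schema.Int64Attribute{Optional: true},
    			},
    		},
    		Validators: []validator.List{listvalidator.SizeAtMost(1)},
    	}
    	_ = block
    }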
diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go
index 68ee17d59..c4ad05458 100755
--- a/internal/service/pipelines_tf/model.go
+++ b/internal/service/pipelines_tf/model.go
@@ -36,7 +36,7 @@ type CreatePipeline struct {
	// Whether the pipeline is continuous or triggered. This replaces `trigger`.
	Continuous types.Bool `tfsdk:"continuous" tf:"optional"`
	// Deployment type of this pipeline.
-	Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"`
+	Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional,object"`
	// Whether the pipeline is in Development mode. Defaults to false.
	Development types.Bool `tfsdk:"development" tf:"optional"`
@@ -44,14 +44,14 @@ type CreatePipeline struct {
	// Pipeline product edition.
	Edition types.String `tfsdk:"edition" tf:"optional"`
	// Filters on which Pipeline packages to include in the deployed graph.
-	Filters []Filters `tfsdk:"filters" tf:"optional"`
+	Filters []Filters `tfsdk:"filters" tf:"optional,object"`
	// The definition of a gateway pipeline to support CDC.
-	GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"`
+	GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"`
	// Unique identifier for this pipeline.
	Id types.String `tfsdk:"id" tf:"optional"`
	// The configuration for a managed ingestion pipeline. These settings cannot
	// be used with the 'libraries', 'target' or 'catalog' settings.
-	IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"`
+	IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional,object"`
	// Libraries or code needed by this deployment.
	Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"`
	// Friendly identifier for this pipeline.
@@ -73,12 +73,12 @@ type CreatePipeline struct {
	// To publish to Unity Catalog, also specify `catalog`.
	Target types.String `tfsdk:"target" tf:"optional"`
	// Which pipeline trigger to use. Deprecated: Use `continuous` instead.
-	Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"`
+	Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"`
}

type CreatePipelineResponse struct {
	// Only returned when dry_run is true.
-	EffectiveSettings []PipelineSpec `tfsdk:"effective_settings" tf:"optional"`
+	EffectiveSettings []PipelineSpec `tfsdk:"effective_settings" tf:"optional,object"`
	// The unique identifier for the newly created pipeline. Only returned when
	// dry_run is false.
	PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"`
@@ -126,7 +126,7 @@ type EditPipeline struct {
	// Whether the pipeline is continuous or triggered. This replaces `trigger`.
	Continuous types.Bool `tfsdk:"continuous" tf:"optional"`
	// Deployment type of this pipeline.
-	Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"`
+	Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional,object"`
	// Whether the pipeline is in Development mode. Defaults to false.
	Development types.Bool `tfsdk:"development" tf:"optional"`
	// Pipeline product edition.
@@ -136,14 +136,14 @@ type EditPipeline struct {
	// will fail with a conflict.
	ExpectedLastModified types.Int64 `tfsdk:"expected_last_modified" tf:"optional"`
	// Filters on which Pipeline packages to include in the deployed graph.
-	Filters []Filters `tfsdk:"filters" tf:"optional"`
+	Filters []Filters `tfsdk:"filters" tf:"optional,object"`
	// The definition of a gateway pipeline to support CDC.
-	GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"`
+	GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"`
	// Unique identifier for this pipeline.
	Id types.String `tfsdk:"id" tf:"optional"`
	// The configuration for a managed ingestion pipeline. These settings cannot
	// be used with the 'libraries', 'target' or 'catalog' settings.
-	IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"`
+	IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional,object"`
	// Libraries or code needed by this deployment.
	Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"`
	// Friendly identifier for this pipeline.
@@ -167,7 +167,7 @@ type EditPipeline struct {
	// To publish to Unity Catalog, also specify `catalog`.
	Target types.String `tfsdk:"target" tf:"optional"`
	// Which pipeline trigger to use. Deprecated: Use `continuous` instead.
-	Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"`
+	Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"`
}

type EditPipelineResponse struct {
@@ -238,7 +238,7 @@ type GetPipelineResponse struct {
	RunAsUserName types.String `tfsdk:"run_as_user_name" tf:"optional"`
	// The pipeline specification. This field is not returned when called by
	// `ListPipelines`.
-	Spec []PipelineSpec `tfsdk:"spec" tf:"optional"`
+	Spec []PipelineSpec `tfsdk:"spec" tf:"optional,object"`
	// The pipeline state.
	State types.String `tfsdk:"state" tf:"optional"`
}
@@ -253,14 +253,14 @@ type GetUpdateRequest struct {

type GetUpdateResponse struct {
	// The current update info.
-	Update []UpdateInfo `tfsdk:"update" tf:"optional"`
+	Update []UpdateInfo `tfsdk:"update" tf:"optional,object"`
}

type IngestionConfig struct {
	// Select tables from a specific source schema.
-	Schema []SchemaSpec `tfsdk:"schema" tf:"optional"`
+	Schema []SchemaSpec `tfsdk:"schema" tf:"optional,object"`
	// Select tables from a specific source table.
-	Table []TableSpec `tfsdk:"table" tf:"optional"`
+	Table []TableSpec `tfsdk:"table" tf:"optional,object"`
}

type IngestionGatewayPipelineDefinition struct {
@@ -294,7 +294,7 @@ type IngestionPipelineDefinition struct {
	Objects []IngestionConfig `tfsdk:"objects" tf:"optional"`
	// Configuration settings to control the ingestion of tables. These settings
	// are applied to all tables in the pipeline.
-	TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"`
+	TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"`
}

// List pipeline events
@@ -478,20 +478,20 @@ type PipelineCluster struct {
	// Parameters needed in order to automatically scale clusters up and down
	// based on load. Note: autoscaling works best with DB runtime versions 3.0
	// or later.
-	Autoscale []PipelineClusterAutoscale `tfsdk:"autoscale" tf:"optional"`
+	Autoscale []PipelineClusterAutoscale `tfsdk:"autoscale" tf:"optional,object"`
	// Attributes related to clusters running on Amazon Web Services. If not
	// specified at cluster creation, a set of default values will be used.
-	AwsAttributes compute.AwsAttributes `tfsdk:"aws_attributes" tf:"optional"`
+	AwsAttributes compute.AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"`
	// Attributes related to clusters running on Microsoft Azure. If not
	// specified at cluster creation, a set of default values will be used.
-	AzureAttributes compute.AzureAttributes `tfsdk:"azure_attributes" tf:"optional"`
+	AzureAttributes compute.AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"`
	// The configuration for delivering spark logs to a long-term storage
	// destination. Only dbfs destinations are supported. Only one destination
	// can be specified for one cluster. If the conf is given, the logs will be
	// delivered to the destination every `5 mins`. The destination of driver
	// logs is `$destination/$clusterId/driver`, while the destination of
	// executor logs is `$destination/$clusterId/executor`.
-	ClusterLogConf compute.ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"`
+	ClusterLogConf compute.ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"`
	// Additional tags for cluster resources. Databricks will tag all cluster
	// resources (e.g., AWS instances and EBS volumes) with these tags in
	// addition to `default_tags`. Notes:
@@ -513,7 +513,7 @@ type PipelineCluster struct {
	EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"`
	// Attributes related to clusters running on Google Cloud Platform. If not
	// specified at cluster creation, a set of default values will be used.
-	GcpAttributes compute.GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"`
+	GcpAttributes compute.GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"`
	// The configuration for storing init scripts. Any number of destinations
	// can be specified. The scripts are executed sequentially in the order
	// provided. If `cluster_log_conf` is specified, init script logs are sent
@@ -593,7 +593,7 @@ type PipelineDeployment struct {

type PipelineEvent struct {
	// Information about an error captured by the event.
-	Error []ErrorDetail `tfsdk:"error" tf:"optional"`
+	Error []ErrorDetail `tfsdk:"error" tf:"optional,object"`
	// The event type. Should always correspond to the details
	EventType types.String `tfsdk:"event_type" tf:"optional"`
	// A time-based, globally unique id.
@@ -605,9 +605,9 @@ type PipelineEvent struct {
	// The display message associated with the event.
	Message types.String `tfsdk:"message" tf:"optional"`
	// Describes where the event originates from.
-	Origin []Origin `tfsdk:"origin" tf:"optional"`
+	Origin []Origin `tfsdk:"origin" tf:"optional,object"`
	// A sequencing object to identify and order events.
-	Sequence []Sequencing `tfsdk:"sequence" tf:"optional"`
+	Sequence []Sequencing `tfsdk:"sequence" tf:"optional,object"`
	// The time of the event.
	Timestamp types.String `tfsdk:"timestamp" tf:"optional"`
}
@@ -615,14 +615,14 @@ type PipelineEvent struct {

type PipelineLibrary struct {
	// The path to a file that defines a pipeline and is stored in the
	// Databricks Repos.
-	File []FileLibrary `tfsdk:"file" tf:"optional"`
+	File []FileLibrary `tfsdk:"file" tf:"optional,object"`
	// URI of the jar to be installed. Currently only DBFS is supported.
	Jar types.String `tfsdk:"jar" tf:"optional"`
	// Specification of a maven library to be installed.
-	Maven compute.MavenLibrary `tfsdk:"maven" tf:"optional"`
+	Maven compute.MavenLibrary `tfsdk:"maven" tf:"optional,object"`
	// The path to a notebook that defines a pipeline and is stored in the
	// Databricks workspace.
-	Notebook []NotebookLibrary `tfsdk:"notebook" tf:"optional"`
+	Notebook []NotebookLibrary `tfsdk:"notebook" tf:"optional,object"`
	// URI of the whl to be installed.
	Whl types.String `tfsdk:"whl" tf:"optional"`
}
@@ -673,20 +673,20 @@ type PipelineSpec struct {
	// Whether the pipeline is continuous or triggered. This replaces `trigger`.
	Continuous types.Bool `tfsdk:"continuous" tf:"optional"`
	// Deployment type of this pipeline.
-	Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"`
+	Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional,object"`
	// Whether the pipeline is in Development mode. Defaults to false.
	Development types.Bool `tfsdk:"development" tf:"optional"`
	// Pipeline product edition.
	Edition types.String `tfsdk:"edition" tf:"optional"`
	// Filters on which Pipeline packages to include in the deployed graph.
-	Filters []Filters `tfsdk:"filters" tf:"optional"`
+	Filters []Filters `tfsdk:"filters" tf:"optional,object"`
	// The definition of a gateway pipeline to support CDC.
-	GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"`
+	GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"`
	// Unique identifier for this pipeline.
	Id types.String `tfsdk:"id" tf:"optional"`
	// The configuration for a managed ingestion pipeline. These settings cannot
	// be used with the 'libraries', 'target' or 'catalog' settings.
-	IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"`
+	IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional,object"`
	// Libraries or code needed by this deployment.
	Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"`
	// Friendly identifier for this pipeline.
@@ -708,7 +708,7 @@ type PipelineSpec struct {
	// To publish to Unity Catalog, also specify `catalog`.
	Target types.String `tfsdk:"target" tf:"optional"`
	// Which pipeline trigger to use. Deprecated: Use `continuous` instead.
-	Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"`
+	Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"`
}

type PipelineStateInfo struct {
@@ -733,9 +733,9 @@ type PipelineStateInfo struct {
}

type PipelineTrigger struct {
-	Cron []CronTrigger `tfsdk:"cron" tf:"optional"`
+	Cron []CronTrigger `tfsdk:"cron" tf:"optional,object"`

-	Manual []ManualTrigger `tfsdk:"manual" tf:"optional"`
+	Manual []ManualTrigger `tfsdk:"manual" tf:"optional,object"`
}

type SchemaSpec struct {
@@ -753,14 +753,14 @@ type SchemaSpec struct {
	// Configuration settings to control the ingestion of tables. These settings
	// are applied to all tables in this schema and override the
	// table_configuration defined in the IngestionPipelineDefinition object.
-	TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"`
+	TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"`
}

type Sequencing struct {
	// A sequence number, unique and increasing within the control plane.
	ControlPlaneSeqNo types.Int64 `tfsdk:"control_plane_seq_no" tf:"optional"`
	// the ID assigned by the data plane.
-	DataPlaneId []DataPlaneId `tfsdk:"data_plane_id" tf:"optional"`
+	DataPlaneId []DataPlaneId `tfsdk:"data_plane_id" tf:"optional,object"`
}

type SerializedException struct {
@@ -834,7 +834,7 @@ type TableSpec struct {
	// Configuration settings to control the ingestion of tables. These settings
	// override the table_configuration defined in the
	// IngestionPipelineDefinition object and the SchemaSpec.
-	TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"`
+	TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"`
}

type TableSpecificConfig struct {
@@ -854,7 +854,7 @@ type UpdateInfo struct {
	ClusterId types.String `tfsdk:"cluster_id" tf:"optional"`
	// The pipeline configuration with system defaults applied where unspecified
	// by the user. Not returned by ListUpdates.
-	Config []PipelineSpec `tfsdk:"config" tf:"optional"`
+	Config []PipelineSpec `tfsdk:"config" tf:"optional,object"`
	// The time when this update was created.
	CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"`
	// If true, this update will reset all tables before running.
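The pipelines_tf file shows the distinction the new token draws: `Libraries` keeps plain `tf:"optional"` because a pipeline may declare many library blocks, while `Trigger`, `Filters`, `Deployment`, and similar fields gain `optional,object` because the API accepts at most one of each. A trimmed, illustrative sketch of what that means for values (stand-in field types; the generated structs use the framework's types package):

    package main

    import "fmt"

    // Stand-in types; field names mirror the pipelines_tf models above,
    // struct bodies are trimmed for illustration.
    type PipelineLibrary struct{ Jar string }
    type PipelineTrigger struct{ Manual bool }

    type CreatePipeline struct {
    	// plain `tf:"optional"`: genuinely repeated, any number of blocks
    	Libraries []PipelineLibrary
    	// `tf:"optional,object"`: same Go shape, but semantically at most one
    	Trigger []PipelineTrigger
    }

    func main() {
    	p := CreatePipeline{
    		Libraries: []PipelineLibrary{{Jar: "dbfs:/a.jar"}, {Jar: "dbfs:/b.jar"}}, // ok: repeated
    		Trigger:   []PipelineTrigger{{Manual: true}},                             // capped at one
    	}
    	fmt.Println(len(p.Libraries), len(p.Trigger))
    }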
diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go
index 8880df8f4..502b80640 100755
--- a/internal/service/provisioning_tf/model.go
+++ b/internal/service/provisioning_tf/model.go
@@ -15,7 +15,7 @@ import (
)

type AwsCredentials struct {
-	StsRole []StsRole `tfsdk:"sts_role" tf:"optional"`
+	StsRole []StsRole `tfsdk:"sts_role" tf:"optional,object"`
}

type AwsKeyInfo struct {
@@ -42,7 +42,7 @@ type AzureWorkspaceInfo struct {

// The general workspace configurations that are specific to cloud providers.
type CloudResourceContainer struct {
	// The general workspace configurations that are specific to Google Cloud.
-	Gcp []CustomerFacingGcpCloudResourceContainer `tfsdk:"gcp" tf:"optional"`
+	Gcp []CustomerFacingGcpCloudResourceContainer `tfsdk:"gcp" tf:"optional,object"`
}

type CreateAwsKeyInfo struct {
@@ -59,11 +59,11 @@ type CreateAwsKeyInfo struct {
}

type CreateCredentialAwsCredentials struct {
-	StsRole []CreateCredentialStsRole `tfsdk:"sts_role" tf:"optional"`
+	StsRole []CreateCredentialStsRole `tfsdk:"sts_role" tf:"optional,object"`
}

type CreateCredentialRequest struct {
-	AwsCredentials []CreateCredentialAwsCredentials `tfsdk:"aws_credentials" tf:""`
+	AwsCredentials []CreateCredentialAwsCredentials `tfsdk:"aws_credentials" tf:"object"`
	// The human-readable name of the credential configuration object.
	CredentialsName types.String `tfsdk:"credentials_name" tf:""`
}
@@ -74,9 +74,9 @@ type CreateCredentialStsRole struct {
}

type CreateCustomerManagedKeyRequest struct {
-	AwsKeyInfo []CreateAwsKeyInfo `tfsdk:"aws_key_info" tf:"optional"`
+	AwsKeyInfo []CreateAwsKeyInfo `tfsdk:"aws_key_info" tf:"optional,object"`

-	GcpKeyInfo []CreateGcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional"`
+	GcpKeyInfo []CreateGcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional,object"`
	// The cases that the key can be used for.
	UseCases []types.String `tfsdk:"use_cases" tf:""`
}
@@ -89,7 +89,7 @@ type CreateGcpKeyInfo struct {

type CreateNetworkRequest struct {
	// The Google Cloud specific information for this network (for example, the
	// VPC ID, subnet ID, and secondary IP ranges).
-	GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional"`
+	GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional,object"`
	// The human-readable name of the network configuration.
	NetworkName types.String `tfsdk:"network_name" tf:""`
	// IDs of one to five security groups associated with this network. Security
@@ -102,7 +102,7 @@ type CreateNetworkRequest struct {
	// communication from this VPC over [AWS PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink/
-	VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional"`
+	VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional,object"`
	// The ID of the VPC associated with this network. VPC IDs can be used in
	// multiple network configurations.
	VpcId types.String `tfsdk:"vpc_id" tf:"optional"`
@@ -110,7 +110,7 @@

type CreateStorageConfigurationRequest struct {
	// Root S3 bucket information.
-	RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:""`
+	RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"object"`
	// The human-readable name of the storage configuration.
	StorageConfigurationName types.String `tfsdk:"storage_configuration_name" tf:""`
}
@@ -120,7 +120,7 @@ type CreateVpcEndpointRequest struct {
	AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id" tf:"optional"`
	// The Google Cloud specific information for this Private Service Connect
	// endpoint.
-	GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional"`
+	GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional,object"`
	// The AWS region in which this VPC endpoint object exists.
	Region types.String `tfsdk:"region" tf:"optional"`
	// The human-readable name of the storage configuration.
@@ -135,7 +135,7 @@ type CreateWorkspaceRequest struct {
	Cloud types.String `tfsdk:"cloud" tf:"optional"`
	// The general workspace configurations that are specific to cloud
	// providers.
-	CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional"`
+	CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional,object"`
	// ID of the workspace's credential configuration object.
	CredentialsId types.String `tfsdk:"credentials_id" tf:"optional"`
	// The custom tags key-value pairing that is attached to this workspace. The
@@ -196,9 +196,9 @@ type CreateWorkspaceRequest struct {
	// for a new workspace].
	//
	// [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html
-	GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional"`
+	GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"`
	// The configurations for the GKE cluster of a Databricks workspace.
-	GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional"`
+	GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"`
	// The Google Cloud region of the workspace data plane in your Google
	// account. For example, `us-east4`.
	Location types.String `tfsdk:"location" tf:"optional"`
@@ -242,7 +242,7 @@ type Credential struct {
	// The Databricks account ID that hosts the credential.
	AccountId types.String `tfsdk:"account_id" tf:"optional"`

-	AwsCredentials []AwsCredentials `tfsdk:"aws_credentials" tf:"optional"`
+	AwsCredentials []AwsCredentials `tfsdk:"aws_credentials" tf:"optional,object"`
	// Time in epoch milliseconds when the credential was created.
	CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"`
	// Databricks credential configuration ID.
@@ -262,13 +262,13 @@ type CustomerManagedKey struct {
	// The Databricks account ID that holds the customer-managed key.
	AccountId types.String `tfsdk:"account_id" tf:"optional"`

-	AwsKeyInfo []AwsKeyInfo `tfsdk:"aws_key_info" tf:"optional"`
+	AwsKeyInfo []AwsKeyInfo `tfsdk:"aws_key_info" tf:"optional,object"`
	// Time in epoch milliseconds when the customer key was created.
	CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"`
	// ID of the encryption key configuration object.
	CustomerManagedKeyId types.String `tfsdk:"customer_managed_key_id" tf:"optional"`

-	GcpKeyInfo []GcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional"`
+	GcpKeyInfo []GcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional,object"`
	// The cases that the key can be used for.
	UseCases []types.String `tfsdk:"use_cases" tf:"optional"`
}
@@ -466,7 +466,7 @@ type Network struct {
	ErrorMessages []NetworkHealth `tfsdk:"error_messages" tf:"optional"`
	// The Google Cloud specific information for this network (for example, the
	// VPC ID, subnet ID, and secondary IP ranges).
-	GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional"`
+	GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional,object"`
	// The Databricks network configuration ID.
	NetworkId types.String `tfsdk:"network_id" tf:"optional"`
	// The human-readable name of the network configuration.
@@ -479,7 +479,7 @@
	// communication from this VPC over [AWS PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink/
-	VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional"`
+	VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional,object"`
	// The ID of the VPC associated with this network configuration. VPC IDs can
	// be used in multiple networks.
	VpcId types.String `tfsdk:"vpc_id" tf:"optional"`
@@ -564,7 +564,7 @@ type StorageConfiguration struct {
	// Time in epoch milliseconds when the storage configuration was created.
	CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"`
	// Root S3 bucket information.
-	RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"optional"`
+	RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"optional,object"`
	// Databricks storage configuration ID.
	StorageConfigurationId types.String `tfsdk:"storage_configuration_id" tf:"optional"`
	// The human-readable name of the storage configuration.
@@ -668,7 +668,7 @@ type VpcEndpoint struct {
	AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id" tf:"optional"`
	// The Google Cloud specific information for this Private Service Connect
	// endpoint.
-	GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional"`
+	GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional,object"`
	// The AWS region in which this VPC endpoint object exists.
	Region types.String `tfsdk:"region" tf:"optional"`
	// The current state (such as `available` or `rejected`) of the VPC
@@ -696,12 +696,12 @@ type Workspace struct {
	// The AWS region of the workspace data plane (for example, `us-west-2`).
	AwsRegion types.String `tfsdk:"aws_region" tf:"optional"`

-	AzureWorkspaceInfo []AzureWorkspaceInfo `tfsdk:"azure_workspace_info" tf:"optional"`
+	AzureWorkspaceInfo []AzureWorkspaceInfo `tfsdk:"azure_workspace_info" tf:"optional,object"`
	// The cloud name. This field always has the value `gcp`.
	Cloud types.String `tfsdk:"cloud" tf:"optional"`
	// The general workspace configurations that are specific to cloud
	// providers.
-	CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional"`
+	CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional,object"`
	// Time in epoch milliseconds when the workspace was created.
	CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"`
	// ID of the workspace's credential configuration object.
@@ -741,9 +741,9 @@ type Workspace struct {
	// for a new workspace].
	//
	// [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html
-	GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional"`
+	GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"`
	// The configurations for the GKE cluster of a Databricks workspace.
-	GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional"`
+	GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"`
	// The Google Cloud region of the workspace data plane in your Google
	// account (for example, `us-east4`).
	Location types.String `tfsdk:"location" tf:"optional"`
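provisioning_tf also carries required single objects, for example `AwsCredentials` on `CreateCredentialRequest` and `RootBucketInfo` on `CreateStorageConfigurationRequest`, whose tags read `tf:"object"` with no `optional`. For a list-backed block, "required single object" is commonly pinned with a pair of size validators; a sketch of that idea (whether the generator emits exactly this combination is not shown in this diff, and `role_arn` is an assumed attribute name):

    package main

    import (
    	"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
    	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
    	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
    )

    func main() {
    	// A required single-object block: exactly one element.
    	block := schema.ListNestedBlock{
    		NestedObject: schema.NestedBlockObject{
    			Attributes: map[string]schema.Attribute{
    				// Assumed attribute name, for illustration only.
    				"role_arn": schema.StringAttribute{Required: true},
    			},
    		},
    		Validators: []validator.List{
    			listvalidator.SizeAtLeast(1), // required: must be present
    			listvalidator.SizeAtMost(1),  // object: no more than one
    		},
    	}
    	_ = block
    }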
diff --git a/internal/service/serving_tf/model.go b/internal/service/serving_tf/model.go
index e34dcc103..7e6bdee0a 100755
--- a/internal/service/serving_tf/model.go
+++ b/internal/service/serving_tf/model.go
@@ -33,17 +33,17 @@ type Ai21LabsConfig struct {

type AiGatewayConfig struct {
	// Configuration for AI Guardrails to prevent unwanted data and unsafe data
	// in requests and responses.
-	Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"`
+	Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional,object"`
	// Configuration for payload logging using inference tables. Use these
	// tables to monitor and audit data being sent to and received from model
	// APIs and to improve model quality.
-	InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"`
+	InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional,object"`
	// Configuration for rate limits which can be set to limit endpoint traffic.
	RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"`
	// Configuration to enable usage tracking using system tables. These tables
	// allow you to monitor operational usage on endpoints and their associated
	// costs.
-	UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"`
+	UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"`
}

type AiGatewayGuardrailParameters struct {
@@ -51,7 +51,7 @@ type AiGatewayGuardrailParameters struct {
	// decide if the keyword exists in the request or response content.
	InvalidKeywords []types.String `tfsdk:"invalid_keywords" tf:"optional"`
	// Configuration for guardrail PII filter.
-	Pii []AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional"`
+	Pii []AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional,object"`
	// Indicates whether the safety filter is enabled.
	Safety types.Bool `tfsdk:"safety" tf:"optional"`
	// The list of allowed topics. Given a chat request, this guardrail flags
@@ -71,9 +71,9 @@ type AiGatewayGuardrailPiiBehavior struct {

type AiGatewayGuardrails struct {
	// Configuration for input guardrail filters.
-	Input []AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional"`
+	Input []AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional,object"`
	// Configuration for output guardrail filters.
-	Output []AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional"`
+	Output []AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional,object"`
}

type AiGatewayInferenceTableConfig struct {
@@ -178,13 +178,13 @@ type AutoCaptureConfigOutput struct {
	// The name of the schema in Unity Catalog.
	SchemaName types.String `tfsdk:"schema_name" tf:"optional"`

-	State []AutoCaptureState `tfsdk:"state" tf:"optional"`
+	State []AutoCaptureState `tfsdk:"state" tf:"optional,object"`
	// The prefix of the table in Unity Catalog.
	TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"`
}

type AutoCaptureState struct {
-	PayloadTable []PayloadTable `tfsdk:"payload_table" tf:"optional"`
+	PayloadTable []PayloadTable `tfsdk:"payload_table" tf:"optional,object"`
}

// Get build logs for a served model
@@ -228,9 +228,9 @@ type CohereConfig struct {

type CreateServingEndpoint struct {
	// The AI Gateway configuration for the serving endpoint. NOTE: only
	// external model endpoints are supported as of now.
-	AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"`
+	AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional,object"`
	// The core config of the serving endpoint.
-	Config []EndpointCoreConfigInput `tfsdk:"config" tf:""`
+	Config []EndpointCoreConfigInput `tfsdk:"config" tf:"object"`
	// The name of the serving endpoint. This field is required and must be
	// unique across a Databricks workspace. An endpoint name can consist of
	// alphanumeric characters, dashes, and underscores.
@@ -293,7 +293,7 @@ type EmbeddingsV1ResponseEmbeddingElement struct {

type EndpointCoreConfigInput struct {
	// Configuration for Inference Tables which automatically logs requests and
	// responses to Unity Catalog.
-	AutoCaptureConfig []AutoCaptureConfigInput `tfsdk:"auto_capture_config" tf:"optional"`
+	AutoCaptureConfig []AutoCaptureConfigInput `tfsdk:"auto_capture_config" tf:"optional,object"`
	// The name of the serving endpoint to update. This field is required.
	Name types.String `tfsdk:"-"`
	// A list of served entities for the endpoint to serve. A serving endpoint
@@ -304,13 +304,13 @@ type EndpointCoreConfigInput struct {
	ServedModels []ServedModelInput `tfsdk:"served_models" tf:"optional"`
	// The traffic config defining how invocations to the serving endpoint
	// should be routed.
-	TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"`
+	TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"`
}

type EndpointCoreConfigOutput struct {
	// Configuration for Inference Tables which automatically logs requests and
	// responses to Unity Catalog.
-	AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional"`
+	AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional,object"`
	// The config version that the serving endpoint is currently serving.
	ConfigVersion types.Int64 `tfsdk:"config_version" tf:"optional"`
	// The list of served entities under the serving endpoint config.
@@ -319,7 +319,7 @@
	// the serving endpoint config.
	ServedModels []ServedModelOutput `tfsdk:"served_models" tf:"optional"`
	// The traffic configuration associated with the serving endpoint config.
-	TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"`
+	TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"`
}

type EndpointCoreConfigSummary struct {
@@ -333,7 +333,7 @@

type EndpointPendingConfig struct {
	// Configuration for Inference Tables which automatically logs requests and
	// responses to Unity Catalog.
-	AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional"`
+	AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional,object"`
	// The config version that the serving endpoint is currently serving.
	ConfigVersion types.Int64 `tfsdk:"config_version" tf:"optional"`
	// The list of served entities belonging to the last issued update to the
@@ -346,7 +346,7 @@ type EndpointPendingConfig struct {
	StartTime types.Int64 `tfsdk:"start_time" tf:"optional"`
	// The traffic config defining how invocations to the serving endpoint
	// should be routed.
-	TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"`
+	TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"`
}

type EndpointState struct {
@@ -383,25 +383,25 @@ type ExportMetricsResponse struct {

type ExternalModel struct {
	// AI21Labs Config. Only required if the provider is 'ai21labs'.
-	Ai21labsConfig []Ai21LabsConfig `tfsdk:"ai21labs_config" tf:"optional"`
+	Ai21labsConfig []Ai21LabsConfig `tfsdk:"ai21labs_config" tf:"optional,object"`
	// Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.
-	AmazonBedrockConfig []AmazonBedrockConfig `tfsdk:"amazon_bedrock_config" tf:"optional"`
+	AmazonBedrockConfig []AmazonBedrockConfig `tfsdk:"amazon_bedrock_config" tf:"optional,object"`
	// Anthropic Config. Only required if the provider is 'anthropic'.
-	AnthropicConfig []AnthropicConfig `tfsdk:"anthropic_config" tf:"optional"`
+	AnthropicConfig []AnthropicConfig `tfsdk:"anthropic_config" tf:"optional,object"`
	// Cohere Config. Only required if the provider is 'cohere'.
-	CohereConfig []CohereConfig `tfsdk:"cohere_config" tf:"optional"`
+	CohereConfig []CohereConfig `tfsdk:"cohere_config" tf:"optional,object"`
	// Databricks Model Serving Config. Only required if the provider is
	// 'databricks-model-serving'.
-	DatabricksModelServingConfig []DatabricksModelServingConfig `tfsdk:"databricks_model_serving_config" tf:"optional"`
+	DatabricksModelServingConfig []DatabricksModelServingConfig `tfsdk:"databricks_model_serving_config" tf:"optional,object"`
	// Google Cloud Vertex AI Config. Only required if the provider is
	// 'google-cloud-vertex-ai'.
-	GoogleCloudVertexAiConfig []GoogleCloudVertexAiConfig `tfsdk:"google_cloud_vertex_ai_config" tf:"optional"`
+	GoogleCloudVertexAiConfig []GoogleCloudVertexAiConfig `tfsdk:"google_cloud_vertex_ai_config" tf:"optional,object"`
	// The name of the external model.
	Name types.String `tfsdk:"name" tf:""`
	// OpenAI Config. Only required if the provider is 'openai'.
-	OpenaiConfig []OpenAiConfig `tfsdk:"openai_config" tf:"optional"`
+	OpenaiConfig []OpenAiConfig `tfsdk:"openai_config" tf:"optional,object"`
	// PaLM Config. Only required if the provider is 'palm'.
-	PalmConfig []PaLmConfig `tfsdk:"palm_config" tf:"optional"`
+	PalmConfig []PaLmConfig `tfsdk:"palm_config" tf:"optional,object"`
	// The name of the provider for the external model. Currently, the supported
	// providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere',
	// 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and
@@ -513,7 +513,7 @@ type LogsRequest struct {

type ModelDataPlaneInfo struct {
	// Information required to query DataPlane API 'query' endpoint.
-	QueryInfo oauth2.DataPlaneInfo `tfsdk:"query_info" tf:"optional"`
+	QueryInfo oauth2.DataPlaneInfo `tfsdk:"query_info" tf:"optional,object"`
}

type OpenAiConfig struct {
@@ -606,11 +606,11 @@ type PayloadTable struct {

type PutAiGatewayRequest struct {
	// Configuration for AI Guardrails to prevent unwanted data and unsafe data
	// in requests and responses.
-	Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"`
+	Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional,object"`
	// Configuration for payload logging using inference tables. Use these
	// tables to monitor and audit data being sent to and received from model
	// APIs and to improve model quality.
-	InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"`
+	InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional,object"`
	// The name of the serving endpoint whose AI Gateway is being updated. This
	// field is required.
	Name types.String `tfsdk:"-"`
@@ -619,23 +619,23 @@
	// Configuration to enable usage tracking using system tables. These tables
	// allow you to monitor operational usage on endpoints and their associated
	// costs.
-	UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"`
+	UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"`
}

type PutAiGatewayResponse struct {
	// Configuration for AI Guardrails to prevent unwanted data and unsafe data
	// in requests and responses.
-	Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"`
+	Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional,object"`
	// Configuration for payload logging using inference tables. Use these
	// tables to monitor and audit data being sent to and received from model
	// APIs and to improve model quality .
-	InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"`
+	InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional,object"`
	// Configuration for rate limits which can be set to limit endpoint traffic.
	RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"`
	// Configuration to enable usage tracking using system tables. These tables
	// allow you to monitor operational usage on endpoints and their associated
	// costs.
-	UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"`
+	UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"`
}

// Update rate limits of a serving endpoint
@@ -656,7 +656,7 @@ type QueryEndpointInput struct {
	// Pandas Dataframe input in the records orientation.
	DataframeRecords []any `tfsdk:"dataframe_records" tf:"optional"`
	// Pandas Dataframe input in the split orientation.
-	DataframeSplit []DataframeSplitInput `tfsdk:"dataframe_split" tf:"optional"`
+	DataframeSplit []DataframeSplitInput `tfsdk:"dataframe_split" tf:"optional,object"`
	// The extra parameters field used ONLY for __completions, chat,__ and
	// __embeddings external & foundation model__ serving endpoints. This is a
	// map of strings and should only be used with other external/foundation
@@ -732,7 +732,7 @@ type QueryEndpointResponse struct {
	// The usage object that may be returned by the __external/foundation
	// model__ serving endpoint. This contains information about the number of
	// tokens used in the prompt and response.
-	Usage []ExternalModelUsageElement `tfsdk:"usage" tf:"optional"`
+	Usage []ExternalModelUsageElement `tfsdk:"usage" tf:"optional,object"`
}

type RateLimit struct {
@@ -781,7 +781,7 @@ type ServedEntityInput struct {
	// endpoint without external_model. If the endpoint is created without
	// external_model, users cannot update it to add external_model later. The
	// task type of all external models within an endpoint must be the same.
-	ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"`
+	ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional,object"`
	// ARN of the instance profile that the served entity uses to access AWS
	// resources.
	InstanceProfileArn types.String `tfsdk:"instance_profile_arn" tf:"optional"`
@@ -842,12 +842,12 @@ type ServedEntityOutput struct {
	// foundation_model, and (entity_name, entity_version, workload_size,
	// workload_type, and scale_to_zero_enabled) is returned based on the
	// endpoint type.
-	ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"`
+	ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional,object"`
	// The foundation model that is served. NOTE: Only one of foundation_model,
	// external_model, and (entity_name, entity_version, workload_size,
	// workload_type, and scale_to_zero_enabled) is returned based on the
	// endpoint type.
-	FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional"`
+	FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional,object"`
	// ARN of the instance profile that the served entity uses to access AWS
	// resources.
	InstanceProfileArn types.String `tfsdk:"instance_profile_arn" tf:"optional"`
@@ -861,7 +861,7 @@ type ServedEntityOutput struct {
	// zero.
	ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled" tf:"optional"`
	// Information corresponding to the state of the served entity.
-	State []ServedModelState `tfsdk:"state" tf:"optional"`
+	State []ServedModelState `tfsdk:"state" tf:"optional,object"`
	// The workload size of the served entity. The workload size corresponds to
	// a range of provisioned concurrency that the compute autoscales between. A
	// single unit of provisioned concurrency can process one request at a time.
@@ -893,11 +893,11 @@ type ServedEntitySpec struct {
	// The external model that is served. NOTE: Only one of external_model,
	// foundation_model, and (entity_name, entity_version) is returned based on
	// the endpoint type.
-	ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"`
+	ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional,object"`
	// The foundation model that is served. NOTE: Only one of foundation_model,
	// external_model, and (entity_name, entity_version) is returned based on
	// the endpoint type.
-	FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional"`
+	FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional,object"`
	// The name of the served entity.
	Name types.String `tfsdk:"name" tf:"optional"`
}
@@ -977,7 +977,7 @@ type ServedModelOutput struct {
	// zero.
	ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled" tf:"optional"`
	// Information corresponding to the state of the Served Model.
-	State []ServedModelState `tfsdk:"state" tf:"optional"`
+	State []ServedModelState `tfsdk:"state" tf:"optional,object"`
	// The workload size of the served model. The workload size corresponds to a
	// range of provisioned concurrency that the compute will autoscale between.
	// A single unit of provisioned concurrency can process one request at a
@@ -1034,9 +1034,9 @@ type ServerLogsResponse struct {

type ServingEndpoint struct {
	// The AI Gateway configuration for the serving endpoint. NOTE: Only
	// external model endpoints are currently supported.
-	AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"`
+	AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional,object"`
	// The config that is currently being served by the endpoint.
-	Config []EndpointCoreConfigSummary `tfsdk:"config" tf:"optional"`
+	Config []EndpointCoreConfigSummary `tfsdk:"config" tf:"optional,object"`
	// The timestamp when the endpoint was created in Unix time.
	CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"`
	// The email of the user who created the serving endpoint.
@@ -1049,7 +1049,7 @@
	// The name of the serving endpoint.
	Name types.String `tfsdk:"name" tf:"optional"`
	// Information corresponding to the state of the serving endpoint.
-	State []EndpointState `tfsdk:"state" tf:"optional"`
+	State []EndpointState `tfsdk:"state" tf:"optional,object"`
	// Tags attached to the serving endpoint.
	Tags []EndpointTag `tfsdk:"tags" tf:"optional"`
	// The task type of the serving endpoint.
@@ -1083,15 +1083,15 @@ type ServingEndpointAccessControlResponse struct {

type ServingEndpointDetailed struct {
	// The AI Gateway configuration for the serving endpoint. NOTE: Only
	// external model endpoints are currently supported.
-	AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"`
+	AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional,object"`
	// The config that is currently being served by the endpoint.
-	Config []EndpointCoreConfigOutput `tfsdk:"config" tf:"optional"`
+	Config []EndpointCoreConfigOutput `tfsdk:"config" tf:"optional,object"`
	// The timestamp when the endpoint was created in Unix time.
	CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"`
	// The email of the user who created the serving endpoint.
	Creator types.String `tfsdk:"creator" tf:"optional"`
	// Information required to query DataPlane APIs.
-	DataPlaneInfo []ModelDataPlaneInfo `tfsdk:"data_plane_info" tf:"optional"`
+	DataPlaneInfo []ModelDataPlaneInfo `tfsdk:"data_plane_info" tf:"optional,object"`
	// Endpoint invocation url if route optimization is enabled for endpoint
	EndpointUrl types.String `tfsdk:"endpoint_url" tf:"optional"`
	// System-generated ID of the endpoint. This is used to refer to the
@@ -1102,14 +1102,14 @@
	// The name of the serving endpoint.
	Name types.String `tfsdk:"name" tf:"optional"`
	// The config that the endpoint is attempting to update to.
-	PendingConfig []EndpointPendingConfig `tfsdk:"pending_config" tf:"optional"`
+	PendingConfig []EndpointPendingConfig `tfsdk:"pending_config" tf:"optional,object"`
	// The permission level of the principal making the request.
	PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"`
	// Boolean representing if route optimization has been enabled for the
	// endpoint
	RouteOptimized types.Bool `tfsdk:"route_optimized" tf:"optional"`
	// Information corresponding to the state of the serving endpoint.
-	State []EndpointState `tfsdk:"state" tf:"optional"`
+	State []EndpointState `tfsdk:"state" tf:"optional,object"`
	// Tags attached to the serving endpoint.
	Tags []EndpointTag `tfsdk:"tags" tf:"optional"`
	// The task type of the serving endpoint.
@@ -1157,7 +1157,7 @@ type V1ResponseChoiceElement struct {
	// The logprobs returned only by the __completions__ endpoint.
	Logprobs types.Int64 `tfsdk:"logprobs" tf:"optional"`
	// The message response from the __chat__ endpoint.
-	Message []ChatMessage `tfsdk:"message" tf:"optional"`
+	Message []ChatMessage `tfsdk:"message" tf:"optional,object"`
	// The text response from the __completions__ endpoint.
	Text types.String `tfsdk:"text" tf:"optional"`
}
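serving_tf nests these single-object fields several levels deep (AiGateway, then Guardrails, then Input or Output, and so on), so consumer code ends up unwrapping one-element slices repeatedly. A small generic helper is one way to keep that readable; this is purely illustrative and not part of the change, with trimmed stand-in shapes that only mirror the nesting:

    package main

    import "fmt"

    // first unwraps a single-object field: the zero value and false when
    // the slice is empty (object unset), the element and true otherwise.
    func first[T any](s []T) (T, bool) {
    	var zero T
    	if len(s) == 0 {
    		return zero, false
    	}
    	return s[0], true
    }

    // Trimmed stand-in shapes mirroring the serving_tf nesting above;
    // the real structs use the framework's types package.
    type AiGatewayGuardrailParameters struct{ Safety bool }
    type AiGatewayGuardrails struct{ Input []AiGatewayGuardrailParameters }
    type AiGatewayConfig struct{ Guardrails []AiGatewayGuardrails }
    type ServingEndpoint struct{ AiGateway []AiGatewayConfig }

    func main() {
    	ep := ServingEndpoint{AiGateway: []AiGatewayConfig{{
    		Guardrails: []AiGatewayGuardrails{{
    			Input: []AiGatewayGuardrailParameters{{Safety: true}},
    		}},
    	}}}
    	if gw, ok := first(ep.AiGateway); ok {
    		if g, ok := first(gw.Guardrails); ok {
    			if in, ok := first(g.Input); ok {
    				fmt.Println("safety filter enabled:", in.Safety)
    			}
    		}
    	}
    }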
This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -115,15 +115,15 @@ type ComplianceSecurityProfileSetting struct { } type Config struct { - Email []EmailConfig `tfsdk:"email" tf:"optional"` + Email []EmailConfig `tfsdk:"email" tf:"optional,object"` - GenericWebhook []GenericWebhookConfig `tfsdk:"generic_webhook" tf:"optional"` + GenericWebhook []GenericWebhookConfig `tfsdk:"generic_webhook" tf:"optional,object"` - MicrosoftTeams []MicrosoftTeamsConfig `tfsdk:"microsoft_teams" tf:"optional"` + MicrosoftTeams []MicrosoftTeamsConfig `tfsdk:"microsoft_teams" tf:"optional,object"` - Pagerduty []PagerdutyConfig `tfsdk:"pagerduty" tf:"optional"` + Pagerduty []PagerdutyConfig `tfsdk:"pagerduty" tf:"optional,object"` - Slack []SlackConfig `tfsdk:"slack" tf:"optional"` + Slack []SlackConfig `tfsdk:"slack" tf:"optional,object"` } // Details required to configure a block list or allow list. @@ -143,7 +143,7 @@ type CreateIpAccessList struct { // An IP access list was successfully created. type CreateIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } type CreateNetworkConnectivityConfigRequest struct { @@ -161,7 +161,7 @@ type CreateNetworkConnectivityConfigRequest struct { type CreateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. - Config []Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional,object"` // The display name for the notification destination. DisplayName types.String `tfsdk:"display_name" tf:"optional"` } @@ -178,7 +178,7 @@ type CreateOboTokenRequest struct { // An on-behalf token was successfully created for the service principal. type CreateOboTokenResponse struct { - TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional,object"` // Value of the token. TokenValue types.String `tfsdk:"token_value" tf:"optional"` } @@ -205,7 +205,7 @@ type CreateTokenRequest struct { type CreateTokenResponse struct { // The information for the new token. - TokenInfo []PublicTokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []PublicTokenInfo `tfsdk:"token_info" tf:"optional,object"` // The value of the new token. TokenValue types.String `tfsdk:"token_value" tf:"optional"` } @@ -221,7 +221,7 @@ type CspEnablementAccount struct { type CspEnablementAccountSetting struct { // Account level policy for CSP - CspEnablementAccount []CspEnablementAccount `tfsdk:"csp_enablement_account" tf:""` + CspEnablementAccount []CspEnablementAccount `tfsdk:"csp_enablement_account" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -257,7 +257,7 @@ type DefaultNamespaceSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - Namespace []StringMessage `tfsdk:"namespace" tf:""` + Namespace []StringMessage `tfsdk:"namespace" tf:"object"` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. 
The setting name in the path parameter will be respected instead. @@ -430,7 +430,7 @@ type DeleteTokenManagementRequest struct { } type DisableLegacyAccess struct { - DisableLegacyAccess []BooleanMessage `tfsdk:"disable_legacy_access" tf:""` + DisableLegacyAccess []BooleanMessage `tfsdk:"disable_legacy_access" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -448,7 +448,7 @@ type DisableLegacyAccess struct { } type DisableLegacyFeatures struct { - DisableLegacyFeatures []BooleanMessage `tfsdk:"disable_legacy_features" tf:""` + DisableLegacyFeatures []BooleanMessage `tfsdk:"disable_legacy_features" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -480,7 +480,7 @@ type EnhancedSecurityMonitoring struct { type EnhancedSecurityMonitoringSetting struct { // SHIELD feature: ESM - EnhancedSecurityMonitoringWorkspace []EnhancedSecurityMonitoring `tfsdk:"enhanced_security_monitoring_workspace" tf:""` + EnhancedSecurityMonitoringWorkspace []EnhancedSecurityMonitoring `tfsdk:"enhanced_security_monitoring_workspace" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -504,7 +504,7 @@ type EsmEnablementAccount struct { type EsmEnablementAccountSetting struct { // Account level policy for ESM - EsmEnablementAccount []EsmEnablementAccount `tfsdk:"esm_enablement_account" tf:""` + EsmEnablementAccount []EsmEnablementAccount `tfsdk:"esm_enablement_account" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -539,7 +539,7 @@ type ExchangeToken struct { // Exchange a token with the IdP type ExchangeTokenRequest struct { // The partition of Credentials store - PartitionId []PartitionId `tfsdk:"partitionId" tf:""` + PartitionId []PartitionId `tfsdk:"partitionId" tf:"object"` // Array of scopes for the token request. Scopes []types.String `tfsdk:"scopes" tf:""` // A list of token types being requested @@ -554,7 +554,7 @@ type ExchangeTokenResponse struct { // An IP access list was successfully returned. type FetchIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } type GenericWebhookConfig struct { @@ -682,7 +682,7 @@ type GetIpAccessListRequest struct { type GetIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } // IP access lists were successfully returned. @@ -751,7 +751,7 @@ type GetTokenPermissionLevelsResponse struct { // Token with specified Token ID was successfully returned. 
type GetTokenResponse struct { - TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional,object"` } // Definition of an IP Access list @@ -931,10 +931,10 @@ type NccEgressConfig struct { // The network connectivity rules that are applied by default without // resource specific configurations. You can find the stable network // information of your serverless compute resources here. - DefaultRules []NccEgressDefaultRules `tfsdk:"default_rules" tf:"optional"` + DefaultRules []NccEgressDefaultRules `tfsdk:"default_rules" tf:"optional,object"` // The network connectivity rules that are configured for each destination. // These rules override default rules. - TargetRules []NccEgressTargetRules `tfsdk:"target_rules" tf:"optional"` + TargetRules []NccEgressTargetRules `tfsdk:"target_rules" tf:"optional,object"` } // The network connectivity rules that are applied by default without resource @@ -944,11 +944,11 @@ type NccEgressDefaultRules struct { // The stable AWS IP CIDR blocks. You can use these to configure the // firewall of your resources to allow traffic from your Databricks // workspace. - AwsStableIpRule []NccAwsStableIpRule `tfsdk:"aws_stable_ip_rule" tf:"optional"` + AwsStableIpRule []NccAwsStableIpRule `tfsdk:"aws_stable_ip_rule" tf:"optional,object"` // The stable Azure service endpoints. You can configure the firewall of // your Azure resources to allow traffic from your Databricks serverless // compute resources. - AzureServiceEndpointRule []NccAzureServiceEndpointRule `tfsdk:"azure_service_endpoint_rule" tf:"optional"` + AzureServiceEndpointRule []NccAzureServiceEndpointRule `tfsdk:"azure_service_endpoint_rule" tf:"optional,object"` } // The network connectivity rules that are configured for each destination. These @@ -964,7 +964,7 @@ type NetworkConnectivityConfiguration struct { CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // The network connectivity rules that apply to network traffic from your // serverless compute resources. - EgressConfig []NccEgressConfig `tfsdk:"egress_config" tf:"optional"` + EgressConfig []NccEgressConfig `tfsdk:"egress_config" tf:"optional,object"` // The name of the network connectivity configuration. The name can contain // alphanumeric characters, hyphens, and underscores. The length must be // between 3 and 30 characters. The name must match the regular expression @@ -984,7 +984,7 @@ type NotificationDestination struct { // The configuration for the notification destination. Will be exactly one // of the nested configs. Only returns for users with workspace admin // permissions. - Config []Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional,object"` // [Output-only] The type of the notification destination. The type cannot // be changed once set. DestinationType types.String `tfsdk:"destination_type" tf:"optional"` @@ -1028,7 +1028,7 @@ type PersonalComputeSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - PersonalCompute []PersonalComputeMessage `tfsdk:"personal_compute" tf:""` + PersonalCompute []PersonalComputeMessage `tfsdk:"personal_compute" tf:"object"` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. The setting name in the path parameter will be respected instead.
@@ -1085,7 +1085,7 @@ type RestrictWorkspaceAdminsSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - RestrictWorkspaceAdmins []RestrictWorkspaceAdminsMessage `tfsdk:"restrict_workspace_admins" tf:""` + RestrictWorkspaceAdmins []RestrictWorkspaceAdminsMessage `tfsdk:"restrict_workspace_admins" tf:"object"` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. The setting name in the path parameter will be respected instead. @@ -1198,7 +1198,7 @@ type UpdateAutomaticClusterUpdateSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []AutomaticClusterUpdateSetting `tfsdk:"setting" tf:""` + Setting []AutomaticClusterUpdateSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1212,7 +1212,7 @@ type UpdateComplianceSecurityProfileSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []ComplianceSecurityProfileSetting `tfsdk:"setting" tf:""` + Setting []ComplianceSecurityProfileSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1226,7 +1226,7 @@ type UpdateCspEnablementAccountSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []CspEnablementAccountSetting `tfsdk:"setting" tf:""` + Setting []CspEnablementAccountSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1248,7 +1248,7 @@ type UpdateDefaultNamespaceSettingRequest struct { // assumed). This setting requires a restart of clusters and SQL warehouses // to take effect. Additionally, the default namespace only applies when // using Unity Catalog-enabled compute. - Setting []DefaultNamespaceSetting `tfsdk:"setting" tf:""` + Setting []DefaultNamespaceSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1262,7 +1262,7 @@ type UpdateDisableLegacyAccessRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []DisableLegacyAccess `tfsdk:"setting" tf:""` + Setting []DisableLegacyAccess `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1276,7 +1276,7 @@ type UpdateDisableLegacyFeaturesRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []DisableLegacyFeatures `tfsdk:"setting" tf:""` + Setting []DisableLegacyFeatures `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1290,7 +1290,7 @@ type UpdateEnhancedSecurityMonitoringSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []EnhancedSecurityMonitoringSetting `tfsdk:"setting" tf:""` + Setting []EnhancedSecurityMonitoringSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1304,7 +1304,7 @@ type UpdateEsmEnablementAccountSettingRequest struct { // the field mask, use comma as the separator (no space). 
FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []EsmEnablementAccountSetting `tfsdk:"setting" tf:""` + Setting []EsmEnablementAccountSetting `tfsdk:"setting" tf:"object"` } // Details required to update an IP access list. @@ -1329,7 +1329,7 @@ type UpdateIpAccessList struct { type UpdateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. - Config []Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional,object"` // The display name for the notification destination. DisplayName types.String `tfsdk:"display_name" tf:"optional"` @@ -1347,7 +1347,7 @@ type UpdatePersonalComputeSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []PersonalComputeSetting `tfsdk:"setting" tf:""` + Setting []PersonalComputeSetting `tfsdk:"setting" tf:"object"` } type UpdateResponse struct { @@ -1364,5 +1364,5 @@ type UpdateRestrictWorkspaceAdminsSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []RestrictWorkspaceAdminsSetting `tfsdk:"setting" tf:""` + Setting []RestrictWorkspaceAdminsSetting `tfsdk:"setting" tf:"object"` } diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index 210f466b3..d83c38ff9 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -22,7 +22,7 @@ type CentralCleanRoomInfo struct { // All collaborators who are in the clean room. Collaborators []CleanRoomCollaboratorInfo `tfsdk:"collaborators" tf:"optional"` // The collaborator who created the clean room. - Creator []CleanRoomCollaboratorInfo `tfsdk:"creator" tf:"optional"` + Creator []CleanRoomCollaboratorInfo `tfsdk:"creator" tf:"optional,object"` // The cloud where clean room tasks will be run. StationCloud types.String `tfsdk:"station_cloud" tf:"optional"` // The region where clean room tasks will be run. @@ -33,11 +33,11 @@ type CleanRoomAssetInfo struct { // Time at which this asset was added, in epoch milliseconds. AddedAt types.Int64 `tfsdk:"added_at" tf:"optional"` // Details about the notebook asset. - NotebookInfo []CleanRoomNotebookInfo `tfsdk:"notebook_info" tf:"optional"` + NotebookInfo []CleanRoomNotebookInfo `tfsdk:"notebook_info" tf:"optional,object"` // The collaborator who owns the asset. - Owner []CleanRoomCollaboratorInfo `tfsdk:"owner" tf:"optional"` + Owner []CleanRoomCollaboratorInfo `tfsdk:"owner" tf:"optional,object"` // Details about the table asset. - TableInfo []CleanRoomTableInfo `tfsdk:"table_info" tf:"optional"` + TableInfo []CleanRoomTableInfo `tfsdk:"table_info" tf:"optional,object"` // Time at which this asset was updated, in epoch milliseconds. UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` } @@ -55,7 +55,7 @@ type CleanRoomCatalogUpdate struct { // The name of the catalog to update assets. CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` // The updates to the assets in the catalog. - Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional"` + Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional,object"` } type CleanRoomCollaboratorInfo struct { @@ -83,7 +83,7 @@ type CleanRoomInfo struct { // Username of current owner of clean room. Owner types.String `tfsdk:"owner" tf:"optional"` // Central clean room details. 
- RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"optional"` + RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"optional,object"` // Time at which this clean room was updated, in epoch milliseconds. UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` // Username of clean room updater. @@ -115,7 +115,7 @@ type ColumnInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` - Mask []ColumnMask `tfsdk:"mask" tf:"optional"` + Mask []ColumnMask `tfsdk:"mask" tf:"optional,object"` // Name of Column. Name types.String `tfsdk:"name" tf:"optional"` // Whether field may be Null (default: true). @@ -154,7 +154,7 @@ type CreateCleanRoom struct { // Name of the clean room. Name types.String `tfsdk:"name" tf:""` // Central clean room details. - RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:""` + RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"object"` } type CreateProvider struct { @@ -182,13 +182,13 @@ type CreateRecipient struct { // Expiration timestamp of the token, in epoch milliseconds. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // IP Access List - IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional,object"` // Name of Recipient. Name types.String `tfsdk:"name" tf:""` // Username of the recipient owner. Owner types.String `tfsdk:"owner" tf:"optional"` // Recipient properties as map of string key-value pairs. - PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional,object"` // The one-time sharing code provided by the data recipient. This field is // required when the __authentication_type__ is **DATABRICKS**. SharingCode types.String `tfsdk:"sharing_code" tf:"optional"` @@ -447,7 +447,7 @@ type ProviderInfo struct { Owner types.String `tfsdk:"owner" tf:"optional"` // The recipient profile. This field is only present when the // authentication_type is `TOKEN`. - RecipientProfile []RecipientProfile `tfsdk:"recipient_profile" tf:"optional"` + RecipientProfile []RecipientProfile `tfsdk:"recipient_profile" tf:"optional,object"` // This field is only present when the authentication_type is `TOKEN` or not // provided. RecipientProfileStr types.String `tfsdk:"recipient_profile_str" tf:"optional"` @@ -489,7 +489,7 @@ type RecipientInfo struct { // __cloud__:__region__:__metastore-uuid__. DataRecipientGlobalMetastoreId types.String `tfsdk:"data_recipient_global_metastore_id" tf:"optional"` // IP Access List - IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional,object"` // Unique identifier of recipient's Unity Catalog metastore. This field is // only present when the __authentication_type__ is **DATABRICKS** MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` @@ -498,7 +498,7 @@ type RecipientInfo struct { // Username of the recipient owner. Owner types.String `tfsdk:"owner" tf:"optional"` // Recipient properties as map of string key-value pairs. - PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional,object"` // Cloud region of the recipient's Unity Catalog Metastore.
This field is // only present when the __authentication_type__ is **DATABRICKS**. Region types.String `tfsdk:"region" tf:"optional"` @@ -676,7 +676,7 @@ type SharedDataObjectUpdate struct { // One of: **ADD**, **REMOVE**, **UPDATE**. Action types.String `tfsdk:"action" tf:"optional"` // The data object that is being added, removed, or updated. - DataObject []SharedDataObject `tfsdk:"data_object" tf:"optional"` + DataObject []SharedDataObject `tfsdk:"data_object" tf:"optional,object"` } type UpdateCleanRoom struct { @@ -713,7 +713,7 @@ type UpdateRecipient struct { // Expiration timestamp of the token, in epoch milliseconds. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // IP Access List - IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional,object"` // Name of the recipient. Name types.String `tfsdk:"-"` // New name for the recipient. @@ -724,7 +724,7 @@ type UpdateRecipient struct { // update request, the specified properties will override the existing // properties. To add and remove properties, one would need to perform a // read-modify-write. - PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional,object"` } type UpdateResponse struct { diff --git a/internal/service/sql_tf/model.go b/internal/service/sql_tf/model.go index f7b5567a2..18cf637b8 100755 --- a/internal/service/sql_tf/model.go +++ b/internal/service/sql_tf/model.go @@ -25,7 +25,7 @@ type AccessControl struct { type Alert struct { // Trigger conditions of the alert. - Condition []AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` // The timestamp indicating when the alert was created. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // Custom body of alert notification, if it exists. See [here] for custom @@ -74,17 +74,17 @@ type AlertCondition struct { Op types.String `tfsdk:"op" tf:"optional"` // Name of the column from the query result to use for comparison in alert // evaluation. - Operand []AlertConditionOperand `tfsdk:"operand" tf:"optional"` + Operand []AlertConditionOperand `tfsdk:"operand" tf:"optional,object"` // Threshold value used for comparison in alert evaluation. - Threshold []AlertConditionThreshold `tfsdk:"threshold" tf:"optional"` + Threshold []AlertConditionThreshold `tfsdk:"threshold" tf:"optional,object"` } type AlertConditionOperand struct { - Column []AlertOperandColumn `tfsdk:"column" tf:"optional"` + Column []AlertOperandColumn `tfsdk:"column" tf:"optional,object"` } type AlertConditionThreshold struct { - Value []AlertOperandValue `tfsdk:"value" tf:"optional"` + Value []AlertOperandValue `tfsdk:"value" tf:"optional,object"` } type AlertOperandColumn struct { @@ -158,7 +158,7 @@ type AlertQuery struct { // on the query page. Name types.String `tfsdk:"name" tf:"optional"` - Options []QueryOptions `tfsdk:"options" tf:"optional"` + Options []QueryOptions `tfsdk:"options" tf:"optional,object"` // The text of the query to be run. Query types.String `tfsdk:"query" tf:"optional"` @@ -234,7 +234,7 @@ type CreateAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:""` // Alert configuration options. - Options []AlertOptions `tfsdk:"options" tf:""` + Options []AlertOptions `tfsdk:"options" tf:"object"` // The identifier of the workspace folder containing the object. 
Parent types.String `tfsdk:"parent" tf:"optional"` // Query ID. @@ -246,12 +246,12 @@ type CreateAlert struct { } type CreateAlertRequest struct { - Alert []CreateAlertRequestAlert `tfsdk:"alert" tf:"optional"` + Alert []CreateAlertRequestAlert `tfsdk:"alert" tf:"optional,object"` } type CreateAlertRequestAlert struct { // Trigger conditions of the alert. - Condition []AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` // Custom body of alert notification, if it exists. See [here] for custom // templating instructions. // @@ -276,7 +276,7 @@ type CreateAlertRequestAlert struct { } type CreateQueryRequest struct { - Query []CreateQueryRequestQuery `tfsdk:"query" tf:"optional"` + Query []CreateQueryRequestQuery `tfsdk:"query" tf:"optional,object"` } type CreateQueryRequestQuery struct { @@ -325,7 +325,7 @@ type CreateQueryVisualizationsLegacyRequest struct { } type CreateVisualizationRequest struct { - Visualization []CreateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []CreateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional,object"` } type CreateVisualizationRequestVisualization struct { @@ -356,7 +356,7 @@ type CreateWarehouseRequest struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -402,7 +402,7 @@ type CreateWarehouseRequest struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags []EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional,object"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -420,7 +420,7 @@ type CreateWidget struct { // Widget ID returned by :method:dashboardwidgets/create Id types.String `tfsdk:"-"` - Options []WidgetOptions `tfsdk:"options" tf:""` + Options []WidgetOptions `tfsdk:"options" tf:"object"` // If this is a textbox widget, the application displays this text. This // field is ignored if the widget contains a visualization in the // `visualization` field. @@ -459,7 +459,7 @@ type Dashboard struct { // the dashboard page. Name types.String `tfsdk:"name" tf:"optional"` - Options []DashboardOptions `tfsdk:"options" tf:"optional"` + Options []DashboardOptions `tfsdk:"options" tf:"optional,object"` // The identifier of the workspace folder containing the object. Parent types.String `tfsdk:"parent" tf:"optional"` // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * @@ -473,7 +473,7 @@ type Dashboard struct { // Timestamp when this dashboard was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User []User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional,object"` // The ID of the user who owns the dashboard. UserId types.Int64 `tfsdk:"user_id" tf:"optional"` @@ -555,7 +555,7 @@ type DateRange struct { type DateRangeValue struct { // Manually specified date-time range value. 
- DateRangeValue []DateRange `tfsdk:"date_range_value" tf:"optional"` + DateRangeValue []DateRange `tfsdk:"date_range_value" tf:"optional,object"` // Dynamic date-time range value based on current date-time. DynamicDateRangeValue types.String `tfsdk:"dynamic_date_range_value" tf:"optional"` // Date-time precision to format the value into when the query is run. @@ -624,7 +624,7 @@ type EditAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:""` // Alert configuration options. - Options []AlertOptions `tfsdk:"options" tf:""` + Options []AlertOptions `tfsdk:"options" tf:"object"` // Query ID. QueryId types.String `tfsdk:"query_id" tf:""` // Number of seconds after being triggered before the alert rearms itself @@ -642,7 +642,7 @@ type EditWarehouseRequest struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -690,7 +690,7 @@ type EditWarehouseRequest struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags []EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional,object"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -716,7 +716,7 @@ type EndpointHealth struct { Details types.String `tfsdk:"details" tf:"optional"` // The reason for failure to bring up clusters for this warehouse. This is // available when status is 'FAILED' and sometimes when it is DEGRADED. - FailureReason []TerminationReason `tfsdk:"failure_reason" tf:"optional"` + FailureReason []TerminationReason `tfsdk:"failure_reason" tf:"optional,object"` // Deprecated. split into summary and details for security Message types.String `tfsdk:"message" tf:"optional"` // Health status of the warehouse. @@ -735,7 +735,7 @@ type EndpointInfo struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -753,7 +753,7 @@ type EndpointInfo struct { EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute" tf:"optional"` // Optional health status. Assume the warehouse is healthy if this field is // not set. - Health []EndpointHealth `tfsdk:"health" tf:"optional"` + Health []EndpointHealth `tfsdk:"health" tf:"optional,object"` // unique identifier for warehouse Id types.String `tfsdk:"id" tf:"optional"` // Deprecated. 
Instance profile used to pass IAM role to the cluster @@ -787,7 +787,7 @@ type EndpointInfo struct { // current number of clusters running for the service NumClusters types.Int64 `tfsdk:"num_clusters" tf:"optional"` // ODBC parameters for the SQL warehouse - OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional"` + OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional,object"` // Configurations whether the warehouse should use spot instances. SpotInstancePolicy types.String `tfsdk:"spot_instance_policy" tf:"optional"` // State of the warehouse @@ -796,7 +796,7 @@ type EndpointInfo struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags []EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional,object"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -817,7 +817,7 @@ type EnumValue struct { // List of valid query parameter values, newline delimited. EnumOptions types.String `tfsdk:"enum_options" tf:"optional"` // If specified, allows multiple values to be selected for this parameter. - MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional,object"` // List of selected query parameter values. Values []types.String `tfsdk:"values" tf:"optional"` } @@ -1069,7 +1069,7 @@ type GetWarehouseResponse struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -1087,7 +1087,7 @@ type GetWarehouseResponse struct { EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute" tf:"optional"` // Optional health status. Assume the warehouse is healthy if this field is // not set. - Health []EndpointHealth `tfsdk:"health" tf:"optional"` + Health []EndpointHealth `tfsdk:"health" tf:"optional,object"` // unique identifier for warehouse Id types.String `tfsdk:"id" tf:"optional"` // Deprecated. Instance profile used to pass IAM role to the cluster @@ -1121,7 +1121,7 @@ type GetWarehouseResponse struct { // current number of clusters running for the service NumClusters types.Int64 `tfsdk:"num_clusters" tf:"optional"` // ODBC parameters for the SQL warehouse - OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional"` + OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional,object"` // Configurations whether the warehouse should use spot instances. SpotInstancePolicy types.String `tfsdk:"spot_instance_policy" tf:"optional"` // State of the warehouse @@ -1130,7 +1130,7 @@ type GetWarehouseResponse struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags []EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional,object"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. 
@@ -1139,9 +1139,9 @@ type GetWarehouseResponse struct { type GetWorkspaceWarehouseConfigResponse struct { // Optional: Channel selection details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Deprecated: Use sql_configuration_parameters - ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional"` + ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional,object"` // Spark confs for external hive metastore configuration JSON serialized // size must be <= 512K DataAccessConfig []EndpointConfPair `tfsdk:"data_access_config" tf:"optional"` @@ -1153,7 +1153,7 @@ type GetWorkspaceWarehouseConfigResponse struct { // specific type availability in the warehouse create and edit form UI. EnabledWarehouseTypes []WarehouseTypePair `tfsdk:"enabled_warehouse_types" tf:"optional"` // Deprecated: Use sql_configuration_parameters - GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional"` + GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional,object"` // GCP only: Google Service Account used to pass to cluster to access Google // Cloud Storage GoogleServiceAccount types.String `tfsdk:"google_service_account" tf:"optional"` @@ -1162,7 +1162,7 @@ type GetWorkspaceWarehouseConfigResponse struct { // Security policy for warehouses SecurityPolicy types.String `tfsdk:"security_policy" tf:"optional"` // SQL configuration parameters - SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional"` + SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional,object"` } type LegacyAlert struct { @@ -1175,11 +1175,11 @@ type LegacyAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:"optional"` // Alert configuration options. - Options []AlertOptions `tfsdk:"options" tf:"optional"` + Options []AlertOptions `tfsdk:"options" tf:"optional,object"` // The identifier of the workspace folder containing the object. Parent types.String `tfsdk:"parent" tf:"optional"` - Query []AlertQuery `tfsdk:"query" tf:"optional"` + Query []AlertQuery `tfsdk:"query" tf:"optional,object"` // Number of seconds after being triggered before the alert rearms itself // and can be triggered again. If `null`, alert will never be triggered // again. @@ -1191,7 +1191,7 @@ type LegacyAlert struct { // Timestamp when the alert was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User []User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional,object"` } type LegacyQuery struct { @@ -1228,7 +1228,7 @@ type LegacyQuery struct { // type parameters are handled safely. IsSafe types.Bool `tfsdk:"is_safe" tf:"optional"` - LastModifiedBy []User `tfsdk:"last_modified_by" tf:"optional"` + LastModifiedBy []User `tfsdk:"last_modified_by" tf:"optional,object"` // The ID of the user who last saved changes to this query. LastModifiedById types.Int64 `tfsdk:"last_modified_by_id" tf:"optional"` // If there is a cached result for this query and user, this field includes @@ -1239,7 +1239,7 @@ type LegacyQuery struct { // on the query page. Name types.String `tfsdk:"name" tf:"optional"` - Options []QueryOptions `tfsdk:"options" tf:"optional"` + Options []QueryOptions `tfsdk:"options" tf:"optional,object"` // The identifier of the workspace folder containing the object.
Parent types.String `tfsdk:"parent" tf:"optional"` // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * @@ -1258,7 +1258,7 @@ type LegacyQuery struct { // The timestamp at which this query was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User []User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional,object"` // The ID of the user who owns the query. UserId types.Int64 `tfsdk:"user_id" tf:"optional"` @@ -1285,7 +1285,7 @@ type LegacyVisualization struct { // settings in JSON. Options any `tfsdk:"options" tf:"optional"` - Query []LegacyQuery `tfsdk:"query" tf:"optional"` + Query []LegacyQuery `tfsdk:"query" tf:"optional,object"` // The type of visualization: chart, table, pivot table, and so on. Type types.String `tfsdk:"type" tf:"optional"` @@ -1307,7 +1307,7 @@ type ListAlertsResponse struct { type ListAlertsResponseAlert struct { // Trigger conditions of the alert. - Condition []AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` // The timestamp indicating when the alert was created. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // Custom body of alert notification, if it exists. See [here] for custom @@ -1528,7 +1528,7 @@ type Parameter struct { EnumOptions types.String `tfsdk:"enumOptions" tf:"optional"` // If specified, allows multiple values to be selected for this parameter. // Only applies to dropdown list and query-based dropdown list parameters. - MultiValuesOptions []MultiValuesOptions `tfsdk:"multiValuesOptions" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multiValuesOptions" tf:"optional,object"` // The literal parameter marker that appears between double curly braces in // the query text. Name types.String `tfsdk:"name" tf:"optional"` @@ -1584,7 +1584,7 @@ type Query struct { type QueryBackedValue struct { // If specified, allows multiple values to be selected for this parameter. - MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional,object"` // UUID of the query that provides the parameter values. QueryId types.String `tfsdk:"query_id" tf:"optional"` // List of selected query parameter values. @@ -1622,7 +1622,7 @@ type QueryEditContent struct { type QueryFilter struct { // A range filter for query submitted time. The time range must be <= 30 // days. - QueryStartTimeRange []TimeRange `tfsdk:"query_start_time_range" tf:"optional"` + QueryStartTimeRange []TimeRange `tfsdk:"query_start_time_range" tf:"optional,object"` // A list of statement IDs. StatementIds []types.String `tfsdk:"statement_ids" tf:"optional"` @@ -1635,7 +1635,7 @@ type QueryFilter struct { type QueryInfo struct { // SQL Warehouse channel information at the time of query execution - ChannelUsed []ChannelInfo `tfsdk:"channel_used" tf:"optional"` + ChannelUsed []ChannelInfo `tfsdk:"channel_used" tf:"optional,object"` // Total execution time of the statement ( excluding result fetch time ). Duration types.Int64 `tfsdk:"duration" tf:"optional"` // Alias for `warehouse_id`. @@ -1654,7 +1654,7 @@ type QueryInfo struct { // A key that can be used to look up query details. LookupKey types.String `tfsdk:"lookup_key" tf:"optional"` // Metrics about query execution. 
- Metrics []QueryMetrics `tfsdk:"metrics" tf:"optional"` + Metrics []QueryMetrics `tfsdk:"metrics" tf:"optional,object"` // Whether plans exist for the execution, or the reason why they are missing PlansState types.String `tfsdk:"plans_state" tf:"optional"` // The time the query ended. @@ -1773,21 +1773,21 @@ type QueryOptions struct { type QueryParameter struct { // Date-range query parameter value. Can only specify one of // `dynamic_date_range_value` or `date_range_value`. - DateRangeValue []DateRangeValue `tfsdk:"date_range_value" tf:"optional"` + DateRangeValue []DateRangeValue `tfsdk:"date_range_value" tf:"optional,object"` // Date query parameter value. Can only specify one of `dynamic_date_value` // or `date_value`. - DateValue []DateValue `tfsdk:"date_value" tf:"optional"` + DateValue []DateValue `tfsdk:"date_value" tf:"optional,object"` // Dropdown query parameter value. - EnumValue []EnumValue `tfsdk:"enum_value" tf:"optional"` + EnumValue []EnumValue `tfsdk:"enum_value" tf:"optional,object"` // Literal parameter marker that appears between double curly braces in the // query text. Name types.String `tfsdk:"name" tf:"optional"` // Numeric query parameter value. - NumericValue []NumericValue `tfsdk:"numeric_value" tf:"optional"` + NumericValue []NumericValue `tfsdk:"numeric_value" tf:"optional,object"` // Query-based dropdown query parameter value. - QueryBackedValue []QueryBackedValue `tfsdk:"query_backed_value" tf:"optional"` + QueryBackedValue []QueryBackedValue `tfsdk:"query_backed_value" tf:"optional,object"` // Text query parameter value. - TextValue []TextValue `tfsdk:"text_value" tf:"optional"` + TextValue []TextValue `tfsdk:"text_value" tf:"optional,object"` // Text displayed in the user-facing parameter widget in the UI. Title types.String `tfsdk:"title" tf:"optional"` } @@ -1874,7 +1874,7 @@ type ResultManifest struct { Format types.String `tfsdk:"format" tf:"optional"` // The schema is an ordered list of column descriptions. - Schema []ResultSchema `tfsdk:"schema" tf:"optional"` + Schema []ResultSchema `tfsdk:"schema" tf:"optional,object"` // The total number of bytes in the result set. This field is not available // when using `INLINE` disposition. TotalByteCount types.Int64 `tfsdk:"total_byte_count" tf:"optional"` @@ -1920,9 +1920,9 @@ type SetResponse struct { type SetWorkspaceWarehouseConfigRequest struct { // Optional: Channel selection details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Deprecated: Use sql_configuration_parameters - ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional"` + ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional,object"` // Spark confs for external hive metastore configuration JSON serialized // size must be <= 512K DataAccessConfig []EndpointConfPair `tfsdk:"data_access_config" tf:"optional"` @@ -1934,7 +1934,7 @@ type SetWorkspaceWarehouseConfigRequest struct { // specific type availability in the warehouse create and edit form UI.
EnabledWarehouseTypes []WarehouseTypePair `tfsdk:"enabled_warehouse_types" tf:"optional"` // Deprecated: Use sql_configuration_parameters - GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional"` + GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional,object"` // GCP only: Google Service Account used to pass to cluster to access Google // Cloud Storage GoogleServiceAccount types.String `tfsdk:"google_service_account" tf:"optional"` @@ -1943,7 +1943,7 @@ type SetWorkspaceWarehouseConfigRequest struct { // Security policy for warehouses SecurityPolicy types.String `tfsdk:"security_policy" tf:"optional"` // SQL configuration parameters - SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional"` + SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional,object"` } type SetWorkspaceWarehouseConfigResponse struct { @@ -1976,21 +1976,21 @@ type StatementParameterListItem struct { type StatementResponse struct { // The result manifest provides schema and metadata for the result set. - Manifest []ResultManifest `tfsdk:"manifest" tf:"optional"` + Manifest []ResultManifest `tfsdk:"manifest" tf:"optional,object"` - Result []ResultData `tfsdk:"result" tf:"optional"` + Result []ResultData `tfsdk:"result" tf:"optional,object"` // The statement ID is returned upon successfully submitting a SQL // statement, and is a required reference for all subsequent calls. StatementId types.String `tfsdk:"statement_id" tf:"optional"` // The status response includes execution state and if relevant, error // information. - Status []StatementStatus `tfsdk:"status" tf:"optional"` + Status []StatementStatus `tfsdk:"status" tf:"optional,object"` } // The status response includes execution state and if relevant, error // information. type StatementStatus struct { - Error []ServiceError `tfsdk:"error" tf:"optional"` + Error []ServiceError `tfsdk:"error" tf:"optional,object"` // Statement execution state: - `PENDING`: waiting for warehouse - // `RUNNING`: running - `SUCCEEDED`: execution was successful, result data // available for fetch - `FAILED`: execution failed; reason for failure @@ -2061,7 +2061,7 @@ type TrashQueryRequest struct { } type UpdateAlertRequest struct { - Alert []UpdateAlertRequestAlert `tfsdk:"alert" tf:"optional"` + Alert []UpdateAlertRequestAlert `tfsdk:"alert" tf:"optional,object"` Id types.String `tfsdk:"-"` // Field mask is required to be passed into the PATCH request. Field mask @@ -2073,7 +2073,7 @@ type UpdateAlertRequest struct { type UpdateAlertRequestAlert struct { // Trigger conditions of the alert. - Condition []AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` // Custom body of alert notification, if it exists. See [here] for custom // templating instructions. // @@ -2101,7 +2101,7 @@ type UpdateAlertRequestAlert struct { type UpdateQueryRequest struct { Id types.String `tfsdk:"-"` - Query []UpdateQueryRequestQuery `tfsdk:"query" tf:"optional"` + Query []UpdateQueryRequestQuery `tfsdk:"query" tf:"optional,object"` // Field mask is required to be passed into the PATCH request. Field mask // specifies which fields of the setting payload will be updated. The field // mask needs to be supplied as single string. To specify multiple fields in @@ -2147,7 +2147,7 @@ type UpdateVisualizationRequest struct { // the field mask, use comma as the separator (no space). 
UpdateMask types.String `tfsdk:"update_mask" tf:""` - Visualization []UpdateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []UpdateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional,object"` } type UpdateVisualizationRequestVisualization struct { @@ -2260,13 +2260,13 @@ type Widget struct { // The unique ID for this widget. Id types.String `tfsdk:"id" tf:"optional"` - Options []WidgetOptions `tfsdk:"options" tf:"optional"` + Options []WidgetOptions `tfsdk:"options" tf:"optional,object"` // The visualization description API changes frequently and is unsupported. // You can duplicate a visualization by copying description objects received // _from the API_ and then using them to create a new one with a POST // request to the same endpoint. Databricks does not recommend constructing // ad-hoc visualizations entirely in JSON. - Visualization []LegacyVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []LegacyVisualization `tfsdk:"visualization" tf:"optional,object"` // Unused field. Width types.Int64 `tfsdk:"width" tf:"optional"` } @@ -2284,7 +2284,7 @@ type WidgetOptions struct { ParameterMappings any `tfsdk:"parameterMappings" tf:"optional"` // Coordinates of this widget on a dashboard. This portion of the API // changes frequently and is unsupported. - Position []WidgetPosition `tfsdk:"position" tf:"optional"` + Position []WidgetPosition `tfsdk:"position" tf:"optional,object"` // Custom title of the widget Title types.String `tfsdk:"title" tf:"optional"` // Timestamp of the last time this object was updated. diff --git a/internal/service/vectorsearch_tf/model.go b/internal/service/vectorsearch_tf/model.go index 11f417939..e0590e7ad 100755 --- a/internal/service/vectorsearch_tf/model.go +++ b/internal/service/vectorsearch_tf/model.go @@ -29,10 +29,10 @@ type CreateEndpoint struct { type CreateVectorIndexRequest struct { // Specification for Delta Sync Index. Required if `index_type` is // `DELTA_SYNC`. - DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecRequest `tfsdk:"delta_sync_index_spec" tf:"optional"` + DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecRequest `tfsdk:"delta_sync_index_spec" tf:"optional,object"` // Specification for Direct Vector Access Index. Required if `index_type` is // `DIRECT_ACCESS`. - DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional"` + DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional,object"` // Name of the endpoint to be used for serving the index EndpointName types.String `tfsdk:"endpoint_name" tf:""` // There are 2 types of Vector Search indexes: @@ -50,7 +50,7 @@ type CreateVectorIndexRequest struct { } type CreateVectorIndexResponse struct { - VectorIndex []VectorIndex `tfsdk:"vector_index" tf:"optional"` + VectorIndex []VectorIndex `tfsdk:"vector_index" tf:"optional,object"` } // Result of the upsert or delete operation. @@ -73,7 +73,7 @@ type DeleteDataVectorIndexRequest struct { // Response to a delete data vector index request. type DeleteDataVectorIndexResponse struct { // Result of the upsert or delete operation. - Result []DeleteDataResult `tfsdk:"result" tf:"optional"` + Result []DeleteDataResult `tfsdk:"result" tf:"optional,object"` // Status of the delete operation. 
Status types.String `tfsdk:"status" tf:"optional"` } @@ -181,7 +181,7 @@ type EndpointInfo struct { // Creator of the endpoint Creator types.String `tfsdk:"creator" tf:"optional"` // Current status of the endpoint - EndpointStatus []EndpointStatus `tfsdk:"endpoint_status" tf:"optional"` + EndpointStatus []EndpointStatus `tfsdk:"endpoint_status" tf:"optional,object"` // Type of endpoint. EndpointType types.String `tfsdk:"endpoint_type" tf:"optional"` // Unique identifier of the endpoint @@ -255,7 +255,7 @@ type MapStringValueEntry struct { // Column name. Key types.String `tfsdk:"key" tf:"optional"` // Column value, nullable. - Value []Value `tfsdk:"value" tf:"optional"` + Value []Value `tfsdk:"value" tf:"optional,object"` } type MiniVectorIndex struct { @@ -315,13 +315,13 @@ type QueryVectorIndexRequest struct { type QueryVectorIndexResponse struct { // Metadata about the result set. - Manifest []ResultManifest `tfsdk:"manifest" tf:"optional"` + Manifest []ResultManifest `tfsdk:"manifest" tf:"optional,object"` // [Optional] Token that can be used in `QueryVectorIndexNextPage` API to // get next page of results. If more than 1000 results satisfy the query, // they are returned in groups of 1000. Empty value means no more results. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` // Data returned in the query result. - Result []ResultData `tfsdk:"result" tf:"optional"` + Result []ResultData `tfsdk:"result" tf:"optional,object"` } // Data returned in the query result. @@ -392,7 +392,7 @@ type UpsertDataVectorIndexRequest struct { // Response to an upsert data vector index request. type UpsertDataVectorIndexResponse struct { // Result of the upsert or delete operation. - Result []UpsertDataResult `tfsdk:"result" tf:"optional"` + Result []UpsertDataResult `tfsdk:"result" tf:"optional,object"` // Status of the upsert operation. Status types.String `tfsdk:"status" tf:"optional"` } @@ -400,7 +400,7 @@ type UpsertDataVectorIndexResponse struct { type Value struct { BoolValue types.Bool `tfsdk:"bool_value" tf:"optional"` - ListValue []ListValue `tfsdk:"list_value" tf:"optional"` + ListValue []ListValue `tfsdk:"list_value" tf:"optional,object"` NullValue types.String `tfsdk:"null_value" tf:"optional"` @@ -408,16 +408,16 @@ type Value struct { StringValue types.String `tfsdk:"string_value" tf:"optional"` - StructValue []Struct `tfsdk:"struct_value" tf:"optional"` + StructValue []Struct `tfsdk:"struct_value" tf:"optional,object"` } type VectorIndex struct { // The user who created the index. 
Creator types.String `tfsdk:"creator" tf:"optional"` - DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecResponse `tfsdk:"delta_sync_index_spec" tf:"optional"` + DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecResponse `tfsdk:"delta_sync_index_spec" tf:"optional,object"` - DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional"` + DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional,object"` // Name of the endpoint associated with the index EndpointName types.String `tfsdk:"endpoint_name" tf:"optional"` // There are 2 types of Vector Search indexes: @@ -433,7 +433,7 @@ type VectorIndex struct { // Primary key of the index PrimaryKey types.String `tfsdk:"primary_key" tf:"optional"` - Status []VectorIndexStatus `tfsdk:"status" tf:"optional"` + Status []VectorIndexStatus `tfsdk:"status" tf:"optional,object"` } type VectorIndexStatus struct { diff --git a/internal/service/workspace_tf/model.go b/internal/service/workspace_tf/model.go index 7564d08cc..684591341 100755 --- a/internal/service/workspace_tf/model.go +++ b/internal/service/workspace_tf/model.go @@ -73,7 +73,7 @@ type CreateRepoRequest struct { Provider types.String `tfsdk:"provider" tf:""` // If specified, the repo will be created with sparse checkout enabled. You // cannot enable/disable sparse checkout after the repo is created. - SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional,object"` // URL of the Git repository to be linked. Url types.String `tfsdk:"url" tf:""` } @@ -91,14 +91,14 @@ type CreateRepoResponse struct { // Git provider of the linked Git repository. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout settings for the Git folder (repo). - SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional,object"` // URL of the linked Git repository. Url types.String `tfsdk:"url" tf:"optional"` } type CreateScope struct { // The metadata for the secret scope if the type is `AZURE_KEYVAULT` - BackendAzureKeyvault []AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional"` + BackendAzureKeyvault []AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional,object"` // The principal that is initially granted `MANAGE` permission to the // created scope. InitialManagePrincipal types.String `tfsdk:"initial_manage_principal" tf:"optional"` @@ -270,7 +270,7 @@ type GetRepoResponse struct { // Git provider of the linked Git repository. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout settings for the Git folder (repo). - SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional,object"` // URL of the linked Git repository. Url types.String `tfsdk:"url" tf:"optional"` } @@ -516,7 +516,7 @@ type RepoInfo struct { // Git provider of the remote git repository, e.g. `gitHub`. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout config for the git folder (repo). - SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional,object"` // URL of the remote git repository. Url types.String `tfsdk:"url" tf:"optional"` } @@ -560,7 +560,7 @@ type SecretScope struct { // The type of secret scope backend. 
BackendType types.String `tfsdk:"backend_type" tf:"optional"` // The metadata for the secret scope if the type is `AZURE_KEYVAULT` - KeyvaultMetadata []AzureKeyVaultSecretScopeMetadata `tfsdk:"keyvault_metadata" tf:"optional"` + KeyvaultMetadata []AzureKeyVaultSecretScopeMetadata `tfsdk:"keyvault_metadata" tf:"optional,object"` // A unique name to identify the secret scope. Name types.String `tfsdk:"name" tf:"optional"` } @@ -617,7 +617,7 @@ type UpdateRepoRequest struct { RepoId types.Int64 `tfsdk:"-"` // If specified, update the sparse checkout settings. The update will fail // if sparse checkout is not enabled for the repo. - SparseCheckout []SparseCheckoutUpdate `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckoutUpdate `tfsdk:"sparse_checkout" tf:"optional,object"` // Tag that the local version of the repo is checked out to. Updating the // repo to a tag puts the repo in a detached HEAD state. Before committing // new changes, you must update the repo to a branch instead of the detached
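
The model changes above are mechanical: every generated field that holds exactly one nested message gains an `object` marker in its `tf` struct tag, alongside the existing `optional` marker (required fields, which previously carried an empty tag, become `tf:"object"`). As a minimal sketch of that tag convention — the `Example` struct and the `parseTfTag` helper below are illustrative stand-ins, not the provider's actual tag parser — the markers can be read back with nothing but the standard library:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Example mirrors the generated models above: a slice field that the API
// treats as holding at most one nested message, hence the `object` marker.
type Example struct {
	Setting []string `tfsdk:"setting" tf:"optional,object"`
}

// parseTfTag splits a `tf` struct tag into its comma-separated markers.
// Illustrative only; the provider's own parser is not shown in this diff.
func parseTfTag(f reflect.StructField) (optional, object bool) {
	for _, part := range strings.Split(f.Tag.Get("tf"), ",") {
		switch part {
		case "optional":
			optional = true
		case "object":
			object = true
		}
	}
	return optional, object
}

func main() {
	f, _ := reflect.TypeOf(Example{}).FieldByName("Setting")
	optional, object := parseTfTag(f)
	fmt.Printf("optional=%v object=%v\n", optional, object) // optional=true object=true
}
```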
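On the schema side, a field tagged `object` is rendered as a list-nested block whose length is capped at one. That is what lets these `[]T` fields keep Terraform's list-of-blocks syntax while behaving like a single nested object. Below is a hand-written approximation of the resulting block, using `listvalidator` from terraform-plugin-framework-validators; the block contents and the `etag` attribute name are placeholders, not taken from the generated schemas:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
)

// Approximation of what the schema builder emits for a field tagged
// `tf:"optional,object"`: a list-nested block capped at one element.
var settingBlock = schema.ListNestedBlock{
	NestedObject: schema.NestedBlockObject{
		Attributes: map[string]schema.Attribute{
			"etag": schema.StringAttribute{Optional: true}, // placeholder attribute
		},
	},
	Validators: []validator.List{
		listvalidator.SizeAtMost(1),
	},
}
```

In a configuration this reads as at most one `setting { ... }` block; declaring a second block fails validation at plan time instead of silently being accepted as a longer list.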