From bb871e11e7fcaa76451ddadbf3f71364174a8a7d Mon Sep 17 00:00:00 2001
From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com>
Date: Wed, 4 Sep 2024 15:28:56 +0200
Subject: [PATCH 01/99] [Internal] Make test utils public and move integration test for quality monitor (#3993)

## Changes
- Make `Step`, `WorkspaceLevel`, `AccountLevel`, `UnityWorkspaceLevel`, `UnityAccountLevel` public
- Rename `init_test.go` to `init.go`, because identifiers exported from `_test.go` files cannot be imported by other packages
- Move the integration test for the quality monitor next to the resource definition

## Tests
- [x] `make test` run locally
- [x] relevant change in `docs/` folder
- [x] covered with integration tests in `internal/acceptance`
- [x] relevant acceptance tests are passing
- [x] using Go SDK

---
 CONTRIBUTING.md | 2 +-
 internal/acceptance/acceptance_test.go | 2 +-
 internal/acceptance/account_rule_set_test.go | 4 +-
 .../acceptance/artifact_allowlist_test.go | 2 +-
 internal/acceptance/catalog_test.go | 16 +-
 internal/acceptance/cluster_policy_test.go | 8 +-
 internal/acceptance/cluster_test.go | 22 +--
 internal/acceptance/connection_test.go | 10 +-
 internal/acceptance/dashboard_test.go | 28 ++--
 .../data_aws_crossaccount_policy_test.go | 2 +-
 internal/acceptance/data_catalog_test.go | 2 +-
 internal/acceptance/data_cluster_test.go | 2 +-
 internal/acceptance/data_clusters_test.go | 4 +-
 .../acceptance/data_current_config_test.go | 12 +-
 .../acceptance/data_current_metastore_test.go | 2 +-
 .../data_external_locations_test.go | 2 +-
 internal/acceptance/data_group_test.go | 4 +-
 .../acceptance/data_instance_profiles_test.go | 2 +-
 internal/acceptance/data_job_test.go | 2 +-
 internal/acceptance/data_metastore_test.go | 2 +-
 internal/acceptance/data_metastores_test.go | 2 +-
 .../acceptance/data_mlflow_experiment_test.go | 8 +-
 internal/acceptance/data_mlflow_model_test.go | 6 +-
 .../acceptance/data_mws_credentials_test.go | 2 +-
 .../acceptance/data_mws_workspaces_test.go | 2 +-
 internal/acceptance/data_pipelines_test.go | 2 +-
 internal/acceptance/data_schema_test.go | 2 +-
 internal/acceptance/data_schemas_test.go | 2 +-
 .../data_service_principals_test.go | 6 +-
 internal/acceptance/data_shares_test.go | 2 +-
 .../acceptance/data_sql_warehouse_test.go | 2 +-
 .../data_storage_credential_test.go | 2 +-
 .../data_storage_credentials_test.go | 2 +-
 internal/acceptance/data_table_test.go | 2 +-
 internal/acceptance/data_tables_test.go | 2 +-
 internal/acceptance/data_user_test.go | 4 +-
 internal/acceptance/data_volume_test.go | 2 +-
 internal/acceptance/data_volumes_test.go | 4 +-
 internal/acceptance/dbfs_file_test.go | 6 +-
 internal/acceptance/default_namespace_test.go | 4 +-
 internal/acceptance/directory_test.go | 2 +-
 internal/acceptance/entitlements_test.go | 18 +--
 internal/acceptance/external_location_test.go | 12 +-
 internal/acceptance/file_test.go | 28 ++--
 internal/acceptance/git_credential_test.go | 2 +-
 .../acceptance/global_init_script_test.go | 2 +-
 internal/acceptance/grant_test.go | 12 +-
 internal/acceptance/grants_test.go | 12 +-
 internal/acceptance/group_member_test.go | 4 +-
 internal/acceptance/group_role_test.go | 2 +-
 internal/acceptance/group_test.go | 16 +-
 internal/acceptance/{init_test.go => init.go} | 12 +-
 internal/acceptance/instance_profile_test.go | 8 +-
 internal/acceptance/ip_access_list_test.go | 4 +-
 internal/acceptance/job_test.go | 30 ++-
 .../acceptance/metastore_assignment_test.go | 6 +-
 .../acceptance/metastore_data_access_test.go | 4 +-
 internal/acceptance/metastore_test.go | 8 +-
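With the harness entry points exported, an integration test that lives next to a resource definition (as the relocated quality monitor test now does) can import the acceptance package and drive the same step machinery. The sketch below is illustrative only: the package name, test name, import path, and the simplified catalog template are assumptions for illustration, not the contents of the file added by this patch.

```go
package qualitymonitor_test

import (
	"testing"

	// Assumed module path; internal/acceptance remains importable from other
	// packages inside this repository now that its helpers are exported.
	"github.com/databricks/terraform-provider-databricks/internal/acceptance"
)

// Hypothetical example of reusing the exported harness from another package.
// It provisions a throwaway catalog, not a quality monitor; {var.RANDOM} is
// substituted by the harness when the template is rendered.
func TestUcAccExampleFromAnotherPackage(t *testing.T) {
	acceptance.UnityWorkspaceLevel(t, acceptance.Step{
		Template: `
		resource "databricks_catalog" "sandbox" {
			name    = "sandbox{var.RANDOM}"
			comment = "created from a test outside internal/acceptance"
		}`,
	})
}
```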
internal/acceptance/mlflow_experiment_test.go | 2 +- internal/acceptance/mlflow_model_test.go | 2 +- internal/acceptance/model_serving_test.go | 14 +- internal/acceptance/mounts_test.go | 10 +- internal/acceptance/mws_credentials_test.go | 2 +- .../mws_customer_managed_keys_test.go | 4 +- internal/acceptance/mws_log_delivery_test.go | 2 +- .../mws_network_connectivity_config_test.go | 8 +- internal/acceptance/mws_networks_test.go | 4 +- .../mws_permissionassignments_test.go | 8 +- .../mws_private_access_settings_test.go | 4 +- .../mws_storage_configurations_test.go | 2 +- internal/acceptance/mws_vpc_endpoint_test.go | 4 +- internal/acceptance/mws_workspaces_test.go | 22 +-- internal/acceptance/notebook_test.go | 4 +- .../notification_destination_test.go | 24 +-- internal/acceptance/obo_token_test.go | 2 +- internal/acceptance/online_table_test.go | 2 +- internal/acceptance/permissions_test.go | 12 +- internal/acceptance/pipeline_test.go | 12 +- internal/acceptance/provider_test.go | 2 +- internal/acceptance/quality_monitor_test.go | 110 +------------ internal/acceptance/recipient_test.go | 12 +- internal/acceptance/registered_model_test.go | 8 +- .../restrict_workspace_admins_test.go | 4 +- internal/acceptance/schema_test.go | 10 +- internal/acceptance/secret_acl_test.go | 4 +- internal/acceptance/secret_scope_test.go | 6 +- internal/acceptance/secret_test.go | 2 +- internal/acceptance/service_principal_test.go | 24 +-- internal/acceptance/share_test.go | 10 +- internal/acceptance/sql_alert_test.go | 4 +- internal/acceptance/sql_dashboard_test.go | 2 +- internal/acceptance/sql_endpoint_test.go | 4 +- internal/acceptance/sql_global_config_test.go | 8 +- internal/acceptance/sql_permissions_test.go | 2 +- internal/acceptance/sql_query_test.go | 2 +- internal/acceptance/sql_table_test.go | 64 ++++---- .../acceptance/storage_credential_test.go | 6 +- internal/acceptance/system_schema_test.go | 2 +- internal/acceptance/token_test.go | 2 +- internal/acceptance/user_role_test.go | 2 +- internal/acceptance/user_test.go | 24 +-- internal/acceptance/vector_search_test.go | 2 +- internal/acceptance/volume_test.go | 14 +- internal/acceptance/workspace_binding_test.go | 2 +- internal/acceptance/workspace_conf_test.go | 8 +- internal/acceptance/workspace_file_test.go | 10 +- .../resource_quality_monitor_test.go | 153 ++++++++++++++++++ internal/tfreflect/reflect_utils.go | 4 +- 108 files changed, 539 insertions(+), 488 deletions(-) rename internal/acceptance/{init_test.go => init.go} (97%) create mode 100644 internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d4718029d7..4062159c53 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -333,7 +333,7 @@ func TestExampleResourceCreate(t *testing.T) { ```go func TestAccSecretAclResource(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_group" "ds" { display_name = "data-scientists-{var.RANDOM}" diff --git a/internal/acceptance/acceptance_test.go b/internal/acceptance/acceptance_test.go index a86f6e9eb2..590bac94fa 100644 --- a/internal/acceptance/acceptance_test.go +++ b/internal/acceptance/acceptance_test.go @@ -50,7 +50,7 @@ func TestRunningRealTerraformWithFixtureBackend(t *testing.T) { t.Setenv("DATABRICKS_HOST", client.Config.Host) t.Setenv("DATABRICKS_TOKEN", client.Config.Token) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_token" "this" { lifetime_seconds = 6000 comment = 
"Testing token" diff --git a/internal/acceptance/account_rule_set_test.go b/internal/acceptance/account_rule_set_test.go index c95e70d139..a134cdf667 100644 --- a/internal/acceptance/account_rule_set_test.go +++ b/internal/acceptance/account_rule_set_test.go @@ -32,7 +32,7 @@ func getServicePrincipalResource(t *testing.T) string { func TestMwsAccAccountServicePrincipalRuleSetsFullLifeCycle(t *testing.T) { loadAccountEnv(t) spResource := getServicePrincipalResource(t) - accountLevel(t, step{ + AccountLevel(t, Step{ Template: spResource + ` resource "databricks_group" "this" { display_name = "Group {var.RANDOM}" @@ -69,7 +69,7 @@ func TestMwsAccAccountServicePrincipalRuleSetsFullLifeCycle(t *testing.T) { func TestMwsAccAccountGroupRuleSetsFullLifeCycle(t *testing.T) { username := qa.RandomEmail() - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_user" "this" { user_name = "` + username + `" diff --git a/internal/acceptance/artifact_allowlist_test.go b/internal/acceptance/artifact_allowlist_test.go index 271e9179ab..aeb1347285 100644 --- a/internal/acceptance/artifact_allowlist_test.go +++ b/internal/acceptance/artifact_allowlist_test.go @@ -5,7 +5,7 @@ import ( ) func TestUcAccArtifactAllowlistResourceFullLifecycle(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_artifact_allowlist" "init" { artifact_type = "INIT_SCRIPT" diff --git a/internal/acceptance/catalog_test.go b/internal/acceptance/catalog_test.go index edae3560a4..3b1278c36e 100644 --- a/internal/acceptance/catalog_test.go +++ b/internal/acceptance/catalog_test.go @@ -7,7 +7,7 @@ import ( func TestUcAccCatalog(t *testing.T) { loadUcwsEnv(t) - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" @@ -21,7 +21,7 @@ func TestUcAccCatalog(t *testing.T) { } func TestUcAccCatalogIsolated(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.STICKY_RANDOM}" @@ -30,7 +30,7 @@ func TestUcAccCatalogIsolated(t *testing.T) { purpose = "testing" } }`, - }, step{ + }, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.STICKY_RANDOM}" @@ -40,7 +40,7 @@ func TestUcAccCatalogIsolated(t *testing.T) { purpose = "testing" } }`, - }, step{ + }, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.STICKY_RANDOM}" @@ -55,7 +55,7 @@ func TestUcAccCatalogIsolated(t *testing.T) { func TestUcAccCatalogUpdate(t *testing.T) { loadUcwsEnv(t) - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_catalog" "sandbox" { name = "sandbox{var.STICKY_RANDOM}" @@ -65,7 +65,7 @@ func TestUcAccCatalogUpdate(t *testing.T) { } %s }`, getPredictiveOptimizationSetting(t, true)), - }, step{ + }, Step{ Template: fmt.Sprintf(` resource "databricks_catalog" "sandbox" { name = "sandbox{var.STICKY_RANDOM}" @@ -76,7 +76,7 @@ func TestUcAccCatalogUpdate(t *testing.T) { %s owner = "account users" }`, getPredictiveOptimizationSetting(t, true)), - }, step{ + }, Step{ Template: fmt.Sprintf(` resource "databricks_catalog" "sandbox" { name = "sandbox{var.STICKY_RANDOM}" @@ -87,7 +87,7 @@ func TestUcAccCatalogUpdate(t *testing.T) { %s owner = "{env.TEST_DATA_ENG_GROUP}" }`, getPredictiveOptimizationSetting(t, true)), - }, step{ + }, Step{ Template: fmt.Sprintf(` resource "databricks_catalog" 
"sandbox" { name = "sandbox{var.STICKY_RANDOM}" diff --git a/internal/acceptance/cluster_policy_test.go b/internal/acceptance/cluster_policy_test.go index 3d7ac40177..8de58a9206 100644 --- a/internal/acceptance/cluster_policy_test.go +++ b/internal/acceptance/cluster_policy_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccClusterPolicyResourceFullLifecycle(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_cluster_policy" "external_metastore" { name = "Terraform policy {var.RANDOM}" definition = jsonencode({ @@ -15,7 +15,7 @@ func TestAccClusterPolicyResourceFullLifecycle(t *testing.T) { } }) }`, - }, step{ + }, Step{ // renaming to a new random name Template: `resource "databricks_cluster_policy" "external_metastore" { name = "Terraform policy {var.RANDOM}" @@ -30,7 +30,7 @@ func TestAccClusterPolicyResourceFullLifecycle(t *testing.T) { } func TestAccClusterPolicyResourceOverrideBuiltIn(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_cluster_policy" "personal_vm" { name = "Personal Compute" policy_family_id = "personal-vm" @@ -46,7 +46,7 @@ func TestAccClusterPolicyResourceOverrideBuiltIn(t *testing.T) { } func TestAccClusterPolicyResourceOverrideNew(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_cluster_policy" "policyoverrideempty" { policy_family_id = "personal-vm" name = "Policy Override {var.RANDOM}" diff --git a/internal/acceptance/cluster_test.go b/internal/acceptance/cluster_test.go index dd5b2164f8..f399eece3f 100644 --- a/internal/acceptance/cluster_test.go +++ b/internal/acceptance/cluster_test.go @@ -8,7 +8,7 @@ import ( ) func TestAccClusterResource_CreateClusterWithLibraries(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `data "databricks_spark_version" "latest" { } resource "databricks_cluster" "this" { @@ -73,9 +73,9 @@ func singleNodeClusterTemplate(autoTerminationMinutes string) string { } func TestAccClusterResource_CreateSingleNodeCluster(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: singleNodeClusterTemplate("10"), - }, step{ + }, Step{ Template: singleNodeClusterTemplate("20"), }) } @@ -103,16 +103,16 @@ func awsClusterTemplate(availability string) string { func TestAccClusterResource_CreateAndUpdateAwsAttributes(t *testing.T) { loadWorkspaceEnv(t) if isAws(t) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: awsClusterTemplate("SPOT"), - }, step{ + }, Step{ Template: awsClusterTemplate("SPOT_WITH_FALLBACK"), }) } } func TestAccClusterResource_CreateAndNoWait(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `data "databricks_spark_version" "latest" { } resource "databricks_cluster" "this" { @@ -133,9 +133,9 @@ func TestAccClusterResource_CreateAndNoWait(t *testing.T) { } func TestAccClusterResource_WorkloadType(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: testAccClusterResourceWorkloadTypeTemplate(""), - }, step{ + }, Step{ Template: testAccClusterResourceWorkloadTypeTemplate(` workload_type { clients { @@ -147,7 +147,7 @@ func TestAccClusterResource_WorkloadType(t *testing.T) { resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "true"), resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "true"), ), - }, step{ + }, Step{ Template: testAccClusterResourceWorkloadTypeTemplate(` workload_type { 
clients { @@ -159,7 +159,7 @@ func TestAccClusterResource_WorkloadType(t *testing.T) { resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "false"), resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "false"), ), - }, step{ + }, Step{ Template: testAccClusterResourceWorkloadTypeTemplate(` workload_type { clients { } @@ -168,7 +168,7 @@ func TestAccClusterResource_WorkloadType(t *testing.T) { resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "true"), resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "true"), ), - }, step{ + }, Step{ Template: testAccClusterResourceWorkloadTypeTemplate(``), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.#", "0"), diff --git a/internal/acceptance/connection_test.go b/internal/acceptance/connection_test.go index 7cf6ec5093..fade7f3d5f 100644 --- a/internal/acceptance/connection_test.go +++ b/internal/acceptance/connection_test.go @@ -49,19 +49,19 @@ func connectionTemplateWithoutOwner() string { ` } func TestUcAccConnectionsResourceFullLifecycle(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: connectionTemplateWithOwner("test.mysql.database.azure.com", "account users"), - }, step{ + }, Step{ Template: connectionTemplateWithOwner("test.mysql.database.aws.com", "account users"), - }, step{ + }, Step{ Template: connectionTemplateWithOwner("test.mysql.database.azure.com", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), }) } func TestUcAccConnectionsWithoutOwnerResourceFullLifecycle(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: connectionTemplateWithoutOwner(), - }, step{ + }, Step{ Template: connectionTemplateWithoutOwner(), }) } diff --git a/internal/acceptance/dashboard_test.go b/internal/acceptance/dashboard_test.go index bb56147cdd..5fbf28b03a 100644 --- a/internal/acceptance/dashboard_test.go +++ b/internal/acceptance/dashboard_test.go @@ -89,7 +89,7 @@ func (t *templateStruct) SetAttributes(mapper map[string]string) templateStruct func TestAccBasicDashboard(t *testing.T) { var template templateStruct displayName := fmt.Sprintf("Test Dashboard - %s", qa.RandomName()) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: makeTemplate(template.SetAttributes(map[string]string{ "display_name": displayName, "warehouse_id": "{env.TEST_DEFAULT_WAREHOUSE_ID}", @@ -119,7 +119,7 @@ func TestAccBasicDashboard(t *testing.T) { func TestAccDashboardWithSerializedJSON(t *testing.T) { var template templateStruct displayName := fmt.Sprintf("Test Dashboard - %s", qa.RandomName()) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: makeTemplate(template.SetAttributes(map[string]string{ "display_name": displayName, "warehouse_id": "{env.TEST_DEFAULT_WAREHOUSE_ID}", @@ -142,7 +142,7 @@ func TestAccDashboardWithSerializedJSON(t *testing.T) { require.NoError(t, err) return nil }), - }, step{ + }, Step{ Template: makeTemplate(template.SetAttributes(map[string]string{ "serialized_dashboard": `{\"pages\":[{\"name\":\"new_name\",\"displayName\":\"New Page Modified\"}]}`, "embed_credentials": "true", @@ -174,7 +174,7 @@ func TestAccDashboardWithFilePath(t *testing.T) { fileName := tmpDir + "/Dashboard.json" var template templateStruct displayName := fmt.Sprintf("Test Dashboard - %s", qa.RandomName()) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ 
PreConfig: func() { os.Mkdir(tmpDir, 0755) os.WriteFile(fileName, []byte("{\"pages\":[{\"name\":\"new_name\",\"displayName\":\"New Page\"}]}"), 0644) @@ -200,7 +200,7 @@ func TestAccDashboardWithFilePath(t *testing.T) { require.NoError(t, err) return nil }), - }, step{ + }, Step{ PreConfig: func() { os.WriteFile(fileName, []byte("{\"pages\":[{\"name\":\"new_name\",\"displayName\":\"New Page Modified\"}]}"), 0644) }, @@ -231,7 +231,7 @@ func TestAccDashboardWithNoChange(t *testing.T) { initial_update_time := "" var template templateStruct displayName := fmt.Sprintf("Test Dashboard - %s", qa.RandomName()) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: makeTemplate(template.SetAttributes(map[string]string{ "display_name": displayName, "warehouse_id": "{env.TEST_DEFAULT_WAREHOUSE_ID}", @@ -254,7 +254,7 @@ func TestAccDashboardWithNoChange(t *testing.T) { initial_update_time = dashboard.UpdateTime return nil }), - }, step{ + }, Step{ Template: makeTemplate(template), Check: resourceCheck("databricks_dashboard.d1", func(ctx context.Context, client *common.DatabricksClient, id string) error { w, err := client.WorkspaceClient() @@ -284,7 +284,7 @@ func TestAccDashboardWithRemoteChange(t *testing.T) { etag := "" var template templateStruct displayName := fmt.Sprintf("Test Dashboard - %s", qa.RandomName()) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: makeTemplate(template.SetAttributes(map[string]string{ "display_name": displayName, "warehouse_id": "{env.TEST_DEFAULT_WAREHOUSE_ID}", @@ -310,7 +310,7 @@ func TestAccDashboardWithRemoteChange(t *testing.T) { etag = dashboard.Etag return nil }), - }, step{ + }, Step{ PreConfig: func() { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) @@ -355,7 +355,7 @@ func TestAccDashboardTestAll(t *testing.T) { fileName := tmpDir + "/Dashboard.json" var template templateStruct displayName := fmt.Sprintf("Test Dashboard - %s", qa.RandomName()) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ PreConfig: func() { os.Mkdir(tmpDir, 0755) os.WriteFile(fileName, []byte("{\"pages\":[{\"name\":\"new_name\",\"displayName\":\"New Page in file\"}]}"), 0644) @@ -388,7 +388,7 @@ func TestAccDashboardTestAll(t *testing.T) { require.Equal(t, publish_dash.EmbedCredentials, false) return nil }), - }, step{ + }, Step{ PreConfig: func() { os.WriteFile(fileName, []byte("{\"pages\":[{\"name\":\"new_name\",\"displayName\":\"New Page Modified\"}]}"), 0644) }, @@ -414,7 +414,7 @@ func TestAccDashboardTestAll(t *testing.T) { assert.NotEqual(t, "", dashboard.SerializedDashboard) return nil }), - }, step{ + }, Step{ PreConfig: func() { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) @@ -444,7 +444,7 @@ func TestAccDashboardTestAll(t *testing.T) { require.NoError(t, err) return nil }), - }, step{ + }, Step{ Template: makeTemplate(template.SetAttributes(map[string]string{ "embed_credentials": "true", "parent_path": "/Shared/Teams", @@ -466,7 +466,7 @@ func TestAccDashboardTestAll(t *testing.T) { assert.NotEqual(t, "", dashboard.SerializedDashboard) return nil }), - }, step{ + }, Step{ PreConfig: func() { os.WriteFile(fileName, []byte("{\"pages\":[{\"name\":\"new_name\",\"displayName\":\"New Page Modified again\"}]}"), 0644) }, diff --git a/internal/acceptance/data_aws_crossaccount_policy_test.go b/internal/acceptance/data_aws_crossaccount_policy_test.go index 76e263ff7f..c40e8e505b 100644 --- a/internal/acceptance/data_aws_crossaccount_policy_test.go +++ 
b/internal/acceptance/data_aws_crossaccount_policy_test.go @@ -9,7 +9,7 @@ import ( func TestMwsAccDataSourceAwsCrossaccountPolicy(t *testing.T) { GetEnvOrSkipTest(t, "TEST_ROOT_BUCKET") // marker for AWS test env - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` data "databricks_aws_crossaccount_policy" "this" { }`, diff --git a/internal/acceptance/data_catalog_test.go b/internal/acceptance/data_catalog_test.go index 591eacc162..83a57ff7d6 100644 --- a/internal/acceptance/data_catalog_test.go +++ b/internal/acceptance/data_catalog_test.go @@ -8,7 +8,7 @@ import ( ) func TestUcAccDataSourceCatalog(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" diff --git a/internal/acceptance/data_cluster_test.go b/internal/acceptance/data_cluster_test.go index 108c46c465..9fa9407bb3 100644 --- a/internal/acceptance/data_cluster_test.go +++ b/internal/acceptance/data_cluster_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccDataSourceCluster(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_cluster" "this" { cluster_id = "{env.TEST_DEFAULT_CLUSTER_ID}" diff --git a/internal/acceptance/data_clusters_test.go b/internal/acceptance/data_clusters_test.go index 968f3dc0d7..45d578873d 100644 --- a/internal/acceptance/data_clusters_test.go +++ b/internal/acceptance/data_clusters_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccDataSourceClustersNoFilter(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_clusters" "this" { } `, @@ -13,7 +13,7 @@ func TestAccDataSourceClustersNoFilter(t *testing.T) { } func TestAccDataSourceClustersWithFilter(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_clusters" "this" { cluster_name_contains = "Default" diff --git a/internal/acceptance/data_current_config_test.go b/internal/acceptance/data_current_config_test.go index 84a1f0fe98..25fcce48e4 100644 --- a/internal/acceptance/data_current_config_test.go +++ b/internal/acceptance/data_current_config_test.go @@ -24,17 +24,17 @@ func checkCurrentConfig(t *testing.T, cloudType string, isAccount string) func(s func TestAccDataCurrentConfig(t *testing.T) { loadWorkspaceEnv(t) if isAws(t) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `data "databricks_current_config" "this" {}`, Check: checkCurrentConfig(t, "aws", "false"), }) } else if isAzure(t) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `data "databricks_current_config" "this" {}`, Check: checkCurrentConfig(t, "azure", "false"), }) } else if isGcp(t) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `data "databricks_current_config" "this" {}`, Check: checkCurrentConfig(t, "gcp", "false"), }) @@ -44,17 +44,17 @@ func TestAccDataCurrentConfig(t *testing.T) { func TestMwsAccDataCurrentConfig(t *testing.T) { loadAccountEnv(t) if isAws(t) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `data "databricks_current_config" "this" {}`, Check: checkCurrentConfig(t, "aws", "true"), }) } else if isAzure(t) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `data "databricks_current_config" "this" {}`, Check: checkCurrentConfig(t, "azure", "true"), }) } else if isGcp(t) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `data "databricks_current_config" "this" {}`, Check: checkCurrentConfig(t, "gcp", "true"), }) diff --git 
a/internal/acceptance/data_current_metastore_test.go b/internal/acceptance/data_current_metastore_test.go index 363154b1d3..38766c19a8 100644 --- a/internal/acceptance/data_current_metastore_test.go +++ b/internal/acceptance/data_current_metastore_test.go @@ -8,7 +8,7 @@ import ( ) func TestUcAccDataSourceCurrentMetastore(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_current_metastore" "this" { }`, diff --git a/internal/acceptance/data_external_locations_test.go b/internal/acceptance/data_external_locations_test.go index eb42a96af2..89776a05ba 100644 --- a/internal/acceptance/data_external_locations_test.go +++ b/internal/acceptance/data_external_locations_test.go @@ -8,7 +8,7 @@ import ( ) func TestUcAccDataSourceExternalLocations(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` data "databricks_external_locations" "this" { }`, diff --git a/internal/acceptance/data_group_test.go b/internal/acceptance/data_group_test.go index 8713f82fd9..664dec2fd4 100644 --- a/internal/acceptance/data_group_test.go +++ b/internal/acceptance/data_group_test.go @@ -59,7 +59,7 @@ func checkGroupDataSourcePopulated(t *testing.T) func(s *terraform.State) error func TestMwsAccGroupDataSplitMembers(t *testing.T) { GetEnvOrSkipTest(t, "ARM_CLIENT_ID") - accountLevel(t, step{ + AccountLevel(t, Step{ Template: groupDataSourceTemplate, Check: checkGroupDataSourcePopulated(t), }) @@ -67,7 +67,7 @@ func TestMwsAccGroupDataSplitMembers(t *testing.T) { func TestAccGroupDataSplitMembers(t *testing.T) { GetEnvOrSkipTest(t, "ARM_CLIENT_ID") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: groupDataSourceTemplate, Check: checkGroupDataSourcePopulated(t), }) diff --git a/internal/acceptance/data_instance_profiles_test.go b/internal/acceptance/data_instance_profiles_test.go index e5e9544f4e..a1a1f453c1 100644 --- a/internal/acceptance/data_instance_profiles_test.go +++ b/internal/acceptance/data_instance_profiles_test.go @@ -6,7 +6,7 @@ import ( func TestAccDataSourceInstanceProfiles(t *testing.T) { GetEnvOrSkipTest(t, "TEST_EC2_INSTANCE_PROFILE") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_instance_profiles" "this" { } diff --git a/internal/acceptance/data_job_test.go b/internal/acceptance/data_job_test.go index 9ca495ad98..69aed60607 100755 --- a/internal/acceptance/data_job_test.go +++ b/internal/acceptance/data_job_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccDataSourceJob(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_current_user" "me" {} data "databricks_spark_version" "latest" {} diff --git a/internal/acceptance/data_metastore_test.go b/internal/acceptance/data_metastore_test.go index b823890ae5..092965c499 100644 --- a/internal/acceptance/data_metastore_test.go +++ b/internal/acceptance/data_metastore_test.go @@ -8,7 +8,7 @@ import ( ) func TestUcAccDataSourceMetastore(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` data "databricks_metastore" "this" { metastore_id = "{env.TEST_METASTORE_ID}" diff --git a/internal/acceptance/data_metastores_test.go b/internal/acceptance/data_metastores_test.go index 436a9f687c..ea5b7ac782 100644 --- a/internal/acceptance/data_metastores_test.go +++ b/internal/acceptance/data_metastores_test.go @@ -8,7 +8,7 @@ import ( ) func TestUcAccDataSourceMetastores(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` data "databricks_metastores" "this" { 
}`, diff --git a/internal/acceptance/data_mlflow_experiment_test.go b/internal/acceptance/data_mlflow_experiment_test.go index c2d0571877..f165a328ed 100644 --- a/internal/acceptance/data_mlflow_experiment_test.go +++ b/internal/acceptance/data_mlflow_experiment_test.go @@ -8,8 +8,8 @@ import ( ) func TestAccDataSourceMlflowExperiment(t *testing.T) { - workspaceLevel(t, - step{ + WorkspaceLevel(t, + Step{ Template: ` data "databricks_current_user" "me" {} @@ -19,7 +19,7 @@ func TestAccDataSourceMlflowExperiment(t *testing.T) { description = "My MLflow experiment description" }`, }, - step{ + Step{ Template: ` data "databricks_current_user" "me" {} @@ -51,7 +51,7 @@ func TestAccDataSourceMlflowExperiment(t *testing.T) { return nil }, }, - step{ + Step{ Template: ` data "databricks_current_user" "me" {} diff --git a/internal/acceptance/data_mlflow_model_test.go b/internal/acceptance/data_mlflow_model_test.go index 8e5d3aa9b2..3f911ab962 100644 --- a/internal/acceptance/data_mlflow_model_test.go +++ b/internal/acceptance/data_mlflow_model_test.go @@ -8,8 +8,8 @@ import ( ) func TestAccDataMlflowModel(t *testing.T) { - workspaceLevel(t, - step{ + WorkspaceLevel(t, + Step{ Template: ` resource "databricks_mlflow_model" "this" { name = "model-{var.RANDOM}" @@ -26,7 +26,7 @@ func TestAccDataMlflowModel(t *testing.T) { } }`, }, - step{ + Step{ Template: ` resource "databricks_mlflow_model" "this" { name = "model-{var.RANDOM}" diff --git a/internal/acceptance/data_mws_credentials_test.go b/internal/acceptance/data_mws_credentials_test.go index 882a889566..eb39e1b7b1 100755 --- a/internal/acceptance/data_mws_credentials_test.go +++ b/internal/acceptance/data_mws_credentials_test.go @@ -8,7 +8,7 @@ import ( ) func TestAccDataSourceMwsCredentials(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` data "databricks_mws_credentials" "this" { }`, diff --git a/internal/acceptance/data_mws_workspaces_test.go b/internal/acceptance/data_mws_workspaces_test.go index d3a39b7e6b..9c80cf4ded 100755 --- a/internal/acceptance/data_mws_workspaces_test.go +++ b/internal/acceptance/data_mws_workspaces_test.go @@ -9,7 +9,7 @@ import ( ) func TestAccDataSourceMwsWorkspaces(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` data "databricks_mws_workspaces" "this" { }`, diff --git a/internal/acceptance/data_pipelines_test.go b/internal/acceptance/data_pipelines_test.go index fc194dc0d5..ee487c54a2 100755 --- a/internal/acceptance/data_pipelines_test.go +++ b/internal/acceptance/data_pipelines_test.go @@ -48,7 +48,7 @@ var ( ) func TestAccDataSourcePipelines(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` locals { name = "pipeline-ds-acceptance-{var.RANDOM}" diff --git a/internal/acceptance/data_schema_test.go b/internal/acceptance/data_schema_test.go index 4024d6c40e..7a3ad79bb9 100644 --- a/internal/acceptance/data_schema_test.go +++ b/internal/acceptance/data_schema_test.go @@ -15,7 +15,7 @@ func checkDataSourceSchema(t *testing.T) func(s *terraform.State) error { } } func TestUcAccDataSourceSchema(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" diff --git a/internal/acceptance/data_schemas_test.go b/internal/acceptance/data_schemas_test.go index 6258d0a07e..ff88d166db 100755 --- a/internal/acceptance/data_schemas_test.go +++ b/internal/acceptance/data_schemas_test.go @@ -20,7 +20,7 @@ func checkSchemasDataSourcePopulated(t 
*testing.T) func(s *terraform.State) erro } } func TestUcAccDataSourceSchemas(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" diff --git a/internal/acceptance/data_service_principals_test.go b/internal/acceptance/data_service_principals_test.go index ac35f4bf11..aff919908c 100644 --- a/internal/acceptance/data_service_principals_test.go +++ b/internal/acceptance/data_service_principals_test.go @@ -28,21 +28,21 @@ data databricks_service_principals "this" { func TestAccDataSourceSPNsOnAWS(t *testing.T) { GetEnvOrSkipTest(t, "TEST_EC2_INSTANCE_PROFILE") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: spns, }) } func TestAccDataSourceSPNsOnGCP(t *testing.T) { GetEnvOrSkipTest(t, "GOOGLE_CREDENTIALS") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: spns, }) } func TestAccDataSourceSPNsOnAzure(t *testing.T) { GetEnvOrSkipTest(t, "ARM_CLIENT_ID") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: azureSpns, }) } diff --git a/internal/acceptance/data_shares_test.go b/internal/acceptance/data_shares_test.go index 9f7d940132..01d64784ff 100644 --- a/internal/acceptance/data_shares_test.go +++ b/internal/acceptance/data_shares_test.go @@ -19,7 +19,7 @@ func checkSharesDataSourcePopulated(t *testing.T) func(s *terraform.State) error } } func TestUcAccDataSourceShares(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" diff --git a/internal/acceptance/data_sql_warehouse_test.go b/internal/acceptance/data_sql_warehouse_test.go index 32a575ee9f..04114f5a16 100644 --- a/internal/acceptance/data_sql_warehouse_test.go +++ b/internal/acceptance/data_sql_warehouse_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccDataSourceWarehouse(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_sql_warehouse" "this" { id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" diff --git a/internal/acceptance/data_storage_credential_test.go b/internal/acceptance/data_storage_credential_test.go index a6f39d8d53..10d978e8ff 100755 --- a/internal/acceptance/data_storage_credential_test.go +++ b/internal/acceptance/data_storage_credential_test.go @@ -20,7 +20,7 @@ func checkStorageCredentialDataSourcePopulated(t *testing.T) func(s *terraform.S } } func TestUcAccDataSourceStorageCredential(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_storage_credential" "external" { name = "cred-{var.RANDOM}" diff --git a/internal/acceptance/data_storage_credentials_test.go b/internal/acceptance/data_storage_credentials_test.go index e29f388acf..17d3ac290f 100644 --- a/internal/acceptance/data_storage_credentials_test.go +++ b/internal/acceptance/data_storage_credentials_test.go @@ -8,7 +8,7 @@ import ( ) func TestUcAccDataSourceStorageCredentials(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` data "databricks_storage_credentials" "this" { }`, diff --git a/internal/acceptance/data_table_test.go b/internal/acceptance/data_table_test.go index 2167c7ac7e..09285149bb 100644 --- a/internal/acceptance/data_table_test.go +++ b/internal/acceptance/data_table_test.go @@ -15,7 +15,7 @@ func checkTableDataSourcePopulated(t *testing.T) func(s *terraform.State) error } } func TestUcAccDataSourceTable(t *testing.T) { - unityWorkspaceLevel(t, step{ + 
UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" diff --git a/internal/acceptance/data_tables_test.go b/internal/acceptance/data_tables_test.go index 4057828af7..9f81f206d9 100644 --- a/internal/acceptance/data_tables_test.go +++ b/internal/acceptance/data_tables_test.go @@ -25,7 +25,7 @@ func checkTablesDataSourcePopulated(t *testing.T) func(s *terraform.State) error } } func TestUcAccDataSourceTables(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" diff --git a/internal/acceptance/data_user_test.go b/internal/acceptance/data_user_test.go index fc46bf1ad3..e7d50d8889 100644 --- a/internal/acceptance/data_user_test.go +++ b/internal/acceptance/data_user_test.go @@ -26,14 +26,14 @@ func checkUserDataSourcePopulated(t *testing.T) func(s *terraform.State) error { } func TestMwsAccUserData(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: userDataSourceTemplate, Check: checkUserDataSourcePopulated(t), }) } func TestAccUserData(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: userDataSourceTemplate, Check: checkUserDataSourcePopulated(t), }) diff --git a/internal/acceptance/data_volume_test.go b/internal/acceptance/data_volume_test.go index 8f65c44f2e..6679da99c0 100644 --- a/internal/acceptance/data_volume_test.go +++ b/internal/acceptance/data_volume_test.go @@ -15,7 +15,7 @@ func checkDataSourceVolume(t *testing.T) func(s *terraform.State) error { } } func TestUcAccDataSourceVolume(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" diff --git a/internal/acceptance/data_volumes_test.go b/internal/acceptance/data_volumes_test.go index 968ab4a29c..67581ea3fe 100644 --- a/internal/acceptance/data_volumes_test.go +++ b/internal/acceptance/data_volumes_test.go @@ -19,7 +19,7 @@ func checkDataSourceVolumesPopulated(t *testing.T) func(s *terraform.State) erro } } func TestUcAccDataSourceVolumes(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" @@ -70,7 +70,7 @@ func checkDataSourceVolumesPluginFrameworkPopulated(t *testing.T) func(s *terraf } func TestUcAccDataSourceVolumesPluginFramework(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_catalog" "sandbox" { name = "sandbox{var.RANDOM}" diff --git a/internal/acceptance/dbfs_file_test.go b/internal/acceptance/dbfs_file_test.go index 939422a352..c9ffcd88ee 100644 --- a/internal/acceptance/dbfs_file_test.go +++ b/internal/acceptance/dbfs_file_test.go @@ -5,12 +5,12 @@ import ( ) func TestAccDatabricksDBFSFile_CreateViaContent(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_dbfs_file" "this" { content_base64 = base64encode("{var.RANDOM}") path = "/tmp/tf-test/{var.RANDOM}.bin" }`, - }, step{ + }, Step{ Template: `resource "databricks_dbfs_file" "this" { content_base64 = base64encode("{var.RANDOM}-changed") path = "/tmp/tf-test/{var.RANDOM}.bin" @@ -19,7 +19,7 @@ func TestAccDatabricksDBFSFile_CreateViaContent(t *testing.T) { } func TestAccDatabricksDBFSFile_CreateViaSource(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_dbfs_file" "file_1" { source = 
"{var.CWD}/../../storage/testdata/tf-test-python.py" path = "/tmp/tf-test/file-source-{var.RANDOM}" diff --git a/internal/acceptance/default_namespace_test.go b/internal/acceptance/default_namespace_test.go index 83476fbecd..a2f4402c4e 100644 --- a/internal/acceptance/default_namespace_test.go +++ b/internal/acceptance/default_namespace_test.go @@ -21,7 +21,7 @@ func TestAccDefaultNamespaceSetting(t *testing.T) { } } ` - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: template, Check: resourceCheckWithState("databricks_default_namespace_setting.this", func(ctx context.Context, client *common.DatabricksClient, state *terraform.InstanceState) error { @@ -39,7 +39,7 @@ func TestAccDefaultNamespaceSetting(t *testing.T) { return nil }), }, - step{ + Step{ Template: template, Destroy: true, Check: resourceCheck("databricks_default_namespace_setting.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { diff --git a/internal/acceptance/directory_test.go b/internal/acceptance/directory_test.go index 0de7924f93..d80e250d85 100644 --- a/internal/acceptance/directory_test.go +++ b/internal/acceptance/directory_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccDirectoryResource(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_directory" "this" { path = "/Shared/provider-test/dir_{var.RANDOM}" } diff --git a/internal/acceptance/entitlements_test.go b/internal/acceptance/entitlements_test.go index 49d5c2a88a..2aeb4e2d0b 100644 --- a/internal/acceptance/entitlements_test.go +++ b/internal/acceptance/entitlements_test.go @@ -21,13 +21,13 @@ func (e entitlement) String() string { return fmt.Sprintf("%s = %t", e.name, e.value) } -func entitlementsStepBuilder(t *testing.T, r entitlementResource) func(entitlements []entitlement) step { - return func(entitlements []entitlement) step { +func entitlementsStepBuilder(t *testing.T, r entitlementResource) func(entitlements []entitlement) Step { + return func(entitlements []entitlement) Step { entitlementsBuf := strings.Builder{} for _, entitlement := range entitlements { entitlementsBuf.WriteString(fmt.Sprintf("%s\n", entitlement.String())) } - return step{ + return Step{ Template: fmt.Sprintf(` %s resource "databricks_entitlements" "entitlements_users" { @@ -56,10 +56,10 @@ func entitlementsStepBuilder(t *testing.T, r entitlementResource) func(entitleme } } -func makeEntitlementsSteps(t *testing.T, r entitlementResource, entitlementsSteps [][]entitlement) []step { +func makeEntitlementsSteps(t *testing.T, r entitlementResource, entitlementsSteps [][]entitlement) []Step { r.setDisplayName(RandomName("entitlements-")) makeEntitlementsStep := entitlementsStepBuilder(t, r) - steps := make([]step, len(entitlementsSteps)) + steps := make([]Step, len(entitlementsSteps)) for i, entitlements := range entitlementsSteps { steps[i] = makeEntitlementsStep(entitlements) } @@ -114,7 +114,7 @@ func TestAccEntitlementsAddToEmpty(t *testing.T) { {"databricks_sql_access", true}, }, }) - workspaceLevel(t, steps...) + WorkspaceLevel(t, steps...) }) } @@ -135,7 +135,7 @@ func TestAccEntitlementsSetExplicitlyToFalse(t *testing.T) { {"databricks_sql_access", false}, }, }) - workspaceLevel(t, steps...) + WorkspaceLevel(t, steps...) }) } @@ -150,7 +150,7 @@ func TestAccEntitlementsRemoveExisting(t *testing.T) { }, {}, }) - workspaceLevel(t, steps...) + WorkspaceLevel(t, steps...) 
}) } @@ -164,6 +164,6 @@ func TestAccEntitlementsSomeTrueSomeFalse(t *testing.T) { {"databricks_sql_access", true}, }, }) - workspaceLevel(t, steps...) + WorkspaceLevel(t, steps...) }) } diff --git a/internal/acceptance/external_location_test.go b/internal/acceptance/external_location_test.go index fd8f497750..a7b0074cf6 100644 --- a/internal/acceptance/external_location_test.go +++ b/internal/acceptance/external_location_test.go @@ -44,7 +44,7 @@ func storageCredentialTemplateWithOwner(comment, owner string) string { } func TestUcAccExternalLocation(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_storage_credential" "external" { name = "cred-{var.RANDOM}" @@ -64,7 +64,7 @@ func TestUcAccExternalLocation(t *testing.T) { } func TestUcAccExternalLocationForceDestroy(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_storage_credential" "external" { name = "cred-{var.RANDOM}" @@ -85,19 +85,19 @@ func TestUcAccExternalLocationForceDestroy(t *testing.T) { } func TestUcAccExternalLocationUpdate(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: storageCredentialTemplateWithOwner("Managed by TF", "account users") + externalLocationTemplateWithOwner("Managed by TF", "account users") + grantsTemplateForExternalLocation, - }, step{ + }, Step{ Template: storageCredentialTemplateWithOwner("Managed by TF -- Updated Comment", "account users") + externalLocationTemplateWithOwner("Managed by TF -- Updated Comment", "account users") + grantsTemplateForExternalLocation, - }, step{ + }, Step{ Template: storageCredentialTemplateWithOwner("Managed by TF -- Updated Comment", "{env.TEST_DATA_ENG_GROUP}") + externalLocationTemplateWithOwner("Managed by TF -- Updated Comment", "{env.TEST_DATA_ENG_GROUP}") + grantsTemplateForExternalLocation, - }, step{ + }, Step{ Template: storageCredentialTemplateWithOwner("Managed by TF -- Updated Comment 2", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}") + externalLocationTemplateWithOwner("Managed by TF -- Updated Comment 2", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}") + grantsTemplateForExternalLocation, diff --git a/internal/acceptance/file_test.go b/internal/acceptance/file_test.go index d821d05b73..54f05032cd 100644 --- a/internal/acceptance/file_test.go +++ b/internal/acceptance/file_test.go @@ -18,7 +18,7 @@ import ( func TestUcAccFileDontUpdateIfNoChange(t *testing.T) { createdTime := "" - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -50,7 +50,7 @@ func TestUcAccFileDontUpdateIfNoChange(t *testing.T) { createdTime = m.LastModified return nil }), - }, step{ + }, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -86,7 +86,7 @@ func TestUcAccFileDontUpdateIfNoChange(t *testing.T) { func TestUcAccFileUpdateOnLocalContentChange(t *testing.T) { createdTime := "" - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -118,7 +118,7 @@ func TestUcAccFileUpdateOnLocalContentChange(t *testing.T) { createdTime = m.LastModified return nil }), - }, step{ + }, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -174,7 +174,7 @@ func TestUcAccFileUpdateOnLocalFileChange(t *testing.T) { source = "%s" path = 
"/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" }`, fileName) - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ PreConfig: func() { os.Mkdir(tmpDir, 0755) os.WriteFile(fileName, []byte("abc\n"), 0644) @@ -193,7 +193,7 @@ func TestUcAccFileUpdateOnLocalFileChange(t *testing.T) { createdTime = m.LastModified return nil }), - }, step{ + }, Step{ PreConfig: func() { os.WriteFile(fileName, []byte("def\n"), 0644) }, @@ -235,7 +235,7 @@ func TestUcAccFileNoUpdateIfFileDoesNotChange(t *testing.T) { source = "%s" path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" }`, fileName) - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ PreConfig: func() { os.Mkdir(tmpDir, 0755) os.WriteFile(fileName, []byte("abc\n"), 0644) @@ -254,7 +254,7 @@ func TestUcAccFileNoUpdateIfFileDoesNotChange(t *testing.T) { createdTime = m.LastModified return nil }), - }, step{ + }, Step{ Template: template, Check: resourceCheck("databricks_file.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { w, err := client.WorkspaceClient() @@ -273,7 +273,7 @@ func TestUcAccFileNoUpdateIfFileDoesNotChange(t *testing.T) { func TestUcAccFileUpdateServerChange(t *testing.T) { createdTime := "" - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -314,7 +314,7 @@ func TestUcAccFileUpdateServerChange(t *testing.T) { return nil }), }, - step{ + Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -356,7 +356,7 @@ func TestUcAccFileUpdateServerChange(t *testing.T) { } func TestUcAccFileFullLifeCycle(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -375,7 +375,7 @@ func TestUcAccFileFullLifeCycle(t *testing.T) { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" }`, - }, step{ + }, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -398,7 +398,7 @@ func TestUcAccFileFullLifeCycle(t *testing.T) { } func TestUcAccFileBase64FullLifeCycle(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" @@ -417,7 +417,7 @@ func TestUcAccFileBase64FullLifeCycle(t *testing.T) { content_base64 = "YWJjCg==" path = "/Volumes/${databricks_volume.this.catalog_name}/${databricks_volume.this.schema_name}/${databricks_volume.this.name}/abcde" }`, - }, step{ + }, Step{ Template: ` resource "databricks_schema" "this" { name = "schema-{var.STICKY_RANDOM}" diff --git a/internal/acceptance/git_credential_test.go b/internal/acceptance/git_credential_test.go index 4cae7b2c84..7d418ebbf1 100644 --- a/internal/acceptance/git_credential_test.go +++ b/internal/acceptance/git_credential_test.go @@ -9,7 +9,7 @@ import ( ) func TestAccGitCredentials(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_git_credential" "this" { git_username = "test" git_provider = "gitHub" diff --git a/internal/acceptance/global_init_script_test.go b/internal/acceptance/global_init_script_test.go index 
626fa757a7..3f6c1c2cdf 100644 --- a/internal/acceptance/global_init_script_test.go +++ b/internal/acceptance/global_init_script_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccGlobalInitScriptResource_Create(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_global_init_script" "this" { name = "init-{var.RANDOM}" diff --git a/internal/acceptance/grant_test.go b/internal/acceptance/grant_test.go index f0da0e8f77..de0e83d32f 100644 --- a/internal/acceptance/grant_test.go +++ b/internal/acceptance/grant_test.go @@ -99,11 +99,11 @@ resource "databricks_grant" "some" { }` func TestUcAccGrant(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: strings.ReplaceAll(grantTemplate, "%s", "{env.TEST_DATA_ENG_GROUP}"), - }, step{ + }, Step{ Template: strings.ReplaceAll(grantTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), - }, step{ + }, Step{ Template: strings.ReplaceAll(strings.ReplaceAll(grantTemplate, "ALL_PRIVILEGES", "ALL PRIVILEGES"), "%s", "{env.TEST_DATA_ENG_GROUP}"), }) } @@ -127,11 +127,11 @@ func grantTemplateForNamePermissionChange(suffix string, permission string) stri } func TestUcAccGrantForIdChange(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: grantTemplateForNamePermissionChange("-old", "ALL_PRIVILEGES"), - }, step{ + }, Step{ Template: grantTemplateForNamePermissionChange("-new", "ALL_PRIVILEGES"), - }, step{ + }, Step{ Template: grantTemplateForNamePermissionChange("-fail", "abc"), ExpectError: regexp.MustCompile(`cannot create grant: Privilege ABC is not applicable to this entity`), }) diff --git a/internal/acceptance/grants_test.go b/internal/acceptance/grants_test.go index 251ccb94f3..43fc09d650 100644 --- a/internal/acceptance/grants_test.go +++ b/internal/acceptance/grants_test.go @@ -105,11 +105,11 @@ resource "databricks_grants" "some" { }` func TestUcAccGrants(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: strings.ReplaceAll(grantsTemplate, "%s", "{env.TEST_DATA_ENG_GROUP}"), - }, step{ + }, Step{ Template: strings.ReplaceAll(grantsTemplate, "%s", "{env.TEST_DATA_SCI_GROUP}"), - }, step{ + }, Step{ Template: strings.ReplaceAll(strings.ReplaceAll(grantsTemplate, "ALL_PRIVILEGES", "ALL PRIVILEGES"), "%s", "{env.TEST_DATA_ENG_GROUP}"), }) } @@ -135,11 +135,11 @@ func grantsTemplateForNamePermissionChange(suffix string, permission string) str } func TestUcAccGrantsForIdChange(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: grantsTemplateForNamePermissionChange("-old", "ALL_PRIVILEGES"), - }, step{ + }, Step{ Template: grantsTemplateForNamePermissionChange("-new", "ALL_PRIVILEGES"), - }, step{ + }, Step{ Template: grantsTemplateForNamePermissionChange("-fail", "abc"), ExpectError: regexp.MustCompile(`Error: cannot create grants: Privilege ABC is not applicable to this entity`), }) diff --git a/internal/acceptance/group_member_test.go b/internal/acceptance/group_member_test.go index dabbce3842..abaf921bd0 100644 --- a/internal/acceptance/group_member_test.go +++ b/internal/acceptance/group_member_test.go @@ -29,7 +29,7 @@ resource "databricks_group_member" "rs" { }` func TestMwsAccGroupMemberResource(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: groupMemberTest, Check: resourceCheck("databricks_group.root", func(ctx context.Context, client *common.DatabricksClient, id string) error { @@ -44,7 +44,7 @@ func TestMwsAccGroupMemberResource(t *testing.T) { 
} func TestAccGroupMemberResource(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: groupMemberTest, Check: resourceCheck("databricks_group.root", func(ctx context.Context, client *common.DatabricksClient, id string) error { diff --git a/internal/acceptance/group_role_test.go b/internal/acceptance/group_role_test.go index 7ba0c167df..8c6a929501 100644 --- a/internal/acceptance/group_role_test.go +++ b/internal/acceptance/group_role_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccGroupRole(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_group" "this" { display_name = "tf-{var.RANDOM}" diff --git a/internal/acceptance/group_test.go b/internal/acceptance/group_test.go index 95ead635cf..285a0f1952 100644 --- a/internal/acceptance/group_test.go +++ b/internal/acceptance/group_test.go @@ -16,7 +16,7 @@ import ( func TestMwsAccGroupsExternalIdAndScimProvisioning(t *testing.T) { name := qa.RandomName("tfgroup") - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `resource "databricks_group" "this" { display_name = "` + name + `" }`, @@ -33,7 +33,7 @@ func TestMwsAccGroupsExternalIdAndScimProvisioning(t *testing.T) { return groupsAPI.UpdateNameAndEntitlements( id, group.DisplayName, qa.RandomName("ext-id"), group.Entitlements) }), - }, step{ + }, Step{ Template: `resource "databricks_group" "this" { display_name = "` + name + `" }`, @@ -43,7 +43,7 @@ func TestMwsAccGroupsExternalIdAndScimProvisioning(t *testing.T) { // https://github.com/databricks/terraform-provider-databricks/issues/1099 func TestAccGroupsExternalIdAndScimProvisioning(t *testing.T) { name := qa.RandomName("tfgroup") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_group" "this" { display_name = "` + name + `" allow_cluster_create = true @@ -63,7 +63,7 @@ func TestAccGroupsExternalIdAndScimProvisioning(t *testing.T) { id, group.DisplayName, qa.RandomName("ext-id"), group.Entitlements) }), ), - }, step{ + }, Step{ Template: `resource "databricks_group" "this" { display_name = "` + name + `" allow_cluster_create = true @@ -74,7 +74,7 @@ func TestAccGroupsExternalIdAndScimProvisioning(t *testing.T) { func TestMwsAccGroupsUpdateDisplayName(t *testing.T) { nameInit := qa.RandomName("tfgroup") nameUpdate := qa.RandomName("tfgroup") - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `resource "databricks_group" "this" { display_name = "` + nameInit + `" }`, @@ -91,7 +91,7 @@ func TestMwsAccGroupsUpdateDisplayName(t *testing.T) { return nil }), ), - }, step{ + }, Step{ Template: `resource "databricks_group" "this" { display_name = "` + nameUpdate + `" }`, @@ -113,7 +113,7 @@ func TestMwsAccGroupsUpdateDisplayName(t *testing.T) { func TestAccGroupsUpdateDisplayName(t *testing.T) { nameInit := qa.RandomName("tfgroup") nameUpdate := qa.RandomName("tfgroup") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_group" "this" { display_name = "` + nameInit + `" }`, @@ -130,7 +130,7 @@ func TestAccGroupsUpdateDisplayName(t *testing.T) { return nil }), ), - }, step{ + }, Step{ Template: `resource "databricks_group" "this" { display_name = "` + nameUpdate + `" }`, diff --git a/internal/acceptance/init_test.go b/internal/acceptance/init.go similarity index 97% rename from internal/acceptance/init_test.go rename to internal/acceptance/init.go index fb13d01567..704964b710 100644 --- a/internal/acceptance/init_test.go +++ b/internal/acceptance/init.go @@ -36,28 +36,28 @@ func init() { 
dbproviderlogger.SetTfLogger(dbproviderlogger.NewTfLogger(context.Background())) } -func workspaceLevel(t *testing.T, steps ...step) { +func WorkspaceLevel(t *testing.T, steps ...Step) { loadWorkspaceEnv(t) run(t, steps) } -func accountLevel(t *testing.T, steps ...step) { +func AccountLevel(t *testing.T, steps ...Step) { loadAccountEnv(t) run(t, steps) } -func unityWorkspaceLevel(t *testing.T, steps ...step) { +func UnityWorkspaceLevel(t *testing.T, steps ...Step) { loadUcwsEnv(t) run(t, steps) } -func unityAccountLevel(t *testing.T, steps ...step) { +func UnityAccountLevel(t *testing.T, steps ...Step) { loadUcacctEnv(t) run(t, steps) } // A step in a terraform acceptance test -type step struct { +type Step struct { // Terraform HCL for resources to materialize in this test step. Template string @@ -132,7 +132,7 @@ func environmentTemplate(t *testing.T, template string, otherVars ...map[string] // Test wrapper over terraform testing framework. Multiple steps share the same // terraform state context. -func run(t *testing.T, steps []step) { +func run(t *testing.T, steps []Step) { cloudEnv := os.Getenv("CLOUD_ENV") if cloudEnv == "" { t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set") diff --git a/internal/acceptance/instance_profile_test.go b/internal/acceptance/instance_profile_test.go index b5ddb13600..f663c17d43 100644 --- a/internal/acceptance/instance_profile_test.go +++ b/internal/acceptance/instance_profile_test.go @@ -7,9 +7,9 @@ import ( // "databricks_instance_profile" is a singleton. To avoid multiple tests using this resource // from interfering with each other, we run them in sequence as steps of a single test. func TestAccInstanceProfileIntegrationSuite(t *testing.T) { - workspaceLevel(t, + WorkspaceLevel(t, // Assign instance profile to group - step{ + Step{ Template: ` resource "databricks_instance_profile" "this" { instance_profile_arn = "{env.DUMMY_EC2_INSTANCE_PROFILE}" @@ -22,7 +22,7 @@ func TestAccInstanceProfileIntegrationSuite(t *testing.T) { instance_profile_id = databricks_instance_profile.this.id }`}, // Assign instance profile to mount - step{ + Step{ Template: ` resource "databricks_instance_profile" "this" { instance_profile_arn = "{env.DUMMY_EC2_INSTANCE_PROFILE}" @@ -37,7 +37,7 @@ func TestAccInstanceProfileIntegrationSuite(t *testing.T) { }`, }, // ServicePrincipal resource on Aws with role - step{ + Step{ Template: ` resource "databricks_service_principal" "this" { display_name = "SPN {var.RANDOM}" diff --git a/internal/acceptance/ip_access_list_test.go b/internal/acceptance/ip_access_list_test.go index 4910e3b2ab..bfd1802193 100644 --- a/internal/acceptance/ip_access_list_test.go +++ b/internal/acceptance/ip_access_list_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccIPACLListsResourceFullLifecycle(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_ip_access_list" "this" { label = "tf-{var.RANDOM}" @@ -15,7 +15,7 @@ func TestAccIPACLListsResourceFullLifecycle(t *testing.T) { "10.0.10.0/24" ] }`, - }, step{ + }, Step{ Template: ` resource "databricks_ip_access_list" "this" { label = "tf-{var.RANDOM}" diff --git a/internal/acceptance/job_test.go b/internal/acceptance/job_test.go index ddf0d74325..b28b5a6bb9 100644 --- a/internal/acceptance/job_test.go +++ b/internal/acceptance/job_test.go @@ -16,7 +16,7 @@ import ( ) func TestAccJobTasks(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_current_user" "me" {} data "databricks_spark_version" "latest" {} 
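The exported `Step` type is what makes the multi-step flows above possible: each `Step` has its `Template` rendered (via `environmentTemplate`) and applied against the same Terraform state as the steps before it. The tests in this patch reach for `{var.STICKY_RANDOM}` whenever a resource name must stay identical across steps, and `{var.RANDOM}` otherwise. A minimal sketch of that update pattern, written as if it lived in the acceptance package itself and using an assumed test name and catalog template rather than any test from this patch:

```go
package acceptance

import "testing"

// Illustrative sketch, not a test added by this patch: two Steps share one
// Terraform state, and {var.STICKY_RANDOM} keeps the catalog name stable so
// the second Step updates the catalog in place instead of replacing it.
func TestUcAccExampleCatalogCommentUpdate(t *testing.T) {
	UnityWorkspaceLevel(t, Step{
		Template: `
		resource "databricks_catalog" "sandbox" {
			name          = "sandbox{var.STICKY_RANDOM}"
			comment       = "initial comment"
			force_destroy = true
		}`,
	}, Step{
		Template: `
		resource "databricks_catalog" "sandbox" {
			name          = "sandbox{var.STICKY_RANDOM}"
			comment       = "updated comment"
			force_destroy = true
		}`,
	})
}
```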
@@ -120,7 +120,7 @@ func TestAccJobTasks(t *testing.T) { func TestAccForEachTask(t *testing.T) { t.Skip("Skipping this test because feature not enabled in Prod") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` data "databricks_current_user" "me" {} data "databricks_spark_version" "latest" {} @@ -282,19 +282,19 @@ func TestAccJobControlRunState(t *testing.T) { } randomName1 := RandomName("notebook-") randomName2 := RandomName("updated-notebook-") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ // A new continuous job with empty block should be started automatically Template: getJobTemplate(randomName1, ``), Check: resourceCheck("databricks_job.this", waitForRunToStart), - }, step{ + }, Step{ // Updating the notebook should cancel the existing run Template: getJobTemplate(randomName2, ``), Check: resourceCheck("databricks_job.this", waitForRunToStart), - }, step{ + }, Step{ // Marking the job as paused should cancel existing run and not start a new one Template: getJobTemplate(randomName2, `pause_status = "PAUSED"`), Check: resourceCheck("databricks_job.this", waitForAllRunsToEnd), - }, step{ + }, Step{ // No pause status should be the equivalent of unpaused Template: getJobTemplate(randomName2, `pause_status = "UNPAUSED"`), Check: resourceCheck("databricks_job.this", waitForRunToStart), @@ -346,7 +346,7 @@ func runAsTemplate(runAs string) string { } func TestAccJobRunAsUser(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_user" "this" { user_name = "` + qa.RandomEmail() + `" @@ -358,7 +358,7 @@ func TestAccJobRunAsUser(t *testing.T) { func TestUcAccJobRunAsServicePrincipal(t *testing.T) { loadUcwsEnv(t) spId := GetEnvOrSkipTest(t, "ACCOUNT_LEVEL_SERVICE_PRINCIPAL_ID") - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: runAsTemplate(`service_principal_name = "` + spId + `"`), }) } @@ -378,18 +378,18 @@ func TestUcAccJobRunAsMutations(t *testing.T) { // Note: the attribute must match the type of principal that the test is run as. ctx := context.Background() attribute := getRunAsAttribute(t, ctx) - unityWorkspaceLevel( + UnityWorkspaceLevel( t, // Provision job with service principal `run_as` - step{ + Step{ Template: runAsTemplate(`service_principal_name = "` + spId + `"`), }, // Update job to a user `run_as` - step{ + Step{ Template: runAsTemplate(attribute + ` = data.databricks_current_user.me.user_name`), }, // Update job back to a service principal `run_as` - step{ + Step{ Template: runAsTemplate(`service_principal_name = "` + spId + `"`), }, ) @@ -397,7 +397,7 @@ func TestUcAccJobRunAsMutations(t *testing.T) { func TestAccRemoveWebhooks(t *testing.T) { skipf(t)("There is no API to create notification destinations. 
Once available, add here and enable this test.") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource databricks_job test { webhook_notifications { @@ -407,7 +407,7 @@ func TestAccRemoveWebhooks(t *testing.T) { } } `, - }, step{ + }, Step{ Template: ` resource databricks_job test {} `, @@ -415,7 +415,7 @@ func TestAccRemoveWebhooks(t *testing.T) { } func TestAccPeriodicTrigger(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_job" "this" { name = "{var.RANDOM}" diff --git a/internal/acceptance/metastore_assignment_test.go b/internal/acceptance/metastore_assignment_test.go index 8b0b6d3dbd..415f79fb8c 100644 --- a/internal/acceptance/metastore_assignment_test.go +++ b/internal/acceptance/metastore_assignment_test.go @@ -15,7 +15,7 @@ func lockForTest(t *testing.T) func() { } func TestUcAccMetastoreAssignment(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ PreConfig: lockForTest(t), Template: `resource "databricks_metastore_assignment" "this" { metastore_id = "{env.TEST_METASTORE_ID}" @@ -25,13 +25,13 @@ func TestUcAccMetastoreAssignment(t *testing.T) { } func TestUcAccAccountMetastoreAssignment(t *testing.T) { - unityAccountLevel(t, step{ + UnityAccountLevel(t, Step{ PreConfig: lockForTest(t), Template: `resource "databricks_metastore_assignment" "this" { metastore_id = "{env.TEST_METASTORE_ID}" workspace_id = {env.DUMMY_WORKSPACE_ID} }`, - }, step{ + }, Step{ Template: `resource "databricks_metastore_assignment" "this" { metastore_id = "{env.TEST_METASTORE_ID}" workspace_id = {env.DUMMY2_WORKSPACE_ID} diff --git a/internal/acceptance/metastore_data_access_test.go b/internal/acceptance/metastore_data_access_test.go index b9c6f6046d..34a3b5032a 100644 --- a/internal/acceptance/metastore_data_access_test.go +++ b/internal/acceptance/metastore_data_access_test.go @@ -5,7 +5,7 @@ import ( ) func TestUcAccAccountMetastoreDataAccessOnAws(t *testing.T) { - unityAccountLevel(t, step{ + UnityAccountLevel(t, Step{ Template: ` resource "databricks_metastore" "this" { name = "primary-{var.RANDOM}" @@ -24,7 +24,7 @@ func TestUcAccAccountMetastoreDataAccessOnAws(t *testing.T) { } func TestUcAccMetastoreDataAccessOnAws(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_metastore_data_access" "this" { metastore_id = "{env.TEST_METASTORE_ID}" diff --git a/internal/acceptance/metastore_test.go b/internal/acceptance/metastore_test.go index 5ef31beb6a..a430b91839 100644 --- a/internal/acceptance/metastore_test.go +++ b/internal/acceptance/metastore_test.go @@ -87,7 +87,7 @@ func getTemplateFromExtraAttributes(t *testing.T, extraAttributes map[string]any func runMetastoreTest(t *testing.T, extraAttributes map[string]any) { template := getTemplateFromExtraAttributes(t, extraAttributes) - unityAccountLevel(t, step{ + UnityAccountLevel(t, Step{ Template: fmt.Sprintf(`resource "databricks_metastore" "this" { name = "{var.RANDOM}" force_destroy = true @@ -98,21 +98,21 @@ func runMetastoreTest(t *testing.T, extraAttributes map[string]any) { func runMetastoreTestWithOwnerUpdates(t *testing.T, extraAttributes map[string]any) { template := getTemplateFromExtraAttributes(t, extraAttributes) - unityAccountLevel(t, step{ + UnityAccountLevel(t, Step{ Template: fmt.Sprintf(`resource "databricks_metastore" "this" { name = "{var.STICKY_RANDOM}" force_destroy = true owner = "account users" %s }`, template), - }, step{ + }, Step{ Template: fmt.Sprintf(`resource 
"databricks_metastore" "this" { name = "{var.STICKY_RANDOM}" force_destroy = true owner = "{env.TEST_DATA_ENG_GROUP}" %s }`, template), - }, step{ + }, Step{ Template: fmt.Sprintf(`resource "databricks_metastore" "this" { name = "{var.STICKY_RANDOM}-updated" force_destroy = true diff --git a/internal/acceptance/mlflow_experiment_test.go b/internal/acceptance/mlflow_experiment_test.go index 2a6f76c98f..024f03fda8 100644 --- a/internal/acceptance/mlflow_experiment_test.go +++ b/internal/acceptance/mlflow_experiment_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccMLflowExperiment(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_mlflow_experiment" "e1" { name = "/Shared/tf-{var.RANDOM}" diff --git a/internal/acceptance/mlflow_model_test.go b/internal/acceptance/mlflow_model_test.go index db3d597273..6dc2e47c9d 100644 --- a/internal/acceptance/mlflow_model_test.go +++ b/internal/acceptance/mlflow_model_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccMLflowModel(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_mlflow_model" "m1" { name = "tf-{var.RANDOM}" diff --git a/internal/acceptance/model_serving_test.go b/internal/acceptance/model_serving_test.go index f0bafb7368..fdc00a920f 100644 --- a/internal/acceptance/model_serving_test.go +++ b/internal/acceptance/model_serving_test.go @@ -15,7 +15,7 @@ func TestAccModelServing(t *testing.T) { name := fmt.Sprintf("terraform-test-model-serving-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_model_serving" "endpoint" { name = "%s" @@ -57,7 +57,7 @@ func TestAccModelServing(t *testing.T) { } `, name), }, - step{ + Step{ Template: fmt.Sprintf(` resource "databricks_model_serving" "endpoint" { name = "%s" @@ -90,7 +90,7 @@ func TestUcAccModelServingProvisionedThroughput(t *testing.T) { name := fmt.Sprintf("terraform-test-model-serving-pt-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_model_serving" "endpoint" { name = "%s" @@ -111,7 +111,7 @@ func TestUcAccModelServingProvisionedThroughput(t *testing.T) { } } `, name), - }, step{ + }, Step{ Template: fmt.Sprintf(` resource "databricks_model_serving" "endpoint" { name = "%s" @@ -132,7 +132,7 @@ func TestUcAccModelServingProvisionedThroughput(t *testing.T) { } } `, name), - }, step{ + }, Step{ Template: fmt.Sprintf(` resource "databricks_model_serving" "endpoint" { name = "%s" @@ -167,7 +167,7 @@ func TestAccModelServingExternalModel(t *testing.T) { acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) scope_name := fmt.Sprintf("terraform-test-secret-scope-%s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_secret_scope" "scope" { name = "%s" @@ -203,7 +203,7 @@ func TestAccModelServingExternalModel(t *testing.T) { } `, scope_name, name), }, - step{ + Step{ Template: fmt.Sprintf(` resource "databricks_secret_scope" "scope" { name = "%s" diff --git a/internal/acceptance/mounts_test.go b/internal/acceptance/mounts_test.go index 0a7270563c..9b5401b163 100644 --- a/internal/acceptance/mounts_test.go +++ b/internal/acceptance/mounts_test.go @@ -38,8 +38,8 @@ resource "databricks_mount" "my_mount" { }` func TestAccCreateDatabricksMount(t *testing.T) { - 
workspaceLevel(t, - step{ + WorkspaceLevel(t, + Step{ Template: mountHcl, }) } @@ -48,9 +48,9 @@ func TestAccCreateDatabricksMountIsFineOnClusterRecreate(t *testing.T) { clusterId1 := "" clusterId2 := "" - workspaceLevel(t, + WorkspaceLevel(t, // Step 1 creates the cluster and mount. - step{ + Step{ Template: mountHcl, Check: func(s *terraform.State) error { resources := s.RootModule().Resources @@ -72,7 +72,7 @@ func TestAccCreateDatabricksMountIsFineOnClusterRecreate(t *testing.T) { }, // Step 2: Manually delete the cluster, and then reapply the config. The mount // will be recreated in this case. - step{ + Step{ PreConfig: func() { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) diff --git a/internal/acceptance/mws_credentials_test.go b/internal/acceptance/mws_credentials_test.go index 3c57a957fb..7f2c82e1de 100644 --- a/internal/acceptance/mws_credentials_test.go +++ b/internal/acceptance/mws_credentials_test.go @@ -5,7 +5,7 @@ import ( ) func TestMwsAccCredentials(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `resource "databricks_mws_credentials" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" credentials_name = "creds-test-{var.RANDOM}" diff --git a/internal/acceptance/mws_customer_managed_keys_test.go b/internal/acceptance/mws_customer_managed_keys_test.go index e78f181b39..a1485d81ee 100644 --- a/internal/acceptance/mws_customer_managed_keys_test.go +++ b/internal/acceptance/mws_customer_managed_keys_test.go @@ -5,7 +5,7 @@ import ( ) func TestMwsAccAwsCustomerManagedKeys(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `resource "databricks_mws_customer_managed_keys" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" aws_key_info { @@ -18,7 +18,7 @@ func TestMwsAccAwsCustomerManagedKeys(t *testing.T) { } func TestMwsAccGcpCustomerManagedKeysForStorage(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `resource "databricks_mws_customer_managed_keys" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" gcp_key_info { diff --git a/internal/acceptance/mws_log_delivery_test.go b/internal/acceptance/mws_log_delivery_test.go index 943e739e4a..ac49aafc68 100644 --- a/internal/acceptance/mws_log_delivery_test.go +++ b/internal/acceptance/mws_log_delivery_test.go @@ -5,7 +5,7 @@ import ( ) func TestMwsAccLogDelivery(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: `resource "databricks_mws_credentials" "ld" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" credentials_name = "tf-acceptance-logdelivery-{var.RANDOM}" diff --git a/internal/acceptance/mws_network_connectivity_config_test.go b/internal/acceptance/mws_network_connectivity_config_test.go index 1e19e657fd..d7fbf5d8f8 100644 --- a/internal/acceptance/mws_network_connectivity_config_test.go +++ b/internal/acceptance/mws_network_connectivity_config_test.go @@ -6,7 +6,7 @@ import ( func TestMwsAccNetworkConnectivityConfig(t *testing.T) { if isAzure(t) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_network_connectivity_config" "this" { name = "tf-{var.RANDOM}" @@ -19,7 +19,7 @@ func TestMwsAccNetworkConnectivityConfig(t *testing.T) { group_id = "blob" } `, - }, step{ + }, Step{ Template: ` resource "databricks_mws_network_connectivity_config" "this" { name = "tf-{var.RANDOM}" @@ -35,7 +35,7 @@ func TestMwsAccNetworkConnectivityConfig(t *testing.T) { }) } if isAws(t) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource 
"databricks_mws_network_connectivity_config" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -43,7 +43,7 @@ func TestMwsAccNetworkConnectivityConfig(t *testing.T) { region = "{env.AWS_REGION}" } `, - }, step{ + }, Step{ Template: ` resource "databricks_mws_network_connectivity_config" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" diff --git a/internal/acceptance/mws_networks_test.go b/internal/acceptance/mws_networks_test.go index e2b1f49832..c28641a1fb 100644 --- a/internal/acceptance/mws_networks_test.go +++ b/internal/acceptance/mws_networks_test.go @@ -6,7 +6,7 @@ import ( func TestMwsAccNetworks(t *testing.T) { GetEnvOrSkipTest(t, "TEST_ROOT_BUCKET") // marker for AWS test env - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_networks" "my_network" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -24,7 +24,7 @@ func TestMwsAccNetworks(t *testing.T) { } func TestMwsAccGcpPscNetworks(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_networks" "my_network" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" diff --git a/internal/acceptance/mws_permissionassignments_test.go b/internal/acceptance/mws_permissionassignments_test.go index e63fceaa35..a251b98560 100644 --- a/internal/acceptance/mws_permissionassignments_test.go +++ b/internal/acceptance/mws_permissionassignments_test.go @@ -5,7 +5,7 @@ import ( ) func TestUcAccAssignGroupToWorkspace(t *testing.T) { - unityAccountLevel(t, step{ + UnityAccountLevel(t, Step{ Template: ` resource "databricks_group" "this" { display_name = "TF {var.RANDOM}" @@ -15,7 +15,7 @@ func TestUcAccAssignGroupToWorkspace(t *testing.T) { principal_id = databricks_group.this.id permissions = ["USER"] }`, - }, step{ + }, Step{ Template: ` resource "databricks_group" "this" { display_name = "TF {var.RANDOM}" @@ -25,7 +25,7 @@ func TestUcAccAssignGroupToWorkspace(t *testing.T) { principal_id = databricks_group.this.id permissions = ["ADMIN"] }`, - }, step{ + }, Step{ Template: ` resource "databricks_group" "this" { display_name = "TF {var.RANDOM}" @@ -39,7 +39,7 @@ func TestUcAccAssignGroupToWorkspace(t *testing.T) { } func TestAccAssignSpnToWorkspace(t *testing.T) { - unityAccountLevel(t, step{ + UnityAccountLevel(t, Step{ Template: ` resource "databricks_service_principal" "this" { display_name = "TF {var.RANDOM}" diff --git a/internal/acceptance/mws_private_access_settings_test.go b/internal/acceptance/mws_private_access_settings_test.go index 62c97a30d6..7a4199d2ce 100644 --- a/internal/acceptance/mws_private_access_settings_test.go +++ b/internal/acceptance/mws_private_access_settings_test.go @@ -6,7 +6,7 @@ import ( func TestMwsAccPrivateAccessSettings(t *testing.T) { t.SkipNow() - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_private_access_settings" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -19,7 +19,7 @@ func TestMwsAccPrivateAccessSettings(t *testing.T) { func TestMwsGcpAccPrivateAccessSettings(t *testing.T) { t.Skipf("skipping until feature is disabled") - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_private_access_settings" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" diff --git a/internal/acceptance/mws_storage_configurations_test.go b/internal/acceptance/mws_storage_configurations_test.go index 400e2e0f61..800db474ba 100644 --- a/internal/acceptance/mws_storage_configurations_test.go +++ b/internal/acceptance/mws_storage_configurations_test.go @@ -6,7 +6,7 @@ import 
( func TestMwsAccStorageConfigurations(t *testing.T) { GetEnvOrSkipTest(t, "TEST_ROOT_BUCKET") // marker for AWS test env - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_storage_configurations" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" diff --git a/internal/acceptance/mws_vpc_endpoint_test.go b/internal/acceptance/mws_vpc_endpoint_test.go index 4dedcd4bc2..59b7ef2d06 100644 --- a/internal/acceptance/mws_vpc_endpoint_test.go +++ b/internal/acceptance/mws_vpc_endpoint_test.go @@ -6,7 +6,7 @@ import ( func TestMwsAccVpcEndpoint(t *testing.T) { t.SkipNow() - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_vpc_endpoint" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -19,7 +19,7 @@ func TestMwsAccVpcEndpoint(t *testing.T) { } func TestMwsAccVpcEndpoint_GCP(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_vpc_endpoint" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" diff --git a/internal/acceptance/mws_workspaces_test.go b/internal/acceptance/mws_workspaces_test.go index 4dddcd146c..07d5279874 100644 --- a/internal/acceptance/mws_workspaces_test.go +++ b/internal/acceptance/mws_workspaces_test.go @@ -22,7 +22,7 @@ import ( ) func TestMwsAccWorkspaces(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_credentials" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -76,7 +76,7 @@ func TestMwsAccWorkspaces(t *testing.T) { } func TestMwsAccWorkspacesTokenUpdate(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_credentials" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -136,7 +136,7 @@ func TestMwsAccWorkspacesTokenUpdate(t *testing.T) { return nil }), }, - step{ + Step{ Template: ` resource "databricks_mws_credentials" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -199,7 +199,7 @@ func TestMwsAccWorkspacesTokenUpdate(t *testing.T) { } func TestMwsAccGcpWorkspaces(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_workspaces" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -218,7 +218,7 @@ func TestMwsAccGcpWorkspaces(t *testing.T) { func TestMwsAccGcpByovpcWorkspaces(t *testing.T) { t.Skip() // FIXME: flaky with `Secondary IP range (pods, svc) is already in use by another GKE cluster` - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_networks" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -255,7 +255,7 @@ func TestMwsAccGcpByovpcWorkspaces(t *testing.T) { } func TestMwsAccGcpPscWorkspaces(t *testing.T) { - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_mws_networks" "this" { account_id = "{env.DATABRICKS_ACCOUNT_ID}" @@ -374,7 +374,7 @@ func TestMwsAccAwsChangeToServicePrincipal(t *testing.T) { return providers.GetProviderServer(context.Background(), providers.WithSdkV2Provider(pr)) }, } - accountLevel(t, step{ + AccountLevel(t, Step{ Template: workspaceTemplate(`token { comment = "Test {var.STICKY_RANDOM}" }`) + servicePrincipal, Check: func(s *terraform.State) error { spId := s.RootModule().Resources["databricks_service_principal.this"].Primary.ID @@ -412,20 +412,20 @@ func TestMwsAccAwsChangeToServicePrincipal(t *testing.T) { } return nil }, - }, step{ + }, Step{ // Tolerate existing token Template: workspaceTemplate(`token { comment = "Test {var.STICKY_RANDOM}" }`) + servicePrincipal, 
ProtoV6ProviderFactories: providerFactory, - }, step{ + }, Step{ // Allow the token to be removed Template: workspaceTemplate(``) + servicePrincipal, ProtoV6ProviderFactories: providerFactory, - }, step{ + }, Step{ // Fail when adding the token back Template: workspaceTemplate(`token { comment = "Test {var.STICKY_RANDOM}" }`) + servicePrincipal, ProtoV6ProviderFactories: providerFactory, ExpectError: regexp.MustCompile(`cannot create token: the principal used by Databricks \(client ID .*\) is not authorized to create a token in this workspace`), - }, step{ + }, Step{ // Use the original provider for a final step to clean up the newly created service principal Template: workspaceTemplate(``) + servicePrincipal, }) diff --git a/internal/acceptance/notebook_test.go b/internal/acceptance/notebook_test.go index dfe3f66286..9df1973ddf 100644 --- a/internal/acceptance/notebook_test.go +++ b/internal/acceptance/notebook_test.go @@ -5,12 +5,12 @@ import ( ) func TestAccNotebookResourceScalability(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_notebook" "this" { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" path = "/Shared/provider-test/xx_{var.RANDOM}" }`, - }, step{ + }, Step{ Template: `resource "databricks_notebook" "this" { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" path = "/Shared/provider-test/xx_{var.RANDOM}_renamed" diff --git a/internal/acceptance/notification_destination_test.go b/internal/acceptance/notification_destination_test.go index 309ed39d5e..78d0474e0c 100644 --- a/internal/acceptance/notification_destination_test.go +++ b/internal/acceptance/notification_destination_test.go @@ -33,7 +33,7 @@ func checkND(t *testing.T, display_name string, config_type settings.Destination func TestAccNDEmail(t *testing.T) { display_name := "Email Notification Destination - " + qa.RandomName() - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -44,7 +44,7 @@ func TestAccNDEmail(t *testing.T) { } } `, - }, step{ + }, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -61,7 +61,7 @@ func TestAccNDEmail(t *testing.T) { func TestAccNDSlack(t *testing.T) { display_name := "Notification Destination - " + qa.RandomName() - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -73,7 +73,7 @@ func TestAccNDSlack(t *testing.T) { } `, Check: checkND(t, display_name, settings.DestinationTypeSlack), - }, step{ + }, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -90,7 +90,7 @@ func TestAccNDSlack(t *testing.T) { func TestAccNDMicrosoftTeams(t *testing.T) { display_name := "Notification Destination - " + qa.RandomName() - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -101,7 +101,7 @@ func TestAccNDMicrosoftTeams(t *testing.T) { } } `, - }, step{ + }, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -118,7 +118,7 @@ func TestAccNDMicrosoftTeams(t *testing.T) { func TestAccNDPagerduty(t *testing.T) { display_name := "Notification Destination - " + qa.RandomName() - workspaceLevel(t, step{ + WorkspaceLevel(t, 
Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -129,7 +129,7 @@ func TestAccNDPagerduty(t *testing.T) { } } `, - }, step{ + }, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -146,7 +146,7 @@ func TestAccNDPagerduty(t *testing.T) { func TestAccNDGenericWebhook(t *testing.T) { display_name := "Notification Destination - " + qa.RandomName() - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -158,7 +158,7 @@ func TestAccNDGenericWebhook(t *testing.T) { } } `, - }, step{ + }, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -176,7 +176,7 @@ func TestAccNDGenericWebhook(t *testing.T) { func TestAccConfigTypeChange(t *testing.T) { display_name := "Notification Destination - " + qa.RandomName() - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" @@ -188,7 +188,7 @@ func TestAccConfigTypeChange(t *testing.T) { } `, Check: checkND(t, display_name, settings.DestinationTypeSlack), - }, step{ + }, Step{ Template: ` resource "databricks_notification_destination" "this" { display_name = "` + display_name + `" diff --git a/internal/acceptance/obo_token_test.go b/internal/acceptance/obo_token_test.go index 1ce50ff815..43374e8e38 100644 --- a/internal/acceptance/obo_token_test.go +++ b/internal/acceptance/obo_token_test.go @@ -7,7 +7,7 @@ import ( func TestAccAwsOboTokenResource(t *testing.T) { // running this test temporarily on the UC WS level // until infrastructure gets AWS specific markers - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` // dummy: {env.TEST_GLOBAL_METASTORE_ID} resource "databricks_service_principal" "this" { diff --git a/internal/acceptance/online_table_test.go b/internal/acceptance/online_table_test.go index b7d9aefc81..390a3e86ed 100644 --- a/internal/acceptance/online_table_test.go +++ b/internal/acceptance/online_table_test.go @@ -82,5 +82,5 @@ func TestUcAccOnlineTable(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { t.Skipf("databricks_online_table resource is not available on GCP") } - unityWorkspaceLevel(t, step{Template: onlineTableHcl}) + UnityWorkspaceLevel(t, Step{Template: onlineTableHcl}) } diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 788340d606..5d803bd451 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -19,7 +19,7 @@ import ( func TestAccDatabricksPermissionsResourceFullLifecycle(t *testing.T) { randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_notebook" "this" { content_base64 = base64encode("# Databricks notebook source\nprint(1)") @@ -49,7 +49,7 @@ func TestAccDatabricksPermissionsResourceFullLifecycle(t *testing.T) { return nil }), ), - }, step{ + }, Step{ Template: fmt.Sprintf(` resource "databricks_notebook" "this" { content_base64 = base64encode("# Databricks notebook source\nprint(1)") @@ -87,7 +87,7 @@ func TestAccDatabricksPermissionsResourceFullLifecycle(t *testing.T) { func TestAccDatabricksReposPermissionsResourceFullLifecycle(t *testing.T) { randomName := 
acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_repo" "this" { url = "https://github.com/databrickslabs/tempo.git" @@ -199,8 +199,8 @@ func TestAccDatabricksPermissionsForSqlWarehouses(t *testing.T) { } }`, randomName) - workspaceLevel(t, - step{ + WorkspaceLevel(t, + Step{ Template: config1, Check: resource.ComposeTestCheckFunc( checkObjectType, @@ -211,7 +211,7 @@ func TestAccDatabricksPermissionsForSqlWarehouses(t *testing.T) { }, ), }, - step{ + Step{ Template: config2, Check: func(s *terraform.State) error { id := getPermissionId(s) diff --git a/internal/acceptance/pipeline_test.go b/internal/acceptance/pipeline_test.go index 7b78e2c23c..d56f651cac 100644 --- a/internal/acceptance/pipeline_test.go +++ b/internal/acceptance/pipeline_test.go @@ -50,7 +50,7 @@ var ( ) func TestAccPipelineResource_CreatePipeline(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` locals { name = "pipeline-acceptance-{var.RANDOM}" @@ -95,7 +95,7 @@ func TestAccPipelineResource_CreatePipeline(t *testing.T) { } func TestAccAwsPipelineResource_CreatePipeline(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` locals { name = "pipeline-acceptance-aws-{var.STICKY_RANDOM}" @@ -135,7 +135,7 @@ func TestAccAwsPipelineResource_CreatePipeline(t *testing.T) { continuous = false } ` + dltNotebookResource, - }, step{ + }, Step{ Template: ` locals { name = "pipeline-acceptance-aws-{var.STICKY_RANDOM}" @@ -179,7 +179,7 @@ func TestAccAwsPipelineResource_CreatePipeline(t *testing.T) { } func TestAccPipelineResource_CreatePipelineWithoutWorkers(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` locals { name = "pipeline-acceptance-{var.RANDOM}" @@ -232,7 +232,7 @@ func TestAccPipelineResource_CreatePipelineWithoutWorkers(t *testing.T) { func TestAccPipelineResourcLastModified(t *testing.T) { var lastModified int64 - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` locals { name = "pipeline-acceptance-{var.STICKY_RANDOM}" @@ -284,7 +284,7 @@ func TestAccPipelineResourcLastModified(t *testing.T) { lastModified = pipeline.LastModified return nil }), - }, step{ + }, Step{ Template: ` locals { name = "pipeline-acceptance-{var.STICKY_RANDOM}" diff --git a/internal/acceptance/provider_test.go b/internal/acceptance/provider_test.go index d794bf6c2c..1b735d4d41 100644 --- a/internal/acceptance/provider_test.go +++ b/internal/acceptance/provider_test.go @@ -5,7 +5,7 @@ import ( ) func TestUcAccCreateProviderDb2Open(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_provider" "this" { name = "terraform-test-provider" diff --git a/internal/acceptance/quality_monitor_test.go b/internal/acceptance/quality_monitor_test.go index 8a65e9a1c9..d9e8a62c51 100644 --- a/internal/acceptance/quality_monitor_test.go +++ b/internal/acceptance/quality_monitor_test.go @@ -50,7 +50,7 @@ func TestUcAccQualityMonitor(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { t.Skipf("databricks_quality_monitor resource is not available on GCP") } - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: commonPartQualityMonitoring + ` resource "databricks_quality_monitor" "testMonitorInference" { @@ -117,7 +117,7 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { t.Skipf("databricks_quality_monitor resource is not available on GCP") 
} - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: commonPartQualityMonitoring + ` resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id @@ -132,7 +132,7 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { } } `, - }, step{ + }, Step{ Template: commonPartQualityMonitoring + ` resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id @@ -149,107 +149,3 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { `, }) } - -func TestUcAccQualityMonitorPluginFramework(t *testing.T) { - if os.Getenv("GOOGLE_CREDENTIALS") != "" { - t.Skipf("databricks_quality_monitor resource is not available on GCP") - } - unityWorkspaceLevel(t, step{ - Template: commonPartQualityMonitoring + ` - - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { - table_name = databricks_sql_table.myInferenceTable.id - assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" - output_schema_name = databricks_schema.things.id - inference_log = { - granularities = ["1 day"] - timestamp_col = "timestamp" - prediction_col = "prediction" - model_id_col = "model_id" - problem_type = "PROBLEM_TYPE_REGRESSION" - } - } - - resource "databricks_sql_table" "myTimeseries" { - catalog_name = databricks_catalog.sandbox.id - schema_name = databricks_schema.things.name - name = "bar{var.STICKY_RANDOM}_timeseries" - table_type = "MANAGED" - data_source_format = "DELTA" - - column { - name = "timestamp" - type = "int" - } - } - - resource "databricks_quality_monitor_pluginframework" "testMonitorTimeseries" { - table_name = databricks_sql_table.myTimeseries.id - assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" - output_schema_name = databricks_schema.things.id - time_series = { - granularities = ["1 day"] - timestamp_col = "timestamp" - } - } - - resource "databricks_sql_table" "mySnapshot" { - catalog_name = databricks_catalog.sandbox.id - schema_name = databricks_schema.things.name - name = "bar{var.STICKY_RANDOM}_snapshot" - table_type = "MANAGED" - data_source_format = "DELTA" - - column { - name = "timestamp" - type = "int" - } - } - - resource "databricks_quality_monitor_pluginframework" "testMonitorSnapshot" { - table_name = databricks_sql_table.mySnapshot.id - assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" - output_schema_name = databricks_schema.things.id - snapshot = { - } - } - `, - }) -} - -func TestUcAccUpdateQualityMonitorPluginFramework(t *testing.T) { - if os.Getenv("GOOGLE_CREDENTIALS") != "" { - t.Skipf("databricks_quality_monitor resource is not available on GCP") - } - unityWorkspaceLevel(t, step{ - Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { - table_name = databricks_sql_table.myInferenceTable.id - assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" - output_schema_name = databricks_schema.things.id - inference_log = { - granularities = ["1 day"] - timestamp_col = "timestamp" - prediction_col = "prediction" - model_id_col = "model_id" - problem_type = "PROBLEM_TYPE_REGRESSION" - } - } - `, - }, step{ - Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { - table_name = 
databricks_sql_table.myInferenceTable.id - assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" - output_schema_name = databricks_schema.things.id - inference_log = { - granularities = ["1 hour"] - timestamp_col = "timestamp" - prediction_col = "prediction" - model_id_col = "model_id" - problem_type = "PROBLEM_TYPE_REGRESSION" - } - } - `, - }) -} diff --git a/internal/acceptance/recipient_test.go b/internal/acceptance/recipient_test.go index e59bf90d1c..8afa234eee 100644 --- a/internal/acceptance/recipient_test.go +++ b/internal/acceptance/recipient_test.go @@ -6,7 +6,7 @@ import ( ) func TestUcAccCreateRecipientDb2Open(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_recipient" "db2open" { name = "{var.RANDOM}-terraform-db2open-recipient" @@ -22,7 +22,7 @@ func TestUcAccCreateRecipientDb2Open(t *testing.T) { } func TestUcAccCreateRecipientDb2DbAws(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_metastore" "recipient_metastore" { name = "{var.RANDOM}-terraform-recipient-metastore" @@ -46,13 +46,13 @@ func TestUcAccCreateRecipientDb2DbAws(t *testing.T) { } func TestUcAccUpdateRecipientDb2Open(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: recipientTemplateWithOwner("made by terraform", "account users"), - }, step{ + }, Step{ Template: recipientTemplateWithOwner("made by terraform -- updated comment", "account users"), - }, step{ + }, Step{ Template: recipientTemplateWithOwner("made by terraform -- updated comment", "{env.TEST_DATA_ENG_GROUP}"), - }, step{ + }, Step{ Template: recipientTemplateWithOwner("made by terraform -- updated comment 2", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), }) } diff --git a/internal/acceptance/registered_model_test.go b/internal/acceptance/registered_model_test.go index 9da0873aa2..f4293b3f98 100644 --- a/internal/acceptance/registered_model_test.go +++ b/internal/acceptance/registered_model_test.go @@ -5,8 +5,8 @@ import ( ) func TestUcAccRegisteredModel(t *testing.T) { - unityWorkspaceLevel(t, - step{ + UnityWorkspaceLevel(t, + Step{ Template: ` resource "databricks_registered_model" "model" { name = "terraform-test-registered-model-{var.STICKY_RANDOM}" @@ -24,7 +24,7 @@ func TestUcAccRegisteredModel(t *testing.T) { } `, }, - step{ + Step{ Template: ` resource "databricks_registered_model" "model" { name = "terraform-test-registered-model-{var.STICKY_RANDOM}" @@ -34,7 +34,7 @@ func TestUcAccRegisteredModel(t *testing.T) { } `, }, - step{ + Step{ Template: ` resource "databricks_registered_model" "model" { name = "terraform-test-registered-model-update-{var.STICKY_RANDOM}" diff --git a/internal/acceptance/restrict_workspace_admins_test.go b/internal/acceptance/restrict_workspace_admins_test.go index 3f6f7b4edd..fff187c66a 100644 --- a/internal/acceptance/restrict_workspace_admins_test.go +++ b/internal/acceptance/restrict_workspace_admins_test.go @@ -21,7 +21,7 @@ func TestAccRestrictWorkspaceAdminsSetting(t *testing.T) { } } ` - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: template, Check: resourceCheckWithState("databricks_restrict_workspace_admins_setting.this", func(ctx context.Context, client *common.DatabricksClient, state *terraform.InstanceState) error { @@ -39,7 +39,7 @@ func TestAccRestrictWorkspaceAdminsSetting(t *testing.T) { return nil }), }, - step{ + Step{ Template: template, Destroy: true, Check: 
resourceCheck("databricks_restrict_workspace_admins_setting.this", diff --git a/internal/acceptance/schema_test.go b/internal/acceptance/schema_test.go index eb4d59374c..e257597237 100644 --- a/internal/acceptance/schema_test.go +++ b/internal/acceptance/schema_test.go @@ -16,7 +16,7 @@ const catalogTemplate = ` ` func TestUcAccSchema(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: catalogTemplate + ` data "databricks_catalogs" "all" { depends_on = [databricks_catalog.sandbox] @@ -102,13 +102,13 @@ func getPredictiveOptimizationSetting(t *testing.T, enabled bool) string { func TestUcAccSchemaUpdate(t *testing.T) { loadUcwsEnv(t) - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: catalogTemplate + schemaTemplateWithOwner(t, "this database is managed by terraform", "account users"), - }, step{ + }, Step{ Template: catalogTemplate + schemaTemplateWithOwner(t, "this database is managed by terraform -- updated comment", "account users"), - }, step{ + }, Step{ Template: catalogTemplate + schemaTemplateWithOwner(t, "this database is managed by terraform -- updated comment", "{env.TEST_DATA_ENG_GROUP}"), - }, step{ + }, Step{ Template: catalogTemplate + schemaTemplateWithOwner(t, "this database is managed by terraform -- updated comment 2", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), }) } diff --git a/internal/acceptance/secret_acl_test.go b/internal/acceptance/secret_acl_test.go index cec49c6c95..9ba29433bc 100644 --- a/internal/acceptance/secret_acl_test.go +++ b/internal/acceptance/secret_acl_test.go @@ -13,7 +13,7 @@ import ( ) func TestAccSecretAclResource(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_group" "ds" { display_name = "data-scientists-{var.RANDOM}" @@ -52,7 +52,7 @@ func TestAccSecretAclResource(t *testing.T) { } func TestAccSecretAclResourceDefaultPrincipal(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_secret_scope" "app" { name = "app-{var.RANDOM}" diff --git a/internal/acceptance/secret_scope_test.go b/internal/acceptance/secret_scope_test.go index f743317822..fe3c11eb74 100644 --- a/internal/acceptance/secret_scope_test.go +++ b/internal/acceptance/secret_scope_test.go @@ -18,7 +18,7 @@ import ( func TestAccSecretScopeResource(t *testing.T) { scope := qa.RandomName("tf-") - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_secret_scope" "my_scope" { name = "%s" @@ -45,7 +45,7 @@ func TestAccSecretScopeResource(t *testing.T) { }), ), ExpectNonEmptyPlan: true, - }, step{ + }, Step{ Template: fmt.Sprintf(` resource "databricks_secret_scope" "my_scope" { name = "%s" @@ -63,7 +63,7 @@ func TestAccSecretScopeResourceAkvWithSp(t *testing.T) { t.Skipf("service principal isn't defined") } - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_secret_scope" "my_scope" { name = "tf-{var.RANDOM}" diff --git a/internal/acceptance/secret_test.go b/internal/acceptance/secret_test.go index c77874568e..7daca20a61 100644 --- a/internal/acceptance/secret_test.go +++ b/internal/acceptance/secret_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccSecretResource(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_secret_scope" "this" { name = "tf-scope-{var.RANDOM}" diff --git a/internal/acceptance/service_principal_test.go b/internal/acceptance/service_principal_test.go index b1acad6b95..d1b75198e8 100644 --- 
a/internal/acceptance/service_principal_test.go +++ b/internal/acceptance/service_principal_test.go @@ -33,13 +33,13 @@ func TestAccServicePrincipalHomeDeleteSuccess(t *testing.T) { force_delete_home_dir = true }` var spId string - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: template, Check: func(s *terraform.State) error { spId = s.RootModule().Resources["databricks_service_principal.a"].Primary.Attributes["application_id"] return nil }, - }, step{ + }, Step{ Template: template, Destroy: true, Check: func(s *terraform.State) error { @@ -69,13 +69,13 @@ func TestAccServicePrinicpalHomeDeleteNotDeleted(t *testing.T) { force_delete_home_dir = false }` var appId string - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: template, Check: func(s *terraform.State) error { appId = s.RootModule().Resources["databricks_service_principal.a"].Primary.Attributes["application_id"] return provisionHomeFolder(context.Background(), s, "databricks_service_principal.a", appId) }, - }, step{ + }, Step{ Template: template, Destroy: true, Check: func(s *terraform.State) error { @@ -93,9 +93,9 @@ func TestAccServicePrinicpalHomeDeleteNotDeleted(t *testing.T) { func TestMwsAccServicePrincipalResourceOnAzure(t *testing.T) { GetEnvOrSkipTest(t, "ARM_CLIENT_ID") azureSpnRenamed := strings.ReplaceAll(azureSpn, `"SPN `, `"SPN Renamed `) - accountLevel(t, step{ + AccountLevel(t, Step{ Template: azureSpn, - }, step{ + }, Step{ Template: azureSpnRenamed, }) } @@ -103,9 +103,9 @@ func TestMwsAccServicePrincipalResourceOnAzure(t *testing.T) { func TestAccServicePrincipalResourceOnAzure(t *testing.T) { GetEnvOrSkipTest(t, "ARM_CLIENT_ID") azureSpnRenamed := strings.ReplaceAll(azureSpn, `"SPN `, `"SPN Renamed `) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: azureSpn, - }, step{ + }, Step{ Template: azureSpnRenamed, }) } @@ -113,9 +113,9 @@ func TestAccServicePrincipalResourceOnAzure(t *testing.T) { func TestMwsAccServicePrincipalResourceOnAws(t *testing.T) { GetEnvOrSkipTest(t, "TEST_ROOT_BUCKET") awsSpnRenamed := strings.ReplaceAll(awsSpn, `"SPN `, `"SPN Renamed `) - accountLevel(t, step{ + AccountLevel(t, Step{ Template: awsSpn, - }, step{ + }, Step{ Template: awsSpnRenamed, }) } @@ -123,9 +123,9 @@ func TestMwsAccServicePrincipalResourceOnAws(t *testing.T) { func TestAccServicePrincipalResourceOnAws(t *testing.T) { GetEnvOrSkipTest(t, "TEST_EC2_INSTANCE_PROFILE") awsSpnRenamed := strings.ReplaceAll(awsSpn, `"SPN `, `"SPN Renamed `) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: awsSpn, - }, step{ + }, Step{ Template: awsSpnRenamed, }) } diff --git a/internal/acceptance/share_test.go b/internal/acceptance/share_test.go index afc3667ca4..b4a4034667 100644 --- a/internal/acceptance/share_test.go +++ b/internal/acceptance/share_test.go @@ -71,7 +71,7 @@ const preTestTemplateUpdate = ` ` func TestUcAccCreateShare(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: preTestTemplate + ` resource "databricks_share" "myshare" { name = "{var.STICKY_RANDOM}-terraform-delta-share" @@ -125,13 +125,13 @@ func shareTemplateWithOwner(comment string, owner string) string { } func TestUcAccUpdateShare(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("c", "account users"), - }, step{ + }, Step{ Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("e", "account users"), - }, step{ + }, Step{ Template: preTestTemplate + 
preTestTemplateUpdate + shareTemplateWithOwner("e", "{env.TEST_DATA_ENG_GROUP}"), - }, step{ + }, Step{ Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("f", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), }) } diff --git a/internal/acceptance/sql_alert_test.go b/internal/acceptance/sql_alert_test.go index 5bf58752a3..612df0653e 100644 --- a/internal/acceptance/sql_alert_test.go +++ b/internal/acceptance/sql_alert_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccAlert(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_sql_query" "this" { data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" @@ -31,7 +31,7 @@ func TestAccAlert(t *testing.T) { muted = false } }`, - }, step{ + }, Step{ Template: ` resource "databricks_sql_query" "this" { data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" diff --git a/internal/acceptance/sql_dashboard_test.go b/internal/acceptance/sql_dashboard_test.go index c3ddf5f492..5c69b8331b 100644 --- a/internal/acceptance/sql_dashboard_test.go +++ b/internal/acceptance/sql_dashboard_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccDashboard(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_sql_dashboard" "d1" { name = "tf-{var.RANDOM}-dashboard" diff --git a/internal/acceptance/sql_endpoint_test.go b/internal/acceptance/sql_endpoint_test.go index 53496e8f2b..53d678c2ed 100644 --- a/internal/acceptance/sql_endpoint_test.go +++ b/internal/acceptance/sql_endpoint_test.go @@ -11,7 +11,7 @@ import ( ) func TestAccSQLEndpoint(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_sql_endpoint" "this" { name = "tf-{var.RANDOM}" @@ -25,7 +25,7 @@ func TestAccSQLEndpoint(t *testing.T) { } } }`, - }, step{ + }, Step{ Template: ` resource "databricks_sql_endpoint" "that" { name = "tf-{var.RANDOM}" diff --git a/internal/acceptance/sql_global_config_test.go b/internal/acceptance/sql_global_config_test.go index 6bea052860..03c67e356e 100644 --- a/internal/acceptance/sql_global_config_test.go +++ b/internal/acceptance/sql_global_config_test.go @@ -38,7 +38,7 @@ resource "databricks_sql_global_config" "this" { func TestAccSQLGlobalConfig(t *testing.T) { loadWorkspaceEnv(t) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ PreConfig: func() { ctx := context.Background() _, err := lock.Acquire(ctx, getSqlGlobalConfigLockable(t), lock.InTest(t)) @@ -64,7 +64,7 @@ func TestAccSQLGlobalConfigServerless(t *testing.T) { } } - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ PreConfig: func() { ctx := context.Background() _, err := lock.Acquire(ctx, getSqlGlobalConfigLockable(t), lock.InTest(t)) @@ -72,10 +72,10 @@ func TestAccSQLGlobalConfigServerless(t *testing.T) { }, Template: makeSqlGlobalConfig("enable_serverless_compute = true"), Check: checkServerlessEnabled(true), - }, step{ + }, Step{ Template: makeSqlGlobalConfig(""), Check: checkServerlessEnabled(true), - }, step{ + }, Step{ Template: makeSqlGlobalConfig("enable_serverless_compute = false"), Check: checkServerlessEnabled(false), }) diff --git a/internal/acceptance/sql_permissions_test.go b/internal/acceptance/sql_permissions_test.go index 057e537516..23aa37a51f 100644 --- a/internal/acceptance/sql_permissions_test.go +++ b/internal/acceptance/sql_permissions_test.go @@ -36,7 +36,7 @@ func TestAccTableACL(t *testing.T) { require.NoError(t, err) require.False(t, cr.Failed(), cr.Error()) }) - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: 
` resource "databricks_sql_permissions" "this" { table = "` + tableName + `" diff --git a/internal/acceptance/sql_query_test.go b/internal/acceptance/sql_query_test.go index b106593c32..bc49c9ee6f 100644 --- a/internal/acceptance/sql_query_test.go +++ b/internal/acceptance/sql_query_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccQuery(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_sql_query" "q1" { data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" diff --git a/internal/acceptance/sql_table_test.go b/internal/acceptance/sql_table_test.go index e6c1cc918f..0f0a87dec9 100644 --- a/internal/acceptance/sql_table_test.go +++ b/internal/acceptance/sql_table_test.go @@ -14,7 +14,7 @@ func TestUcAccResourceSqlTable_Managed(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { skipf(t)("databricks_sql_table resource not available on GCP") } - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_schema" "this" { name = "{var.STICKY_RANDOM}" @@ -42,7 +42,7 @@ func TestUcAccResourceSqlTable_Managed(t *testing.T) { comment = "this table is managed by terraform" owner = "account users" }`, - }, step{ + }, Step{ Template: ` resource "databricks_schema" "this" { name = "{var.STICKY_RANDOM}" @@ -73,7 +73,7 @@ func TestUcAccResourceSqlTable_Managed(t *testing.T) { } func TestUcAccResourceSqlTable_External(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_storage_credential" "external" { name = "cred-{var.RANDOM}" @@ -113,7 +113,7 @@ func TestUcAccResourceSqlTable_View(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { skipf(t)("databricks_sql_table resource not available on GCP") } - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_schema" "this" { name = "{var.STICKY_RANDOM}" @@ -164,7 +164,7 @@ func TestUcAccResourceSqlTable_WarehousePartition(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { skipf(t)("databricks_sql_table resource not available on GCP") } - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_sql_endpoint" "this" { name = "tf-{var.RANDOM}" @@ -215,7 +215,7 @@ func TestUcAccResourceSqlTable_Liquid(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { skipf(t)("databricks_sql_table resource not available on GCP") } - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_schema" "this" { name = "{var.STICKY_RANDOM}" @@ -246,7 +246,7 @@ func TestUcAccResourceSqlTable_Liquid(t *testing.T) { cluster_keys = ["id"] comment = "this table is managed by terraform" }`, - }, step{ + }, Step{ Template: ` resource "databricks_schema" "this" { name = "{var.STICKY_RANDOM}" @@ -320,9 +320,9 @@ func TestUcAccResourceSqlTable_RenameColumn(t *testing.T) { skipf(t)("databricks_sql_table resource not available on GCP") } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "new_name", Type: "string", Nullable: true, Comment: "comment"}}), }) } @@ -346,7 +346,7 @@ func TestUcAccResourceSqlTable_ColumnTypeSuppressDiff(t *testing.T) { } tableName := RandomName() columnName := RandomName() - unityWorkspaceLevel(t, step{ 
+ UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplateWithColumnTypeUpdates(tableName, columnName, "0", []string{ "integer", "long", @@ -357,7 +357,7 @@ func TestUcAccResourceSqlTable_ColumnTypeSuppressDiff(t *testing.T) { "dec", "numeric", }), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplateWithColumnTypeUpdates(tableName, columnName, "1", []string{ "INTEGER", "LONG", @@ -368,7 +368,7 @@ func TestUcAccResourceSqlTable_ColumnTypeSuppressDiff(t *testing.T) { "DEC", "NUMERIC", }), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplateWithColumnTypeUpdates(tableName, columnName, "2", []string{ "int", "bigint", @@ -387,9 +387,9 @@ func TestUcAccResourceSqlTable_AddColumnComment(t *testing.T) { skipf(t)("databricks_sql_table resource not available on GCP") } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "new comment"}}), }) } @@ -399,9 +399,9 @@ func TestUcAccResourceSqlTable_DropColumnNullable(t *testing.T) { skipf(t)("databricks_sql_table resource not available on GCP") } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: false, Comment: "comment"}}), }) } @@ -411,9 +411,9 @@ func TestUcAccResourceSqlTable_MultipleColumnUpdates(t *testing.T) { skipf(t)("databricks_sql_table resource not available on GCP") } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: false, Comment: "new comment"}}), }) } @@ -424,9 +424,9 @@ func TestUcAccResourceSqlTable_ChangeColumnTypeThrows(t *testing.T) { } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "int", Nullable: true, Comment: "comment"}}), ExpectError: typeUpdateErrorRegex, }) @@ -437,12 +437,12 @@ func TestUcAccResourceSqlTable_DropColumn(t *testing.T) { skipf(t)("databricks_sql_table resource not available on GCP") } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{ {Name: "name", Type: "string", Nullable: true, Comment: "comment"}, {Name: "nametwo", Type: "string", Nullable: true, Comment: "comment"}, }), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), }) } @@ -452,13 +452,13 @@ func 
TestUcAccResourceSqlTable_DropMultipleColumns(t *testing.T) { skipf(t)("databricks_sql_table resource not available on GCP") } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{ {Name: "name", Type: "string", Nullable: true, Comment: "comment"}, {Name: "nametwo", Type: "string", Nullable: true, Comment: "comment"}, {Name: "namethree", Type: "string", Nullable: true, Comment: "comment"}, }), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), }) } @@ -468,9 +468,9 @@ func TestUcAccResourceSqlTable_AddColumn(t *testing.T) { skipf(t)("databricks_sql_table resource not available on GCP") } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{ {Name: "name", Type: "string", Nullable: true, Comment: "comment"}, {Name: "nametwo", Type: "string", Nullable: true, Comment: "comment"}, @@ -483,9 +483,9 @@ func TestUcAccResourceSqlTable_AddMultipleColumns(t *testing.T) { skipf(t)("databricks_sql_table resource not available on GCP") } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{ {Name: "name", Type: "string", Nullable: true, Comment: "comment"}, {Name: "nametwo", Type: "string", Nullable: true, Comment: "comment"}, @@ -500,9 +500,9 @@ func TestUcAccResourceSqlTable_AddColumnAndUpdateThrows(t *testing.T) { } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: true, Comment: "comment"}}), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{ {Name: "name", Type: "string", Nullable: false, Comment: "new comment"}, {Name: "nametwo", Type: "string", Nullable: true, Comment: "comment"}, @@ -517,12 +517,12 @@ func TestUcAccResourceSqlTable_DropColumnAndUpdateThrows(t *testing.T) { } tableName := RandomName() - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{ {Name: "name", Type: "string", Nullable: true, Comment: "comment"}, {Name: "nametwo", Type: "string", Nullable: true, Comment: "comment"}, }), - }, step{ + }, Step{ Template: constructManagedSqlTableTemplate(tableName, []catalog.SqlColumnInfo{{Name: "name", Type: "string", Nullable: false, Comment: "new comment"}}), ExpectError: inlineAndMembershipChangeErrorRegex, }) diff --git a/internal/acceptance/storage_credential_test.go b/internal/acceptance/storage_credential_test.go index 92b5ff5871..c78598f729 100644 --- a/internal/acceptance/storage_credential_test.go +++ b/internal/acceptance/storage_credential_test.go @@ -7,7 +7,7 @@ import ( func TestUcAccStorageCredential(t *testing.T) { loadUcwsEnv(t) if isAws(t) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ 
Template: ` resource "databricks_storage_credential" "external" { name = "cred-{var.RANDOM}" @@ -29,7 +29,7 @@ func TestUcAccStorageCredential(t *testing.T) { }`, }) } else if isGcp(t) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_storage_credential" "external" { name = "cred-{var.RANDOM}" @@ -42,7 +42,7 @@ func TestUcAccStorageCredential(t *testing.T) { } func TestAccStorageCredentialOwner(t *testing.T) { - unityAccountLevel(t, step{ + UnityAccountLevel(t, Step{ Template: ` resource "databricks_service_principal" "test_acc_storage_credential_owner" { display_name = "test_acc_storage_credential_owner {var.RANDOM}" diff --git a/internal/acceptance/system_schema_test.go b/internal/acceptance/system_schema_test.go index ceef68e502..89474a6853 100644 --- a/internal/acceptance/system_schema_test.go +++ b/internal/acceptance/system_schema_test.go @@ -9,7 +9,7 @@ func TestUcAccResourceSystemSchema(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { t.Skipf("databricks_system_schema resource not available on GCP") } - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: ` resource "databricks_system_schema" "this" { schema = "access" diff --git a/internal/acceptance/token_test.go b/internal/acceptance/token_test.go index 0a594aecc2..2673400caf 100644 --- a/internal/acceptance/token_test.go +++ b/internal/acceptance/token_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccTokenResource(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_token" "this" { lifetime_seconds = 6000 comment = "Testing token" diff --git a/internal/acceptance/user_role_test.go b/internal/acceptance/user_role_test.go index 9828b1c050..9e10cd72b3 100644 --- a/internal/acceptance/user_role_test.go +++ b/internal/acceptance/user_role_test.go @@ -5,7 +5,7 @@ import ( ) func TestAccUserRole(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: ` resource "databricks_user" "this" { user_name = "{var.RANDOM}@example.com" diff --git a/internal/acceptance/user_test.go b/internal/acceptance/user_test.go index 9368bb88bf..07e74f1ad7 100644 --- a/internal/acceptance/user_test.go +++ b/internal/acceptance/user_test.go @@ -20,7 +20,7 @@ import ( // https://github.com/databricks/terraform-provider-databricks/issues/1097 func TestAccForceUserImport(t *testing.T) { username := qa.RandomEmail() - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `data "databricks_current_user" "me" {}`, Check: func(s *terraform.State) error { w, err := databricks.NewWorkspaceClient() @@ -40,7 +40,7 @@ func TestAccForceUserImport(t *testing.T) { } return nil }, - }, step{ + }, Step{ Template: `resource "databricks_user" "this" { user_name = "` + username + `" force = true @@ -50,13 +50,13 @@ func TestAccForceUserImport(t *testing.T) { func TestAccUserHomeDeleteHasNoEffectInAccount(t *testing.T) { username := qa.RandomEmail() - accountLevel(t, step{ + AccountLevel(t, Step{ Template: ` resource "databricks_user" "first" { user_name = "` + username + `" force_delete_home_dir = true }`, - }, step{ + }, Step{ Template: ` resource "databricks_user" "second" { user_name = "{var.RANDOM}@example.com" @@ -71,9 +71,9 @@ func TestAccUserHomeDelete(t *testing.T) { user_name = "` + username + `" force_delete_home_dir = true }` - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: template, - }, step{ + }, Step{ Template: template, Destroy: true, Check: func(s *terraform.State) error { @@ -115,12 +115,12 @@ func 
TestAccUserHomeDeleteNotDeleted(t *testing.T) { resource "databricks_user" "a" { user_name = "` + username + `" }` - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: template, Check: func(s *terraform.State) error { return provisionHomeFolder(context.Background(), s, "databricks_user.a", username) }, - }, step{ + }, Step{ Template: template, Destroy: true, Check: func(s *terraform.State) error { @@ -154,7 +154,7 @@ func TestAccUserResource(t *testing.T) { allow_instance_pool_create = true } ` - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: differentUsers, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("databricks_user.first", "allow_cluster_create", "false"), @@ -164,7 +164,7 @@ func TestAccUserResource(t *testing.T) { resource.TestCheckResourceAttr("databricks_user.third", "allow_cluster_create", "false"), resource.TestCheckResourceAttr("databricks_user.third", "allow_instance_pool_create", "true"), ), - }, step{ + }, Step{ Template: differentUsers, }) } @@ -174,12 +174,12 @@ func TestAccUserResourceCaseInsensitive(t *testing.T) { csUser := `resource "databricks_user" "first" { user_name = "` + username + `" }` - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: csUser, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("databricks_user.first", "user_name", strings.ToLower(username)), ), - }, step{ + }, Step{ Template: csUser, }) } diff --git a/internal/acceptance/vector_search_test.go b/internal/acceptance/vector_search_test.go index 084891c5d0..2442d0fa05 100644 --- a/internal/acceptance/vector_search_test.go +++ b/internal/acceptance/vector_search_test.go @@ -15,7 +15,7 @@ func TestUcAccVectorSearchEndpoint(t *testing.T) { name := fmt.Sprintf("terraform-test-vector-search-%[1]s", acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)) - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: fmt.Sprintf(` resource "databricks_vector_search_endpoint" "this" { name = "%s" diff --git a/internal/acceptance/volume_test.go b/internal/acceptance/volume_test.go index 8ffa282358..4b6380460b 100644 --- a/internal/acceptance/volume_test.go +++ b/internal/acceptance/volume_test.go @@ -26,7 +26,7 @@ resource "databricks_external_location" "some" { }` func TestUcAccVolumesResourceWithoutInitialOwnerAWSFullLifecycle(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: prefixTestTemplate + ` resource "databricks_volume" "this" { name = "name-abc" @@ -36,7 +36,7 @@ func TestUcAccVolumesResourceWithoutInitialOwnerAWSFullLifecycle(t *testing.T) { volume_type = "EXTERNAL" storage_location = databricks_external_location.some.url }`, - }, step{ + }, Step{ Template: prefixTestTemplate + ` resource "databricks_volume" "this" { name = "name-abc" @@ -46,7 +46,7 @@ func TestUcAccVolumesResourceWithoutInitialOwnerAWSFullLifecycle(t *testing.T) { volume_type = "EXTERNAL" storage_location = databricks_external_location.some.url }`, - }, step{ + }, Step{ Template: prefixTestTemplate + ` resource "databricks_volume" "this" { name = "name-abc" @@ -57,7 +57,7 @@ func TestUcAccVolumesResourceWithoutInitialOwnerAWSFullLifecycle(t *testing.T) { volume_type = "EXTERNAL" storage_location = databricks_external_location.some.url }`, - }, step{ + }, Step{ Template: prefixTestTemplate + ` resource "databricks_volume" "this" { name = "name-def" @@ -72,7 +72,7 @@ func TestUcAccVolumesResourceWithoutInitialOwnerAWSFullLifecycle(t *testing.T) { } func 
TestUcAccVolumesResourceWithInitialOwnerAWSFullLifecycle(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: prefixTestTemplate + ` resource "databricks_volume" "this" { name = "name-abc" @@ -83,7 +83,7 @@ func TestUcAccVolumesResourceWithInitialOwnerAWSFullLifecycle(t *testing.T) { volume_type = "EXTERNAL" storage_location = databricks_external_location.some.url }`, - }, step{ + }, Step{ Template: prefixTestTemplate + ` resource "databricks_volume" "this" { name = "name-abc" @@ -94,7 +94,7 @@ func TestUcAccVolumesResourceWithInitialOwnerAWSFullLifecycle(t *testing.T) { volume_type = "EXTERNAL" storage_location = databricks_external_location.some.url }`, - }, step{ + }, Step{ Template: prefixTestTemplate + ` resource "databricks_volume" "this" { name = "name-def" diff --git a/internal/acceptance/workspace_binding_test.go b/internal/acceptance/workspace_binding_test.go index 24fcd5bb01..f55f4ebd72 100644 --- a/internal/acceptance/workspace_binding_test.go +++ b/internal/acceptance/workspace_binding_test.go @@ -65,7 +65,7 @@ func workspaceBindingTemplateWithWorkspaceId(workspaceId string) string { } func TestUcAccWorkspaceBindingToOtherWorkspace(t *testing.T) { - unityWorkspaceLevel(t, step{ + UnityWorkspaceLevel(t, Step{ Template: workspaceBindingTemplateWithWorkspaceId("{env.DUMMY_WORKSPACE_ID}"), }) } diff --git a/internal/acceptance/workspace_conf_test.go b/internal/acceptance/workspace_conf_test.go index 5e91bcb285..f0468ea6b2 100644 --- a/internal/acceptance/workspace_conf_test.go +++ b/internal/acceptance/workspace_conf_test.go @@ -24,7 +24,7 @@ func assertEnableIpAccessList(t *testing.T, expected string) { } func TestAccWorkspaceConfFullLifecycle(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_workspace_conf" "this" { custom_config = { "enableIpAccessLists": true @@ -34,7 +34,7 @@ func TestAccWorkspaceConfFullLifecycle(t *testing.T) { assertEnableIpAccessList(t, "true") return nil }, - }, step{ + }, Step{ // Set enableIpAccessLists to false Template: `resource "databricks_workspace_conf" "this" { custom_config = { @@ -50,7 +50,7 @@ func TestAccWorkspaceConfFullLifecycle(t *testing.T) { assert.Equal(t, "false", conf.Primary.Attributes["custom_config.enableIpAccessLists"]) return nil }, - }, step{ + }, Step{ // Set invalid configuration Template: `resource "databricks_workspace_conf" "this" { custom_config = { @@ -59,7 +59,7 @@ func TestAccWorkspaceConfFullLifecycle(t *testing.T) { }`, // Assert on server side error returned ExpectError: regexp.MustCompile(`cannot update workspace conf: Invalid keys`), - }, step{ + }, Step{ // Set enableIpAccessLists to true with strange case and maxTokenLifetimeDays to verify // failed deletion case Template: `resource "databricks_workspace_conf" "this" { diff --git a/internal/acceptance/workspace_file_test.go b/internal/acceptance/workspace_file_test.go index ada1782612..9a9a57c8e0 100644 --- a/internal/acceptance/workspace_file_test.go +++ b/internal/acceptance/workspace_file_test.go @@ -5,12 +5,12 @@ import ( ) func TestAccWorkspaceFile(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_workspace_file" "this" { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" path = "/Shared/provider-test/xx_{var.RANDOM}" }`, - }, step{ + }, Step{ Template: `resource "databricks_workspace_file" "this" { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" path = "/Shared/provider-test/xx_{var.RANDOM}_renamed" 
@@ -19,7 +19,7 @@ func TestAccWorkspaceFile(t *testing.T) { } func TestAccWorkspaceFileEmptyFile(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_workspace_file" "empty" { source = "{var.CWD}/../../workspace/acceptance/testdata/empty_file" path = "/Shared/provider-test/empty_{var.RANDOM}" @@ -28,12 +28,12 @@ func TestAccWorkspaceFileEmptyFile(t *testing.T) { } func TestAccWorkspaceFileBase64(t *testing.T) { - workspaceLevel(t, step{ + WorkspaceLevel(t, Step{ Template: `resource "databricks_workspace_file" "this2" { content_base64 = "YWJjCg==" path = "/Shared/provider-test/xx2_{var.RANDOM}" }`, - }, step{ + }, Step{ Template: `resource "databricks_workspace_file" "this2" { content_base64 = "YWJjCg==" path = "/Shared/provider-test/xx2_{var.RANDOM}_renamed" diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go new file mode 100644 index 0000000000..41039b1d13 --- /dev/null +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go @@ -0,0 +1,153 @@ +package qualitymonitor_test + +import ( + "os" + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" +) + +var commonPartQualityMonitoring = `resource "databricks_catalog" "sandbox" { + name = "sandbox{var.STICKY_RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + force_destroy = true +} + +resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things{var.STICKY_RANDOM}" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } +} + +resource "databricks_sql_table" "myInferenceTable" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar{var.STICKY_RANDOM}_inference" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "model_id" + type = "int" + } + column { + name = "timestamp" + type = "int" + } + column { + name = "prediction" + type = "int" + } +} + +` + +func TestUcAccQualityMonitorPluginFramework(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + t.Skipf("databricks_quality_monitor resource is not available on GCP") + } + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: commonPartQualityMonitoring + ` + + resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log = { + granularities = ["1 day"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + + resource "databricks_sql_table" "myTimeseries" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar{var.STICKY_RANDOM}_timeseries" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "timestamp" + type = "int" + } + } + + resource "databricks_quality_monitor_pluginframework" "testMonitorTimeseries" { + table_name = databricks_sql_table.myTimeseries.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" + output_schema_name = 
databricks_schema.things.id + time_series = { + granularities = ["1 day"] + timestamp_col = "timestamp" + } + } + + resource "databricks_sql_table" "mySnapshot" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar{var.STICKY_RANDOM}_snapshot" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "timestamp" + type = "int" + } + } + + resource "databricks_quality_monitor_pluginframework" "testMonitorSnapshot" { + table_name = databricks_sql_table.mySnapshot.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" + output_schema_name = databricks_schema.things.id + snapshot = { + } + } + `, + }) +} + +func TestUcAccUpdateQualityMonitorPluginFramework(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + t.Skipf("databricks_quality_monitor resource is not available on GCP") + } + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log = { + granularities = ["1 day"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }, acceptance.Step{ + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log = { + granularities = ["1 hour"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }) +} diff --git a/internal/tfreflect/reflect_utils.go b/internal/tfreflect/reflect_utils.go index e98fcae152..8df201ef32 100644 --- a/internal/tfreflect/reflect_utils.go +++ b/internal/tfreflect/reflect_utils.go @@ -1,6 +1,8 @@ package tfreflect -import "reflect" +import ( + "reflect" +) type Field struct { StructField reflect.StructField From 0cb0c8acc4fc954617b338cf9a749aa8a88724b5 Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Wed, 4 Sep 2024 17:41:17 +0200 Subject: [PATCH 02/99] [Internal] Move volumes test next to plugin framework data source (#3995) ## Changes - Moving the test file to be next to the data source definition ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- internal/acceptance/data_volumes_test.go | 48 --------------- .../resources/volume/data_volumes_test.go | 59 +++++++++++++++++++ 2 files changed, 59 insertions(+), 48 deletions(-) create mode 100644 internal/providers/pluginfw/resources/volume/data_volumes_test.go diff --git a/internal/acceptance/data_volumes_test.go b/internal/acceptance/data_volumes_test.go index 67581ea3fe..8d78ef8afb 100644 --- a/internal/acceptance/data_volumes_test.go +++ b/internal/acceptance/data_volumes_test.go @@ -58,51 +58,3 @@ func TestUcAccDataSourceVolumes(t *testing.T) { 
Check: checkDataSourceVolumesPopulated(t), }) } - -func checkDataSourceVolumesPluginFrameworkPopulated(t *testing.T) func(s *terraform.State) error { - return func(s *terraform.State) error { - _, ok := s.Modules[0].Resources["data.databricks_volumes_pluginframework.this"] - require.True(t, ok, "data.databricks_volumes_pluginframework.this has to be there") - num_volumes, _ := strconv.Atoi(s.Modules[0].Outputs["volumes"].Value.(string)) - assert.GreaterOrEqual(t, num_volumes, 1) - return nil - } -} - -func TestUcAccDataSourceVolumesPluginFramework(t *testing.T) { - UnityWorkspaceLevel(t, Step{ - Template: ` - resource "databricks_catalog" "sandbox" { - name = "sandbox{var.RANDOM}" - comment = "this catalog is managed by terraform" - properties = { - purpose = "testing" - } - } - - resource "databricks_schema" "things" { - catalog_name = databricks_catalog.sandbox.id - name = "things{var.RANDOM}" - comment = "this database is managed by terraform" - properties = { - kind = "various" - } - } - resource "databricks_volume" "this" { - name = "volume_data_source_test" - catalog_name = databricks_catalog.sandbox.name - schema_name = databricks_schema.things.name - volume_type = "MANAGED" - } - data "databricks_volumes_pluginframework" "this" { - catalog_name = databricks_catalog.sandbox.name - schema_name = databricks_schema.things.name - depends_on = [ databricks_volume.this ] - } - output "volumes" { - value = length(data.databricks_volumes_pluginframework.this.ids) - } - `, - Check: checkDataSourceVolumesPluginFrameworkPopulated(t), - }) -} diff --git a/internal/providers/pluginfw/resources/volume/data_volumes_test.go b/internal/providers/pluginfw/resources/volume/data_volumes_test.go new file mode 100644 index 0000000000..89177583fb --- /dev/null +++ b/internal/providers/pluginfw/resources/volume/data_volumes_test.go @@ -0,0 +1,59 @@ +package volume_test + +import ( + "strconv" + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func checkDataSourceVolumesPluginFrameworkPopulated(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + _, ok := s.Modules[0].Resources["data.databricks_volumes_pluginframework.this"] + require.True(t, ok, "data.databricks_volumes_pluginframework.this has to be there") + num_volumes, _ := strconv.Atoi(s.Modules[0].Outputs["volumes"].Value.(string)) + assert.GreaterOrEqual(t, num_volumes, 1) + return nil + } +} + +func TestUcAccDataSourceVolumesPluginFramework(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + } + + resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things{var.RANDOM}" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } + } + resource "databricks_volume" "this" { + name = "volume_data_source_test" + catalog_name = databricks_catalog.sandbox.name + schema_name = databricks_schema.things.name + volume_type = "MANAGED" + } + data "databricks_volumes_pluginframework" "this" { + catalog_name = databricks_catalog.sandbox.name + schema_name = databricks_schema.things.name + depends_on = [ databricks_volume.this ] + } + output "volumes" { + value = 
length(data.databricks_volumes_pluginframework.this.ids) + } + `, + Check: checkDataSourceVolumesPluginFrameworkPopulated(t), + }) +} From 90191742807029cfc2c9dfab1530608bc12ff7b5 Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Wed, 4 Sep 2024 17:43:30 +0200 Subject: [PATCH 03/99] [Doc] Update documentation regarding authentication with Azure-managed Service Principal using GITHUB OIDC (#3932) ## Changes Update documentation regarding authentication with Azure-managed Service Principal using GITHUB OIDC. ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/index.md | 47 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/docs/index.md b/docs/index.md index 57349b07c7..05081f57ff 100644 --- a/docs/index.md +++ b/docs/index.md @@ -238,7 +238,7 @@ Alternatively, you can provide this value as an environment variable `DATABRICKS * `profile` - (optional) Connection profile specified within ~/.databrickscfg. Please check [connection profiles section](https://docs.databricks.com/dev-tools/cli/index.html#connection-profiles) for more details. This field defaults to `DEFAULT`. * `account_id` - (optional for workspace-level operations, but required for account-level) Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). Alternatively, you can provide this value as an environment variable `DATABRICKS_ACCOUNT_ID`. Only has effect when `host = "https://accounts.cloud.databricks.com/"`, and is currently used to provision account admins via [databricks_user](resources/user.md). In the future releases of the provider this property will also be used specify account for `databricks_mws_*` resources as well. -* `auth_type` - (optional) enforce specific auth type to be used in very rare cases, where a single Terraform state manages Databricks workspaces on more than one cloud and `more than one authorization method configured` error is a false positive. Valid values are `pat`, `basic`, `oauth-m2m`, `azure-client-secret`, `azure-msi`, `azure-cli`, `google-credentials`, and `google-id`. +* `auth_type` - (optional) enforce specific auth type to be used in very rare cases, where a single Terraform state manages Databricks workspaces on more than one cloud and `more than one authorization method configured` error is a false positive. Valid values are `pat`, `basic`, `oauth-m2m`, `azure-client-secret`, `azure-msi`, `azure-cli`, `github-oidc-azure`, `google-credentials`, and `google-id`. ## Special configurations for Azure @@ -284,7 +284,7 @@ resource "databricks_user" "my-user" { } ``` -### Authenticating with Azure-managed Service Principal +### Authenticating with Azure-managed Service Principal using Client Secret ```hcl provider "azurerm" { @@ -326,6 +326,49 @@ There are `ARM_*` environment variables provide a way to share authentication co When a workspace is created using a service principal account, that service principal account is automatically added to the workspace as a member of the admins group. To add a new service principal account to an existing workspace, create a [databricks_service_principal](resources/service_principal.md). 
+### Authenticating with Azure-managed Service Principal using GITHUB OIDC + +```hcl +provider "azurerm" { + client_id = var.client_id + tenant_id = var.tenant_id + subscription_id = var.subscription_id + use_oidc = true +} + +resource "azurerm_databricks_workspace" "this" { + location = "centralus" + name = "my-workspace-name" + resource_group_name = var.resource_group + sku = "premium" +} + +provider "databricks" { + host = azurerm_databricks_workspace.this.workspace_url + auth_type = "github-oidc-azure" + azure_workspace_resource_id = azurerm_databricks_workspace.this.id + azure_client_id = var.client_id + azure_tenant_id = var.tenant_id +} + +resource "databricks_user" "my-user" { + user_name = "test-user@databricks.com" +} +``` + +Follow the [Configuring OpenID Connect in Azure](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/configuring-openid-connect-in-azure) guide. You can then use the Azure service principal to authenticate in Databricks. + +* `azure_workspace_resource_id` - (optional) `id` attribute of [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace) resource. Combination of subscription id, resource group name, and workspace name. Required with `azure_use_msi` or `azure_client_secret`. + +* `azure_client_id` - (optional) This is the Azure Enterprise Application (Service principal) client id. This service principal requires contributor access to your Azure Databricks deployment. Alternatively, you can provide this value as an environment variable `ARM_CLIENT_ID`. +* `azure_tenant_id` - (optional) This is the Azure Active Directory Tenant id in which the Enterprise Application (Service Principal) +resides. Alternatively, you can provide this value as an environment variable `ARM_TENANT_ID`. +* `azure_environment` - (optional) This is the Azure Environment which defaults to the `public` cloud. Other options are `german`, `china` and `usgovernment`. Alternatively, you can provide this value as an environment variable `ARM_ENVIRONMENT`. +* `auth_type` - (required) This is the Authentication Type that is used for specifying the authentication method. This is required for this authentication type. + +The `ARM_*` environment variables provide a way to share authentication configuration when using the `databricks` provider alongside the [`azurerm` provider](https://registry.terraform.io/providers/hashicorp/azurerm/latest). + +When a workspace is created using a service principal account, that service principal account is automatically added to the workspace as a member of the admins group. To add a new service principal account to an existing workspace, create a [databricks_service_principal](resources/service_principal.md). ## Special configurations for GCP The provider works with [Google Cloud CLI authentication](https://cloud.google.com/sdk/docs/authorizing) to facilitate local development workflows. For automated scenarios, a service principal auth is necessary using `google_service_account` parameter with [impersonation](https://cloud.google.com/docs/authentication#service-accounts) and Application Default Credentials.
Alternatively, you could provide the service account key directly by passing it to `google_credentials` parameter (or `GOOGLE_CREDENTIALS` environment variable) From afc4764aebc2ef7d6e7437f65bef1d7134b421d5 Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Thu, 5 Sep 2024 14:51:27 +0200 Subject: [PATCH 04/99] [Internal] Support import in acceptance test + adding import state for quality monitor (#3994) ## Changes - Add `ImportState` method for `quality monitor` resource - Make acceptance test infrastructure able to test importing behavior - Add integration test for import state for quality monitor ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- internal/acceptance/init.go | 59 +++++++++++++++---- .../resource_quality_monitor.go | 4 ++ .../resource_quality_monitor_test.go | 32 ++++++++++ 3 files changed, 82 insertions(+), 13 deletions(-) diff --git a/internal/acceptance/init.go b/internal/acceptance/init.go index 704964b710..604d7b636c 100644 --- a/internal/acceptance/init.go +++ b/internal/acceptance/init.go @@ -56,6 +56,27 @@ func UnityAccountLevel(t *testing.T, steps ...Step) { run(t, steps) } +// BuildImportStateIdFunc constructs a function that returns the id attribute of a target resource from the Terraform state. +// This is a helper function for conveniently constructing the ImportStateIdFunc field for a test step. +func BuildImportStateIdFunc(resourceId, attr string) func(*terraform.State) (string, error) { + return func(s *terraform.State) (string, error) { + // Find the resource in the Terraform state. + rs, ok := s.RootModule().Resources[resourceId] + if !ok { + return "", fmt.Errorf("resource not found in state: %s", resourceId) + } + + // Access the attribute directly from the state. + targetAttr := rs.Primary.Attributes[attr] + if targetAttr == "" { + return "", fmt.Errorf("attribute '%s' not found or empty in the resource", attr) + } + + // Return the found attribute or the ID needed for the import. + return targetAttr, nil + } +} + // A step in a terraform acceptance test type Step struct { // Terraform HCL for resources to materialize in this test step. @@ -74,9 +95,17 @@ type Step struct { PlanOnly bool PreventDiskCleanup bool PreventPostDestroyRefresh bool - ImportState bool - ImportStateVerify bool - ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) + + // If true, will test the functionality of ImportState by importing the resource with ResourceName (must be set) and the ID of that resource. + // ID can be supplied with either ImportStateId or ImportStateIdFunc. 
+ ImportState bool + ImportStateId string + ImportStateIdFunc func(*terraform.State) (string, error) + ImportStateVerify bool + ImportStateVerifyIdentifierAttribute string + ResourceName string + + ProtoV6ProviderFactories map[string]func() (tfprotov6.ProviderServer, error) } func createUuid() string { @@ -187,16 +216,20 @@ func run(t *testing.T, steps []Step) { stepPreConfig() } }, - Config: stepConfig, - Destroy: s.Destroy, - ExpectNonEmptyPlan: s.ExpectNonEmptyPlan, - PlanOnly: s.PlanOnly, - PreventDiskCleanup: s.PreventDiskCleanup, - PreventPostDestroyRefresh: s.PreventPostDestroyRefresh, - ImportState: s.ImportState, - ImportStateVerify: s.ImportStateVerify, - ExpectError: s.ExpectError, - ProtoV6ProviderFactories: providerFactoryForStep, + Config: stepConfig, + Destroy: s.Destroy, + ExpectNonEmptyPlan: s.ExpectNonEmptyPlan, + PlanOnly: s.PlanOnly, + PreventDiskCleanup: s.PreventDiskCleanup, + PreventPostDestroyRefresh: s.PreventPostDestroyRefresh, + ImportState: s.ImportState, + ImportStateId: s.ImportStateId, + ImportStateIdFunc: s.ImportStateIdFunc, + ImportStateVerify: s.ImportStateVerify, + ImportStateVerifyIdentifierAttribute: s.ImportStateVerifyIdentifierAttribute, + ResourceName: s.ResourceName, + ExpectError: s.ExpectError, + ProtoV6ProviderFactories: providerFactoryForStep, Check: func(state *terraform.State) error { if stepCheck != nil { return stepCheck(state) diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go index 20d86a0426..4dd21b44da 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go @@ -88,6 +88,10 @@ func (d *QualityMonitorResource) Configure(ctx context.Context, req resource.Con } } +func (d *QualityMonitorResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("table_name"), req, resp) +} + func (r *QualityMonitorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) 
diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go index 41039b1d13..a2e391bda3 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go @@ -151,3 +151,35 @@ func TestUcAccUpdateQualityMonitorPluginFramework(t *testing.T) { `, }) } + +func TestUcAccQualityMonitorImportPluginFramework(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + t.Skipf("databricks_quality_monitor resource is not available on GCP") + } + acceptance.UnityWorkspaceLevel(t, + acceptance.Step{ + Template: commonPartQualityMonitoring + ` + + resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log = { + granularities = ["1 day"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }, + acceptance.Step{ + ImportState: true, + ResourceName: "databricks_quality_monitor_pluginframework.testMonitorInference", + ImportStateIdFunc: acceptance.BuildImportStateIdFunc("databricks_quality_monitor_pluginframework.testMonitorInference", "table_name"), + ImportStateVerify: true, + ImportStateVerifyIdentifierAttribute: "table_name", + }, + ) +} From ca5ea216d6e0e30e3af15460c91c2ea09963b415 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Mon, 9 Sep 2024 15:42:42 +0200 Subject: [PATCH 05/99] [Internal] Add support for `computed` tag in TfSDK Structs (#4005) ## Changes Added support for `computed` tag in TfSDK Structs making code easier to maintain and read, minimising the lines of code required in customizing the schema ## Tests Unit test - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .../pluginfw/tfschema/struct_to_schema.go | 111 +++++++++++++++--- .../tfschema/struct_to_schema_test.go | 20 ++++ 2 files changed, 116 insertions(+), 15 deletions(-) diff --git a/internal/providers/pluginfw/tfschema/struct_to_schema.go b/internal/providers/pluginfw/tfschema/struct_to_schema.go index 471479676e..2ac1303e19 100644 --- a/internal/providers/pluginfw/tfschema/struct_to_schema.go +++ b/internal/providers/pluginfw/tfschema/struct_to_schema.go @@ -30,6 +30,7 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { continue } isOptional := fieldIsOptional(typeField) + isComputed := fieldIsComputed(typeField) kind := typeField.Type.Kind() value := field.Value typeFieldType := typeField.Type @@ -48,17 +49,44 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { } switch elemType { case reflect.TypeOf(types.Bool{}): - scm[fieldName] = ListAttributeBuilder{ElementType: types.BoolType, Optional: isOptional, Required: !isOptional} + scm[fieldName] = ListAttributeBuilder{ + ElementType: types.BoolType, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case reflect.TypeOf(types.Int64{}): - scm[fieldName] = ListAttributeBuilder{ElementType: types.Int64Type, Optional: isOptional, 
Required: !isOptional} + scm[fieldName] = ListAttributeBuilder{ + ElementType: types.Int64Type, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case reflect.TypeOf(types.Float64{}): - scm[fieldName] = ListAttributeBuilder{ElementType: types.Float64Type, Optional: isOptional, Required: !isOptional} + scm[fieldName] = ListAttributeBuilder{ + ElementType: types.Float64Type, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case reflect.TypeOf(types.String{}): - scm[fieldName] = ListAttributeBuilder{ElementType: types.StringType, Optional: isOptional, Required: !isOptional} + scm[fieldName] = ListAttributeBuilder{ + ElementType: types.StringType, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } default: // Nested struct nestedScm := typeToSchema(reflect.New(elemType).Elem()) - scm[fieldName] = ListNestedAttributeBuilder{NestedObject: NestedAttributeObject{Attributes: nestedScm}, Optional: isOptional, Required: !isOptional} + scm[fieldName] = ListNestedAttributeBuilder{ + NestedObject: NestedAttributeObject{ + Attributes: nestedScm, + }, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } } } else if kind == reflect.Map { elemType := typeFieldType.Elem() @@ -70,28 +98,71 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { } switch elemType { case reflect.TypeOf(types.Bool{}): - scm[fieldName] = MapAttributeBuilder{ElementType: types.BoolType, Optional: isOptional, Required: !isOptional} + scm[fieldName] = MapAttributeBuilder{ + ElementType: types.BoolType, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case reflect.TypeOf(types.Int64{}): - scm[fieldName] = MapAttributeBuilder{ElementType: types.Int64Type, Optional: isOptional, Required: !isOptional} + scm[fieldName] = MapAttributeBuilder{ + ElementType: types.Int64Type, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case reflect.TypeOf(types.Float64{}): - scm[fieldName] = MapAttributeBuilder{ElementType: types.Float64Type, Optional: isOptional, Required: !isOptional} + scm[fieldName] = MapAttributeBuilder{ + ElementType: types.Float64Type, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case reflect.TypeOf(types.String{}): - scm[fieldName] = MapAttributeBuilder{ElementType: types.StringType, Optional: isOptional, Required: !isOptional} + scm[fieldName] = MapAttributeBuilder{ + ElementType: types.StringType, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } default: // Nested struct nestedScm := typeToSchema(reflect.New(elemType).Elem()) - scm[fieldName] = MapNestedAttributeBuilder{NestedObject: NestedAttributeObject{Attributes: nestedScm}, Optional: isOptional, Required: !isOptional} + scm[fieldName] = MapNestedAttributeBuilder{ + NestedObject: NestedAttributeObject{ + Attributes: nestedScm, + }, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } } } else if kind == reflect.Struct { switch value.Interface().(type) { case types.Bool: - scm[fieldName] = BoolAttributeBuilder{Optional: isOptional, Required: !isOptional} + scm[fieldName] = BoolAttributeBuilder{ + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case types.Int64: - scm[fieldName] = Int64AttributeBuilder{Optional: isOptional, Required: !isOptional} + scm[fieldName] = Int64AttributeBuilder{ + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case types.Float64: - 
scm[fieldName] = Float64AttributeBuilder{Optional: isOptional, Required: !isOptional} + scm[fieldName] = Float64AttributeBuilder{ + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case types.String: - scm[fieldName] = StringAttributeBuilder{Optional: isOptional, Required: !isOptional} + scm[fieldName] = StringAttributeBuilder{ + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } case types.List: panic(fmt.Errorf("types.List should never be used in tfsdk structs. %s", common.TerraformBugErrorMessage)) case types.Map: @@ -101,7 +172,12 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { elem := typeFieldType sv := reflect.New(elem) nestedScm := typeToSchema(sv) - scm[fieldName] = SingleNestedAttributeBuilder{Attributes: nestedScm, Optional: isOptional, Required: !isOptional} + scm[fieldName] = SingleNestedAttributeBuilder{ + Attributes: nestedScm, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, + } } } else { panic(fmt.Errorf("unknown type for field: %s. %s", typeField.Name, common.TerraformBugErrorMessage)) @@ -110,6 +186,11 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { return scm } +func fieldIsComputed(field reflect.StructField) bool { + tagValue := field.Tag.Get("tf") + return strings.Contains(tagValue, "computed") +} + func fieldIsOptional(field reflect.StructField) bool { tagValue := field.Tag.Get("tf") return strings.Contains(tagValue, "optional") diff --git a/internal/providers/pluginfw/tfschema/struct_to_schema_test.go b/internal/providers/pluginfw/tfschema/struct_to_schema_test.go index 7eca181b17..f566c5feab 100644 --- a/internal/providers/pluginfw/tfschema/struct_to_schema_test.go +++ b/internal/providers/pluginfw/tfschema/struct_to_schema_test.go @@ -24,6 +24,12 @@ type TestIntTfSdk struct { Workers types.Int64 `tfsdk:"workers" tf:"optional"` } +type TestComputedTfSdk struct { + ComputedTag types.String `tfsdk:"computedtag" tf:"computed"` + MultipleTags types.String `tfsdk:"multipletags" tf:"computed,optional"` + NonComputed types.String `tfsdk:"noncomputed" tf:"optional"` +} + type TestFloatTfSdk struct { Float types.Float64 `tfsdk:"float" tf:"optional"` } @@ -248,3 +254,17 @@ func TestStructToSchemaExpectedError(t *testing.T) { t.Run(test.name, func(t *testing.T) { testStructToSchemaPanics(t, test.testStruct, test.expectedError) }) } } + +func TestComputedField(t *testing.T) { + // Test that ComputedTag field is computed and required + scm := ResourceStructToSchema(TestComputedTfSdk{}, nil) + assert.True(t, scm.Attributes["computedtag"].IsComputed()) + assert.True(t, scm.Attributes["computedtag"].IsRequired()) + + // Test that MultipleTags field is computed and optional + assert.True(t, scm.Attributes["multipletags"].IsComputed()) + assert.True(t, scm.Attributes["multipletags"].IsOptional()) + + // Test that NonComputed field is not computed + assert.True(t, !scm.Attributes["noncomputed"].IsComputed()) +} From c35264d1c6598729e6821f5495da6c3023ba2957 Mon Sep 17 00:00:00 2001 From: Dustin Sollick Date: Tue, 10 Sep 2024 12:44:21 -0400 Subject: [PATCH 06/99] [Doc] Small Grammar Corrections in Docs (#4006) ## Changes Hey team, This PR fixes a bunch of small nits in the documentation around verb tense (specifically 'could' vs 'can') and adds indefinite articles ('a'/'an') before singular resource references. This brings it more in line with other documentation, and fixes grammatical errors. 
## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/data-sources/aws_crossaccount_policy.md | 2 +- docs/data-sources/catalog.md | 2 +- docs/data-sources/catalogs.md | 2 +- docs/data-sources/external_location.md | 2 +- docs/data-sources/external_locations.md | 2 +- docs/data-sources/metastore.md | 2 +- docs/data-sources/metastores.md | 2 +- docs/data-sources/schemas.md | 2 +- docs/data-sources/storage_credential.md | 2 +- docs/data-sources/storage_credentials.md | 2 +- docs/data-sources/table.md | 2 +- docs/data-sources/tables.md | 2 +- docs/data-sources/volumes.md | 2 +- docs/resources/access_control_rule_set.md | 2 +- docs/resources/artifact_allowlist.md | 2 +- docs/resources/automatic_cluster_update_setting.md | 2 +- docs/resources/catalog.md | 2 +- docs/resources/compliance_security_profile_setting.md | 2 +- docs/resources/connection.md | 2 +- docs/resources/default_namespace_setting.md | 2 +- docs/resources/enhanced_security_monitoring_setting.md | 2 +- docs/resources/external_location.md | 2 +- docs/resources/metastore.md | 2 +- docs/resources/metastore_assignment.md | 2 +- docs/resources/metastore_data_access.md | 2 +- docs/resources/online_table.md | 2 +- docs/resources/provider.md | 2 +- docs/resources/recipient.md | 2 +- docs/resources/registered_model.md | 2 +- docs/resources/restrict_workspace_admins_setting.md | 2 +- docs/resources/schema.md | 2 +- docs/resources/share.md | 2 +- docs/resources/storage_credential.md | 2 +- docs/resources/system_schema.md | 2 +- docs/resources/vector_search_endpoint.md | 2 +- docs/resources/vector_search_index.md | 2 +- docs/resources/volume.md | 2 +- docs/resources/workspace_binding.md | 2 +- 38 files changed, 38 insertions(+), 38 deletions(-) diff --git a/docs/data-sources/aws_crossaccount_policy.md b/docs/data-sources/aws_crossaccount_policy.md index c99876a7e0..715cf59b15 100644 --- a/docs/data-sources/aws_crossaccount_policy.md +++ b/docs/data-sources/aws_crossaccount_policy.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_aws_crossaccount_policy Data Source --> **Note** This data source could be only used with account-level provider! +-> **Note** This data source can only be used with an account-level provider! This data source constructs necessary AWS cross-account policy for you, which is based on [official documentation](https://docs.databricks.com/administration-guide/account-api/iam-role.html#language-Your%C2%A0VPC,%C2%A0default). diff --git a/docs/data-sources/catalog.md b/docs/data-sources/catalog.md index f9851018dd..533f8eb60d 100644 --- a/docs/data-sources/catalog.md +++ b/docs/data-sources/catalog.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_catalog Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _default auth: cannot configure default credentials_ errors. 
diff --git a/docs/data-sources/catalogs.md b/docs/data-sources/catalogs.md index 5c20123c89..4f9fc3e569 100644 --- a/docs/data-sources/catalogs.md +++ b/docs/data-sources/catalogs.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_catalogs Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _default auth: cannot configure default credentials_ errors. diff --git a/docs/data-sources/external_location.md b/docs/data-sources/external_location.md index 78ecde2ca7..8a8e3cc90a 100644 --- a/docs/data-sources/external_location.md +++ b/docs/data-sources/external_location.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_external_location Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! Retrieves details about a [databricks_external_location](../resources/external_location.md) that were created by Terraform or manually. diff --git a/docs/data-sources/external_locations.md b/docs/data-sources/external_locations.md index fa4bdedd0c..c9472567df 100644 --- a/docs/data-sources/external_locations.md +++ b/docs/data-sources/external_locations.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_external_locations Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! Retrieves a list of [databricks_external_location](./external_location.md) objects, that were created by Terraform or manually, so that special handling could be applied. diff --git a/docs/data-sources/metastore.md b/docs/data-sources/metastore.md index 948a17ef21..db59fc7488 100644 --- a/docs/data-sources/metastore.md +++ b/docs/data-sources/metastore.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_metastore Data Source --> **Note** This data source could be only used with account-level provider! +-> **Note** This data source can only be used with an account-level provider! Retrieves information about metastore for a given id of [databricks_metastore](../resources/metastore.md) object, that was created by Terraform or manually, so that special handling could be applied. diff --git a/docs/data-sources/metastores.md b/docs/data-sources/metastores.md index a6dbe45c84..ac002e16cd 100644 --- a/docs/data-sources/metastores.md +++ b/docs/data-sources/metastores.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_metastores Data Source --> **Note** This data source could be only used with account-level provider! +-> **Note** This data source can only be used with an account-level provider! Retrieves a mapping of name to id of [databricks_metastore](../resources/metastore.md) objects, that were created by Terraform or manually, so that special handling could be applied. 
diff --git a/docs/data-sources/schemas.md b/docs/data-sources/schemas.md index 1128195989..aa0a589d40 100644 --- a/docs/data-sources/schemas.md +++ b/docs/data-sources/schemas.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_schemas Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _default auth: cannot configure default credentials_ errors. diff --git a/docs/data-sources/storage_credential.md b/docs/data-sources/storage_credential.md index 631e264e66..0361bf523b 100644 --- a/docs/data-sources/storage_credential.md +++ b/docs/data-sources/storage_credential.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_storage_credential Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! Retrieves details about a [databricks_storage_credential](../resources/storage_credential.md) that were created by Terraform or manually. diff --git a/docs/data-sources/storage_credentials.md b/docs/data-sources/storage_credentials.md index a2d35bde7e..db22f74ae6 100644 --- a/docs/data-sources/storage_credentials.md +++ b/docs/data-sources/storage_credentials.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_storage_credentials Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! Retrieves a list of [databricks_storage_credential](./storage_credential.md) objects, that were created by Terraform or manually, so that special handling could be applied. diff --git a/docs/data-sources/table.md b/docs/data-sources/table.md index 3eb1ccf49e..408133b4f0 100644 --- a/docs/data-sources/table.md +++ b/docs/data-sources/table.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_table Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! -> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _default auth: cannot configure default credentials_ errors. diff --git a/docs/data-sources/tables.md b/docs/data-sources/tables.md index fcb9516fc3..1a3f2cd4e0 100644 --- a/docs/data-sources/tables.md +++ b/docs/data-sources/tables.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_tables Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! 
-> **Note** If you have a fully automated setup with workspaces created by [databricks_mws_workspaces](../resources/mws_workspaces.md) or [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace), please make sure to add [depends_on attribute](../guides/troubleshooting.md#data-resources-and-authentication-is-not-configured-errors) in order to prevent _default auth: cannot configure default credentials_ errors. diff --git a/docs/data-sources/volumes.md b/docs/data-sources/volumes.md index 619e05c513..89ee190f39 100644 --- a/docs/data-sources/volumes.md +++ b/docs/data-sources/volumes.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_volumes Data Source --> **Note** This data source could be only used with workspace-level provider! +-> **Note** This data source can only be used with a workspace-level provider! Retrieves a list of [databricks_volume](../resources/volume.md) ids (full names), that were created by Terraform or manually. diff --git a/docs/resources/access_control_rule_set.md b/docs/resources/access_control_rule_set.md index 44a6479651..3a7767c570 100644 --- a/docs/resources/access_control_rule_set.md +++ b/docs/resources/access_control_rule_set.md @@ -4,7 +4,7 @@ subcategory: "Security" # databricks_access_control_rule_set Resource --> **Note** This resource could be used with account or workspace-level provider. +-> **Note** This resource can be used with an account or workspace-level provider. This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. diff --git a/docs/resources/artifact_allowlist.md b/docs/resources/artifact_allowlist.md index 4973872a3c..a65fe6dd57 100644 --- a/docs/resources/artifact_allowlist.md +++ b/docs/resources/artifact_allowlist.md @@ -6,7 +6,7 @@ subcategory: "Unity Catalog" -> **Note** It is required to define all allowlist for an artifact type in a single resource, otherwise Terraform cannot guarantee config drift prevention. --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the allowlist in UC so that users can leverage these artifacts on compute configured with shared access mode. diff --git a/docs/resources/automatic_cluster_update_setting.md b/docs/resources/automatic_cluster_update_setting.md index 0a5ce409d0..152a95b9ea 100644 --- a/docs/resources/automatic_cluster_update_setting.md +++ b/docs/resources/automatic_cluster_update_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_automatic_cluster_update_workspace_setting Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! The `databricks_automatic_cluster_update_workspace_setting` resource allows you to control whether automatic cluster update is enabled for the current workspace. By default, it is turned off. Enabling this feature on a workspace requires that you add the Enhanced Security and Compliance add-on. 
diff --git a/docs/resources/catalog.md b/docs/resources/catalog.md index 1145837aa4..980c6c837d 100644 --- a/docs/resources/catalog.md +++ b/docs/resources/catalog.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_catalog Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. diff --git a/docs/resources/compliance_security_profile_setting.md b/docs/resources/compliance_security_profile_setting.md index a507243418..6bb5afc090 100644 --- a/docs/resources/compliance_security_profile_setting.md +++ b/docs/resources/compliance_security_profile_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_compliance_security_profile_workspace_setting Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! -> **Note** This setting can NOT be disabled once it is enabled. diff --git a/docs/resources/connection.md b/docs/resources/connection.md index 5115f1ca1d..f7421bd5da 100644 --- a/docs/resources/connection.md +++ b/docs/resources/connection.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_connection (Resource) --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: diff --git a/docs/resources/default_namespace_setting.md b/docs/resources/default_namespace_setting.md index 723937bd6e..cf3c5ee36a 100644 --- a/docs/resources/default_namespace_setting.md +++ b/docs/resources/default_namespace_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_default_namespace_setting Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! The `databricks_default_namespace_setting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. Setting the default catalog for the workspace determines the catalog that is used when queries do not reference diff --git a/docs/resources/enhanced_security_monitoring_setting.md b/docs/resources/enhanced_security_monitoring_setting.md index 7447b7ad34..64a18c9fcb 100644 --- a/docs/resources/enhanced_security_monitoring_setting.md +++ b/docs/resources/enhanced_security_monitoring_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_enhanced_security_monitoring_workspace_setting Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! The `databricks_enhanced_security_monitoring_workspace_setting` resource allows you to control whether enhanced security monitoring is enabled for the current workspace. If the compliance security profile is enabled, this is automatically enabled. 
By default, diff --git a/docs/resources/external_location.md b/docs/resources/external_location.md index 933f863d67..59cc555685 100644 --- a/docs/resources/external_location.md +++ b/docs/resources/external_location.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_external_location Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: diff --git a/docs/resources/metastore.md b/docs/resources/metastore.md index d159f53037..6cd5a8417e 100644 --- a/docs/resources/metastore.md +++ b/docs/resources/metastore.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_metastore Resource --> **Note** This resource could be used with account or workspace-level provider. +-> **Note** This resource can be used with an account or workspace-level provider. A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. diff --git a/docs/resources/metastore_assignment.md b/docs/resources/metastore_assignment.md index 14edd3d60c..6a336a312b 100644 --- a/docs/resources/metastore_assignment.md +++ b/docs/resources/metastore_assignment.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_metastore_assignment (Resource) --> **Note** This resource could be used with account or workspace-level provider. +-> **Note** This resource can be used with an account or workspace-level provider. A single [databricks_metastore](metastore.md) can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates. diff --git a/docs/resources/metastore_data_access.md b/docs/resources/metastore_data_access.md index 8b9334065b..290eb061cb 100644 --- a/docs/resources/metastore_data_access.md +++ b/docs/resources/metastore_data_access.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_metastore_data_access (Resource) --> **Note** This resource could be used with account or workspace-level provider. +-> **Note** This resource can be used with an account or workspace-level provider. Optionally, each [databricks_metastore](metastore.md) can have a default [databricks_storage_credential](storage_credential.md) defined as `databricks_metastore_data_access`. This will be used by Unity Catalog to access data in the root storage location if defined. diff --git a/docs/resources/online_table.md b/docs/resources/online_table.md index bbfa9cafbd..66aa48025c 100644 --- a/docs/resources/online_table.md +++ b/docs/resources/online_table.md @@ -4,7 +4,7 @@ subcategory: "Unity Catalog" # databricks_online_table (Resource) --> **Note** This resource could be only used on Unity Catalog-enabled workspace! +-> **Note** This resource can only be used on a Unity Catalog-enabled workspace! This resource allows you to create [Online Table](https://docs.databricks.com/en/machine-learning/feature-store/online-tables.html) in Databricks. An online table is a read-only copy of a Delta Table that is stored in row-oriented format optimized for online access. 
Online tables are fully serverless tables that auto-scale throughput capacity with the request load and provide low latency and high throughput access to data of any scale. Online tables are designed to work with Databricks Model Serving, Feature Serving, and retrieval-augmented generation (RAG) applications where they are used for fast data lookups. diff --git a/docs/resources/provider.md b/docs/resources/provider.md index c6a834eae4..25ebe76601 100644 --- a/docs/resources/provider.md +++ b/docs/resources/provider.md @@ -3,7 +3,7 @@ subcategory: "Delta Sharing" --- # databricks_provider Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! In Delta Sharing, a provider is an entity that shares data with a recipient. Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you. diff --git a/docs/resources/recipient.md b/docs/resources/recipient.md index e6803b219a..6df597ea92 100644 --- a/docs/resources/recipient.md +++ b/docs/resources/recipient.md @@ -3,7 +3,7 @@ subcategory: "Delta Sharing" --- # databricks_recipient Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! In Delta Sharing, a recipient is an entity that receives shares from a provider. In Unity Catalog, a share is a securable object that represents an organization and associates it with a credential or secure sharing identifier that allows that organization to access one or more shares. diff --git a/docs/resources/registered_model.md b/docs/resources/registered_model.md index 5594309e92..44c583102b 100644 --- a/docs/resources/registered_model.md +++ b/docs/resources/registered_model.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_registered_model Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. diff --git a/docs/resources/restrict_workspace_admins_setting.md b/docs/resources/restrict_workspace_admins_setting.md index 8e88269caa..765825f866 100644 --- a/docs/resources/restrict_workspace_admins_setting.md +++ b/docs/resources/restrict_workspace_admins_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_restrict_workspace_admins_setting Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! The `databricks_restrict_workspace_admins_setting` resource lets you control the capabilities of workspace admins. diff --git a/docs/resources/schema.md b/docs/resources/schema.md index 0ad1af4c6f..65a144c4e9 100644 --- a/docs/resources/schema.md +++ b/docs/resources/schema.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_schema Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. 
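Resources flagged above as usable with an account or workspace-level provider, such as `databricks_metastore_assignment`, have an account-level counterpart to the sketch shown earlier. This is again illustrative only; the helper shown is the exported account-level one, and the environment variable names are placeholders:

```go
package acceptance

import "testing"

// Illustrative sketch: assigns a metastore to a workspace through an
// account-level provider configuration; IDs come from placeholder env vars.
func TestUcAccMetastoreAssignmentExample(t *testing.T) {
	UnityAccountLevel(t, Step{
		Template: `
		resource "databricks_metastore_assignment" "this" {
			metastore_id = "{env.TEST_METASTORE_ID}"
			workspace_id = "{env.DUMMY_WORKSPACE_ID}"
		}`,
	})
}
```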
diff --git a/docs/resources/share.md b/docs/resources/share.md index 29f002f4ef..38252a8818 100644 --- a/docs/resources/share.md +++ b/docs/resources/share.md @@ -3,7 +3,7 @@ subcategory: "Delta Sharing" --- # databricks_share Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! In Delta Sharing, a share is a read-only collection of tables and table partitions that a provider wants to share with one or more recipients. If your recipient uses a Unity Catalog-enabled Databricks workspace, you can also include notebook files, views (including dynamic views that restrict access at the row and column level), Unity Catalog volumes, and Unity Catalog models in a share. diff --git a/docs/resources/storage_credential.md b/docs/resources/storage_credential.md index 40f452ff7a..b57120e8dd 100644 --- a/docs/resources/storage_credential.md +++ b/docs/resources/storage_credential.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_storage_credential Resource --> **Note** This resource could be used with account or workspace-level provider. +-> **Note** This resource can be used with an account or workspace-level provider. To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: diff --git a/docs/resources/system_schema.md b/docs/resources/system_schema.md index 18c515de6e..80634f0859 100644 --- a/docs/resources/system_schema.md +++ b/docs/resources/system_schema.md @@ -5,7 +5,7 @@ subcategory: "Unity Catalog" -> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! Manages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. diff --git a/docs/resources/vector_search_endpoint.md b/docs/resources/vector_search_endpoint.md index adb3319264..c90de0c25a 100644 --- a/docs/resources/vector_search_endpoint.md +++ b/docs/resources/vector_search_endpoint.md @@ -3,7 +3,7 @@ subcategory: "Mosaic AI Vector Search" --- # databricks_vector_search_endpoint Resource --> **Note** This resource could be only used on Unity Catalog-enabled workspace! +-> **Note** This resource can only be used on a Unity Catalog-enabled workspace! This resource allows you to create [Mosaic AI Vector Search Endpoint](https://docs.databricks.com/en/generative-ai/vector-search.html) in Databricks. Mosaic AI Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Mosaic AI Vector Search Endpoint is used to create and access vector search indexes. diff --git a/docs/resources/vector_search_index.md b/docs/resources/vector_search_index.md index 74d02e746e..abf17447fb 100644 --- a/docs/resources/vector_search_index.md +++ b/docs/resources/vector_search_index.md @@ -3,7 +3,7 @@ subcategory: "Mosaic AI Vector Search" --- # databricks_vector_search_index Resource --> **Note** This resource could be only used on Unity Catalog-enabled workspace! +-> **Note** This resource can only be used on a Unity Catalog-enabled workspace! 
This resource allows you to create [Mosaic AI Vector Search Index](https://docs.databricks.com/en/generative-ai/create-query-vector-search.html) in Databricks. Mosaic AI Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Mosaic AI Vector Search Index provides the ability to search data in the linked Delta Table. diff --git a/docs/resources/volume.md b/docs/resources/volume.md index a95fd5fac9..b116e42129 100644 --- a/docs/resources/volume.md +++ b/docs/resources/volume.md @@ -5,7 +5,7 @@ subcategory: "Unity Catalog" -> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data. diff --git a/docs/resources/workspace_binding.md b/docs/resources/workspace_binding.md index 94f33912c1..5a649bd9e4 100644 --- a/docs/resources/workspace_binding.md +++ b/docs/resources/workspace_binding.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_workspace_binding Resource --> **Note** This resource could be only used with workspace-level provider! +-> **Note** This resource can only be used with a workspace-level provider! 
If you use workspaces to isolate user data access, you may want to limit access to catalog, external locations or storage credentials from specific workspaces in your account, also known as workspace binding From 620bb2703ca51483e63ec20c03c21d1d6797884d Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Wed, 11 Sep 2024 12:49:58 +0200 Subject: [PATCH 07/99] [Dependency] Update Go SDK to 0.46.0 (#4007) ## Changes ## Tests - [X] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [X] using Go SDK --- .codegen/_openapi_sha | 2 +- go.mod | 2 +- go.sum | 4 +- internal/service/catalog_tf/model.go | 23 ++++- internal/service/iam_tf/model.go | 14 +-- internal/service/jobs_tf/model.go | 100 +++++++++++++++++++++- internal/service/ml_tf/model.go | 7 +- internal/service/serving_tf/model.go | 6 +- internal/service/vectorsearch_tf/model.go | 5 ++ 9 files changed, 145 insertions(+), 18 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 8b01a2422a..4ceeab3d38 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -3eae49b444cac5a0118a3503e5b7ecef7f96527a \ No newline at end of file +d05898328669a3f8ab0c2ecee37db2673d3ea3f7 \ No newline at end of file diff --git a/go.mod b/go.mod index 40a4bd7940..8f5de34e8d 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.45.0 + github.com/databricks/databricks-sdk-go v0.46.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 6eb1b4559f..9cace277b5 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.45.0 h1:wdx5Wm/ESrahdHeq62WrjLeGjV4r722LLanD8ahI0Mo= -github.com/databricks/databricks-sdk-go v0.45.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.46.0 h1:D0TxmtSVAOsdnfzH4OGtAmcq+8TyA7Z6fA6JEYhupeY= +github.com/databricks/databricks-sdk-go v0.46.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index a71e0fa77e..e84b479703 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -445,7 +445,9 @@ type CreateMetastore struct { } type CreateMetastoreAssignment struct { - // The name of the default catalog in the metastore. + // The name of the default catalog in the metastore. This field is + // depracted. Please use "Default Namespace API" to configure the default + // catalog for a Databricks workspace. DefaultCatalogName types.String `tfsdk:"default_catalog_name" tf:""` // The unique ID of the metastore. 
MetastoreId types.String `tfsdk:"metastore_id" tf:""` @@ -2105,6 +2107,21 @@ type ReadVolumeRequest struct { Name types.String `tfsdk:"-"` } +type RegenerateDashboardRequest struct { + // Full name of the table. + TableName types.String `tfsdk:"-"` + // Optional argument to specify the warehouse for dashboard regeneration. If + // not specified, the first running warehouse will be used. + WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` +} + +type RegenerateDashboardResponse struct { + // Id of the regenerated monitoring dashboard. + DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` + // The directory where the regenerated dashboard is stored. + ParentFolder types.String `tfsdk:"parent_folder" tf:"optional"` +} + // Registered model alias. type RegisteredModelAlias struct { // Name of the alias, e.g. 'champion' or 'latest_stable' @@ -2510,7 +2527,9 @@ type UpdateMetastore struct { } type UpdateMetastoreAssignment struct { - // The name of the default catalog for the metastore. + // The name of the default catalog in the metastore. This field is + // depracted. Please use "Default Namespace API" to configure the default + // catalog for a Databricks workspace. DefaultCatalogName types.String `tfsdk:"default_catalog_name" tf:"optional"` // The unique ID of the metastore. MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` diff --git a/internal/service/iam_tf/model.go b/internal/service/iam_tf/model.go index 4274dfc1c7..0d056da55b 100755 --- a/internal/service/iam_tf/model.go +++ b/internal/service/iam_tf/model.go @@ -178,9 +178,10 @@ type GetPermissionRequest struct { // The id of the request object. RequestObjectId types.String `tfsdk:"-"` // The type of the request object. Can be one of the following: alerts, - // authorization, clusters, cluster-policies, dbsql-dashboards, directories, - // experiments, files, instance-pools, jobs, notebooks, pipelines, queries, - // registered-models, repos, serving-endpoints, or warehouses. + // authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, + // directories, experiments, files, instance-pools, jobs, notebooks, + // pipelines, queries, registered-models, repos, serving-endpoints, or + // warehouses. RequestObjectType types.String `tfsdk:"-"` } @@ -609,9 +610,10 @@ type PermissionsRequest struct { // The id of the request object. RequestObjectId types.String `tfsdk:"-"` // The type of the request object. Can be one of the following: alerts, - // authorization, clusters, cluster-policies, dbsql-dashboards, directories, - // experiments, files, instance-pools, jobs, notebooks, pipelines, queries, - // registered-models, repos, serving-endpoints, or warehouses. + // authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, + // directories, experiments, files, instance-pools, jobs, notebooks, + // pipelines, queries, registered-models, repos, serving-endpoints, or + // warehouses. RequestObjectType types.String `tfsdk:"-"` } diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 0ea0d45f4a..d5a1b57f58 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -137,8 +137,10 @@ type BaseRun struct { // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` - // The current state of the run. + // Deprecated. Please use the `status` field instead. 
State *RunState `tfsdk:"state" tf:"optional"` + // The current status of the run + Status *RunStatus `tfsdk:"status" tf:"optional"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run resutls. Tasks []RunTask `tfsdk:"tasks" tf:"optional"` @@ -1183,6 +1185,19 @@ type PythonWheelTask struct { Parameters []types.String `tfsdk:"parameters" tf:"optional"` } +type QueueDetails struct { + // The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run + // was queued due to reaching the workspace limit of active task runs. * + // `MAX_CONCURRENT_RUNS_REACHED`: The run was queued due to reaching the + // per-job limit of concurrent job runs. * + // `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching + // the workspace limit of active run job tasks. + Code types.String `tfsdk:"code" tf:"optional"` + // A descriptive message with the queuing details. This field is + // unstructured, and its exact format is subject to change. + Message types.String `tfsdk:"message" tf:"optional"` +} + type QueueSettings struct { // If true, enable queueing for the job. This is a required field. Enabled types.Bool `tfsdk:"enabled" tf:""` @@ -1196,8 +1211,10 @@ type RepairHistoryItem struct { Id types.Int64 `tfsdk:"id" tf:"optional"` // The start time of the (repaired) run. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` - // The current state of the run. + // Deprecated. Please use the `status` field instead. State *RunState `tfsdk:"state" tf:"optional"` + // The current status of the run + Status *RunStatus `tfsdk:"status" tf:"optional"` // The run IDs of the task runs that ran as part of this repair history // item. TaskRunIds []types.Int64 `tfsdk:"task_run_ids" tf:"optional"` @@ -1500,8 +1517,10 @@ type Run struct { // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` - // The current state of the run. + // Deprecated. Please use the `status` field instead. State *RunState `tfsdk:"state" tf:"optional"` + // The current status of the run + Status *RunStatus `tfsdk:"status" tf:"optional"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run resutls. Tasks []RunTask `tfsdk:"tasks" tf:"optional"` @@ -1905,6 +1924,17 @@ type RunState struct { UserCancelledOrTimedout types.Bool `tfsdk:"user_cancelled_or_timedout" tf:"optional"` } +// The current status of the run +type RunStatus struct { + // If the run was queued, details about the reason for queuing the run. + QueueDetails *QueueDetails `tfsdk:"queue_details" tf:"optional"` + // The current state of the run. + State types.String `tfsdk:"state" tf:"optional"` + // If the run is in a TERMINATING or TERMINATED state, details about the + // reason for terminating the run. + TerminationDetails *TerminationDetails `tfsdk:"termination_details" tf:"optional"` +} + // Used when outputting a child run, in GetRun or ListRuns. type RunTask struct { // The sequence number of this run attempt for a triggered job run. The @@ -2050,8 +2080,10 @@ type RunTask struct { // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` - // The current state of the run. + // Deprecated. 
Please use the `status` field instead. State *RunState `tfsdk:"state" tf:"optional"` + // The current status of the run + Status *RunStatus `tfsdk:"status" tf:"optional"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2619,6 +2651,66 @@ type TaskNotificationSettings struct { NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs" tf:"optional"` } +type TerminationDetails struct { + // The code indicates why the run was terminated. Additional codes might be + // introduced in future releases. * `SUCCESS`: The run was completed + // successfully. * `CANCELED`: The run was canceled during execution by the + // Databricks platform; for example, if the maximum run duration was + // exceeded. * `SKIPPED`: Run was never executed, for example, if the + // upstream task run failed, the dependency type condition was not met, or + // there were no material tasks to execute. * `INTERNAL_ERROR`: The run + // encountered an unexpected error. Refer to the state message for further + // details. * `DRIVER_ERROR`: The run encountered an error while + // communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed + // due to a cluster error. Refer to the state message for further details. * + // `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an + // error when communicating with the third party service. * + // `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid + // request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The + // workspace has reached the quota for the maximum number of concurrent + // active runs. Consider scheduling the runs over a larger time frame. * + // `FEATURE_DISABLED`: The run failed because it tried to access a feature + // unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The + // number of cluster creation, start, and upsize requests have exceeded the + // allotted rate limit. Consider spreading the run execution over a larger + // time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when + // accessing the customer blob storage. Refer to the state message for + // further details. * `RUN_EXECUTION_ERROR`: The run was completed with task + // failures. For more details, refer to the state message or run output. * + // `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while + // accessing a resource. Refer to the state message for further details. * + // `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the + // user-requested library. Refer to the state message for further details. + // The causes might include, but are not limited to: The provided library is + // invalid, there are insufficient permissions to install the library, and + // so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the + // limit of maximum concurrent runs set for the job. * + // `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has + // already reached the maximum number of contexts it is configured to + // create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run + // execution does not exist. Refer to the state message for further details. + // * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid + // configuration. Refer to the state message for further details. 
* + // `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to + // the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: + // The run was skipped due to reaching the job level queue size limit. + // + // [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now + Code types.String `tfsdk:"code" tf:"optional"` + // A descriptive message with the termination details. This field is + // unstructured and the format might change. + Message types.String `tfsdk:"message" tf:"optional"` + // * `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An + // error occurred in the Databricks platform. Please look at the [status + // page] or contact support if the issue persists. * `CLIENT_ERROR`: The run + // was terminated because of an error caused by user input or the job + // configuration. * `CLOUD_FAILURE`: The run was terminated because of an + // issue with your cloud provider. + // + // [status page]: https://status.databricks.com/ + Type types.String `tfsdk:"type" tf:"optional"` +} + // Additional details about what triggered the run type TriggerInfo struct { // The run id of the Run Job task run diff --git a/internal/service/ml_tf/model.go b/internal/service/ml_tf/model.go index 1d7a2d71fe..e8735e8d51 100755 --- a/internal/service/ml_tf/model.go +++ b/internal/service/ml_tf/model.go @@ -735,7 +735,12 @@ type JobSpecWithoutSecret struct { // Get all artifacts type ListArtifactsRequest struct { - // Token indicating the page of artifact results to fetch + // Token indicating the page of artifact results to fetch. `page_token` is + // not supported when listing artifacts in UC Volumes. A maximum of 1000 + // artifacts will be retrieved for UC Volumes. Please call + // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC + // Volumes, which supports pagination. See [List directory contents | Files + // API](/api/workspace/files/listdirectorycontents). PageToken types.String `tfsdk:"-"` // Filter artifacts matching this path (a relative path from the root // artifact directory). diff --git a/internal/service/serving_tf/model.go b/internal/service/serving_tf/model.go index 9022705a63..c40d18ee63 100755 --- a/internal/service/serving_tf/model.go +++ b/internal/service/serving_tf/model.go @@ -795,6 +795,10 @@ type ServedModelInput struct { // ARN of the instance profile that the served model will use to access AWS // resources. InstanceProfileArn types.String `tfsdk:"instance_profile_arn" tf:"optional"` + // The maximum tokens per second that the endpoint can scale up to. + MaxProvisionedThroughput types.Int64 `tfsdk:"max_provisioned_throughput" tf:"optional"` + // The minimum tokens per second that the endpoint can scale down to. + MinProvisionedThroughput types.Int64 `tfsdk:"min_provisioned_throughput" tf:"optional"` // The name of the model in Databricks Model Registry to be served or if the // model resides in Unity Catalog, the full name of model, in the form of // __catalog_name__.__schema_name__.__model_name__. @@ -817,7 +821,7 @@ type ServedModelInput struct { // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 // provisioned concurrency). If scale-to-zero is enabled, the lower bound of // the provisioned concurrency for each workload size will be 0. - WorkloadSize types.String `tfsdk:"workload_size" tf:""` + WorkloadSize types.String `tfsdk:"workload_size" tf:"optional"` // The workload type of the served model. 
The workload type selects which // type of compute to use in the endpoint. The default value for this // parameter is "CPU". For deep learning workloads, GPU acceleration is diff --git a/internal/service/vectorsearch_tf/model.go b/internal/service/vectorsearch_tf/model.go index 3eb342a87f..494b51cbc6 100755 --- a/internal/service/vectorsearch_tf/model.go +++ b/internal/service/vectorsearch_tf/model.go @@ -97,6 +97,11 @@ type DeleteIndexResponse struct { } type DeltaSyncVectorIndexSpecRequest struct { + // [Optional] Select the columns to sync with the vector index. If you leave + // this field blank, all columns from the source table are synced with the + // index. The primary key column and embedding source column or embedding + // vector column are always synced. + ColumnsToSync []types.String `tfsdk:"columns_to_sync" tf:"optional"` // The columns that contain the embedding source. EmbeddingSourceColumns []EmbeddingSourceColumn `tfsdk:"embedding_source_columns" tf:"optional"` // The columns that contain the embedding vectors. The format should be From 375539af974e76224fd6da8a719218a8b95f7605 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 11 Sep 2024 12:51:47 +0200 Subject: [PATCH 08/99] [Doc] Update `databricks_vector_search_index` docs to match latest SDK (#4008) ## Changes Should be merged after #4007 ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/vector_search_index.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/resources/vector_search_index.md b/docs/resources/vector_search_index.md index abf17447fb..0de0ac2c1f 100644 --- a/docs/resources/vector_search_index.md +++ b/docs/resources/vector_search_index.md @@ -38,6 +38,7 @@ The following arguments are supported (change of any parameter leads to recreati * `DIRECT_ACCESS`: An index that supports the direct read and write of vectors and metadata through our REST and SDK APIs. With this model, the user manages index updates. * `delta_sync_index_spec` - (object) Specification for Delta Sync Index. Required if `index_type` is `DELTA_SYNC`. * `source_table` (required) The name of the source table. + * `columns_to_sync` - (optional) list of columns to sync. If not specified, all columns are syncronized. * `embedding_source_columns` - (required if `embedding_vector_columns` isn't provided) array of objects representing columns that contain the embedding source. 
Each entry consists of: * `name` - The name of the column * `embedding_model_endpoint_name` - The name of the embedding model endpoint From 2c6b876fd9f6c38dee8e465849003e758c70423d Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Thu, 12 Sep 2024 13:16:04 +0200 Subject: [PATCH 09/99] [Internal] Add `AddPlanModifer` method for AttributeBuilder (#4009) ## Changes - Add `AddPlanModifier` methods for all AttributeBuilders so that we can do things equivalent to set supress diff or force new ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- .../pluginfw/tfschema/bool_attribute.go | 8 +++++ .../pluginfw/tfschema/customizable_schema.go | 32 +++++++++++++++++++ .../tfschema/customizable_schema_test.go | 10 ++++++ .../pluginfw/tfschema/float64_attribute.go | 8 +++++ .../pluginfw/tfschema/int64_attribute.go | 8 +++++ .../pluginfw/tfschema/list_attribute.go | 8 +++++ .../tfschema/list_nested_attribute.go | 8 +++++ .../pluginfw/tfschema/map_attribute.go | 8 +++++ .../pluginfw/tfschema/map_nested_attribute.go | 8 +++++ .../tfschema/single_nested_attribute.go | 8 +++++ .../pluginfw/tfschema/string_attribute.go | 8 +++++ 11 files changed, 114 insertions(+) diff --git a/internal/providers/pluginfw/tfschema/bool_attribute.go b/internal/providers/pluginfw/tfschema/bool_attribute.go index 1b9f6b8919..5dc4727293 100644 --- a/internal/providers/pluginfw/tfschema/bool_attribute.go +++ b/internal/providers/pluginfw/tfschema/bool_attribute.go @@ -3,6 +3,7 @@ package tfschema import ( dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -13,6 +14,7 @@ type BoolAttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.Bool + PlanModifiers []planmodifier.Bool } func (a BoolAttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -34,6 +36,7 @@ func (a BoolAttributeBuilder) BuildResourceAttribute() schema.Attribute { DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -90,3 +93,8 @@ func (a BoolAttributeBuilder) AddValidator(v validator.Bool) AttributeBuilder { a.Validators = append(a.Validators, v) return a } + +func (a BoolAttributeBuilder) AddPlanModifier(v planmodifier.Bool) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/customizable_schema.go b/internal/providers/pluginfw/tfschema/customizable_schema.go index 129d8b161a..4f29f7765e 100644 --- a/internal/providers/pluginfw/tfschema/customizable_schema.go +++ b/internal/providers/pluginfw/tfschema/customizable_schema.go @@ -5,6 +5,7 @@ import ( "reflect" "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -72,6 +73,37 @@ func (s *CustomizableSchema) AddValidator(v any, path ...string) *CustomizableSc return s } +func (s *CustomizableSchema) AddPlanModifier(v any, path ...string) *CustomizableSchema { + cb := func(attr AttributeBuilder) AttributeBuilder { + 
switch a := attr.(type) { + case BoolAttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.Bool)) + case Float64AttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.Float64)) + case Int64AttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.Int64)) + case ListAttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.List)) + case ListNestedAttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.List)) + case MapAttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.Map)) + case MapNestedAttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.Map)) + case SingleNestedAttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.Object)) + case StringAttributeBuilder: + return a.AddPlanModifier(v.(planmodifier.String)) + default: + panic(fmt.Errorf("cannot add planmodifier, attribute builder type is invalid: %s. %s", reflect.TypeOf(attr).String(), common.TerraformBugErrorMessage)) + } + } + + navigateSchemaWithCallback(&s.attr, cb, path...) + + return s +} + func (s *CustomizableSchema) SetOptional(path ...string) *CustomizableSchema { cb := func(attr AttributeBuilder) AttributeBuilder { return attr.SetOptional() diff --git a/internal/providers/pluginfw/tfschema/customizable_schema_test.go b/internal/providers/pluginfw/tfschema/customizable_schema_test.go index fc8a2078fb..e134b47c2a 100644 --- a/internal/providers/pluginfw/tfschema/customizable_schema_test.go +++ b/internal/providers/pluginfw/tfschema/customizable_schema_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" @@ -111,3 +112,12 @@ func TestCustomizeSchemaAddValidator(t *testing.T) { assert.True(t, len(scm.Attributes["description"].(schema.StringAttribute).Validators) == 1) } + +func TestCustomizeSchemaAddPlanModifier(t *testing.T) { + scm := ResourceStructToSchema(TestTfSdk{}, func(c CustomizableSchema) CustomizableSchema { + c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "description") + return c + }) + + assert.True(t, len(scm.Attributes["description"].(schema.StringAttribute).PlanModifiers) == 1) +} diff --git a/internal/providers/pluginfw/tfschema/float64_attribute.go b/internal/providers/pluginfw/tfschema/float64_attribute.go index 43bfaf4830..913b747431 100644 --- a/internal/providers/pluginfw/tfschema/float64_attribute.go +++ b/internal/providers/pluginfw/tfschema/float64_attribute.go @@ -3,6 +3,7 @@ package tfschema import ( dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -13,6 +14,7 @@ type Float64AttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.Float64 + PlanModifiers []planmodifier.Float64 } func (a Float64AttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -34,6 +36,7 @@ func (a Float64AttributeBuilder) BuildResourceAttribute() schema.Attribute { DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -90,3 +93,8 @@ func (a Float64AttributeBuilder) AddValidator(v validator.Float64) 
AttributeBuil a.Validators = append(a.Validators, v) return a } + +func (a Float64AttributeBuilder) AddPlanModifier(v planmodifier.Float64) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/int64_attribute.go b/internal/providers/pluginfw/tfschema/int64_attribute.go index e9311c3a07..5c8bd9693e 100644 --- a/internal/providers/pluginfw/tfschema/int64_attribute.go +++ b/internal/providers/pluginfw/tfschema/int64_attribute.go @@ -3,6 +3,7 @@ package tfschema import ( dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -13,6 +14,7 @@ type Int64AttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.Int64 + PlanModifiers []planmodifier.Int64 } func (a Int64AttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -34,6 +36,7 @@ func (a Int64AttributeBuilder) BuildResourceAttribute() schema.Attribute { DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -90,3 +93,8 @@ func (a Int64AttributeBuilder) AddValidator(v validator.Int64) AttributeBuilder a.Validators = append(a.Validators, v) return a } + +func (a Int64AttributeBuilder) AddPlanModifier(v planmodifier.Int64) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/list_attribute.go b/internal/providers/pluginfw/tfschema/list_attribute.go index 5a822179ba..6e58165996 100644 --- a/internal/providers/pluginfw/tfschema/list_attribute.go +++ b/internal/providers/pluginfw/tfschema/list_attribute.go @@ -4,6 +4,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -16,6 +17,7 @@ type ListAttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.List + PlanModifiers []planmodifier.List } func (a ListAttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -39,6 +41,7 @@ func (a ListAttributeBuilder) BuildResourceAttribute() schema.Attribute { DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -95,3 +98,8 @@ func (a ListAttributeBuilder) AddValidator(v validator.List) AttributeBuilder { a.Validators = append(a.Validators, v) return a } + +func (a ListAttributeBuilder) AddPlanModifier(v planmodifier.List) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/list_nested_attribute.go b/internal/providers/pluginfw/tfschema/list_nested_attribute.go index ab0adb38d4..5d80ec8500 100644 --- a/internal/providers/pluginfw/tfschema/list_nested_attribute.go +++ b/internal/providers/pluginfw/tfschema/list_nested_attribute.go @@ -3,6 +3,7 @@ package tfschema import ( dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -15,6 +16,7 @@ type ListNestedAttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.List + PlanModifiers []planmodifier.List } func (a ListNestedAttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -38,6 +40,7 @@ func (a ListNestedAttributeBuilder) BuildResourceAttribute() schema.Attribute { DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -94,3 +97,8 @@ func (a ListNestedAttributeBuilder) AddValidator(v validator.List) AttributeBuil a.Validators = append(a.Validators, v) return a } + +func (a ListNestedAttributeBuilder) AddPlanModifier(v planmodifier.List) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/map_attribute.go b/internal/providers/pluginfw/tfschema/map_attribute.go index f02d1c55b8..3793b444bb 100644 --- a/internal/providers/pluginfw/tfschema/map_attribute.go +++ b/internal/providers/pluginfw/tfschema/map_attribute.go @@ -4,6 +4,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -16,6 +17,7 @@ type MapAttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.Map + PlanModifiers []planmodifier.Map } func (a MapAttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -39,6 +41,7 @@ func (a MapAttributeBuilder) BuildResourceAttribute() schema.Attribute { DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -95,3 +98,8 @@ func (a MapAttributeBuilder) AddValidator(v validator.Map) AttributeBuilder { a.Validators = append(a.Validators, v) return a } + +func (a MapAttributeBuilder) AddPlanModifier(v planmodifier.Map) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/map_nested_attribute.go b/internal/providers/pluginfw/tfschema/map_nested_attribute.go index 1c13bfc23e..bfcf5da968 100644 --- a/internal/providers/pluginfw/tfschema/map_nested_attribute.go +++ b/internal/providers/pluginfw/tfschema/map_nested_attribute.go @@ -3,6 +3,7 @@ package tfschema import ( dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -15,6 +16,7 @@ type MapNestedAttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.Map + PlanModifiers []planmodifier.Map } func (a MapNestedAttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -38,6 +40,7 @@ func (a MapNestedAttributeBuilder) BuildResourceAttribute() schema.Attribute { DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -94,3 +97,8 @@ func (a MapNestedAttributeBuilder) AddValidator(v validator.Map) AttributeBuilde 
a.Validators = append(a.Validators, v) return a } + +func (a MapNestedAttributeBuilder) AddPlanModifier(v planmodifier.Map) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/single_nested_attribute.go b/internal/providers/pluginfw/tfschema/single_nested_attribute.go index 95e0122167..ee234db903 100644 --- a/internal/providers/pluginfw/tfschema/single_nested_attribute.go +++ b/internal/providers/pluginfw/tfschema/single_nested_attribute.go @@ -3,6 +3,7 @@ package tfschema import ( dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -15,6 +16,7 @@ type SingleNestedAttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.Object + PlanModifiers []planmodifier.Object } func (a SingleNestedAttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -38,6 +40,7 @@ func (a SingleNestedAttributeBuilder) BuildResourceAttribute() schema.Attribute DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -94,3 +97,8 @@ func (a SingleNestedAttributeBuilder) AddValidator(v validator.Object) Attribute a.Validators = append(a.Validators, v) return a } + +func (a SingleNestedAttributeBuilder) AddPlanModifier(v planmodifier.Object) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/string_attribute.go b/internal/providers/pluginfw/tfschema/string_attribute.go index 7656f1a1a4..6b81b939f6 100644 --- a/internal/providers/pluginfw/tfschema/string_attribute.go +++ b/internal/providers/pluginfw/tfschema/string_attribute.go @@ -3,6 +3,7 @@ package tfschema import ( dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" ) @@ -13,6 +14,7 @@ type StringAttributeBuilder struct { Computed bool DeprecationMessage string Validators []validator.String + PlanModifiers []planmodifier.String } func (a StringAttributeBuilder) BuildDataSourceAttribute() dataschema.Attribute { @@ -34,6 +36,7 @@ func (a StringAttributeBuilder) BuildResourceAttribute() schema.Attribute { DeprecationMessage: a.DeprecationMessage, Computed: a.Computed, Validators: a.Validators, + PlanModifiers: a.PlanModifiers, } } @@ -90,3 +93,8 @@ func (a StringAttributeBuilder) AddValidator(v validator.String) AttributeBuilde a.Validators = append(a.Validators, v) return a } + +func (a StringAttributeBuilder) AddPlanModifier(v planmodifier.String) AttributeBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} From e8ce7e7e5f82d5cef5f02bac87dc5d50c744bd86 Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:19:02 +0200 Subject: [PATCH 10/99] [Feature] Library plugin framework migration (#3979) ## Changes - Add library resource to plugin framework, not officially migrating because it has suffix `_pluginframework` - Added integration test for creation and importing ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` 
folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- internal/providers/pluginfw/pluginfw.go | 2 + .../resources/library/resource_library.go | 226 ++++++++++++++++++ .../library/resource_library_test.go | 92 +++++++ 3 files changed, 320 insertions(+) create mode 100644 internal/providers/pluginfw/resources/library/resource_library.go create mode 100644 internal/providers/pluginfw/resources/library/resource_library_test.go diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index 80ba2288cf..d09ec16e4a 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -16,6 +16,7 @@ import ( "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" @@ -42,6 +43,7 @@ var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) func (p *DatabricksProviderPluginFramework) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ qualitymonitor.ResourceQualityMonitor, + library.ResourceLibrary, } } diff --git a/internal/providers/pluginfw/resources/library/resource_library.go b/internal/providers/pluginfw/resources/library/resource_library.go new file mode 100644 index 0000000000..2c452131a4 --- /dev/null +++ b/internal/providers/pluginfw/resources/library/resource_library.go @@ -0,0 +1,226 @@ +package library + +import ( + "context" + "fmt" + "time" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/terraform-provider-databricks/clusters" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/compute_tf" + "github.com/databricks/terraform-provider-databricks/libraries" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/databricks/databricks-sdk-go" +) + +const libraryDefaultInstallationTimeout = 15 * time.Minute + +var _ resource.ResourceWithConfigure = &LibraryResource{} + +func ResourceLibrary() resource.Resource { + return &LibraryResource{} +} + +func readLibrary(ctx context.Context, w *databricks.WorkspaceClient, waitParams compute.Wait, libraryRep string, libraryExtended *LibraryExtended) diag.Diagnostics { + res, err := libraries.WaitForLibrariesInstalledSdk(ctx, w, waitParams, 
libraryDefaultInstallationTimeout) + if err != nil { + return diag.Diagnostics{diag.NewErrorDiagnostic("failed to wait for library installation", err.Error())} + } + + for _, v := range res.LibraryStatuses { + thisRep := v.Library.String() + if thisRep == libraryRep { + // This is not entirely necessary as we can directly write the fields in the config into the state, because there's no computed field. + diags := converters.GoSdkToTfSdkStruct(ctx, v.Library, libraryExtended) + + if diags.HasError() { + return diags + } + + libraryExtended.ClusterId = types.StringValue(waitParams.ClusterID) + + return nil + } + } + return diag.Diagnostics{diag.NewErrorDiagnostic("failed to find the installed library", fmt.Sprintf("failed to find %s on %s", libraryRep, waitParams.ClusterID))} +} + +type LibraryExtended struct { + compute_tf.Library + ClusterId types.String `tfsdk:"cluster_id"` +} + +type LibraryResource struct { + Client *common.DatabricksClient +} + +func (r *LibraryResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "databricks_library_pluginframework" +} + +func (r *LibraryResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Description: "Terraform schema for Databricks Library", + Attributes: tfschema.ResourceStructToSchemaMap(LibraryExtended{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { + // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "cluster_id") + // c.AddPlanModifier(objectplanmodifier.RequiresReplace(), "cran") + // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "egg") + // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "jar") + // c.AddPlanModifier(objectplanmodifier.RequiresReplace(), "maven") + // c.AddPlanModifier(objectplanmodifier.RequiresReplace(), "pypi") + // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "requirements") + // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "whl") + for field, attribute := range c.ToAttributeMap() { + switch attribute.(type) { + case tfschema.StringAttributeBuilder: + c.AddPlanModifier(stringplanmodifier.RequiresReplace(), field) + case tfschema.SingleNestedAttributeBuilder: + c.AddPlanModifier(objectplanmodifier.RequiresReplace(), field) + } + } + return c + }), + } +} + +func (r *LibraryResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if r.Client == nil { + r.Client = pluginfwcommon.ConfigureResource(req, resp) + } +} + +func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + var libraryTfSDK LibraryExtended + resp.Diagnostics.Append(req.Plan.Get(ctx, &libraryTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + + var libGoSDK compute.Library + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, libraryTfSDK, &libGoSDK)...) 
+ if resp.Diagnostics.HasError() { + return + } + installLib := compute.InstallLibraries{ + Libraries: []compute.Library{libGoSDK}, + } + req.Plan.GetAttribute(ctx, path.Root("cluster_id"), &installLib.ClusterId) + err := w.Libraries.Install(ctx, installLib) + if err != nil { + resp.Diagnostics.AddError("failed to install library", err.Error()) + return + } + waitParams := compute.Wait{ + ClusterID: installLib.ClusterId, + IsRunning: true, + } + libraryRep := libGoSDK.String() + installedLib := LibraryExtended{} + + resp.Diagnostics.Append(readLibrary(ctx, w, waitParams, libraryRep, &installedLib)...) + + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, installedLib)...) +} + +func (r *LibraryResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + var libraryTfSDK LibraryExtended + resp.Diagnostics.Append(req.State.Get(ctx, &libraryTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + var libGoSDK compute.Library + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, libraryTfSDK, &libGoSDK)...) + if resp.Diagnostics.HasError() { + return + } + clusterId := libraryTfSDK.ClusterId.ValueString() + libraryRep := libGoSDK.String() + installedLib := LibraryExtended{} + waitParams := compute.Wait{ + ClusterID: clusterId, + IsRefresh: true, + } + + resp.Diagnostics.Append(readLibrary(ctx, w, waitParams, libraryRep, &installedLib)...) + + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, installedLib)...) +} + +func (r *LibraryResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + resp.Diagnostics.AddError("failed to update library", "updating library is not supported") +} + +func (r *LibraryResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + var libraryTfSDK LibraryExtended + resp.Diagnostics.Append(req.State.Get(ctx, &libraryTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + clusterID := libraryTfSDK.ClusterId.ValueString() + var libGoSDK compute.Library + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, libraryTfSDK, &libGoSDK)...) + if resp.Diagnostics.HasError() { + return + } + libraryRep := libGoSDK.String() + _, err := clusters.StartClusterAndGetInfo(ctx, w, clusterID) + if err != nil { + resp.Diagnostics.AddError("failed to start and get cluster", err.Error()) + return + } + cll, err := w.Libraries.ClusterStatusByClusterId(ctx, clusterID) + if err != nil { + resp.Diagnostics.AddError("failed to get libraries", err.Error()) + return + } + for _, v := range cll.LibraryStatuses { + if v.Library.String() != libraryRep { + continue + } + err := w.Libraries.Uninstall(ctx, compute.UninstallLibraries{ + ClusterId: clusterID, + Libraries: []compute.Library{*v.Library}, + }) + if err != nil { + resp.Diagnostics.AddError("failed to uninstall library", err.Error()) + } + return + } + // Keeping the implementation to be consistent with the sdk-v2 implementation. Eventually we should update this to be not + // an error, for cases such as the library being manually uninstalled. 
+ resp.Diagnostics.AddError("failed to uninstall library", fmt.Sprintf("failed to find %s on %s", libraryRep, clusterID)) +} diff --git a/internal/providers/pluginfw/resources/library/resource_library_test.go b/internal/providers/pluginfw/resources/library/resource_library_test.go new file mode 100644 index 0000000000..96e699d85d --- /dev/null +++ b/internal/providers/pluginfw/resources/library/resource_library_test.go @@ -0,0 +1,92 @@ +package library_test + +import ( + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" +) + +func TestAccLibraryCreationPluginFramework(t *testing.T) { + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: `data "databricks_spark_version" "latest" { + } + resource "databricks_cluster" "this" { + cluster_name = "test-library-{var.RANDOM}" + spark_version = data.databricks_spark_version.latest.id + instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" + autotermination_minutes = 10 + num_workers = 0 + spark_conf = { + "spark.databricks.cluster.profile" = "singleNode" + "spark.master" = "local[*]" + } + custom_tags = { + "ResourceClass" = "SingleNode" + } + } + resource "databricks_library_pluginframework" "new_library" { + cluster_id = databricks_cluster.this.id + pypi = { + repo = "https://pypi.org/dummy" + package = "databricks-sdk" + } + } + `, + }) +} + +func TestAccLibraryUpdatePluginFramework(t *testing.T) { + acceptance.WorkspaceLevel(t, + acceptance.Step{ + Template: `data "databricks_spark_version" "latest" { + } + resource "databricks_cluster" "this" { + cluster_name = "cluster-{var.STICKY_RANDOM}" + spark_version = data.databricks_spark_version.latest.id + instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" + autotermination_minutes = 10 + num_workers = 0 + spark_conf = { + "spark.databricks.cluster.profile" = "singleNode" + "spark.master" = "local[*]" + } + custom_tags = { + "ResourceClass" = "SingleNode" + } + } + resource "databricks_library_pluginframework" "new_library" { + cluster_id = databricks_cluster.this.id + pypi = { + repo = "https://pypi.org/simple" + package = "databricks-sdk" + } + } + `, + }, + acceptance.Step{ + Template: `data "databricks_spark_version" "latest" { + } + resource "databricks_cluster" "this" { + cluster_name = "cluster-{var.STICKY_RANDOM}" + spark_version = data.databricks_spark_version.latest.id + instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" + autotermination_minutes = 10 + num_workers = 0 + spark_conf = { + "spark.databricks.cluster.profile" = "singleNode" + "spark.master" = "local[*]" + } + custom_tags = { + "ResourceClass" = "SingleNode" + } + } + resource "databricks_library_pluginframework" "new_library" { + cluster_id = databricks_cluster.this.id + pypi = { + package = "networkx" + } + } + `, + }, + ) +} From d3120159b2a19d00f26a37761463139ac4a623f2 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Fri, 13 Sep 2024 13:25:45 +0200 Subject: [PATCH 11/99] [Internal] Fix irregularities in plugin framework converter function errors (#4010) ## Changes I noticed few irregularities in converter functions while working on clusters plugin framework migration, creating a PR to address those: 1. Typo in error message -- in `GoSdkToTfSdkStruct` we say: `tfsdk to gosdk struct conversion failure`. It should be `gosdk to tfsdk struct conversion failure` 2. In `GoSdkToTfSdkStruct ` we don't specify destination value type but we do it in `TfSdkToGoSdkStruct`. 3. Abstract the error message out to reduce redundancy 4. 
Standardise similar types of error messages to be same so it's easier to maintain. ## Tests Added unit tests. - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .../pluginfw/converters/converters_test.go | 19 +++++++++++++++++++ .../providers/pluginfw/converters/go_to_tf.go | 13 ++++++++----- .../providers/pluginfw/converters/tf_to_go.go | 13 ++++++++----- 3 files changed, 35 insertions(+), 10 deletions(-) diff --git a/internal/providers/pluginfw/converters/converters_test.go b/internal/providers/pluginfw/converters/converters_test.go index 7b87cc8b5f..7758345ad1 100644 --- a/internal/providers/pluginfw/converters/converters_test.go +++ b/internal/providers/pluginfw/converters/converters_test.go @@ -6,6 +6,7 @@ import ( "reflect" "testing" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" ) @@ -100,6 +101,24 @@ func RunConverterTest(t *testing.T, description string, tfSdkStruct DummyTfSdk, assert.True(t, reflect.DeepEqual(convertedTfSdkStruct, tfSdkStruct), fmt.Sprintf("gosdk to tfsdk conversion - %s", description)) } +func TestTfSdkToGoSdkStructConversionFailure(t *testing.T) { + tfSdkStruct := DummyTfSdk{} + goSdkStruct := DummyGoSdk{} + actualDiagnostics := TfSdkToGoSdkStruct(context.Background(), tfSdkStruct, goSdkStruct) + expectedDiagnostics := diag.Diagnostics{diag.NewErrorDiagnostic(tfSdkToGoSdkStructConversionFailureMessage, "please provide a pointer for the gosdk struct, got DummyGoSdk")} + assert.True(t, actualDiagnostics.HasError()) + assert.True(t, actualDiagnostics.Equal(expectedDiagnostics)) +} + +func TestGoSdkToTfSdkStructConversionFailure(t *testing.T) { + tfSdkStruct := DummyTfSdk{} + goSdkStruct := DummyGoSdk{} + actualDiagnostics := GoSdkToTfSdkStruct(context.Background(), goSdkStruct, tfSdkStruct) + expectedDiagnostics := diag.Diagnostics{diag.NewErrorDiagnostic(goSdkToTfSdkStructConversionFailureMessage, "please provide a pointer for the tfsdk struct, got DummyTfSdk")} + assert.True(t, actualDiagnostics.HasError()) + assert.True(t, actualDiagnostics.Equal(expectedDiagnostics)) +} + var tests = []struct { name string tfSdkStruct DummyTfSdk diff --git a/internal/providers/pluginfw/converters/go_to_tf.go b/internal/providers/pluginfw/converters/go_to_tf.go index ba924e5936..5e39086868 100644 --- a/internal/providers/pluginfw/converters/go_to_tf.go +++ b/internal/providers/pluginfw/converters/go_to_tf.go @@ -13,6 +13,9 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/tfreflect" ) +const goSdkToTfSdkStructConversionFailureMessage = "gosdk to tfsdk struct conversion failure" +const goSdkToTfSdkFieldConversionFailureMessage = "gosdk to tfsdk field conversion failure" + // GoSdkToTfSdkStruct converts a gosdk struct into a tfsdk struct, with the folowing rules. 
// // string -> types.String @@ -38,12 +41,12 @@ func GoSdkToTfSdkStruct(ctx context.Context, gosdk interface{}, tfsdk interface{ } if destVal.Kind() != reflect.Ptr { - return diag.Diagnostics{diag.NewErrorDiagnostic("please provide a pointer for the tfsdk struct", "tfsdk to gosdk struct conversion failure")} + return diag.Diagnostics{diag.NewErrorDiagnostic(goSdkToTfSdkStructConversionFailureMessage, fmt.Sprintf("please provide a pointer for the tfsdk struct, got %s", destVal.Type().Name()))} } destVal = destVal.Elem() if srcVal.Kind() != reflect.Struct || destVal.Kind() != reflect.Struct { - return diag.Diagnostics{diag.NewErrorDiagnostic(fmt.Sprintf("input should be structs %s, %s", srcVal.Type().Name(), destVal.Type().Name()), "tfsdk to gosdk struct conversion failure")} + return diag.Diagnostics{diag.NewErrorDiagnostic(goSdkToTfSdkStructConversionFailureMessage, fmt.Sprintf("input should be structs %s, %s", srcVal.Type().Name(), destVal.Type().Name()))} } var forceSendFieldVal []string @@ -73,7 +76,7 @@ func GoSdkToTfSdkStruct(ctx context.Context, gosdk interface{}, tfsdk interface{ err := goSdkToTfSdkSingleField(ctx, srcField, destField, fieldInForceSendFields(srcFieldName, forceSendFieldVal)) if err != nil { - return diag.Diagnostics{diag.NewErrorDiagnostic(err.Error(), "gosdk to tfsdk field conversion failure")} + return diag.Diagnostics{diag.NewErrorDiagnostic(goSdkToTfSdkFieldConversionFailureMessage, err.Error())} } } return nil @@ -101,7 +104,7 @@ func goSdkToTfSdkSingleField(ctx context.Context, srcField reflect.Value, destFi // Recursively populate the nested struct. if GoSdkToTfSdkStruct(ctx, srcFieldValue, destField.Interface()).HasError() { - panic(fmt.Sprintf("Error converting gosdk to tfsdk struct. %s", common.TerraformBugErrorMessage)) + panic(fmt.Sprintf("%s. %s", goSdkToTfSdkStructConversionFailureMessage, common.TerraformBugErrorMessage)) } case reflect.Bool: boolVal := srcFieldValue.(bool) @@ -150,7 +153,7 @@ func goSdkToTfSdkSingleField(ctx context.Context, srcField reflect.Value, destFi } // resolve the nested struct by recursively calling the function if GoSdkToTfSdkStruct(ctx, srcFieldValue, destField.Addr().Interface()).HasError() { - panic(fmt.Sprintf("Error converting gosdk to tfsdk struct. %s", common.TerraformBugErrorMessage)) + panic(fmt.Sprintf("%s. %s", goSdkToTfSdkStructConversionFailureMessage, common.TerraformBugErrorMessage)) } case reflect.Slice: if srcField.IsNil() { diff --git a/internal/providers/pluginfw/converters/tf_to_go.go b/internal/providers/pluginfw/converters/tf_to_go.go index a3dc991320..6cf23decad 100644 --- a/internal/providers/pluginfw/converters/tf_to_go.go +++ b/internal/providers/pluginfw/converters/tf_to_go.go @@ -14,6 +14,9 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/tfreflect" ) +const tfSdkToGoSdkStructConversionFailureMessage = "tfsdk to gosdk struct conversion failure" +const tfSdkToGoSdkFieldConversionFailureMessage = "tfsdk to gosdk field conversion failure" + // TfSdkToGoSdkStruct converts a tfsdk struct into a gosdk struct, with the folowing rules. 
// // types.String -> string @@ -37,12 +40,12 @@ func TfSdkToGoSdkStruct(ctx context.Context, tfsdk interface{}, gosdk interface{ } if destVal.Kind() != reflect.Ptr { - return diag.Diagnostics{diag.NewErrorDiagnostic(fmt.Sprintf("please provide a pointer for the gosdk struct, got %s", destVal.Type().Name()), "tfsdk to gosdk struct conversion failure")} + return diag.Diagnostics{diag.NewErrorDiagnostic(tfSdkToGoSdkStructConversionFailureMessage, fmt.Sprintf("please provide a pointer for the gosdk struct, got %s", destVal.Type().Name()))} } destVal = destVal.Elem() if srcVal.Kind() != reflect.Struct { - return diag.Diagnostics{diag.NewErrorDiagnostic(fmt.Sprintf("input should be structs, got %s,", srcVal.Type().Name()), "tfsdk to gosdk struct conversion failure")} + return diag.Diagnostics{diag.NewErrorDiagnostic(tfSdkToGoSdkStructConversionFailureMessage, fmt.Sprintf("input should be structs, got %s,", srcVal.Type().Name()))} } forceSendFieldsField := destVal.FieldByName("ForceSendFields") @@ -61,7 +64,7 @@ func TfSdkToGoSdkStruct(ctx context.Context, tfsdk interface{}, gosdk interface{ err := tfSdkToGoSdkSingleField(ctx, srcField, destField, srcFieldName, &forceSendFieldsField) if err != nil { - return diag.Diagnostics{diag.NewErrorDiagnostic(err.Error(), "tfsdk to gosdk field conversion failure")} + return diag.Diagnostics{diag.NewErrorDiagnostic(tfSdkToGoSdkFieldConversionFailureMessage, err.Error())} } } @@ -93,7 +96,7 @@ func tfSdkToGoSdkSingleField(ctx context.Context, srcField reflect.Value, destFi // Recursively populate the nested struct. if TfSdkToGoSdkStruct(ctx, srcFieldValue, destField.Interface()).HasError() { - panic(fmt.Sprintf("Error converting tfsdk to gosdk struct. %s", common.TerraformBugErrorMessage)) + panic(fmt.Sprintf("%s. %s", tfSdkToGoSdkStructConversionFailureMessage, common.TerraformBugErrorMessage)) } } else if srcField.Kind() == reflect.Struct { tfsdkToGoSdkStructField(srcField, destField, srcFieldName, forceSendFieldsField, ctx) @@ -199,7 +202,7 @@ func tfsdkToGoSdkStructField(srcField reflect.Value, destField reflect.Value, sr } // If it is a real stuct instead of a tfsdk type, recursively resolve it. if TfSdkToGoSdkStruct(ctx, srcFieldValue, destField.Addr().Interface()).HasError() { - panic(fmt.Sprintf("Error converting tfsdk to gosdk struct. %s", common.TerraformBugErrorMessage)) + panic(fmt.Sprintf("%s. 
%s", tfSdkToGoSdkStructConversionFailureMessage, common.TerraformBugErrorMessage)) } } } From 891e0aff45633be88a32415c68cc06afcfbf4696 Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Fri, 13 Sep 2024 14:06:13 +0200 Subject: [PATCH 12/99] [Internal] Clean up comments in library resource (#4015) ## Changes - Clean up undeleted comments ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- .../pluginfw/resources/library/resource_library.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/internal/providers/pluginfw/resources/library/resource_library.go b/internal/providers/pluginfw/resources/library/resource_library.go index 2c452131a4..04b90f6298 100644 --- a/internal/providers/pluginfw/resources/library/resource_library.go +++ b/internal/providers/pluginfw/resources/library/resource_library.go @@ -73,14 +73,6 @@ func (r *LibraryResource) Schema(ctx context.Context, req resource.SchemaRequest resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Library", Attributes: tfschema.ResourceStructToSchemaMap(LibraryExtended{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { - // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "cluster_id") - // c.AddPlanModifier(objectplanmodifier.RequiresReplace(), "cran") - // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "egg") - // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "jar") - // c.AddPlanModifier(objectplanmodifier.RequiresReplace(), "maven") - // c.AddPlanModifier(objectplanmodifier.RequiresReplace(), "pypi") - // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "requirements") - // c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "whl") for field, attribute := range c.ToAttributeMap() { switch attribute.(type) { case tfschema.StringAttributeBuilder: From 1e62576102711c03453c716da156be3b7f19be66 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Fri, 13 Sep 2024 16:08:23 +0200 Subject: [PATCH 13/99] [Internal] Migrate `databricks_cluster` data source to plugin framework (#3988) ## Changes - Migrates `databricks_cluster` data source to plugin framework - Check for different int and float types in Go to Tf converter function - Use computed tag to simplify customize schema for volumes Note: The resource will be suffixed with `_pluginframework` and will be made default in another PR so the change is easily revertible. 
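For illustration, a minimal configuration using the suffixed data source could look like the sketch below (the cluster ID is a placeholder; either `cluster_id` or `cluster_name` must be set, and a lookup by name requires the name to be unique in the workspace):

```hcl
# Hypothetical example: resolve a cluster by ID with the plugin framework data source.
data "databricks_cluster_pluginframework" "by_id" {
  cluster_id = "0123-456789-abcdef" # placeholder cluster ID
}

# The same data source can also resolve a cluster by its (unique) name.
data "databricks_cluster_pluginframework" "by_name" {
  cluster_name = data.databricks_cluster_pluginframework.by_id.cluster_name
}
```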
## Tests - Added Integration tests - Added Unit tests All are passing - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- .../providers/pluginfw/converters/go_to_tf.go | 4 +- internal/providers/pluginfw/pluginfw.go | 2 + .../resources/cluster/data_cluster.go | 129 ++++++++++++++++++ .../cluster/data_cluster_acc_test.go | 28 ++++ .../resources/cluster/data_cluster_test.go | 39 ++++++ ...y_test.go => resource_library_acc_test.go} | 0 ...o => resource_quality_monitor_acc_test.go} | 4 +- .../pluginfw/resources/volume/data_volumes.go | 12 +- ...lumes_test.go => data_volumes_acc_test.go} | 6 +- 9 files changed, 209 insertions(+), 15 deletions(-) create mode 100644 internal/providers/pluginfw/resources/cluster/data_cluster.go create mode 100644 internal/providers/pluginfw/resources/cluster/data_cluster_acc_test.go create mode 100644 internal/providers/pluginfw/resources/cluster/data_cluster_test.go rename internal/providers/pluginfw/resources/library/{resource_library_test.go => resource_library_acc_test.go} (100%) rename internal/providers/pluginfw/resources/qualitymonitor/{resource_quality_monitor_test.go => resource_quality_monitor_acc_test.go} (97%) rename internal/providers/pluginfw/resources/volume/{data_volumes_test.go => data_volumes_acc_test.go} (88%) diff --git a/internal/providers/pluginfw/converters/go_to_tf.go b/internal/providers/pluginfw/converters/go_to_tf.go index 5e39086868..b86c32a21b 100644 --- a/internal/providers/pluginfw/converters/go_to_tf.go +++ b/internal/providers/pluginfw/converters/go_to_tf.go @@ -114,7 +114,7 @@ func goSdkToTfSdkSingleField(ctx context.Context, srcField reflect.Value, destFi } else { destField.Set(reflect.ValueOf(types.BoolNull())) } - case reflect.Int64: + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: // convert any kind of integer to int64 intVal := srcField.Convert(reflect.TypeOf(int64(0))).Int() // check if the value is non-zero or if the field is in the forceSendFields list @@ -123,7 +123,7 @@ func goSdkToTfSdkSingleField(ctx context.Context, srcField reflect.Value, destFi } else { destField.Set(reflect.ValueOf(types.Int64Null())) } - case reflect.Float64: + case reflect.Float32, reflect.Float64: // convert any kind of float to float64 float64Val := srcField.Convert(reflect.TypeOf(float64(0))).Float() // check if the value is non-zero or if the field is in the forceSendFields list diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index d09ec16e4a..71e91dccd8 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -16,6 +16,7 @@ import ( "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" @@ -49,6 +50,7 @@ func (p *DatabricksProviderPluginFramework) Resources(ctx context.Context) []fun func (p 
*DatabricksProviderPluginFramework) DataSources(ctx context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ + cluster.DataSourceCluster, volume.DataSourceVolumes, } } diff --git a/internal/providers/pluginfw/resources/cluster/data_cluster.go b/internal/providers/pluginfw/resources/cluster/data_cluster.go new file mode 100644 index 0000000000..9936df5fa2 --- /dev/null +++ b/internal/providers/pluginfw/resources/cluster/data_cluster.go @@ -0,0 +1,129 @@ +package cluster + +import ( + "context" + "fmt" + "strings" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/compute_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func DataSourceCluster() datasource.DataSource { + return &ClusterDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &ClusterDataSource{} + +type ClusterDataSource struct { + Client *common.DatabricksClient +} + +type ClusterInfo struct { + ClusterId types.String `tfsdk:"cluster_id" tf:"optional,computed"` + Name types.String `tfsdk:"cluster_name" tf:"optional,computed"` + ClusterInfo *compute_tf.ClusterDetails `tfsdk:"cluster_info" tf:"optional,computed"` +} + +func (d *ClusterDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "databricks_cluster_pluginframework" +} + +func (d *ClusterDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: tfschema.DataSourceStructToSchemaMap(ClusterInfo{}, nil), + } +} + +func (d *ClusterDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func validateClustersList(ctx context.Context, clusters []compute_tf.ClusterDetails, clusterName string) diag.Diagnostics { + if len(clusters) == 0 { + return diag.Diagnostics{diag.NewErrorDiagnostic(fmt.Sprintf("there is no cluster with name '%s'", clusterName), "")} + } + if len(clusters) > 1 { + clusterIDs := []string{} + for _, cluster := range clusters { + clusterIDs = append(clusterIDs, cluster.ClusterId.ValueString()) + } + return diag.Diagnostics{diag.NewErrorDiagnostic(fmt.Sprintf("there is more than one cluster with name '%s'", clusterName), fmt.Sprintf("The IDs of those clusters are: %s. When specifying a cluster name, the name must be unique. Alternatively, specify the cluster by ID using the cluster_id attribute.", strings.Join(clusterIDs, ", ")))} + } + return nil +} + +func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + var clusterInfo ClusterInfo + resp.Diagnostics.Append(req.Config.Get(ctx, &clusterInfo)...) + if resp.Diagnostics.HasError() { + return + } + clusterName := clusterInfo.Name.ValueString() + clusterId := clusterInfo.ClusterId.ValueString() + if clusterName != "" { + clustersGoSDk, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{}) + if err != nil { + resp.Diagnostics.AddError("failed to list clusters", err.Error()) + return + } + var clustersTfSDK []compute_tf.ClusterDetails + for _, cluster := range clustersGoSDk { + var clusterDetails compute_tf.ClusterDetails + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, cluster, &clusterDetails)...) + if resp.Diagnostics.HasError() { + return + } + clustersTfSDK = append(clustersTfSDK, clusterDetails) + } + namedClusters := []compute_tf.ClusterDetails{} + for _, cluster := range clustersTfSDK { + if cluster.ClusterName == clusterInfo.Name { + namedClusters = append(namedClusters, cluster) + } + } + resp.Diagnostics.Append(validateClustersList(ctx, namedClusters, clusterName)...) + if resp.Diagnostics.HasError() { + return + } + clusterInfo.ClusterInfo = &namedClusters[0] + } else if clusterId != "" { + cluster, err := w.Clusters.GetByClusterId(ctx, clusterId) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + } + resp.Diagnostics.AddError(fmt.Sprintf("failed to get cluster with cluster id: %s", clusterId), err.Error()) + return + } + var clusterDetails compute_tf.ClusterDetails + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, cluster, &clusterDetails)...) + if resp.Diagnostics.HasError() { + return + } + clusterInfo.ClusterInfo = &clusterDetails + } else { + resp.Diagnostics.AddError("you need to specify either `cluster_name` or `cluster_id`", "") + return + } + clusterInfo.ClusterId = clusterInfo.ClusterInfo.ClusterId + clusterInfo.Name = clusterInfo.ClusterInfo.ClusterName + resp.Diagnostics.Append(resp.State.Set(ctx, clusterInfo)...) 
+} diff --git a/internal/providers/pluginfw/resources/cluster/data_cluster_acc_test.go b/internal/providers/pluginfw/resources/cluster/data_cluster_acc_test.go new file mode 100644 index 0000000000..cbac44de04 --- /dev/null +++ b/internal/providers/pluginfw/resources/cluster/data_cluster_acc_test.go @@ -0,0 +1,28 @@ +package cluster_test + +import ( + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" +) + +const dataClusterTemplateById = ` + data "databricks_cluster_pluginframework" "by_id" { + cluster_id = "{env.TEST_DEFAULT_CLUSTER_ID}" + } +` + +func TestAccDataSourceClusterByID(t *testing.T) { + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: dataClusterTemplateById, + }) +} + +func TestAccDataSourceClusterByName(t *testing.T) { + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: dataClusterTemplateById + ` + data "databricks_cluster_pluginframework" "by_name" { + cluster_name = data.databricks_cluster_pluginframework.by_id.cluster_name + }`, + }) +} diff --git a/internal/providers/pluginfw/resources/cluster/data_cluster_test.go b/internal/providers/pluginfw/resources/cluster/data_cluster_test.go new file mode 100644 index 0000000000..83ee608a98 --- /dev/null +++ b/internal/providers/pluginfw/resources/cluster/data_cluster_test.go @@ -0,0 +1,39 @@ +package cluster + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/service/compute_tf" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func TestNoClusterError(t *testing.T) { + clusterName := "test-cluster-name" + clusters := []compute_tf.ClusterDetails{} + actualDiagnostics := validateClustersList(context.Background(), clusters, clusterName) + expectedDiagnostics := diag.Diagnostics{diag.NewErrorDiagnostic(fmt.Sprintf("there is no cluster with name '%s'", clusterName), "")} + assert.True(t, actualDiagnostics.HasError()) + assert.Equal(t, expectedDiagnostics, actualDiagnostics) +} + +func TestMultipleClustersError(t *testing.T) { + clusterName := "test-cluster-name" + clusters := []compute_tf.ClusterDetails{ + { + ClusterName: types.StringValue("test-cluster-name"), + ClusterId: types.StringValue("123"), + }, + { + ClusterName: types.StringValue("test-cluster-name"), + ClusterId: types.StringValue("456"), + }, + } + actualDiagnostics := validateClustersList(context.Background(), clusters, clusterName) + expectedDiagnostics := diag.Diagnostics{diag.NewErrorDiagnostic(fmt.Sprintf("there is more than one cluster with name '%s'", clusterName), "The IDs of those clusters are: 123, 456. When specifying a cluster name, the name must be unique. 
Alternatively, specify the cluster by ID using the cluster_id attribute.")} + assert.True(t, actualDiagnostics.HasError()) + assert.Equal(t, expectedDiagnostics, actualDiagnostics) +} diff --git a/internal/providers/pluginfw/resources/library/resource_library_test.go b/internal/providers/pluginfw/resources/library/resource_library_acc_test.go similarity index 100% rename from internal/providers/pluginfw/resources/library/resource_library_test.go rename to internal/providers/pluginfw/resources/library/resource_library_acc_test.go diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go similarity index 97% rename from internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go rename to internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go index a2e391bda3..f9934c9cd9 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_test.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go @@ -48,7 +48,7 @@ resource "databricks_sql_table" "myInferenceTable" { ` -func TestUcAccQualityMonitorPluginFramework(t *testing.T) { +func TestUcAccQualityMonitor(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { t.Skipf("databricks_quality_monitor resource is not available on GCP") } @@ -115,7 +115,7 @@ func TestUcAccQualityMonitorPluginFramework(t *testing.T) { }) } -func TestUcAccUpdateQualityMonitorPluginFramework(t *testing.T) { +func TestUcAccUpdateQualityMonitor(t *testing.T) { if os.Getenv("GOOGLE_CREDENTIALS") != "" { t.Skipf("databricks_quality_monitor resource is not available on GCP") } diff --git a/internal/providers/pluginfw/resources/volume/data_volumes.go b/internal/providers/pluginfw/resources/volume/data_volumes.go index cf9101b52e..590a85f95b 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes.go @@ -28,7 +28,7 @@ type VolumesDataSource struct { type VolumesList struct { CatalogName types.String `tfsdk:"catalog_name"` SchemaName types.String `tfsdk:"schema_name"` - Ids []types.String `tfsdk:"ids" tf:"optional"` + Ids []types.String `tfsdk:"ids" tf:"optional,computed"` } func (d *VolumesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { @@ -37,10 +37,7 @@ func (d *VolumesDataSource) Metadata(ctx context.Context, req datasource.Metadat func (d *VolumesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { resp.Schema = schema.Schema{ - Attributes: tfschema.DataSourceStructToSchemaMap(VolumesList{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { - c.SetComputed("ids") - return c - }), + Attributes: tfschema.DataSourceStructToSchemaMap(VolumesList{}, nil), } } @@ -69,13 +66,12 @@ func (d *VolumesDataSource) Read(ctx context.Context, req datasource.ReadRequest if err != nil { if apierr.IsMissing(err) { resp.State.RemoveResource(ctx) - return } - resp.Diagnostics.AddError(fmt.Sprintf("Failed to get volumes for the catalog:%s and schema%s", listVolumesRequest.CatalogName, listVolumesRequest.SchemaName), err.Error()) + resp.Diagnostics.AddError(fmt.Sprintf("failed to get volumes for the catalog:%s and schema%s", listVolumesRequest.CatalogName, listVolumesRequest.SchemaName), err.Error()) return } for _, v := 
range volumes { volumesList.Ids = append(volumesList.Ids, types.StringValue(v.FullName)) } - resp.State.Set(ctx, volumesList) + resp.Diagnostics.Append(resp.State.Set(ctx, volumesList)...) } diff --git a/internal/providers/pluginfw/resources/volume/data_volumes_test.go b/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go similarity index 88% rename from internal/providers/pluginfw/resources/volume/data_volumes_test.go rename to internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go index 89177583fb..0fdfc8aa50 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes_test.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func checkDataSourceVolumesPluginFrameworkPopulated(t *testing.T) func(s *terraform.State) error { +func checkDataSourceVolumesPopulated(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { _, ok := s.Modules[0].Resources["data.databricks_volumes_pluginframework.this"] require.True(t, ok, "data.databricks_volumes_pluginframework.this has to be there") @@ -20,7 +20,7 @@ func checkDataSourceVolumesPluginFrameworkPopulated(t *testing.T) func(s *terraf } } -func TestUcAccDataSourceVolumesPluginFramework(t *testing.T) { +func TestUcAccDataSourceVolumes(t *testing.T) { acceptance.UnityWorkspaceLevel(t, acceptance.Step{ Template: ` resource "databricks_catalog" "sandbox" { @@ -54,6 +54,6 @@ func TestUcAccDataSourceVolumesPluginFramework(t *testing.T) { value = length(data.databricks_volumes_pluginframework.this.ids) } `, - Check: checkDataSourceVolumesPluginFrameworkPopulated(t), + Check: checkDataSourceVolumesPopulated(t), }) } From 1153bba6de9148ba60796d85f065b47462fc681b Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Mon, 16 Sep 2024 09:01:15 -0400 Subject: [PATCH 14/99] [Feature] Add support for filters in `databricks_clusters` data source (#4014) ## Changes Version 1.50 of the Terraform Provider featured an upgrade to the Go SDK affecting cluster listing. The new List Clusters API returns all terminated clusters in the last 30 days without a limit. This results in the list operation taking considerably longer for some workspaces, especially workspaces with many jobs where clusters are frequently created. This impacts the `databricks_clusters` data source, which can be slow. This PR partially addresses this by adding support for `filter_by` to the `databricks_clusters` API. Filters expressed here are pushed to the server and result in fewer clusters being returned by the API. Users of this data source can specify a particular cluster state, cluster source, pinned status, or cluster policy ID to limit the number of clusters returned by the API, drastically speeding up performance. ## Tests Integration tests for `databricks_cluster` data source test setting the `filter_by` parameter's attributes. 
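As a rough sketch of the kind of configuration these tests exercise (the values are illustrative; the accepted values for each filter are listed in the documentation changes below):

```hcl
# Ask the server only for running or resizing interactive clusters instead of
# listing every cluster, including those terminated in the last 30 days.
data "databricks_clusters" "interactive_running" {
  filter_by {
    cluster_sources = ["UI", "API"]
    cluster_states  = ["RUNNING", "RESIZING"]
  }
}
```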
- [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- clusters/data_clusters.go | 11 ++-- docs/data-sources/clusters.md | 10 ++++ internal/acceptance/data_clusters_test.go | 70 +++++++++++++++++++++++ 3 files changed, 87 insertions(+), 4 deletions(-) diff --git a/clusters/data_clusters.go b/clusters/data_clusters.go index 45e41a432c..3fbf55c5b4 100644 --- a/clusters/data_clusters.go +++ b/clusters/data_clusters.go @@ -11,11 +11,14 @@ import ( func DataSourceClusters() common.Resource { return common.WorkspaceData(func(ctx context.Context, data *struct { - Id string `json:"id,omitempty" tf:"computed"` - Ids []string `json:"ids,omitempty" tf:"computed,slice_set"` - ClusterNameContains string `json:"cluster_name_contains,omitempty"` + Id string `json:"id,omitempty" tf:"computed"` + Ids []string `json:"ids,omitempty" tf:"computed,slice_set"` + ClusterNameContains string `json:"cluster_name_contains,omitempty"` + FilterBy *compute.ListClustersFilterBy `json:"filter_by,omitempty"` }, w *databricks.WorkspaceClient) error { - clusters, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{}) + clusters, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{ + FilterBy: data.FilterBy, + }) if err != nil { return err } diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md index bfe875b73a..ee976406c5 100644 --- a/docs/data-sources/clusters.md +++ b/docs/data-sources/clusters.md @@ -27,6 +27,16 @@ data "databricks_clusters" "all_shared" { ## Argument Reference * `cluster_name_contains` - (Optional) Only return [databricks_cluster](../resources/cluster.md#cluster_id) ids that match the given name string. +* `filter_by` - (Optional) Filters to apply to the listed clusters. See [filter_by Configuration Block](#filter_by-configuration-block) below for details. + +### filter_by Configuration Block + +The `filter_by` block controls the filtering of the listed clusters. It supports the following arguments: + +* `cluster_sources` - (Optional) List of cluster sources to filter by. Possible values are `API`, `JOB`, `MODELS`, `PIPELINE`, `PIPELINE_MAINTENANCE`, `SQL`, and `UI`. +* `cluster_states` - (Optional) List of cluster states to filter by. Possible values are `RUNNING`, `PENDING`, `RESIZING`, `RESTARTING`, `TERMINATING`, `TERMINATED`, `ERROR`, and `UNKNOWN`. +* `is_pinned` - (Optional) Whether to filter by pinned clusters. +* `policy_id` - (Optional) Filter by [databricks_cluster_policy](../resources/cluster_policy.md) id. 
## Attribute Reference diff --git a/internal/acceptance/data_clusters_test.go b/internal/acceptance/data_clusters_test.go index 45d578873d..f7e79a8103 100644 --- a/internal/acceptance/data_clusters_test.go +++ b/internal/acceptance/data_clusters_test.go @@ -1,7 +1,13 @@ package acceptance import ( + "context" "testing" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" ) func TestAccDataSourceClustersNoFilter(t *testing.T) { @@ -20,3 +26,67 @@ func TestAccDataSourceClustersWithFilter(t *testing.T) { }`, }) } + +func checkFirstCluster(t *testing.T, f func(*compute.ClusterDetails)) func(*terraform.State) error { + return func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + firstClusterId, ok := s.RootModule().Resources["data.databricks_clusters.this"].Primary.Attributes["ids.0"] + if ok { + firstCluster, err := w.Clusters.GetByClusterId(context.Background(), firstClusterId) + assert.NoError(t, err) + f(firstCluster) + } + return nil + } +} + +func TestAccDataSourceClusters_FilterBy(t *testing.T) { + WorkspaceLevel(t, Step{ + Template: ` + data "databricks_clusters" "this" { + filter_by { + cluster_sources = ["UI", "API"] + } + }`, + Check: checkFirstCluster(t, func(c *compute.ClusterDetails) { + assert.Contains(t, []compute.ClusterSource{"UI", "API"}, c.ClusterSource) + }), + }, Step{ + Template: ` + data "databricks_clusters" "this" { + filter_by { + cluster_states = ["RUNNING", "RESIZING"] + } + }`, + Check: checkFirstCluster(t, func(c *compute.ClusterDetails) { + assert.Contains(t, []compute.State{"RUNNING", "RESIZING"}, c.State) + }), + }, Step{ + Template: ` + data "databricks_clusters" "this" { + filter_by { + is_pinned = true + } + }`, + // Not possible to get whether a cluster is pinned or not + }, Step{ + Template: ` + resource "databricks_cluster_policy" "this" { + name = "test" + definition = jsonencode({ + "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": { + "type": "fixed", + "value": "jdbc:sqlserver://" + } + }) + } + data "databricks_clusters" "this" { + filter_by { + policy_id = databricks_cluster_policy.this.id + } + }`, + Check: checkFirstCluster(t, func(c *compute.ClusterDetails) { + assert.Equal(t, "abc-123", c.PolicyId) + }), + }) +} From fb178f96aece64b0fa6aa7bbb2b780eba198eab7 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Mon, 16 Sep 2024 17:09:56 +0200 Subject: [PATCH 15/99] [Fix] Permanently delete `ERROR` and `TERMINATED` state clusters if their creation fails (#4021) ## Changes If we get error or terminated cluster (after getting WaitGetClusterRunning in Create) then we permanently delete them. 
## Tests Unit tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- clusters/resource_cluster.go | 5 ++ clusters/resource_cluster_test.go | 111 ++++++++++++++++++++++++++++++ 2 files changed, 116 insertions(+) diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index 6595b92a9f..fb77a5f76d 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -472,6 +472,11 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, c *commo clusterInfo, err := clusterWaiter.GetWithTimeout(timeout) if err != nil { + // In case of "ERROR" or "TERMINATED" state, WaitGetClusterRunning returns an error and we should delete the cluster before returning + deleteError := resourceClusterDelete(ctx, d, c) + if deleteError != nil { + return fmt.Errorf("failed to create cluster: %v and failed to delete it during cleanup: %v", err, deleteError) + } return err } diff --git a/clusters/resource_cluster_test.go b/clusters/resource_cluster_test.go index b5af784be5..804067597b 100644 --- a/clusters/resource_cluster_test.go +++ b/clusters/resource_cluster_test.go @@ -164,6 +164,117 @@ func TestResourceClusterCreatePinned(t *testing.T) { assert.Equal(t, "abc", d.Id()) } +func TestResourceClusterCreateErrorFollowedByDeletion(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.1/clusters/create", + ExpectedRequest: compute.CreateCluster{ + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + }, + Response: compute.ClusterDetails{ + ClusterId: "abc", + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/clusters/get?cluster_id=abc", + Response: compute.ClusterDetails{ + ClusterId: "abc", + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + State: compute.StateTerminated, + }, + }, + { + Method: "POST", + Resource: "/api/2.1/clusters/permanent-delete", + ExpectedRequest: compute.PermanentDeleteCluster{ + ClusterId: "abc", + }, + }, + }, + Create: true, + Resource: ResourceCluster(), + State: map[string]any{ + "autotermination_minutes": 15, + "cluster_name": "Shared Autoscaling", + "spark_version": "7.1-scala12", + "node_type_id": "i3.xlarge", + "num_workers": 100, + }, + }.Apply(t) + assert.ErrorContains(t, err, "failed to reach RUNNING, got TERMINATED") + assert.Equal(t, "abc", d.Id()) +} + +func TestResourceClusterCreateErrorFollowedByDeletionError(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.1/clusters/create", + ExpectedRequest: compute.CreateCluster{ + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + }, + Response: compute.ClusterDetails{ + ClusterId: "abc", + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/clusters/get?cluster_id=abc", + Response: compute.ClusterDetails{ + ClusterId: "abc", + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + State: compute.StateTerminated, + }, + }, + { + Method: "POST", + Resource: "/api/2.1/clusters/permanent-delete", + 
ExpectedRequest: compute.PermanentDeleteCluster{ + ClusterId: "abc", + }, + Status: 500, + Response: common.APIErrorBody{ + ErrorCode: "INTERNAL_ERROR", + Message: "Internal error happened", + }, + }, + }, + Create: true, + Resource: ResourceCluster(), + State: map[string]any{ + "autotermination_minutes": 15, + "cluster_name": "Shared Autoscaling", + "spark_version": "7.1-scala12", + "node_type_id": "i3.xlarge", + "num_workers": 100, + }, + }.Apply(t) + assert.ErrorContains(t, err, "failed to create cluster: failed to reach RUNNING, got TERMINATED: and failed to delete it during cleanup: Internal error happened") + assert.Equal(t, "abc", d.Id()) +} + func TestResourceClusterCreate_WithLibraries(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ From d22064ebc231ca445d16389ba605e4a9edd767bd Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Tue, 17 Sep 2024 05:41:02 +0200 Subject: [PATCH 16/99] [Exporter] Expand list of non-interactive clusters (#4023) ## Changes So we can avoid generation of not related cluster resources ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- exporter/importables.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/importables.go b/exporter/importables.go index 88a90e3bcd..8aeeaefeef 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -327,7 +327,7 @@ var resourcesMap map[string]importable = map[string]importable{ return err } lastActiveMs := ic.getLastActiveMs() - nonInteractiveClusters := []string{"JOB", "PIPELINE_MAINTENANCE", "PIPELINE", "SQL"} + nonInteractiveClusters := []string{"JOB", "MODELS", "PIPELINE_MAINTENANCE", "PIPELINE", "SQL"} for offset, c := range clusters { if slices.Contains(nonInteractiveClusters, string(c.ClusterSource)) { // TODO: Should we check cluster name as well? From 1908a92a8c23e6bc062527bced2028b5601008c4 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Tue, 17 Sep 2024 11:09:03 +0200 Subject: [PATCH 17/99] [Exporter] Ignore `databricks_artifact_allowlist` with zero `artifact_matcher` blocks (#4019) ## Changes Don't generate `databricks_artifact_allowlist` when no `artifact_matcher` blocks are defined ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- exporter/importables.go | 8 ++++++++ exporter/importables_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/exporter/importables.go b/exporter/importables.go index 8aeeaefeef..d2cb8d0f36 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -2518,6 +2518,14 @@ var resourcesMap map[string]importable = map[string]importable{ } return nil }, + Ignore: func(ic *importContext, r *resource) bool { + numBlocks := r.Data.Get("artifact_matcher.#").(int) + if numBlocks == 0 { + log.Printf("[WARN] Ignoring artifcat allowlist with ID %s", r.ID) + ic.addIgnoredResource(fmt.Sprintf("databricks_artifact_allowlist. 
id=%s", r.ID)) + } + return numBlocks == 0 + }, Depends: []reference{ {Path: "artifact_matcher.artifact", Resource: "databricks_volume", Match: "volume_path", IsValidApproximation: isMatchingAllowListArtifact}, diff --git a/exporter/importables_test.go b/exporter/importables_test.go index b503117595..544322a745 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -1474,6 +1474,34 @@ func TestListUcAllowListSuccess(t *testing.T) { err := resourcesMap["databricks_artifact_allowlist"].List(ic) assert.NoError(t, err) assert.Equal(t, len(ic.testEmits), 3) + // Test ignore function + d := tfcatalog.ResourceArtifactAllowlist().ToResource().TestResourceData() + d.MarkNewResource() + d.Set("id", "abc") + res := ic.Importables["databricks_artifact_allowlist"].Ignore(ic, &resource{ + ID: "abc", + Data: d, + }) + assert.True(t, res) + assert.Contains(t, ic.ignoredResources, "databricks_artifact_allowlist. id=abc") + // Test ignore function, with blocks + err = common.StructToData( + tfcatalog.ArtifactAllowlistInfo{ + ArtifactType: "INIT_SCRIPT", + ArtifactMatchers: []catalog.ArtifactMatcher{ + { + Artifact: "/Volumes/inits", + MatchType: "PREFIX_MATCH", + }, + }, + }, + tfcatalog.ResourceArtifactAllowlist().Schema, d) + assert.NoError(t, err) + res = ic.Importables["databricks_artifact_allowlist"].Ignore(ic, &resource{ + ID: "abc", + Data: d, + }) + assert.False(t, res) } func TestEmitSqlParent(t *testing.T) { From e13ce2cf33bf183c9543d3bb0df1476deaf1d765 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Tue, 17 Sep 2024 14:50:34 +0200 Subject: [PATCH 18/99] [Release] Release v1.52.0 (#4022) ### New Features and Improvements * Add support for filters in `databricks_clusters` data source ([#4014](https://github.com/databricks/terraform-provider-databricks/pull/4014)). * Added `no_wait` option for clusters to skip waiting to start on cluster creation ([#3953](https://github.com/databricks/terraform-provider-databricks/pull/3953)). * Introduced Plugin Framework ([#3920](https://github.com/databricks/terraform-provider-databricks/pull/3920)). ### Bug Fixes * Add suppress diff for `azure_attributes.spot_bid_max_price` in `databricks_instance_pool` ([#3970](https://github.com/databricks/terraform-provider-databricks/pull/3970)). * Correctly send workload_type fields in `databricks_cluster` to allow users to disable usage in certain contexts ([#3972](https://github.com/databricks/terraform-provider-databricks/pull/3972)). * Fix `databricks_sql_table` treatment of properties ([#3925](https://github.com/databricks/terraform-provider-databricks/pull/3925)). * Force send fields for settings resources ([#3978](https://github.com/databricks/terraform-provider-databricks/pull/3978)). * Handle cluster deletion in `databricks_library` read ([#3909](https://github.com/databricks/terraform-provider-databricks/pull/3909)). * Make subscriptions optional for SqlAlertTask ([#3983](https://github.com/databricks/terraform-provider-databricks/pull/3983)). * Permanently delete `ERROR` and `TERMINATED` state clusters if their creation fails ([#4021](https://github.com/databricks/terraform-provider-databricks/pull/4021)). ### Documentation * Add troubleshooting guide for Provider "registry.terraform.io/databricks/databricks" planned an invalid value ([#3961](https://github.com/databricks/terraform-provider-databricks/pull/3961)). 
* Adopt official naming of Mosaic AI Vector Search ([#3971](https://github.com/databricks/terraform-provider-databricks/pull/3971)). * Document Terraform 1.0 as minimum version ([#3952](https://github.com/databricks/terraform-provider-databricks/pull/3952)). * Mention Salesforce as supported type in `databricks_connection` ([#3949](https://github.com/databricks/terraform-provider-databricks/pull/3949)). * Reimplement Azure Databricks deployment guide to use VNet injection & NPIP ([#3986](https://github.com/databricks/terraform-provider-databricks/pull/3986)). * Resolves [#3127](https://github.com/databricks/terraform-provider-databricks/pull/3127): Remove deprecated account_id field from mws_credentials resource ([#3974](https://github.com/databricks/terraform-provider-databricks/pull/3974)). * Small Grammar Corrections in Docs ([#4006](https://github.com/databricks/terraform-provider-databricks/pull/4006)). * Update `databricks_vector_search_index` docs to match latest SDK ([#4008](https://github.com/databricks/terraform-provider-databricks/pull/4008)). * Update aws_unity_catalog_assume_role_policy.md ([#3968](https://github.com/databricks/terraform-provider-databricks/pull/3968)). * Update documentation regarding authentication with Azure-managed Service Principal using GITHUB OIDC ([#3932](https://github.com/databricks/terraform-provider-databricks/pull/3932)). * Update metastore_assignment.md to properly reflect possible usage ([#3967](https://github.com/databricks/terraform-provider-databricks/pull/3967)). * Update minimum supported terraform version to 1.1.5 ([#3965](https://github.com/databricks/terraform-provider-databricks/pull/3965)). * Update resources diagram to include newer resources ([#3962](https://github.com/databricks/terraform-provider-databricks/pull/3962)). * Update workspace_binding import command ([#3944](https://github.com/databricks/terraform-provider-databricks/pull/3944)). * fix possible values for `securable_type` in `databricks_workspace_binding` ([#3942](https://github.com/databricks/terraform-provider-databricks/pull/3942)). ### Internal Changes * Add `AddPlanModifer` method for AttributeBuilder ([#4009](https://github.com/databricks/terraform-provider-databricks/pull/4009)). * Add integration tests for volumes and quality monitor plugin framework ([#3975](https://github.com/databricks/terraform-provider-databricks/pull/3975)). * Add support for `computed` tag in TfSDK Structs ([#4005](https://github.com/databricks/terraform-provider-databricks/pull/4005)). * Added `databricks_quality_monitor` resource and `databricks_volumes` data source to plugin framework ([#3958](https://github.com/databricks/terraform-provider-databricks/pull/3958)). * Allow vector search tests to fail ([#3959](https://github.com/databricks/terraform-provider-databricks/pull/3959)). * Clean up comments in library resource ([#4015](https://github.com/databricks/terraform-provider-databricks/pull/4015)). * Fix irregularities in plugin framework converter function errors ([#4010](https://github.com/databricks/terraform-provider-databricks/pull/4010)). * Make test utils public and move integration test for quality monitor ([#3993](https://github.com/databricks/terraform-provider-databricks/pull/3993)). * Migrate Share resource to Go SDK ([#3916](https://github.com/databricks/terraform-provider-databricks/pull/3916)). * Migrate `databricks_cluster` data source to plugin framework ([#3988](https://github.com/databricks/terraform-provider-databricks/pull/3988)). 
* Migrate imports for terraform plugin framework + update init test provider factory ([#3943](https://github.com/databricks/terraform-provider-databricks/pull/3943)). * Move volumes test next to plugin framework data source ([#3995](https://github.com/databricks/terraform-provider-databricks/pull/3995)). * Refactor provider and related packages ([#3940](https://github.com/databricks/terraform-provider-databricks/pull/3940)). * Support import in acceptance test + adding import state for quality monitor ([#3994](https://github.com/databricks/terraform-provider-databricks/pull/3994)). * Library plugin framework migration ([#3979](https://github.com/databricks/terraform-provider-databricks/pull/3979)). * Fix `TestAccClusterResource_WorkloadType` ([#3989](https://github.com/databricks/terraform-provider-databricks/pull/3989)). ### Dependency Updates * Bump github.com/hashicorp/hcl/v2 from 2.21.0 to 2.22.0 ([#3948](https://github.com/databricks/terraform-provider-databricks/pull/3948)). * Update Go SDK to 0.46.0 ([#4007](https://github.com/databricks/terraform-provider-databricks/pull/4007)). ### Exporter * Don't generate instance pools if the pool name is empty ([#3960](https://github.com/databricks/terraform-provider-databricks/pull/3960)). * Expand list of non-interactive clusters ([#4023](https://github.com/databricks/terraform-provider-databricks/pull/4023)). * Ignore databricks_artifact_allowlist with zero artifact_matcher blocks ([#4019](https://github.com/databricks/terraform-provider-databricks/pull/4019)). ## [Release] Release v1.51.0 ### Breaking Changes With this release, only protocol version 6 will be supported which is compatible with terraform CLI version 1.1.5 and later. If you are using an older version of the terraform CLI, please upgrade it to use this and further releases of Databricks terraform provider. ### New Features and Improvements * Automatically create `parent_path` folder when creating `databricks_dashboard resource` if it doesn't exist ([#3778](https://github.com/databricks/terraform-provider-databricks/pull/3778)). ### Bug Fixes * Fixed logging for underlying Go SDK ([#3917](https://github.com/databricks/terraform-provider-databricks/pull/3917)). * Remove not necessary field in `databricks_job` schema ([#3907](https://github.com/databricks/terraform-provider-databricks/pull/3907)). ### Internal Changes * Add AttributeBuilder for Plugin Framework schema ([#3922](https://github.com/databricks/terraform-provider-databricks/pull/3922)). * Add CustomizableSchema for Plugin Framework ([#3927](https://github.com/databricks/terraform-provider-databricks/pull/3927)). * Add StructToSchema for Plugin Framework ([#3928](https://github.com/databricks/terraform-provider-databricks/pull/3928)). * Add codegen template and generated files for tfsdk structs ([#3911](https://github.com/databricks/terraform-provider-databricks/pull/3911)). * Add converter functions and tests for plugin framework ([#3914](https://github.com/databricks/terraform-provider-databricks/pull/3914)). * Added support to use protocol version 6 provider server for SDK plugin ([#3862](https://github.com/databricks/terraform-provider-databricks/pull/3862)). * Bump Go SDK to v0.45.0 ([#3933](https://github.com/databricks/terraform-provider-databricks/pull/3933)). * Change name with the aliases in codegen template ([#3936](https://github.com/databricks/terraform-provider-databricks/pull/3936)). 
* Update jd version from latest to 1.8.1 ([#3915](https://github.com/databricks/terraform-provider-databricks/pull/3915)). * Upgrade `staticcheck` to v0.5.1 to get Go 1.23 support ([#3931](https://github.com/databricks/terraform-provider-databricks/pull/3931)). * OPENAPI_SHA check ([#3935](https://github.com/databricks/terraform-provider-databricks/pull/3935)). * Use generic error for missing clusters ([#3938](https://github.com/databricks/terraform-provider-databricks/pull/3938)) ### Exporter * Better support for notebooks with /Workspace path ([#3901](https://github.com/databricks/terraform-provider-databricks/pull/3901)). * Improve exporting of DLT and test coverage ([#3898](https://github.com/databricks/terraform-provider-databricks/pull/3898)). --- CHANGELOG.md | 72 +++++++++++++++++++++++++++++++++++++++++++++++ common/version.go | 2 +- 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c7171ba1e..aecd839d63 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,77 @@ # Version changelog +## [Release] Release v1.52.0 + +### New Features and Improvements + + * Add support for filters in `databricks_clusters` data source ([#4014](https://github.com/databricks/terraform-provider-databricks/pull/4014)). + * Added `no_wait` option for clusters to skip waiting to start on cluster creation ([#3953](https://github.com/databricks/terraform-provider-databricks/pull/3953)). + * Introduced Plugin Framework ([#3920](https://github.com/databricks/terraform-provider-databricks/pull/3920)). + + +### Bug Fixes + + * Add suppress diff for `azure_attributes.spot_bid_max_price` in `databricks_instance_pool` ([#3970](https://github.com/databricks/terraform-provider-databricks/pull/3970)). + * Correctly send workload_type fields in `databricks_cluster` to allow users to disable usage in certain contexts ([#3972](https://github.com/databricks/terraform-provider-databricks/pull/3972)). + * Fix `databricks_sql_table` treatment of properties ([#3925](https://github.com/databricks/terraform-provider-databricks/pull/3925)). + * Force send fields for settings resources ([#3978](https://github.com/databricks/terraform-provider-databricks/pull/3978)). + * Handle cluster deletion in `databricks_library` read ([#3909](https://github.com/databricks/terraform-provider-databricks/pull/3909)). + * Make subscriptions optional for SqlAlertTask ([#3983](https://github.com/databricks/terraform-provider-databricks/pull/3983)). + * Permanently delete `ERROR` and `TERMINATED` state clusters if their creation fails ([#4021](https://github.com/databricks/terraform-provider-databricks/pull/4021)). + + +### Documentation + + * Add troubleshooting guide for Provider "registry.terraform.io/databricks/databricks" planned an invalid value ([#3961](https://github.com/databricks/terraform-provider-databricks/pull/3961)). + * Adopt official naming of Mosaic AI Vector Search ([#3971](https://github.com/databricks/terraform-provider-databricks/pull/3971)). + * Document Terraform 1.0 as minimum version ([#3952](https://github.com/databricks/terraform-provider-databricks/pull/3952)). + * Mention Salesforce as supported type in `databricks_connection` ([#3949](https://github.com/databricks/terraform-provider-databricks/pull/3949)). + * Reimplement Azure Databricks deployment guide to use VNet injection & NPIP ([#3986](https://github.com/databricks/terraform-provider-databricks/pull/3986)). 
+ * Resolves [#3127](https://github.com/databricks/terraform-provider-databricks/pull/3127): Remove deprecated account_id field from mws_credentials resource ([#3974](https://github.com/databricks/terraform-provider-databricks/pull/3974)). + * Small Grammar Corrections in Docs ([#4006](https://github.com/databricks/terraform-provider-databricks/pull/4006)). + * Update `databricks_vector_search_index` docs to match latest SDK ([#4008](https://github.com/databricks/terraform-provider-databricks/pull/4008)). + * Update aws_unity_catalog_assume_role_policy.md ([#3968](https://github.com/databricks/terraform-provider-databricks/pull/3968)). + * Update documentation regarding authentication with Azure-managed Service Principal using GITHUB OIDC ([#3932](https://github.com/databricks/terraform-provider-databricks/pull/3932)). + * Update metastore_assignment.md to properly reflect possible usage ([#3967](https://github.com/databricks/terraform-provider-databricks/pull/3967)). + * Update minimum supported terraform version to 1.1.5 ([#3965](https://github.com/databricks/terraform-provider-databricks/pull/3965)). + * Update resources diagram to include newer resources ([#3962](https://github.com/databricks/terraform-provider-databricks/pull/3962)). + * Update workspace_binding import command ([#3944](https://github.com/databricks/terraform-provider-databricks/pull/3944)). + * fix possible values for `securable_type` in `databricks_workspace_binding` ([#3942](https://github.com/databricks/terraform-provider-databricks/pull/3942)). + + +### Internal Changes + + * Add `AddPlanModifer` method for AttributeBuilder ([#4009](https://github.com/databricks/terraform-provider-databricks/pull/4009)). + * Add integration tests for volumes and quality monitor plugin framework ([#3975](https://github.com/databricks/terraform-provider-databricks/pull/3975)). + * Add support for `computed` tag in TfSDK Structs ([#4005](https://github.com/databricks/terraform-provider-databricks/pull/4005)). + * Added `databricks_quality_monitor` resource and `databricks_volumes` data source to plugin framework ([#3958](https://github.com/databricks/terraform-provider-databricks/pull/3958)). + * Allow vector search tests to fail ([#3959](https://github.com/databricks/terraform-provider-databricks/pull/3959)). + * Clean up comments in library resource ([#4015](https://github.com/databricks/terraform-provider-databricks/pull/4015)). + * Fix irregularities in plugin framework converter function errors ([#4010](https://github.com/databricks/terraform-provider-databricks/pull/4010)). + * Make test utils public and move integration test for quality monitor ([#3993](https://github.com/databricks/terraform-provider-databricks/pull/3993)). + * Migrate Share resource to Go SDK ([#3916](https://github.com/databricks/terraform-provider-databricks/pull/3916)). + * Migrate `databricks_cluster` data source to plugin framework ([#3988](https://github.com/databricks/terraform-provider-databricks/pull/3988)). + * Migrate imports for terraform plugin framework + update init test provider factory ([#3943](https://github.com/databricks/terraform-provider-databricks/pull/3943)). + * Move volumes test next to plugin framework data source ([#3995](https://github.com/databricks/terraform-provider-databricks/pull/3995)). + * Refactor provider and related packages ([#3940](https://github.com/databricks/terraform-provider-databricks/pull/3940)). 
+ * Support import in acceptance test + adding import state for quality monitor ([#3994](https://github.com/databricks/terraform-provider-databricks/pull/3994)). + * Library plugin framework migration ([#3979](https://github.com/databricks/terraform-provider-databricks/pull/3979)). + * Fix `TestAccClusterResource_WorkloadType` ([#3989](https://github.com/databricks/terraform-provider-databricks/pull/3989)). + + +### Dependency Updates + + * Bump github.com/hashicorp/hcl/v2 from 2.21.0 to 2.22.0 ([#3948](https://github.com/databricks/terraform-provider-databricks/pull/3948)). + * Update Go SDK to 0.46.0 ([#4007](https://github.com/databricks/terraform-provider-databricks/pull/4007)). + + +### Exporter + + * Don't generate instance pools if the pool name is empty ([#3960](https://github.com/databricks/terraform-provider-databricks/pull/3960)). + * Expand list of non-interactive clusters ([#4023](https://github.com/databricks/terraform-provider-databricks/pull/4023)). + * Ignore databricks_artifact_allowlist with zero artifact_matcher blocks ([#4019](https://github.com/databricks/terraform-provider-databricks/pull/4019)). + + ## [Release] Release v1.51.0 ### Breaking Changes diff --git a/common/version.go b/common/version.go index 5deca1da97..417761fcf5 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.51.0" + version = "1.52.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From af46555600f4895d84419675ae445bfe61c4143c Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 18 Sep 2024 07:41:00 -0400 Subject: [PATCH 19/99] [Doc] Add guide for OIDC authentication (#4016) ## Changes To encourage users to follow best security practices, we recommend authenticating to Databricks using OIDC where possible. This PR introduces a guide for setting up OIDC from GitHub Actions and Azure DevOps when authenticating to Azure and GCP Databricks. Currently, Databricks on AWS doesn't support OIDC authentication natively. ## Tests - [x] The GCP configuration is used internally already; this just documents how we've set up OIDC. Thanks @hectorcast-db! - [ ] The Azure configuration still needs to be tested. --- docs/guides/azure-authenticate-with-oidc.md | 199 ++++++++++++++++++++ docs/guides/gcp-authenticate-with-oidc.md | 144 ++++++++++++++ 2 files changed, 343 insertions(+) create mode 100644 docs/guides/azure-authenticate-with-oidc.md create mode 100644 docs/guides/gcp-authenticate-with-oidc.md diff --git a/docs/guides/azure-authenticate-with-oidc.md b/docs/guides/azure-authenticate-with-oidc.md new file mode 100644 index 0000000000..85015a77cb --- /dev/null +++ b/docs/guides/azure-authenticate-with-oidc.md @@ -0,0 +1,199 @@ +--- +page_title: "Authenticate with OpenID Connect: Azure" +--- + +# Authenticate with OpenID Connect + +OpenID Connect (OIDC) is an authentication protocol allowing users to authenticate to applications without managing long-lived credentials. The Terraform Provider for Databricks can leverage OIDC to authenticate to Databricks accounts and workspaces. This guide will walk you through the steps to authenticate to Azure Databricks using OIDC on GitHub Actions and Azure DevOps. + +This guide assumes that you have an existing Azure Databricks workspace. + +## GitHub Actions + +### Configure your service principal with federated credentials + +First, you need to create a service principal with federated credentials. 
This service principal will be used to authenticate to Azure Databricks. You can create a service principal using the `azuread` Terraform provider. + +```hcl +provider "azurerm" { + features {} +} + +resource "azuread_application_registration" "example" { + display_name = "example" +} + +resource "azuread_service_principal" "example" { + application_id = azuread_application_registration.example.application_id +} +``` + +Then, configure the service principal to use federated credentials issued by GitHub Actions. + +```hcl +resource "azuread_application_federated_identity_credential" "example" { + application_id = azuread_application_registration.example.id + display_name = "my-repo-deploy" + description = "Deployments for my-repo" + audiences = ["api://AzureADTokenExchange"] + issuer = "https://token.actions.githubusercontent.com" + subject = "repo:/:environment:" +} +``` + +Finally, grant the service principal access to the workspace. + +```hcl +resource "azurerm_role_assignment" "example" { + scope = "/subscriptions//resourceGroups//providers/Microsoft.Databricks/workspaces/" + role_definition_name = "Contributor" + principal_id = azuread_service_principal.example.id +} +``` + +### Configure the Databricks provider to use the service principal + +In your Terraform configuration, configure the Databricks provider to use the service principal. + +```hcl +provider "databricks" { + azure_client_id = "" + azure_tenant_id = "" + host = "https://" +} +``` + +### Create a GitHub Action that authenticates to Azure Databricks + +To create a GitHub Action, make a `.github/workflows/deploy.yml` file in your repository. + +To authenticate to Azure Databricks using OIDC, ensure that your action has the `id-token: write` permission. You can then authenticate to Azure using the `azure/login` action. Finally, run `terraform apply` with the `azure/cli` action. + +```yaml +name: Deploy to Azure Databricks +jobs: + deploy: + runs-on: ubuntu-latest + environments: production + permissions: + id-token: write + contents: read + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Set up Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Apply Terraform + run: | + terraform init + terraform plan + terraform apply -auto-approve + working-directory: path/to/terraform/module +``` + +### (Optional) GitHub Actions Details + +The `subject` field is used to scope the federated credentials to a specific GitHub Actions environment. The `subject` field is a string in the format `repo:/:environment:`. The `organization`, `repo`, and `environment` fields should be replaced with the appropriate values. + +If the action runs without an environment context, the `subject` field should be set to `repo:/:ref:refs/heads/` if the workflow is triggered from a branch, or `repo:/:ref:refs/tags/` when triggered from a tag. + +If needed, it is also possible to configure the `subject` field for your organization or repository. See the [GitHub Actions OIDC documentation](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/about-security-hardening-with-openid-connect) for more information about how to configure the `subject` field. + +## Azure DevOps + +In Azure DevOps, you can use Workload Identity federation to authenticate to Azure Databricks using OIDC. This allows you to authenticate to Azure Databricks using federated credentials issued by Azure DevOps. 
Today, the Terraform Provider for Databricks leverages the Azure CLI to use workload identity federation in Azure DevOps. This guide will walk you through the steps to authenticate to Azure Databricks using OIDC on Azure DevOps.
+
+### Configure a service connection for your DevOps pipeline
+
+First, you need to create a service principal in Microsoft Entra ID with federated credentials. This principal will be used to authenticate to Azure Databricks. You can create it using the `azuread` Terraform provider with the following template:
+
+```hcl
+provider "azurerm" {
+  features {}
+}
+
+resource "azuread_application_registration" "example" {
+  display_name = "example"
+}
+
+resource "azuread_service_principal" "example" {
+  application_id = azuread_application_registration.example.application_id
+}
+```
+
+Then, configure the service principal to use federated credentials issued by Azure DevOps.
+
+```hcl
+resource "azuread_application_federated_identity_credential" "example" {
+  application_id = azuread_application_registration.example.id
+  display_name   = "my-repo-deploy"
+  description    = "Deployments for my-repo"
+  audiences      = ["api://AzureADTokenExchange"]
+  issuer         = "https://vstoken.dev.azure.com/"
+  subject        = "sc:////"
+}
+```
+
+Finally, grant the service principal access to the workspace.
+
+```hcl
+resource "azurerm_role_assignment" "example" {
+  scope                = "/subscriptions//resourceGroups//providers/Microsoft.Databricks/workspaces/"
+  role_definition_name = "Contributor"
+  principal_id         = azuread_service_principal.example.id
+}
+```
+
+In Azure DevOps, navigate to the project settings and create a new service connection. Select `Azure Resource Manager`, then `Workload Identity federation (manual)`, and enter the subscription ID, subscription name, service principal ID and tenant ID in the dialog. Note that the Issuer and Subject Identifier fields must match the `issuer` and `subject` attributes of the `azuread_application_federated_identity_credential` resource.
+
+### Configure the Databricks provider to use the service principal
+
+In your Terraform configuration, configure the Databricks provider to use the service principal.
+
+```hcl
+provider "databricks" {
+  azure_client_id = ""
+  azure_tenant_id = ""
+  host            = "https://"
+}
+```
+
+### Create a DevOps Pipeline that authenticates to Azure Databricks
+
+To create a pipeline, make a `pipelines/deploy.yml` file in your repository.
+
+To authenticate to Azure Databricks using OIDC, use the `AzureCLI@2` task. This automatically authenticates the Azure CLI using the service connection you created earlier. The Terraform Provider for Databricks will detect the authenticated CLI and use it to authenticate to Azure Databricks.
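If the build agent exposes more than one Azure credential, the provider block can also pin CLI-based authentication explicitly before running the pipeline below. This is a minimal, hypothetical sketch: it assumes the provider's `auth_type` attribute with the `azure-cli` value, and the host placeholder should be replaced with your workspace URL.

```hcl
provider "databricks" {
  host = "https://"
  # Reuse the Azure CLI login performed inside the AzureCLI@2 task
  # instead of picking up other credential sources on the agent.
  auth_type = "azure-cli"
}
```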
+ +```yaml +steps: + - task: Checkout@1 + displayName: "Checkout repository" + inputs: + repository: "self" + path: "main" + + - task: TerraformInstaller@0 + inputs: + terraformVersion: "latest" + + - task: AzureCLI@2 + displayName: "TF init" + inputs: + azureSubscription: + scriptType: bash + scriptLocation: inlineScript + workingDirectory: "$(Pipeline.Workspace)/main//path/to/terraform/module" + inlineScript: | + terraform init + terraform plan + terraform apply -auto-approve +``` + +## References + +For more information about OIDC and the above OIDC providers, see the following resources: + +- [GitHub Actions OIDC documentation](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/about-security-hardening-with-openid-connect) +- [Azure DevOps Workload federation blog post](https://devblogs.microsoft.com/devops/introduction-to-azure-devops-workload-identity-federation-oidc-with-terraform/) diff --git a/docs/guides/gcp-authenticate-with-oidc.md b/docs/guides/gcp-authenticate-with-oidc.md new file mode 100644 index 0000000000..873d3ec96b --- /dev/null +++ b/docs/guides/gcp-authenticate-with-oidc.md @@ -0,0 +1,144 @@ +--- +page_title: "Authenticate with OpenID Connect: Google Cloud" +--- + +# Authenticate with OpenID Connect + +OpenID Connect (OIDC) is an authentication protocol allowing users to authenticate to applications without managing long-lived credentials. The Terraform Provider for Databricks can leverage OIDC to authenticate to Databricks accounts and workspaces. For Databricks on Google Cloud, the provider can authenticate leveraging OIDC using workload identity pools. This guide will walk you through the steps to authenticate to Databricks using OIDC on GitHub Actions. + +This guide assumes that you have an existing GCP Databricks workspace. + +## GitHub Actions + +### Configure your service account and workload identity pool + +First, you need to create a service account and a workload identity pool. The pool is configured to allow clients using OIDC to assume the identity of the service account. The service account will be used to authenticate to Databricks on GCP. You can create a service account using the `google` Terraform provider. + +```hcl +provider "google" { + features {} +} + +resource "google_service_account" "github_actions" { + project = "" + account_id = "github-actions" + display_name = "GitHub Actions Service Account" +} +``` + +Then, create the workload identity pool, and configure it to use the service account. 
+
+```hcl
+resource "google_iam_workload_identity_pool" "github_pool" {
+  project                   = ""
+  workload_identity_pool_id = "github-pool"
+  display_name              = "GitHub Actions Pool"
+  description               = "Identity pool for GitHub Actions"
+}
+
+resource "google_iam_workload_identity_pool_provider" "github_provider" {
+  project                            = ""
+  workload_identity_pool_id          = google_iam_workload_identity_pool.github_pool.workload_identity_pool_id
+  workload_identity_pool_provider_id = "github-provider"
+  display_name                       = "GitHub Actions Provider"
+  attribute_mapping = {
+    "google.subject"       = "assertion.sub"
+    "attribute.actor"      = "assertion.actor"
+    "attribute.repository" = "assertion.repository"
+    "attribute.ref"        = "assertion.ref"
+    "attribute.event_name" = "assertion.event_name"
+  }
+  oidc {
+    issuer_uri = "https://token.actions.githubusercontent.com"
+  }
+  attribute_condition = "assertion.sub == 'repo:/:environment:'"
+}
+
+resource "google_service_account_iam_binding" "workload_identity_user" {
+  service_account_id = google_service_account.github_actions.name
+  role               = "roles/iam.workloadIdentityUser"
+  members = [
+    "principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.github_pool.name}/attribute.repository//"
+  ]
+}
+
+resource "google_project_iam_member" "token_creator_binding" {
+  project = ""
+  role    = "roles/iam.serviceAccountTokenCreator"
+  member  = "serviceAccount:${google_service_account.github_actions.email}"
+}
+```
+
+Finally, grant the service principal access to the workspace by following the instructions in the [Databricks documentation](https://docs.gcp.databricks.com/en/dev-tools/google-id-auth.html#step-2-assign-your-google-cloud-service-account-to-your-databricks-account).
+
+### Configure the Databricks provider to use the service principal
+
+In your Terraform configuration, configure the Databricks provider to use the service principal.
+
+```hcl
+# account-level provider
+provider "databricks" {
+  host                   = "https://accounts.gcp.databricks.com"
+  account_id             = ""
+  google_service_account = google_service_account.github_actions.email
+}
+
+# workspace-level provider
+provider "databricks" {
+  host                   = "https://"
+  google_service_account = google_service_account.github_actions.email
+}
+```
+
+### Create a GitHub Action that authenticates to Databricks on Google Cloud
+
+To create a GitHub Action, make a `.github/workflows/deploy.yml` file in your repository.
+
+To authenticate to Databricks using OIDC, ensure that your action has the `id-token: write` permission. You can then authenticate to Google using the `google-github-actions/auth` action. Finally, run `terraform apply`.
+
+```yaml
+name: Deploy to Databricks on GCP
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    environment: production
+    permissions:
+      id-token: write
+      contents: read
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: "Authenticate to Google Cloud"
+        uses: "google-github-actions/auth@v2"
+        with:
+          token_format: "access_token"
+          workload_identity_provider: "projects//locations/global/workloadIdentityPools/github-pool/providers/github-provider"
+          service_account: ""
+
+      - name: Set up Terraform
+        uses: hashicorp/setup-terraform@v3
+
+      - name: Terraform Init
+        run: terraform init
+        working-directory: path/to/terraform/module
+```
+
+### (Optional) GitHub Actions Details
+
+The `subject` field is used to scope the federated credentials to a specific GitHub Actions environment. The `subject` field is a string in the format `repo:/:environment:`.
The `organization`, `repo`, and `environment` fields should be replaced with the appropriate values. + +If the action runs without an environment context, the `subject` field should be set to `repo:/:ref:refs/heads/` if the workflow is triggered from a branch, or `repo:/:ref:refs/tags/` when triggered from a tag. + +If needed, it is also possible to configure the `subject` field for your organization or repository. See the [GitHub Actions OIDC documentation](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/about-security-hardening-with-openid-connect) for more information about how to configure the `subject` field. + +### (Optional) Restrict access to the workload identity pool + +The workload identity pool provider can be configured to restrict access to specific repositories, branches, or tags. The `attribute_condition` field in the `google_iam_workload_identity_pool_provider` resource specifies the conditions under which the provider will issue tokens. See [the Google Cloud reference](https://cloud.google.com/iam/docs/workload-identity-federation#conditions) for more information. + +## References + +For more information about OIDC and the above OIDC providers, see the following resources: + +- [GitHub Actions OIDC documentation](https://docs.github.com/en/actions/security-for-github-actions/security-hardening-your-deployments/about-security-hardening-with-openid-connect) +- [Google Cloud Workload Identity documentation](https://cloud.google.com/iam/docs/workload-identity-federation) From b827aeccb86c1cd7c5803e8a755d7085ebbdb207 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Tue, 24 Sep 2024 03:34:33 +0200 Subject: [PATCH 20/99] [Doc] Fixing links to `databricks_service_principal` in TF guides (#4020) ## Changes Resolves #4017 ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/guides/aws-e2-firewall-hub-and-spoke.md | 2 +- docs/guides/aws-e2-firewall-workspace.md | 2 +- docs/guides/aws-workspace.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/guides/aws-e2-firewall-hub-and-spoke.md b/docs/guides/aws-e2-firewall-hub-and-spoke.md index 78c0ff0cb2..9b5f785f67 100644 --- a/docs/guides/aws-e2-firewall-hub-and-spoke.md +++ b/docs/guides/aws-e2-firewall-hub-and-spoke.md @@ -12,7 +12,7 @@ You can provision multiple Databricks workspaces with Terraform, and where many ## Provider initialization for AWS workspaces -This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. If you use AWS Firewall to block most traffic but allow the URLs to which Databricks needs to connect, please update the configuration based on your region. You can get the configuration details for your region from [Firewall Appliance](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#firewall-appliance-infrastructure) document. 
+This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](../resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. If you use AWS Firewall to block most traffic but allow the URLs to which Databricks needs to connect, please update the configuration based on your region. You can get the configuration details for your region from [Firewall Appliance](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#firewall-appliance-infrastructure) document. ```hcl variable "client_id" {} diff --git a/docs/guides/aws-e2-firewall-workspace.md b/docs/guides/aws-e2-firewall-workspace.md index d7b00334f9..5e7af4c57c 100644 --- a/docs/guides/aws-e2-firewall-workspace.md +++ b/docs/guides/aws-e2-firewall-workspace.md @@ -14,7 +14,7 @@ For more information, please visit [Data Exfiltration Protection With Databricks ## Provider initialization for AWS workspaces -This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. If you are using AWS Firewall to block most traffic but allow the URLs that Databricks needs to connect to, please update the configuration based on your region. You can get the configuration details for your region from [Firewall Appliance](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#firewall-appliance-infrastructure) document. +This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](../resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. If you are using AWS Firewall to block most traffic but allow the URLs that Databricks needs to connect to, please update the configuration based on your region. You can get the configuration details for your region from [Firewall Appliance](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#firewall-appliance-infrastructure) document. ```hcl variable "client_id" {} diff --git a/docs/guides/aws-workspace.md b/docs/guides/aws-workspace.md index 596076f66c..47b9e15e24 100644 --- a/docs/guides/aws-workspace.md +++ b/docs/guides/aws-workspace.md @@ -12,7 +12,7 @@ You can provision multiple Databricks workspaces with Terraform. 
## Provider initialization for AWS workspaces -This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. +This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](../resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. ```hcl variable "client_id" {} From ae06c79a8faefce0faf97a48fe89e4826bee1ff9 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Sat, 28 Sep 2024 13:16:23 +0200 Subject: [PATCH 21/99] [Feature] Add computed attribute `table_serving_url` to `databricks_online_table` (#4048) ## Changes New attribute was added, but it wasn't marked as `computed` in the TF resource definition ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [x] using Go SDK --- catalog/resource_online_table.go | 1 + docs/resources/online_table.md | 1 + 2 files changed, 2 insertions(+) diff --git a/catalog/resource_online_table.go b/catalog/resource_online_table.go index 0dd75ffe90..ca46b8eed1 100644 --- a/catalog/resource_online_table.go +++ b/catalog/resource_online_table.go @@ -59,6 +59,7 @@ func ResourceOnlineTable() common.Resource { common.CustomizeSchemaPath(m, "spec", "source_table_full_name").SetCustomSuppressDiff(common.EqualFoldDiffSuppress) common.CustomizeSchemaPath(m, "name").SetRequired().SetForceNew() common.CustomizeSchemaPath(m, "status").SetReadOnly() + common.CustomizeSchemaPath(m, "table_serving_url").SetReadOnly() common.CustomizeSchemaPath(m, "spec", "pipeline_id").SetReadOnly() runTypes := []string{"spec.0.run_triggered", "spec.0.run_continuously"} diff --git a/docs/resources/online_table.md b/docs/resources/online_table.md index 66aa48025c..dbaaeafa58 100644 --- a/docs/resources/online_table.md +++ b/docs/resources/online_table.md @@ -50,6 +50,7 @@ In addition to all arguments above, the following attributes are exported: * `status` - object describing status of the online table: * `detailed_state` - The state of the online table. * `message` - A text description of the current state of the online table. +* `table_serving_url` - Data serving REST API URL for this table. 
## Import From 1cfc531666d0179395a170f5ea61decfe22bf881 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Mon, 30 Sep 2024 11:02:22 +0200 Subject: [PATCH 22/99] [Feature] Add support for Identity Column in `databricks_sql_table` (#4035) ## Changes Add support for Identity Column in `databricks_sql_table` ## Tests - [X] `make test` run locally - [X] relevant change in `docs/` folder - [X] covered with integration tests in `internal/acceptance` - [X] relevant acceptance tests are passing - [X] using Go SDK --------- Co-authored-by: Miles Yucht --- catalog/resource_sql_table.go | 70 +++++++++- catalog/resource_sql_table_test.go | 193 ++++++++++++++++++++++++++ docs/resources/sql_table.md | 41 ++++++ internal/acceptance/sql_table_test.go | 64 +++++++++ 4 files changed, 363 insertions(+), 5 deletions(-) diff --git a/catalog/resource_sql_table.go b/catalog/resource_sql_table.go index 922d2d0cd2..ce9d4dbd7a 100644 --- a/catalog/resource_sql_table.go +++ b/catalog/resource_sql_table.go @@ -2,6 +2,7 @@ package catalog import ( "context" + "encoding/json" "fmt" "log" "reflect" @@ -22,12 +23,24 @@ import ( var MaxSqlExecWaitTimeout = 50 type SqlColumnInfo struct { - Name string `json:"name"` - Type string `json:"type_text,omitempty" tf:"alias:type,computed"` - Comment string `json:"comment,omitempty"` - Nullable bool `json:"nullable,omitempty" tf:"default:true"` + Name string `json:"name"` + Type string `json:"type_text,omitempty" tf:"alias:type,computed"` + Identity IdentityColumn `json:"identity,omitempty"` + Comment string `json:"comment,omitempty"` + Nullable bool `json:"nullable,omitempty" tf:"default:true"` + TypeJson string `json:"type_json,omitempty" tf:"computed"` } +type TypeJson struct { + Metadata map[string]any `json:"metadata,omitempty"` +} + +type IdentityColumn string + +const IdentityColumnNone IdentityColumn = "" +const IdentityColumnAlways IdentityColumn = "always" +const IdentityColumnDefault IdentityColumn = "default" + type SqlTableInfo struct { Name string `json:"name"` CatalogName string `json:"catalog_name" tf:"force_new"` @@ -108,6 +121,28 @@ func parseComment(s string) string { return strings.ReplaceAll(strings.ReplaceAll(s, `\'`, `'`), `'`, `\'`) } +func reconstructIdentity(c *SqlColumnInfo) (IdentityColumn, error) { + if c.TypeJson == "" { + return IdentityColumnNone, nil + } + var typeJson TypeJson + err := json.Unmarshal([]byte(c.TypeJson), &typeJson) + if err != nil { + return IdentityColumnNone, err + } + if _, ok := typeJson.Metadata["delta.identity.start"]; !ok { + return IdentityColumnNone, nil + } + explicit, ok := typeJson.Metadata["delta.identity.allowExplicitInsert"] + if !ok { + return IdentityColumnNone, nil + } + if explicit.(bool) { + return IdentityColumnDefault, nil + } + return IdentityColumnAlways, nil +} + func (ti *SqlTableInfo) initCluster(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) (err error) { defaultClusterName := "terraform-sql-table" clustersAPI := clusters.NewClustersAPI(ctx, c) @@ -171,7 +206,22 @@ func (ti *SqlTableInfo) getOrCreateCluster(clusterName string, clustersAPI clust return aclCluster.ClusterID, nil } +func (ci *SqlColumnInfo) getColumnType() string { + var colType string + switch ci.Identity { + case IdentityColumnAlways: + colType = fmt.Sprintf("%s GENERATED ALWAYS AS IDENTITY", ci.Type) + case IdentityColumnDefault: + colType = fmt.Sprintf("%s GENERATED BY DEFAULT AS IDENTITY", ci.Type) + default: + colType = ci.Type + } + return colType +} + func (ti *SqlTableInfo) serializeColumnInfo(col 
SqlColumnInfo) string { + var colType = col.getColumnType() + notNull := "" if !col.Nullable { notNull = " NOT NULL" @@ -181,7 +231,7 @@ func (ti *SqlTableInfo) serializeColumnInfo(col SqlColumnInfo) string { if col.Comment != "" { comment = fmt.Sprintf(" COMMENT '%s'", parseComment(col.Comment)) } - return fmt.Sprintf("%s %s%s%s", col.getWrappedColumnName(), col.Type, notNull, comment) // id INT NOT NULL COMMENT 'something' + return fmt.Sprintf("%s %s%s%s", col.getWrappedColumnName(), colType, notNull, comment) // id INT NOT NULL COMMENT 'something' } func (ti *SqlTableInfo) serializeColumnInfos() string { @@ -502,6 +552,9 @@ func assertNoColumnTypeDiff(oldCols []interface{}, newColumnInfos []SqlColumnInf if getColumnType(oldColMap["type"].(string)) != getColumnType(newColumnInfos[i].Type) { return fmt.Errorf("changing the 'type' of an existing column is not supported") } + if oldColMap["identity"].(string) != string(newColumnInfos[i].Identity) { + return fmt.Errorf("changing the 'identity' type of an existing column is not supported") + } } return nil } @@ -602,6 +655,13 @@ func ResourceSqlTable() common.Resource { if err != nil { return err } + for i := range ti.ColumnInfos { + c := &ti.ColumnInfos[i] + c.Identity, err = reconstructIdentity(c) + if err != nil { + return err + } + } return common.StructToData(ti, tableSchema, d) }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/catalog/resource_sql_table_test.go b/catalog/resource_sql_table_test.go index 795a1d3f31..f2f0a6c5e2 100644 --- a/catalog/resource_sql_table_test.go +++ b/catalog/resource_sql_table_test.go @@ -35,6 +35,36 @@ func TestResourceSqlTableCreateStatement_External(t *testing.T) { assert.Contains(t, stmt, "COMMENT 'terraform managed'") } +func TestResourceSqlTableCreateStatement_IdentityColumn(t *testing.T) { + ti := &SqlTableInfo{ + Name: "bar", + CatalogName: "main", + SchemaName: "foo", + TableType: "EXTERNAL", + DataSourceFormat: "DELTA", + StorageLocation: "s3://ext-main/foo/bar1", + StorageCredentialName: "somecred", + Comment: "terraform managed", + ColumnInfos: []SqlColumnInfo{ + { + Name: "id", + Type: "bigint", + Identity: "default", + }, + { + Name: "name", + Comment: "a comment", + }, + }, + } + stmt := ti.buildTableCreateStatement() + assert.Contains(t, stmt, "CREATE EXTERNAL TABLE `main`.`foo`.`bar`") + assert.Contains(t, stmt, "USING DELTA") + assert.Contains(t, stmt, "(`id` bigint GENERATED BY DEFAULT AS IDENTITY NOT NULL, `name` NOT NULL COMMENT 'a comment')") + assert.Contains(t, stmt, "LOCATION 's3://ext-main/foo/bar1' WITH (CREDENTIAL `somecred`)") + assert.Contains(t, stmt, "COMMENT 'terraform managed'") +} + func TestResourceSqlTableCreateStatement_View(t *testing.T) { ti := &SqlTableInfo{ Name: "bar", @@ -1334,6 +1364,169 @@ func TestResourceSqlTableCreateTable_ExistingSQLWarehouse(t *testing.T) { assert.NoError(t, err) } +func TestResourceSqlTableCreateTableWithIdentityColumn_ExistingSQLWarehouse(t *testing.T) { + qa.ResourceFixture{ + CommandMock: func(commandStr string) common.CommandResults { + return common.CommandResults{ + ResultType: "", + Data: nil, + } + }, + HCL: ` + name = "bar" + catalog_name = "main" + schema_name = "foo" + table_type = "MANAGED" + data_source_format = "DELTA" + storage_location = "abfss://container@account/somepath" + warehouse_id = "existingwarehouse" + + column { + name = "id" + type = "bigint" + identity = "default" + } + column { + name = "name" + type = "string" + comment = "name of thing" + } + column 
{ + name = "number" + type = "bigint" + identity = "always" + } + comment = "this table is managed by terraform" + `, + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/sql/statements/", + ExpectedRequest: sql.ExecuteStatementRequest{ + Statement: "CREATE TABLE `main`.`foo`.`bar` (`id` bigint GENERATED BY DEFAULT AS IDENTITY, `name` string COMMENT 'name of thing', `number` bigint GENERATED ALWAYS AS IDENTITY)\nUSING DELTA\nCOMMENT 'this table is managed by terraform'\nLOCATION 'abfss://container@account/somepath';", + WaitTimeout: "50s", + WarehouseId: "existingwarehouse", + OnWaitTimeout: sql.ExecuteStatementRequestOnWaitTimeoutCancel, + }, + Response: sql.StatementResponse{ + StatementId: "statement1", + Status: &sql.StatementStatus{ + State: "SUCCEEDED", + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/tables/main.foo.bar", + Response: SqlTableInfo{ + Name: "bar", + CatalogName: "main", + SchemaName: "foo", + TableType: "EXTERNAL", + DataSourceFormat: "DELTA", + StorageLocation: "s3://ext-main/foo/bar1", + StorageCredentialName: "somecred", + Comment: "terraform managed", + Properties: map[string]string{ + "one": "two", + "three": "four", + }, + ColumnInfos: []SqlColumnInfo{ + { + Name: "id", + Type: "bigint", + TypeJson: "{\"type\":\"bigint\",\"nullable\":true, \"metadata\":{\"delta.identity.start\":1,\"delta.identity.allowExplicitInsert\":true}}", + }, + { + Name: "name", + Type: "string", + Comment: "name of thing", + }, + { + Name: "number", + Type: "bigint", + TypeJson: "{\"type\":\"bigint\",\"nullable\":true, \"metadata\":{\"delta.identity.start\":1,\"delta.identity.allowExplicitInsert\":false}}", + }, + }, + }, + }, + }, + Create: true, + Resource: ResourceSqlTable(), + }.ApplyAndExpectData(t, map[string]any{ + "column.0.identity": "default", + "column.1.identity": "", + "column.2.identity": "always", + }) +} + +func TestResourceSqlTableReadTableWithIdentityColumn_ExistingSQLWarehouse(t *testing.T) { + qa.ResourceFixture{ + CommandMock: func(commandStr string) common.CommandResults { + return common.CommandResults{ + ResultType: "", + Data: nil, + } + }, + HCL: ` + name = "bar" + catalog_name = "main" + schema_name = "foo" + table_type = "MANAGED" + data_source_format = "DELTA" + storage_location = "abfss://container@account/somepath" + warehouse_id = "existingwarehouse" + + + comment = "this table is managed by terraform" + `, + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/tables/main.foo.bar", + Response: SqlTableInfo{ + Name: "bar", + CatalogName: "main", + SchemaName: "foo", + TableType: "EXTERNAL", + DataSourceFormat: "DELTA", + StorageLocation: "s3://ext-main/foo/bar1", + StorageCredentialName: "somecred", + Comment: "terraform managed", + Properties: map[string]string{ + "one": "two", + "three": "four", + }, + ColumnInfos: []SqlColumnInfo{ + { + Name: "id", + Type: "bigint", + TypeJson: "{\"type\":\"bigint\",\"nullable\":true, \"metadata\":{\"delta.identity.start\":1,\"delta.identity.allowExplicitInsert\":false}}", + }, + { + Name: "name", + Type: "string", + Comment: "name of thing", + }, + { + Name: "number", + Type: "bigint", + TypeJson: "{\"type\":\"bigint\",\"nullable\":true, \"metadata\":{\"delta.identity.start\":1,\"delta.identity.allowExplicitInsert\":true}}", + }, + }, + }, + }, + }, + ID: "main.foo.bar", + Read: true, + Resource: ResourceSqlTable(), + }.ApplyAndExpectData(t, map[string]any{ + "column.0.identity": "always", + "column.1.identity": "", + "column.2.identity": 
"default", + }) +} + func TestResourceSqlTableCreateTable_OnlyManagedProperties(t *testing.T) { qa.ResourceFixture{ CommandMock: func(commandStr string) common.CommandResults { diff --git a/docs/resources/sql_table.md b/docs/resources/sql_table.md index aeec6d9ce9..67483248d6 100644 --- a/docs/resources/sql_table.md +++ b/docs/resources/sql_table.md @@ -109,6 +109,46 @@ resource "databricks_sql_table" "thing_view" { } ``` +## Use an Identity Column + +```hcl +resource "databricks_catalog" "sandbox" { + name = "sandbox" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } +} +resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } +} +resource "databricks_sql_table" "thing" { + provider = databricks.workspace + name = "quickstart_table" + catalog_name = databricks_catalog.sandbox.name + schema_name = databricks_schema.things.name + table_type = "MANAGED" + data_source_format = "DELTA" + storage_location = "" + column { + name = "id" + type = "bigint" + identity = "default" + } + column { + name = "name" + type = "string" + comment = "name of thing" + } + comment = "this table is managed by terraform" +} +``` + ## Argument Reference The following arguments are supported: @@ -137,6 +177,7 @@ Currently, changing the column definitions for a table will require dropping and * `name` - User-visible name of column * `type` - Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type. +* `identity` - (Optional) Whether field is an identity column. Can be `default`, `always` or unset. It is unset by default. * `comment` - (Optional) User-supplied free-form text. * `nullable` - (Optional) Whether field is nullable (Default: `true`) diff --git a/internal/acceptance/sql_table_test.go b/internal/acceptance/sql_table_test.go index 0f0a87dec9..6ba5a83714 100644 --- a/internal/acceptance/sql_table_test.go +++ b/internal/acceptance/sql_table_test.go @@ -72,6 +72,70 @@ func TestUcAccResourceSqlTable_Managed(t *testing.T) { }) } +func TestUcAccResourceSqlTableWithIdentityColumn_Managed(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + skipf(t)("databricks_sql_table resource not available on GCP") + } + UnityWorkspaceLevel(t, Step{ + Template: ` + resource "databricks_schema" "this" { + name = "{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_sql_table" "this" { + name = "bar" + catalog_name = "main" + schema_name = databricks_schema.this.name + table_type = "MANAGED" + properties = { + this = "that" + something = "else" + } + + column { + name = "id" + type = "bigint" + identity = "default" + } + column { + name = "name" + type = "string" + } + comment = "this table is managed by terraform" + owner = "account users" + }`, + }, Step{ + Template: ` + resource "databricks_schema" "this" { + name = "{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_sql_table" "this" { + name = "bar" + catalog_name = "main" + schema_name = databricks_schema.this.name + table_type = "MANAGED" + properties = { + that = "this" + something = "else2" + } + + column { + name = "id" + type = "bigint" + identity = "default" + } + column { + name = "name" + type = "string" + } + comment = "this table is managed by terraform..." 
+	}`,
+	})
+}
+
 func TestUcAccResourceSqlTable_External(t *testing.T) {
 	UnityWorkspaceLevel(t, Step{
 		Template: `

From 9c2bf50e4fbfcc19774c38583a02745c727a8d20 Mon Sep 17 00:00:00 2001
From: Miles Yucht
Date: Tue, 1 Oct 2024 07:26:13 -0400
Subject: [PATCH 23/99] [Fix] Refactor `databricks_permissions` and allow the current user to set their own permissions (#3956)

## Changes
In https://github.com/databricks/terraform-provider-databricks/commit/c441517af5ed3f2c6d793d64d5cf5d4e1ca0dc68, a check was added to prevent users from assigning any permissions for themselves in `databricks_permissions`. This unfortunately makes it impossible for users to legitimately assign themselves as the owner of certain resources, such as jobs, if they are currently owned by a different principal.

This PR removes this unnecessary restriction. If the user requests to set permissions for an object in a way that is incompatible with the object, such as removing themselves as owner, the failure will be propagated from the backend to the user instead. This does not make any changes to the way the ownership ACLs are set up (e.g. for resources that require an owner, like jobs, if the Terraform user did not specify an owner, the provider will still set the current user as the owner).

This PR also refactors the permissions resource substantially. The logic for implementing each resource type's permissions, including the field name, object type and resource-specific modifications, is colocated with the resource's own definition. The type encapsulating this is called `resourcePermissions`. As a result, the control flow is easier to follow:
* Read reads from the API, customizes the response in a resource-specific way, maps the response to the TF state representation, and stores it, or marks the resource as deleted if there are no permissions.
* Create and Update read the desired permissions from ResourceData, perform some validation, apply resource-specific customizations, then put the update, adding an owner if one is not specified.
* Delete resets the ACLs to only admins + resource-specific customizations.

Customizations are defined in the permissions/read and permissions/update packages. For update, a mini expression language is defined to support conditional application of customizations.

Lastly, this PR also migrates the resource to the Databricks SDK.

Fixes #2407.

## Tests
This PR adds integration test coverage for the `databricks_permissions` resource for nearly all supported resource types. I wasn't able to run the integration test for `authorization = "passwords"` because password-based auth is deprecated, nor for serving endpoints because of a likely race condition. Integration tests cover all permission levels and all principal types. Included is special edge-case testing for the root directory and all registered models.
- [ ] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/permissions.md | 6 +- exporter/exporter_test.go | 48 +- exporter/importables.go | 4 +- exporter/importables_test.go | 5 +- internal/acceptance/permissions_test.go | 970 ++++++++-- permissions/entity/permissions_entity.go | 18 + permissions/permission_definitions.go | 731 ++++++++ permissions/read/customizers.go | 54 + permissions/resource_permissions.go | 540 ++---- permissions/resource_permissions_test.go | 2110 ++++++++++------------ permissions/update/customizers.go | 97 + 11 files changed, 2833 insertions(+), 1750 deletions(-) create mode 100644 permissions/entity/permissions_entity.go create mode 100644 permissions/permission_definitions.go create mode 100644 permissions/read/customizers.go create mode 100644 permissions/update/customizers.go diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md index ef5e1c4de8..b47a43aba3 100644 --- a/docs/resources/permissions.md +++ b/docs/resources/permissions.md @@ -4,11 +4,11 @@ subcategory: "Security" # databricks_permissions Resource -This resource allows you to generically manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. It would guarantee that only _admins_, _authenticated principal_ and those declared within `access_control` blocks would have specified access. It is not possible to remove management rights from _admins_ group. +This resource allows you to generically manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspaces. It ensures that only _admins_, _authenticated principal_ and those declared within `access_control` blocks would have specified access. It is not possible to remove management rights from _admins_ group. --> **Note** Configuring this resource for an object will **OVERWRITE** any existing permissions of the same type unless imported, and changes made outside of Terraform will be reset unless the changes are also reflected in the configuration. +-> **Note** This resource is _authoritative_ for permissions on objects. Configuring this resource for an object will **OVERWRITE** any existing permissions of the same type unless imported, and changes made outside of Terraform will be reset. --> **Note** It is not possible to lower permissions for `admins` or your own user anywhere from `CAN_MANAGE` level, so Databricks Terraform Provider [removes](https://github.com/databricks/terraform-provider-databricks/blob/main/permissions/resource_permissions.go#L324-L332) those `access_control` blocks automatically. +-> **Note** It is not possible to lower permissions for `admins`, so Databricks Terraform Provider removes those `access_control` blocks automatically. -> **Note** If multiple permission levels are specified for an identity (e.g. `CAN_RESTART` and `CAN_MANAGE` for a cluster), only the highest level permission is returned and will cause permanent drift. 
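As an illustration of the behavior change described in this patch, a configuration like the following can now assign ownership of a job to the principal running Terraform. This is a hypothetical sketch: `databricks_job.this` and the `data-eng` group are placeholder names, not objects from this repository.

```hcl
data "databricks_current_user" "me" {}

resource "databricks_permissions" "job_acl" {
  job_id = databricks_job.this.id

  # The caller may now appear in access_control, e.g. to take over ownership of the job.
  access_control {
    user_name        = data.databricks_current_user.me.user_name
    permission_level = "IS_OWNER"
  }

  access_control {
    group_name       = "data-eng"
    permission_level = "CAN_MANAGE_RUN"
  }
}
```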
diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index a027005090..43c6c10916 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -862,7 +862,7 @@ func TestImportingClusters(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/clusters/test1", + Resource: "/api/2.0/permissions/clusters/test1?", Response: getJSONObject("test-data/get-cluster-permissions-test1-response.json"), }, { @@ -913,7 +913,7 @@ func TestImportingClusters(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/clusters/test2", + Resource: "/api/2.0/permissions/clusters/test2?", Response: getJSONObject("test-data/get-cluster-permissions-test2-response.json"), }, { @@ -923,7 +923,7 @@ func TestImportingClusters(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/cluster-policies/123", + Resource: "/api/2.0/permissions/cluster-policies/123?", Response: getJSONObject("test-data/get-cluster-policy-permissions.json"), }, { @@ -949,7 +949,7 @@ func TestImportingClusters(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/clusters/awscluster", + Resource: "/api/2.0/permissions/clusters/awscluster?", Response: getJSONObject("test-data/get-cluster-permissions-awscluster-response.json"), }, { @@ -971,7 +971,7 @@ func TestImportingClusters(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/instance-pools/pool1", + Resource: "/api/2.0/permissions/instance-pools/pool1?", ReuseRequest: true, Response: getJSONObject("test-data/get-job-permissions-14.json"), }, @@ -1089,7 +1089,7 @@ func TestImportingJobs_JobList(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/jobs/14", + Resource: "/api/2.0/permissions/jobs/14?", Response: getJSONObject("test-data/get-job-permissions-14.json"), }, { @@ -1112,7 +1112,7 @@ func TestImportingJobs_JobList(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/instance-pools/pool1", + Resource: "/api/2.0/permissions/instance-pools/pool1?", ReuseRequest: true, Response: getJSONObject("test-data/get-job-permissions-14.json"), }, @@ -1202,7 +1202,7 @@ func TestImportingJobs_JobList(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/cluster-policies/123", + Resource: "/api/2.0/permissions/cluster-policies/123?", Response: getJSONObject("test-data/get-cluster-policy-permissions.json"), }, { @@ -1218,7 +1218,7 @@ func TestImportingJobs_JobList(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/instance-pools/pool1", + Resource: "/api/2.0/permissions/instance-pools/pool1?", ReuseRequest: true, Response: getJSONObject("test-data/get-job-permissions-14.json"), }, @@ -1307,7 +1307,7 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/jobs/14", + Resource: "/api/2.0/permissions/jobs/14?", Response: getJSONObject("test-data/get-job-permissions-14.json"), ReuseRequest: true, }, @@ -1331,7 +1331,7 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/instance-pools/pool1", + Resource: "/api/2.0/permissions/instance-pools/pool1?", ReuseRequest: true, Response: getJSONObject("test-data/get-job-permissions-14.json"), }, @@ -1470,7 +1470,7 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/cluster-policies/123", + Resource: "/api/2.0/permissions/cluster-policies/123?", Response: getJSONObject("test-data/get-cluster-policy-permissions.json"), }, { 
@@ -1486,7 +1486,7 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/instance-pools/pool1", + Resource: "/api/2.0/permissions/instance-pools/pool1?", ReuseRequest: true, Response: getJSONObject("test-data/get-job-permissions-14.json"), }, @@ -1777,7 +1777,7 @@ func TestImportingRepos(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/repos/121232342", + Resource: "/api/2.0/permissions/repos/121232342?", Response: getJSONObject("test-data/get-repo-permissions.json"), }, }, @@ -1902,7 +1902,7 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/directories/4451965692354143", + Resource: "/api/2.0/permissions/directories/4451965692354143?", Response: getJSONObject("test-data/get-directory-permissions.json"), }, { @@ -1933,7 +1933,7 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/sql/warehouses/f562046bc1272886", + Resource: "/api/2.0/permissions/sql/warehouses/f562046bc1272886?", Response: getJSONObject("test-data/get-sql-endpoint-permissions.json"), }, { @@ -1962,12 +1962,12 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/preview/sql/permissions/queries/16c4f969-eea0-4aad-8f82-03d79b078dcc", + Resource: "/api/2.0/permissions/sql/queries/16c4f969-eea0-4aad-8f82-03d79b078dcc?", Response: getJSONObject("test-data/get-sql-query-permissions.json"), }, { Method: "GET", - Resource: "/api/2.0/preview/sql/permissions/dashboards/9cb0c8f5-6262-4a1f-a741-2181de76028f", + Resource: "/api/2.0/permissions/dbsql-dashboards/9cb0c8f5-6262-4a1f-a741-2181de76028f?", Response: getJSONObject("test-data/get-sql-dashboard-permissions.json"), }, { @@ -1983,7 +1983,7 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/preview/sql/permissions/alerts/3cf91a42-6217-4f3c-a6f0-345d489051b9", + Resource: "/api/2.0/permissions/sql/alerts/3cf91a42-6217-4f3c-a6f0-345d489051b9?", Response: getJSONObject("test-data/get-sql-alert-permissions.json"), }, }, @@ -2039,7 +2039,7 @@ func TestImportingDLTPipelines(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/repos/123", + Resource: "/api/2.0/permissions/repos/123?", Response: getJSONObject("test-data/get-repo-permissions.json"), }, { @@ -2085,12 +2085,12 @@ func TestImportingDLTPipelines(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/pipelines/123", + Resource: "/api/2.0/permissions/pipelines/123?", Response: getJSONObject("test-data/get-pipeline-permissions.json"), }, { Method: "GET", - Resource: "/api/2.0/permissions/notebooks/123", + Resource: "/api/2.0/permissions/notebooks/123?", Response: getJSONObject("test-data/get-notebook-permissions.json"), }, { @@ -2169,7 +2169,7 @@ func TestImportingDLTPipelines(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/files/789", + Resource: "/api/2.0/permissions/files/789?", Response: getJSONObject("test-data/get-workspace-file-permissions.json"), }, }, @@ -2257,7 +2257,7 @@ func TestImportingDLTPipelinesMatchingOnly(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/pipelines/123", + Resource: "/api/2.0/permissions/pipelines/123?", Response: getJSONObject("test-data/get-pipeline-permissions.json"), }, { diff --git a/exporter/importables.go b/exporter/importables.go index d2cb8d0f36..5ea235c335 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -32,7 +32,7 @@ import ( 
"github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/jobs" "github.com/databricks/terraform-provider-databricks/mws" - "github.com/databricks/terraform-provider-databricks/permissions" + "github.com/databricks/terraform-provider-databricks/permissions/entity" tfpipelines "github.com/databricks/terraform-provider-databricks/pipelines" "github.com/databricks/terraform-provider-databricks/repos" tfsettings "github.com/databricks/terraform-provider-databricks/settings" @@ -1184,7 +1184,7 @@ var resourcesMap map[string]importable = map[string]importable{ return (r.Data.Get("access_control.#").(int) == 0) }, Import: func(ic *importContext, r *resource) error { - var permissions permissions.PermissionsEntity + var permissions entity.PermissionsEntity s := ic.Resources["databricks_permissions"].Schema common.DataToStructPointer(r.Data, s, &permissions) for _, ac := range permissions.AccessControlList { diff --git a/exporter/importables_test.go b/exporter/importables_test.go index 544322a745..6bea1a8cf0 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -25,6 +25,7 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/jobs" "github.com/databricks/terraform-provider-databricks/permissions" + "github.com/databricks/terraform-provider-databricks/permissions/entity" "github.com/databricks/terraform-provider-databricks/internal/providers/sdkv2" dlt_pipelines "github.com/databricks/terraform-provider-databricks/pipelines" @@ -220,8 +221,8 @@ func TestPermissions(t *testing.T) { assert.Equal(t, "abc", name) d.MarkNewResource() - err := common.StructToData(permissions.PermissionsEntity{ - AccessControlList: []permissions.AccessControlChange{ + err := common.StructToData(entity.PermissionsEntity{ + AccessControlList: []iam.AccessControlRequest{ { UserName: "a", }, diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 5d803bd451..1386ee9db4 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -3,222 +3,836 @@ package acceptance import ( "context" "fmt" + "regexp" + "strconv" "testing" - "github.com/databricks/databricks-sdk-go/client" - "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/terraform-provider-databricks/common" - "github.com/databricks/terraform-provider-databricks/permissions" - - "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func TestAccDatabricksPermissionsResourceFullLifecycle(t *testing.T) { - randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - WorkspaceLevel(t, Step{ - Template: fmt.Sprintf(` - resource "databricks_notebook" "this" { - content_base64 = base64encode("# Databricks notebook source\nprint(1)") - path = "/Beginning/%[1]s/Init" - language = "PYTHON" +// +// databricks_permissions testing support +// + +type permissionSettings struct { + // Name of the SP or group. Must be quoted for a literal string, or can be a reference to another object. 
+ ref string + // If true, the resource will not be created + skipCreation bool + permissionLevel string +} + +type makePermissionsConfig struct { + servicePrincipal []permissionSettings + group []permissionSettings + user []permissionSettings +} + +// Not used today, so this fails linting, but we can uncomment it if needed in the future. +// func servicePrincipalPermissions(permissionLevel ...string) func(*makePermissionsConfig) { +// return func(config *makePermissionsConfig) { +// config.servicePrincipal = simpleSettings(permissionLevel...) +// } +// } + +func groupPermissions(permissionLevel ...string) func(*makePermissionsConfig) { + return func(config *makePermissionsConfig) { + config.group = simpleSettings(permissionLevel...) + } +} + +func userPermissions(permissionLevel ...string) func(*makePermissionsConfig) { + return func(config *makePermissionsConfig) { + config.user = simpleSettings(permissionLevel...) + } +} + +func allPrincipalPermissions(permissionLevel ...string) func(*makePermissionsConfig) { + return func(config *makePermissionsConfig) { + config.servicePrincipal = append(config.servicePrincipal, simpleSettings(permissionLevel...)...) + config.group = append(config.group, simpleSettings(permissionLevel...)...) + config.user = append(config.user, simpleSettings(permissionLevel...)...) + } +} + +func currentPrincipalPermission(t *testing.T, permissionLevel string) func(*makePermissionsConfig) { + settings := permissionSettings{ + permissionLevel: permissionLevel, + ref: "data.databricks_current_user.me.user_name", + skipCreation: true, + } + return func(config *makePermissionsConfig) { + if isGcp(t) { + config.user = append(config.user, settings) + } else { + config.servicePrincipal = append(config.servicePrincipal, settings) } - resource "databricks_group" "first" { - display_name = "First %[1]s" + } +} + +func currentPrincipalType(t *testing.T) string { + if isGcp(t) { + return "user" + } + return "service_principal" +} + +func customPermission(name string, permissionSettings permissionSettings) func(*makePermissionsConfig) { + return func(config *makePermissionsConfig) { + switch name { + case "service_principal": + config.servicePrincipal = append(config.servicePrincipal, permissionSettings) + case "group": + config.group = append(config.group, permissionSettings) + case "user": + config.user = append(config.user, permissionSettings) + default: + panic(fmt.Sprintf("unknown permission type: %s", name)) } - resource "databricks_permissions" "dummy" { - notebook_path = databricks_notebook.this.id + } +} + +func simpleSettings(permissionLevel ...string) []permissionSettings { + var settings []permissionSettings + for _, level := range permissionLevel { + settings = append(settings, permissionSettings{permissionLevel: level}) + } + return settings +} + +func makePermissionsTestStage(idAttribute, idValue string, permissionOptions ...func(*makePermissionsConfig)) string { + config := makePermissionsConfig{} + for _, option := range permissionOptions { + option(&config) + } + var resources string + var accessControlBlocks string + addPermissions := func(permissionSettings []permissionSettings, resourceType, resourceNameAttribute, idAttribute, accessControlAttribute string, getName func(int) string) { + for i, permission := range permissionSettings { + if !permission.skipCreation { + resources += fmt.Sprintf(` + resource "%s" "_%d" { + %s = "permissions-%s" + }`, resourceType, i, resourceNameAttribute, getName(i)) + } + var name string + if permission.ref == "" { + name = 
fmt.Sprintf("%s._%d.%s", resourceType, i, idAttribute) + } else { + name = permission.ref + } + accessControlBlocks += fmt.Sprintf(` access_control { - group_name = databricks_group.first.display_name - permission_level = "CAN_MANAGE" + %s = %s + permission_level = "%s" + }`, accessControlAttribute, name, permission.permissionLevel) + } + } + addPermissions(config.servicePrincipal, "databricks_service_principal", "display_name", "application_id", "service_principal_name", func(i int) string { + return fmt.Sprintf("{var.STICKY_RANDOM}-%d", i) + }) + addPermissions(config.group, "databricks_group", "display_name", "display_name", "group_name", func(i int) string { + return fmt.Sprintf("{var.STICKY_RANDOM}-%d", i) + }) + addPermissions(config.user, "databricks_user", "user_name", "user_name", "user_name", func(i int) string { + return fmt.Sprintf("{var.STICKY_RANDOM}-%d@databricks.com", i) + }) + return fmt.Sprintf(` + data databricks_current_user me {} + %s + resource "databricks_permissions" "this" { + %s = %s + %s + } + `, resources, idAttribute, idValue, accessControlBlocks) +} + +func assertContainsPermission(t *testing.T, permissions *iam.ObjectPermissions, principalType, name string, permissionLevel iam.PermissionLevel) { + for _, acl := range permissions.AccessControlList { + switch principalType { + case "user": + if acl.UserName == name { + assert.Equal(t, permissionLevel, acl.AllPermissions[0].PermissionLevel) + return + } + case "service_principal": + if acl.ServicePrincipalName == name { + assert.Equal(t, permissionLevel, acl.AllPermissions[0].PermissionLevel) + return + } + case "group": + if acl.GroupName == name { + assert.Equal(t, permissionLevel, acl.AllPermissions[0].PermissionLevel) + return } - }`, randomName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("databricks_permissions.dummy", - "object_type", "notebook"), - resourceCheck("databricks_permissions.dummy", - func(ctx context.Context, client *common.DatabricksClient, id string) error { - permissions, err := permissions.NewPermissionsAPI(ctx, client).Read(id) - if err != nil { - return err - } - assert.GreaterOrEqual(t, len(permissions.AccessControlList), 1) - return nil - }), - ), - }, Step{ - Template: fmt.Sprintf(` - resource "databricks_notebook" "this" { - content_base64 = base64encode("# Databricks notebook source\nprint(1)") - path = "/Beginning/%[1]s/Init" - language = "PYTHON" } - resource "databricks_group" "first" { - display_name = "First %[1]s" + } + assert.Fail(t, fmt.Sprintf("permission not found for %s %s", principalType, name)) +} + +// +// databricks_permissions acceptance tests +// + +func TestAccPermissions_ClusterPolicy(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + policyTemplate := ` + resource "databricks_cluster_policy" "this" { + name = "{var.STICKY_RANDOM}" + definition = jsonencode({ + "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": { + "type": "fixed", + "value": "jdbc:sqlserver://" + } + }) + }` + + WorkspaceLevel(t, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_policy_id", "databricks_cluster_policy.this.id", groupPermissions("CAN_USE")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_policy_id", "databricks_cluster_policy.this.id", currentPrincipalPermission(t, "CAN_USE"), allPrincipalPermissions("CAN_USE")), + }) +} + +func TestAccPermissions_InstancePool(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + policyTemplate := ` + data "databricks_node_type" "smallest" { + 
local_disk = true + } + + resource "databricks_instance_pool" "this" { + instance_pool_name = "{var.STICKY_RANDOM}" + min_idle_instances = 0 + max_capacity = 1 + node_type_id = data.databricks_node_type.smallest.id + idle_instance_autotermination_minutes = 10 + }` + + WorkspaceLevel(t, Step{ + Template: policyTemplate + makePermissionsTestStage("instance_pool_id", "databricks_instance_pool.this.id", groupPermissions("CAN_ATTACH_TO")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("instance_pool_id", "databricks_instance_pool.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_ATTACH_TO", "CAN_MANAGE")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("instance_pool_id", "databricks_instance_pool.this.id", currentPrincipalPermission(t, "CAN_ATTACH_TO")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for instance-pool, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Cluster(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + policyTemplate := ` + +data "databricks_spark_version" "latest" { +} + + resource "databricks_cluster" "this" { + cluster_name = "singlenode-{var.RANDOM}" + spark_version = data.databricks_spark_version.latest.id + instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" + num_workers = 0 + autotermination_minutes = 10 + spark_conf = { + "spark.databricks.cluster.profile" = "singleNode" + "spark.master" = "local[*]" } - resource "databricks_group" "second" { - display_name = "Second %[1]s" + custom_tags = { + "ResourceClass" = "SingleNode" } - resource "databricks_permissions" "dummy" { - notebook_path = databricks_notebook.this.id - access_control { - group_name = databricks_group.first.display_name - permission_level = "CAN_MANAGE" - } - access_control { - group_name = databricks_group.second.display_name - permission_level = "CAN_RUN" + }` + + WorkspaceLevel(t, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_id", "databricks_cluster.this.id", groupPermissions("CAN_ATTACH_TO")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_id", "databricks_cluster.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_ATTACH_TO", "CAN_RESTART", "CAN_MANAGE")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_id", "databricks_cluster.this.id", currentPrincipalPermission(t, "CAN_ATTACH_TO")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for cluster, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Job(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` + resource "databricks_job" "this" { + name = "{var.STICKY_RANDOM}" + }` + WorkspaceLevel(t, Step{ + Template: template + makePermissionsTestStage("job_id", "databricks_job.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: template + makePermissionsTestStage("job_id", "databricks_job.this.id", currentPrincipalPermission(t, "IS_OWNER"), allPrincipalPermissions("CAN_VIEW", "CAN_MANAGE_RUN", "CAN_MANAGE")), + }, Step{ + Template: template + makePermissionsTestStage("job_id", "databricks_job.this.id", currentPrincipalPermission(t, "CAN_MANAGE_RUN")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for job, allowed levels: CAN_MANAGE, IS_OWNER"), + }, Step{ + Template: template + makePermissionsTestStage("job_id", "databricks_job.this.id", 
currentPrincipalPermission(t, "CAN_MANAGE"), userPermissions("IS_OWNER")), + }, Step{ + Template: template, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + jobId := s.RootModule().Resources["databricks_job.this"].Primary.ID + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "jobs", jobId) + assert.NoError(t, err) + idInt, err := strconv.Atoi(jobId) + assert.NoError(t, err) + job, err := w.Jobs.GetByJobId(context.Background(), int64(idInt)) + assert.NoError(t, err) + assertContainsPermission(t, permissions, currentPrincipalType(t), job.CreatorUserName, iam.PermissionLevelIsOwner) + return nil + }, + }) +} + +func TestAccPermissions_Pipeline(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + policyTemplate := ` + + locals { + name = "{var.STICKY_RANDOM}" + } + + resource "databricks_pipeline" "this" { + name = "${local.name}" + storage = "/test/${local.name}" + + library { + notebook { + path = databricks_notebook.this.path } - }`, randomName), - Check: resourceCheck("databricks_permissions.dummy", - func(ctx context.Context, client *common.DatabricksClient, id string) error { - permissions, err := permissions.NewPermissionsAPI(ctx, client).Read(id) - if err != nil { - return err - } - assert.GreaterOrEqual(t, len(permissions.AccessControlList), 2) - return nil - }), + } + continuous = false + }` + dltNotebookResource + + WorkspaceLevel(t, Step{ + Template: policyTemplate + makePermissionsTestStage("pipeline_id", "databricks_pipeline.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("pipeline_id", "databricks_pipeline.this.id", currentPrincipalPermission(t, "IS_OWNER"), allPrincipalPermissions("CAN_VIEW", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("pipeline_id", "databricks_pipeline.this.id", currentPrincipalPermission(t, "CAN_RUN")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for pipelines, allowed levels: CAN_MANAGE, IS_OWNER"), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("pipeline_id", "databricks_pipeline.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), userPermissions("IS_OWNER"), groupPermissions("CAN_VIEW", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: policyTemplate, + Check: resourceCheck("databricks_pipeline.this", func(ctx context.Context, c *common.DatabricksClient, id string) error { + w, err := c.WorkspaceClient() + assert.NoError(t, err) + pipeline, err := w.Pipelines.GetByPipelineId(context.Background(), id) + assert.NoError(t, err) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "pipelines", id) + assert.NoError(t, err) + assertContainsPermission(t, permissions, currentPrincipalType(t), pipeline.CreatorUserName, iam.PermissionLevelIsOwner) + return nil + }), + }) +} + +func TestAccPermissions_Notebook_Path(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + notebookTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_notebook" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" + }` + WorkspaceLevel(t, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_path", "databricks_notebook.this.id", groupPermissions("CAN_RUN")), + }, Step{ + Template: 
notebookTemplate + makePermissionsTestStage("notebook_path", "databricks_notebook.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. + Template: notebookTemplate + makePermissionsTestStage("notebook_path", "databricks_notebook.this.id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_path", "databricks_notebook.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for notebook, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Notebook_Id(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + notebookTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_notebook" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" + }` + WorkspaceLevel(t, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_id", "databricks_notebook.this.object_id", groupPermissions("CAN_RUN")), + }, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_id", "databricks_notebook.this.object_id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. + Template: notebookTemplate + makePermissionsTestStage("notebook_id", "databricks_notebook.this.object_id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_id", "databricks_notebook.this.object_id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for notebook, allowed levels: CAN_MANAGE"), }) } -func TestAccDatabricksReposPermissionsResourceFullLifecycle(t *testing.T) { - randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) +func TestAccPermissions_Directory_Path(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + directoryTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + }` WorkspaceLevel(t, Step{ - Template: fmt.Sprintf(` + Template: directoryTemplate + makePermissionsTestStage("directory_path", "databricks_directory.this.id", groupPermissions("CAN_RUN")), + }, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_path", "databricks_directory.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. 
+ Template: directoryTemplate + makePermissionsTestStage("directory_path", "databricks_directory.this.id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_path", "databricks_directory.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for directory, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Directory_Id(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + directoryTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + }` + WorkspaceLevel(t, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_id", "databricks_directory.this.object_id", groupPermissions("CAN_RUN")), + }, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_id", "databricks_directory.this.object_id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. + Template: directoryTemplate + makePermissionsTestStage("directory_id", "databricks_directory.this.object_id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_id", "databricks_directory.this.object_id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for directory, allowed levels: CAN_MANAGE"), + }) +} + +// This test exercises both by ID and by path permissions for the root directory. Testing them +// concurrently would result in a race condition. 
+func TestAccPermissions_Directory_RootDirectoryCorrectlyHandlesAdminUsers(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + expectedAclAfterDeletion := []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: iam.PermissionLevelCanManage, + ForceSendFields: []string{"Inherited", "PermissionLevel"}, + }, + }, + ForceSendFields: []string{"GroupName"}, + }, + } + WorkspaceLevel(t, Step{ + Template: makePermissionsTestStage("directory_id", "\"0\"", groupPermissions("CAN_RUN")), + }, Step{ + Template: `data databricks_current_user me {}`, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "directories", "0") + assert.NoError(t, err) + assert.Equal(t, expectedAclAfterDeletion, permissions.AccessControlList) + return nil + }, + }, Step{ + Template: makePermissionsTestStage("directory_path", "\"/\"", userPermissions("CAN_RUN")), + }, Step{ + Template: `data databricks_current_user me {}`, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "directories", "0") + assert.NoError(t, err) + assert.Equal(t, expectedAclAfterDeletion, permissions.AccessControlList) + return nil + }, + }) +} + +func TestAccPermissions_WorkspaceFile_Path(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + workspaceFile := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_workspace_file" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" + }` + WorkspaceLevel(t, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", groupPermissions("CAN_RUN")), + }, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. 
+ Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for file, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_WorkspaceFile_Id(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + workspaceFile := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_workspace_file" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" + }` + WorkspaceLevel(t, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", groupPermissions("CAN_RUN")), + }, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for file, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Repo_Id(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` resource "databricks_repo" "this" { url = "https://github.com/databrickslabs/tempo.git" - path = "/Repos/terraform-tests/tempo-%[1]s" + path = "/Repos/terraform-tests/tempo-{var.STICKY_RANDOM}" } - resource "databricks_group" "first" { - display_name = "First %[1]s" - } - resource "databricks_group" "second" { - display_name = "Second %[1]s" + ` + WorkspaceLevel(t, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", groupPermissions("CAN_MANAGE", "CAN_READ")), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permissions.this", "object_type", "repo"), + func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + repoId := s.RootModule().Resources["databricks_repo.this"].Primary.ID + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "repos", repoId) + assert.NoError(t, err) + group1Name := s.RootModule().Resources["databricks_group._0"].Primary.Attributes["display_name"] + assertContainsPermission(t, permissions, "group", group1Name, iam.PermissionLevelCanManage) + group2Name := s.RootModule().Resources["databricks_group._1"].Primary.Attributes["display_name"] + assertContainsPermission(t, permissions, "group", group2Name, iam.PermissionLevelCanRead) + return nil + }, + ), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), 
allPrincipalPermissions("CAN_READ", "CAN_MANAGE", "CAN_RUN", "CAN_EDIT")), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", allPrincipalPermissions("CAN_READ", "CAN_MANAGE", "CAN_RUN", "CAN_EDIT")), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for repo, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Repo_Path(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` + resource "databricks_repo" "this" { + url = "https://github.com/databrickslabs/tempo.git" + path = "/Repos/terraform-tests/tempo-{var.STICKY_RANDOM}" } - resource "databricks_permissions" "dummy" { - repo_path = databricks_repo.this.path - access_control { - group_name = databricks_group.first.display_name - permission_level = "CAN_MANAGE" - } - access_control { - group_name = databricks_group.second.display_name - permission_level = "CAN_RUN" - } - }`, randomName), + ` + WorkspaceLevel(t, Step{ + Template: template + makePermissionsTestStage("repo_path", "databricks_repo.this.path", groupPermissions("CAN_MANAGE", "CAN_RUN")), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("databricks_permissions.dummy", - "object_type", "repo"), - resourceCheck("databricks_permissions.dummy", - func(ctx context.Context, client *common.DatabricksClient, id string) error { - permissions, err := permissions.NewPermissionsAPI(ctx, client).Read(id) - if err != nil { - return err - } - assert.GreaterOrEqual(t, len(permissions.AccessControlList), 2) - return nil - }), + resource.TestCheckResourceAttr("databricks_permissions.this", "object_type", "repo"), + func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + repoId := s.RootModule().Resources["databricks_repo.this"].Primary.ID + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "repos", repoId) + assert.NoError(t, err) + group1Name := s.RootModule().Resources["databricks_group._0"].Primary.Attributes["display_name"] + assertContainsPermission(t, permissions, "group", group1Name, iam.PermissionLevelCanManage) + group2Name := s.RootModule().Resources["databricks_group._1"].Primary.Attributes["display_name"] + assertContainsPermission(t, permissions, "group", group2Name, iam.PermissionLevelCanRun) + return nil + }, ), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_READ", "CAN_MANAGE", "CAN_RUN", "CAN_EDIT")), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", allPrincipalPermissions("CAN_READ", "CAN_MANAGE", "CAN_RUN", "CAN_EDIT")), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for repo, allowed levels: CAN_MANAGE"), }) } -func TestAccDatabricksPermissionsForSqlWarehouses(t *testing.T) { - // Random string to annotate newly created groups - randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) +func TestAccPermissions_Authorization_Passwords(t *testing.T) { + skipf(t)("ACLs for passwords are disabled on testing workspaces") + loadDebugEnvIfRunsFromIDE(t, 
"workspace") + WorkspaceLevel(t, Step{ + Template: makePermissionsTestStage("authorization", "\"passwords\"", groupPermissions("CAN_USE")), + }, Step{ + Template: makePermissionsTestStage("authorization", "\"passwords\"", customPermission("group", permissionSettings{ref: `"admins"`, skipCreation: true, permissionLevel: "CAN_USE"})), + }) +} - // Create a client to query the permissions API - c, err := client.New(&config.Config{}) - require.NoError(t, err) - permissionsClient := permissions.NewPermissionsAPI(context.Background(), &common.DatabricksClient{DatabricksClient: c}) +func TestAccPermissions_Authorization_Tokens(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + WorkspaceLevel(t, Step{ + Template: makePermissionsTestStage("authorization", "\"tokens\"", groupPermissions("CAN_USE")), + }, Step{ + Template: makePermissionsTestStage("authorization", "\"tokens\"", customPermission("group", permissionSettings{ref: `"users"`, skipCreation: true, permissionLevel: "CAN_USE"})), + }, Step{ + // Template needs to be non-empty + Template: "data databricks_current_user me {}", + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "authorization", "tokens") + assert.NoError(t, err) + assert.Len(t, permissions.AccessControlList, 1) + assert.Equal(t, iam.AccessControlResponse{ + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: iam.PermissionLevelCanManage, + ForceSendFields: []string{"Inherited", "PermissionLevel"}, + }, + }, + ForceSendFields: []string{"GroupName"}, + }, permissions.AccessControlList[0]) + return nil + }, + }) +} + +func TestAccPermissions_SqlWarehouses(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + sqlWarehouseTemplate := ` + resource "databricks_sql_endpoint" "this" { + name = "{var.STICKY_RANDOM}" + cluster_size = "2X-Small" + }` + WorkspaceLevel(t, Step{ + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", groupPermissions("CAN_USE")), + }, Step{ + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", currentPrincipalPermission(t, "IS_OWNER"), allPrincipalPermissions("CAN_USE", "CAN_MANAGE", "CAN_MONITOR")), + // Note: ideally we could test making a new user/SP the owner of the warehouse, but the new user + // needs cluster creation permissions, and the SCIM API doesn't provide get-after-put consistency, + // so this would introduce flakiness. 
+ // }, Step{ + // Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), servicePrincipalPermissions("IS_OWNER")) + ` + // resource databricks_entitlements "this" { + // application_id = databricks_service_principal._0.application_id + // allow_cluster_create = true + // } + // `, + }, Step{ + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", currentPrincipalPermission(t, "CAN_USE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for warehouses, allowed levels: CAN_MANAGE, IS_OWNER"), + }, Step{ + Template: sqlWarehouseTemplate, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + id := s.RootModule().Resources["databricks_sql_endpoint.this"].Primary.ID + warehouse, err := w.Warehouses.GetById(context.Background(), id) + assert.NoError(t, err) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "warehouses", id) + assert.NoError(t, err) + assertContainsPermission(t, permissions, currentPrincipalType(t), warehouse.CreatorName, iam.PermissionLevelIsOwner) + return nil + }, + }) +} - // Validates export attribute "object_type" for the permissions resource - // is set to warehouses - checkObjectType := resource.TestCheckResourceAttr("databricks_permissions.this", - "object_type", "warehouses") +func TestAccPermissions_SqlDashboard(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + dashboardTemplate := ` + resource "databricks_sql_dashboard" "this" { + name = "{var.STICKY_RANDOM}" + }` + WorkspaceLevel(t, Step{ + Template: dashboardTemplate + makePermissionsTestStage("sql_dashboard_id", "databricks_sql_dashboard.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: dashboardTemplate + makePermissionsTestStage("sql_dashboard_id", "databricks_sql_dashboard.this.id", currentPrincipalPermission(t, "CAN_VIEW")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for dashboard, allowed levels: CAN_MANAGE"), + }, Step{ + Template: dashboardTemplate + makePermissionsTestStage("sql_dashboard_id", "databricks_sql_dashboard.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_VIEW", "CAN_READ", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }) +} - // Asserts value of a permission level for a group - assertPermissionLevel := func(t *testing.T, permissionId, groupName, permissionLevel string) { - // Query permissions on warehouse - warehousePermissions, err := permissionsClient.Read(permissionId) - require.NoError(t, err) +func TestAccPermissions_SqlAlert(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + alertTemplate := ` + resource "databricks_sql_query" "this" { + name = "{var.STICKY_RANDOM}-query" + query = "SELECT 1 AS p1, 2 as p2" + data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" + } + resource "databricks_sql_alert" "this" { + name = "{var.STICKY_RANDOM}-alert" + query_id = databricks_sql_query.this.id + options { + column = "p1" + op = ">=" + value = "3" + muted = false + } + }` + WorkspaceLevel(t, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_sql_alert.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_sql_alert.this.id", currentPrincipalPermission(t, 
"CAN_MANAGE"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_sql_alert.this.id", currentPrincipalPermission(t, "CAN_VIEW"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for alert, allowed levels: CAN_MANAGE"), + }) +} - // Assert expected permission level is present - assert.Contains(t, warehousePermissions.AccessControlList, permissions.AccessControl{ - GroupName: groupName, - AllPermissions: []permissions.Permission{ - { - PermissionLevel: permissionLevel, - }, - }, - }) - } +func TestAccPermissions_SqlQuery(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + queryTemplate := ` + resource "databricks_sql_query" "this" { + name = "{var.STICKY_RANDOM}-query" + query = "SELECT 1 AS p1, 2 as p2" + data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" + }` + WorkspaceLevel(t, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_sql_query.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_sql_query.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_sql_query.this.id", currentPrincipalPermission(t, "CAN_VIEW"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for query, allowed levels: CAN_MANAGE"), + }) +} - // Get permission ID from the terraform state - getPermissionId := func(s *terraform.State) string { - resourcePermission, ok := s.RootModule().Resources["databricks_permissions.this"] - require.True(t, ok, "could not find permissions resource: databricks_permissions.this") - return resourcePermission.Primary.ID - } +func TestAccPermissions_Dashboard(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + dashboardTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_dashboard" "dashboard" { + display_name = "TF New Dashboard" + warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" + parent_path = databricks_directory.this.path + } + ` + WorkspaceLevel(t, Step{ + Template: dashboardTemplate + makePermissionsTestStage("dashboard_id", "databricks_dashboard.dashboard.id", groupPermissions("CAN_READ")), + }, Step{ + Template: dashboardTemplate + makePermissionsTestStage("dashboard_id", "databricks_dashboard.dashboard.id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: dashboardTemplate + makePermissionsTestStage("dashboard_id", "databricks_dashboard.dashboard.id", currentPrincipalPermission(t, "CAN_READ"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for dashboard, allowed levels: CAN_MANAGE"), + }) +} - // Configuration for step 1 of the test. Create a databricks_permissions - // resources, assigning a group CAN_MANAGE permission to the warehouse. 
- config1 := fmt.Sprintf(` - resource "databricks_group" "one" { - display_name = "test-warehouse-permission-one-%s" - } - resource "databricks_permissions" "this" { - sql_endpoint_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" - access_control { - group_name = databricks_group.one.display_name - permission_level = "CAN_MANAGE" +func TestAccPermissions_Experiment(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + experimentTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" } - }`, randomName) + resource "databricks_mlflow_experiment" "this" { + name = "${databricks_directory.this.path}/experiment" + }` + WorkspaceLevel(t, Step{ + Template: experimentTemplate + makePermissionsTestStage("experiment_id", "databricks_mlflow_experiment.this.id", groupPermissions("CAN_READ")), + }, Step{ + Template: experimentTemplate + makePermissionsTestStage("experiment_id", "databricks_mlflow_experiment.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: experimentTemplate + makePermissionsTestStage("experiment_id", "databricks_mlflow_experiment.this.id", currentPrincipalPermission(t, "CAN_READ"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for mlflowExperiment, allowed levels: CAN_MANAGE"), + }) +} - // Configuration for step 2 of the test. Create another group and update - // permissions to CAN_USE for the second group - config2 := fmt.Sprintf(` - resource "databricks_group" "one" { - display_name = "test-warehouse-permission-one-%[1]s" - } - resource "databricks_group" "two" { - display_name = "test-warehouse-permission-two-%[1]s" +func TestAccPermissions_RegisteredModel(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + modelTemplate := ` + resource "databricks_mlflow_model" "m1" { + name = "tf-{var.STICKY_RANDOM}" + description = "tf-{var.STICKY_RANDOM} description" } - resource "databricks_permissions" "this" { - sql_endpoint_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" - access_control { - group_name = databricks_group.one.display_name - permission_level = "CAN_MANAGE" - } - access_control { - group_name = databricks_group.two.display_name - permission_level = "CAN_USE" - } - }`, randomName) - - WorkspaceLevel(t, - Step{ - Template: config1, - Check: resource.ComposeTestCheckFunc( - checkObjectType, - func(s *terraform.State) error { - id := getPermissionId(s) - assertPermissionLevel(t, id, "test-warehouse-permission-one-"+randomName, "CAN_MANAGE") - return nil + ` + WorkspaceLevel(t, Step{ + Template: modelTemplate + makePermissionsTestStage("registered_model_id", "databricks_mlflow_model.m1.registered_model_id", groupPermissions("CAN_READ")), + }, Step{ + Template: modelTemplate + makePermissionsTestStage("registered_model_id", "databricks_mlflow_model.m1.registered_model_id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE")), + }, Step{ + Template: modelTemplate + makePermissionsTestStage("registered_model_id", "databricks_mlflow_model.m1.registered_model_id", currentPrincipalPermission(t, "CAN_READ"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for registered-model, 
allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_RegisteredModel_Root(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + WorkspaceLevel(t, Step{ + Template: makePermissionsTestStage("registered_model_id", "\"root\"", groupPermissions("CAN_READ")), + }, Step{ + Template: makePermissionsTestStage("registered_model_id", "\"root\"", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE")), + }, Step{ + Template: makePermissionsTestStage("registered_model_id", "\"root\"", currentPrincipalPermission(t, "CAN_READ"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for registered-model, allowed levels: CAN_MANAGE"), + }, Step{ + Template: "data databricks_current_user me {}", + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "registered-models", "root") + assert.NoError(t, err) + assert.Len(t, permissions.AccessControlList, 1) + assert.Equal(t, iam.AccessControlResponse{ + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: iam.PermissionLevelCanManage, + ForceSendFields: []string{"Inherited", "PermissionLevel"}, + }, }, - ), + ForceSendFields: []string{"GroupName"}, + }, permissions.AccessControlList[0]) + return nil }, - Step{ - Template: config2, - Check: func(s *terraform.State) error { - id := getPermissionId(s) - assertPermissionLevel(t, id, "test-warehouse-permission-one-"+randomName, "CAN_MANAGE") - assertPermissionLevel(t, id, "test-warehouse-permission-two-"+randomName, "CAN_USE") - return nil - }, - }, - ) + }) +} + +func TestAccPermissions_ServingEndpoint(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + if isGcp(t) { + skipf(t)("Serving endpoints are not supported on GCP") + } + endpointTemplate := ` + resource "databricks_model_serving" "endpoint" { + name = "{var.STICKY_RANDOM}" + config { + served_models { + name = "prod_model" + model_name = "experiment-fixture-model" + model_version = "1" + workload_size = "Small" + scale_to_zero_enabled = true + } + traffic_config { + routes { + served_model_name = "prod_model" + traffic_percentage = 100 + } + } + } + }` + WorkspaceLevel(t, Step{ + Template: endpointTemplate + makePermissionsTestStage("serving_endpoint_id", "databricks_model_serving.endpoint.serving_endpoint_id", groupPermissions("CAN_VIEW")), + // Updating a serving endpoint seems to be flaky, so we'll only test that we can't remove management permissions for the current user. 
+ // }, Step{ + // Template: endpointTemplate + makePermissionsTestStage("serving_endpoint_id", "databricks_model_serving.endpoint.id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_VIEW", "CAN_QUERY", "CAN_MANAGE")), + }, Step{ + Template: endpointTemplate + makePermissionsTestStage("serving_endpoint_id", "databricks_model_serving.endpoint.serving_endpoint_id", currentPrincipalPermission(t, "CAN_VIEW"), groupPermissions("CAN_VIEW", "CAN_QUERY", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for serving-endpoint, allowed levels: CAN_MANAGE"), + }) } diff --git a/permissions/entity/permissions_entity.go b/permissions/entity/permissions_entity.go new file mode 100644 index 0000000000..e8c1f4b067 --- /dev/null +++ b/permissions/entity/permissions_entity.go @@ -0,0 +1,18 @@ +package entity + +import "github.com/databricks/databricks-sdk-go/service/iam" + +// PermissionsEntity is the one used for resource metadata +type PermissionsEntity struct { + ObjectType string `json:"object_type,omitempty" tf:"computed"` + AccessControlList []iam.AccessControlRequest `json:"access_control" tf:"slice_set"` +} + +func (p PermissionsEntity) ContainsUserOrServicePrincipal(name string) bool { + for _, ac := range p.AccessControlList { + if ac.UserName == name || ac.ServicePrincipalName == name { + return true + } + } + return false +} diff --git a/permissions/permission_definitions.go b/permissions/permission_definitions.go new file mode 100644 index 0000000000..fbc9158517 --- /dev/null +++ b/permissions/permission_definitions.go @@ -0,0 +1,731 @@ +package permissions + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/databricks/terraform-provider-databricks/permissions/entity" + "github.com/databricks/terraform-provider-databricks/permissions/read" + "github.com/databricks/terraform-provider-databricks/permissions/update" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// resourcePermissions captures all the information needed to manage permissions for a given object type. +type resourcePermissions struct { + // Mandatory Fields + + // The attribute name that users configure with the ID of the object to manage + // e.g. "cluster_id" for a cluster + field string + // The object type to use in the Permissions API, e.g. "cluster" for a cluster. + objectType string + // The name of the object in the ID of the TF resource, e.g. "clusters" for a cluster, + // where the ID would be /clusters/. This should also match the prefix of the + // object ID in the API response, unless idMatcher is set. + requestObjectType string + // The allowed permission levels for this object type and its options. + allowedPermissionLevels map[string]permissionLevelOptions + + // ID Remapping Options + + // Returns the object ID for the given user-specified ID. This is necessary because permissions for + // some objects are done by path, whereas others are by ID. Those by path need to be converted to the + // internal object ID before being stored in the state. If not specified, the default ID is "//". + idRetriever func(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) + // By default, a resourcePermissions can be retrieved based on the structure of the ID, as described above. 
+ // If this function is set, it will be used to determine whether the ID matches this resource type. + idMatcher func(id string) bool + // A custom matcher to check whether a given ID matches this resource type. + // Most resources can be determined by looking at the attribute name used to configure the permission, but + // tokens & passwords are special cases where the resource type is determined by the value of this attribute. + stateMatcher func(id string) bool + + // Behavior Options and Customizations + + // The alternative name of the "path" attribute for this resource. E.g. "workspace_file_path" for a file. + // If not set, default is "_path". + pathVariant string + // If true, the provider will allow the user to configure the "admins" group for this resource type. Otherwise, + // validation will fail if the user tries to configure the "admins" group, and admin configurations in API + // responses will be ignored. This should only be set to true for the "authorization = passwords" resource. + allowConfiguringAdmins bool + // Customizers when handling permission resource creation and update. + // + // Most resources that have a CAN_MANAGE permission level should add update.AddCurrentUserAsManage to this list + // to ensure that the user applying the template always has management permissions on the underlying resource. + updateAclCustomizers []update.ACLCustomizer + // Customizers when handling permission resource deletion. + // + // Most resources that have a CAN_MANAGE permission level should add update.AddCurrentUserAsManage to this list + // to ensure that the user applying the template always has management permissions on the underlying resource. + deleteAclCustomizers []update.ACLCustomizer + // Customizers when handling permission resource read. + // + // Resources for which admins inherit permissions should add removeAdminPermissionsCustomizer to this list. This + // prevents the admin group from being included in the permissions when reading the state. + readAclCustomizers []read.ACLCustomizer + + // Returns the creator of the object. Used when deleting databricks_permissions resources, when the + // creator of the object is restored as the owner. + fetchObjectCreator func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) +} + +// getAllowedPermissionLevels returns the list of permission levels that are allowed for this resource type. +func (p resourcePermissions) getAllowedPermissionLevels(includeNonManagementPermissions bool) []string { + levels := make([]string, 0, len(p.allowedPermissionLevels)) + for level := range p.allowedPermissionLevels { + if includeNonManagementPermissions || p.allowedPermissionLevels[level].isManagementPermission { + levels = append(levels, level) + } + } + sort.Strings(levels) + return levels +} + +// resourceStatus captures the status of a resource with permissions. If the resource doesn't exist, +// the provider will not try to update its permissions. Otherwise, the creator will be returned if +// it can be determined for the given resource type. +type resourceStatus struct { + exists bool + creator string +} + +// getObjectStatus returns the creator of the object and whether the object exists. If the object creator cannot be determined for this +// resource type, an empty string is returned. Resources without fetchObjectCreator are assumed to exist and have an unknown creator. 
+func (p resourcePermissions) getObjectStatus(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (resourceStatus, error) { + if p.fetchObjectCreator != nil { + creator, err := p.fetchObjectCreator(ctx, w, objectID) + if err != nil { + return resourceStatus{}, err + } + return resourceStatus{exists: creator != "", creator: creator}, nil + } + return resourceStatus{exists: true, creator: ""}, nil +} + +// getPathVariant returns the name of the path attribute for this resource type. +func (p resourcePermissions) getPathVariant() string { + if p.pathVariant != "" { + return p.pathVariant + } + return p.objectType + "_path" +} + +// validate checks that the user is not trying to set permissions for the admin group or remove their own management permissions. +func (p resourcePermissions) validate(ctx context.Context, entity entity.PermissionsEntity, currentUsername string) error { + for _, change := range entity.AccessControlList { + // Prevent users from setting permissions for admins. + if change.GroupName == "admins" && !p.allowConfiguringAdmins { + return fmt.Errorf("it is not possible to modify admin permissions for %s resources", p.objectType) + } + // Check that the user is preventing themselves from managing the object + level := p.allowedPermissionLevels[string(change.PermissionLevel)] + if (change.UserName == currentUsername || change.ServicePrincipalName == currentUsername) && !level.isManagementPermission { + allowedLevelsForCurrentUser := p.getAllowedPermissionLevels(false) + return fmt.Errorf("cannot remove management permissions for the current user for %s, allowed levels: %s", p.objectType, strings.Join(allowedLevelsForCurrentUser, ", ")) + } + + if level.deprecated != "" { + tflog.Debug(ctx, fmt.Sprintf("the permission level %s for %s is deprecated: %s", change.PermissionLevel, p.objectType, level.deprecated)) + } + } + return nil +} + +// getID returns the object ID for the given user-specified ID. +func (p resourcePermissions) getID(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { + var err error + if p.idRetriever != nil { + id, err = p.idRetriever(ctx, w, id) + if err != nil { + return "", err + } + } + return fmt.Sprintf("/%s/%s", p.requestObjectType, id), nil +} + +// prepareForUpdate prepares the access control list for an update request by calling all update customizers. +func (p resourcePermissions) prepareForUpdate(objectID string, e entity.PermissionsEntity, currentUser string) (entity.PermissionsEntity, error) { + cachedCurrentUser := func() (string, error) { return currentUser, nil } + ctx := update.ACLCustomizerContext{ + GetCurrentUser: cachedCurrentUser, + GetId: func() string { return objectID }, + } + var err error + for _, customizer := range p.updateAclCustomizers { + e.AccessControlList, err = customizer(ctx, e.AccessControlList) + if err != nil { + return entity.PermissionsEntity{}, err + } + } + return e, nil +} + +// prepareForDelete prepares the access control list for a delete request by calling all delete customizers. +func (p resourcePermissions) prepareForDelete(objectACL *iam.ObjectPermissions, getCurrentUser func() (string, error)) ([]iam.AccessControlRequest, error) { + accl := make([]iam.AccessControlRequest, 0, len(objectACL.AccessControlList)) + // By default, only admins have access to a resource when databricks_permissions for that resource are deleted. 
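+	// Keep only the admin group's direct (non-inherited) grants; entries for all other principals are dropped.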
+ for _, acl := range objectACL.AccessControlList { + if acl.GroupName != "admins" { + continue + } + for _, permission := range acl.AllPermissions { + if !permission.Inherited { + // keep everything direct for admin group + accl = append(accl, iam.AccessControlRequest{ + GroupName: acl.GroupName, + PermissionLevel: permission.PermissionLevel, + }) + break + } + } + } + ctx := update.ACLCustomizerContext{ + GetCurrentUser: getCurrentUser, + GetId: func() string { return objectACL.ObjectId }, + } + var err error + for _, customizer := range p.deleteAclCustomizers { + accl, err = customizer(ctx, accl) + if err != nil { + return nil, err + } + } + return accl, nil +} + +// prepareResponse prepares the access control list for a read response by calling all read customizers. +// +// If the user does not include an access_control block for themselves, it will not be included in the state. This +// prevents diffs when the applying user is not included in the access_control block for the resource but is +// added by update.AddCurrentUserAsManage. +// +// Read customizers are able to access the current state of the object in order to customize the response accordingly. +// For example, the SQL API previously used CAN_VIEW for read-only permission, but the GA API uses CAN_READ. Users may +// have CAN_VIEW in their resource configuration, so the read customizer will rewrite the response from CAN_READ to +// CAN_VIEW to match the user's configuration. +func (p resourcePermissions) prepareResponse(objectID string, objectACL *iam.ObjectPermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { + ctx := read.ACLCustomizerContext{ + GetId: func() string { return objectID }, + GetExistingPermissionsEntity: func() entity.PermissionsEntity { return existing }, + } + acl := *objectACL + for _, customizer := range p.readAclCustomizers { + acl = customizer(ctx, acl) + } + if acl.ObjectType != p.objectType { + return entity.PermissionsEntity{}, fmt.Errorf("expected object type %s, got %s", p.objectType, objectACL.ObjectType) + } + entity := entity.PermissionsEntity{} + for _, accessControl := range acl.AccessControlList { + // If the user doesn't include an access_control block for themselves, do not include it in the state. + // On create/update, the provider will automatically include the current user in the access_control block + // for appropriate resources. Otherwise, it must be included in state to prevent configuration drift. + if me == accessControl.UserName || me == accessControl.ServicePrincipalName { + if !existing.ContainsUserOrServicePrincipal(me) { + continue + } + } + // Skip admin permissions for resources where users are not allowed to explicitly configure them. + if accessControl.GroupName == "admins" && !p.allowConfiguringAdmins { + continue + } + for _, permission := range accessControl.AllPermissions { + // Inherited permissions can be ignored, as they are not set by the user. + if permission.Inherited { + continue + } + entity.AccessControlList = append(entity.AccessControlList, iam.AccessControlRequest{ + GroupName: accessControl.GroupName, + UserName: accessControl.UserName, + ServicePrincipalName: accessControl.ServicePrincipalName, + PermissionLevel: permission.PermissionLevel, + }) + } + } + return entity, nil +} + +// addOwnerPermissionIfNeeded adds the owner permission to the object ACL if the owner permission is allowed and not already set. 
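+// ownerOpt is the principal to fall back to as owner: the current user on update, or the object creator on delete.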
+func (p resourcePermissions) addOwnerPermissionIfNeeded(objectACL []iam.AccessControlRequest, ownerOpt string) []iam.AccessControlRequest {
+	_, ok := p.allowedPermissionLevels["IS_OWNER"]
+	if !ok {
+		return objectACL
+	}
+
+	for _, acl := range objectACL {
+		if acl.PermissionLevel == "IS_OWNER" {
+			return objectACL
+		}
+	}
+
+	return append(objectACL, iam.AccessControlRequest{
+		UserName:        ownerOpt,
+		PermissionLevel: "IS_OWNER",
+	})
+}
+
+// permissionLevelOptions describes the properties of a permission level for a given object type.
+type permissionLevelOptions struct {
+	// Whether users with this permission level are allowed to manage the resource.
+	// For some resources where ACLs don't define who can manage the resource, this might be unintuitive,
+	// e.g. all cluster policies permissions are considered management permissions because cluster policy
+	// ACLs don't define who can manage the cluster policy.
+	isManagementPermission bool
+
+	// If non-empty, the permission level is deprecated. The string is a message to display to the user when
+	// this permission level is used.
+	deprecated string
+}
+
+// getResourcePermissionsFromId returns the resourcePermissions whose ID structure or idMatcher matches the given resource ID.
+func getResourcePermissionsFromId(id string) (resourcePermissions, error) {
+	idParts := strings.Split(id, "/")
+	objectType := strings.Join(idParts[1:len(idParts)-1], "/")
+	for _, mapping := range allResourcePermissions() {
+		if mapping.idMatcher != nil {
+			if mapping.idMatcher(id) {
+				return mapping, nil
+			}
+			continue
+		}
+		if mapping.requestObjectType == objectType {
+			return mapping, nil
+		}
+	}
+	return resourcePermissions{}, fmt.Errorf("resource type for %s not found", id)
+}
+
+// getResourcePermissionsFromState returns the resourcePermissions for the given state.
+func getResourcePermissionsFromState(d interface{ GetOk(string) (any, bool) }) (resourcePermissions, string, error) {
+	allPermissions := allResourcePermissions()
+	for _, mapping := range allPermissions {
+		if v, ok := d.GetOk(mapping.field); ok {
+			id := v.(string)
+			if mapping.stateMatcher != nil && !mapping.stateMatcher(id) {
+				continue
+			}
+			return mapping, id, nil
+		}
+	}
+	allFields := make([]string, 0, len(allPermissions))
+	seen := make(map[string]struct{})
+	for _, mapping := range allPermissions {
+		if _, ok := seen[mapping.field]; ok {
+			continue
+		}
+		seen[mapping.field] = struct{}{}
+		allFields = append(allFields, mapping.field)
+	}
+	sort.Strings(allFields)
+	return resourcePermissions{}, "", fmt.Errorf("at least one type of resource identifier must be set; allowed fields: %s", strings.Join(allFields, ", "))
+}
+
+// allResourcePermissions is the list of all resource types that can be managed by the databricks_permissions resource.
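+// Each entry maps a configuration attribute (e.g. "cluster_id") to the corresponding Permissions API request
+// object type and the permission levels it supports.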
+func allResourcePermissions() []resourcePermissions { + PATH := func(ctx context.Context, w *databricks.WorkspaceClient, path string) (string, error) { + info, err := w.Workspace.GetStatusByPath(ctx, path) + if err != nil { + return "", fmt.Errorf("cannot load path %s: %s", path, err) + } + return strconv.FormatInt(info.ObjectId, 10), nil + } + rewriteCanViewToCanRead := update.RewritePermissions(map[iam.PermissionLevel]iam.PermissionLevel{ + iam.PermissionLevelCanView: iam.PermissionLevelCanRead, + }) + rewriteCanReadToCanView := read.RewritePermissions(map[iam.PermissionLevel]iam.PermissionLevel{ + iam.PermissionLevelCanRead: iam.PermissionLevelCanView, + }) + return []resourcePermissions{ + { + field: "cluster_policy_id", + objectType: "cluster-policy", + requestObjectType: "cluster-policies", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: true}, + }, + }, + { + field: "instance_pool_id", + objectType: "instance-pool", + requestObjectType: "instance-pools", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_ATTACH_TO": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + }, + { + field: "cluster_id", + objectType: "cluster", + requestObjectType: "clusters", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_ATTACH_TO": {isManagementPermission: false}, + "CAN_RESTART": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + }, + { + field: "pipeline_id", + objectType: "pipelines", + requestObjectType: "pipelines", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_VIEW": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "IS_OWNER": {isManagementPermission: true}, + }, + fetchObjectCreator: func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) { + pipeline, err := w.Pipelines.GetByPipelineId(ctx, strings.ReplaceAll(objectID, "/pipelines/", "")) + if err != nil { + return "", common.IgnoreNotFoundError(err) + } + return pipeline.CreatorUserName, nil + }, + }, + { + field: "job_id", + objectType: "job", + requestObjectType: "jobs", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_VIEW": {isManagementPermission: false}, + "CAN_MANAGE_RUN": {isManagementPermission: false}, + "IS_OWNER": {isManagementPermission: true}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + fetchObjectCreator: func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) { + jobId, err := strconv.ParseInt(strings.ReplaceAll(objectID, "/jobs/", ""), 10, 64) + if err != nil { + return "", err + } + job, err := w.Jobs.GetByJobId(ctx, jobId) + if err != nil { + return "", common.IgnoreNotFoundError(err) + } + return job.CreatorUserName, nil + }, + }, + { + field: "notebook_id", + objectType: "notebook", + requestObjectType: "notebooks", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": 
{isManagementPermission: true}, + }, + }, + { + field: "notebook_path", + objectType: "notebook", + requestObjectType: "notebooks", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + idRetriever: PATH, + }, + { + field: "directory_id", + objectType: "directory", + requestObjectType: "directories", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/directories/0"), update.AddAdmin), + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/directories/0"), update.AddAdmin), + }, + }, + { + field: "directory_path", + objectType: "directory", + requestObjectType: "directories", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + idRetriever: PATH, + updateAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/directories/0"), update.AddAdmin), + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/directories/0"), update.AddAdmin), + }, + }, + { + field: "workspace_file_id", + objectType: "file", + requestObjectType: "files", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + pathVariant: "workspace_file_path", + }, + { + field: "workspace_file_path", + objectType: "file", + requestObjectType: "files", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + idRetriever: PATH, + pathVariant: "workspace_file_path", + }, + { + field: "repo_id", + objectType: "repo", + requestObjectType: "repos", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + }, + { + field: "repo_path", + objectType: "repo", + requestObjectType: "repos", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + idRetriever: PATH, + }, + { + field: "authorization", + objectType: "tokens", + requestObjectType: "authorization", + stateMatcher: func(id string) bool { + return id == "tokens" + }, + idMatcher: func(id string) bool { + return id == "/authorization/tokens" + }, + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: true}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: 
[]update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/authorization/tokens"), update.AddAdmin), + }, + }, + { + field: "authorization", + objectType: "passwords", + requestObjectType: "authorization", + stateMatcher: func(id string) bool { + return id == "passwords" + }, + idMatcher: func(id string) bool { + return id == "/authorization/passwords" + }, + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: true}, + }, + allowConfiguringAdmins: true, + }, + { + field: "sql_endpoint_id", + objectType: "warehouses", + requestObjectType: "sql/warehouses", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_MONITOR": {isManagementPermission: false}, + "IS_OWNER": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + fetchObjectCreator: func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) { + warehouse, err := w.Warehouses.GetById(ctx, strings.ReplaceAll(objectID, "/sql/warehouses/", "")) + if err != nil { + return "", common.IgnoreNotFoundError(err) + } + return warehouse.CreatorName, nil + }, + }, + { + field: "sql_dashboard_id", + objectType: "dashboard", + requestObjectType: "dbsql-dashboards", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_EDIT": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_READ": {isManagementPermission: false}, + // This was part of the original SQL permissions API but was replaced by CAN_READ in the GA API. + "CAN_VIEW": { + isManagementPermission: false, + deprecated: "use CAN_READ instead", + }, + }, + idMatcher: func(id string) bool { + return strings.HasPrefix(id, "/dbsql-dashboards/") || strings.HasPrefix(id, "/sql/dashboards/") + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + readAclCustomizers: []read.ACLCustomizer{ + rewriteCanReadToCanView, + func(ctx read.ACLCustomizerContext, objectAcls iam.ObjectPermissions) iam.ObjectPermissions { + // The object type in the new API is "dbsql-dashboard", but for compatibility this should + // be "dashboard" in the state. + objectAcls.ObjectType = "dashboard" + return objectAcls + }, + }, + }, + { + field: "sql_alert_id", + objectType: "alert", + requestObjectType: "sql/alerts", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_EDIT": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_READ": {isManagementPermission: false}, + // This was part of the original SQL permissions API but was replaced by CAN_READ in the GA API. + // It should eventually be deprecated. 
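+				// Configurations that still use CAN_VIEW are rewritten to CAN_READ on create/update and back
+				// to CAN_VIEW on read by the customizers below, so existing configurations keep working.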
+ "CAN_VIEW": { + isManagementPermission: false, + deprecated: "use CAN_READ instead", + }, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + readAclCustomizers: []read.ACLCustomizer{ + rewriteCanReadToCanView, + }, + }, + { + field: "sql_query_id", + objectType: "query", + requestObjectType: "sql/queries", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_EDIT": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_READ": {isManagementPermission: false}, + // This was part of the original SQL permissions API but was replaced by CAN_READ in the GA API. + // It should eventually be deprecated. + "CAN_VIEW": { + isManagementPermission: false, + deprecated: "use CAN_READ instead", + }, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + readAclCustomizers: []read.ACLCustomizer{ + rewriteCanReadToCanView, + }, + }, + { + field: "dashboard_id", + objectType: "dashboard", + requestObjectType: "dashboards", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_EDIT": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_READ": {isManagementPermission: false}, + }, + readAclCustomizers: []read.ACLCustomizer{ + func(ctx read.ACLCustomizerContext, objectAcls iam.ObjectPermissions) iam.ObjectPermissions { + if strings.HasPrefix(objectAcls.ObjectId, "/dashboards/") { + // workaround for inconsistent API response returning object ID of file in the workspace + objectAcls.ObjectId = ctx.GetId() + } + return objectAcls + }, + }, + }, + { + field: "experiment_id", + objectType: "mlflowExperiment", + requestObjectType: "experiments", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + }, + { + field: "registered_model_id", + objectType: "registered-model", + requestObjectType: "registered-models", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE_STAGING_VERSIONS": {isManagementPermission: false}, + "CAN_MANAGE_PRODUCTION_VERSIONS": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + update.If(update.ObjectIdMatches("/registered-models/root"), update.AddAdmin), + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.If(update.Not(update.ObjectIdMatches("/registered-models/root")), update.AddCurrentUserAsManage), + }, + }, + { + field: "serving_endpoint_id", + objectType: "serving-endpoint", + requestObjectType: "serving-endpoints", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_VIEW": {isManagementPermission: false}, + "CAN_QUERY": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + deleteAclCustomizers: 
[]update.ACLCustomizer{update.AddCurrentUserAsManage}, + }, + } +} diff --git a/permissions/read/customizers.go b/permissions/read/customizers.go new file mode 100644 index 0000000000..3cee278fbb --- /dev/null +++ b/permissions/read/customizers.go @@ -0,0 +1,54 @@ +package read + +import ( + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/terraform-provider-databricks/permissions/entity" +) + +// Context that is available to aclReadCustomizer implementations. +type ACLCustomizerContext struct { + GetId func() string + GetExistingPermissionsEntity func() entity.PermissionsEntity +} + +// ACLCustomizer is a function that modifies the access control list of an object after it is read. +type ACLCustomizer func(ctx ACLCustomizerContext, objectAcls iam.ObjectPermissions) iam.ObjectPermissions + +// Rewrites the permission level of the access control list of an object after it is read. +// This is done only for resources in state where the permission level is equal to the replacement value +// in the mapping. For example, the permissons endpoint used to use the "CAN_VIEW" permission level for +// read-only access, but this was changed to "CAN_READ". Users who previously used "CAN_VIEW" should not +// be forced to change to "CAN_READ". This customizer will rewrite "CAN_READ" to "CAN_VIEW" when the +// user-specified value is CAN_VIEW and the API response is CAN_READ. +func RewritePermissions(mapping map[iam.PermissionLevel]iam.PermissionLevel) ACLCustomizer { + findOriginalAcl := func(new iam.AccessControlResponse, original entity.PermissionsEntity) (iam.AccessControlRequest, bool) { + for _, old := range original.AccessControlList { + if new.GroupName != "" && old.GroupName == new.GroupName { + return old, true + } + if new.UserName != "" && old.UserName == new.UserName { + return old, true + } + if new.ServicePrincipalName != "" && old.ServicePrincipalName == new.ServicePrincipalName { + return old, true + } + } + return iam.AccessControlRequest{}, false + } + return func(ctx ACLCustomizerContext, acl iam.ObjectPermissions) iam.ObjectPermissions { + original := ctx.GetExistingPermissionsEntity() + for i := range acl.AccessControlList { + inState, found := findOriginalAcl(acl.AccessControlList[i], original) + for j := range acl.AccessControlList[i].AllPermissions { + // If the original permission level is remapped to a replacement level, and the permission level + // in state is equal to the replacement level, we rewrite it to the replacement level. 
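+				// E.g. with the read mapping CAN_READ -> CAN_VIEW: the API returns CAN_READ, but if the
+				// configuration still holds CAN_VIEW, the level is rewritten to CAN_VIEW so no diff is shown.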
+ original := acl.AccessControlList[i].AllPermissions[j].PermissionLevel + replacement, ok := mapping[original] + if ok && found && inState.PermissionLevel == replacement { + acl.AccessControlList[i].AllPermissions[j].PermissionLevel = replacement + } + } + } + return acl + } +} diff --git a/permissions/resource_permissions.go b/permissions/resource_permissions.go index fb0b24eebf..6eb138fb80 100644 --- a/permissions/resource_permissions.go +++ b/permissions/resource_permissions.go @@ -4,96 +4,17 @@ import ( "context" "errors" "fmt" - "log" "path" - "strconv" "strings" - "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/terraform-provider-databricks/common" + "github.com/databricks/terraform-provider-databricks/permissions/entity" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -// ObjectACL is a structure to generically describe access control -type ObjectACL struct { - ObjectID string `json:"object_id,omitempty"` - ObjectType string `json:"object_type,omitempty"` - AccessControlList []AccessControl `json:"access_control_list"` -} - -// AccessControl is a structure to describe user/group permissions -type AccessControl struct { - UserName string `json:"user_name,omitempty"` - GroupName string `json:"group_name,omitempty"` - ServicePrincipalName string `json:"service_principal_name,omitempty"` - AllPermissions []Permission `json:"all_permissions,omitempty"` - - // SQLA entities don't use the `all_permissions` nesting, but rather a simple - // top level string with the permission level when retrieving permissions. - PermissionLevel string `json:"permission_level,omitempty"` -} - -func (ac AccessControl) toAccessControlChange() (AccessControlChange, bool) { - for _, permission := range ac.AllPermissions { - if permission.Inherited { - continue - } - return AccessControlChange{ - PermissionLevel: permission.PermissionLevel, - UserName: ac.UserName, - GroupName: ac.GroupName, - ServicePrincipalName: ac.ServicePrincipalName, - }, true - } - if ac.PermissionLevel != "" { - return AccessControlChange{ - PermissionLevel: ac.PermissionLevel, - UserName: ac.UserName, - GroupName: ac.GroupName, - ServicePrincipalName: ac.ServicePrincipalName, - }, true - } - return AccessControlChange{}, false -} - -func (ac AccessControl) String() string { - return fmt.Sprintf("%s%s%s%v", ac.GroupName, ac.UserName, ac.ServicePrincipalName, ac.AllPermissions) -} - -// Permission is a structure to describe permission level -type Permission struct { - PermissionLevel string `json:"permission_level"` - Inherited bool `json:"inherited,omitempty"` - InheritedFromObject []string `json:"inherited_from_object,omitempty"` -} - -func (p Permission) String() string { - if len(p.InheritedFromObject) > 0 { - return fmt.Sprintf("%s (from %s)", p.PermissionLevel, p.InheritedFromObject) - } - return p.PermissionLevel -} - -// AccessControlChangeList is wrapper around ACL changes for REST API -type AccessControlChangeList struct { - AccessControlList []AccessControlChange `json:"access_control_list"` -} - -// AccessControlChange is API wrapper for changing permissions -type AccessControlChange struct { - UserName string `json:"user_name,omitempty"` - GroupName string `json:"group_name,omitempty"` - ServicePrincipalName string `json:"service_principal_name,omitempty"` - PermissionLevel string `json:"permission_level"` -} - -func (acc AccessControlChange) String() string { - return fmt.Sprintf("%v%v%v 
%s", acc.UserName, acc.GroupName, acc.ServicePrincipalName, - acc.PermissionLevel) -} - // NewPermissionsAPI creates PermissionsAPI instance from provider meta func NewPermissionsAPI(ctx context.Context, m any) PermissionsAPI { return PermissionsAPI{ @@ -108,187 +29,103 @@ type PermissionsAPI struct { context context.Context } -func isDbsqlPermissionsWorkaroundNecessary(objectID string) bool { - return strings.HasPrefix(objectID, "/sql/") && !strings.HasPrefix(objectID, "/sql/warehouses") -} - -func urlPathForObjectID(objectID string) string { - if isDbsqlPermissionsWorkaroundNecessary(objectID) { - // Permissions for SQLA entities are routed differently from the others. - return "/preview/sql/permissions" + objectID[4:] - } - return "/permissions" + objectID -} - -// As described in https://github.com/databricks/terraform-provider-databricks/issues/1504, -// certain object types require that we explicitly grant the calling user CAN_MANAGE -// permissions when POSTing permissions changes through the REST API, to avoid accidentally -// revoking the calling user's ability to manage the current object. -func (a PermissionsAPI) shouldExplicitlyGrantCallingUserManagePermissions(objectID string) bool { - for _, prefix := range [...]string{"/registered-models/", "/clusters/", "/instance-pools/", "/serving-endpoints/", "/queries/", "/sql/warehouses"} { - if strings.HasPrefix(objectID, prefix) { - return true - } - } - return isDbsqlPermissionsWorkaroundNecessary(objectID) -} - -func isOwnershipWorkaroundNecessary(objectID string) bool { - return strings.HasPrefix(objectID, "/jobs") || strings.HasPrefix(objectID, "/pipelines") || strings.HasPrefix(objectID, "/sql/warehouses") -} - -func (a PermissionsAPI) getObjectCreator(objectID string) (string, error) { +// safePutWithOwner is a workaround for the limitation where warehouse without owners cannot have IS_OWNER set +func (a PermissionsAPI) safePutWithOwner(objectID string, objectACL []iam.AccessControlRequest, mapping resourcePermissions, ownerOpt string) error { w, err := a.client.WorkspaceClient() if err != nil { - return "", err + return err } - if strings.HasPrefix(objectID, "/jobs") { - jobId, err := strconv.ParseInt(strings.ReplaceAll(objectID, "/jobs/", ""), 10, 64) - if err != nil { - return "", err - } - job, err := w.Jobs.GetByJobId(a.context, jobId) - if err != nil { - return "", common.IgnoreNotFoundError(err) - } - return job.CreatorUserName, nil - } else if strings.HasPrefix(objectID, "/pipelines") { - pipeline, err := w.Pipelines.GetByPipelineId(a.context, strings.ReplaceAll(objectID, "/pipelines/", "")) - if err != nil { - return "", common.IgnoreNotFoundError(err) - } - return pipeline.CreatorUserName, nil - } else if strings.HasPrefix(objectID, "/sql/warehouses") { - warehouse, err := w.Warehouses.GetById(a.context, strings.ReplaceAll(objectID, "/sql/warehouses/", "")) - if err != nil { - return "", common.IgnoreNotFoundError(err) + idParts := strings.Split(objectID, "/") + id := idParts[len(idParts)-1] + withOwner := mapping.addOwnerPermissionIfNeeded(objectACL, ownerOpt) + _, err = w.Permissions.Set(a.context, iam.PermissionsRequest{ + RequestObjectId: id, + RequestObjectType: mapping.requestObjectType, + AccessControlList: withOwner, + }) + if err != nil { + if strings.Contains(err.Error(), "with no existing owner must provide a new owner") { + _, err = w.Permissions.Set(a.context, iam.PermissionsRequest{ + RequestObjectId: id, + RequestObjectType: mapping.requestObjectType, + AccessControlList: objectACL, + }) } - return 
warehouse.CreatorName, nil + return err } - return "", nil + return nil } -func (a PermissionsAPI) ensureCurrentUserCanManageObject(objectID string, objectACL AccessControlChangeList) (AccessControlChangeList, error) { - if !a.shouldExplicitlyGrantCallingUserManagePermissions(objectID) { - return objectACL, nil - } +func (a PermissionsAPI) getCurrentUser() (string, error) { w, err := a.client.WorkspaceClient() if err != nil { - return objectACL, err + return "", err } me, err := w.CurrentUser.Me(a.context) if err != nil { - return objectACL, err + return "", err } - objectACL.AccessControlList = append(objectACL.AccessControlList, AccessControlChange{ - UserName: me.UserName, - PermissionLevel: "CAN_MANAGE", - }) - return objectACL, nil + return me.UserName, nil } -// Helper function for applying permissions changes. Ensures that -// we select the correct HTTP method based on the object type and preserve the calling -// user's ability to manage the specified object when applying permissions changes. -func (a PermissionsAPI) put(objectID string, objectACL AccessControlChangeList) error { - objectACL, err := a.ensureCurrentUserCanManageObject(objectID, objectACL) +// Update updates object permissions. Technically, it's using method named SetOrDelete, but here we do more +func (a PermissionsAPI) Update(objectID string, entity entity.PermissionsEntity, mapping resourcePermissions) error { + currentUser, err := a.getCurrentUser() if err != nil { return err } - if isDbsqlPermissionsWorkaroundNecessary(objectID) { - // SQLA entities use POST for permission updates. - return a.client.Post(a.context, urlPathForObjectID(objectID), objectACL, nil) + // this logic was moved from CustomizeDiff because of undeterministic auth behavior + // in the corner-case scenarios. + // see https://github.com/databricks/terraform-provider-databricks/issues/2052 + err = mapping.validate(a.context, entity, currentUser) + if err != nil { + return err } - log.Printf("[DEBUG] PUT %s %v", objectID, objectACL) - return a.client.Put(a.context, urlPathForObjectID(objectID), objectACL) -} - -// safePutWithOwner is a workaround for the limitation where warehouse without owners cannot have IS_OWNER set -func (a PermissionsAPI) safePutWithOwner(objectID string, objectACL AccessControlChangeList, originalAcl []AccessControlChange) error { - err := a.put(objectID, objectACL) + prepared, err := mapping.prepareForUpdate(objectID, entity, currentUser) if err != nil { - if strings.Contains(err.Error(), "with no existing owner must provide a new owner") { - objectACL.AccessControlList = originalAcl - return a.put(objectID, objectACL) - } return err } - return nil + return a.safePutWithOwner(objectID, prepared.AccessControlList, mapping, currentUser) } -// Update updates object permissions. Technically, it's using method named SetOrDelete, but here we do more -func (a PermissionsAPI) Update(objectID string, objectACL AccessControlChangeList) error { - if objectID == "/authorization/tokens" || objectID == "/registered-models/root" || objectID == "/directories/0" { - // Prevent "Cannot change permissions for group 'admins' to None." - objectACL.AccessControlList = append(objectACL.AccessControlList, AccessControlChange{ - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }) +// Delete gracefully removes permissions of non-admin users. After this operation, the object is managed +// by the current user and admin group. If the resource has IS_OWNER permissions, they are reset to the +// object creator, if it can be determined. 
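+// For example, deleting databricks_permissions for a job keeps only the admins group ACL and restores the
+// job's creator as IS_OWNER.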
+func (a PermissionsAPI) Delete(objectID string, mapping resourcePermissions) error { + objectACL, err := a.readRaw(objectID, mapping) + if err != nil { + return err } - originalAcl := make([]AccessControlChange, len(objectACL.AccessControlList)) - _ = copy(originalAcl, objectACL.AccessControlList) - if isOwnershipWorkaroundNecessary(objectID) { - owners := 0 - for _, acl := range objectACL.AccessControlList { - if acl.PermissionLevel == "IS_OWNER" { - owners++ - } - } - if owners == 0 { - w, err := a.client.WorkspaceClient() - if err != nil { - return err - } - me, err := w.CurrentUser.Me(a.context) - if err != nil { - return err - } - // add owner if it's missing, otherwise automated planning might be difficult - objectACL.AccessControlList = append(objectACL.AccessControlList, AccessControlChange{ - UserName: me.UserName, - PermissionLevel: "IS_OWNER", - }) - } + accl, err := mapping.prepareForDelete(objectACL, a.getCurrentUser) + if err != nil { + return err } - return a.safePutWithOwner(objectID, objectACL, originalAcl) -} - -// Delete gracefully removes permissions. Technically, it's using method named SetOrDelete, but here we do more -func (a PermissionsAPI) Delete(objectID string) error { - objectACL, err := a.Read(objectID) + w, err := a.client.WorkspaceClient() if err != nil { return err } - accl := AccessControlChangeList{} - for _, acl := range objectACL.AccessControlList { - if acl.GroupName == "admins" && objectID != "/authorization/passwords" { - if change, direct := acl.toAccessControlChange(); direct { - // keep everything direct for admin group - accl.AccessControlList = append(accl.AccessControlList, change) - } - } + resourceStatus, err := mapping.getObjectStatus(a.context, w, objectID) + if err != nil { + return err } - originalAcl := make([]AccessControlChange, len(accl.AccessControlList)) - _ = copy(originalAcl, accl.AccessControlList) - if isOwnershipWorkaroundNecessary(objectID) { - creator, err := a.getObjectCreator(objectID) - if err != nil { - return err - } - if creator == "" { - return nil - } - accl.AccessControlList = append(accl.AccessControlList, AccessControlChange{ - UserName: creator, - PermissionLevel: "IS_OWNER", - }) + // Do not bother resetting permissions for deleted resources + if !resourceStatus.exists { + return nil } - return a.safePutWithOwner(objectID, accl, originalAcl) + return a.safePutWithOwner(objectID, accl, mapping, resourceStatus.creator) } -// Read gets all relevant permissions for the object, including inherited ones -func (a PermissionsAPI) Read(objectID string) (objectACL ObjectACL, err error) { - err = a.client.Get(a.context, urlPathForObjectID(objectID), nil, &objectACL) +func (a PermissionsAPI) readRaw(objectID string, mapping resourcePermissions) (*iam.ObjectPermissions, error) { + w, err := a.client.WorkspaceClient() + if err != nil { + return nil, err + } + idParts := strings.Split(objectID, "/") + id := idParts[len(idParts)-1] + permissions, err := w.Permissions.Get(a.context, iam.GetPermissionRequest{ + RequestObjectId: id, + RequestObjectType: mapping.requestObjectType, + }) var apiErr *apierr.APIError // https://github.com/databricks/terraform-provider-databricks/issues/1227 // platform propagates INVALID_STATE error for auto-purged clusters in @@ -296,143 +133,34 @@ func (a PermissionsAPI) Read(objectID string) (objectACL ObjectACL, err error) { // cross-package dependency on "clusters". 
if errors.As(err, &apiErr) && strings.Contains(apiErr.Message, "Cannot access cluster") && apiErr.StatusCode == 400 { apiErr.StatusCode = 404 + apiErr.ErrorCode = "RESOURCE_DOES_NOT_EXIST" err = apiErr - return - } - if strings.HasPrefix(objectID, "/dashboards/") { - // workaround for inconsistent API response returning object ID of file in the workspace - objectACL.ObjectID = objectID - } - return -} - -// permissionsIDFieldMapping holds mapping -type permissionsIDFieldMapping struct { - field, objectType, resourceType string - - allowedPermissionLevels []string - - idRetriever func(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) -} - -// PermissionsResourceIDFields shows mapping of id columns to resource types -func permissionsResourceIDFields() []permissionsIDFieldMapping { - SIMPLE := func(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { - return id, nil - } - PATH := func(ctx context.Context, w *databricks.WorkspaceClient, path string) (string, error) { - info, err := w.Workspace.GetStatusByPath(ctx, path) - if err != nil { - return "", fmt.Errorf("cannot load path %s: %s", path, err) - } - return strconv.FormatInt(info.ObjectId, 10), nil - } - return []permissionsIDFieldMapping{ - {"cluster_policy_id", "cluster-policy", "cluster-policies", []string{"CAN_USE"}, SIMPLE}, - {"instance_pool_id", "instance-pool", "instance-pools", []string{"CAN_ATTACH_TO", "CAN_MANAGE"}, SIMPLE}, - {"cluster_id", "cluster", "clusters", []string{"CAN_ATTACH_TO", "CAN_RESTART", "CAN_MANAGE"}, SIMPLE}, - {"pipeline_id", "pipelines", "pipelines", []string{"CAN_VIEW", "CAN_RUN", "CAN_MANAGE", "IS_OWNER"}, SIMPLE}, - {"job_id", "job", "jobs", []string{"CAN_VIEW", "CAN_MANAGE_RUN", "IS_OWNER", "CAN_MANAGE"}, SIMPLE}, - {"notebook_id", "notebook", "notebooks", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"notebook_path", "notebook", "notebooks", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, PATH}, - {"directory_id", "directory", "directories", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"directory_path", "directory", "directories", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, PATH}, - {"workspace_file_id", "file", "files", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"workspace_file_path", "file", "files", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, PATH}, - {"repo_id", "repo", "repos", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"repo_path", "repo", "repos", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, PATH}, - {"authorization", "tokens", "authorization", []string{"CAN_USE"}, SIMPLE}, - {"authorization", "passwords", "authorization", []string{"CAN_USE"}, SIMPLE}, - {"sql_endpoint_id", "warehouses", "sql/warehouses", []string{"CAN_USE", "CAN_MANAGE", "CAN_MONITOR", "IS_OWNER"}, SIMPLE}, - {"sql_dashboard_id", "dashboard", "sql/dashboards", []string{"CAN_EDIT", "CAN_RUN", "CAN_MANAGE", "CAN_VIEW"}, SIMPLE}, - {"sql_alert_id", "alert", "sql/alerts", []string{"CAN_EDIT", "CAN_RUN", "CAN_MANAGE", "CAN_VIEW"}, SIMPLE}, - {"sql_query_id", "query", "sql/queries", []string{"CAN_EDIT", "CAN_RUN", "CAN_MANAGE", "CAN_VIEW"}, SIMPLE}, - {"dashboard_id", "dashboard", "dashboards", []string{"CAN_EDIT", "CAN_RUN", "CAN_MANAGE", "CAN_READ"}, SIMPLE}, - {"experiment_id", "mlflowExperiment", "experiments", []string{"CAN_READ", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"registered_model_id", "registered-model", 
"registered-models", []string{ - "CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE"}, SIMPLE}, - {"serving_endpoint_id", "serving-endpoint", "serving-endpoints", []string{"CAN_VIEW", "CAN_QUERY", "CAN_MANAGE"}, SIMPLE}, - } -} - -// PermissionsEntity is the one used for resource metadata -type PermissionsEntity struct { - ObjectType string `json:"object_type,omitempty" tf:"computed"` - AccessControlList []AccessControlChange `json:"access_control" tf:"slice_set"` -} - -func (oa *ObjectACL) isMatchingMapping(mapping permissionsIDFieldMapping) bool { - if mapping.objectType != oa.ObjectType { - return false - } - if oa.ObjectID != "" && oa.ObjectID[0] == '/' { - return strings.HasPrefix(oa.ObjectID[1:], mapping.resourceType) - } - if strings.HasPrefix(oa.ObjectID, "dashboards/") || strings.HasPrefix(oa.ObjectID, "alerts/") || strings.HasPrefix(oa.ObjectID, "queries/") { - idx := strings.Index(oa.ObjectID, "/") - if idx != -1 { - return mapping.resourceType == "sql/"+oa.ObjectID[:idx] - } - } - - return false -} - -func (oa *ObjectACL) ToPermissionsEntity(d *schema.ResourceData, me string) (PermissionsEntity, error) { - entity := PermissionsEntity{} - for _, accessControl := range oa.AccessControlList { - if accessControl.GroupName == "admins" && d.Id() != "/authorization/passwords" { - // not possible to lower admins permissions anywhere from CAN_MANAGE - continue - } - if me == accessControl.UserName || me == accessControl.ServicePrincipalName { - // not possible to lower one's permissions anywhere from CAN_MANAGE - continue - } - if change, direct := accessControl.toAccessControlChange(); direct { - entity.AccessControlList = append(entity.AccessControlList, change) - } } - for _, mapping := range permissionsResourceIDFields() { - if !oa.isMatchingMapping(mapping) { - continue - } - entity.ObjectType = mapping.objectType - var pathVariant any - if mapping.objectType == "file" { - pathVariant = d.Get("workspace_file_path") - } else { - pathVariant = d.Get(mapping.objectType + "_path") - } - if pathVariant != nil && pathVariant.(string) != "" { - // we're not importing and it's a path... 
it's set, so let's not re-set it - return entity, nil - } - identifier := path.Base(oa.ObjectID) - return entity, d.Set(mapping.field, identifier) + if err != nil { + return nil, err } - return entity, fmt.Errorf("unknown object type %s", oa.ObjectType) + return permissions, nil } -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } +// Read gets all relevant permissions for the object, including inherited ones +func (a PermissionsAPI) Read(objectID string, mapping resourcePermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { + permissions, err := a.readRaw(objectID, mapping) + if err != nil { + return entity.PermissionsEntity{}, err } - return false + return mapping.prepareResponse(objectID, permissions, existing, me) } // ResourcePermissions definition func ResourcePermissions() common.Resource { - s := common.StructToSchema(PermissionsEntity{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { - for _, mapping := range permissionsResourceIDFields() { + s := common.StructToSchema(entity.PermissionsEntity{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { + for _, mapping := range allResourcePermissions() { s[mapping.field] = &schema.Schema{ ForceNew: true, Type: schema.TypeString, Optional: true, } - for _, m := range permissionsResourceIDFields() { + for _, m := range allResourcePermissions() { if m.field == mapping.field { continue } @@ -445,38 +173,44 @@ func ResourcePermissions() common.Resource { return common.Resource{ Schema: s, CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff) error { + mapping, _, err := getResourcePermissionsFromState(diff) + if err != nil { + // This preserves current behavior but is likely only exercised in tests where + // the original config is not specified. + return nil + } + planned := entity.PermissionsEntity{} + common.DiffToStructPointer(diff, s, &planned) // Plan time validation for object permission levels - for _, mapping := range permissionsResourceIDFields() { - if _, ok := diff.GetOk(mapping.field); !ok { + for _, accessControl := range planned.AccessControlList { + permissionLevel := accessControl.PermissionLevel + // No diff in permission level, so don't need to check. + if permissionLevel == "" { continue } - access_control_list := diff.Get("access_control").(*schema.Set).List() - for _, access_control := range access_control_list { - m := access_control.(map[string]any) - permission_level := m["permission_level"].(string) - if !stringInSlice(permission_level, mapping.allowedPermissionLevels) { - return fmt.Errorf(`permission_level %s is not supported with %s objects`, - permission_level, mapping.field) - } + // TODO: only warn on unknown permission levels, as new levels may be released that the TF provider + // is not aware of. 
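+			// E.g. configuring CAN_VIEW for a cluster_id fails here, since clusters only allow
+			// CAN_ATTACH_TO, CAN_RESTART and CAN_MANAGE.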
+ if _, ok := mapping.allowedPermissionLevels[string(permissionLevel)]; !ok { + return fmt.Errorf(`permission_level %s is not supported with %s objects; allowed levels: %s`, + permissionLevel, mapping.field, strings.Join(mapping.getAllowedPermissionLevels(true), ", ")) } } return nil }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - id := d.Id() - w, err := c.WorkspaceClient() - if err != nil { - return err - } - objectACL, err := NewPermissionsAPI(ctx, c).Read(id) + a := NewPermissionsAPI(ctx, c) + mapping, err := getResourcePermissionsFromId(d.Id()) if err != nil { return err } - me, err := w.CurrentUser.Me(ctx) + var existing entity.PermissionsEntity + common.DataToStructPointer(d, s, &existing) + me, err := a.getCurrentUser() if err != nil { return err } - entity, err := objectACL.ToPermissionsEntity(d, me.UserName) + id := d.Id() + entity, err := a.Read(id, mapping, existing, me) if err != nil { return err } @@ -485,61 +219,53 @@ func ResourcePermissions() common.Resource { d.SetId("") return nil } + entity.ObjectType = mapping.objectType + pathVariant := d.Get(mapping.getPathVariant()) + if pathVariant == nil || pathVariant.(string) == "" { + identifier := path.Base(id) + if err = d.Set(mapping.field, identifier); err != nil { + return err + } + } return common.StructToData(entity, s, d) }, Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - var entity PermissionsEntity + var entity entity.PermissionsEntity common.DataToStructPointer(d, s, &entity) w, err := c.WorkspaceClient() if err != nil { return err } - me, err := w.CurrentUser.Me(ctx) + mapping, configuredValue, err := getResourcePermissionsFromState(d) if err != nil { return err } - for _, mapping := range permissionsResourceIDFields() { - if v, ok := d.GetOk(mapping.field); ok { - id, err := mapping.idRetriever(ctx, w, v.(string)) - if err != nil { - return err - } - objectID := fmt.Sprintf("/%s/%s", mapping.resourceType, id) - // this logic was moved from CustomizeDiff because of undeterministic auth behavior - // in the corner-case scenarios. 
- // see https://github.com/databricks/terraform-provider-databricks/issues/2052 - for _, v := range entity.AccessControlList { - if v.UserName == me.UserName { - format := "it is not possible to decrease administrative permissions for the current user: %s" - return fmt.Errorf(format, me.UserName) - } - - if v.GroupName == "admins" && mapping.resourceType != "authorization" { - // should allow setting admins permissions for passwords and tokens usage - return fmt.Errorf("it is not possible to restrict any permissions from `admins`") - } - } - err = NewPermissionsAPI(ctx, c).Update(objectID, AccessControlChangeList{ - AccessControlList: entity.AccessControlList, - }) - if err != nil { - return err - } - d.SetId(objectID) - return nil - } + objectID, err := mapping.getID(ctx, w, configuredValue) + if err != nil { + return err + } + err = NewPermissionsAPI(ctx, c).Update(objectID, entity, mapping) + if err != nil { + return err } - return errors.New("at least one type of resource identifiers must be set") + d.SetId(objectID) + return nil }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - var entity PermissionsEntity + var entity entity.PermissionsEntity common.DataToStructPointer(d, s, &entity) - return NewPermissionsAPI(ctx, c).Update(d.Id(), AccessControlChangeList{ - AccessControlList: entity.AccessControlList, - }) + mapping, err := getResourcePermissionsFromId(d.Id()) + if err != nil { + return err + } + return NewPermissionsAPI(ctx, c).Update(d.Id(), entity, mapping) }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - return NewPermissionsAPI(ctx, c).Delete(d.Id()) + mapping, err := getResourcePermissionsFromId(d.Id()) + if err != nil { + return err + } + return NewPermissionsAPI(ctx, c).Delete(d.Id(), mapping) }, } } diff --git a/permissions/resource_permissions_test.go b/permissions/resource_permissions_test.go index b01fddb1ca..7019ae5c56 100644 --- a/permissions/resource_permissions_test.go +++ b/permissions/resource_permissions_test.go @@ -2,17 +2,21 @@ package permissions import ( "context" - "net/http" + "fmt" "testing" + "github.com/stretchr/testify/mock" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/databricks/terraform-provider-databricks/common" - "github.com/databricks/terraform-provider-databricks/scim" - + "github.com/databricks/terraform-provider-databricks/permissions/entity" "github.com/databricks/terraform-provider-databricks/qa" - "github.com/databricks/terraform-provider-databricks/workspace" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,70 +26,39 @@ var ( TestingUser = "ben" TestingAdminUser = "admin" TestingOwner = "testOwner" - me = qa.HTTPFixture{ - ReuseRequest: true, - Method: "GET", - Resource: "/api/2.0/preview/scim/v2/Me", - Response: scim.User{ - UserName: TestingAdminUser, - }, - } ) -func TestEntityAccessControlChangeString(t *testing.T) { - assert.Equal(t, "me CAN_READ", AccessControlChange{ - UserName: "me", - PermissionLevel: "CAN_READ", - }.String()) -} - -func TestEntityAccessControlString(t *testing.T) { - 
assert.Equal(t, "me[CAN_READ (from [parent]) CAN_MANAGE]", AccessControl{ - UserName: "me", - AllPermissions: []Permission{ - { - InheritedFromObject: []string{"parent"}, - PermissionLevel: "CAN_READ", - }, - { - PermissionLevel: "CAN_MANAGE", - }, - }, - }.String()) -} - func TestResourcePermissionsRead(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "cluster", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), Read: true, @@ -104,17 +77,16 @@ func TestResourcePermissionsRead(t *testing.T) { // https://github.com/databricks/terraform-provider-databricks/issues/1227 func TestResourcePermissionsRead_RemovedCluster(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Status: 400, - Response: apierr.APIError{ - ErrorCode: "INVALID_STATE", - Message: "Cannot access cluster X that was terminated or unpinned more than Y days ago.", - }, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(nil, &apierr.APIError{ + StatusCode: 400, + ErrorCode: "INVALID_STATE", + Message: "Cannot access cluster X that was terminated or unpinned more than Y days ago.", + }) }, Resource: ResourcePermissions(), Read: true, @@ -126,27 +98,25 @@ func TestResourcePermissionsRead_RemovedCluster(t *testing.T) { func TestResourcePermissionsRead_Mlflow_Model(t *testing.T) { d, err := qa.ResourceFixture{ - // Pass list of API request mocks - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - Response: ObjectACL{ - ObjectID: "/registered-models/fakeuuid123", - ObjectType: "registered-model", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + 
RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/registered-models/fakeuuid123", + ObjectType: "registered-model", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), Read: true, @@ -164,42 +134,40 @@ func TestResourcePermissionsRead_Mlflow_Model(t *testing.T) { func TestResourcePermissionsCreate_Mlflow_Model(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - Response: ObjectACL{ - ObjectID: "/registered-models/fakeuuid123", - ObjectType: "registered-model", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/registered-models/fakeuuid123", + ObjectType: "registered-model", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -223,42 +191,40 @@ func TestResourcePermissionsCreate_Mlflow_Model(t *testing.T) { func TestResourcePermissionsUpdate_Mlflow_Model(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, 
+ PermissionLevel: "CAN_READ", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - Response: ObjectACL{ - ObjectID: "/registered-models/fakeuuid123", - ObjectType: "registered-model", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/registered-models/fakeuuid123", + ObjectType: "registered-model", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, InstanceState: map[string]string{ "registered_model_id": "fakeuuid123", @@ -287,38 +253,36 @@ func TestResourcePermissionsUpdate_Mlflow_Model(t *testing.T) { func TestResourcePermissionsDelete_Mlflow_Model(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - Response: ObjectACL{ - ObjectID: "/registered-models/fakeuuid123", - ObjectType: "registered-model", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/registered-models/fakeuuid123", + ObjectType: "registered-model", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), Delete: true, @@ -330,31 +294,38 @@ func TestResourcePermissionsDelete_Mlflow_Model(t *testing.T) { func TestResourcePermissionsRead_SQLA_Asset(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/sql/permissions/dashboards/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - 
UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "dbsql-dashboards", + }).Return(&iam.ObjectPermissions{ + ObjectId: "dashboards/abc", + ObjectType: "dashboard", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), Read: true, New: true, ID: "/sql/dashboards/abc", + HCL: ` + sql_dashboard_id = "abc" + access_control { + user_name = "ben" + permission_level = "CAN_VIEW" + } + `, }.Apply(t) assert.NoError(t, err) assert.Equal(t, "/sql/dashboards/abc", d.Id()) @@ -362,31 +333,31 @@ func TestResourcePermissionsRead_SQLA_Asset(t *testing.T) { require.Equal(t, 1, len(ac.List())) firstElem := ac.List()[0].(map[string]any) assert.Equal(t, TestingUser, firstElem["user_name"]) - assert.Equal(t, "CAN_READ", firstElem["permission_level"]) + assert.Equal(t, "CAN_VIEW", firstElem["permission_level"]) } func TestResourcePermissionsRead_Dashboard(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/dashboards/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "dashboards", + }).Return(&iam.ObjectPermissions{ + ObjectId: "dashboards/abc", + ObjectType: "dashboard", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), Read: true, @@ -405,17 +376,16 @@ func TestResourcePermissionsRead_Dashboard(t *testing.T) { func TestResourcePermissionsRead_NotFound(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: apierr.APIError{ - ErrorCode: "NOT_FOUND", - Message: "Cluster does not exist", - }, - Status: 404, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(nil, &apierr.APIError{ + StatusCode: 404, + ErrorCode: "NOT_FOUND", + Message: "Cluster does not exist", + }) }, Resource: ResourcePermissions(), Read: true, @@ -427,17 +397,16 @@ func 
TestResourcePermissionsRead_NotFound(t *testing.T) { func TestResourcePermissionsRead_some_error(t *testing.T) { _, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(nil, &apierr.APIError{ + StatusCode: 400, + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }) }, Resource: ResourcePermissions(), Read: true, @@ -455,56 +424,17 @@ func TestResourcePermissionsCustomizeDiff_ErrorOnCreate(t *testing.T) { access_control { permission_level = "WHATEVER" }`, - }.ExpectError(t, "permission_level WHATEVER is not supported with cluster_id objects") -} - -func TestResourcePermissionsCustomizeDiff_ErrorOnPermissionsDecreate(t *testing.T) { - qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - }, - Resource: ResourcePermissions(), - Create: true, - HCL: ` - cluster_id = "abc" - access_control { - permission_level = "CAN_ATTACH_TO" - user_name = "admin" - }`, - }.ExpectError(t, "it is not possible to decrease administrative permissions for the current user: admin") + }.ExpectError(t, "permission_level WHATEVER is not supported with cluster_id objects; allowed levels: CAN_ATTACH_TO, CAN_MANAGE, CAN_RESTART") } func TestResourcePermissionsRead_ErrorOnScimMe(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "clusters", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, - }, - }, - }, - }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/scim/v2/Me", - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, - }, func(ctx context.Context, client *common.DatabricksClient) { + mock := func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }) + } + qa.MockWorkspaceApply(t, mock, func(ctx context.Context, client *common.DatabricksClient) { r := ResourcePermissions().ToResource() d := r.TestResourceData() d.SetId("/clusters/abc") @@ -516,35 +446,33 @@ func TestResourcePermissionsRead_ErrorOnScimMe(t *testing.T) { func TestResourcePermissionsRead_ToPermissionsEntity_Error(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectType: "teapot", - }, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectType: "teapot", + }, nil) }, Resource: ResourcePermissions(), 
Read: true, New: true, ID: "/clusters/abc", - }.ExpectError(t, "unknown object type teapot") + }.ExpectError(t, "expected object type cluster, got teapot") } func TestResourcePermissionsRead_EmptyListResultsInRemoval(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "cluster", - }, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + }, nil) }, Resource: ResourcePermissions(), Read: true, @@ -558,48 +486,46 @@ func TestResourcePermissionsRead_EmptyListResultsInRemoval(t *testing.T) { func TestResourcePermissionsDelete(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "clusters", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/clusters/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), Delete: true, @@ -611,53 +537,50 @@ func TestResourcePermissionsDelete(t *testing.T) { func TestResourcePermissionsDelete_error(t *testing.T) { _, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "clusters", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := 
mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/clusters/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, + }).Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + StatusCode: 400, + }) }, Resource: ResourcePermissions(), Delete: true, @@ -668,15 +591,13 @@ func TestResourcePermissionsDelete_error(t *testing.T) { func TestResourcePermissionsCreate_invalid(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{me}, Resource: ResourcePermissions(), Create: true, - }.ExpectError(t, "at least one type of resource identifiers must be set") + }.ExpectError(t, "at least one type of resource identifier must be set; allowed fields: authorization, cluster_id, cluster_policy_id, dashboard_id, directory_id, directory_path, experiment_id, instance_pool_id, job_id, notebook_id, notebook_path, pipeline_id, registered_model_id, repo_id, repo_path, serving_endpoint_id, sql_alert_id, sql_dashboard_id, sql_endpoint_id, sql_query_id, workspace_file_id, workspace_file_path") } func TestResourcePermissionsCreate_no_access_control(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{}, Resource: ResourcePermissions(), Create: true, State: map[string]any{ @@ -687,7 +608,6 @@ func TestResourcePermissionsCreate_no_access_control(t *testing.T) { func TestResourcePermissionsCreate_conflicting_fields(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{}, Resource: ResourcePermissions(), Create: true, State: map[string]any{ @@ -705,7 +625,9 @@ func TestResourcePermissionsCreate_conflicting_fields(t *testing.T) { func TestResourcePermissionsCreate_AdminsThrowError(t *testing.T) { _, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{me}, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + }, Resource: ResourcePermissions(), Create: true, HCL: ` @@ -716,57 +638,55 @@ func TestResourcePermissionsCreate_AdminsThrowError(t *testing.T) { } `, }.Apply(t) - assert.EqualError(t, err, "it is not possible to restrict any permissions from `admins`") + assert.EqualError(t, err, "it is not possible to modify admin permissions for cluster resources") } func TestResourcePermissionsCreate(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - 
Method: http.MethodPut, - Resource: "/api/2.0/permissions/clusters/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_ATTACH_TO", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, - }, - }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "cluster", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_ATTACH_TO", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_ATTACH_TO", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_ATTACH_TO", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", + }, + }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -790,42 +710,50 @@ func TestResourcePermissionsCreate(t *testing.T) { func TestResourcePermissionsCreate_SQLA_Asset(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPost, - Resource: "/api/2.0/preview/sql/permissions/dashboards/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "dbsql-dashboards", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/dashboards/abc", + ObjectType: "dashboard", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_RUN", + Inherited: false, + }, }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, + }, }, }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/sql/permissions/dashboards/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, 
iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "dbsql-dashboards", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_RUN", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -849,50 +777,48 @@ func TestResourcePermissionsCreate_SQLA_Asset(t *testing.T) { func TestResourcePermissionsCreate_SQLA_Endpoint(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: "PUT", - Resource: "/api/2.0/permissions/sql/warehouses/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_USE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "IS_OWNER", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/sql/warehouses/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + }).Return(&iam.ObjectPermissions{ + ObjectId: "warehouses/abc", + ObjectType: "warehouses", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanUse}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelIsOwner}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -916,71 +842,66 @@ func TestResourcePermissionsCreate_SQLA_Endpoint(t *testing.T) { func TestResourcePermissionsCreate_SQLA_Endpoint_WithOwnerError(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: "PUT", - Resource: "/api/2.0/permissions/sql/warehouses/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + 
RequestObjectType: "sql/warehouses", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_USE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "IS_OWNER", }, }, - Response: apierr.APIError{ - ErrorCode: "INVALID_PARAMETER_VALUE", - Message: "PUT requests for warehouse *** with no existing owner must provide a new owner.", - }, - Status: 400, - }, - { - Method: "PUT", - Resource: "/api/2.0/permissions/sql/warehouses/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_PARAMETER_VALUE", + Message: "PUT requests for warehouse *** with no existing owner must provide a new owner.", + StatusCode: 400, + }) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_USE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/sql/warehouses/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + }).Return(&iam.ObjectPermissions{ + ObjectId: "warehouses/abc", + ObjectType: "warehouses", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanUse}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelIsOwner}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1004,50 +925,48 @@ func TestResourcePermissionsCreate_SQLA_Endpoint_WithOwnerError(t *testing.T) { func TestResourcePermissionsCreate_SQLA_Endpoint_WithOwner(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: "PUT", - Resource: "/api/2.0/permissions/sql/warehouses/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingOwner, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingOwner, + PermissionLevel: "IS_OWNER", + }, + { + UserName: TestingUser, + PermissionLevel: "CAN_USE", + }, + { + UserName: TestingAdminUser, + 
PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/sql/warehouses/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, - { - UserName: TestingOwner, - PermissionLevel: "IS_OWNER", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + }).Return(&iam.ObjectPermissions{ + ObjectId: "warehouses/abc", + ObjectType: "warehouses", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanUse}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, + }, + { + UserName: TestingOwner, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelIsOwner}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1094,17 +1013,12 @@ func TestResourcePermissionsCreate_SQLA_Endpoint_WithOwner(t *testing.T) { func TestResourcePermissionsCreate_NotebookPath_NotExists(t *testing.T) { _, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FDevelopment%2FInit", - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/Development/Init").Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + StatusCode: 400, + }) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1112,7 +1026,7 @@ func TestResourcePermissionsCreate_NotebookPath_NotExists(t *testing.T) { "access_control": []any{ map[string]any{ "user_name": TestingUser, - "permission_level": "CAN_USE", + "permission_level": "CAN_READ", }, }, }, @@ -1124,56 +1038,50 @@ func TestResourcePermissionsCreate_NotebookPath_NotExists(t *testing.T) { func TestResourcePermissionsCreate_NotebookPath(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FDevelopment%2FInit", - Response: workspace.ObjectStatus{ - ObjectID: 988765, - ObjectType: "NOTEBOOK", - }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/notebooks/988765", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/Development/Init").Return(&workspace.ObjectInfo{ + ObjectId: 988765, + ObjectType: workspace.ObjectTypeNotebook, + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "988765", + RequestObjectType: "notebooks", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", }, }, - }, - { - Method: http.MethodGet, - Resource: 
"/api/2.0/permissions/notebooks/988765", - Response: ObjectACL{ - ObjectID: "/notebooks/988765", - ObjectType: "notebook", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "988765", + RequestObjectType: "notebooks", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/notebooks/988765", + ObjectType: "notebook", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1198,56 +1106,50 @@ func TestResourcePermissionsCreate_NotebookPath(t *testing.T) { func TestResourcePermissionsCreate_WorkspaceFilePath(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FDevelopment%2FInit", - Response: workspace.ObjectStatus{ - ObjectID: 988765, - ObjectType: workspace.File, - }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/files/988765", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/Development/Init").Return(&workspace.ObjectInfo{ + ObjectId: 988765, + ObjectType: workspace.ObjectTypeFile, + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "988765", + RequestObjectType: "files", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/files/988765", - Response: ObjectACL{ - ObjectID: "/files/988765", - ObjectType: "file", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "988765", + RequestObjectType: "files", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/files/988765", + ObjectType: "file", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1272,18 +1174,6 @@ func TestResourcePermissionsCreate_WorkspaceFilePath(t *testing.T) { func TestResourcePermissionsCreate_error(t *testing.T) { qa.ResourceFixture{ - Fixtures: 
[]qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/clusters/abc", - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, - }, Resource: ResourcePermissions(), State: map[string]any{ "cluster_id": "abc", @@ -1295,14 +1185,17 @@ func TestResourcePermissionsCreate_error(t *testing.T) { }, }, Create: true, - }.ExpectError(t, "permission_level CAN_USE is not supported with cluster_id objects") + }.ExpectError(t, "permission_level CAN_USE is not supported with cluster_id objects; allowed levels: CAN_ATTACH_TO, CAN_MANAGE, CAN_RESTART") } func TestResourcePermissionsCreate_PathIdRetriever_Error(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - qa.HTTPFailures[0], + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/foo/bar").Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "i'm a teapot", + StatusCode: 418, + }) }, Resource: ResourcePermissions(), Create: true, @@ -1317,9 +1210,13 @@ func TestResourcePermissionsCreate_PathIdRetriever_Error(t *testing.T) { func TestResourcePermissionsCreate_ActualUpdate_Error(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - qa.HTTPFailures[0], + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Set(mock.Anything, mock.Anything).Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "i'm a teapot", + StatusCode: 418, + }) }, Resource: ResourcePermissions(), Create: true, @@ -1334,52 +1231,50 @@ func TestResourcePermissionsCreate_ActualUpdate_Error(t *testing.T) { func TestResourcePermissionsUpdate(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/jobs/9", - Response: ObjectACL{ - ObjectID: "/jobs/9", - ObjectType: "job", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_VIEW", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "9", + RequestObjectType: "jobs", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/jobs/9", + ObjectType: "job", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_VIEW", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/jobs/9", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_VIEW", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "9", + RequestObjectType: "jobs", + AccessControlList: 
[]iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_VIEW", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "IS_OWNER", }, }, - }, + }).Return(nil, nil) }, InstanceState: map[string]string{ "job_id": "9", @@ -1405,235 +1300,202 @@ func TestResourcePermissionsUpdate(t *testing.T) { assert.Equal(t, "CAN_VIEW", firstElem["permission_level"]) } +func getResourcePermissions(field, objectType string) resourcePermissions { + for _, mapping := range allResourcePermissions() { + if mapping.field == field && mapping.objectType == objectType { + return mapping + } + } + panic(fmt.Sprintf("could not find resource permissions for field %s and object type %s", field, objectType)) +} + func TestResourcePermissionsUpdateTokensAlwaysThereForAdmins(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "PUT", - Resource: "/api/2.0/permissions/authorization/tokens", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: "me", - PermissionLevel: "CAN_MANAGE", - }, - { - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "me"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "tokens", + RequestObjectType: "authorization", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: "me", + PermissionLevel: "CAN_MANAGE", + }, + { + GroupName: "admins", + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, func(ctx context.Context, client *common.DatabricksClient) { p := NewPermissionsAPI(ctx, client) - err := p.Update("/authorization/tokens", AccessControlChangeList{ - AccessControlList: []AccessControlChange{ + mapping := getResourcePermissions("authorization", "tokens") + err := p.Update("/authorization/tokens", entity.PermissionsEntity{ + AccessControlList: []iam.AccessControlRequest{ { UserName: "me", PermissionLevel: "CAN_MANAGE", }, }, - }) + }, mapping) assert.NoError(t, err) }) } func TestShouldKeepAdminsOnAnythingExceptPasswordsAndAssignsOwnerForJob(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/permissions/jobs/123", - Response: ObjectACL{ - ObjectID: "/jobs/123", - ObjectType: "job", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_DO_EVERYTHING", - Inherited: true, - }, - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockJobsAPI().EXPECT().GetByJobId(mock.Anything, int64(123)).Return(&jobs.Job{ + CreatorUserName: "creator@example.com", + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "123", + RequestObjectType: "jobs", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/jobs/123", + ObjectType: "job", + AccessControlList: []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_DO_EVERYTHING", + Inherited: true, + }, + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: "GET", - Resource: "/api/2.1/jobs/get?job_id=123", - Response: jobs.Job{ - CreatorUserName: "creator@example.com", - }, - }, - { - Method: "PUT", - Resource: "/api/2.0/permissions/jobs/123", - ExpectedRequest: 
ObjectACL{ - AccessControlList: []AccessControl{ - { - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }, - { - UserName: "creator@example.com", - PermissionLevel: "IS_OWNER", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "123", + RequestObjectType: "jobs", + AccessControlList: []iam.AccessControlRequest{ + { + GroupName: "admins", + PermissionLevel: "CAN_MANAGE", + }, + { + UserName: "creator@example.com", + PermissionLevel: "IS_OWNER", }, }, - }, + }).Return(nil, nil) }, func(ctx context.Context, client *common.DatabricksClient) { p := NewPermissionsAPI(ctx, client) - err := p.Delete("/jobs/123") + mapping := getResourcePermissions("job_id", "job") + err := p.Delete("/jobs/123", mapping) assert.NoError(t, err) }) } func TestShouldDeleteNonExistentJob(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/permissions/jobs/123", - Response: ObjectACL{ - ObjectID: "/jobs/123", - ObjectType: "job", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_DO_EVERYTHING", - Inherited: true, - }, - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "123", + RequestObjectType: "jobs", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/jobs/123", + ObjectType: "job", + AccessControlList: []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_DO_EVERYTHING", + Inherited: true, + }, + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: "GET", - Resource: "/api/2.1/jobs/get?job_id=123", - Status: 400, - Response: apierr.APIError{ - StatusCode: 400, - Message: "Job 123 does not exist.", - ErrorCode: "INVALID_PARAMETER_VALUE", - }, - }, + }, nil) + mwc.GetMockJobsAPI().EXPECT().GetByJobId(mock.Anything, int64(123)).Return(nil, &apierr.APIError{ + StatusCode: 400, + Message: "Job 123 does not exist.", + ErrorCode: "INVALID_PARAMETER_VALUE", + }) }, func(ctx context.Context, client *common.DatabricksClient) { p := NewPermissionsAPI(ctx, client) - err := p.Delete("/jobs/123") + mapping := getResourcePermissions("job_id", "job") + err := p.Delete("/jobs/123", mapping) assert.NoError(t, err) }) } func TestShouldKeepAdminsOnAnythingExceptPasswordsAndAssignsOwnerForPipeline(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/permissions/pipelines/123", - Response: ObjectACL{ - ObjectID: "/pipelines/123", - ObjectType: "pipeline", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_DO_EVERYTHING", - Inherited: true, - }, - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockPipelinesAPI().EXPECT().GetByPipelineId(mock.Anything, "123").Return(&pipelines.GetPipelineResponse{ + CreatorUserName: "creator@example.com", + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "123", + RequestObjectType: "pipelines", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/pipelines/123", + ObjectType: "pipeline", + AccessControlList: []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{ 
+ { + PermissionLevel: "CAN_DO_EVERYTHING", + Inherited: true, + }, + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: "GET", - Resource: "/api/2.0/pipelines/123?", - Response: jobs.Job{ - CreatorUserName: "creator@example.com", - }, - }, - { - Method: "PUT", - Resource: "/api/2.0/permissions/pipelines/123", - ExpectedRequest: ObjectACL{ - AccessControlList: []AccessControl{ - { - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }, - { - UserName: "creator@example.com", - PermissionLevel: "IS_OWNER", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "123", + RequestObjectType: "pipelines", + AccessControlList: []iam.AccessControlRequest{ + { + GroupName: "admins", + PermissionLevel: "CAN_MANAGE", + }, + { + UserName: "creator@example.com", + PermissionLevel: "IS_OWNER", }, }, - }, + }).Return(nil, nil) }, func(ctx context.Context, client *common.DatabricksClient) { p := NewPermissionsAPI(ctx, client) - err := p.Delete("/pipelines/123") + mapping := getResourcePermissions("pipeline_id", "pipelines") + err := p.Delete("/pipelines/123", mapping) assert.NoError(t, err) }) } func TestPathPermissionsResourceIDFields(t *testing.T) { - var m permissionsIDFieldMapping - for _, x := range permissionsResourceIDFields() { - if x.field == "notebook_path" { - m = x - } - } + m := getResourcePermissions("notebook_path", "notebook") w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) _, err = m.idRetriever(context.Background(), w, "x") assert.ErrorContains(t, err, "cannot load path x") } -func TestObjectACLToPermissionsEntityCornerCases(t *testing.T) { - _, err := (&ObjectACL{ - ObjectType: "bananas", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - }, - }, - }).ToPermissionsEntity(ResourcePermissions().ToResource().TestResourceData(), "me") - assert.EqualError(t, err, "unknown object type bananas") -} - -func TestEntityAccessControlToAccessControlChange(t *testing.T) { - _, res := AccessControl{}.toAccessControlChange() - assert.False(t, res) -} - -func TestCornerCases(t *testing.T) { - qa.ResourceCornerCases(t, ResourcePermissions(), qa.CornerCaseSkipCRUD("create")) -} - func TestDeleteMissing(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - MatchAny: true, - Status: 404, - Response: apierr.NotFound("missing"), - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "x", + RequestObjectType: "clusters", + }).Return(nil, apierr.ErrNotFound) }, func(ctx context.Context, client *common.DatabricksClient) { p := ResourcePermissions().ToResource() d := p.TestResourceData() - d.SetId("x") + d.SetId("/clusters/x") diags := p.DeleteContext(ctx, d, client) assert.Nil(t, diags) }) @@ -1641,65 +1503,59 @@ func TestDeleteMissing(t *testing.T) { func TestResourcePermissionsCreate_RepoPath(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FRepos%2FDevelopment%2FInit", - Response: workspace.ObjectStatus{ - ObjectID: 988765, - ObjectType: "repo", - }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/repos/988765", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - }, - }, - }, - { - Method: http.MethodGet, - Resource: 
"/api/2.0/permissions/repos/988765", - Response: ObjectACL{ - ObjectID: "/repos/988765", - ObjectType: "repo", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/Repos/Development/Init").Return(&workspace.ObjectInfo{ + ObjectId: 988765, + ObjectType: workspace.ObjectTypeRepo, + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "988765", + RequestObjectType: "repos", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/repos/988765", + ObjectType: "repo", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_RUN", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_RUN", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "988765", + RequestObjectType: "repos", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", + }, + }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1725,42 +1581,40 @@ func TestResourcePermissionsCreate_RepoPath(t *testing.T) { // when caller does not specify CAN_MANAGE permission during create, it should be explictly added func TestResourcePermissionsCreate_Sql_Queries(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPost, - Resource: "/api/2.0/preview/sql/permissions/queries/id111", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "id111", + RequestObjectType: "sql/queries", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_RUN", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/sql/permissions/queries/id111", - Response: ObjectACL{ - ObjectID: "queries/id111", - ObjectType: "query", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "id111", + RequestObjectType: "sql/queries", + 
}).Return(&iam.ObjectPermissions{ + ObjectId: "queries/id111", + ObjectType: "query", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRun}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1785,42 +1639,40 @@ func TestResourcePermissionsCreate_Sql_Queries(t *testing.T) { // when caller does not specify CAN_MANAGE permission during update, it should be explictly added func TestResourcePermissionsUpdate_Sql_Queries(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPost, - Resource: "/api/2.0/preview/sql/permissions/queries/id111", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "id111", + RequestObjectType: "sql/queries", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_RUN", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/sql/permissions/queries/id111", - Response: ObjectACL{ - ObjectID: "queries/id111", - ObjectType: "query", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "id111", + RequestObjectType: "sql/queries", + }).Return(&iam.ObjectPermissions{ + ObjectId: "queries/id111", + ObjectType: "query", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRun}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, InstanceState: map[string]string{ "sql_query_id": "id111", @@ -1847,65 +1699,59 @@ func TestResourcePermissionsUpdate_Sql_Queries(t *testing.T) { func TestResourcePermissionsCreate_DirectoryPath(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FFirst", - Response: workspace.ObjectStatus{ - ObjectID: 123456, - ObjectType: "directory", - }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/directories/123456", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/First").Return(&workspace.ObjectInfo{ + ObjectId: 123456, + ObjectType: workspace.ObjectTypeDirectory, + }, nil) + e := 
mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "123456", + RequestObjectType: "directories", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/directories/123456", - Response: ObjectACL{ - ObjectID: "/directories/123456", - ObjectType: "directory", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "123456", + RequestObjectType: "directories", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/directories/123456", + ObjectType: "directory", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_RUN", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_RUN", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1930,34 +1776,32 @@ func TestResourcePermissionsCreate_DirectoryPath(t *testing.T) { func TestResourcePermissionsPasswordUsage(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/authorization/passwords", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - GroupName: "admins", - PermissionLevel: "CAN_USE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "passwords", + RequestObjectType: "authorization", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/authorization/passwords", + ObjectType: "passwords", + AccessControlList: []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanUse}}, }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/authorization/passwords", - Response: ObjectACL{ - ObjectID: "/authorization/passwords", - ObjectType: "passwords", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - PermissionLevel: "CAN_USE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "passwords", + RequestObjectType: "authorization", + AccessControlList: []iam.AccessControlRequest{ + { + GroupName: "admins", + PermissionLevel: "CAN_USE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), HCL: ` @@ -1979,42 +1823,40 @@ func TestResourcePermissionsPasswordUsage(t *testing.T) { func TestResourcePermissionsRootDirectory(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/directories/0", - 
ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "0", + RequestObjectType: "directories", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/directories/0", + ObjectType: "directory", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + GroupName: "admins", + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/directories/0", - Response: ObjectACL{ - ObjectID: "/directories/0", - ObjectType: "directory", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "0", + RequestObjectType: "directories", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", + }, + { + GroupName: "admins", + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), HCL: ` diff --git a/permissions/update/customizers.go b/permissions/update/customizers.go new file mode 100644 index 0000000000..ea2c5dd5db --- /dev/null +++ b/permissions/update/customizers.go @@ -0,0 +1,97 @@ +package update + +import ( + "github.com/databricks/databricks-sdk-go/service/iam" +) + +// Context that is available to aclUpdateCustomizer implementations. +type ACLCustomizerContext struct { + GetCurrentUser func() (string, error) + GetId func() string +} + +// ACLCustomizer is a function that modifies the access control list of an object before it is updated. +type ACLCustomizer func(ctx ACLCustomizerContext, objectAcls []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) + +// If applies ths customizer if the condition is true. +func If(condition func(ACLCustomizerContext, []iam.AccessControlRequest) bool, customizer ACLCustomizer) ACLCustomizer { + return func(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) { + if condition(ctx, acl) { + return customizer(ctx, acl) + } + return acl, nil + } +} + +func Not(condition func(ACLCustomizerContext, []iam.AccessControlRequest) bool) func(ACLCustomizerContext, []iam.AccessControlRequest) bool { + return func(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) bool { + return !condition(ctx, acl) + } +} + +// ObjectIdMatches returns a condition that checks if the object ID matches the expected value. +func ObjectIdMatches(expected string) func(ACLCustomizerContext, []iam.AccessControlRequest) bool { + return func(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) bool { + return ctx.GetId() == expected + } +} + +// AddAdmin adds an explicit CAN_MANAGE permission for the 'admins' group if explicitAdminPermissionCheck returns true +// for the provided object ID. 
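As a hypothetical illustration of how the customizers defined in this file might be chained by calling code (the wiring inside the permissions resource is not shown in this hunk), consider the sketch below; the import path, the object ID, and the ACL entries are assumptions for illustration only.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/databricks/terraform-provider-databricks/permissions/update"
)

func main() {
	// Hypothetical context; in the provider these callbacks would be backed by
	// the workspace client and the resource ID.
	ctx := update.ACLCustomizerContext{
		GetCurrentUser: func() (string, error) { return "someone@example.com", nil },
		GetId:          func() string { return "/sql/warehouses/abc" },
	}
	// Illustrative chain: add the admins group everywhere except the root
	// directory, and always keep the calling user as CAN_MANAGE.
	customizers := []update.ACLCustomizer{
		update.If(update.Not(update.ObjectIdMatches("/directories/0")), update.AddAdmin),
		update.AddCurrentUserAsManage,
	}
	acl := []iam.AccessControlRequest{
		{UserName: "data.scientist@example.com", PermissionLevel: "CAN_USE"},
	}
	var err error
	for _, c := range customizers {
		if acl, err = c(ctx, acl); err != nil {
			panic(err)
		}
	}
	fmt.Printf("%d entries after customization\n", len(acl)) // caller and admins appended
}
```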
+func AddAdmin(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) { + found := false + for _, acl := range acl { + if acl.GroupName == "admins" { + found = true + break + } + } + if !found { + // Prevent "Cannot change permissions for group 'admins' to None." + acl = append(acl, iam.AccessControlRequest{ + GroupName: "admins", + PermissionLevel: "CAN_MANAGE", + }) + } + return acl, nil +} + +// Whether the object requires explicit manage permissions for the calling user if not set. +// As described in https://github.com/databricks/terraform-provider-databricks/issues/1504, +// certain object types require that we explicitly grant the calling user CAN_MANAGE +// permissions when POSTing permissions changes through the REST API, to avoid accidentally +// revoking the calling user's ability to manage the current object. +func AddCurrentUserAsManage(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) { + currentUser, err := ctx.GetCurrentUser() + if err != nil { + return nil, err + } + // The validate() method called in Update() ensures that the current user's permissions are either CAN_MANAGE + // or IS_OWNER if they are specified. If the current user is not specified in the access control list, we add + // them with CAN_MANAGE permissions. + found := false + for _, acl := range acl { + if acl.UserName == currentUser || acl.ServicePrincipalName == currentUser { + found = true + break + } + } + if !found { + acl = append(acl, iam.AccessControlRequest{ + UserName: currentUser, + PermissionLevel: "CAN_MANAGE", + }) + } + return acl, nil +} + +func RewritePermissions(mapping map[iam.PermissionLevel]iam.PermissionLevel) ACLCustomizer { + return func(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) { + for i := range acl { + if new, ok := mapping[acl[i].PermissionLevel]; ok { + acl[i].PermissionLevel = new + } + } + return acl, nil + } +} From afdbafcb4d1530f021a8af94b94262ade87df239 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Tue, 1 Oct 2024 14:56:54 +0200 Subject: [PATCH 24/99] [Internal] Bump Go SDK to latest and generate TF structs (#4062) ## Changes Bump Go SDK to latest release and generate tf structs for same openapi sha as in Go SDK ## Tests Unit tests, nightly tests will run on release PR - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .codegen/_openapi_sha | 2 +- go.mod | 2 +- go.sum | 4 +- internal/service/apps_tf/model.go | 31 +++--- internal/service/catalog_tf/model.go | 85 ++++++++++++++- internal/service/compute_tf/model.go | 60 ++++++++--- internal/service/jobs_tf/model.go | 42 +++++--- internal/service/pipelines_tf/model.go | 8 ++ internal/service/serving_tf/model.go | 128 +++++++++++++++++++++- internal/service/settings_tf/model.go | 142 +++++++++++++++++++++++++ internal/service/sql_tf/model.go | 79 -------------- 11 files changed, 454 insertions(+), 129 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 4ceeab3d38..ffd6f58dd9 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -d05898328669a3f8ab0c2ecee37db2673d3ea3f7 \ No newline at end of file +6f6b1371e640f2dfeba72d365ac566368656f6b6 \ No newline at end of file diff --git a/go.mod b/go.mod index 8f5de34e8d..cb0d35a5ba 100644 --- a/go.mod +++ 
b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.46.0 + github.com/databricks/databricks-sdk-go v0.47.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 9cace277b5..8ff73d7ad5 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.46.0 h1:D0TxmtSVAOsdnfzH4OGtAmcq+8TyA7Z6fA6JEYhupeY= -github.com/databricks/databricks-sdk-go v0.46.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.47.0 h1:eE7dN9axviL8+s10jnQAayOYDaR+Mfu7E9COGjO4lrQ= +github.com/databricks/databricks-sdk-go v0.47.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 2ae21cc7d9..74406307a5 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -17,6 +17,10 @@ import ( type App struct { // The active deployment of the app. ActiveDeployment *AppDeployment `tfsdk:"active_deployment" tf:"optional"` + + AppStatus *ApplicationStatus `tfsdk:"app_status" tf:"optional"` + + ComputeStatus *ComputeStatus `tfsdk:"compute_status" tf:"optional"` // The creation time of the app. Formatted timestamp in ISO 6801. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The email of the user that created the app. @@ -32,8 +36,6 @@ type App struct { ServicePrincipalId types.Int64 `tfsdk:"service_principal_id" tf:"optional"` ServicePrincipalName types.String `tfsdk:"service_principal_name" tf:"optional"` - - Status *AppStatus `tfsdk:"status" tf:"optional"` // The update time of the app. Formatted timestamp in ISO 6801. UpdateTime types.String `tfsdk:"update_time" tf:"optional"` // The email of the user that last updated the app. @@ -84,7 +86,7 @@ type AppDeployment struct { // the app in the workspace during deployment creation, whereas the latter // provides a system generated stable snapshotted source code path used by // the deployment. - SourceCodePath types.String `tfsdk:"source_code_path" tf:""` + SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` // Status and status message of the deployment Status *AppDeploymentStatus `tfsdk:"status" tf:"optional"` // The update time of the deployment. Formatted timestamp in ISO 6801. @@ -132,16 +134,25 @@ type AppPermissionsRequest struct { AppName types.String `tfsdk:"-"` } -type AppStatus struct { - // Message corresponding with the app state. +type ApplicationStatus struct { + // Application status message + Message types.String `tfsdk:"message" tf:"optional"` + // State of the application. 
+ State types.String `tfsdk:"state" tf:"optional"` +} + +type ComputeStatus struct { + // Compute status message Message types.String `tfsdk:"message" tf:"optional"` - // State of the app. + // State of the app compute. State types.String `tfsdk:"state" tf:"optional"` } type CreateAppDeploymentRequest struct { // The name of the app. AppName types.String `tfsdk:"-"` + // The unique id of the deployment. + DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"` // The mode of which the deployment will manage the source code. Mode types.String `tfsdk:"mode" tf:"optional"` // The workspace file system path of the source code used to create the app @@ -151,7 +162,7 @@ type CreateAppDeploymentRequest struct { // the app in the workspace during deployment creation, whereas the latter // provides a system generated stable snapshotted source code path used by // the deployment. - SourceCodePath types.String `tfsdk:"source_code_path" tf:""` + SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` } type CreateAppRequest struct { @@ -168,9 +179,6 @@ type DeleteAppRequest struct { Name types.String `tfsdk:"-"` } -type DeleteResponse struct { -} - // Get an app deployment type GetAppDeploymentRequest struct { // The name of the app. @@ -245,9 +253,6 @@ type StopAppRequest struct { Name types.String `tfsdk:"-"` } -type StopAppResponse struct { -} - type UpdateAppRequest struct { // The description of the app. Description types.String `tfsdk:"description" tf:"optional"` diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index e84b479703..78848824f1 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -88,6 +88,21 @@ type ArtifactMatcher struct { type AssignResponse struct { } +// AWS temporary credentials for API authentication. Read more at +// https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. +type AwsCredentials struct { + // The access key ID that identifies the temporary credentials. + AccessKeyId types.String `tfsdk:"access_key_id" tf:"optional"` + // The Amazon Resource Name (ARN) of the S3 access point for temporary + // credentials related the external location. + AccessPoint types.String `tfsdk:"access_point" tf:"optional"` + // The secret access key that can be used to sign AWS API requests. + SecretAccessKey types.String `tfsdk:"secret_access_key" tf:"optional"` + // The token that users must pass to AWS API to use the temporary + // credentials. + SessionToken types.String `tfsdk:"session_token" tf:"optional"` +} + type AwsIamRoleRequest struct { // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access. RoleArn types.String `tfsdk:"role_arn" tf:""` @@ -145,6 +160,13 @@ type AzureServicePrincipal struct { DirectoryId types.String `tfsdk:"directory_id" tf:""` } +// Azure temporary credentials for API authentication. Read more at +// https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas +type AzureUserDelegationSas struct { + // The signed URI (SAS Token) used to access blob services for a given path + SasToken types.String `tfsdk:"sas_token" tf:"optional"` +} + // Cancel refresh type CancelRefreshRequest struct { // ID of the refresh. @@ -404,7 +426,7 @@ type CreateFunction struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties types.String `tfsdk:"properties" tf:"optional"` // Table function return parameters. 
- ReturnParams FunctionParameterInfos `tfsdk:"return_params" tf:""` + ReturnParams *FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` // Function language. When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -414,7 +436,7 @@ type CreateFunction struct { // Function body. RoutineDefinition types.String `tfsdk:"routine_definition" tf:""` // Function dependencies. - RoutineDependencies DependencyList `tfsdk:"routine_dependencies" tf:""` + RoutineDependencies *DependencyList `tfsdk:"routine_dependencies" tf:"optional"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:""` // Function security type. @@ -1018,6 +1040,42 @@ type FunctionParameterInfos struct { Parameters []FunctionParameterInfo `tfsdk:"parameters" tf:"optional"` } +// GCP temporary credentials for API authentication. Read more at +// https://developers.google.com/identity/protocols/oauth2/service-account +type GcpOauthToken struct { + OauthToken types.String `tfsdk:"oauth_token" tf:"optional"` +} + +type GenerateTemporaryTableCredentialRequest struct { + // The operation performed against the table data, either READ or + // READ_WRITE. If READ_WRITE is specified, the credentials returned will + // have write permissions, otherwise, it will be read only. + Operation types.String `tfsdk:"operation" tf:"optional"` + // UUID of the table to read or write. + TableId types.String `tfsdk:"table_id" tf:"optional"` +} + +type GenerateTemporaryTableCredentialResponse struct { + // AWS temporary credentials for API authentication. Read more at + // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. + AwsTempCredentials *AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional"` + // Azure temporary credentials for API authentication. Read more at + // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas + AzureUserDelegationSas *AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional"` + // Server time when the credential will expire, in unix epoch milliseconds + // since January 1, 1970 at 00:00:00 UTC. The API client is advised to cache + // the credential given this expiration time. + ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` + // GCP temporary credentials for API authentication. Read more at + // https://developers.google.com/identity/protocols/oauth2/service-account + GcpOauthToken *GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional"` + // R2 temporary credentials for API authentication. Read more at + // https://developers.cloudflare.com/r2/api/s3/tokens/. + R2TempCredentials *R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional"` + // The URL of the storage path accessible by the temporary credential. + Url types.String `tfsdk:"url" tf:"optional"` +} + // Gets the metastore assignment for a workspace type GetAccountMetastoreAssignmentRequest struct { // Workspace ID. @@ -1150,6 +1208,9 @@ type GetMetastoreSummaryResponse struct { DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds" tf:"optional"` // The scope of Delta Sharing enabled for the metastore. DeltaSharingScope types.String `tfsdk:"delta_sharing_scope" tf:"optional"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. 
+ ExternalAccessEnabled types.Bool `tfsdk:"external_access_enabled" tf:"optional"` // Globally unique metastore ID across clouds and regions, of the form // `cloud:region:metastore_id`. GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` @@ -1262,6 +1323,8 @@ type GetTableRequest struct { IncludeBrowse types.Bool `tfsdk:"-"` // Whether delta metadata should be included in the response. IncludeDeltaMetadata types.Bool `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` } // Get catalog workspace bindings @@ -1546,6 +1609,8 @@ type ListStorageCredentialsResponse struct { type ListSummariesRequest struct { // Name of parent catalog for tables of interest. CatalogName types.String `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` // Maximum number of summaries for tables to return. If not set, the page // length is set to a server configured value (10000, as of 1/5/2024). - // when set to a value greater than 0, the page length is the minimum of @@ -1606,6 +1671,8 @@ type ListTablesRequest struct { IncludeBrowse types.Bool `tfsdk:"-"` // Whether delta metadata should be included in the response. IncludeDeltaMetadata types.Bool `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` // Maximum number of tables to return. If not set, all the tables are // returned (not recommended). - when set to a value greater than 0, the // page length is the minimum of this value and a server configured value; - @@ -1693,6 +1760,9 @@ type MetastoreInfo struct { DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds" tf:"optional"` // The scope of Delta Sharing enabled for the metastore. DeltaSharingScope types.String `tfsdk:"delta_sharing_scope" tf:"optional"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. + ExternalAccessEnabled types.Bool `tfsdk:"external_access_enabled" tf:"optional"` // Globally unique metastore ID across clouds and regions, of the form // `cloud:region:metastore_id`. GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` @@ -2098,6 +2168,17 @@ type QuotaInfo struct { QuotaName types.String `tfsdk:"quota_name" tf:"optional"` } +// R2 temporary credentials for API authentication. Read more at +// https://developers.cloudflare.com/r2/api/s3/tokens/. +type R2Credentials struct { + // The access key ID that identifies the temporary credentials. + AccessKeyId types.String `tfsdk:"access_key_id" tf:"optional"` + // The secret access key associated with the access key. + SecretAccessKey types.String `tfsdk:"secret_access_key" tf:"optional"` + // The generated JWT that users must pass to use the temporary credentials. 
+ SessionToken types.String `tfsdk:"session_token" tf:"optional"` +} + // Get a Volume type ReadVolumeRequest struct { // Whether to include volumes in the response for which the principal can diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index e983a492c4..223ba1cb66 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -326,8 +326,14 @@ type ClusterAttributes struct { NodeTypeId types.String `tfsdk:"node_type_id" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -526,8 +532,14 @@ type ClusterDetails struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -844,8 +856,14 @@ type ClusterSpec struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. 
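The migration described in the comment above (drop `-photon-` from the version string and request Photon via `runtime_engine`) can be sketched as follows. This is illustrative only: the `SparkVersion` field name is assumed, as it does not appear in this hunk, and the version string is an arbitrary example.

```go
package main

import (
	"github.com/databricks/terraform-provider-databricks/internal/service/compute_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	// Before: SparkVersion "13.3.x-photon-scala2.12" with RuntimeEngine unset.
	// After:  a plain version string plus an explicit PHOTON runtime engine.
	_ = compute_tf.ClusterSpec{
		SparkVersion:  types.StringValue("13.3.x-scala2.12"), // assumed field name
		RuntimeEngine: types.StringValue("PHOTON"),
		NumWorkers:    types.Int64Value(2),
	}
}
```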
RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -1040,8 +1058,14 @@ type CreateCluster struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -1423,8 +1447,14 @@ type EditCluster struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -2963,8 +2993,14 @@ type UpdateClusterResource struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index d5a1b57f58..457ea2bb4a 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -263,7 +263,11 @@ type CreateJob struct { // begin or complete as well as when this job is deleted. 
EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be - // referenced by tasks of this job. + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. Environments []JobEnvironment `tfsdk:"environment" tf:"optional"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is @@ -309,12 +313,12 @@ type CreateJob struct { Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. Queue *QueueSettings `tfsdk:"queue" tf:"optional"` - // Write-only setting, available only in Create/Update/Reset and Submit - // calls. Specifies the user or service principal that the job runs as. If - // not specified, the job runs as the user who created the job. + // Write-only setting. Specifies the user, service principal or group that + // the job/pipeline runs as. If not specified, the job/pipeline runs as the + // user who created the job/pipeline. // - // Only `user_name` or `service_principal_name` can be specified. If both - // are specified, an error is thrown. + // Exactly one of `user_name`, `service_principal_name`, `group_name` should + // be specified. If not, an error is thrown. RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI @@ -827,12 +831,12 @@ type JobPermissionsRequest struct { JobId types.String `tfsdk:"-"` } -// Write-only setting, available only in Create/Update/Reset and Submit calls. -// Specifies the user or service principal that the job runs as. If not -// specified, the job runs as the user who created the job. +// Write-only setting. Specifies the user, service principal or group that the +// job/pipeline runs as. If not specified, the job/pipeline runs as the user who +// created the job/pipeline. // -// Only `user_name` or `service_principal_name` can be specified. If both are -// specified, an error is thrown. +// Exactly one of `user_name`, `service_principal_name`, `group_name` should be +// specified. If not, an error is thrown. type JobRunAs struct { // Application ID of an active service principal. Setting this field // requires the `servicePrincipal/user` role. @@ -861,7 +865,11 @@ type JobSettings struct { // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be - // referenced by tasks of this job. + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. Environments []JobEnvironment `tfsdk:"environment" tf:"optional"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. 
When using the Jobs API 2.1 this value is @@ -907,12 +915,12 @@ type JobSettings struct { Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. Queue *QueueSettings `tfsdk:"queue" tf:"optional"` - // Write-only setting, available only in Create/Update/Reset and Submit - // calls. Specifies the user or service principal that the job runs as. If - // not specified, the job runs as the user who created the job. + // Write-only setting. Specifies the user, service principal or group that + // the job/pipeline runs as. If not specified, the job/pipeline runs as the + // user who created the job/pipeline. // - // Only `user_name` or `service_principal_name` can be specified. If both - // are specified, an error is thrown. + // Exactly one of `user_name`, `service_principal_name`, `group_name` should + // be specified. If not, an error is thrown. RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index 1caafa7419..b6abbbb71c 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -19,6 +19,8 @@ type CreatePipeline struct { // If false, deployment will fail if name conflicts with that of another // pipeline. AllowDuplicateNames types.Bool `tfsdk:"allow_duplicate_names" tf:"optional"` + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -103,6 +105,8 @@ type EditPipeline struct { // If false, deployment will fail if name has changed and conflicts the name // of another pipeline. AllowDuplicateNames types.Bool `tfsdk:"allow_duplicate_names" tf:"optional"` + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -209,6 +213,8 @@ type GetPipelineResponse struct { ClusterId types.String `tfsdk:"cluster_id" tf:"optional"` // The username of the pipeline creator. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` + // Serverless budget policy ID of this pipeline. + EffectiveBudgetPolicyId types.String `tfsdk:"effective_budget_policy_id" tf:"optional"` // The health of a pipeline. Health types.String `tfsdk:"health" tf:"optional"` // The last time the pipeline settings were modified or created. @@ -642,6 +648,8 @@ type PipelinePermissionsRequest struct { } type PipelineSpec struct { + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. 
If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, diff --git a/internal/service/serving_tf/model.go b/internal/service/serving_tf/model.go index c40d18ee63..b22dc911a7 100755 --- a/internal/service/serving_tf/model.go +++ b/internal/service/serving_tf/model.go @@ -30,6 +30,85 @@ type Ai21LabsConfig struct { Ai21labsApiKeyPlaintext types.String `tfsdk:"ai21labs_api_key_plaintext" tf:"optional"` } +type AiGatewayConfig struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality. + InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` +} + +type AiGatewayGuardrailParameters struct { + // List of invalid keywords. AI guardrail uses keyword or string matching to + // decide if the keyword exists in the request or response content. + InvalidKeywords []types.String `tfsdk:"invalid_keywords" tf:"optional"` + // Configuration for guardrail PII filter. + Pii *AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional"` + // Indicates whether the safety filter is enabled. + Safety types.Bool `tfsdk:"safety" tf:"optional"` + // The list of allowed topics. Given a chat request, this guardrail flags + // the request if its topic is not in the allowed topics. + ValidTopics []types.String `tfsdk:"valid_topics" tf:"optional"` +} + +type AiGatewayGuardrailPiiBehavior struct { + // Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' + // is set for the input guardrail and the request contains PII, the request + // is not sent to the model server and 400 status code is returned; if + // 'BLOCK' is set for the output guardrail and the model response contains + // PII, the PII info in the response is redacted and 400 status code is + // returned. + Behavior types.String `tfsdk:"behavior" tf:""` +} + +type AiGatewayGuardrails struct { + // Configuration for input guardrail filters. + Input *AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional"` + // Configuration for output guardrail filters. + Output *AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional"` +} + +type AiGatewayInferenceTableConfig struct { + // The name of the catalog in Unity Catalog. Required when enabling + // inference tables. NOTE: On update, you have to disable inference table + // first in order to change the catalog name. + CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` + // Indicates whether the inference table is enabled. + Enabled types.Bool `tfsdk:"enabled" tf:"optional"` + // The name of the schema in Unity Catalog. Required when enabling inference + // tables. NOTE: On update, you have to disable inference table first in + // order to change the schema name. 
+ SchemaName types.String `tfsdk:"schema_name" tf:"optional"` + // The prefix of the table in Unity Catalog. NOTE: On update, you have to + // disable inference table first in order to change the prefix name. + TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"` +} + +type AiGatewayRateLimit struct { + // Used to specify how many calls are allowed for a key within the + // renewal_period. + Calls types.Int64 `tfsdk:"calls" tf:""` + // Key field for a rate limit. Currently, only 'user' and 'endpoint' are + // supported, with 'endpoint' being the default if not specified. + Key types.String `tfsdk:"key" tf:"optional"` + // Renewal period field for a rate limit. Currently, only 'minute' is + // supported. + RenewalPeriod types.String `tfsdk:"renewal_period" tf:""` +} + +type AiGatewayUsageTrackingConfig struct { + // Whether to enable usage tracking. + Enabled types.Bool `tfsdk:"enabled" tf:"optional"` +} + type AmazonBedrockConfig struct { // The Databricks secret key reference for an AWS access key ID with // permissions to interact with Bedrock services. If you prefer to paste @@ -147,14 +226,17 @@ type CohereConfig struct { } type CreateServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: only + // external model endpoints are supported as of now. + AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The core config of the serving endpoint. Config EndpointCoreConfigInput `tfsdk:"config" tf:""` // The name of the serving endpoint. This field is required and must be // unique across a Databricks workspace. An endpoint name can consist of // alphanumeric characters, dashes, and underscores. Name types.String `tfsdk:"name" tf:""` - // Rate limits to be applied to the serving endpoint. NOTE: only external - // and foundation model endpoints are supported as of now. + // Rate limits to be applied to the serving endpoint. NOTE: this field is + // deprecated, please use AI Gateway to manage rate limits. RateLimits []RateLimit `tfsdk:"rate_limits" tf:"optional"` // Enable route optimization for the serving endpoint. RouteOptimized types.Bool `tfsdk:"route_optimized" tf:"optional"` @@ -520,6 +602,42 @@ type PayloadTable struct { StatusMessage types.String `tfsdk:"status_message" tf:"optional"` } +// Update AI Gateway of a serving endpoint +type PutAiGatewayRequest struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality. + InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + // The name of the serving endpoint whose AI Gateway is being updated. This + // field is required. + Name types.String `tfsdk:"-"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` +} + +type PutAiGatewayResponse struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. 
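A minimal sketch of populating the generated AI Gateway structs from Go code inside this module; all values are illustrative, and the package name `serving_tf` is assumed from the directory layout. The `"BLOCK"`, `"user"`, and `"minute"` literals follow the supported values named in the field comments above.

```go
package main

import (
	"github.com/databricks/terraform-provider-databricks/internal/service/serving_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	// Illustrative values only; field names follow the generated structs in this file.
	_ = serving_tf.AiGatewayConfig{
		Guardrails: &serving_tf.AiGatewayGuardrails{
			Input: &serving_tf.AiGatewayGuardrailParameters{
				InvalidKeywords: []types.String{types.StringValue("credit_card")},
				Pii:             &serving_tf.AiGatewayGuardrailPiiBehavior{Behavior: types.StringValue("BLOCK")},
			},
		},
		InferenceTableConfig: &serving_tf.AiGatewayInferenceTableConfig{
			Enabled:     types.BoolValue(true),
			CatalogName: types.StringValue("main"),
			SchemaName:  types.StringValue("serving_logs"),
		},
		RateLimits: []serving_tf.AiGatewayRateLimit{{
			Calls:         types.Int64Value(100),
			Key:           types.StringValue("user"),
			RenewalPeriod: types.StringValue("minute"),
		}},
		UsageTrackingConfig: &serving_tf.AiGatewayUsageTrackingConfig{Enabled: types.BoolValue(true)},
	}
}
```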
+ Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality . + InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` +} + // Update rate limits of a serving endpoint type PutRequest struct { // The name of the serving endpoint whose rate limits are being updated. @@ -914,6 +1032,9 @@ type ServerLogsResponse struct { } type ServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model endpoints are currently supported. + AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigSummary `tfsdk:"config" tf:"optional"` // The timestamp when the endpoint was created in Unix time. @@ -960,6 +1081,9 @@ type ServingEndpointAccessControlResponse struct { } type ServingEndpointDetailed struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model endpoints are currently supported. + AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigOutput `tfsdk:"config" tf:"optional"` // The timestamp when the endpoint was created in Unix time. diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 55059248b5..117cf8d113 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -32,6 +32,10 @@ type AutomaticClusterUpdateSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +type BooleanMessage struct { + Value types.Bool `tfsdk:"value" tf:"optional"` +} + type ClusterAutoRestartMessage struct { CanToggle types.Bool `tfsdk:"can_toggle" tf:"optional"` @@ -292,6 +296,54 @@ type DeleteDefaultNamespaceSettingResponse struct { Etag types.String `tfsdk:"etag" tf:""` } +// Delete Legacy Access Disablement Status +type DeleteDisableLegacyAccessRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +// The etag is returned. +type DeleteDisableLegacyAccessResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + +// Delete the disable legacy features setting +type DeleteDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +// The etag is returned. +type DeleteDisableLegacyFeaturesResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + // Delete access list type DeleteIpAccessListRequest struct { // The ID for the corresponding IP access list @@ -377,6 +429,42 @@ type DeleteTokenManagementRequest struct { TokenId types.String `tfsdk:"-"` } +type DisableLegacyAccess struct { + DisableLegacyAccess BooleanMessage `tfsdk:"disable_legacy_access" tf:""` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +type DisableLegacyFeatures struct { + DisableLegacyFeatures BooleanMessage `tfsdk:"disable_legacy_features" tf:""` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. 
That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + type EmailConfig struct { // Email addresses to notify. Addresses []types.String `tfsdk:"addresses" tf:"optional"` @@ -538,6 +626,30 @@ type GetDefaultNamespaceSettingRequest struct { Etag types.String `tfsdk:"-"` } +// Retrieve Legacy Access Disablement Status +type GetDisableLegacyAccessRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +// Get the disable legacy features setting +type GetDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + // Get the enhanced security monitoring setting type GetEnhancedSecurityMonitoringSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -1045,6 +1157,8 @@ type TokenInfo struct { OwnerId types.Int64 `tfsdk:"owner_id" tf:"optional"` // ID of the token. TokenId types.String `tfsdk:"token_id" tf:"optional"` + // If applicable, the ID of the workspace that the token was created in. + WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:"optional"` } type TokenPermission struct { @@ -1137,6 +1251,34 @@ type UpdateDefaultNamespaceSettingRequest struct { Setting DefaultNamespaceSetting `tfsdk:"setting" tf:""` } +// Details required to update a setting. +type UpdateDisableLegacyAccessRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting DisableLegacyAccess `tfsdk:"setting" tf:""` +} + +// Details required to update a setting. +type UpdateDisableLegacyFeaturesRequest struct { + // This should always be set to true for Settings API. 
Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting DisableLegacyFeatures `tfsdk:"setting" tf:""` +} + // Details required to update a setting. type UpdateEnhancedSecurityMonitoringSettingRequest struct { // This should always be set to true for Settings API. Added for AIP diff --git a/internal/service/sql_tf/model.go b/internal/service/sql_tf/model.go index 8bbdb536f4..cbee45561b 100755 --- a/internal/service/sql_tf/model.go +++ b/internal/service/sql_tf/model.go @@ -208,15 +208,6 @@ type ChannelInfo struct { Name types.String `tfsdk:"name" tf:"optional"` } -// Client code that triggered the request -type ClientCallContext struct { - // File name that contains the last line that triggered the request. - FileName *EncodedText `tfsdk:"file_name" tf:"optional"` - // Last line number within a file or notebook cell that triggered the - // request. - LineNumber types.Int64 `tfsdk:"line_number" tf:"optional"` -} - type ColumnInfo struct { // The name of the column. Name types.String `tfsdk:"name" tf:"optional"` @@ -710,13 +701,6 @@ type EditWarehouseResponse struct { type Empty struct { } -type EncodedText struct { - // Carry text data in different form. - Encoding types.String `tfsdk:"encoding" tf:"optional"` - // text data - Text types.String `tfsdk:"text" tf:"optional"` -} - type EndpointConfPair struct { Key types.String `tfsdk:"key" tf:"optional"` @@ -1673,8 +1657,6 @@ type QueryInfo struct { QueryEndTimeMs types.Int64 `tfsdk:"query_end_time_ms" tf:"optional"` // The query ID. QueryId types.String `tfsdk:"query_id" tf:"optional"` - - QuerySource *QuerySource `tfsdk:"query_source" tf:"optional"` // The time the query started. QueryStartTimeMs types.Int64 `tfsdk:"query_start_time_ms" tf:"optional"` // The text of the query. @@ -1834,62 +1816,6 @@ type QueryPostContent struct { Tags []types.String `tfsdk:"tags" tf:"optional"` } -type QuerySource struct { - // UUID - AlertId types.String `tfsdk:"alert_id" tf:"optional"` - // Client code that triggered the request - ClientCallContext *ClientCallContext `tfsdk:"client_call_context" tf:"optional"` - // Id associated with a notebook cell - CommandId types.String `tfsdk:"command_id" tf:"optional"` - // Id associated with a notebook run or execution - CommandRunId types.String `tfsdk:"command_run_id" tf:"optional"` - // UUID - DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` - // UUID for Lakeview Dashboards, separate from DBSQL Dashboards - // (dashboard_id) - DashboardV3Id types.String `tfsdk:"dashboard_v3_id" tf:"optional"` - - DriverInfo *QuerySourceDriverInfo `tfsdk:"driver_info" tf:"optional"` - // Spark service that received and processed the query - EntryPoint types.String `tfsdk:"entry_point" tf:"optional"` - // UUID for Genie space - GenieSpaceId types.String `tfsdk:"genie_space_id" tf:"optional"` - - IsCloudFetch types.Bool `tfsdk:"is_cloud_fetch" tf:"optional"` - - IsDatabricksSqlExecApi types.Bool `tfsdk:"is_databricks_sql_exec_api" tf:"optional"` - - JobId types.String `tfsdk:"job_id" tf:"optional"` - // With background compute, jobs can be managed by different internal teams. 
- // When not specified, not a background compute job When specified and the - // value is not JOBS, it is a background compute job - JobManagedBy types.String `tfsdk:"job_managed_by" tf:"optional"` - - NotebookId types.String `tfsdk:"notebook_id" tf:"optional"` - // String provided by a customer that'll help them identify the query - QueryTags types.String `tfsdk:"query_tags" tf:"optional"` - // Id associated with a job run or execution - RunId types.String `tfsdk:"run_id" tf:"optional"` - // Id associated with a notebook cell run or execution - RunnableCommandId types.String `tfsdk:"runnable_command_id" tf:"optional"` - - ScheduledBy types.String `tfsdk:"scheduled_by" tf:"optional"` - - ServerlessChannelInfo *ServerlessChannelInfo `tfsdk:"serverless_channel_info" tf:"optional"` - // UUID - SourceQueryId types.String `tfsdk:"source_query_id" tf:"optional"` -} - -type QuerySourceDriverInfo struct { - BiToolEntry types.String `tfsdk:"bi_tool_entry" tf:"optional"` - - DriverName types.String `tfsdk:"driver_name" tf:"optional"` - - SimbaBrandingVendor types.String `tfsdk:"simba_branding_vendor" tf:"optional"` - - VersionNumber types.String `tfsdk:"version_number" tf:"optional"` -} - type RepeatedEndpointConfPairs struct { // Deprecated: Use configuration_pairs ConfigPair []EndpointConfPair `tfsdk:"config_pair" tf:"optional"` @@ -1964,11 +1890,6 @@ type ResultSchema struct { Columns []ColumnInfo `tfsdk:"columns" tf:"optional"` } -type ServerlessChannelInfo struct { - // Name of the Channel - Name types.String `tfsdk:"name" tf:"optional"` -} - type ServiceError struct { ErrorCode types.String `tfsdk:"error_code" tf:"optional"` // A brief summary of the error condition. From 1e89ad4d0a71d2867d6fdddab1a5726b5287ee1d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 1 Oct 2024 15:19:16 +0200 Subject: [PATCH 25/99] [Fix] Ignore presence or absence of `/Workspace` prefix for dashboard resource (#4061) ## Changes Users may choose to include the `/Workspace` prefix in the `parent_path` attribute of a dashboard to unambiguously refer to the workspace file system. This prefix is not included in reads, triggering a recreate. This change ignores this type of diff. ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [ ] using Go SDK --- common/resource.go | 10 ++++++++++ common/resource_test.go | 8 ++++++++ dashboards/resource_dashboard.go | 2 +- internal/acceptance/dashboard_test.go | 18 +++++++++++++++++- 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/common/resource.go b/common/resource.go index 77ba894853..9e639eb962 100644 --- a/common/resource.go +++ b/common/resource.go @@ -440,6 +440,16 @@ func genericDatabricksData[T, P, C any]( } } +// WorkspacePathPrefixDiffSuppress suppresses diffs for workspace paths where both sides +// may or may not include the `/Workspace` prefix. +// +// This is the case for dashboards where at create time, the user may include the `/Workspace` +// prefix for the `parent_path` field, but the read response will not include the prefix. 
+func WorkspacePathPrefixDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + const prefix = "/Workspace" + return strings.TrimPrefix(old, prefix) == strings.TrimPrefix(new, prefix) +} + func EqualFoldDiffSuppress(k, old, new string, d *schema.ResourceData) bool { if strings.EqualFold(old, new) { log.Printf("[INFO] Suppressing diff on %s", k) diff --git a/common/resource_test.go b/common/resource_test.go index 360c10b476..e93885a02c 100644 --- a/common/resource_test.go +++ b/common/resource_test.go @@ -179,6 +179,14 @@ func TestCustomizeDiffRobustness(t *testing.T) { assert.EqualError(t, err, "cannot customize diff for sample: panic: oops") } +func TestWorkspacePathPrefixDiffSuppress(t *testing.T) { + assert.True(t, WorkspacePathPrefixDiffSuppress("k", "/Workspace/foo/bar", "/Workspace/foo/bar", nil)) + assert.True(t, WorkspacePathPrefixDiffSuppress("k", "/Workspace/foo/bar", "/foo/bar", nil)) + assert.True(t, WorkspacePathPrefixDiffSuppress("k", "/foo/bar", "/Workspace/foo/bar", nil)) + assert.True(t, WorkspacePathPrefixDiffSuppress("k", "/foo/bar", "/foo/bar", nil)) + assert.False(t, WorkspacePathPrefixDiffSuppress("k", "/Workspace/1", "/Workspace/2", nil)) +} + func TestEqualFoldDiffSuppress(t *testing.T) { assert.True(t, EqualFoldDiffSuppress("k", "A", "a", nil)) assert.False(t, EqualFoldDiffSuppress("k", "A", "A2", nil)) diff --git a/dashboards/resource_dashboard.go b/dashboards/resource_dashboard.go index 531a6e5de4..d872b33f49 100644 --- a/dashboards/resource_dashboard.go +++ b/dashboards/resource_dashboard.go @@ -42,7 +42,7 @@ func (Dashboard) CustomizeSchema(s *common.CustomizableSchema) *common.Customiza s.SchemaPath("md5").SetComputed() // ForceNew fields - s.SchemaPath("parent_path").SetForceNew() + s.SchemaPath("parent_path").SetCustomSuppressDiff(common.WorkspacePathPrefixDiffSuppress).SetForceNew() // ConflictsWith fields s.SchemaPath("serialized_dashboard").SetConflictsWith([]string{"file_path"}) diff --git a/internal/acceptance/dashboard_test.go b/internal/acceptance/dashboard_test.go index 5fbf28b03a..49118c9455 100644 --- a/internal/acceptance/dashboard_test.go +++ b/internal/acceptance/dashboard_test.go @@ -58,7 +58,7 @@ resource "databricks_permissions" "dashboard_usage" { return templateString } -// Altough EmbedCredentials is an optional field, please specify its value if you want to modify it. +// Although EmbedCredentials is an optional field, please specify its value if you want to modify it. func (t *templateStruct) SetAttributes(mapper map[string]string) templateStruct { // Switch case for each attribute. If it is set in the mapper, set it in the struct if val, ok := mapper["display_name"]; ok { @@ -491,3 +491,19 @@ func TestAccDashboardTestAll(t *testing.T) { }), }) } + +func TestAccDashboardWithWorkspacePrefix(t *testing.T) { + var template templateStruct + + // Test that the dashboard can use a /Workspace prefix on the parent path and not trigger recreation. + // If this does NOT work, the test fails with an error that the non-refresh plan is non-empty. 
+ + WorkspaceLevel(t, Step{ + Template: makeTemplate(template.SetAttributes(map[string]string{ + "display_name": fmt.Sprintf("Test Dashboard - %s", qa.RandomName()), + "warehouse_id": "{env.TEST_DEFAULT_WAREHOUSE_ID}", + "parent_path": "/Workspace/Shared/provider-test", + "serialized_dashboard": `{\"pages\":[{\"name\":\"new_name\",\"displayName\":\"New Page\"}]}`, + })), + }) +} From f757db0bf02fd32549e57fec49fc0e40125167f8 Mon Sep 17 00:00:00 2001 From: Cedric <23346008+840@users.noreply.github.com> Date: Wed, 2 Oct 2024 04:37:22 +0200 Subject: [PATCH 26/99] [Feature] Add `databricks_budget` resource (#3955) ## Changes Add support for account-level [Budget](https://docs.databricks.com/api/account/budgets) resource Example structure ``` resource "databricks_budget" "this" { display_name = "data-science-budget" alert_configurations { time_period = "MONTH" trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" quantity_type = "LIST_PRICE_DOLLARS_USD" quantity_threshold = "840" action_configurations { action_type = "EMAIL_NOTIFICATION" target = "me@databricks.com" } } filter { workspace_id { operator = "IN" values = [ 1234567890098765 ] } tags { key = "Databricks" value { operator = "IN" values = ["Data Science"] } } } } ``` Resolves #3887 ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Alex Ott --- docs/resources/budget.md | 103 ++++++++++++ finops/resource_budget.go | 102 ++++++++++++ finops/resource_budget_test.go | 243 +++++++++++++++++++++++++++++ internal/acceptance/budget_test.go | 57 +++++++ internal/providers/sdkv2/sdkv2.go | 2 + 5 files changed, 507 insertions(+) create mode 100644 docs/resources/budget.md create mode 100644 finops/resource_budget.go create mode 100644 finops/resource_budget_test.go create mode 100644 internal/acceptance/budget_test.go diff --git a/docs/resources/budget.md b/docs/resources/budget.md new file mode 100644 index 0000000000..3b99fa7c8a --- /dev/null +++ b/docs/resources/budget.md @@ -0,0 +1,103 @@ +--- +subcategory: "FinOps" +--- +# databricks_budget Resource + +-> **Note** Initialize provider with `alias = "account"`, and `host` pointing to the account URL, like, `host = "https://accounts.cloud.databricks.com"`. Use `provider = databricks.account` for all account-level resources. + +-> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). + +This resource allows you to manage [Databricks Budgets](https://docs.databricks.com/en/admin/account-settings/budgets.html). + +## Example Usage + +```hcl +resource "databricks_budget" "this" { + display_name = "databricks-workspace-budget" + + alert_configurations { + time_period = "MONTH" + trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" + quantity_type = "LIST_PRICE_DOLLARS_USD" + quantity_threshold = "840" + + action_configurations { + action_type = "EMAIL_NOTIFICATION" + target = "abc@gmail.com" + } + } + + filter { + workspace_id { + operator = "IN" + values = [ + 1234567890098765 + ] + } + + tags { + key = "Team" + value { + operator = "IN" + values = ["Data Science"] + } + } + + tags { + key = "Environment" + value { + operator = "IN" + values = ["Development"] + } + } + } +} +``` + +## Argument Reference + +The following arguments are available: + +* `display_name` - (Required) Name of the budget in Databricks Account. 
+ +### alert_configurations Configuration Block (Required) + +* `time_period` - (Required, String Enum) The time window of usage data for the budget. (Enum: `MONTH`) +* `trigger_type` - (Required, String Enum) The evaluation method to determine when this budget alert is in a triggered state. (Enum: `CUMULATIVE_SPENDING_EXCEEDED`) +* `quantity_type` - (Required, String Enum) The way to calculate cost for this budget alert. This is what quantity_threshold is measured in. (Enum: `LIST_PRICE_DOLLARS_USD`) +* `quantity_threshold` - (Required, String) The threshold for the budget alert to determine if it is in a triggered state. The number is evaluated based on `quantity_type`. +* `action_configurations` - (Required) List of action configurations to take when the budget alert is triggered. Consists of the following fields: + * `action_type` - (Required, String Enum) The type of action to take when the budget alert is triggered. (Enum: `EMAIL_NOTIFICATION`) + * `target` - (Required, String) The target of the action. For `EMAIL_NOTIFICATION`, this is the email address to send the notification to. + +### filter Configuration Block (Optional) + +* `workspace_id` - (Optional) Filter by workspace ID (if empty, include all usage for this account). Consists of the following fields: + * `operator` - (Required, String Enum) The operator to use for the filter. (Enum: `IN`) + * `values` - (Required, List of numbers) The values to filter by. +* `tags` - (Optional) List of tags to filter by. Consists of the following fields: + * `key` - (Required, String) The key of the tag. + * `value` - (Required) Consists of the following fields: + * `operator` - (Required, String Enum) The operator to use for the filter. (Enum: `IN`) + * `values` - (Required, List of strings) The values to filter by. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `budget_configuration_id` - The ID of the budget configuration. +* `account_id` - The ID of the Databricks Account. + +## Import + +This resource can be imported by Databricks account ID and budget configuration ID. + +```sh +terraform import databricks_budget.this '<account_id>|<budget_configuration_id>' +``` + +## Related Resources + +The following resources are used in the same context: + +* [databricks_mws_workspaces](mws_workspaces.md) to set up Databricks workspaces.
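The `quantity_threshold` argument above is a decimal carried as a string, and the Budgets API echoes it back as a BigDecimal with many trailing zeros (for example `840.840000000000000000` for a configured `840.84`). The resource implementation that follows suppresses that spurious diff by normalizing both sides before comparing them. A minimal standalone sketch of that normalization, shown here as a runnable snippet rather than the provider's actual schema wiring:

```go
package main

import (
	"fmt"
	"strings"
)

// normalize mirrors the anonymous suppress-diff helper in ResourceBudget below:
// it strips insignificant trailing zeros (and a dangling decimal point) so the
// configured value compares equal to the BigDecimal string returned by the API.
func normalize(v string) string {
	if strings.Contains(v, ".") {
		v = strings.TrimRight(v, "0")
		v = strings.TrimSuffix(v, ".")
	}
	return v
}

func main() {
	configured := "840.84"              // value written in HCL
	fromAPI := "840.840000000000000000" // value echoed back by the Budgets API

	fmt.Println(normalize(configured) == normalize(fromAPI)) // true  -> diff suppressed
	fmt.Println(normalize("840") == normalize("840.000"))    // true  -> diff suppressed
	fmt.Println(normalize("840") == normalize("940"))        // false -> real change, plan shows it
}
```

In the resource below, the same comparison is attached to `quantity_threshold` via `SetCustomSuppressDiff`, so a refreshed value such as `840.840000000000000000` does not show up as a change against a configured `840.84`.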
diff --git a/finops/resource_budget.go b/finops/resource_budget.go new file mode 100644 index 0000000000..3213907929 --- /dev/null +++ b/finops/resource_budget.go @@ -0,0 +1,102 @@ +package finops + +import ( + "context" + "strings" + + "github.com/databricks/databricks-sdk-go/service/billing" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceBudget() common.Resource { + s := common.StructToSchema(billing.BudgetConfiguration{}, func(m map[string]*schema.Schema) map[string]*schema.Schema { + common.CustomizeSchemaPath(m, "display_name").SetValidateFunc(validation.StringLenBetween(1, 128)) + for _, p := range []string{"account_id", "budget_configuration_id", "create_time", "update_time"} { + common.CustomizeSchemaPath(m, p).SetComputed() + } + common.CustomizeSchemaPath(m, "alert_configurations", "alert_configuration_id").SetComputed() + common.CustomizeSchemaPath(m, "alert_configurations", "action_configurations", "action_configuration_id").SetComputed() + // We need SuppressDiff because API returns a string representation of BigDecimal with a lot + // of trailing 0s, etc. + common.CustomizeSchemaPath(m, "alert_configurations", "quantity_threshold").SetCustomSuppressDiff(func(k, old, new string, d *schema.ResourceData) bool { + normalize := func(v string) string { + if strings.Contains(v, ".") { + v = strings.TrimRight(v, "0") + v = strings.TrimSuffix(v, ".") + } + return v + } + return normalize(old) == normalize(new) + }) + return m + }) + p := common.NewPairID("account_id", "budget_configuration_id") + return common.Resource{ + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + var create billing.CreateBudgetConfigurationBudget + common.DataToStructPointer(d, s, &create) + acc, err := c.AccountClient() + if err != nil { + return err + } + budget, err := acc.Budgets.Create(ctx, billing.CreateBudgetConfigurationRequest{Budget: create}) + if err != nil { + return err + } + d.Set("budget_configuration_id", budget.Budget.BudgetConfigurationId) + d.Set("account_id", c.Config.AccountID) + common.StructToData(budget.Budget, s, d) + p.Pack(d) + return nil + }, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + _, id, err := p.Unpack(d) + if err != nil { + return err + } + acc, err := c.AccountClient() + if err != nil { + return err + } + budget, err := acc.Budgets.GetByBudgetId(ctx, id) + if err != nil { + return err + } + return common.StructToData(budget.Budget, s, d) + }, + Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + var update billing.UpdateBudgetConfigurationBudget + _, id, err := p.Unpack(d) + if err != nil { + return err + } + common.DataToStructPointer(d, s, &update) + acc, err := c.AccountClient() + if err != nil { + return err + } + budget, err := acc.Budgets.Update(ctx, billing.UpdateBudgetConfigurationRequest{ + Budget: update, + BudgetId: id, + }) + if err != nil { + return err + } + return common.StructToData(budget.Budget, s, d) + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + _, id, err := p.Unpack(d) + if err != nil { + return err + } + acc, err := c.AccountClient() + if err != nil { + return err + } + return acc.Budgets.DeleteByBudgetId(ctx, id) + }, + Schema: s, + } +} diff --git a/finops/resource_budget_test.go 
b/finops/resource_budget_test.go new file mode 100644 index 0000000000..311397155c --- /dev/null +++ b/finops/resource_budget_test.go @@ -0,0 +1,243 @@ +package finops + +import ( + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/billing" + "github.com/stretchr/testify/mock" + + "github.com/databricks/terraform-provider-databricks/qa" +) + +func getTestBudget() *billing.BudgetConfiguration { + return &billing.BudgetConfiguration{ + AccountId: "account_id", + AlertConfigurations: []billing.AlertConfiguration{ + { + ActionConfigurations: []billing.ActionConfiguration{ + { + ActionType: billing.ActionConfigurationTypeEmailNotification, + Target: "me@databricks.com", + }, + }, + QuantityThreshold: "840.840000000000000000", + QuantityType: billing.AlertConfigurationQuantityTypeListPriceDollarsUsd, + TimePeriod: billing.AlertConfigurationTimePeriodMonth, + TriggerType: billing.AlertConfigurationTriggerTypeCumulativeSpendingExceeded, + }, + }, + Filter: &billing.BudgetConfigurationFilter{ + Tags: []billing.BudgetConfigurationFilterTagClause{ + { + Key: "Environment", + Value: &billing.BudgetConfigurationFilterClause{ + Operator: billing.BudgetConfigurationFilterOperatorIn, + Values: []string{"Testing"}, + }, + }, + }, + WorkspaceId: &billing.BudgetConfigurationFilterWorkspaceIdClause{ + Operator: billing.BudgetConfigurationFilterOperatorIn, + Values: []int64{ + 1234567890098765, + }, + }, + }, + BudgetConfigurationId: "budget_configuration_id", + DisplayName: "budget_name", + } +} + +func TestResourceBudgetCreate(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + api := a.GetMockbudgetsAPI().EXPECT() + api.Create(mock.Anything, billing.CreateBudgetConfigurationRequest{ + Budget: billing.CreateBudgetConfigurationBudget{ + AlertConfigurations: []billing.CreateBudgetConfigurationBudgetAlertConfigurations{ + { + ActionConfigurations: []billing.CreateBudgetConfigurationBudgetActionConfigurations{ + { + ActionType: getTestBudget().AlertConfigurations[0].ActionConfigurations[0].ActionType, + Target: getTestBudget().AlertConfigurations[0].ActionConfigurations[0].Target, + }, + }, + QuantityThreshold: "840.84", + QuantityType: getTestBudget().AlertConfigurations[0].QuantityType, + TimePeriod: getTestBudget().AlertConfigurations[0].TimePeriod, + TriggerType: getTestBudget().AlertConfigurations[0].TriggerType, + }, + }, + DisplayName: getTestBudget().DisplayName, + Filter: getTestBudget().Filter, + }, + }).Return(&billing.CreateBudgetConfigurationResponse{Budget: getTestBudget()}, nil) + api.GetByBudgetId(mock.Anything, "budget_configuration_id").Return( + &billing.GetBudgetConfigurationResponse{Budget: getTestBudget()}, nil, + ) + }, + Create: true, + AccountID: "account_id", + HCL: ` + display_name = "budget_name" + + alert_configurations { + time_period = "MONTH" + trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" + quantity_type = "LIST_PRICE_DOLLARS_USD" + quantity_threshold = "840.84" + + action_configurations { + action_type = "EMAIL_NOTIFICATION" + target = "me@databricks.com" + } + } + + filter { + tags { + key = "Environment" + value { + operator = "IN" + values = ["Testing"] + } + } + + workspace_id { + operator = "IN" + values = [ + 1234567890098765 + ] + } + } + `, + Resource: ResourceBudget(), + }.ApplyAndExpectData(t, map[string]any{ + "display_name": "budget_name", + "id": "account_id|budget_configuration_id", + "alert_configurations.#": 1, + "filter.#": 1, + }) 
+} + +func TestResourceBudgetRead(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + a.GetMockbudgetsAPI().EXPECT(). + GetByBudgetId(mock.Anything, "budget_configuration_id"). + Return(&billing.GetBudgetConfigurationResponse{Budget: getTestBudget()}, nil) + }, + Resource: ResourceBudget(), + Read: true, + New: true, + AccountID: "account_id", + ID: "account_id|budget_configuration_id", + }.ApplyAndExpectData(t, map[string]any{ + "display_name": "budget_name", + "id": "account_id|budget_configuration_id", + "alert_configurations.#": 1, + "filter.#": 1, + }) +} + +func TestResourceBudgetRead_UnpackError(t *testing.T) { + qa.ResourceFixture{ + Resource: ResourceBudget(), + Read: true, + New: true, + AccountID: "account_id", + ID: "budget_configuration_id", + }.ExpectError(t, "invalid ID: budget_configuration_id") +} + +func TestResourceBudgetUpdate(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + api := a.GetMockbudgetsAPI().EXPECT() + api.Update(mock.Anything, billing.UpdateBudgetConfigurationRequest{ + Budget: billing.UpdateBudgetConfigurationBudget{ + AccountId: getTestBudget().AccountId, + AlertConfigurations: []billing.AlertConfiguration{ + { + ActionConfigurations: []billing.ActionConfiguration{ + { + ActionType: getTestBudget().AlertConfigurations[0].ActionConfigurations[0].ActionType, + Target: getTestBudget().AlertConfigurations[0].ActionConfigurations[0].Target, + }, + }, + QuantityThreshold: "840.84", + QuantityType: getTestBudget().AlertConfigurations[0].QuantityType, + TimePeriod: getTestBudget().AlertConfigurations[0].TimePeriod, + TriggerType: getTestBudget().AlertConfigurations[0].TriggerType, + }, + }, + BudgetConfigurationId: getTestBudget().BudgetConfigurationId, + DisplayName: fmt.Sprintf("%s_update", getTestBudget().DisplayName), + Filter: getTestBudget().Filter, + }, + BudgetId: "budget_configuration_id", + }).Return(&billing.UpdateBudgetConfigurationResponse{Budget: getTestBudget()}, nil) + api.GetByBudgetId(mock.Anything, "budget_configuration_id").Return( + &billing.GetBudgetConfigurationResponse{Budget: &billing.BudgetConfiguration{ + AccountId: getTestBudget().AccountId, + AlertConfigurations: getTestBudget().AlertConfigurations, + BudgetConfigurationId: getTestBudget().BudgetConfigurationId, + DisplayName: fmt.Sprintf("%s_update", getTestBudget().DisplayName), + Filter: getTestBudget().Filter, + }}, nil, + ) + }, + Resource: ResourceBudget(), + Update: true, + HCL: ` + display_name = "budget_name_update" + + alert_configurations { + time_period = "MONTH" + trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" + quantity_type = "LIST_PRICE_DOLLARS_USD" + quantity_threshold = "840.84" + + action_configurations { + action_type = "EMAIL_NOTIFICATION" + target = "me@databricks.com" + } + } + + filter { + tags { + key = "Environment" + value { + operator = "IN" + values = ["Testing"] + } + } + + workspace_id { + operator = "IN" + values = [ + 1234567890098765 + ] + } + } + `, + AccountID: "account_id", + ID: "account_id|budget_configuration_id", + }.ApplyAndExpectData(t, map[string]any{ + "display_name": "budget_name_update", + "id": "account_id|budget_configuration_id", + }) +} + +func TestResourceBudgetDelete(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + a.GetMockbudgetsAPI().EXPECT().DeleteByBudgetId(mock.Anything, "budget_configuration_id").Return(nil) + }, + Resource: ResourceBudget(), + AccountID: "account_id", + Delete: 
true, + ID: "account_id|budget_configuration_id", + }.ApplyAndExpectData(t, nil) +} diff --git a/internal/acceptance/budget_test.go b/internal/acceptance/budget_test.go new file mode 100644 index 0000000000..379ad84dc4 --- /dev/null +++ b/internal/acceptance/budget_test.go @@ -0,0 +1,57 @@ +package acceptance + +import ( + "fmt" + "testing" +) + +var ( + budgetTemplate = `resource "databricks_budget" "this" { + display_name = "tf-{var.RANDOM}" + + alert_configurations { + time_period = "MONTH" + trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" + quantity_type = "LIST_PRICE_DOLLARS_USD" + quantity_threshold = "%s" + + action_configurations { + action_type = "EMAIL_NOTIFICATION" + target = "me@databricks.com" + } + } + + filter { + tags { + key = "Environment" + value { + operator = "IN" + values = ["Testing"] + } + } + + workspace_id { + operator = "IN" + values = [ + 1234567890098765 + ] + } + } + }` +) + +func TestMwsAccBudgetCreate(t *testing.T) { + loadAccountEnv(t) + AccountLevel(t, Step{ + Template: fmt.Sprintf(budgetTemplate, "840"), + }) +} + +func TestMwsAccBudgetUpdate(t *testing.T) { + loadAccountEnv(t) + AccountLevel(t, Step{ + Template: fmt.Sprintf(budgetTemplate, "840"), + }, Step{ + Template: fmt.Sprintf(budgetTemplate, "940"), + }) +} diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index b9ee686121..6d1b712cb0 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -29,6 +29,7 @@ import ( "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/dashboards" + "github.com/databricks/terraform-provider-databricks/finops" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" "github.com/databricks/terraform-provider-databricks/jobs" "github.com/databricks/terraform-provider-databricks/logger" @@ -131,6 +132,7 @@ func DatabricksProvider() *schema.Provider { "databricks_azure_adls_gen1_mount": storage.ResourceAzureAdlsGen1Mount().ToResource(), "databricks_azure_adls_gen2_mount": storage.ResourceAzureAdlsGen2Mount().ToResource(), "databricks_azure_blob_mount": storage.ResourceAzureBlobMount().ToResource(), + "databricks_budget": finops.ResourceBudget().ToResource(), "databricks_catalog": catalog.ResourceCatalog().ToResource(), "databricks_catalog_workspace_binding": catalog.ResourceCatalogWorkspaceBinding().ToResource(), "databricks_connection": catalog.ResourceConnection().ToResource(), From 1da7d9342ba758c865f6506b48ff9713ee0a2f78 Mon Sep 17 00:00:00 2001 From: Cedric <23346008+840@users.noreply.github.com> Date: Wed, 2 Oct 2024 15:29:53 +0200 Subject: [PATCH 27/99] [Feature] Add `databricks_mlflow_models` data source (#3874) ## Changes Add `databricks_mlflow_models` data source ## Tests Add integration and acceptance test - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK Resolves #3791 --------- Co-authored-by: Alex Ott --- docs/data-sources/mlflow_models.md | 42 ++++++++++++++++++ .../acceptance/data_mlflow_models_test.go | 44 +++++++++++++++++++ internal/providers/sdkv2/sdkv2.go | 1 + mlflow/data_mlflow_models.go | 26 +++++++++++ mlflow/data_mlflow_models_test.go | 32 ++++++++++++++ 5 files changed, 145 insertions(+) create mode 100644 docs/data-sources/mlflow_models.md create mode 100644 
internal/acceptance/data_mlflow_models_test.go create mode 100644 mlflow/data_mlflow_models.go create mode 100644 mlflow/data_mlflow_models_test.go diff --git a/docs/data-sources/mlflow_models.md b/docs/data-sources/mlflow_models.md new file mode 100644 index 0000000000..b8b67c9096 --- /dev/null +++ b/docs/data-sources/mlflow_models.md @@ -0,0 +1,42 @@ +--- +subcategory: "MLflow" +--- +# databricks_mlflow_models Data Source + +-> **Note** This data source could be only used with workspace-level provider! + +Retrieves a list of [databricks_mlflow_model](../resources/mlflow_model.md) objects, that were created by Terraform or manually, so that special handling could be applied. + +## Example Usage + +```hcl +data "databricks_mlflow_models" "this" {} + +output "model" { + value = data.databricks_mlflow_models.this +} +``` + +```hcl +data "databricks_mlflow_models" "this" {} + +check "model_list_not_empty" { + assert { + condition = length(data.databricks_mlflow_models.this.names) != 0 + error_message = "Model list is empty." + } +} + +check "model_list_contains_model" { + assert { + condition = contains(data.databricks_mlflow_models.this.names, "model_1") + error_message = "model_1 is missing in model list." + } +} +``` + +## Attribute Reference + +This data source exports the following attributes: + +* `names` - List of names of [databricks_mlflow_model](./mlflow_model.md) \ No newline at end of file diff --git a/internal/acceptance/data_mlflow_models_test.go b/internal/acceptance/data_mlflow_models_test.go new file mode 100644 index 0000000000..7cd13a2a02 --- /dev/null +++ b/internal/acceptance/data_mlflow_models_test.go @@ -0,0 +1,44 @@ +package acceptance + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +func TestAccDataMlflowModels(t *testing.T) { + WorkspaceLevel(t, + Step{ + Template: ` + resource "databricks_mlflow_model" "this" { + name = "model-{var.RANDOM}" + + description = "My MLflow model description" + + tags { + key = "key1" + value = "value1" + } + tags { + key = "key2" + value = "value2" + } + } + + data "databricks_mlflow_models" "this" { + depends_on = [databricks_mlflow_model.this] + }`, + Check: func(s *terraform.State) error { + r, ok := s.RootModule().Resources["data.databricks_mlflow_models.this"] + if !ok { + return fmt.Errorf("data not found in state") + } + names := r.Primary.Attributes["names.#"] + if names == "" { + return fmt.Errorf("names are empty: %v", r.Primary.Attributes) + } + return nil + }, + }) +} diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index 6d1b712cb0..65ab1f4973 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -100,6 +100,7 @@ func DatabricksProvider() *schema.Provider { "databricks_metastores": catalog.DataSourceMetastores().ToResource(), "databricks_mlflow_experiment": mlflow.DataSourceExperiment().ToResource(), "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), + "databricks_mlflow_models": mlflow.DataSourceModels().ToResource(), "databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), "databricks_node_type": clusters.DataSourceNodeType().ToResource(), diff --git a/mlflow/data_mlflow_models.go b/mlflow/data_mlflow_models.go new file mode 100644 index 0000000000..96ce6bed05 --- /dev/null +++ b/mlflow/data_mlflow_models.go @@ -0,0 +1,26 @@ +package mlflow + +import ( + "context" + 
"github.com/databricks/databricks-sdk-go/service/ml" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/terraform-provider-databricks/common" +) + +type modelsData struct { + Names []string `json:"names,omitempty" tf:"computed"` +} + +func DataSourceModels() common.Resource { + return common.WorkspaceData(func(ctx context.Context, data *modelsData, w *databricks.WorkspaceClient) error { + list, err := w.ModelRegistry.ListModelsAll(ctx, ml.ListModelsRequest{}) + if err != nil { + return err + } + for _, m := range list { + data.Names = append(data.Names, m.Name) + } + return nil + }) +} diff --git a/mlflow/data_mlflow_models_test.go b/mlflow/data_mlflow_models_test.go new file mode 100644 index 0000000000..b115ce9e23 --- /dev/null +++ b/mlflow/data_mlflow_models_test.go @@ -0,0 +1,32 @@ +package mlflow + +import ( + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/stretchr/testify/mock" + "testing" + + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/databricks/terraform-provider-databricks/qa" +) + +func TestDataSourceModels(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + api := w.GetMockModelRegistryAPI() + api.EXPECT().ListModelsAll(mock.Anything, ml.ListModelsRequest{}).Return([]ml.Model{ + { + Name: "model-01", + }, + { + Name: "model-02", + }, + }, nil) + }, + Read: true, + NonWritable: true, + Resource: DataSourceModels(), + ID: ".", + }.ApplyAndExpectData(t, map[string]interface{}{ + "names": []interface{}{"model-01", "model-02"}, + }) +} From ea7070f503d70ffe9f058965daa32f3e45106a9c Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 3 Oct 2024 16:48:14 +0200 Subject: [PATCH 28/99] [Internal] Skip Budget tests on GCP (#4070) It looks like we have environment problem on CGP (manual tests are working), so let skip tests until we fix internal environments. 
## Changes ## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- internal/acceptance/budget_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/acceptance/budget_test.go b/internal/acceptance/budget_test.go index 379ad84dc4..44b8262de9 100644 --- a/internal/acceptance/budget_test.go +++ b/internal/acceptance/budget_test.go @@ -42,6 +42,9 @@ var ( func TestMwsAccBudgetCreate(t *testing.T) { loadAccountEnv(t) + if isGcp(t) { + skipf(t)("not available on GCP") + } AccountLevel(t, Step{ Template: fmt.Sprintf(budgetTemplate, "840"), }) @@ -49,6 +52,9 @@ func TestMwsAccBudgetCreate(t *testing.T) { func TestMwsAccBudgetUpdate(t *testing.T) { loadAccountEnv(t) + if isGcp(t) { + skipf(t)("not available on GCP") + } AccountLevel(t, Step{ Template: fmt.Sprintf(budgetTemplate, "840"), }, Step{ From 481d460d635ee170a9284f20e611bd9604a8c90b Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Thu, 3 Oct 2024 18:10:24 +0200 Subject: [PATCH 29/99] [Internal] Update to latest OpenAPI spec and bump Go SDK (#4069) ## Changes Update to latest OpenAPI spec and Bump go sdk ## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .codegen/_openapi_sha | 2 +- go.mod | 2 +- go.sum | 4 +- internal/service/apps_tf/model.go | 59 ++++++++ internal/service/catalog_tf/model.go | 5 +- internal/service/dashboards_tf/model.go | 31 +++- internal/service/jobs_tf/model.go | 19 ++- internal/service/pipelines_tf/model.go | 12 ++ internal/service/sql_tf/model.go | 6 +- internal/service/workspace_tf/model.go | 186 +++++++++++++++--------- mlflow/data_mlflow_models.go | 1 + mlflow/data_mlflow_models_test.go | 3 +- repos/resource_git_credential.go | 10 +- repos/resource_git_credential_test.go | 22 +-- 14 files changed, 250 insertions(+), 112 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index ffd6f58dd9..303c785539 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -6f6b1371e640f2dfeba72d365ac566368656f6b6 \ No newline at end of file +0c86ea6dbd9a730c24ff0d4e509603e476955ac5 \ No newline at end of file diff --git a/go.mod b/go.mod index cb0d35a5ba..e01145f07e 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.47.0 + github.com/databricks/databricks-sdk-go v0.48.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 8ff73d7ad5..dfd13d335a 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.47.0 h1:eE7dN9axviL8+s10jnQAayOYDaR+Mfu7E9COGjO4lrQ= -github.com/databricks/databricks-sdk-go v0.47.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.48.0 
h1:46KtsnRo+FGhC3izUXbpL0PXBNomvsdignYDhJZlm9s= +github.com/databricks/databricks-sdk-go v0.48.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 74406307a5..41a6990157 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -25,6 +25,10 @@ type App struct { CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The email of the user that created the app. Creator types.String `tfsdk:"creator" tf:"optional"` + // The default workspace file system path of the source code from which app + // deployment are created. This field tracks the workspace source code path + // of the last active deployment. + DefaultSourceCodePath types.String `tfsdk:"default_source_code_path" tf:"optional"` // The description of the app. Description types.String `tfsdk:"description" tf:"optional"` // The name of the app. The name must contain only lowercase alphanumeric @@ -32,6 +36,8 @@ type App struct { Name types.String `tfsdk:"name" tf:""` // The pending deployment of the app. PendingDeployment *AppDeployment `tfsdk:"pending_deployment" tf:"optional"` + // Resources for the app. + Resources []AppResource `tfsdk:"resources" tf:"optional"` ServicePrincipalId types.Int64 `tfsdk:"service_principal_id" tf:"optional"` @@ -134,6 +140,55 @@ type AppPermissionsRequest struct { AppName types.String `tfsdk:"-"` } +type AppResource struct { + // Description of the App Resource. + Description types.String `tfsdk:"description" tf:"optional"` + + Job *AppResourceJob `tfsdk:"job" tf:"optional"` + // Name of the App Resource. + Name types.String `tfsdk:"name" tf:""` + + Secret *AppResourceSecret `tfsdk:"secret" tf:"optional"` + + ServingEndpoint *AppResourceServingEndpoint `tfsdk:"serving_endpoint" tf:"optional"` + + SqlWarehouse *AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional"` +} + +type AppResourceJob struct { + // Id of the job to grant permission on. + Id types.String `tfsdk:"id" tf:""` + // Permissions to grant on the Job. Supported permissions are: "CAN_MANAGE", + // "IS_OWNER", "CAN_MANAGE_RUN", "CAN_VIEW". + Permission types.String `tfsdk:"permission" tf:""` +} + +type AppResourceSecret struct { + // Key of the secret to grant permission on. + Key types.String `tfsdk:"key" tf:""` + // Permission to grant on the secret scope. For secrets, only one permission + // is allowed. Permission must be one of: "READ", "WRITE", "MANAGE". + Permission types.String `tfsdk:"permission" tf:""` + // Scope of the secret to grant permission on. + Scope types.String `tfsdk:"scope" tf:""` +} + +type AppResourceServingEndpoint struct { + // Name of the serving endpoint to grant permission on. + Name types.String `tfsdk:"name" tf:""` + // Permission to grant on the serving endpoint. Supported permissions are: + // "CAN_MANAGE", "CAN_QUERY", "CAN_VIEW". + Permission types.String `tfsdk:"permission" tf:""` +} + +type AppResourceSqlWarehouse struct { + // Id of the SQL warehouse to grant permission on. + Id types.String `tfsdk:"id" tf:""` + // Permission to grant on the SQL warehouse. Supported permissions are: + // "CAN_MANAGE", "CAN_USE", "IS_OWNER". 
+ Permission types.String `tfsdk:"permission" tf:""` +} + type ApplicationStatus struct { // Application status message Message types.String `tfsdk:"message" tf:"optional"` @@ -171,6 +226,8 @@ type CreateAppRequest struct { // The name of the app. The name must contain only lowercase alphanumeric // characters and hyphens. It must be unique within the workspace. Name types.String `tfsdk:"name" tf:""` + // Resources for the app. + Resources []AppResource `tfsdk:"resources" tf:"optional"` } // Delete an app @@ -259,4 +316,6 @@ type UpdateAppRequest struct { // The name of the app. The name must contain only lowercase alphanumeric // characters and hyphens. It must be unique within the workspace. Name types.String `tfsdk:"name" tf:""` + // Resources for the app. + Resources []AppResource `tfsdk:"resources" tf:"optional"` } diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index 78848824f1..358885d57d 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -1062,9 +1062,8 @@ type GenerateTemporaryTableCredentialResponse struct { // Azure temporary credentials for API authentication. Read more at // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas AzureUserDelegationSas *AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional"` - // Server time when the credential will expire, in unix epoch milliseconds - // since January 1, 1970 at 00:00:00 UTC. The API client is advised to cache - // the credential given this expiration time. + // Server time when the credential will expire, in epoch milliseconds. The + // API client is advised to cache the credential given this expiration time. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // GCP temporary credentials for API authentication. Read more at // https://developers.google.com/identity/protocols/oauth2/service-account diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index fcc9f0adf6..2fcdbdc14c 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -23,7 +23,12 @@ type CreateDashboardRequest struct { // Dashboards responses. ParentPath types.String `tfsdk:"parent_path" tf:"optional"` // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. + // excluded in List Dashboards responses. Use the [get dashboard API] to + // retrieve an example response, which includes the `serialized_dashboard` + // field. This field provides the structure of the JSON string that + // represents the dashboard's layout and components. + // + // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` // The warehouse ID used to run the dashboard. WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` @@ -81,11 +86,17 @@ type Dashboard struct { // leading slash and no trailing slash. This field is excluded in List // Dashboards responses. ParentPath types.String `tfsdk:"parent_path" tf:"optional"` - // The workspace path of the dashboard asset, including the file name. This + // The workspace path of the dashboard asset, including the file name. + // Exported dashboards always have the file extension `.lvdash.json`. This // field is excluded in List Dashboards responses. 
Path types.String `tfsdk:"path" tf:"optional"` // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. + // excluded in List Dashboards responses. Use the [get dashboard API] to + // retrieve an example response, which includes the `serialized_dashboard` + // field. This field provides the structure of the JSON string that + // represents the dashboard's layout and components. + // + // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` // The timestamp of when the dashboard was last updated by the user. This // field is excluded in List Dashboards responses. @@ -213,9 +224,10 @@ type GenieMessage struct { // Genie space ID SpaceId types.String `tfsdk:"space_id" tf:""` // MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching - // metadata from the data sources. * `ASKING_AI`: Waiting for the LLM to - // respond to the users question. * `EXECUTING_QUERY`: Executing AI provided - // SQL query. Get the SQL query result by calling + // metadata from the data sources. * `FILTERING_CONTEXT`: Running smart + // context step to determine relevant context. * `ASKING_AI`: Waiting for + // the LLM to respond to the users question. * `EXECUTING_QUERY`: Executing + // AI provided SQL query. Get the SQL query result by calling // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. // **Important: The message status will stay in the `EXECUTING_QUERY` until // a client calls @@ -510,7 +522,12 @@ type UpdateDashboardRequest struct { // field is excluded in List Dashboards responses. Etag types.String `tfsdk:"etag" tf:"optional"` // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. + // excluded in List Dashboards responses. Use the [get dashboard API] to + // retrieve an example response, which includes the `serialized_dashboard` + // field. This field provides the structure of the JSON string that + // represents the dashboard's layout and components. + // + // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` // The warehouse ID used to run the dashboard. WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 457ea2bb4a..2699dc2286 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -735,7 +735,8 @@ type JobDeployment struct { type JobEmailNotifications struct { // If true, do not send email to recipients specified in `on_failure` if the - // run is skipped. + // run is skipped. This field is `deprecated`. Please use the + // `notification_settings.no_alert_for_skipped_runs` field. 
NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs" tf:"optional"` // A list of email addresses to be notified when the duration of a run // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in @@ -1274,7 +1275,7 @@ type RepairRun struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` - + // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` @@ -1630,7 +1631,7 @@ type RunJobTask struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` - + // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` @@ -1733,7 +1734,7 @@ type RunNow struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` - + // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` @@ -1867,7 +1868,7 @@ type RunParameters struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` - + // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` @@ -2613,7 +2614,8 @@ type TaskDependency struct { type TaskEmailNotifications struct { // If true, do not send email to recipients specified in `on_failure` if the - // run is skipped. + // run is skipped. This field is `deprecated`. Please use the + // `notification_settings.no_alert_for_skipped_runs` field. NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs" tf:"optional"` // A list of email addresses to be notified when the duration of a run // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in @@ -2662,8 +2664,9 @@ type TaskNotificationSettings struct { type TerminationDetails struct { // The code indicates why the run was terminated. Additional codes might be // introduced in future releases. * `SUCCESS`: The run was completed - // successfully. * `CANCELED`: The run was canceled during execution by the - // Databricks platform; for example, if the maximum run duration was + // successfully. * `USER_CANCELED`: The run was successfully canceled during + // execution by a user. * `CANCELED`: The run was canceled during execution + // by the Databricks platform; for example, if the maximum run duration was // exceeded. 
* `SKIPPED`: Run was never executed, for example, if the // upstream task run failed, the dependency type condition was not met, or // there were no material tasks to execute. * `INTERNAL_ERROR`: The run diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index b6abbbb71c..1740d0ee0b 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -60,6 +60,10 @@ type CreatePipeline struct { Notifications []Notifications `tfsdk:"notifications" tf:"optional"` // Whether Photon is enabled for this pipeline. Photon types.Bool `tfsdk:"photon" tf:"optional"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema types.String `tfsdk:"schema" tf:"optional"` // Whether serverless compute is enabled for this pipeline. Serverless types.Bool `tfsdk:"serverless" tf:"optional"` // DBFS root directory for storing checkpoints and tables. @@ -150,6 +154,10 @@ type EditPipeline struct { Photon types.Bool `tfsdk:"photon" tf:"optional"` // Unique identifier for this pipeline. PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema types.String `tfsdk:"schema" tf:"optional"` // Whether serverless compute is enabled for this pipeline. Serverless types.Bool `tfsdk:"serverless" tf:"optional"` // DBFS root directory for storing checkpoints and tables. @@ -687,6 +695,10 @@ type PipelineSpec struct { Notifications []Notifications `tfsdk:"notifications" tf:"optional"` // Whether Photon is enabled for this pipeline. Photon types.Bool `tfsdk:"photon" tf:"optional"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema types.String `tfsdk:"schema" tf:"optional"` // Whether serverless compute is enabled for this pipeline. Serverless types.Bool `tfsdk:"serverless" tf:"optional"` // DBFS root directory for storing checkpoints and tables. diff --git a/internal/service/sql_tf/model.go b/internal/service/sql_tf/model.go index cbee45561b..e912363c30 100755 --- a/internal/service/sql_tf/model.go +++ b/internal/service/sql_tf/model.go @@ -194,6 +194,8 @@ type CancelExecutionRequest struct { type CancelExecutionResponse struct { } +// Configures the channel name and DBSQL version of the warehouse. +// CHANNEL_NAME_CUSTOM should be chosen only when `dbsql_version` is specified. type Channel struct { DbsqlVersion types.String `tfsdk:"dbsql_version" tf:"optional"` @@ -347,7 +349,9 @@ type CreateWarehouseRequest struct { // The amount of time in minutes that a SQL warehouse must be idle (i.e., no // RUNNING queries) before it is automatically stopped. // - // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + // Supported values: - Must be >= 0 mins for serverless warehouses - Must be + // == 0 or >= 10 mins for non-serverless warehouses - 0 indicates no + // autostop. 
// // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` diff --git a/internal/service/workspace_tf/model.go b/internal/service/workspace_tf/model.go index d11553b3a0..fe451acf89 100755 --- a/internal/service/workspace_tf/model.go +++ b/internal/service/workspace_tf/model.go @@ -29,11 +29,11 @@ type AzureKeyVaultSecretScopeMetadata struct { ResourceId types.String `tfsdk:"resource_id" tf:""` } -type CreateCredentials struct { +type CreateCredentialsRequest struct { // Git provider. This field is case-insensitive. The available Git providers - // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. + // are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, + // `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and + // `awsCodeCommit`. GitProvider types.String `tfsdk:"git_provider" tf:""` // The username or email provided with your Git provider account, depending // on which provider you are using. For GitHub, GitHub Enterprise Server, or @@ -45,8 +45,7 @@ type CreateCredentials struct { GitUsername types.String `tfsdk:"git_username" tf:"optional"` // The personal access token used to authenticate to the corresponding Git // provider. For certain providers, support may exist for other types of - // scoped access tokens. [Learn more]. The personal access token used to - // authenticate to the corresponding Git + // scoped access tokens. [Learn more]. // // [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html PersonalAccessToken types.String `tfsdk:"personal_access_token" tf:"optional"` @@ -54,31 +53,23 @@ type CreateCredentials struct { type CreateCredentialsResponse struct { // ID of the credential object in the workspace. - CredentialId types.Int64 `tfsdk:"credential_id" tf:"optional"` - // Git provider. This field is case-insensitive. The available Git providers - // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. - GitProvider types.String `tfsdk:"git_provider" tf:"optional"` - // The username or email provided with your Git provider account, depending - // on which provider you are using. For GitHub, GitHub Enterprise Server, or - // Azure DevOps Services, either email or username may be used. For GitLab, - // GitLab Enterprise Edition, email must be used. For AWS CodeCommit, - // BitBucket or BitBucket Server, username must be used. For all other - // providers please see your provider's Personal Access Token authentication - // documentation to see what is supported. + CredentialId types.Int64 `tfsdk:"credential_id" tf:""` + // The Git provider associated with the credential. + GitProvider types.String `tfsdk:"git_provider" tf:""` + // The username or email provided with your Git provider account and + // associated with the credential. GitUsername types.String `tfsdk:"git_username" tf:"optional"` } -type CreateRepo struct { +type CreateRepoRequest struct { // Desired path for the repo in the workspace. Almost any path in the - // workspace can be chosen. If repo is created in /Repos, path must be in - // the format /Repos/{folder}/{repo-name}. + // workspace can be chosen. If repo is created in `/Repos`, path must be in + // the format `/Repos/{folder}/{repo-name}`. Path types.String `tfsdk:"path" tf:"optional"` // Git provider. This field is case-insensitive. 
The available Git providers - // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. + // are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, + // `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and + // `awsCodeCommit`. Provider types.String `tfsdk:"provider" tf:""` // If specified, the repo will be created with sparse checkout enabled. You // cannot enable/disable sparse checkout after the repo is created. @@ -87,6 +78,24 @@ type CreateRepo struct { Url types.String `tfsdk:"url" tf:""` } +type CreateRepoResponse struct { + // Branch that the Git folder (repo) is checked out to. + Branch types.String `tfsdk:"branch" tf:"optional"` + // SHA-1 hash representing the commit ID of the current HEAD of the Git + // folder (repo). + HeadCommitId types.String `tfsdk:"head_commit_id" tf:"optional"` + // ID of the Git folder (repo) object in the workspace. + Id types.Int64 `tfsdk:"id" tf:"optional"` + // Path of the Git folder (repo) in the workspace. + Path types.String `tfsdk:"path" tf:"optional"` + // Git provider of the linked Git repository. + Provider types.String `tfsdk:"provider" tf:"optional"` + // Sparse checkout settings for the Git folder (repo). + SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + // URL of the linked Git repository. + Url types.String `tfsdk:"url" tf:"optional"` +} + type CreateScope struct { // The metadata for the secret scope if the type is `AZURE_KEYVAULT` BackendAzureKeyvault *AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional"` @@ -105,19 +114,11 @@ type CreateScopeResponse struct { type CredentialInfo struct { // ID of the credential object in the workspace. - CredentialId types.Int64 `tfsdk:"credential_id" tf:"optional"` - // Git provider. This field is case-insensitive. The available Git providers - // are gitHub, gitHubOAuth, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. + CredentialId types.Int64 `tfsdk:"credential_id" tf:""` + // The Git provider associated with the credential. GitProvider types.String `tfsdk:"git_provider" tf:"optional"` - // The username or email provided with your Git provider account, depending - // on which provider you are using. For GitHub, GitHub Enterprise Server, or - // Azure DevOps Services, either email or username may be used. For GitLab, - // GitLab Enterprise Edition, email must be used. For AWS CodeCommit, - // BitBucket or BitBucket Server, username must be used. For all other - // providers please see your provider's Personal Access Token authentication - // documentation to see what is supported. + // The username or email provided with your Git provider account and + // associated with the credential. GitUsername types.String `tfsdk:"git_username" tf:"optional"` } @@ -142,17 +143,23 @@ type DeleteAclResponse struct { } // Delete a credential -type DeleteGitCredentialRequest struct { +type DeleteCredentialsRequest struct { // The ID for the corresponding credential to access. CredentialId types.Int64 `tfsdk:"-"` } +type DeleteCredentialsResponse struct { +} + // Delete a repo type DeleteRepoRequest struct { - // The ID for the corresponding repo to access. + // ID of the Git folder (repo) object in the workspace. 
RepoId types.Int64 `tfsdk:"-"` } +type DeleteRepoResponse struct { +} + type DeleteResponse struct { } @@ -212,16 +219,22 @@ type GetAclRequest struct { Scope types.String `tfsdk:"-"` } -type GetCredentialsResponse struct { - Credentials []CredentialInfo `tfsdk:"credentials" tf:"optional"` -} - // Get a credential entry -type GetGitCredentialRequest struct { +type GetCredentialsRequest struct { // The ID for the corresponding credential to access. CredentialId types.Int64 `tfsdk:"-"` } +type GetCredentialsResponse struct { + // ID of the credential object in the workspace. + CredentialId types.Int64 `tfsdk:"credential_id" tf:""` + // The Git provider associated with the credential. + GitProvider types.String `tfsdk:"git_provider" tf:"optional"` + // The username or email provided with your Git provider account and + // associated with the credential. + GitUsername types.String `tfsdk:"git_username" tf:"optional"` +} + // Get repo permission levels type GetRepoPermissionLevelsRequest struct { // The repo for which to get or manage permissions. @@ -241,10 +254,27 @@ type GetRepoPermissionsRequest struct { // Get a repo type GetRepoRequest struct { - // The ID for the corresponding repo to access. + // ID of the Git folder (repo) object in the workspace. RepoId types.Int64 `tfsdk:"-"` } +type GetRepoResponse struct { + // Branch that the local version of the repo is checked out to. + Branch types.String `tfsdk:"branch" tf:"optional"` + // SHA-1 hash representing the commit ID of the current HEAD of the repo. + HeadCommitId types.String `tfsdk:"head_commit_id" tf:"optional"` + // ID of the Git folder (repo) object in the workspace. + Id types.Int64 `tfsdk:"id" tf:"optional"` + // Path of the Git folder (repo) in the workspace. + Path types.String `tfsdk:"path" tf:"optional"` + // Git provider of the linked Git repository. + Provider types.String `tfsdk:"provider" tf:"optional"` + // Sparse checkout settings for the Git folder (repo). + SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + // URL of the linked Git repository. + Url types.String `tfsdk:"url" tf:"optional"` +} + // Get a secret type GetSecretRequest struct { // The key to fetch secret for. @@ -334,6 +364,11 @@ type ListAclsResponse struct { Items []AclItem `tfsdk:"items" tf:"optional"` } +type ListCredentialsResponse struct { + // List of credentials. + Credentials []CredentialInfo `tfsdk:"credentials" tf:"optional"` +} + // Get repos type ListReposRequest struct { // Token used to get the next page of results. If not specified, returns the @@ -341,15 +376,16 @@ type ListReposRequest struct { // results. NextPageToken types.String `tfsdk:"-"` // Filters repos that have paths starting with the given path prefix. If not - // provided repos from /Repos will be served. + // provided or when provided an effectively empty prefix (`/` or + // `/Workspace`) Git folders (repos) from `/Workspace/Repos` will be served. PathPrefix types.String `tfsdk:"-"` } type ListReposResponse struct { - // Token that can be specified as a query parameter to the GET /repos + // Token that can be specified as a query parameter to the `GET /repos` // endpoint to retrieve the next page of results. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` - + // List of Git folders (repos). Repos []RepoInfo `tfsdk:"repos" tf:"optional"` } @@ -467,25 +503,21 @@ type RepoAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +// Git folder (repo) information. 
type RepoInfo struct { - // Branch that the local version of the repo is checked out to. + // Name of the current git branch of the git folder (repo). Branch types.String `tfsdk:"branch" tf:"optional"` - // SHA-1 hash representing the commit ID of the current HEAD of the repo. + // Current git commit id of the git folder (repo). HeadCommitId types.String `tfsdk:"head_commit_id" tf:"optional"` - // ID of the repo object in the workspace. + // Id of the git folder (repo) in the Workspace. Id types.Int64 `tfsdk:"id" tf:"optional"` - // Desired path for the repo in the workspace. Almost any path in the - // workspace can be chosen. If repo is created in /Repos, path must be in - // the format /Repos/{folder}/{repo-name}. + // Root path of the git folder (repo) in the Workspace. Path types.String `tfsdk:"path" tf:"optional"` - // Git provider. This field is case-insensitive. The available Git providers - // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. + // Git provider of the remote git repository, e.g. `gitHub`. Provider types.String `tfsdk:"provider" tf:"optional"` - + // Sparse checkout config for the git folder (repo). SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` - // URL of the Git repository to be linked. + // URL of the remote git repository. Url types.String `tfsdk:"url" tf:"optional"` } @@ -533,24 +565,32 @@ type SecretScope struct { Name types.String `tfsdk:"name" tf:"optional"` } +// Sparse checkout configuration, it contains options like cone patterns. type SparseCheckout struct { - // List of patterns to include for sparse checkout. + // List of sparse checkout cone patterns, see [cone mode handling] for + // details. + // + // [cone mode handling]: https://git-scm.com/docs/git-sparse-checkout#_internalscone_mode_handling Patterns []types.String `tfsdk:"patterns" tf:"optional"` } +// Sparse checkout configuration, it contains options like cone patterns. type SparseCheckoutUpdate struct { - // List of patterns to include for sparse checkout. + // List of sparse checkout cone patterns, see [cone mode handling] for + // details. + // + // [cone mode handling]: https://git-scm.com/docs/git-sparse-checkout#_internalscone_mode_handling Patterns []types.String `tfsdk:"patterns" tf:"optional"` } -type UpdateCredentials struct { +type UpdateCredentialsRequest struct { // The ID for the corresponding credential to access. CredentialId types.Int64 `tfsdk:"-"` // Git provider. This field is case-insensitive. The available Git providers - // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. - GitProvider types.String `tfsdk:"git_provider" tf:"optional"` + // are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, + // `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and + // `awsCodeCommit`. + GitProvider types.String `tfsdk:"git_provider" tf:""` // The username or email provided with your Git provider account, depending // on which provider you are using. For GitHub, GitHub Enterprise Server, or // Azure DevOps Services, either email or username may be used. For GitLab, @@ -561,17 +601,19 @@ type UpdateCredentials struct { GitUsername types.String `tfsdk:"git_username" tf:"optional"` // The personal access token used to authenticate to the corresponding Git // provider. 
For certain providers, support may exist for other types of - // scoped access tokens. [Learn more]. The personal access token used to - // authenticate to the corresponding Git + // scoped access tokens. [Learn more]. // // [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html PersonalAccessToken types.String `tfsdk:"personal_access_token" tf:"optional"` } -type UpdateRepo struct { +type UpdateCredentialsResponse struct { +} + +type UpdateRepoRequest struct { // Branch that the local version of the repo is checked out to. Branch types.String `tfsdk:"branch" tf:"optional"` - // The ID for the corresponding repo to access. + // ID of the Git folder (repo) object in the workspace. RepoId types.Int64 `tfsdk:"-"` // If specified, update the sparse checkout settings. The update will fail // if sparse checkout is not enabled for the repo. @@ -583,7 +625,7 @@ type UpdateRepo struct { Tag types.String `tfsdk:"tag" tf:"optional"` } -type UpdateResponse struct { +type UpdateRepoResponse struct { } type WorkspaceObjectAccessControlRequest struct { diff --git a/mlflow/data_mlflow_models.go b/mlflow/data_mlflow_models.go index 96ce6bed05..127b4f465f 100644 --- a/mlflow/data_mlflow_models.go +++ b/mlflow/data_mlflow_models.go @@ -2,6 +2,7 @@ package mlflow import ( "context" + "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go" diff --git a/mlflow/data_mlflow_models_test.go b/mlflow/data_mlflow_models_test.go index b115ce9e23..04ff88be8d 100644 --- a/mlflow/data_mlflow_models_test.go +++ b/mlflow/data_mlflow_models_test.go @@ -1,9 +1,10 @@ package mlflow import ( + "testing" + "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/stretchr/testify/mock" - "testing" "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/terraform-provider-databricks/qa" diff --git a/repos/resource_git_credential.go b/repos/resource_git_credential.go index 38f7b94044..9858ff9683 100644 --- a/repos/resource_git_credential.go +++ b/repos/resource_git_credential.go @@ -12,7 +12,7 @@ import ( ) func ResourceGitCredential() common.Resource { - s := common.StructToSchema(workspace.CreateCredentials{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { + s := common.StructToSchema(workspace.CreateCredentialsRequest{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { s["force"] = &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -34,7 +34,7 @@ func ResourceGitCredential() common.Resource { return err } - var req workspace.CreateCredentials + var req workspace.CreateCredentialsRequest common.DataToStructPointer(d, s, &req) resp, err := w.GitCredentials.Create(ctx, req) @@ -49,7 +49,7 @@ func ResourceGitCredential() common.Resource { if len(creds) != 1 { return fmt.Errorf("list of credentials is either empty or have more than one entry (%d)", len(creds)) } - var req workspace.UpdateCredentials + var req workspace.UpdateCredentialsRequest common.DataToStructPointer(d, s, &req) req.CredentialId = creds[0].CredentialId @@ -71,7 +71,7 @@ func ResourceGitCredential() common.Resource { if err != nil { return err } - resp, err := w.GitCredentials.Get(ctx, workspace.GetGitCredentialRequest{CredentialId: cred_id}) + resp, err := w.GitCredentials.Get(ctx, workspace.GetCredentialsRequest{CredentialId: cred_id}) if err != nil { return err } @@ -80,7 +80,7 @@ func ResourceGitCredential() common.Resource { return nil }, Update: func(ctx context.Context, d *schema.ResourceData, c 
*common.DatabricksClient) error { - var req workspace.UpdateCredentials + var req workspace.UpdateCredentialsRequest common.DataToStructPointer(d, s, &req) cred_id, err := strconv.ParseInt(d.Id(), 10, 64) diff --git a/repos/resource_git_credential_test.go b/repos/resource_git_credential_test.go index 911a48eeb0..1a64cf273c 100644 --- a/repos/resource_git_credential_test.go +++ b/repos/resource_git_credential_test.go @@ -85,7 +85,7 @@ func TestResourceGitCredentialUpdate(t *testing.T) { { Method: "PATCH", Resource: fmt.Sprintf("/api/2.0/git-credentials/%d", credID), - ExpectedRequest: workspace.UpdateCredentials{ + ExpectedRequest: workspace.UpdateCredentialsRequest{ CredentialId: int64(credID), GitProvider: provider, GitUsername: user, @@ -125,7 +125,7 @@ func TestResourceGitCredentialUpdate_Error(t *testing.T) { { Method: "PATCH", Resource: fmt.Sprintf("/api/2.0/git-credentials/%d", credID), - ExpectedRequest: workspace.UpdateCredentials{ + ExpectedRequest: workspace.UpdateCredentialsRequest{ CredentialId: int64(credID), GitProvider: provider, GitUsername: user, @@ -168,7 +168,7 @@ func TestResourceGitCredentialCreate(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -200,7 +200,7 @@ func TestResourceGitCredentialCreate_Error(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -236,7 +236,7 @@ func TestResourceGitCredentialCreateWithForce(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -250,14 +250,14 @@ func TestResourceGitCredentialCreateWithForce(t *testing.T) { { Method: http.MethodGet, Resource: "/api/2.0/git-credentials", - Response: workspace.GetCredentialsResponse{ + Response: workspace.ListCredentialsResponse{ Credentials: []workspace.CredentialInfo{resp}, }, }, { Method: http.MethodPatch, Resource: fmt.Sprintf("/api/2.0/git-credentials/%d", resp.CredentialId), - ExpectedRequest: workspace.UpdateCredentials{ + ExpectedRequest: workspace.UpdateCredentialsRequest{ CredentialId: resp.CredentialId, GitProvider: provider, GitUsername: user, @@ -291,7 +291,7 @@ func TestResourceGitCredentialCreateWithForce_Error_List(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -332,7 +332,7 @@ func TestResourceGitCredentialCreateWithForce_ErrorEmptyList(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -374,7 +374,7 @@ func TestResourceGitCredentialCreateWithForce_ErrorUpdate(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -388,7 
+388,7 @@ func TestResourceGitCredentialCreateWithForce_ErrorUpdate(t *testing.T) {
 		{
 			Method:   http.MethodGet,
 			Resource: "/api/2.0/git-credentials",
-			Response: workspace.GetCredentialsResponse{
+			Response: workspace.ListCredentialsResponse{
 				Credentials: []workspace.CredentialInfo{resp},
 			},
 		},

From 7d0491f6e9fa30295bb88771d22af096354bde52 Mon Sep 17 00:00:00 2001
From: Parth Bansal
Date: Fri, 4 Oct 2024 11:51:42 +0200
Subject: [PATCH 30/99] [Fix] Fix Permissions Dashboard Test (#4071)

## Changes
It seems that we need to specify the `serialized_dashboard` field to be able to create the dashboard. I have raised this concern with the responsible team, but to unblock our integration tests we can specify it for now.

## Tests
Integration tests are passing.

- [x] `make test` run locally
- [x] covered with integration tests in `internal/acceptance`
- [x] relevant acceptance tests are passing
---
 internal/acceptance/permissions_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go
index 1386ee9db4..bcd67fa8c9 100644
--- a/internal/acceptance/permissions_test.go
+++ b/internal/acceptance/permissions_test.go
@@ -722,6 +722,7 @@ func TestAccPermissions_Dashboard(t *testing.T) {
 		display_name = "TF New Dashboard"
 		warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}"
 		parent_path = databricks_directory.this.path
+		serialized_dashboard = "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page\"}]}"
 	}
 	`
 	WorkspaceLevel(t, Step{

From 60b8a6ffca1fdd540424acba3e26464a8168ca41 Mon Sep 17 00:00:00 2001
From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com>
Date: Fri, 4 Oct 2024 14:36:52 +0200
Subject: [PATCH 31/99] [Fix] Set ID for online table resource if creation succeeds but it isn't available yet (#4072)

## Changes
We should set the ID right after creation and before waiting for the online table to become available. That way, if the online table never becomes available, the resource is still tracked in the state (see the sketch below).
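In broad strokes, the pattern looks like this (a minimal sketch only; `createRemoteObject` and `waitForAvailability` are hypothetical stand-ins for the resource's actual create call and wait helper):

```go
Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
	name, err := createRemoteObject(ctx, c, d) // hypothetical helper issuing the Create API call
	if err != nil {
		return err
	}
	// Persist the ID before waiting: if the wait below times out or fails,
	// the resource stays tracked in state instead of being orphaned.
	d.SetId(name)
	// Only then block until the object is actually usable.
	return waitForAvailability(ctx, c, name) // hypothetical helper polling until ready
},
```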
Also, the timeout has been doubled. (I am going to follow up with the online tables team on a suitable timeout, but since we have to do a release, going ahead with a small increase should be good for now.)

Note: We should add this pattern of setting the ID right after creation for similar resources to the CONTRIBUTING guide (which I will do in a separate PR).

## Tests
Added a unit test to cover that pathway; the ID is now set (which wasn't the case before).

- [ ] `make test` run locally
- [ ] relevant change in `docs/` folder
- [ ] covered with integration tests in `internal/acceptance`
- [ ] relevant acceptance tests are passing
- [ ] using Go SDK

---------

Co-authored-by: Miles Yucht
---
 catalog/resource_online_table.go      | 7 ++++---
 catalog/resource_online_table_test.go | 7 +++++--
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/catalog/resource_online_table.go b/catalog/resource_online_table.go
index ca46b8eed1..7c317a9742 100644
--- a/catalog/resource_online_table.go
+++ b/catalog/resource_online_table.go
@@ -14,7 +14,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
-const onlineTableDefaultProvisionTimeout = 45 * time.Minute
+const onlineTableDefaultProvisionTimeout = 90 * time.Minute
 
 func waitForOnlineTableCreation(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error {
 	return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError {
@@ -80,13 +80,14 @@ func ResourceOnlineTable() common.Resource {
 			if err != nil {
 				return err
 			}
+			// Note: We should set the id right after creation and before waiting for online table to be available.
+			// If the resource creation timeout is exceeded while waiting for the online table to be ready, this ensures the online table is persisted in the state.
+			d.SetId(res.Name)
 			// this should be specified in the API Spec - filed a ticket to add it
 			err = waitForOnlineTableCreation(w, ctx, res.Name)
 			if err != nil {
-
 				return err
 			}
-			d.SetId(res.Name)
 			return nil
 		},
 		Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
diff --git a/catalog/resource_online_table_test.go b/catalog/resource_online_table_test.go
index 104d6a21c6..1deddd02a3 100644
--- a/catalog/resource_online_table_test.go
+++ b/catalog/resource_online_table_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/databricks/databricks-sdk-go/service/catalog"
 
 	"github.com/databricks/terraform-provider-databricks/qa"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 )
 
@@ -108,7 +109,7 @@ func TestOnlineTableCreate_ErrorInWait(t *testing.T) {
 		},
 		Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateOfflineFailed},
 	}
-	qa.ResourceFixture{
+	d, err := qa.ResourceFixture{
 		MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) {
 			e := w.GetMockOnlineTablesAPI().EXPECT()
 			e.Create(mock.Anything, catalog.CreateOnlineTableRequest{
@@ -124,7 +125,9 @@ func TestOnlineTableCreate_ErrorInWait(t *testing.T) {
 		Resource: ResourceOnlineTable(),
 		HCL:      onlineTableHcl,
 		Create:   true,
-	}.ExpectError(t, "online table status returned OFFLINE_FAILED for online table: main.default.online_table")
+	}.Apply(t)
+	qa.AssertErrorStartsWith(t, err, "online table status returned OFFLINE_FAILED for online table: main.default.online_table")
+	assert.Equal(t, "main.default.online_table", d.Id())
 }
 
 func TestOnlineTableRead(t *testing.T) {

From 704db81699e8b28607be930afb49a3e4b646538b Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Sat, 5 Oct 2024 02:01:26 -0400
Subject: [PATCH 32/99] [Doc] Correctly use native
markdown callouts supported by TF Registry (#4073) ## Changes TF registry supports a [number of special callouts](https://developer.hashicorp.com/terraform/registry/providers/docs#callouts) to highlight paragraphs. These callouts automatically add text like `**Note**` or `**Warning**` so we don't need to add them ourselves. Also, make consistent usage of informational callouts (`->`), important callouts (`~>`, that uses yellow background for paragraph), and warnings (`!>`). ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/access_control_rule_set.md | 6 +++--- docs/resources/artifact_allowlist.md | 5 ++--- .../automatic_cluster_update_setting.md | 2 +- docs/resources/budget.md | 4 ++-- docs/resources/catalog.md | 2 +- docs/resources/catalog_workspace_binding.md | 8 +++----- docs/resources/cluster.md | 8 ++++---- .../compliance_security_profile_setting.md | 4 ++-- docs/resources/connection.md | 2 +- docs/resources/dbfs_file.md | 2 +- docs/resources/default_namespace_setting.md | 2 +- .../enhanced_security_monitoring_setting.md | 2 +- docs/resources/entitlements.md | 2 +- docs/resources/external_location.md | 2 +- docs/resources/global_init_script.md | 2 +- docs/resources/grant.md | 6 ++---- docs/resources/grants.md | 6 ++---- docs/resources/group.md | 4 ++-- docs/resources/group_instance_profile.md | 2 +- docs/resources/group_role.md | 2 +- docs/resources/instance_pool.md | 2 +- docs/resources/instance_profile.md | 2 +- docs/resources/ip_access_list.md | 2 +- docs/resources/job.md | 10 +++++----- docs/resources/library.md | 6 +++--- docs/resources/metastore.md | 2 +- docs/resources/metastore_assignment.md | 2 +- docs/resources/metastore_data_access.md | 2 +- docs/resources/mlflow_model.md | 2 +- docs/resources/mlflow_webhook.md | 2 +- docs/resources/model_serving.md | 2 +- docs/resources/mount.md | 10 +++++----- docs/resources/mws_credentials.md | 2 +- docs/resources/mws_customer_managed_keys.md | 6 +++--- docs/resources/mws_log_delivery.md | 4 ++-- docs/resources/mws_ncc_binding.md | 4 ++-- .../resources/mws_ncc_private_endpoint_rule.md | 4 ++-- .../mws_network_connectivity_config.md | 2 +- docs/resources/mws_networks.md | 8 ++++---- docs/resources/mws_private_access_settings.md | 6 +++--- docs/resources/mws_storage_configurations.md | 4 ++-- docs/resources/mws_vpc_endpoint.md | 4 ++-- docs/resources/mws_workspaces.md | 10 +++++----- docs/resources/notebook.md | 2 +- docs/resources/obo_token.md | 4 ++-- docs/resources/online_table.md | 6 ++---- docs/resources/permissions.md | 18 +++++++++--------- docs/resources/provider.md | 4 ++-- docs/resources/recipient.md | 2 +- docs/resources/registered_model.md | 2 +- docs/resources/repo.md | 4 ++-- .../restrict_workspace_admins_setting.md | 4 ++-- docs/resources/schema.md | 2 +- docs/resources/service_principal.md | 4 ++-- docs/resources/service_principal_role.md | 2 +- docs/resources/service_principal_secret.md | 2 +- docs/resources/share.md | 4 ++-- docs/resources/sql_alert.md | 2 +- docs/resources/sql_dashboard.md | 4 ++-- docs/resources/sql_permissions.md | 2 +- docs/resources/sql_query.md | 2 +- docs/resources/sql_visualization.md | 2 +- docs/resources/sql_widget.md | 4 ++-- docs/resources/storage_credential.md | 2 +- docs/resources/system_schema.md | 4 ++-- docs/resources/token.md | 2 +- docs/resources/user.md | 4 ++-- docs/resources/user_instance_profile.md | 2 +- 
docs/resources/user_role.md | 2 +- docs/resources/vector_search_endpoint.md | 2 +- docs/resources/vector_search_index.md | 2 +- docs/resources/volume.md | 4 ++-- docs/resources/workspace_binding.md | 8 +++----- docs/resources/workspace_conf.md | 6 +++--- docs/resources/workspace_file.md | 2 +- 75 files changed, 138 insertions(+), 149 deletions(-) diff --git a/docs/resources/access_control_rule_set.md b/docs/resources/access_control_rule_set.md index 3a7767c570..1bd2ee1e50 100644 --- a/docs/resources/access_control_rule_set.md +++ b/docs/resources/access_control_rule_set.md @@ -4,13 +4,13 @@ subcategory: "Security" # databricks_access_control_rule_set Resource --> **Note** This resource can be used with an account or workspace-level provider. +-> This resource can be used with an account or workspace-level provider. This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. --> **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks_access_control_rule_set`. +-> Currently, we only support managing access rules on service principal, group and account resources through `databricks_access_control_rule_set`. --> **Warning** `databricks_access_control_rule_set` cannot be used to manage access rules for resources supported by [databricks_permissions](permissions.md). Refer to its documentation for more information. +!> `databricks_access_control_rule_set` cannot be used to manage access rules for resources supported by [databricks_permissions](permissions.md). Refer to its documentation for more information. ## Service principal rule set usage diff --git a/docs/resources/artifact_allowlist.md b/docs/resources/artifact_allowlist.md index a65fe6dd57..d6272b9322 100644 --- a/docs/resources/artifact_allowlist.md +++ b/docs/resources/artifact_allowlist.md @@ -3,10 +3,9 @@ subcategory: "Unity Catalog" --- # databricks_artifact_allowlist Resource --> **Note** - It is required to define all allowlist for an artifact type in a single resource, otherwise Terraform cannot guarantee config drift prevention. +~> It is required to define all allowlist for an artifact type in a single resource, otherwise Terraform cannot guarantee config drift prevention. --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the allowlist in UC so that users can leverage these artifacts on compute configured with shared access mode. diff --git a/docs/resources/automatic_cluster_update_setting.md b/docs/resources/automatic_cluster_update_setting.md index 152a95b9ea..b8f4e719aa 100644 --- a/docs/resources/automatic_cluster_update_setting.md +++ b/docs/resources/automatic_cluster_update_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_automatic_cluster_update_workspace_setting Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! The `databricks_automatic_cluster_update_workspace_setting` resource allows you to control whether automatic cluster update is enabled for the current workspace. By default, it is turned off. Enabling this feature on a workspace requires that you add the Enhanced Security and Compliance add-on. 
diff --git a/docs/resources/budget.md b/docs/resources/budget.md index 3b99fa7c8a..31378d4254 100644 --- a/docs/resources/budget.md +++ b/docs/resources/budget.md @@ -3,9 +3,9 @@ subcategory: "FinOps" --- # databricks_budget Resource --> **Note** Initialize provider with `alias = "account"`, and `host` pointing to the account URL, like, `host = "https://accounts.cloud.databricks.com"`. Use `provider = databricks.account` for all account-level resources. +-> Initialize provider with `alias = "account"`, and `host` pointing to the account URL, like, `host = "https://accounts.cloud.databricks.com"`. Use `provider = databricks.account` for all account-level resources. --> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). +-> This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). This resource allows you to manage [Databricks Budgets](https://docs.databricks.com/en/admin/account-settings/budgets.html). diff --git a/docs/resources/catalog.md b/docs/resources/catalog.md index 980c6c837d..fb854f4786 100644 --- a/docs/resources/catalog.md +++ b/docs/resources/catalog.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_catalog Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. diff --git a/docs/resources/catalog_workspace_binding.md b/docs/resources/catalog_workspace_binding.md index 5520192fb5..0eafd00a2c 100644 --- a/docs/resources/catalog_workspace_binding.md +++ b/docs/resources/catalog_workspace_binding.md @@ -3,17 +3,15 @@ subcategory: "Unity Catalog" --- # databricks_catalog_workspace_binding Resource --> **NOTE**This resource has been deprecated and will be removed soon. Please use the [databricks_workspace_binding resource](./workspace_binding.md) instead. +~> This resource has been deprecated and will be removed soon. Please use the [databricks_workspace_binding resource](./workspace_binding.md) instead. If you use workspaces to isolate user data access, you may want to limit catalog access to specific workspaces in your account, also known as workspace-catalog binding By default, Databricks assigns the catalog to all workspaces attached to the current metastore. By using `databricks_catalog_workspace_binding`, the catalog will be unassigned from all workspaces and only assigned explicitly using this resource. --> **Note** - To use this resource the catalog must have its isolation mode set to `ISOLATED` in the [`databricks_catalog`](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/catalog#isolation_mode) resource. Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration). +-> To use this resource the catalog must have its isolation mode set to `ISOLATED` in the [`databricks_catalog`](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/catalog#isolation_mode) resource. Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration). 
--> **Note** - If the catalog's isolation mode was set to `ISOLATED` using Terraform then the catalog will have been automatically bound to the workspace it was created from. +-> If the catalog's isolation mode was set to `ISOLATED` using Terraform then the catalog will have been automatically bound to the workspace it was created from. ## Example Usage diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index c68cc8aa81..a856812192 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -5,7 +5,7 @@ subcategory: "Compute" This resource allows you to manage [Databricks Clusters](https://docs.databricks.com/clusters/index.html). --> **Note** In case of [`Cannot access cluster ####-######-####### that was terminated or unpinned more than 30 days ago`](https://github.com/databricks/terraform-provider-databricks/issues/1197#issuecomment-1069386670) errors, please upgrade to v0.5.5 or later. If for some reason you cannot upgrade the version of provider, then the other viable option to unblock the apply pipeline is [`terraform state rm path.to.databricks_cluster.resource`](https://www.terraform.io/cli/commands/state/rm) command. +-> In case of [`Cannot access cluster ####-######-####### that was terminated or unpinned more than 30 days ago`](https://github.com/databricks/terraform-provider-databricks/issues/1197#issuecomment-1069386670) errors, please upgrade to v0.5.5 or later. If for some reason you cannot upgrade the version of provider, then the other viable option to unblock the apply pipeline is [`terraform state rm path.to.databricks_cluster.resource`](https://www.terraform.io/cli/commands/state/rm) command. ```hcl data "databricks_node_type" "smallest" { @@ -130,7 +130,7 @@ resource "databricks_cluster" "single_node" { ### (Legacy) High-Concurrency clusters --> **Note** This is a legacy cluster type, not related to the real serverless compute. See [Clusters UI changes and cluster access modes](https://docs.databricks.com/archive/compute/cluster-ui-preview.html#legacy) for information on what access mode to use when creating new clusters. +~> This is a legacy cluster type, not related to the real serverless compute. See [Clusters UI changes and cluster access modes](https://docs.databricks.com/archive/compute/cluster-ui-preview.html#legacy) for information on what access mode to use when creating new clusters. To create High-Concurrency cluster, following settings should be provided: @@ -163,7 +163,7 @@ resource "databricks_cluster" "cluster_with_table_access_control" { To install libraries, one must specify each library in a separate configuration block. Each different type of library has a slightly different syntax. It's possible to set only one type of library within one config block. Otherwise, the plan will fail with an error. --> **Note** Please consider using [databricks_library](library.md) resource for a more flexible setup. +-> Please consider using [databricks_library](library.md) resource for a more flexible setup. Installing JAR artifacts on a cluster. Location can be anything, that is DBFS or mounted object store (s3, adls, ...) @@ -484,7 +484,7 @@ resource "databricks_cluster" "this" { ### cluster_mount_info blocks (experimental) --> **Note** The underlying API is experimental and may change in the future. +~> The underlying API is experimental and may change in the future. It's possible to mount NFS (Network File System) resources into the Spark containers inside the cluster. 
You can specify one or more `cluster_mount_info` blocks describing the mount. This block has following attributes: diff --git a/docs/resources/compliance_security_profile_setting.md b/docs/resources/compliance_security_profile_setting.md index 6bb5afc090..acf7f6ef7e 100644 --- a/docs/resources/compliance_security_profile_setting.md +++ b/docs/resources/compliance_security_profile_setting.md @@ -4,9 +4,9 @@ subcategory: "Settings" # databricks_compliance_security_profile_workspace_setting Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! --> **Note** This setting can NOT be disabled once it is enabled. +~> This setting can NOT be disabled once it is enabled. The `databricks_compliance_security_profile_workspace_setting` resource allows you to control whether to enable the compliance security profile for the current workspace. Enabling it on a workspace is permanent. By default, it is diff --git a/docs/resources/connection.md b/docs/resources/connection.md index f7421bd5da..c568c72847 100644 --- a/docs/resources/connection.md +++ b/docs/resources/connection.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_connection (Resource) --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! Lakehouse Federation is the query federation platform for Databricks. Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following: diff --git a/docs/resources/dbfs_file.md b/docs/resources/dbfs_file.md index e3b9c73eea..78607c733f 100644 --- a/docs/resources/dbfs_file.md +++ b/docs/resources/dbfs_file.md @@ -49,7 +49,7 @@ resource "databricks_library" "app" { ## Argument Reference --> **Note** DBFS files would only be changed, if Terraform stage did change. This means that any manual changes to managed file won't be overwritten by Terraform, if there's no local change. +-> DBFS files would only be changed, if Terraform stage did change. This means that any manual changes to managed file won't be overwritten by Terraform, if there's no local change. The following arguments are supported: diff --git a/docs/resources/default_namespace_setting.md b/docs/resources/default_namespace_setting.md index cf3c5ee36a..f21698fcb6 100644 --- a/docs/resources/default_namespace_setting.md +++ b/docs/resources/default_namespace_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_default_namespace_setting Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! The `databricks_default_namespace_setting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. Setting the default catalog for the workspace determines the catalog that is used when queries do not reference diff --git a/docs/resources/enhanced_security_monitoring_setting.md b/docs/resources/enhanced_security_monitoring_setting.md index 64a18c9fcb..18e0de3e1a 100644 --- a/docs/resources/enhanced_security_monitoring_setting.md +++ b/docs/resources/enhanced_security_monitoring_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_enhanced_security_monitoring_workspace_setting Resource --> **Note** This resource can only be used with a workspace-level provider! 
+-> This resource can only be used with a workspace-level provider! The `databricks_enhanced_security_monitoring_workspace_setting` resource allows you to control whether enhanced security monitoring is enabled for the current workspace. If the compliance security profile is enabled, this is automatically enabled. By default, diff --git a/docs/resources/entitlements.md b/docs/resources/entitlements.md index 15d7f42f6f..10852c4bb9 100644 --- a/docs/resources/entitlements.md +++ b/docs/resources/entitlements.md @@ -5,7 +5,7 @@ subcategory: "Security" This resource allows you to set entitlements to existing [databricks_users](user.md), [databricks_group](group.md) or [databricks_service_principal](service_principal.md). --> **Note** You must define entitlements of a principal using either `databricks_entitlements` or directly within one of [databricks_users](user.md), [databricks_group](group.md) or [databricks_service_principal](service_principal.md). Having entitlements defined in both resources will result in non-deterministic behaviour. +-> You must define entitlements of a principal using either `databricks_entitlements` or directly within one of [databricks_users](user.md), [databricks_group](group.md) or [databricks_service_principal](service_principal.md). Having entitlements defined in both resources will result in non-deterministic behaviour. ## Example Usage diff --git a/docs/resources/external_location.md b/docs/resources/external_location.md index 59cc555685..2495510bb0 100644 --- a/docs/resources/external_location.md +++ b/docs/resources/external_location.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_external_location Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: diff --git a/docs/resources/global_init_script.md b/docs/resources/global_init_script.md index bb8e50d98b..90d5d42b36 100644 --- a/docs/resources/global_init_script.md +++ b/docs/resources/global_init_script.md @@ -31,7 +31,7 @@ resource "databricks_global_init_script" "init2" { ## Argument Reference --> **Note** Global init script in the Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed global init script won't be overwritten by Terraform, if there's no local change to source. +-> Global init script in the Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed global init script won't be overwritten by Terraform, if there's no local change to source. The size of a global init script source code must not exceed 64Kb. The following arguments are supported: diff --git a/docs/resources/grant.md b/docs/resources/grant.md index 2adfdb7d67..5f2f8b4326 100644 --- a/docs/resources/grant.md +++ b/docs/resources/grant.md @@ -3,11 +3,9 @@ subcategory: "Unity Catalog" --- # databricks_grant Resource --> **Note** - This article refers to the privileges and inheritance model in Privilege Model version 1.0. If you created your metastore during the public preview (before August 25, 2022), you can upgrade to Privilege Model version 1.0 following [Upgrade to privilege inheritance](https://docs.databricks.com/data-governance/unity-catalog/hive-metastore.html) +-> This article refers to the privileges and inheritance model in Privilege Model version 1.0. 
If you created your metastore during the public preview (before August 25, 2022), you can upgrade to Privilege Model version 1.0 following [Upgrade to privilege inheritance](https://docs.databricks.com/data-governance/unity-catalog/hive-metastore.html) --> **Note** - Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. Account-level principal grants can be assigned with any valid workspace as the Unity Catalog is decoupled from specific workspaces. More information in [the official documentation](https://docs.databricks.com/data-governance/unity-catalog/index.html). +-> Most of Unity Catalog APIs are only accessible via **workspace-level APIs**. This design may change in the future. Account-level principal grants can be assigned with any valid workspace as the Unity Catalog is decoupled from specific workspaces. More information in [the official documentation](https://docs.databricks.com/data-governance/unity-catalog/index.html). In Unity Catalog all users initially have no access to data. Only Metastore Admins can create objects and can grant/revoke access on individual objects to users and groups. Every securable object in Unity Catalog has an owner. The owner can be any account-level user or group, called principals in general. The principal that creates an object becomes its owner. Owners receive `ALL_PRIVILEGES` on the securable object (e.g., `SELECT` and `MODIFY` on a table), as well as the permission to grant privileges to other principals. diff --git a/docs/resources/grants.md b/docs/resources/grants.md index 25f22c91af..64a52d83df 100644 --- a/docs/resources/grants.md +++ b/docs/resources/grants.md @@ -3,11 +3,9 @@ subcategory: "Unity Catalog" --- # databricks_grants Resource --> **Note** - This article refers to the privileges and inheritance model in Privilege Model version 1.0. If you created your metastore during the public preview (before August 25, 2022), you can upgrade to Privilege Model version 1.0 following [Upgrade to privilege inheritance](https://docs.databricks.com/data-governance/unity-catalog/hive-metastore.html) +-> This article refers to the privileges and inheritance model in Privilege Model version 1.0. If you created your metastore during the public preview (before August 25, 2022), you can upgrade to Privilege Model version 1.0 following [Upgrade to privilege inheritance](https://docs.databricks.com/data-governance/unity-catalog/hive-metastore.html) --> **Note** - Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. Account-level principal grants can be assigned with any valid workspace as the Unity Catalog is decoupled from specific workspaces. More information in [the official documentation](https://docs.databricks.com/data-governance/unity-catalog/index.html). +-> Most of Unity Catalog APIs are only accessible via **workspace-level APIs**. This design may change in the future. Account-level principal grants can be assigned with any valid workspace as the Unity Catalog is decoupled from specific workspaces. More information in [the official documentation](https://docs.databricks.com/data-governance/unity-catalog/index.html). Two different resources help you manage your Unity Catalog grants for a securable. 
Each of these resources serves a different use case: diff --git a/docs/resources/group.md b/docs/resources/group.md index 5b23e83c66..aa6321e50f 100644 --- a/docs/resources/group.md +++ b/docs/resources/group.md @@ -5,9 +5,9 @@ subcategory: "Security" This resource allows you to manage both [account groups and workspace-local groups](https://docs.databricks.com/administration-guide/users-groups/groups.html). You can use the [databricks_group_member resource](group_member.md) to assign Databricks users, [service principals](service_principal.md) as well as other groups as members of the group. This is useful if you are using an application to sync users & groups with SCIM API. --> **Note** To assign an account level group to a workspace use [databricks_mws_permission_assignment](mws_permission_assignment.md). +-> To assign an account level group to a workspace use [databricks_mws_permission_assignment](mws_permission_assignment.md). --> **Note** Entitlements, like, `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, `workspace_access` applicable only for workspace-level groups. Use [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level groups. +-> Entitlements, like, `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, `workspace_access` applicable only for workspace-level groups. Use [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level groups. To create account groups in the Databricks account, the provider must be configured accordingly. On AWS deployment with `host = "https://accounts.cloud.databricks.com"` and `account_id = "00000000-0000-0000-0000-000000000000"`. On Azure deployments `host = "https://accounts.azuredatabricks.net"`, `account_id = "00000000-0000-0000-0000-000000000000"` and using [AAD tokens](https://registry.terraform.io/providers/databricks/databricks/latest/docs#special-configurations-for-azure) as authentication. diff --git a/docs/resources/group_instance_profile.md b/docs/resources/group_instance_profile.md index 9da28aeda1..01f9bfae97 100644 --- a/docs/resources/group_instance_profile.md +++ b/docs/resources/group_instance_profile.md @@ -39,7 +39,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/group_role.md b/docs/resources/group_role.md index 395df9f264..147d214ffa 100644 --- a/docs/resources/group_role.md +++ b/docs/resources/group_role.md @@ -59,7 +59,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/instance_pool.md b/docs/resources/instance_pool.md index 9663576c63..2c7e052cf5 100644 --- a/docs/resources/instance_pool.md +++ b/docs/resources/instance_pool.md @@ -5,7 +5,7 @@ subcategory: "Compute" This resource allows you to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html) to reduce [cluster](cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use instances. An instance pool reduces [cluster](cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use cloud instances. 
When a [cluster](cluster.md) attached to a pool needs an instance, it first attempts to allocate one of the pool’s idle instances. If the pool has no idle instances, it expands by allocating a new instance from the instance provider in order to accommodate the cluster’s request. When a [cluster](cluster.md) releases an instance, it returns to the pool and is free for another [cluster](cluster.md) to use. Only clusters attached to a pool can use that pool’s idle instances. --> **Note** It is important to know that different cloud service providers have different `node_type_id`, `disk_specs` and potentially other configurations. +-> It is important to know that different cloud service providers have different `node_type_id`, `disk_specs` and potentially other configurations. ## Example Usage diff --git a/docs/resources/instance_profile.md b/docs/resources/instance_profile.md index 23ef84f205..b8461dd941 100644 --- a/docs/resources/instance_profile.md +++ b/docs/resources/instance_profile.md @@ -5,7 +5,7 @@ subcategory: "Deployment" This resource allows you to manage AWS EC2 instance profiles that users can launch [databricks_cluster](cluster.md) and access data, like [databricks_mount](mount.md). The following example demonstrates how to create an instance profile and create a cluster with it. When creating a new `databricks_instance_profile`, Databricks validates that it has sufficient permissions to launch instances with the instance profile. This validation uses AWS dry-run mode for the [AWS EC2 RunInstances API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html). --> **Note** Please switch to [databricks_storage_credential](storage_credential.md) with Unity Catalog to manage storage credentials, which provides a better and faster way for managing credential security. +-> Please switch to [databricks_storage_credential](storage_credential.md) with Unity Catalog to manage storage credentials, which provides a better and faster way for managing credential security. ```hcl variable "crossaccount_role_name" { diff --git a/docs/resources/ip_access_list.md b/docs/resources/ip_access_list.md index 44b8c20f46..107ea35144 100644 --- a/docs/resources/ip_access_list.md +++ b/docs/resources/ip_access_list.md @@ -5,7 +5,7 @@ subcategory: "Security" Security-conscious enterprises that use cloud SaaS applications need to restrict access to their own employees. Authentication helps to prove user identity, but that does not enforce network location of the users. Accessing a cloud service from an unsecured network can pose security risks to an enterprise, especially when the user may have authorized access to sensitive or personal data. Enterprise network perimeters apply security policies and limit access to external services (for example, firewalls, proxies, DLP, and logging), so access beyond these controls are assumed to be untrusted. Please see [IP Access List](https://docs.databricks.com/security/network/ip-access-list.html) for full feature documentation. --> **Note** The total number of IP addresses and CIDR scopes provided across all ACL Lists in a workspace can not exceed 1000. Refer to the docs above for specifics. +-> The total number of IP addresses and CIDR scopes provided across all ACL Lists in a workspace can not exceed 1000. Refer to the docs above for specifics. 
## Example Usage diff --git a/docs/resources/job.md b/docs/resources/job.md index e8e3c9cdc2..dc8eebc587 100644 --- a/docs/resources/job.md +++ b/docs/resources/job.md @@ -8,7 +8,7 @@ The `databricks_job` resource allows you to manage [Databricks Jobs](https://doc ## Example Usage --> **Note** In Terraform configuration, it is recommended to define tasks in alphabetical order of their `task_key` arguments, so that you get consistent and readable diff. Whenever tasks are added or removed, or `task_key` is renamed, you'll observe a change in the majority of tasks. It's related to the fact that the current version of the provider treats `task` blocks as an ordered list. Alternatively, `task` block could have been an unordered set, though end-users would see the entire block replaced upon a change in single property of the task. +-> In Terraform configuration, it is recommended to define tasks in alphabetical order of their `task_key` arguments, so that you get consistent and readable diff. Whenever tasks are added or removed, or `task_key` is renamed, you'll observe a change in the majority of tasks. It's related to the fact that the current version of the provider treats `task` blocks as an ordered list. Alternatively, `task` block could have been an unordered set, though end-users would see the entire block replaced upon a change in single property of the task. It is possible to create [a Databricks job](https://docs.databricks.com/data-engineering/jobs/jobs-user-guide.html) using `task` blocks. A single task is defined with the `task` block containing one of the `*_task` blocks, `task_key`, and additional arguments described below. @@ -142,7 +142,7 @@ This block describes individual tasks: * `timeout_seconds` - (Optional) (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout. * `webhook_notifications` - (Optional) (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begins, completes or fails. The default behavior is to not send any notifications. This field is a block and is documented below. --> **Note** If no `job_cluster_key`, `existing_cluster_id`, or `new_cluster` were specified in task definition, then task will executed using serverless compute. +-> If no `job_cluster_key`, `existing_cluster_id`, or `new_cluster` were specified in task definition, then task will executed using serverless compute. #### condition_task Configuration Block @@ -186,7 +186,7 @@ You also need to include a `git_source` block to configure the repository that c * `pipeline_id` - (Required) The pipeline's unique ID. * `full_refresh` - (Optional) (Bool) Specifies if there should be full refresh of the pipeline. --> **Note** The following configuration blocks are only supported inside a `task` block +-> The following configuration blocks are only supported inside a `task` block #### python_wheel_task Configuration Block @@ -318,7 +318,7 @@ This block describes upstream dependencies of a given task. For multiple upstrea * `task_key` - (Required) The name of the task this task depends on. * `outcome` - (Optional, string) Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are `"true"` or `"false"`. --> **Note** Similar to the tasks themselves, each dependency inside the task need to be declared in alphabetical order with respect to task_key in order to get consistent Terraform diffs. 
+-> Similar to the tasks themselves, each dependency inside the task need to be declared in alphabetical order with respect to task_key in order to get consistent Terraform diffs. ### run_as Configuration Block @@ -434,7 +434,7 @@ webhook_notifications { * `id` - ID of the system notification that is notified when an event defined in `webhook_notifications` is triggered. --> **Note** The following configuration blocks can be standalone or nested inside a `task` block +-> The following configuration blocks can be standalone or nested inside a `task` block ### notification_settings Configuration Block diff --git a/docs/resources/library.md b/docs/resources/library.md index 5eab7306fb..c693bfed8d 100644 --- a/docs/resources/library.md +++ b/docs/resources/library.md @@ -5,7 +5,7 @@ subcategory: "Compute" Installs a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](cluster.md). Each different type of library has a slightly different syntax. It's possible to set only one type of library within one resource. Otherwise, the plan will fail with an error. --> **Note** `databricks_library` resource would always start the associated cluster if it's not running, so make sure to have auto-termination configured. It's not possible to atomically change the version of the same library without cluster restart. Libraries are fully removed from the cluster only after restart. +-> `databricks_library` resource would always start the associated cluster if it's not running, so make sure to have auto-termination configured. It's not possible to atomically change the version of the same library without cluster restart. Libraries are fully removed from the cluster only after restart. ## Installing library on all clusters @@ -71,7 +71,7 @@ resource "databricks_library" "app" { Installing Python PyPI artifacts. You can optionally also specify the `repo` parameter for a custom PyPI mirror, which should be accessible without any authentication for the network that cluster runs in. --> **Note** `repo` host should be accessible from the Internet by Databricks control plane. If connectivity to custom PyPI repositories is required, please modify cluster-node `/etc/pip.conf` through [databricks_global_init_script](global_init_script.md). +-> `repo` host should be accessible from the Internet by Databricks control plane. If connectivity to custom PyPI repositories is required, please modify cluster-node `/etc/pip.conf` through [databricks_global_init_script](global_init_script.md). ```hcl resource "databricks_library" "fbprophet" { @@ -126,7 +126,7 @@ resource "databricks_library" "rkeops" { ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/metastore.md b/docs/resources/metastore.md index 6cd5a8417e..8193f68600 100644 --- a/docs/resources/metastore.md +++ b/docs/resources/metastore.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_metastore Resource --> **Note** This resource can be used with an account or workspace-level provider. +-> This resource can be used with an account or workspace-level provider. A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore. 
diff --git a/docs/resources/metastore_assignment.md b/docs/resources/metastore_assignment.md index 6a336a312b..11a94307cd 100644 --- a/docs/resources/metastore_assignment.md +++ b/docs/resources/metastore_assignment.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_metastore_assignment (Resource) --> **Note** This resource can be used with an account or workspace-level provider. +-> This resource can be used with an account or workspace-level provider. A single [databricks_metastore](metastore.md) can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates. diff --git a/docs/resources/metastore_data_access.md b/docs/resources/metastore_data_access.md index 290eb061cb..04823fe417 100644 --- a/docs/resources/metastore_data_access.md +++ b/docs/resources/metastore_data_access.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_metastore_data_access (Resource) --> **Note** This resource can be used with an account or workspace-level provider. +-> This resource can be used with an account or workspace-level provider. Optionally, each [databricks_metastore](metastore.md) can have a default [databricks_storage_credential](storage_credential.md) defined as `databricks_metastore_data_access`. This will be used by Unity Catalog to access data in the root storage location if defined. diff --git a/docs/resources/mlflow_model.md b/docs/resources/mlflow_model.md index 60c310d295..208866d8d7 100644 --- a/docs/resources/mlflow_model.md +++ b/docs/resources/mlflow_model.md @@ -5,7 +5,7 @@ subcategory: "MLflow" This resource allows you to create [MLflow models](https://docs.databricks.com/applications/mlflow/models.html) in Databricks. -**Note** This documentation covers the Workspace Model Registry. Databricks recommends using [Models in Unity Catalog](registered_model.md). Models in Unity Catalog provides centralized model governance, cross-workspace access, lineage, and deployment. +-> This documentation covers the Workspace Model Registry. Databricks recommends using [Models in Unity Catalog](registered_model.md). Models in Unity Catalog provides centralized model governance, cross-workspace access, lineage, and deployment. ## Example Usage diff --git a/docs/resources/mlflow_webhook.md b/docs/resources/mlflow_webhook.md index 96f62e20ff..fd280cf9b4 100644 --- a/docs/resources/mlflow_webhook.md +++ b/docs/resources/mlflow_webhook.md @@ -112,7 +112,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/model_serving.md b/docs/resources/model_serving.md index 0cac9cb8f5..0bb116bfa9 100644 --- a/docs/resources/model_serving.md +++ b/docs/resources/model_serving.md @@ -5,7 +5,7 @@ subcategory: "Serving" This resource allows you to manage [Model Serving](https://docs.databricks.com/machine-learning/model-serving/index.html) endpoints in Databricks. -**Note** If you replace `served_models` with `served_entities` in an existing serving endpoint, the serving endpoint will briefly go into an update state (~30 seconds) and increment the config version. 
+-> If you replace `served_models` with `served_entities` in an existing serving endpoint, the serving endpoint will briefly go into an update state (~30 seconds) and increment the config version. ## Example Usage diff --git a/docs/resources/mount.md b/docs/resources/mount.md index a0446b03ac..ff187b2daa 100644 --- a/docs/resources/mount.md +++ b/docs/resources/mount.md @@ -5,9 +5,9 @@ subcategory: "Storage" This resource will [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. Right now it supports mounting AWS S3, Azure (Blob Storage, ADLS Gen1 & Gen2), Google Cloud Storage. It is important to understand that this will start up the [cluster](cluster.md) if the cluster is terminated. The read and refresh terraform command will require a cluster and may take some time to validate the mount. -**Note** When `cluster_id` is not specified, it will create the smallest possible cluster in the default availability zone with name equal to or starting with `terraform-mount` for the shortest possible amount of time. To avoid mount failure due to potentially quota or capacity issues with the default cluster, we recommend specifying a cluster to use for mounting. +-> When `cluster_id` is not specified, it will create the smallest possible cluster in the default availability zone with name equal to or starting with `terraform-mount` for the shortest possible amount of time. To avoid mount failure due to potentially quota or capacity issues with the default cluster, we recommend specifying a cluster to use for mounting. -**Note** CRUD operations on a databricks mount require a running cluster. Due to limitations of terraform and the databricks mounts APIs, if the cluster the mount was most recently created / updated using no longer exists AND the mount is destroyed as a part of a terraform apply, we mark it as deleted without cleaning it up from the workspace. +-> CRUD operations on a databricks mount require a running cluster. Due to limitations of terraform and the databricks mounts APIs, if the cluster the mount was most recently created / updated using no longer exists AND the mount is destroyed as a part of a terraform apply, we mark it as deleted without cleaning it up from the workspace. This resource provides two ways of mounting a storage account: @@ -62,9 +62,9 @@ resource "databricks_mount" "this" { ### Example mounting ADLS Gen2 with AAD passthrough --> **Note** AAD passthrough is considered a legacy data access pattern. Use Unity Catalog for fine-grained data access control. +-> AAD passthrough is considered a legacy data access pattern. Use Unity Catalog for fine-grained data access control. --> **Note** Mounts using AAD passthrough cannot be created using a service principal. +-> Mounts using AAD passthrough cannot be created using a service principal. To mount ALDS Gen2 with Azure Active Directory Credentials passthrough we need to execute the mount commands using the cluster configured with AAD Credentials passthrough & provide necessary configuration parameters (see [documentation](https://docs.microsoft.com/en-us/azure/databricks/security/credential-passthrough/adls-passthrough#--mount-azure-data-lake-storage-to-dbfs-using-credential-passthrough) for more details). @@ -341,7 +341,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. 
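To illustrate the recommendation above about pinning a cluster for mount operations, here is a minimal sketch (not part of this patch) that reuses an existing cluster instead of relying on the auto-created `terraform-mount` cluster; the bucket name, instance profile, and cluster references are assumptions.

```hcl
resource "databricks_mount" "project_data" {
  name       = "project-data"               # mounted at dbfs:/mnt/project-data
  cluster_id = databricks_cluster.shared.id # assumption: an existing, auto-terminating cluster

  s3 {
    bucket_name      = "example-project-bucket" # assumption
    instance_profile = databricks_instance_profile.shared.id
  }
}
```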
## Related Resources diff --git a/docs/resources/mws_credentials.md b/docs/resources/mws_credentials.md index 7271ee6a68..0a5c69daae 100644 --- a/docs/resources/mws_credentials.md +++ b/docs/resources/mws_credentials.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_mws_credentials Resource --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` This resource to configure the cross-account role for creation of new workspaces within AWS. diff --git a/docs/resources/mws_customer_managed_keys.md b/docs/resources/mws_customer_managed_keys.md index 3d46c6707c..206158766d 100644 --- a/docs/resources/mws_customer_managed_keys.md +++ b/docs/resources/mws_customer_managed_keys.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_mws_customer_managed_keys Resource --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` This resource to configure KMS keys for new workspaces within AWS or GCP. This is to support the following features: @@ -14,7 +14,7 @@ Please follow this [complete runnable example](../guides/aws-workspace.md) with ## Example Usage --> **Note** If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour. +-> If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour. ### Customer-managed key for managed services @@ -251,7 +251,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/mws_log_delivery.md b/docs/resources/mws_log_delivery.md index f22b1abbf6..98e25273ff 100644 --- a/docs/resources/mws_log_delivery.md +++ b/docs/resources/mws_log_delivery.md @@ -3,7 +3,7 @@ subcategory: "Log Delivery" --- # databricks_mws_log_delivery Resource --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` This resource configures the delivery of the two supported log types from Databricks workspaces: [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html). @@ -150,7 +150,7 @@ Resource exports the following attributes: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. 
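The `alias = "mws"` convention that these notes keep referring to boils down to a second provider block pointed at the accounts host. The following sketch (not part of this patch) shows the general shape, with the account ID and OAuth credentials supplied as assumed variables and one of the account-level resources using the alias.

```hcl
provider "databricks" {
  alias         = "mws"
  host          = "https://accounts.cloud.databricks.com"
  account_id    = var.databricks_account_id # assumption: account ID from a variable
  client_id     = var.client_id             # assumption: service principal OAuth credentials
  client_secret = var.client_secret
}

resource "databricks_mws_credentials" "this" {
  provider         = databricks.mws
  credentials_name = "cross-account-role"
  role_arn         = var.crossaccount_role_arn # assumption: IAM role created separately
}
```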
## Related Resources diff --git a/docs/resources/mws_ncc_binding.md b/docs/resources/mws_ncc_binding.md index af64e6a94f..6615294941 100644 --- a/docs/resources/mws_ncc_binding.md +++ b/docs/resources/mws_ncc_binding.md @@ -3,9 +3,9 @@ subcategory: "Deployment" --- # databricks_mws_ncc_binding Resource --> **Note** Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources. +-> Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources. --> **Public Preview** This feature is available for AWS & Azure only, and is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html) in AWS. +-> This feature is available for AWS & Azure only, and is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html) in AWS. Allows you to attach a [Network Connectivity Config](mws_network_connectivity_config) object to a [databricks_mws_workspaces](mws_workspaces.md) resource to create a [Databricks Workspace that leverages serverless network connectivity configs](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/serverless-firewall). diff --git a/docs/resources/mws_ncc_private_endpoint_rule.md b/docs/resources/mws_ncc_private_endpoint_rule.md index 0180f1e587..50fba93908 100644 --- a/docs/resources/mws_ncc_private_endpoint_rule.md +++ b/docs/resources/mws_ncc_private_endpoint_rule.md @@ -3,9 +3,9 @@ subcategory: "Deployment" --- # databricks_mws_ncc_private_endpoint_rule Resource --> **Note** Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources. +-> Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources. --> **Note** This feature is only available in Azure. +-> This feature is only available in Azure. Allows you to create a private endpoint in a [Network Connectivity Config](mws_network_connectivity_config.md) that can be used to [configure private connectivity from serverless compute](https://learn.microsoft.com/en-us/azure/databricks/security/network/serverless-network-security/serverless-private-link). diff --git a/docs/resources/mws_network_connectivity_config.md b/docs/resources/mws_network_connectivity_config.md index 401d8a98db..bf16a35caf 100644 --- a/docs/resources/mws_network_connectivity_config.md +++ b/docs/resources/mws_network_connectivity_config.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_mws_network_connectivity_config Resource --> **Note** Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources. +-> Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources. -> **Public Preview** This feature is available for AWS & Azure only, and is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html) in AWS. 
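As a rough illustration of how the NCC resources above fit together, the following sketch (not part of this patch) creates a network connectivity config and binds it to a workspace; the region, workspace ID, and provider alias are assumptions.

```hcl
resource "databricks_mws_network_connectivity_config" "this" {
  provider = databricks.account
  name     = "ncc-prod"
  region   = "eastus" # assumption: Azure region
}

resource "databricks_mws_ncc_binding" "this" {
  provider                       = databricks.account
  network_connectivity_config_id = databricks_mws_network_connectivity_config.this.network_connectivity_config_id
  workspace_id                   = var.databricks_workspace_id # assumption
}
```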
diff --git a/docs/resources/mws_networks.md b/docs/resources/mws_networks.md index cc26d438c9..1c7f41ee01 100644 --- a/docs/resources/mws_networks.md +++ b/docs/resources/mws_networks.md @@ -5,7 +5,7 @@ subcategory: "Deployment" ## Databricks on AWS usage --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` Use this resource to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. It is essential to understand that this will require you to configure your provider separately for the multiple workspaces resources. @@ -16,13 +16,13 @@ Use this resource to [configure VPC](https://docs.databricks.com/administration- * Subnets must have outbound access to the public network using a [aws_nat_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/nat_gateway) and [aws_internet_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway), or other similar customer-managed appliance infrastructure. * The NAT gateway must be set up in its subnet (public_subnets in the example below) that routes quad-zero (0.0.0.0/0) traffic to an internet gateway or other customer-managed appliance infrastructure. --> **Note** The NAT gateway needs only one IP address per AZ. Hence, the public subnet only needs two IP addresses. In order to limit the number of IP addresses in the public subnet, you can specify a secondary CIDR block (cidr_block_public) using the argument secondary_cidr_blocks then pass it to the public_subnets argument. Please review the [IPv4 CIDR block association restrictions](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) when choosing the secondary cidr block. +-> The NAT gateway needs only one IP address per AZ. Hence, the public subnet only needs two IP addresses. In order to limit the number of IP addresses in the public subnet, you can specify a secondary CIDR block (cidr_block_public) using the argument secondary_cidr_blocks then pass it to the public_subnets argument. Please review the [IPv4 CIDR block association restrictions](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) when choosing the secondary cidr block. Please follow this [complete runnable example](../guides/aws-workspace.md) with new VPC and new workspace setup. Please pay special attention to the fact that there you have two different instances of a databricks provider - one for deploying workspaces (with `host="https://accounts.cloud.databricks.com/"`) and another for the workspace you've created with `databricks_mws_workspaces` resource. If you want both creations of workspaces & clusters within the same Terraform module (essentially the same directory), you should use the provider aliasing feature of Terraform. We strongly recommend having one terraform module to create workspace + PAT token and the rest in different modules. 
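For reference, registering the VPC and subnets described above with Databricks is typically a single `databricks_mws_networks` resource; this sketch (not part of this patch) assumes the VPC, subnets, and security group come from a separate `vpc` module.

```hcl
resource "databricks_mws_networks" "this" {
  provider           = databricks.mws
  account_id         = var.databricks_account_id # assumption
  network_name       = "prod-vpc"
  vpc_id             = module.vpc.vpc_id         # assumption: outputs of a VPC module
  subnet_ids         = module.vpc.private_subnets
  security_group_ids = [module.vpc.default_security_group_id]
}
```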
## Databricks on GCP usage --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.gcp.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.gcp.databricks.com"` and use `provider = databricks.mws` Use this resource to [configure VPC](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/customer-managed-vpc.html) & subnet for new workspaces within GCP. It is essential to understand that this will require you to configure your provider separately for the multiple workspaces resources. @@ -215,7 +215,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/mws_private_access_settings.md b/docs/resources/mws_private_access_settings.md index 3fbc4577b2..24de15f5e7 100644 --- a/docs/resources/mws_private_access_settings.md +++ b/docs/resources/mws_private_access_settings.md @@ -11,7 +11,7 @@ It is strongly recommended that customers read the [Enable AWS Private Link](htt ## Databricks on AWS usage --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` ```hcl resource "databricks_mws_private_access_settings" "pas" { @@ -42,7 +42,7 @@ resource "databricks_mws_workspaces" "this" { ## Databricks on GCP usage --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.gcp.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.gcp.databricks.com"` and use `provider = databricks.mws` ```hcl resource "databricks_mws_workspaces" "this" { @@ -85,7 +85,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/mws_storage_configurations.md b/docs/resources/mws_storage_configurations.md index c7018a4205..779c5f3d67 100644 --- a/docs/resources/mws_storage_configurations.md +++ b/docs/resources/mws_storage_configurations.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_mws_storage_configurations Resource --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` This resource to configure root bucket new workspaces within AWS. @@ -55,7 +55,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. 
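To make the root-bucket wording above concrete, a minimal sketch (not part of this patch) of a storage configuration might look like the following; the bucket resource name is an assumption.

```hcl
resource "databricks_mws_storage_configurations" "this" {
  provider                   = databricks.mws
  account_id                 = var.databricks_account_id                  # assumption
  storage_configuration_name = "prod-root-bucket"
  bucket_name                = aws_s3_bucket.root_storage_bucket.bucket   # assumption: bucket managed elsewhere
}
```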
## Related Resources diff --git a/docs/resources/mws_vpc_endpoint.md b/docs/resources/mws_vpc_endpoint.md index 1600df1e40..97ed95330d 100644 --- a/docs/resources/mws_vpc_endpoint.md +++ b/docs/resources/mws_vpc_endpoint.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_mws_vpc_endpoint Resource --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws` Enables you to register [aws_vpc_endpoint](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_endpoint) resources or gcp vpc_endpoint resources with Databricks such that they can be used as part of a [databricks_mws_networks](mws_networks.md) configuration. @@ -200,7 +200,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/mws_workspaces.md b/docs/resources/mws_workspaces.md index c56f1c51c6..4f48777274 100644 --- a/docs/resources/mws_workspaces.md +++ b/docs/resources/mws_workspaces.md @@ -3,7 +3,7 @@ subcategory: "Deployment" --- # databricks_mws_workspaces resource --> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`. We require all `databricks_mws_*` resources to be created within its own dedicated terraform module of your environment. Usually this module creates VPC and IAM roles as well. Code that creates workspaces and code that [manages workspaces](../guides/workspace-management.md) must be in separate terraform modules to avoid common confusion between `provider = databricks.mws` and `provider = databricks.created_workspace`. This is why we specify `databricks_host` and `databricks_token` outputs, that have to be used in the latter modules: +-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`. We require all `databricks_mws_*` resources to be created within their own dedicated terraform module of your environment. Usually this module creates VPC and IAM roles as well. Code that creates workspaces and code that [manages workspaces](../guides/workspace-management.md) must be in separate terraform modules to avoid common confusion between `provider = databricks.mws` and `provider = databricks.created_workspace`. This is why we specify `databricks_host` and `databricks_token` outputs that have to be used in the latter modules: ```hcl provider "databricks" { @@ -14,7 +14,7 @@ provider "databricks" { This resource allows you to set up [workspaces on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1) or [workspaces on GCP](https://docs.gcp.databricks.com/administration-guide/account-settings-gcp/workspaces.html). Please follow this complete runnable example on [AWS](../guides/aws-workspace.md) or [GCP](../guides/gcp-workspace.md) with new VPC and new workspace setup. --> **Note** On Azure you need to use [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace) resource to create Azure Databricks workspaces.
+-> On Azure you need to use [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace) resource to create Azure Databricks workspaces. ## Example Usage @@ -315,7 +315,7 @@ output "databricks_token" { ## Argument Reference --> **Note** All workspaces would be verified to get into runnable state or deleted upon failure. You can only update `credentials_id`, `network_id`, and `storage_customer_managed_key_id`, `private_access_settings_id` on a running workspace. +-> All workspaces would be verified to get into runnable state or deleted upon failure. You can only update `credentials_id`, `network_id`, and `storage_customer_managed_key_id`, `private_access_settings_id` on a running workspace. The following arguments are available: @@ -342,7 +342,7 @@ The following arguments are available: You can specify a `token` block in the body of the workspace resource, so that Terraform manages the refresh of the PAT token for the deployment user. The other option is to create [databricks_obo_token](obo_token.md), though it requires Premium or Enterprise plan enabled as well as more complex setup. Token block exposes `token_value`, that holds sensitive PAT token and optionally it can accept two arguments: --> **Note** Tokens managed by `token {}` block are recreated when expired. +-> Tokens managed by `token {}` block are recreated when expired. * `comment` - (Optional) Comment, that will appear in "User Settings / Access Tokens" page on Workspace UI. By default it's "Terraform PAT". * `lifetime_seconds` - (Optional) Token expiry lifetime. By default its 2592000 (30 days). @@ -392,7 +392,7 @@ You can reset local DNS caches before provisioning new workspaces with one of th ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/notebook.md b/docs/resources/notebook.md index 1c00e64d4d..2f2bc46e79 100644 --- a/docs/resources/notebook.md +++ b/docs/resources/notebook.md @@ -44,7 +44,7 @@ resource "databricks_notebook" "lesson" { ## Argument Reference --> **Note** Notebook on Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed notebook won't be overwritten by Terraform, if there's no local change to notebook sources. Notebooks are identified by their path, so changing notebook's name manually on the workspace and then applying Terraform state would result in creation of notebook from Terraform state. +-> Notebook on Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed notebook won't be overwritten by Terraform, if there's no local change to notebook sources. Notebooks are identified by their path, so changing notebook's name manually on the workspace and then applying Terraform state would result in creation of notebook from Terraform state. The size of a notebook source code must not exceed a few megabytes. 
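Since notebooks are identified by `path` and only re-deployed when their local source changes, a typical minimal configuration (not part of this patch) keeps the source in the module directory; the file path and workspace path below are assumptions.

```hcl
resource "databricks_notebook" "ddl" {
  source = "${path.module}/notebooks/create_tables.py" # assumption: local notebook source file
  path   = "/Shared/ddl/create_tables"                 # workspace path that identifies the notebook
}
```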
The following arguments are supported: diff --git a/docs/resources/obo_token.md b/docs/resources/obo_token.md index d43c6b6975..bd5a00c6ca 100644 --- a/docs/resources/obo_token.md +++ b/docs/resources/obo_token.md @@ -9,7 +9,7 @@ This resource creates [On-Behalf-Of tokens](https://docs.databricks.com/administ Creating a token for a narrowly-scoped service principal, that would be the only one (besides admins) allowed to use PAT token in this given workspace, keeping your automated deployment highly secure. --> **Note** A given declaration of `databricks_permissions.token_usage` would OVERWRITE permissions to use PAT tokens from any existing groups with token usage permissions such as the `users` group. To avoid this, be sure to include any desired groups in additional `access_control` blocks in the Terraform configuration file. +-> A given declaration of `databricks_permissions.token_usage` would OVERWRITE permissions to use PAT tokens from any existing groups with token usage permissions such as the `users` group. To avoid this, be sure to include any desired groups in additional `access_control` blocks in the Terraform configuration file. ```hcl resource "databricks_service_principal" "this" { @@ -78,7 +78,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/online_table.md b/docs/resources/online_table.md index dbaaeafa58..df026991aa 100644 --- a/docs/resources/online_table.md +++ b/docs/resources/online_table.md @@ -3,8 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_online_table (Resource) - --> **Note** This resource can only be used on a Unity Catalog-enabled workspace! +-> This resource can only be used on a Unity Catalog-enabled workspace! This resource allows you to create [Online Table](https://docs.databricks.com/en/machine-learning/feature-store/online-tables.html) in Databricks. An online table is a read-only copy of a Delta Table that is stored in row-oriented format optimized for online access. Online tables are fully serverless tables that auto-scale throughput capacity with the request load and provide low latency and high throughput access to data of any scale. Online tables are designed to work with Databricks Model Serving, Feature Serving, and retrieval-augmented generation (RAG) applications where they are used for fast data lookups. @@ -26,8 +25,7 @@ resource "databricks_online_table" "this" { ## Argument Reference --> **Note** If any parameter changes, online table is recreated. - +~> If any parameter changes, online table is recreated. The following arguments are supported - check [API docs](https://docs.databricks.com/api/workspace/onlinetables/create) for all supported parameters: diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md index b47a43aba3..8e2e236dfe 100644 --- a/docs/resources/permissions.md +++ b/docs/resources/permissions.md @@ -6,13 +6,13 @@ subcategory: "Security" This resource allows you to generically manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspaces. It ensures that only _admins_, _authenticated principal_ and those declared within `access_control` blocks would have specified access. It is not possible to remove management rights from _admins_ group. --> **Note** This resource is _authoritative_ for permissions on objects. 
Configuring this resource for an object will **OVERWRITE** any existing permissions of the same type unless imported, and changes made outside of Terraform will be reset. +~> This resource is _authoritative_ for permissions on objects. Configuring this resource for an object will **OVERWRITE** any existing permissions of the same type unless imported, and changes made outside of Terraform will be reset. --> **Note** It is not possible to lower permissions for `admins`, so Databricks Terraform Provider removes those `access_control` blocks automatically. +-> It is not possible to lower permissions for `admins`, so Databricks Terraform Provider removes those `access_control` blocks automatically. --> **Note** If multiple permission levels are specified for an identity (e.g. `CAN_RESTART` and `CAN_MANAGE` for a cluster), only the highest level permission is returned and will cause permanent drift. +-> If multiple permission levels are specified for an identity (e.g. `CAN_RESTART` and `CAN_MANAGE` for a cluster), only the highest level permission is returned and will cause permanent drift. --> **Warning** To manage access control on service principals, use [databricks_access_control_rule_set](access_control_rule_set.md). +~> To manage access control on service principals, use [databricks_access_control_rule_set](access_control_rule_set.md). ## Cluster usage @@ -347,7 +347,7 @@ resource "databricks_permissions" "notebook_usage_by_id" { } ``` --> **Note**: when importing a permissions resource, only the `notebook_id` is filled! +-> When importing a permissions resource, only the `notebook_id` is filled! ## Workspace file usage @@ -408,7 +408,7 @@ resource "databricks_permissions" "workspace_file_usage_by_id" { } ``` --> **Note**: when importing a permissions resource, only the `workspace_file_id` is filled! +-> When importing a permissions resource, only the `workspace_file_id` is filled! ## Folder usage @@ -474,7 +474,7 @@ resource "databricks_permissions" "folder_usage_by_id" { } ``` --> **Note**: when importing a permissions resource, only the `directory_id` is filled! +-> When importing a permissions resource, only the `directory_id` is filled! ## Repos usage @@ -801,7 +801,7 @@ resource "databricks_permissions" "sql_dashboard_usage" { [SQL queries](https://docs.databricks.com/sql/user/security/access-control/query-acl.html) have three possible permissions: `CAN_VIEW`, `CAN_RUN` and `CAN_MANAGE`: --> **Note** If you do not define an `access_control` block granting `CAN_MANAGE` explictly for the user calling this provider, Databricks Terraform Provider will add `CAN_MANAGE` permission for the caller. This is a failsafe to prevent situations where the caller is locked out from making changes to the targeted `databricks_sql_query` resource when backend API do not apply permission inheritance correctly. +-> If you do not define an `access_control` block granting `CAN_MANAGE` explicitly for the user calling this provider, the Databricks Terraform Provider will add `CAN_MANAGE` permission for the caller. This is a failsafe to prevent situations where the caller is locked out from making changes to the targeted `databricks_sql_query` resource when the backend API does not apply permission inheritance correctly.
```hcl resource "databricks_group" "auto" { @@ -912,7 +912,7 @@ access_control { Arguments for the `access_control` block are: --> **Note** It is not possible to lower permissions for `admins` or your own user anywhere from `CAN_MANAGE` level, so Databricks Terraform Provider [removes](https://github.com/databricks/terraform-provider-databricks/blob/main/permissions/resource_permissions.go#L324-L332) those `access_control` blocks automatically. +-> It is not possible to lower permissions for `admins` or your own user anywhere from `CAN_MANAGE` level, so Databricks Terraform Provider [removes](https://github.com/databricks/terraform-provider-databricks/blob/main/permissions/resource_permissions.go#L324-L332) those `access_control` blocks automatically. - `permission_level` - (Required) permission level according to specific resource. See examples above for the reference. diff --git a/docs/resources/provider.md b/docs/resources/provider.md index 25ebe76601..6366a1f69b 100644 --- a/docs/resources/provider.md +++ b/docs/resources/provider.md @@ -3,13 +3,13 @@ subcategory: "Delta Sharing" --- # databricks_provider Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! In Delta Sharing, a provider is an entity that shares data with a recipient. Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you. A `databricks_provider` is contained within [databricks_metastore](metastore.md) and can contain a list of shares that have been shared with you. -Note that Databricks to Databricks sharing automatically creates the provider. +-> Databricks to Databricks sharing automatically creates the provider. ## Example Usage diff --git a/docs/resources/recipient.md b/docs/resources/recipient.md index 6df597ea92..0f88cd05ea 100644 --- a/docs/resources/recipient.md +++ b/docs/resources/recipient.md @@ -3,7 +3,7 @@ subcategory: "Delta Sharing" --- # databricks_recipient Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! In Delta Sharing, a recipient is an entity that receives shares from a provider. In Unity Catalog, a share is a securable object that represents an organization and associates it with a credential or secure sharing identifier that allows that organization to access one or more shares. diff --git a/docs/resources/registered_model.md b/docs/resources/registered_model.md index 44c583102b..4de27e474d 100644 --- a/docs/resources/registered_model.md +++ b/docs/resources/registered_model.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_registered_model Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks. diff --git a/docs/resources/repo.md b/docs/resources/repo.md index b7d40b93ab..c03805ee4f 100644 --- a/docs/resources/repo.md +++ b/docs/resources/repo.md @@ -5,7 +5,7 @@ subcategory: "Workspace" This resource allows you to manage [Databricks Git folders](https://docs.databricks.com/en/repos/index.html) (formerly known as Databricks Repos). 
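For the `databricks_registered_model` resource introduced just above, a minimal sketch (not part of this patch) only needs the three-level Unity Catalog name split into its parts; the catalog, schema, and model names are assumptions.

```hcl
resource "databricks_registered_model" "this" {
  catalog_name = "ml"               # assumption
  schema_name  = "models"           # assumption
  name         = "revenue_forecast" # assumption
  comment      = "Registered model managed by Terraform"
}
```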
--> **Note** To create a Git folder from a private repository you need to configure Git token as described in the [documentation](https://docs.databricks.com/en/repos/index.html#configure-your-git-integration-with-databricks). To set this token you can use [databricks_git_credential](git_credential.md) resource. +-> To create a Git folder from a private repository you need to configure Git token as described in the [documentation](https://docs.databricks.com/en/repos/index.html#configure-your-git-integration-with-databricks). To set this token you can use [databricks_git_credential](git_credential.md) resource. ## Example Usage @@ -20,7 +20,7 @@ resource "databricks_repo" "nutter_in_home" { ## Argument Reference --> **Note** Git folder in Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed repository won't be overwritten by Terraform, if there's no local changes to configuration. If Git folder in Databricks workspace is modified, application of configuration changes will fail. +-> Git folder in Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed repository won't be overwritten by Terraform, if there's no local changes to configuration. If Git folder in Databricks workspace is modified, application of configuration changes will fail. The following arguments are supported: diff --git a/docs/resources/restrict_workspace_admins_setting.md b/docs/resources/restrict_workspace_admins_setting.md index 765825f866..988fed0052 100644 --- a/docs/resources/restrict_workspace_admins_setting.md +++ b/docs/resources/restrict_workspace_admins_setting.md @@ -4,7 +4,7 @@ subcategory: "Settings" # databricks_restrict_workspace_admins_setting Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! The `databricks_restrict_workspace_admins_setting` resource lets you control the capabilities of workspace admins. @@ -20,7 +20,7 @@ With the status set to `RESTRICT_TOKENS_AND_JOB_RUN_AS`, workspace admins can: 2. Only change a job owner to themselves. 3. Only change the job run_as setting to themselves a service principal on which they have the Service Principal User role. --> **Note** Only account admins can update the setting. And the account admin must be part of the workspace to change the setting status. +~> Only account admins can update the setting. And the account admin must be part of the workspace to change the setting status. ## Example Usage diff --git a/docs/resources/schema.md b/docs/resources/schema.md index 65a144c4e9..1f1442ee7d 100644 --- a/docs/resources/schema.md +++ b/docs/resources/schema.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_schema Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views. diff --git a/docs/resources/service_principal.md b/docs/resources/service_principal.md index 717b76acc7..cac064512a 100644 --- a/docs/resources/service_principal.md +++ b/docs/resources/service_principal.md @@ -11,9 +11,9 @@ There are different types of service principals: * Databricks-managed - exists only inside the Databricks platform (all clouds) and couldn't be used for accessing non-Databricks services. 
* Azure-managed - existing Azure service principal (enterprise application) is registered inside Databricks. It could be used to work with other Azure services. --> **Note** To assign account level service principals to workspace use [databricks_mws_permission_assignment](mws_permission_assignment.md). +-> To assign account-level service principals to a workspace, use [databricks_mws_permission_assignment](mws_permission_assignment.md). --> **Note** Entitlements, like, `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, `workspace_access` applicable only for workspace-level service principals. Use [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level service principals. +-> Entitlements, such as `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, and `workspace_access`, are applicable only to workspace-level service principals. Use the [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level service principals. To create service principals in the Databricks account, the provider must be configured with `host = "https://accounts.cloud.databricks.com"` on AWS deployments or `host = "https://accounts.azuredatabricks.net"` and authenticate using the supported authentication method for account operations. diff --git a/docs/resources/service_principal_role.md b/docs/resources/service_principal_role.md index 511089d7b0..f7ef4371d0 100644 --- a/docs/resources/service_principal_role.md +++ b/docs/resources/service_principal_role.md @@ -39,7 +39,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/service_principal_secret.md b/docs/resources/service_principal_secret.md index f98abd9b3e..05f740d864 100644 --- a/docs/resources/service_principal_secret.md +++ b/docs/resources/service_principal_secret.md @@ -3,7 +3,7 @@ subcategory: "Security" --- # databricks_service_principal_secret Resource --> **Note** This resource can only be used with an account-level provider. +-> This resource can only be used with an account-level provider. With this resource you can create a secret for a given [Service Principals](https://docs.databricks.com/administration-guide/users-groups/service-principals.html). diff --git a/docs/resources/share.md b/docs/resources/share.md index 38252a8818..5dfb7128c0 100644 --- a/docs/resources/share.md +++ b/docs/resources/share.md @@ -3,7 +3,7 @@ subcategory: "Delta Sharing" --- # databricks_share Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! In Delta Sharing, a share is a read-only collection of tables and table partitions that a provider wants to share with one or more recipients. If your recipient uses a Unity Catalog-enabled Databricks workspace, you can also include notebook files, views (including dynamic views that restrict access at the row and column level), Unity Catalog volumes, and Unity Catalog models in a share. @@ -11,7 +11,7 @@ In a Unity Catalog-enabled Databricks workspace, a share is a securable object r ## Example Usage --> **Note** In Terraform configuration, it is recommended to define objects in alphabetical order of their `name` arguments, so that you get consistent and readable diff.
Whenever objects are added or removed, or `name` is renamed, you'll observe a change in the majority of tasks. It's related to the fact that the current version of the provider treats `object` blocks as an ordered list. Alternatively, `object` block could have been an unordered set, though end-users would see the entire block replaced upon a change in single property of the task. +-> In Terraform configuration, it is recommended to define objects in alphabetical order of their `name` arguments, so that you get consistent and readable diff. Whenever objects are added or removed, or `name` is renamed, you'll observe a change in the majority of tasks. It's related to the fact that the current version of the provider treats `object` blocks as an ordered list. Alternatively, `object` block could have been an unordered set, though end-users would see the entire block replaced upon a change in single property of the task. Creating a Delta Sharing share and add some existing tables to it diff --git a/docs/resources/sql_alert.md b/docs/resources/sql_alert.md index f523c2acc7..689a52a5d5 100644 --- a/docs/resources/sql_alert.md +++ b/docs/resources/sql_alert.md @@ -5,7 +5,7 @@ subcategory: "Databricks SQL" This resource allows you to manage [Databricks SQL Alerts](https://docs.databricks.com/sql/user/queries/index.html). -**Note:** To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access). +-> To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access). ## Example Usage diff --git a/docs/resources/sql_dashboard.md b/docs/resources/sql_dashboard.md index 42551121a2..5c153f96c1 100644 --- a/docs/resources/sql_dashboard.md +++ b/docs/resources/sql_dashboard.md @@ -3,12 +3,12 @@ subcategory: "Databricks SQL" --- # databricks_sql_dashboard Resource --> **Note:** Please switch to [databricks_dashboard](dashboard.md) to author new AI/BI dashboards using the latest tooling +-> Please switch to [databricks_dashboard](dashboard.md) to author new AI/BI dashboards using the latest tooling. This resource is used to manage [Legacy dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). To manage [SQL resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access). --> **Note:** documentation for this resource is a work in progress. +-> documentation for this resource is a work in progress. A dashboard may have one or more [widgets](sql_widget.md). diff --git a/docs/resources/sql_permissions.md b/docs/resources/sql_permissions.md index 663cd92dbe..43f754391e 100644 --- a/docs/resources/sql_permissions.md +++ b/docs/resources/sql_permissions.md @@ -3,7 +3,7 @@ subcategory: "Security" --- # databricks_sql_permissions Resource --> **Note** Please switch to [databricks_grants](grants.md) with Unity Catalog to manage data access, which provides a better and faster way for managing data security. `databricks_grants` resource *doesn't require a technical cluster to perform operations*. 
On workspaces with Unity Catalog enabled, you may run into errors such as `Error: cannot create sql permissions: cannot read current grants: For unity catalog, please specify the catalog name explicitly. E.g. SHOW GRANT ``your.address@email.com`` ON CATALOG main`. This happens if your `default_catalog_name` was set to a UC catalog instead of `hive_metastore`. The workaround is to re-assign the metastore again with the default catalog set to be `hive_metastore`. See [databricks_metastore_assignment](metastore_assignment.md). +-> Please switch to [databricks_grants](grants.md) with Unity Catalog to manage data access, which provides a better and faster way for managing data security. `databricks_grants` resource *doesn't require a technical cluster to perform operations*. On workspaces with Unity Catalog enabled, you may run into errors such as `Error: cannot create sql permissions: cannot read current grants: For unity catalog, please specify the catalog name explicitly. E.g. SHOW GRANT ``your.address@email.com`` ON CATALOG main`. This happens if your `default_catalog_name` was set to a UC catalog instead of `hive_metastore`. The workaround is to re-assign the metastore again with the default catalog set to be `hive_metastore`. See [databricks_metastore_assignment](metastore_assignment.md). This resource manages data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). In order to enable Table Access control, you have to login to the workspace as administrator, go to `Admin Console`, pick `Access Control` tab, click on `Enable` button in `Table Access Control` section, and click `Confirm`. The security guarantees of table access control **will only be effective if cluster access control is also turned on**. Please make sure that no users can create clusters in your workspace and all [databricks_cluster](cluster.md) have approximately the following configuration: diff --git a/docs/resources/sql_query.md b/docs/resources/sql_query.md index 27e12e03ea..90120a28a2 100644 --- a/docs/resources/sql_query.md +++ b/docs/resources/sql_query.md @@ -5,7 +5,7 @@ subcategory: "Databricks SQL" To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access). -**Note:** documentation for this resource is a work in progress. +-> documentation for this resource is a work in progress. A query may have one or more [visualizations](sql_visualization.md). diff --git a/docs/resources/sql_visualization.md b/docs/resources/sql_visualization.md index b9ea7d6c99..b9dcf3b3a6 100644 --- a/docs/resources/sql_visualization.md +++ b/docs/resources/sql_visualization.md @@ -5,7 +5,7 @@ subcategory: "Databricks SQL" To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access). -**Note:** documentation for this resource is a work in progress. +-> documentation for this resource is a work in progress. A visualization is always tied to a [query](sql_query.md). Every query may have one or more visualizations. 
diff --git a/docs/resources/sql_widget.md b/docs/resources/sql_widget.md index e890142dd8..05fed72737 100644 --- a/docs/resources/sql_widget.md +++ b/docs/resources/sql_widget.md @@ -3,11 +3,11 @@ subcategory: "Databricks SQL" --- # databricks_sql_widget Resource --> **Note:** Please switch to [databricks_dashboard](dashboard.md) to author new AI/BI dashboards using the latest tooling +-> Please switch to [databricks_dashboard](dashboard.md) to author new AI/BI dashboards using the latest tooling To manage [SQL resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access). --> **Note:** documentation for this resource is a work in progress. +-> documentation for this resource is a work in progress. A widget is always tied to a [Legacy dashboard](sql_dashboard.md). Every dashboard may have one or more widgets. diff --git a/docs/resources/storage_credential.md b/docs/resources/storage_credential.md index b57120e8dd..87d90b853b 100644 --- a/docs/resources/storage_credential.md +++ b/docs/resources/storage_credential.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_storage_credential Resource --> **Note** This resource can be used with an account or workspace-level provider. +-> This resource can be used with an account or workspace-level provider. To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage: diff --git a/docs/resources/system_schema.md b/docs/resources/system_schema.md index 80634f0859..02945703f9 100644 --- a/docs/resources/system_schema.md +++ b/docs/resources/system_schema.md @@ -3,9 +3,9 @@ subcategory: "Unity Catalog" --- # databricks_system_schema Resource --> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). +-> This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! Manages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin. diff --git a/docs/resources/token.md b/docs/resources/token.md index 307a604c11..281399cffc 100644 --- a/docs/resources/token.md +++ b/docs/resources/token.md @@ -62,4 +62,4 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. diff --git a/docs/resources/user.md b/docs/resources/user.md index 03e16365c3..1e5633e541 100644 --- a/docs/resources/user.md +++ b/docs/resources/user.md @@ -5,9 +5,9 @@ subcategory: "Security" This resource allows you to manage [users in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/users.html), [Databricks Account Console](https://accounts.cloud.databricks.com/) or [Azure Databricks Account Console](https://accounts.azuredatabricks.net). You can also [associate](group_member.md) Databricks users to [databricks_group](group.md). Upon user creation the user will receive a welcome email. 
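A workspace-level user definition is usually just a couple of attributes; the sketch below (not part of this patch) uses an assumed email address and display name.

```hcl
resource "databricks_user" "analyst" {
  user_name    = "first.last@example.com" # assumption
  display_name = "First Last"             # assumption
}
```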
You can also get information about caller identity using [databricks_current_user](../data-sources/current_user.md) data source. --> **Note** To assign account level users to workspace use [databricks_mws_permission_assignment](mws_permission_assignment.md). +-> To assign account-level users to a workspace, use [databricks_mws_permission_assignment](mws_permission_assignment.md). --> **Note** Entitlements, like, `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, `workspace_access` applicable only for workspace-level users. Use [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level users. +-> Entitlements, such as `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, and `workspace_access`, are applicable only to workspace-level users. Use the [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level users. To create users in the Databricks account, the provider must be configured with `host = "https://accounts.cloud.databricks.com"` on AWS deployments or `host = "https://accounts.azuredatabricks.net"` and authenticate using [AAD tokens](https://registry.terraform.io/providers/databricks/databricks/latest/docs#special-configurations-for-azure) on Azure deployments. diff --git a/docs/resources/user_instance_profile.md b/docs/resources/user_instance_profile.md index 88e6016c8e..1b050b386f 100644 --- a/docs/resources/user_instance_profile.md +++ b/docs/resources/user_instance_profile.md @@ -39,7 +39,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/user_role.md b/docs/resources/user_role.md index 5921b2c886..8ece375046 100644 --- a/docs/resources/user_role.md +++ b/docs/resources/user_role.md @@ -59,7 +59,7 @@ In addition to all arguments above, the following attributes are exported: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. ## Related Resources diff --git a/docs/resources/vector_search_endpoint.md b/docs/resources/vector_search_endpoint.md index c90de0c25a..4f167bc9f6 100644 --- a/docs/resources/vector_search_endpoint.md +++ b/docs/resources/vector_search_endpoint.md @@ -3,7 +3,7 @@ subcategory: "Mosaic AI Vector Search" --- # databricks_vector_search_endpoint Resource --> **Note** This resource can only be used on a Unity Catalog-enabled workspace! +-> This resource can only be used on a Unity Catalog-enabled workspace! This resource allows you to create [Mosaic AI Vector Search Endpoint](https://docs.databricks.com/en/generative-ai/vector-search.html) in Databricks. Mosaic AI Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Mosaic AI Vector Search Endpoint is used to create and access vector search indexes. diff --git a/docs/resources/vector_search_index.md b/docs/resources/vector_search_index.md index 0de0ac2c1f..d06db90637 100644 --- a/docs/resources/vector_search_index.md +++ b/docs/resources/vector_search_index.md @@ -3,7 +3,7 @@ subcategory: "Mosaic AI Vector Search" --- # databricks_vector_search_index Resource --> **Note** This resource can only be used on a Unity Catalog-enabled workspace!
+-> This resource can only be used on a Unity Catalog-enabled workspace! This resource allows you to create [Mosaic AI Vector Search Index](https://docs.databricks.com/en/generative-ai/create-query-vector-search.html) in Databricks. Mosaic AI Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Mosaic AI Vector Search Index provides the ability to search data in the linked Delta Table. diff --git a/docs/resources/volume.md b/docs/resources/volume.md index b116e42129..e95f54d8f3 100644 --- a/docs/resources/volume.md +++ b/docs/resources/volume.md @@ -3,9 +3,9 @@ subcategory: "Unity Catalog" --- # databricks_volume (Resource) --> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). +-> This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html). --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data. diff --git a/docs/resources/workspace_binding.md b/docs/resources/workspace_binding.md index 5a649bd9e4..e7dd0105af 100644 --- a/docs/resources/workspace_binding.md +++ b/docs/resources/workspace_binding.md @@ -3,17 +3,15 @@ subcategory: "Unity Catalog" --- # databricks_workspace_binding Resource --> **Note** This resource can only be used with a workspace-level provider! +-> This resource can only be used with a workspace-level provider! If you use workspaces to isolate user data access, you may want to limit access to catalog, external locations or storage credentials from specific workspaces in your account, also known as workspace binding By default, Databricks assigns the securable to all workspaces attached to the current metastore. By using `databricks_workspace_binding`, the securable will be unassigned from all workspaces and only assigned explicitly using this resource. --> **Note** - To use this resource the securable must have its isolation mode set to `ISOLATED` (for [databricks_catalog](catalog.md)) or `ISOLATION_MODE_ISOLATED` (for (for [databricks_external_location](external_location.md) or [databricks_storage_credential](storage_credential.md)) for the `isolation_mode` attribute. Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration), [this guide](https://docs.databricks.com/en/connect/unity-catalog/external-locations.html#workspace-binding) or [this guide](https://docs.databricks.com/en/connect/unity-catalog/storage-credentials.html#optional-assign-a-storage-credential-to-specific-workspaces). +-> To use this resource, the securable must have its isolation mode set to `ISOLATED` (for [databricks_catalog](catalog.md)) or `ISOLATION_MODE_ISOLATED` (for [databricks_external_location](external_location.md) or [databricks_storage_credential](storage_credential.md)) for the `isolation_mode` attribute.
Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration), [this guide](https://docs.databricks.com/en/connect/unity-catalog/external-locations.html#workspace-binding) or [this guide](https://docs.databricks.com/en/connect/unity-catalog/storage-credentials.html#optional-assign-a-storage-credential-to-specific-workspaces). --> **Note** - If the securable's isolation mode was set to `ISOLATED` using Terraform then the securable will have been automatically bound to the workspace it was created from. +-> If the securable's isolation mode was set to `ISOLATED` using Terraform then the securable will have been automatically bound to the workspace it was created from. ## Example Usage diff --git a/docs/resources/workspace_conf.md b/docs/resources/workspace_conf.md index 0986f8a133..6e02461381 100644 --- a/docs/resources/workspace_conf.md +++ b/docs/resources/workspace_conf.md @@ -4,11 +4,11 @@ subcategory: "Workspace" # databricks_workspace_conf Resource --> **Note** This resource has an evolving API, which may change in future versions of the provider. +~> This resource has an evolving API, which may change in future versions of the provider. Manages workspace configuration for expert usage. Currently, more than one instance of resource can exist in Terraform state, though there's no deterministic behavior, when they manage the same property. We strongly recommend to use a single `databricks_workspace_conf` per workspace. --> **Note** Deleting `databricks_workspace_conf` resources may fail depending on the configuration properties set, including but not limited to `enableIpAccessLists`, `enableGp3`, and `maxTokenLifetimeDays`. The provider will print a warning if this occurs. You can verify the workspace configuration by reviewing [the workspace settings in the UI](https://docs.databricks.com/en/admin/workspace-settings/index.html). +-> Deleting `databricks_workspace_conf` resources may fail depending on the configuration properties set, including but not limited to `enableIpAccessLists`, `enableGp3`, and `maxTokenLifetimeDays`. The provider will print a warning if this occurs. You can verify the workspace configuration by reviewing [the workspace settings in the UI](https://docs.databricks.com/en/admin/workspace-settings/index.html). ## Example Usage @@ -36,4 +36,4 @@ The following arguments are available: ## Import --> **Note** Importing this resource is not currently supported. +!> Importing this resource is not currently supported. diff --git a/docs/resources/workspace_file.md b/docs/resources/workspace_file.md index f7cbc8e1de..997e7eac24 100644 --- a/docs/resources/workspace_file.md +++ b/docs/resources/workspace_file.md @@ -34,7 +34,7 @@ resource "databricks_workspace_file" "init_script" { ## Argument Reference --> **Note** Files in Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed workspace files won't be overwritten by Terraform, if there's no local change to file sources. Workspace files are identified by their path, so changing file's name manually on the workspace and then applying Terraform state would result in creation of workspace file from Terraform state. +-> Files in Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed workspace files won't be overwritten by Terraform, if there's no local change to file sources. 
Workspace files are identified by their path, so changing a file's name manually in the workspace and then applying the Terraform state would result in the creation of a workspace file from the Terraform state. The size of a workspace file source code must not exceed a few megabytes. The following arguments are supported: From c56bc9028885e588204aec574a2e55c5ff38cb3f Mon Sep 17 00:00:00 2001 From: Callum Dempsey Leach Date: Mon, 7 Oct 2024 12:25:40 +0100 Subject: [PATCH 33/99] [Fix] Add Sufficient Network Privileges to the Databricks Default Cross Account Policy (#4027) ## Changes Currently, the Databricks-provided Cross Account Policy IAM Role does not include all the necessary permissions to set up a workspace. Attempting to set up a workspace using this policy results in the following error (see [Issue #4026](https://github.com/databricks/terraform-provider-databricks/issues/4026)): ``` MALFORMED_REQUEST: Failed credentials validation checks: Allocate Address ``` This makes it difficult for new engineers to onboard to Databricks without troubleshooting unexpected errors. This PR adds the missing network permissions to the Databricks Managed VPC policy type ("managed"), ensuring that all required permissions are included for successful workspace deployment. These changes are not applied to the "restricted" policy type to avoid allowing Elastic IP allocations, which may not be desirable for some Databricks customers. See the bottom of the description for the full list. ## Tests This change has been tested locally and is running in our staging workspace using the same configuration. As this is a fix for 'managed'-type Databricks deployment configurations, I have added positive and negative unit tests that guard the precise expected policies. I have then added extra tests to confirm the expected policies across each branch: 'managed', 'customer', and 'restricted'. Feel free to remove these if they're overboard, as I recognise you _could_ make a similar, weaker assertion using 'len'.
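For context, the `json` attribute these tests pin down is what would typically feed the cross-account IAM role. A minimal, illustrative consumer of the data source might look like the sketch below (the resource and policy names are placeholders, not part of this change):

```hcl
# Render the Databricks cross-account policy document for a
# Databricks-managed VPC deployment.
data "databricks_aws_crossaccount_policy" "this" {
  policy_type = "managed"
}

# Attach the rendered JSON to an IAM policy; the name is a placeholder.
resource "aws_iam_policy" "cross_account" {
  name   = "databricks-cross-account-policy"
  policy = data.databricks_aws_crossaccount_policy.this.json
}
```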
- [x] `make test` run locally - [x] Relevant acceptance tests are passing - [ ] Relevant change in `docs/` folder (if necessary) - [x] Covered with integration tests in `internal/acceptance` - [ ] Using Go SDK (N/A) The full list of permissions which align with the Databricks documentation, now included in the "managed" policy type, are: ```json [ "ec2:AllocateAddress", "ec2:AssignPrivateIpAddresses", "ec2:AssociateDhcpOptions", "ec2:AssociateIamInstanceProfile", "ec2:AssociateRouteTable", "ec2:AttachInternetGateway", "ec2:AttachVolume", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", "ec2:CancelSpotInstanceRequests", "ec2:CreateDhcpOptions", "ec2:CreateFleet", "ec2:CreateInternetGateway", "ec2:CreateLaunchTemplate", "ec2:CreateLaunchTemplateVersion", "ec2:CreateNatGateway", "ec2:CreateRoute", "ec2:CreateRouteTable", "ec2:CreateSecurityGroup", "ec2:CreateSubnet", "ec2:CreateTags", "ec2:CreateVolume", "ec2:CreateVpc", "ec2:CreateVpcEndpoint", "ec2:DeleteDhcpOptions", "ec2:DeleteFleets", "ec2:DeleteInternetGateway", "ec2:DeleteLaunchTemplate", "ec2:DeleteLaunchTemplateVersions", "ec2:DeleteNatGateway", "ec2:DeleteRoute", "ec2:DeleteRouteTable", "ec2:DeleteSecurityGroup", "ec2:DeleteSubnet", "ec2:DeleteTags", "ec2:DeleteVolume", "ec2:DeleteVpc", "ec2:DeleteVpcEndpoints", "ec2:DescribeAvailabilityZones", "ec2:DescribeFleetHistory", "ec2:DescribeFleetInstances", "ec2:DescribeFleets", "ec2:DescribeIamInstanceProfileAssociations", "ec2:DescribeInstanceStatus", "ec2:DescribeInstances", "ec2:DescribeInternetGateways", "ec2:DescribeLaunchTemplates", "ec2:DescribeLaunchTemplateVersions", "ec2:DescribeNatGateways", "ec2:DescribeNetworkAcls", "ec2:DescribePrefixLists", "ec2:DescribeReservedInstancesOfferings", "ec2:DescribeRouteTables", "ec2:DescribeSecurityGroups", "ec2:DescribeSpotInstanceRequests", "ec2:DescribeSpotPriceHistory", "ec2:DescribeSubnets", "ec2:DescribeVolumes", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcs", "ec2:DetachInternetGateway", "ec2:DisassociateIamInstanceProfile", "ec2:DisassociateRouteTable", "ec2:GetLaunchTemplateData", "ec2:GetSpotPlacementScores", "ec2:ModifyFleet", "ec2:ModifyLaunchTemplate", "ec2:ModifyVpcAttribute", "ec2:ReleaseAddress", "ec2:ReplaceIamInstanceProfileAssociation", "ec2:RequestSpotInstances", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", "ec2:RunInstances", "ec2:TerminateInstances" ] ``` Resolves #4026 --- aws/data_aws_crossaccount_policy.go | 4 + aws/data_aws_crossaccount_policy_test.go | 471 ++++++++++++++++++++++- 2 files changed, 473 insertions(+), 2 deletions(-) diff --git a/aws/data_aws_crossaccount_policy.go b/aws/data_aws_crossaccount_policy.go index 6737e9376a..a5da5d9365 100644 --- a/aws/data_aws_crossaccount_policy.go +++ b/aws/data_aws_crossaccount_policy.go @@ -103,6 +103,10 @@ func DataAwsCrossaccountPolicy() common.Resource { // additional permissions for Databricks-managed VPC policy if data.PolicyType == "managed" { actions = append(actions, []string{ + "ec2:AttachInternetGateway", + "ec2:AllocateAddress", + "ec2:AssociateDhcpOptions", + "ec2:AssociateRouteTable", "ec2:CreateDhcpOptions", "ec2:CreateInternetGateway", "ec2:CreateNatGateway", diff --git a/aws/data_aws_crossaccount_policy_test.go b/aws/data_aws_crossaccount_policy_test.go index 2bdf183993..177cb166e9 100644 --- a/aws/data_aws_crossaccount_policy_test.go +++ b/aws/data_aws_crossaccount_policy_test.go @@ -16,7 +16,7 @@ func TestDataAwsCrossAccountDatabricksManagedPolicy(t *testing.T) { }.Apply(t) assert.NoError(t, err) j := 
d.Get("json") - assert.Lenf(t, j, 3032, "Strange length for policy: %s", j) + assert.Lenf(t, j, 3171, "Strange length for policy: %s", j) } func TestDataAwsCrossAccountCustomerManagedPolicy(t *testing.T) { @@ -42,7 +42,474 @@ func TestDataAwsCrossAccountPolicy_WithPassRoles(t *testing.T) { }.Apply(t) assert.NoError(t, err) j := d.Get("json") - assert.Lenf(t, j, 3168, "Strange length for policy: %s", j) + assert.Lenf(t, j, 3307, "Strange length for policy: %s", j) +} + +func TestDataAwsCrossAccountManagedPolicyRoles(t *testing.T) { + expectedJSON := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssignPrivateIpAddresses", + "ec2:CancelSpotInstanceRequests", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeIamInstanceProfileAssociations", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribePrefixLists", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:GetSpotPlacementScores", + "ec2:RequestSpotInstances", + "ec2:DescribeFleetHistory", + "ec2:ModifyFleet", + "ec2:DeleteFleets", + "ec2:DescribeFleetInstances", + "ec2:DescribeFleets", + "ec2:CreateFleet", + "ec2:DeleteLaunchTemplate", + "ec2:GetLaunchTemplateData", + "ec2:CreateLaunchTemplate", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:ModifyLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:CreateLaunchTemplateVersion", + "ec2:AssociateIamInstanceProfile", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:DisassociateIamInstanceProfile", + "ec2:ReplaceIamInstanceProfileAssociation", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:TerminateInstances", + "ec2:AttachInternetGateway", + "ec2:AllocateAddress", + "ec2:AssociateDhcpOptions", + "ec2:AssociateRouteTable", + "ec2:CreateDhcpOptions", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:DeleteDhcpOptions", + "ec2:DeleteInternetGateway", + "ec2:DeleteNatGateway", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteVpc", + "ec2:DeleteVpcEndpoints", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:ModifyVpcAttribute", + "ec2:ReleaseAddress" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:PutRolePolicy" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + } + } + ] +}` + + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "managed"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + actualJSON := d.Get("json").(string) + assert.Equal(t, expectedJSON, actualJSON) + + // Negative test: ensure that customer policy is not equal to customer policy + 
managedD, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "customer"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + managedJSON := managedD.Get("json").(string) + assert.NotEqual(t, actualJSON, managedJSON) +} + +func TestDataAwsCrossAccountCustomerManagedPolicyRoles(t *testing.T) { + expectedJSON := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssignPrivateIpAddresses", + "ec2:CancelSpotInstanceRequests", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeIamInstanceProfileAssociations", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribePrefixLists", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:GetSpotPlacementScores", + "ec2:RequestSpotInstances", + "ec2:DescribeFleetHistory", + "ec2:ModifyFleet", + "ec2:DeleteFleets", + "ec2:DescribeFleetInstances", + "ec2:DescribeFleets", + "ec2:CreateFleet", + "ec2:DeleteLaunchTemplate", + "ec2:GetLaunchTemplateData", + "ec2:CreateLaunchTemplate", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:ModifyLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:CreateLaunchTemplateVersion", + "ec2:AssociateIamInstanceProfile", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:DisassociateIamInstanceProfile", + "ec2:ReplaceIamInstanceProfileAssociation", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:TerminateInstances" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:PutRolePolicy" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + } + } + ] +}` + + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "customer"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + actualJSON := d.Get("json").(string) + assert.Equal(t, expectedJSON, actualJSON) + + // Negative test: ensure that customer policy is not equal to managed policy + managedD, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "managed"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + managedJSON := managedD.Get("json").(string) + assert.NotEqual(t, actualJSON, managedJSON) +} + +func TestDataAwsCrossAccountRestrictedPolicyRoles(t *testing.T) { + expectedJSON := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssignPrivateIpAddresses", + "ec2:CancelSpotInstanceRequests", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeIamInstanceProfileAssociations", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribePrefixLists", + "ec2:DescribeReservedInstancesOfferings", + 
"ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:GetSpotPlacementScores", + "ec2:RequestSpotInstances", + "ec2:DescribeFleetHistory", + "ec2:ModifyFleet", + "ec2:DeleteFleets", + "ec2:DescribeFleetInstances", + "ec2:DescribeFleets", + "ec2:CreateFleet", + "ec2:DeleteLaunchTemplate", + "ec2:GetLaunchTemplateData", + "ec2:CreateLaunchTemplate", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:ModifyLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:CreateLaunchTemplateVersion" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:PutRolePolicy" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + } + }, + { + "Sid": "InstancePoolsSupport", + "Effect": "Allow", + "Action": [ + "ec2:AssociateIamInstanceProfile", + "ec2:DisassociateIamInstanceProfile", + "ec2:ReplaceIamInstanceProfileAssociation" + ], + "Resource": "arn:aws:ec2:us-west-2:123456789012:instance/*", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "AllowEc2RunInstancePerTag", + "Effect": "Allow", + "Action": "ec2:RunInstances", + "Resource": [ + "arn:aws:ec2:us-west-2:123456789012:volume/*", + "arn:aws:ec2:us-west-2:123456789012:instance/*" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "AllowEc2RunInstanceImagePerTag", + "Effect": "Allow", + "Action": "ec2:RunInstances", + "Resource": "arn:aws:ec2:us-west-2:123456789012:image/*", + "Condition": { + "StringEquals": { + "aws:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "AllowEc2RunInstancePerVPCid", + "Effect": "Allow", + "Action": "ec2:RunInstances", + "Resource": [ + "arn:aws:ec2:us-west-2:123456789012:network-interface/*", + "arn:aws:ec2:us-west-2:123456789012:subnet/*", + "arn:aws:ec2:us-west-2:123456789012:security-group/*" + ], + "Condition": { + "StringEquals": { + "ec2:vpc": "arn:aws:ec2:us-west-2:123456789012:vpc/vpc-abcdefg12345" + } + } + }, + { + "Sid": "AllowEc2RunInstanceOtherResources", + "Effect": "Allow", + "Action": "ec2:RunInstances", + "NotResource": [ + "arn:aws:ec2:us-west-2:123456789012:image/*", + "arn:aws:ec2:us-west-2:123456789012:network-interface/*", + "arn:aws:ec2:us-west-2:123456789012:subnet/*", + "arn:aws:ec2:us-west-2:123456789012:security-group/*", + "arn:aws:ec2:us-west-2:123456789012:volume/*", + "arn:aws:ec2:us-west-2:123456789012:instance/*" + ] + }, + { + "Sid": "EC2TerminateInstancesTag", + "Effect": "Allow", + "Action": "ec2:TerminateInstances", + "Resource": "arn:aws:ec2:us-west-2:123456789012:instance/*", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "EC2AttachDetachVolumeTag", + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": [ + "arn:aws:ec2:us-west-2:123456789012:instance/*", + "arn:aws:ec2:us-west-2:123456789012:volume/*" + ], + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "EC2CreateVolumeByTag", + "Effect": "Allow", + "Action": "ec2:CreateVolume", + "Resource": 
"arn:aws:ec2:us-west-2:123456789012:volume/*", + "Condition": { + "StringEquals": { + "aws:RequestTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "EC2DeleteVolumeByTag", + "Effect": "Allow", + "Action": "ec2:DeleteVolume", + "Resource": [ + "arn:aws:ec2:us-west-2:123456789012:volume/*" + ], + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "VpcNonresourceSpecificActions", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress" + ], + "Resource": "arn:aws:ec2:us-west-2:123456789012:security-group/sg-12345678", + "Condition": { + "StringEquals": { + "ec2:vpc": "arn:aws:ec2:us-west-2:123456789012:vpc/vpc-abcdefg12345" + } + } + } + ] +}` + + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: ` +policy_type = "restricted" +aws_account_id = "123456789012" +vpc_id = "vpc-abcdefg12345" +region = "us-west-2" +security_group_id = "sg-12345678" +`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + actualJSON := d.Get("json").(string) + assert.Equal(t, expectedJSON, actualJSON) + + // Negative test: ensure that restricted policy is not equal to managed policy + managedD, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "managed"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + managedJSON := managedD.Get("json").(string) + assert.NotEqual(t, actualJSON, managedJSON) + + // Negative test: ensure that restricted policy is not equal to customer policy + customerD, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "customer"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + customerJSON := customerD.Get("json").(string) + assert.NotEqual(t, actualJSON, customerJSON) } func TestDataAwsCrossAccountRestrictedPolicy(t *testing.T) { From 54d9b1762974050c2be68298ad4871720109b601 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Mon, 7 Oct 2024 16:49:59 +0200 Subject: [PATCH 34/99] [Doc] Update CONTRIBUTING guide for plugin framework resources (#4078) ## Changes Update the guide to give detailed information on how to navigate and get started with adding / migrating resources to plugin framework. ## Tests NA - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- CONTRIBUTING.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4062159c53..ab38782660 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -118,6 +118,22 @@ We are migrating the resource from SDKv2 to Plugin Framework provider and hence - `pluginfw`: Contains the changes specific to Plugin Framework. This package shouldn't depend on sdkv2 or common. - `sdkv2`: Contains the changes specific to SDKv2. This package shouldn't depend on pluginfw or common. +### Adding a new resource +1. Check if the directory for this particular resource exists under `internal/providers/pluginfw/resources`, if not create the directory eg: `cluster`, `volume` etc... Please note: Resources and Data sources are organized under the same package for that service. +2. 
Create a file with `resource_resource-name.go` and write the CRUD methods and schema for that resource. For reference, please take a look at existing resources, e.g. `resource_quality_monitor.go` +3. Create a file with `resource_resource-name_acc_test.go` and add integration tests here. +4. Create a file with `resource_resource-name_test.go` and add unit tests here. Note: please make sure to abstract specific methods of the resource so that they are unit-test friendly and don't test internal parts of the terraform plugin framework library. You can compare the diagnostics; for example, please take a look at `data_cluster_test.go` +5. Add the resource under `internal/providers/pluginfw/pluginfw.go` in the `Resources()` method. Please update the list so that it stays in alphabetically sorted order. +6. Create a PR and send it for review. + +### Adding a new data source +1. Check if the directory for this particular data source exists under `internal/providers/pluginfw/resources`; if not, create the directory, e.g. `cluster`, `volume`, etc. Please note: Resources and Data sources are organized under the same package for that service. +2. Create a file with `data_resource-name.go` and write the CRUD methods and schema for that data source. For reference, please take a look at existing data sources, e.g. `data_cluster.go` +3. Create a file with `data_resource-name_acc_test.go` and add integration tests here. +4. Create a file with `data_resource-name_test.go` and add unit tests here. Note: please make sure to abstract specific methods of the data source so that they are unit-test friendly and don't test internal parts of the terraform plugin framework library. You can compare the diagnostics; for example, please take a look at `data_cluster_test.go` +5. Add the data source under `internal/providers/pluginfw/pluginfw.go` in the `DataSources()` method. Please update the list so that it stays in alphabetically sorted order. +6. Create a PR and send it for review. + ### Migrating resource to plugin framework Ideally there shouldn't be any behaviour change when migrating a resource or data source to either Go SDk or Plugin Framework. - Please make sure there are no breaking differences due to changes in schema by running: `make diff-schema`. From 2557380827bf1c643a5c1525a3ff7362eb565c3d Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Mon, 7 Oct 2024 18:38:29 +0200 Subject: [PATCH 35/99] [Release] Release v1.53.0 (#4076) ### New Features and Improvements * Add `databricks_budget` resource ([#3955](https://github.com/databricks/terraform-provider-databricks/pull/3955)). * Add `databricks_mlflow_models` data source ([#3874](https://github.com/databricks/terraform-provider-databricks/pull/3874)). * Add computed attribute `table_serving_url` to `databricks_online_table` ([#4048](https://github.com/databricks/terraform-provider-databricks/pull/4048)). * Add support for Identity Column in `databricks_sql_table` ([#4035](https://github.com/databricks/terraform-provider-databricks/pull/4035)). ### Bug Fixes * Add Sufficient Network Privileges to the Databricks Default Cross Account Policy ([#4027](https://github.com/databricks/terraform-provider-databricks/pull/4027)) * Ignore presence or absence of `/Workspace` prefix for dashboard resource ([#4061](https://github.com/databricks/terraform-provider-databricks/pull/4061)). * Refactor `databricks_permissions` and allow the current user to set their own permissions ([#3956](https://github.com/databricks/terraform-provider-databricks/pull/3956)).
* Set ID for online table resource if creation succeeds but it isn't available yet ([#4072](https://github.com/databricks/terraform-provider-databricks/pull/4072)). ### Documentation * Update CONTRIBUTING guide for plugin framework resources ([#4078](https://github.com/databricks/terraform-provider-databricks/pull/4078)) * Add guide for OIDC authentication ([#4016](https://github.com/databricks/terraform-provider-databricks/pull/4016)). * Correctly use native markdown callouts supported by TF Registry ([#4073](https://github.com/databricks/terraform-provider-databricks/pull/4073)). * Fixing links to `databricks_service_principal` in TF guides ([#4020](https://github.com/databricks/terraform-provider-databricks/pull/4020)). ### Internal Changes * Fix Permissions Dashboard Test ([#4071](https://github.com/databricks/terraform-provider-databricks/pull/4071)). * Bump Go SDK to latest and generate TF structs ([#4062](https://github.com/databricks/terraform-provider-databricks/pull/4062)). * Skip Budget tests on GCP ([#4070](https://github.com/databricks/terraform-provider-databricks/pull/4070)). * Update to latest OpenAPI spec and bump Go SDK ([#4069](https://github.com/databricks/terraform-provider-databricks/pull/4069)). --- CHANGELOG.md | 34 ++++++++++++++++++++++++++++++++++ common/version.go | 2 +- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aecd839d63..769b48fa35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,39 @@ # Version changelog +## [Release] Release v1.53.0 + +### New Features and Improvements + + * Add `databricks_budget` resource ([#3955](https://github.com/databricks/terraform-provider-databricks/pull/3955)). + * Add `databricks_mlflow_models` data source ([#3874](https://github.com/databricks/terraform-provider-databricks/pull/3874)). + * Add computed attribute `table_serving_url` to `databricks_online_table` ([#4048](https://github.com/databricks/terraform-provider-databricks/pull/4048)). + * Add support for Identity Column in `databricks_sql_table` ([#4035](https://github.com/databricks/terraform-provider-databricks/pull/4035)). + + +### Bug Fixes + + * Add Sufficient Network Privileges to the Databricks Default Cross Account Policy ([#4027](https://github.com/databricks/terraform-provider-databricks/pull/4027)) + * Ignore presence or absence of `/Workspace` prefix for dashboard resource ([#4061](https://github.com/databricks/terraform-provider-databricks/pull/4061)). + * Refactor `databricks_permissions` and allow the current user to set their own permissions ([#3956](https://github.com/databricks/terraform-provider-databricks/pull/3956)). + * Set ID for online table resource if creation succeeds but it isn't available yet ([#4072](https://github.com/databricks/terraform-provider-databricks/pull/4072)). + + +### Documentation + + * Update CONTRIBUTING guide for plugin framework resources ([#4078](https://github.com/databricks/terraform-provider-databricks/pull/4078)) + * Add guide for OIDC authentication ([#4016](https://github.com/databricks/terraform-provider-databricks/pull/4016)). + * Correctly use native markdown callouts supported by TF Registry ([#4073](https://github.com/databricks/terraform-provider-databricks/pull/4073)). + * Fixing links to `databricks_service_principal` in TF guides ([#4020](https://github.com/databricks/terraform-provider-databricks/pull/4020)). 
+ + +### Internal Changes + + * Fix Permissions Dashboard Test ([#4071](https://github.com/databricks/terraform-provider-databricks/pull/4071)). + * Bump Go SDK to latest and generate TF structs ([#4062](https://github.com/databricks/terraform-provider-databricks/pull/4062)). + * Skip Budget tests on GCP ([#4070](https://github.com/databricks/terraform-provider-databricks/pull/4070)). + * Update to latest OpenAPI spec and bump Go SDK ([#4069](https://github.com/databricks/terraform-provider-databricks/pull/4069)). + + ## [Release] Release v1.52.0 ### New Features and Improvements diff --git a/common/version.go b/common/version.go index 417761fcf5..86d57fba64 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.52.0" + version = "1.53.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From b937e0bc8d0a55a735e437366bb701e89a332cb6 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 9 Oct 2024 06:04:19 -0400 Subject: [PATCH 36/99] [Fix] force send `read_only` in `databricks_external_location` when it's changed (#4067) ## Changes See linked issue for details Resolves #4037, Resolves #4004 ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- catalog/resource_external_location.go | 3 ++ catalog/resource_external_location_test.go | 47 ++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/catalog/resource_external_location.go b/catalog/resource_external_location.go index f21549cb98..04985370ee 100644 --- a/catalog/resource_external_location.go +++ b/catalog/resource_external_location.go @@ -116,6 +116,9 @@ func ResourceExternalLocation() common.Resource { if !d.HasChangeExcept("owner") { return nil } + if d.HasChange("read_only") { + updateExternalLocationRequest.ForceSendFields = append(updateExternalLocationRequest.ForceSendFields, "ReadOnly") + } updateExternalLocationRequest.Owner = "" _, err = w.ExternalLocations.Update(ctx, updateExternalLocationRequest) diff --git a/catalog/resource_external_location_test.go b/catalog/resource_external_location_test.go index 26493c1005..c424bbc2fc 100644 --- a/catalog/resource_external_location_test.go +++ b/catalog/resource_external_location_test.go @@ -293,6 +293,7 @@ func TestUpdateExternalLocation(t *testing.T) { Url: "s3://foo/bar", CredentialName: "bcd", Comment: "def", + ReadOnly: false, }, }, { @@ -324,6 +325,52 @@ func TestUpdateExternalLocation(t *testing.T) { }.ApplyNoError(t) } +func TestUpdateExternalLocation_FromReadOnly(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.1/unity-catalog/external-locations/abc", + ExpectedRequest: catalog.UpdateExternalLocation{ + Url: "s3://foo/bar", + CredentialName: "bcd", + Comment: "def", + ReadOnly: false, + ForceSendFields: []string{"ReadOnly"}, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/external-locations/abc?", + Response: catalog.ExternalLocationInfo{ + Name: "abc", + Url: "s3://foo/bar", + CredentialName: "bcd", + Comment: "def", + ReadOnly: false, + }, + }, + }, + Resource: ResourceExternalLocation(), + Update: true, + ID: "abc", + InstanceState: map[string]string{ + "name": "abc", + "url": "s3://foo/bar", + "credential_name": "abc", + "comment": "def", + "read_only": "true", + }, + HCL: ` + 
name = "abc" + url = "s3://foo/bar" + credential_name = "bcd" + comment = "def" + read_only = false + `, + }.ApplyNoError(t) +} + func TestUpdateExternalLocationOnlyOwner(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ From 4e5951e3141019caffaf2a2450da45e70c3c59e7 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 9 Oct 2024 07:05:04 -0400 Subject: [PATCH 37/99] [Fix] force send `read_only` in `databricks_storage_credential` when it's changed (#4083) ## Changes It's similar to #4067 - the `read_only = false` wasn't sent for storage credentials as well ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- catalog/resource_storage_credential.go | 6 +++ catalog/resource_storage_credential_test.go | 48 +++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/catalog/resource_storage_credential.go b/catalog/resource_storage_credential.go index 38c6a92109..17e7896ae3 100644 --- a/catalog/resource_storage_credential.go +++ b/catalog/resource_storage_credential.go @@ -196,6 +196,9 @@ func ResourceStorageCredential() common.Resource { return nil } + if d.HasChange("read_only") { + update.ForceSendFields = append(update.ForceSendFields, "ReadOnly") + } update.Owner = "" _, err := acc.StorageCredentials.Update(ctx, catalog.AccountsUpdateStorageCredential{ CredentialInfo: &update, @@ -240,6 +243,9 @@ func ResourceStorageCredential() common.Resource { return nil } + if d.HasChange("read_only") { + update.ForceSendFields = append(update.ForceSendFields, "ReadOnly") + } update.Owner = "" _, err = w.StorageCredentials.Update(ctx, update) if err != nil { diff --git a/catalog/resource_storage_credential_test.go b/catalog/resource_storage_credential_test.go index e09f8af8e8..7e3c31b35a 100644 --- a/catalog/resource_storage_credential_test.go +++ b/catalog/resource_storage_credential_test.go @@ -432,6 +432,54 @@ func TestUpdateStorageCredentials(t *testing.T) { }.ApplyNoError(t) } +func TestUpdateStorageCredentialsFromReadOnly(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.1/unity-catalog/storage-credentials/a", + ExpectedRequest: catalog.UpdateStorageCredential{ + AwsIamRole: &catalog.AwsIamRoleRequest{ + RoleArn: "CHANGED", + }, + Comment: "c", + ReadOnly: false, + ForceSendFields: []string{"ReadOnly"}, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/storage-credentials/a?", + Response: catalog.StorageCredentialInfo{ + Name: "a", + AwsIamRole: &catalog.AwsIamRoleResponse{ + RoleArn: "CHANGED", + }, + MetastoreId: "d", + Comment: "c", + ReadOnly: false, + }, + }, + }, + Resource: ResourceStorageCredential(), + Update: true, + ID: "a", + InstanceState: map[string]string{ + "name": "a", + "comment": "c", + "read_only": "true", + }, + HCL: ` + name = "a" + aws_iam_role { + role_arn = "CHANGED" + } + comment = "c" + read_only = false + `, + }.ApplyNoError(t) +} + func TestUpdateStorageCredentialsWithOwnerOnly(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ From bac842d603465ddbf3582e2dab2d9189c17e3113 Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Wed, 9 Oct 2024 20:14:58 -0700 Subject: [PATCH 38/99] [Internal] Update plugin framework schema to use ListNestedBlocks (#4040) ## Changes - To make sure we can release the resources with plugin framework that's compatible with our sdkv2 
resources, we need to use `ListNestedBlocks` for struct types so that the `x.0.y` access pattern can be preserved, also in the HCL we don't need to add extra `=` for blocks. - To achieve this, we need corresponding changes in AttributeBuilder and CustomizableSchema - Updated tfsdk struct generations to generate list types for previous struct types, so that the set and get functions will still work - Updated the converter functions to cover cases that are structs in `gosdk` but slices in `tfsdk` - Updated acceptance tests to remove `=` for block types ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- .codegen/model.go.tmpl | 37 ++- .../pluginfw/converters/converters_test.go | 46 ++- .../providers/pluginfw/converters/go_to_tf.go | 40 ++- .../providers/pluginfw/converters/tf_to_go.go | 36 ++- .../resources/cluster/data_cluster.go | 4 +- .../resources/library/resource_library.go | 31 +- .../library/resource_library_acc_test.go | 6 +- .../resource_quality_monitor.go | 24 +- .../resource_quality_monitor_acc_test.go | 12 +- .../pluginfw/resources/volume/data_volumes.go | 4 +- .../pluginfw/tfschema/attribute_builder.go | 7 +- .../pluginfw/tfschema/base_schema_builder.go | 12 + .../pluginfw/tfschema/block_builder.go | 35 ++ .../pluginfw/tfschema/bool_attribute.go | 16 +- .../pluginfw/tfschema/customizable_schema.go | 88 +++-- .../tfschema/customizable_schema_test.go | 4 +- .../pluginfw/tfschema/float64_attribute.go | 16 +- .../pluginfw/tfschema/int64_attribute.go | 16 +- .../pluginfw/tfschema/list_attribute.go | 16 +- .../tfschema/list_nested_attribute.go | 16 +- .../pluginfw/tfschema/list_nested_block.go | 97 ++++++ .../pluginfw/tfschema/map_attribute.go | 16 +- .../pluginfw/tfschema/map_nested_attribute.go | 16 +- .../pluginfw/tfschema/nested_block_object.go | 32 ++ .../tfschema/single_nested_attribute.go | 16 +- .../pluginfw/tfschema/single_nested_block.go | 109 +++++++ .../pluginfw/tfschema/string_attribute.go | 16 +- .../pluginfw/tfschema/struct_to_schema.go | 84 ++--- .../tfschema/struct_to_schema_test.go | 26 +- internal/service/apps_tf/model.go | 20 +- internal/service/billing_tf/model.go | 26 +- internal/service/catalog_tf/model.go | 190 +++++------ internal/service/compute_tf/model.go | 198 ++++++------ internal/service/dashboards_tf/model.go | 28 +- internal/service/iam_tf/model.go | 8 +- internal/service/jobs_tf/model.go | 300 +++++++++--------- internal/service/marketplace_tf/model.go | 86 ++--- internal/service/ml_tf/model.go | 54 ++-- internal/service/oauth2_tf/model.go | 12 +- internal/service/pipelines_tf/model.go | 78 ++--- internal/service/provisioning_tf/model.go | 48 +-- internal/service/serving_tf/model.go | 98 +++--- internal/service/settings_tf/model.go | 88 ++--- internal/service/sharing_tf/model.go | 34 +- internal/service/sql_tf/model.go | 142 ++++----- internal/service/vectorsearch_tf/model.go | 28 +- internal/service/workspace_tf/model.go | 14 +- 47 files changed, 1364 insertions(+), 966 deletions(-) create mode 100644 internal/providers/pluginfw/tfschema/base_schema_builder.go create mode 100644 internal/providers/pluginfw/tfschema/block_builder.go create mode 100644 internal/providers/pluginfw/tfschema/list_nested_block.go create mode 100644 internal/providers/pluginfw/tfschema/nested_block_object.go create mode 100644 internal/providers/pluginfw/tfschema/single_nested_block.go diff --git a/.codegen/model.go.tmpl 
b/.codegen/model.go.tmpl index dd9c3d3849..52a478f71b 100644 --- a/.codegen/model.go.tmpl +++ b/.codegen/model.go.tmpl @@ -24,7 +24,7 @@ import ( type {{.PascalName}} struct { {{- range .Fields}} {{.Comment " // " 80}} - {{.PascalName}} {{if .IsOptionalObject}}*{{end}}{{template "type" .Entity}} `{{template "field-tag" . }}`{{end}} + {{.PascalName}} {{template "type" .Entity}} `{{template "field-tag" . }}`{{end}} } {{end}} @@ -35,20 +35,23 @@ type {{.PascalName}} struct { {{- end -}} {{- define "type" -}} - {{- if not . }}any /* ERROR */ - {{- else if .IsExternal }}{{.Package.Name}}.{{.PascalName}} - {{- else if .IsAny}}any - {{- else if .IsEmpty}}{{.PascalName}} - {{- else if .IsString}}types.String - {{- else if .IsBool}}types.Bool - {{- else if .IsInt64}}types.Int64 - {{- else if .IsFloat64}}types.Float64 - {{- else if .IsInt}}types.Int64 - {{- else if .IsByteStream}}io.ReadCloser - {{- else if .ArrayValue }}[]{{template "type" .ArrayValue}} - {{- else if .MapValue }}map[string]{{template "type" .MapValue}} - {{- else if .IsObject }}{{.PascalName}} - {{- else if .Enum }}types.String - {{- else}}any /* MISSING TYPE */ - {{- end -}} + {{- if not . }}any /* ERROR */ + {{- else if .IsExternal }}{{.Package.Name}}.{{.PascalName}} + {{- else if .IsAny}}any + {{- else if .IsEmpty}}[]{{.PascalName}} + {{- else if .IsString}}types.String + {{- else if .IsBool}}types.Bool + {{- else if .IsInt64}}types.Int64 + {{- else if .IsFloat64}}types.Float64 + {{- else if .IsInt}}types.Int64 + {{- else if .IsByteStream}}io.ReadCloser + {{- else if .ArrayValue }} + {{- if .ArrayValue.IsObject }}{{template "type" .ArrayValue}} + {{- else }}[]{{template "type" .ArrayValue}} + {{- end }} + {{- else if .MapValue }}map[string]{{template "type" .MapValue}} + {{- else if .IsObject }}[]{{.PascalName}} + {{- else if .Enum }}types.String + {{- else}}any /* MISSING TYPE */ + {{- end -}} {{- end -}} diff --git a/internal/providers/pluginfw/converters/converters_test.go b/internal/providers/pluginfw/converters/converters_test.go index 7758345ad1..75f4cef630 100644 --- a/internal/providers/pluginfw/converters/converters_test.go +++ b/internal/providers/pluginfw/converters/converters_test.go @@ -17,7 +17,6 @@ type DummyTfSdk struct { Floats types.Float64 `tfsdk:"floats" tf:""` Description types.String `tfsdk:"description" tf:""` Tasks types.String `tfsdk:"task" tf:"optional"` - Nested *DummyNestedTfSdk `tfsdk:"nested" tf:"optional"` NoPointerNested DummyNestedTfSdk `tfsdk:"no_pointer_nested" tf:"optional"` NestedList []DummyNestedTfSdk `tfsdk:"nested_list" tf:"optional"` NestedPointerList []*DummyNestedTfSdk `tfsdk:"nested_pointer_list" tf:"optional"` @@ -28,6 +27,8 @@ type DummyTfSdk struct { EnumField types.String `tfsdk:"enum_field" tf:"optional"` AdditionalField types.String `tfsdk:"additional_field" tf:"optional"` DistinctField types.String `tfsdk:"distinct_field" tf:"optional"` + SliceStruct []DummyNestedTfSdk `tfsdk:"slice_struct" tf:"optional"` + SliceStructPtr []DummyNestedTfSdk `tfsdk:"slice_struct_ptr" tf:"optional"` Irrelevant types.String `tfsdk:"-"` } @@ -69,7 +70,6 @@ type DummyGoSdk struct { Floats float64 `json:"floats"` Description string `json:"description"` Tasks string `json:"tasks"` - Nested *DummyNestedGoSdk `json:"nested"` NoPointerNested DummyNestedGoSdk `json:"no_pointer_nested"` NestedList []DummyNestedGoSdk `json:"nested_list"` NestedPointerList []*DummyNestedGoSdk `json:"nested_pointer_list"` @@ -80,6 +80,8 @@ type DummyGoSdk struct { EnumField TestEnum `json:"enum_field"` AdditionalField string 
`json:"additional_field"` DistinctField string `json:"distinct_field"` // distinct field that the tfsdk struct doesn't have + SliceStruct DummyNestedGoSdk `json:"slice_struct"` + SliceStructPtr *DummyNestedGoSdk `json:"slice_struct_ptr"` ForceSendFields []string `json:"-"` } @@ -186,18 +188,6 @@ var tests = []struct { ForceSendFields: []string{"Name", "Enabled"}, }}, }, - { - "pointer conversion", - DummyTfSdk{Nested: &DummyNestedTfSdk{ - Name: types.StringValue("def"), - Enabled: types.BoolValue(true), - }}, - DummyGoSdk{Nested: &DummyNestedGoSdk{ - Name: "def", - Enabled: true, - ForceSendFields: []string{"Name", "Enabled"}, - }}, - }, { "list conversion", DummyTfSdk{Repeated: []types.Int64{types.Int64Value(12), types.Int64Value(34)}}, @@ -258,6 +248,34 @@ var tests = []struct { }, }}, }, + { + "list representation of struct conversion", // we use list with one element in the tfsdk to represent struct in gosdk + DummyTfSdk{SliceStruct: []DummyNestedTfSdk{ + { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + }}, + DummyGoSdk{SliceStruct: DummyNestedGoSdk{ + Name: "def", + Enabled: true, + ForceSendFields: []string{"Name", "Enabled"}, + }}, + }, + { + "list representation of struct pointer conversion", // we use list with one element in the tfsdk to represent struct in gosdk + DummyTfSdk{SliceStructPtr: []DummyNestedTfSdk{ + { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + }}, + DummyGoSdk{SliceStructPtr: &DummyNestedGoSdk{ + Name: "def", + Enabled: true, + ForceSendFields: []string{"Name", "Enabled"}, + }}, + }, } func TestConverter(t *testing.T) { diff --git a/internal/providers/pluginfw/converters/go_to_tf.go b/internal/providers/pluginfw/converters/go_to_tf.go index b86c32a21b..9fed34b494 100644 --- a/internal/providers/pluginfw/converters/go_to_tf.go +++ b/internal/providers/pluginfw/converters/go_to_tf.go @@ -26,11 +26,13 @@ const goSdkToTfSdkFieldConversionFailureMessage = "gosdk to tfsdk field conversi // // NOTE: // -// If field name doesn't show up in ForceSendFields and the field is zero value, we set the null value on the tfsdk. -// types.list and types.map are not supported -// map keys should always be a string -// tfsdk structs use types.String for all enum values -// non-json fields will be omitted +// # Structs in gosdk are represented as slices of structs in tfsdk, and pointers are removed +// +// If field name doesn't show up in ForceSendFields and the field is zero value, we set the null value on the tfsdk. +// types.list and types.map are not supported +// map keys should always be a string +// tfsdk structs use types.String for all enum values +// non-json fields will be omitted func GoSdkToTfSdkStruct(ctx context.Context, gosdk interface{}, tfsdk interface{}) diag.Diagnostics { srcVal := reflect.ValueOf(gosdk) @@ -100,10 +102,23 @@ func goSdkToTfSdkSingleField(ctx context.Context, srcField reflect.Value, destFi // Skip nils return nil } - destField.Set(reflect.New(destField.Type().Elem())) + + var fieldToSetInterface any + + if destField.Kind() == reflect.Slice { + sliceType := destField.Type() + newSlice := reflect.MakeSlice(sliceType, 1, 1) + newSlice.Index(0).Set(reflect.New(sliceType.Elem()).Elem()) + + destField.Set(newSlice) + fieldToSetInterface = newSlice.Index(0).Addr().Interface() + } else { + destField.Set(reflect.New(destField.Type().Elem())) + fieldToSetInterface = destField.Interface() + } // Recursively populate the nested struct. 
- if GoSdkToTfSdkStruct(ctx, srcFieldValue, destField.Interface()).HasError() { + if GoSdkToTfSdkStruct(ctx, srcFieldValue, fieldToSetInterface).HasError() { panic(fmt.Sprintf("%s. %s", goSdkToTfSdkStructConversionFailureMessage, common.TerraformBugErrorMessage)) } case reflect.Bool: @@ -151,8 +166,17 @@ func goSdkToTfSdkSingleField(ctx context.Context, srcField reflect.Value, destFi // Skip zeros return nil } + var dest any + if destField.Kind() == reflect.Slice { + // allocate a slice first + destSlice := reflect.MakeSlice(destField.Type(), 1, 1) + destField.Set(destSlice) + dest = destSlice.Index(0).Addr().Interface() + } else { + dest = destField.Addr().Interface() + } // resolve the nested struct by recursively calling the function - if GoSdkToTfSdkStruct(ctx, srcFieldValue, destField.Addr().Interface()).HasError() { + if GoSdkToTfSdkStruct(ctx, srcFieldValue, dest).HasError() { panic(fmt.Sprintf("%s. %s", goSdkToTfSdkStructConversionFailureMessage, common.TerraformBugErrorMessage)) } case reflect.Slice: diff --git a/internal/providers/pluginfw/converters/tf_to_go.go b/internal/providers/pluginfw/converters/tf_to_go.go index 6cf23decad..70efd92a36 100644 --- a/internal/providers/pluginfw/converters/tf_to_go.go +++ b/internal/providers/pluginfw/converters/tf_to_go.go @@ -27,10 +27,12 @@ const tfSdkToGoSdkFieldConversionFailureMessage = "tfsdk to gosdk field conversi // // NOTE: // -// ForceSendFields are populated for string, bool, int64, float64 on non null values -// types.list and types.map are not supported -// map keys should always be a string -// tfsdk structs use types.String for all enum values +// # Structs are represented as slice of structs in tfsdk, and pointers are removed +// +// ForceSendFields are populated for string, bool, int64, float64 on non null values +// types.list and types.map are not supported +// map keys should always be a string +// tfsdk structs use types.String for all enum values func TfSdkToGoSdkStruct(ctx context.Context, tfsdk interface{}, gosdk interface{}) diag.Diagnostics { srcVal := reflect.ValueOf(tfsdk) destVal := reflect.ValueOf(gosdk) @@ -98,6 +100,26 @@ func tfSdkToGoSdkSingleField(ctx context.Context, srcField reflect.Value, destFi if TfSdkToGoSdkStruct(ctx, srcFieldValue, destField.Interface()).HasError() { panic(fmt.Sprintf("%s. %s", tfSdkToGoSdkStructConversionFailureMessage, common.TerraformBugErrorMessage)) } + } else if srcField.Kind() == reflect.Slice && destField.Kind() == reflect.Struct { + if srcField.IsNil() { + // Skip nils + return nil + } + assertStructSliceLengthIsOne(srcField) + tfsdkToGoSdkStructField(srcField.Index(0), destField, srcFieldName, forceSendFieldsField, ctx) + } else if srcField.Kind() == reflect.Slice && destField.Kind() == reflect.Ptr { + if srcField.IsNil() { + // Skip nils + return nil + } + destField.Set(reflect.New(destField.Type().Elem())) + + assertStructSliceLengthIsOne(srcField) + + // Recursively populate the nested struct. + if TfSdkToGoSdkStruct(ctx, srcField.Index(0).Interface(), destField.Interface()).HasError() { + panic(fmt.Sprintf("%s. 
%s", tfSdkToGoSdkStructConversionFailureMessage, common.TerraformBugErrorMessage)) + } } else if srcField.Kind() == reflect.Struct { tfsdkToGoSdkStructField(srcField, destField, srcFieldName, forceSendFieldsField, ctx) } else if srcField.Kind() == reflect.Slice { @@ -207,6 +229,12 @@ func tfsdkToGoSdkStructField(srcField reflect.Value, destField reflect.Value, sr } } +func assertStructSliceLengthIsOne(srcSlice reflect.Value) { + if srcSlice.Len() > 1 { + panic(fmt.Sprintf("The length of a slice can not be greater than 1 if it is representing a struct, %s", common.TerraformBugErrorMessage)) + } +} + func addToForceSendFields(ctx context.Context, fieldName string, forceSendFieldsField *reflect.Value) { if forceSendFieldsField == nil || !forceSendFieldsField.IsValid() || !forceSendFieldsField.CanSet() { tflog.Debug(ctx, fmt.Sprintf("[Debug] forceSendFieldsField is nil, invalid or not settable. %s", fieldName)) diff --git a/internal/providers/pluginfw/resources/cluster/data_cluster.go b/internal/providers/pluginfw/resources/cluster/data_cluster.go index 9936df5fa2..1fae8670df 100644 --- a/internal/providers/pluginfw/resources/cluster/data_cluster.go +++ b/internal/providers/pluginfw/resources/cluster/data_cluster.go @@ -39,8 +39,10 @@ func (d *ClusterDataSource) Metadata(ctx context.Context, req datasource.Metadat } func (d *ClusterDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(ClusterInfo{}, nil) resp.Schema = schema.Schema{ - Attributes: tfschema.DataSourceStructToSchemaMap(ClusterInfo{}, nil), + Attributes: attrs, + Blocks: blocks, } } diff --git a/internal/providers/pluginfw/resources/library/resource_library.go b/internal/providers/pluginfw/resources/library/resource_library.go index 04b90f6298..21e4e0f897 100644 --- a/internal/providers/pluginfw/resources/library/resource_library.go +++ b/internal/providers/pluginfw/resources/library/resource_library.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" @@ -70,19 +71,27 @@ func (r *LibraryResource) Metadata(ctx context.Context, req resource.MetadataReq } func (r *LibraryResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + attrs, blocks := tfschema.ResourceStructToSchemaMap(LibraryExtended{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { + for field, attribute := range c.ToNestedBlockObject().Attributes { + switch attribute.(type) { + case tfschema.StringAttributeBuilder: + c.AddPlanModifier(stringplanmodifier.RequiresReplace(), field) + case tfschema.SingleNestedAttributeBuilder: + c.AddPlanModifier(objectplanmodifier.RequiresReplace(), field) + } + } + for field, block := range c.ToNestedBlockObject().Blocks { + switch block.(type) { + case tfschema.ListNestedBlockBuilder: + c.AddPlanModifier(listplanmodifier.RequiresReplace(), field) + } + } + return c + }) resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Library", - Attributes: 
tfschema.ResourceStructToSchemaMap(LibraryExtended{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { - for field, attribute := range c.ToAttributeMap() { - switch attribute.(type) { - case tfschema.StringAttributeBuilder: - c.AddPlanModifier(stringplanmodifier.RequiresReplace(), field) - case tfschema.SingleNestedAttributeBuilder: - c.AddPlanModifier(objectplanmodifier.RequiresReplace(), field) - } - } - return c - }), + Attributes: attrs, + Blocks: blocks, } } diff --git a/internal/providers/pluginfw/resources/library/resource_library_acc_test.go b/internal/providers/pluginfw/resources/library/resource_library_acc_test.go index 96e699d85d..153657ae41 100644 --- a/internal/providers/pluginfw/resources/library/resource_library_acc_test.go +++ b/internal/providers/pluginfw/resources/library/resource_library_acc_test.go @@ -26,7 +26,7 @@ func TestAccLibraryCreationPluginFramework(t *testing.T) { } resource "databricks_library_pluginframework" "new_library" { cluster_id = databricks_cluster.this.id - pypi = { + pypi { repo = "https://pypi.org/dummy" package = "databricks-sdk" } @@ -56,7 +56,7 @@ func TestAccLibraryUpdatePluginFramework(t *testing.T) { } resource "databricks_library_pluginframework" "new_library" { cluster_id = databricks_cluster.this.id - pypi = { + pypi { repo = "https://pypi.org/simple" package = "databricks-sdk" } @@ -82,7 +82,7 @@ func TestAccLibraryUpdatePluginFramework(t *testing.T) { } resource "databricks_library_pluginframework" "new_library" { cluster_id = databricks_cluster.this.id - pypi = { + pypi { package = "networkx" } } diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go index 4dd21b44da..d1e455f5b4 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go @@ -66,19 +66,21 @@ func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.Meta } func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + attrs, blocks := tfschema.ResourceStructToSchemaMap(MonitorInfoExtended{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { + c.SetRequired("assets_dir") + c.SetRequired("output_schema_name") + c.SetReadOnly("monitor_version") + c.SetReadOnly("drift_metrics_table_name") + c.SetReadOnly("profile_metrics_table_name") + c.SetReadOnly("status") + c.SetReadOnly("dashboard_id") + c.SetReadOnly("schedule", "pause_status") + return c + }) resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Quality Monitor", - Attributes: tfschema.ResourceStructToSchemaMap(MonitorInfoExtended{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { - c.SetRequired("assets_dir") - c.SetRequired("output_schema_name") - c.SetReadOnly("monitor_version") - c.SetReadOnly("drift_metrics_table_name") - c.SetReadOnly("profile_metrics_table_name") - c.SetReadOnly("status") - c.SetReadOnly("dashboard_id") - c.SetReadOnly("schedule", "pause_status") - return c - }), + Attributes: attrs, + Blocks: blocks, } } diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go index f9934c9cd9..7f303d482e 100644 --- 
a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go @@ -59,7 +59,7 @@ func TestUcAccQualityMonitor(t *testing.T) { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id - inference_log = { + inference_log { granularities = ["1 day"] timestamp_col = "timestamp" prediction_col = "prediction" @@ -85,7 +85,7 @@ func TestUcAccQualityMonitor(t *testing.T) { table_name = databricks_sql_table.myTimeseries.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" output_schema_name = databricks_schema.things.id - time_series = { + time_series { granularities = ["1 day"] timestamp_col = "timestamp" } @@ -108,7 +108,7 @@ func TestUcAccQualityMonitor(t *testing.T) { table_name = databricks_sql_table.mySnapshot.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" output_schema_name = databricks_schema.things.id - snapshot = { + snapshot { } } `, @@ -125,7 +125,7 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id - inference_log = { + inference_log { granularities = ["1 day"] timestamp_col = "timestamp" prediction_col = "prediction" @@ -140,7 +140,7 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id - inference_log = { + inference_log { granularities = ["1 hour"] timestamp_col = "timestamp" prediction_col = "prediction" @@ -164,7 +164,7 @@ func TestUcAccQualityMonitorImportPluginFramework(t *testing.T) { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id - inference_log = { + inference_log { granularities = ["1 day"] timestamp_col = "timestamp" prediction_col = "prediction" diff --git a/internal/providers/pluginfw/resources/volume/data_volumes.go b/internal/providers/pluginfw/resources/volume/data_volumes.go index 590a85f95b..73290bb80c 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes.go @@ -36,8 +36,10 @@ func (d *VolumesDataSource) Metadata(ctx context.Context, req datasource.Metadat } func (d *VolumesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(VolumesList{}, nil) resp.Schema = schema.Schema{ - Attributes: tfschema.DataSourceStructToSchemaMap(VolumesList{}, nil), + Attributes: attrs, + Blocks: blocks, } } diff --git a/internal/providers/pluginfw/tfschema/attribute_builder.go b/internal/providers/pluginfw/tfschema/attribute_builder.go index 45b7102f70..f467c74d0f 100644 --- a/internal/providers/pluginfw/tfschema/attribute_builder.go +++ b/internal/providers/pluginfw/tfschema/attribute_builder.go @@ -9,14 
+9,9 @@ import ( // We need this because in terraform plugin framework, the datasource schema and resource schema are in two separate packages. // This common interface prevents us from keeping two copies of StructToSchema and CustomizableSchema. type AttributeBuilder interface { + BaseSchemaBuilder BuildDataSourceAttribute() dataschema.Attribute BuildResourceAttribute() schema.Attribute - SetOptional() AttributeBuilder - SetRequired() AttributeBuilder - SetSensitive() AttributeBuilder - SetComputed() AttributeBuilder - SetReadOnly() AttributeBuilder - SetDeprecated(string) AttributeBuilder } // BuildDataSourceAttributeMap takes a map from string to AttributeBuilder and returns a map from string to datasource.schema.Attribute. diff --git a/internal/providers/pluginfw/tfschema/base_schema_builder.go b/internal/providers/pluginfw/tfschema/base_schema_builder.go new file mode 100644 index 0000000000..8bdd471e63 --- /dev/null +++ b/internal/providers/pluginfw/tfschema/base_schema_builder.go @@ -0,0 +1,12 @@ +package tfschema + +// BaseSchemaBuilder is the common interface for all blocks and attributes; it can be used to build both data source and resource schemas. +// Both AttributeBuilder and BlockBuilder extend this interface. +type BaseSchemaBuilder interface { + SetOptional() BaseSchemaBuilder + SetRequired() BaseSchemaBuilder + SetSensitive() BaseSchemaBuilder + SetComputed() BaseSchemaBuilder + SetReadOnly() BaseSchemaBuilder + SetDeprecated(string) BaseSchemaBuilder +} diff --git a/internal/providers/pluginfw/tfschema/block_builder.go b/internal/providers/pluginfw/tfschema/block_builder.go new file mode 100644 index 0000000000..abf3f38cb6 --- /dev/null +++ b/internal/providers/pluginfw/tfschema/block_builder.go @@ -0,0 +1,35 @@ +package tfschema + +import ( + dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" +) + +// BlockBuilder is the common interface for all blocks; it can be used to build data source blocks and resource blocks. +// We need this because in terraform plugin framework, the datasource schema and resource schema are in two separate packages. +// This common interface prevents us from keeping two copies of StructToSchema and CustomizableSchema. 
+type BlockBuilder interface { + BaseSchemaBuilder + BuildDataSourceBlock() dataschema.Block + BuildResourceBlock() schema.Block +} + +func BuildDataSourceBlockMap(attributes map[string]BlockBuilder) map[string]dataschema.Block { + dataSourceAttributes := make(map[string]dataschema.Block) + + for key, attribute := range attributes { + dataSourceAttributes[key] = attribute.BuildDataSourceBlock() + } + + return dataSourceAttributes +} + +func BuildResourceBlockMap(attributes map[string]BlockBuilder) map[string]schema.Block { + resourceAttributes := make(map[string]schema.Block) + + for key, attribute := range attributes { + resourceAttributes[key] = attribute.BuildResourceBlock() + } + + return resourceAttributes +} diff --git a/internal/providers/pluginfw/tfschema/bool_attribute.go b/internal/providers/pluginfw/tfschema/bool_attribute.go index 5dc4727293..54b0ea424a 100644 --- a/internal/providers/pluginfw/tfschema/bool_attribute.go +++ b/internal/providers/pluginfw/tfschema/bool_attribute.go @@ -40,7 +40,7 @@ func (a BoolAttributeBuilder) BuildResourceAttribute() schema.Attribute { } } -func (a BoolAttributeBuilder) SetOptional() AttributeBuilder { +func (a BoolAttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -49,7 +49,7 @@ func (a BoolAttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a BoolAttributeBuilder) SetRequired() AttributeBuilder { +func (a BoolAttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -58,7 +58,7 @@ func (a BoolAttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a BoolAttributeBuilder) SetSensitive() AttributeBuilder { +func (a BoolAttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -66,7 +66,7 @@ func (a BoolAttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a BoolAttributeBuilder) SetComputed() AttributeBuilder { +func (a BoolAttributeBuilder) SetComputed() BaseSchemaBuilder { if a.Computed { panic("attribute is already computed") } @@ -74,7 +74,7 @@ func (a BoolAttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a BoolAttributeBuilder) SetReadOnly() AttributeBuilder { +func (a BoolAttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -84,17 +84,17 @@ func (a BoolAttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a BoolAttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a BoolAttributeBuilder) SetDeprecated(msg string) BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a BoolAttributeBuilder) AddValidator(v validator.Bool) AttributeBuilder { +func (a BoolAttributeBuilder) AddValidator(v validator.Bool) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a BoolAttributeBuilder) AddPlanModifier(v planmodifier.Bool) AttributeBuilder { +func (a BoolAttributeBuilder) AddPlanModifier(v planmodifier.Bool) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/customizable_schema.go b/internal/providers/pluginfw/tfschema/customizable_schema.go index 4f29f7765e..162e7392d5 100644 --- a/internal/providers/pluginfw/tfschema/customizable_schema.go +++ b/internal/providers/pluginfw/tfschema/customizable_schema.go @@ -9,41 +9,47 @@ import ( 
"github.com/hashicorp/terraform-plugin-framework/schema/validator" ) -// CustomizableSchema is a wrapper struct on top of AttributeBuilder that can be used to navigate through nested schema add customizations. +// CustomizableSchema is a wrapper struct on top of BaseSchemaBuilder that can be used to navigate through nested schema add customizations. type CustomizableSchema struct { - attr AttributeBuilder + attr BaseSchemaBuilder } -// ConstructCustomizableSchema constructs a CustomizableSchema given a map from string to AttributeBuilder. -func ConstructCustomizableSchema(attributes map[string]AttributeBuilder) *CustomizableSchema { - attr := AttributeBuilder(SingleNestedAttributeBuilder{Attributes: attributes}) +// ConstructCustomizableSchema constructs a CustomizableSchema given a NestedBlockObject. +func ConstructCustomizableSchema(nestedObject NestedBlockObject) *CustomizableSchema { + attr := AttributeBuilder(SingleNestedBlockBuilder{NestedObject: nestedObject}) return &CustomizableSchema{attr: attr} } -// ToAttributeMap converts CustomizableSchema into a map from string to Attribute. -func (s *CustomizableSchema) ToAttributeMap() map[string]AttributeBuilder { - return attributeToMap(&s.attr) +// ToAttributeMap converts CustomizableSchema into BaseSchemaBuilder. +func (s *CustomizableSchema) ToNestedBlockObject() NestedBlockObject { + return attributeToNestedBlockObject(&s.attr) } // attributeToMap converts AttributeBuilder into a map from string to AttributeBuilder. -func attributeToMap(attr *AttributeBuilder) map[string]AttributeBuilder { - var m map[string]AttributeBuilder +func attributeToNestedBlockObject(attr *BaseSchemaBuilder) NestedBlockObject { + var res = NestedBlockObject{} switch attr := (*attr).(type) { case SingleNestedAttributeBuilder: - m = attr.Attributes + res.Attributes = attr.Attributes case ListNestedAttributeBuilder: - m = attr.NestedObject.Attributes + res.Attributes = attr.NestedObject.Attributes case MapNestedAttributeBuilder: - m = attr.NestedObject.Attributes + res.Attributes = attr.NestedObject.Attributes + case SingleNestedBlockBuilder: + res.Attributes = attr.NestedObject.Attributes + res.Blocks = attr.NestedObject.Blocks + case ListNestedBlockBuilder: + res.Attributes = attr.NestedObject.Attributes + res.Blocks = attr.NestedObject.Blocks default: panic(fmt.Errorf("cannot convert to map, attribute is not nested")) } - return m + return res } func (s *CustomizableSchema) AddValidator(v any, path ...string) *CustomizableSchema { - cb := func(attr AttributeBuilder) AttributeBuilder { + cb := func(attr BaseSchemaBuilder) BaseSchemaBuilder { switch a := attr.(type) { case BoolAttributeBuilder: return a.AddValidator(v.(validator.Bool)) @@ -74,7 +80,7 @@ func (s *CustomizableSchema) AddValidator(v any, path ...string) *CustomizableSc } func (s *CustomizableSchema) AddPlanModifier(v any, path ...string) *CustomizableSchema { - cb := func(attr AttributeBuilder) AttributeBuilder { + cb := func(attr BaseSchemaBuilder) BaseSchemaBuilder { switch a := attr.(type) { case BoolAttributeBuilder: return a.AddPlanModifier(v.(planmodifier.Bool)) @@ -94,6 +100,10 @@ func (s *CustomizableSchema) AddPlanModifier(v any, path ...string) *Customizabl return a.AddPlanModifier(v.(planmodifier.Object)) case StringAttributeBuilder: return a.AddPlanModifier(v.(planmodifier.String)) + case ListNestedBlockBuilder: + return a.AddPlanModifier(v.(planmodifier.List)) + case SingleNestedBlockBuilder: + return a.AddPlanModifier(v.(planmodifier.Object)) default: panic(fmt.Errorf("cannot add 
planmodifier, attribute builder type is invalid: %s. %s", reflect.TypeOf(attr).String(), common.TerraformBugErrorMessage)) } @@ -105,7 +115,7 @@ func (s *CustomizableSchema) AddPlanModifier(v any, path ...string) *Customizabl } func (s *CustomizableSchema) SetOptional(path ...string) *CustomizableSchema { - cb := func(attr AttributeBuilder) AttributeBuilder { + cb := func(attr BaseSchemaBuilder) BaseSchemaBuilder { return attr.SetOptional() } @@ -115,7 +125,7 @@ func (s *CustomizableSchema) SetOptional(path ...string) *CustomizableSchema { } func (s *CustomizableSchema) SetRequired(path ...string) *CustomizableSchema { - cb := func(attr AttributeBuilder) AttributeBuilder { + cb := func(attr BaseSchemaBuilder) BaseSchemaBuilder { return attr.SetRequired() } @@ -125,7 +135,7 @@ func (s *CustomizableSchema) SetRequired(path ...string) *CustomizableSchema { } func (s *CustomizableSchema) SetSensitive(path ...string) *CustomizableSchema { - cb := func(attr AttributeBuilder) AttributeBuilder { + cb := func(attr BaseSchemaBuilder) BaseSchemaBuilder { return attr.SetSensitive() } @@ -134,7 +144,7 @@ func (s *CustomizableSchema) SetSensitive(path ...string) *CustomizableSchema { } func (s *CustomizableSchema) SetDeprecated(msg string, path ...string) *CustomizableSchema { - cb := func(attr AttributeBuilder) AttributeBuilder { + cb := func(attr BaseSchemaBuilder) BaseSchemaBuilder { return attr.SetDeprecated(msg) } @@ -144,7 +154,7 @@ func (s *CustomizableSchema) SetDeprecated(msg string, path ...string) *Customiz } func (s *CustomizableSchema) SetComputed(path ...string) *CustomizableSchema { - cb := func(attr AttributeBuilder) AttributeBuilder { + cb := func(attr BaseSchemaBuilder) BaseSchemaBuilder { return attr.SetComputed() } @@ -156,7 +166,7 @@ func (s *CustomizableSchema) SetComputed(path ...string) *CustomizableSchema { // This should be used for fields that are not user-configurable but are returned // by the platform. func (s *CustomizableSchema) SetReadOnly(path ...string) *CustomizableSchema { - cb := func(attr AttributeBuilder) AttributeBuilder { + cb := func(attr BaseSchemaBuilder) BaseSchemaBuilder { return attr.SetReadOnly() } @@ -166,21 +176,33 @@ func (s *CustomizableSchema) SetReadOnly(path ...string) *CustomizableSchema { } // navigateSchemaWithCallback navigates through schema attributes and executes callback on the target, panics if path does not exist or invalid. 
-func navigateSchemaWithCallback(s *AttributeBuilder, cb func(AttributeBuilder) AttributeBuilder, path ...string) (AttributeBuilder, error) { - current_scm := s +func navigateSchemaWithCallback(s *BaseSchemaBuilder, cb func(BaseSchemaBuilder) BaseSchemaBuilder, path ...string) (BaseSchemaBuilder, error) { + currentScm := s for i, p := range path { - m := attributeToMap(current_scm) - - v, ok := m[p] - if !ok { + m := attributeToNestedBlockObject(currentScm) + mAttr := m.Attributes + mBlock := m.Blocks + + if v, ok := mAttr[p]; ok { + if i == len(path)-1 { + newV := cb(v).(AttributeBuilder) + mAttr[p] = newV + return mAttr[p], nil + } + castedV := v.(BaseSchemaBuilder) + currentScm = &castedV + } else if v, ok := mBlock[p]; ok { + if i == len(path)-1 { + newV := cb(v).(BlockBuilder) + mBlock[p] = newV + return mBlock[p], nil + } + castedV := v.(BaseSchemaBuilder) + currentScm = &castedV + } else { return nil, fmt.Errorf("missing key %s", p) } - if i == len(path)-1 { - m[p] = cb(v) - return m[p], nil - } - current_scm = &v } return nil, fmt.Errorf("path %v is incomplete", path) } diff --git a/internal/providers/pluginfw/tfschema/customizable_schema_test.go b/internal/providers/pluginfw/tfschema/customizable_schema_test.go index e134b47c2a..d86be910b3 100644 --- a/internal/providers/pluginfw/tfschema/customizable_schema_test.go +++ b/internal/providers/pluginfw/tfschema/customizable_schema_test.go @@ -64,7 +64,7 @@ func TestCustomizeSchemaSetRequired(t *testing.T) { return c }) - assert.True(t, scm.Attributes["nested"].(schema.SingleNestedAttribute).Attributes["enabled"].IsRequired()) + assert.True(t, scm.Blocks["nested"].(schema.ListNestedBlock).NestedObject.Attributes["enabled"].IsRequired()) } func TestCustomizeSchemaSetOptional(t *testing.T) { @@ -82,7 +82,7 @@ func TestCustomizeSchemaSetSensitive(t *testing.T) { return c }) - assert.True(t, scm.Attributes["nested"].(schema.SingleNestedAttribute).Attributes["name"].IsSensitive()) + assert.True(t, scm.Blocks["nested"].(schema.ListNestedBlock).NestedObject.Attributes["name"].IsSensitive()) } func TestCustomizeSchemaSetDeprecated(t *testing.T) { diff --git a/internal/providers/pluginfw/tfschema/float64_attribute.go b/internal/providers/pluginfw/tfschema/float64_attribute.go index 913b747431..ca173df02a 100644 --- a/internal/providers/pluginfw/tfschema/float64_attribute.go +++ b/internal/providers/pluginfw/tfschema/float64_attribute.go @@ -40,7 +40,7 @@ func (a Float64AttributeBuilder) BuildResourceAttribute() schema.Attribute { } } -func (a Float64AttributeBuilder) SetOptional() AttributeBuilder { +func (a Float64AttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -49,7 +49,7 @@ func (a Float64AttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a Float64AttributeBuilder) SetRequired() AttributeBuilder { +func (a Float64AttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -58,7 +58,7 @@ func (a Float64AttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a Float64AttributeBuilder) SetSensitive() AttributeBuilder { +func (a Float64AttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -66,7 +66,7 @@ func (a Float64AttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a Float64AttributeBuilder) SetComputed() AttributeBuilder { +func (a Float64AttributeBuilder) SetComputed() BaseSchemaBuilder 
{ if a.Computed { panic("attribute is already computed") } @@ -74,7 +74,7 @@ func (a Float64AttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a Float64AttributeBuilder) SetReadOnly() AttributeBuilder { +func (a Float64AttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -84,17 +84,17 @@ func (a Float64AttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a Float64AttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a Float64AttributeBuilder) SetDeprecated(msg string) BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a Float64AttributeBuilder) AddValidator(v validator.Float64) AttributeBuilder { +func (a Float64AttributeBuilder) AddValidator(v validator.Float64) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a Float64AttributeBuilder) AddPlanModifier(v planmodifier.Float64) AttributeBuilder { +func (a Float64AttributeBuilder) AddPlanModifier(v planmodifier.Float64) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/int64_attribute.go b/internal/providers/pluginfw/tfschema/int64_attribute.go index 5c8bd9693e..c7cc4892b4 100644 --- a/internal/providers/pluginfw/tfschema/int64_attribute.go +++ b/internal/providers/pluginfw/tfschema/int64_attribute.go @@ -40,7 +40,7 @@ func (a Int64AttributeBuilder) BuildResourceAttribute() schema.Attribute { } } -func (a Int64AttributeBuilder) SetOptional() AttributeBuilder { +func (a Int64AttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -49,7 +49,7 @@ func (a Int64AttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a Int64AttributeBuilder) SetRequired() AttributeBuilder { +func (a Int64AttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -58,7 +58,7 @@ func (a Int64AttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a Int64AttributeBuilder) SetSensitive() AttributeBuilder { +func (a Int64AttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -66,7 +66,7 @@ func (a Int64AttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a Int64AttributeBuilder) SetComputed() AttributeBuilder { +func (a Int64AttributeBuilder) SetComputed() BaseSchemaBuilder { if a.Computed { panic("attribute is already computed") } @@ -74,7 +74,7 @@ func (a Int64AttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a Int64AttributeBuilder) SetReadOnly() AttributeBuilder { +func (a Int64AttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -84,17 +84,17 @@ func (a Int64AttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a Int64AttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a Int64AttributeBuilder) SetDeprecated(msg string) BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a Int64AttributeBuilder) AddValidator(v validator.Int64) AttributeBuilder { +func (a Int64AttributeBuilder) AddValidator(v validator.Int64) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a Int64AttributeBuilder) AddPlanModifier(v planmodifier.Int64) AttributeBuilder { +func (a Int64AttributeBuilder) AddPlanModifier(v 
planmodifier.Int64) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/list_attribute.go b/internal/providers/pluginfw/tfschema/list_attribute.go index 6e58165996..21a2556cb3 100644 --- a/internal/providers/pluginfw/tfschema/list_attribute.go +++ b/internal/providers/pluginfw/tfschema/list_attribute.go @@ -45,7 +45,7 @@ func (a ListAttributeBuilder) BuildResourceAttribute() schema.Attribute { } } -func (a ListAttributeBuilder) SetOptional() AttributeBuilder { +func (a ListAttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -54,7 +54,7 @@ func (a ListAttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a ListAttributeBuilder) SetRequired() AttributeBuilder { +func (a ListAttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -63,7 +63,7 @@ func (a ListAttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a ListAttributeBuilder) SetSensitive() AttributeBuilder { +func (a ListAttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -71,7 +71,7 @@ func (a ListAttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a ListAttributeBuilder) SetComputed() AttributeBuilder { +func (a ListAttributeBuilder) SetComputed() BaseSchemaBuilder { if a.Computed { panic("attribute is already computed") } @@ -79,7 +79,7 @@ func (a ListAttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a ListAttributeBuilder) SetReadOnly() AttributeBuilder { +func (a ListAttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -89,17 +89,17 @@ func (a ListAttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a ListAttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a ListAttributeBuilder) SetDeprecated(msg string) BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a ListAttributeBuilder) AddValidator(v validator.List) AttributeBuilder { +func (a ListAttributeBuilder) AddValidator(v validator.List) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a ListAttributeBuilder) AddPlanModifier(v planmodifier.List) AttributeBuilder { +func (a ListAttributeBuilder) AddPlanModifier(v planmodifier.List) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/list_nested_attribute.go b/internal/providers/pluginfw/tfschema/list_nested_attribute.go index 5d80ec8500..5efc6105f0 100644 --- a/internal/providers/pluginfw/tfschema/list_nested_attribute.go +++ b/internal/providers/pluginfw/tfschema/list_nested_attribute.go @@ -44,7 +44,7 @@ func (a ListNestedAttributeBuilder) BuildResourceAttribute() schema.Attribute { } } -func (a ListNestedAttributeBuilder) SetOptional() AttributeBuilder { +func (a ListNestedAttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -53,7 +53,7 @@ func (a ListNestedAttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a ListNestedAttributeBuilder) SetRequired() AttributeBuilder { +func (a ListNestedAttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -62,7 +62,7 @@ func (a 
ListNestedAttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a ListNestedAttributeBuilder) SetSensitive() AttributeBuilder { +func (a ListNestedAttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -70,7 +70,7 @@ func (a ListNestedAttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a ListNestedAttributeBuilder) SetComputed() AttributeBuilder { +func (a ListNestedAttributeBuilder) SetComputed() BaseSchemaBuilder { if a.Computed { panic("attribute is already computed") } @@ -78,7 +78,7 @@ func (a ListNestedAttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a ListNestedAttributeBuilder) SetReadOnly() AttributeBuilder { +func (a ListNestedAttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -88,17 +88,17 @@ func (a ListNestedAttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a ListNestedAttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a ListNestedAttributeBuilder) SetDeprecated(msg string) BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a ListNestedAttributeBuilder) AddValidator(v validator.List) AttributeBuilder { +func (a ListNestedAttributeBuilder) AddValidator(v validator.List) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a ListNestedAttributeBuilder) AddPlanModifier(v planmodifier.List) AttributeBuilder { +func (a ListNestedAttributeBuilder) AddPlanModifier(v planmodifier.List) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/list_nested_block.go b/internal/providers/pluginfw/tfschema/list_nested_block.go new file mode 100644 index 0000000000..2edb9a6a22 --- /dev/null +++ b/internal/providers/pluginfw/tfschema/list_nested_block.go @@ -0,0 +1,97 @@ +package tfschema + +import ( + dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// ListNestedBlockBuilder represents a list of complex (non-primitive) types. +// To be compatible with our sdkv2 schema, all struct types in the gosdk are represented with this type. 
+type ListNestedBlockBuilder struct { + NestedObject NestedBlockObject + Optional bool + Required bool + Sensitive bool + Computed bool + DeprecationMessage string + Validators []validator.List + PlanModifiers []planmodifier.List +} + +func (a ListNestedBlockBuilder) BuildDataSourceBlock() dataschema.Block { + return dataschema.ListNestedBlock{ + NestedObject: a.NestedObject.BuildDataSourceAttribute(), + DeprecationMessage: a.DeprecationMessage, + Validators: a.Validators, + } +} + +func (a ListNestedBlockBuilder) BuildResourceBlock() schema.Block { + return schema.ListNestedBlock{ + NestedObject: a.NestedObject.BuildResourceAttribute(), + DeprecationMessage: a.DeprecationMessage, + Validators: a.Validators, + PlanModifiers: a.PlanModifiers, + } +} + +func (a ListNestedBlockBuilder) SetOptional() BaseSchemaBuilder { + if a.Optional && !a.Required { + panic("attribute is already optional") + } + a.Optional = true + a.Required = false + return a +} + +func (a ListNestedBlockBuilder) SetRequired() BaseSchemaBuilder { + if !a.Optional && a.Required { + panic("attribute is already required") + } + a.Optional = false + a.Required = true + return a +} + +func (a ListNestedBlockBuilder) SetSensitive() BaseSchemaBuilder { + if a.Sensitive { + panic("attribute is already sensitive") + } + a.Sensitive = true + return a +} + +func (a ListNestedBlockBuilder) SetComputed() BaseSchemaBuilder { + if a.Computed { + panic("attribute is already computed") + } + a.Computed = true + return a +} + +func (a ListNestedBlockBuilder) SetReadOnly() BaseSchemaBuilder { + if a.Computed && !a.Optional && !a.Required { + panic("attribute is already read only") + } + a.Computed = true + a.Optional = false + a.Required = false + return a +} + +func (a ListNestedBlockBuilder) SetDeprecated(msg string) BaseSchemaBuilder { + a.DeprecationMessage = msg + return a +} + +func (a ListNestedBlockBuilder) AddValidator(v validator.List) BaseSchemaBuilder { + a.Validators = append(a.Validators, v) + return a +} + +func (a ListNestedBlockBuilder) AddPlanModifier(v planmodifier.List) BaseSchemaBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/map_attribute.go b/internal/providers/pluginfw/tfschema/map_attribute.go index 3793b444bb..4596462d1c 100644 --- a/internal/providers/pluginfw/tfschema/map_attribute.go +++ b/internal/providers/pluginfw/tfschema/map_attribute.go @@ -45,7 +45,7 @@ func (a MapAttributeBuilder) BuildResourceAttribute() schema.Attribute { } } -func (a MapAttributeBuilder) SetOptional() AttributeBuilder { +func (a MapAttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -54,7 +54,7 @@ func (a MapAttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a MapAttributeBuilder) SetRequired() AttributeBuilder { +func (a MapAttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -63,7 +63,7 @@ func (a MapAttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a MapAttributeBuilder) SetSensitive() AttributeBuilder { +func (a MapAttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -71,7 +71,7 @@ func (a MapAttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a MapAttributeBuilder) SetComputed() AttributeBuilder { +func (a MapAttributeBuilder) SetComputed() BaseSchemaBuilder { if a.Computed { panic("attribute is 
already computed") } @@ -79,7 +79,7 @@ func (a MapAttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a MapAttributeBuilder) SetReadOnly() AttributeBuilder { +func (a MapAttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -89,17 +89,17 @@ func (a MapAttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a MapAttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a MapAttributeBuilder) SetDeprecated(msg string) BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a MapAttributeBuilder) AddValidator(v validator.Map) AttributeBuilder { +func (a MapAttributeBuilder) AddValidator(v validator.Map) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a MapAttributeBuilder) AddPlanModifier(v planmodifier.Map) AttributeBuilder { +func (a MapAttributeBuilder) AddPlanModifier(v planmodifier.Map) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/map_nested_attribute.go b/internal/providers/pluginfw/tfschema/map_nested_attribute.go index bfcf5da968..08cb0a0bce 100644 --- a/internal/providers/pluginfw/tfschema/map_nested_attribute.go +++ b/internal/providers/pluginfw/tfschema/map_nested_attribute.go @@ -44,7 +44,7 @@ func (a MapNestedAttributeBuilder) BuildResourceAttribute() schema.Attribute { } } -func (a MapNestedAttributeBuilder) SetOptional() AttributeBuilder { +func (a MapNestedAttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -53,7 +53,7 @@ func (a MapNestedAttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a MapNestedAttributeBuilder) SetRequired() AttributeBuilder { +func (a MapNestedAttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -62,7 +62,7 @@ func (a MapNestedAttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a MapNestedAttributeBuilder) SetSensitive() AttributeBuilder { +func (a MapNestedAttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -70,7 +70,7 @@ func (a MapNestedAttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a MapNestedAttributeBuilder) SetComputed() AttributeBuilder { +func (a MapNestedAttributeBuilder) SetComputed() BaseSchemaBuilder { if a.Computed { panic("attribute is already computed") } @@ -78,7 +78,7 @@ func (a MapNestedAttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a MapNestedAttributeBuilder) SetReadOnly() AttributeBuilder { +func (a MapNestedAttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -88,17 +88,17 @@ func (a MapNestedAttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a MapNestedAttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a MapNestedAttributeBuilder) SetDeprecated(msg string) BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a MapNestedAttributeBuilder) AddValidator(v validator.Map) AttributeBuilder { +func (a MapNestedAttributeBuilder) AddValidator(v validator.Map) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a MapNestedAttributeBuilder) AddPlanModifier(v planmodifier.Map) AttributeBuilder { +func (a MapNestedAttributeBuilder) AddPlanModifier(v 
planmodifier.Map) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/nested_block_object.go b/internal/providers/pluginfw/tfschema/nested_block_object.go new file mode 100644 index 0000000000..2f9853eb76 --- /dev/null +++ b/internal/providers/pluginfw/tfschema/nested_block_object.go @@ -0,0 +1,32 @@ +package tfschema + +import ( + dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" +) + +// NestedBlockObject is the intermediate type for nested complex (non-primitive) types. +type NestedBlockObject struct { + Attributes map[string]AttributeBuilder + Blocks map[string]BlockBuilder +} + +func (a NestedBlockObject) BuildDataSourceAttribute() dataschema.NestedBlockObject { + dataSourceAttributes := BuildDataSourceAttributeMap(a.Attributes) + dataSourceBlocks := BuildDataSourceBlockMap(a.Blocks) + + return dataschema.NestedBlockObject{ + Attributes: dataSourceAttributes, + Blocks: dataSourceBlocks, + } +} + +func (a NestedBlockObject) BuildResourceAttribute() schema.NestedBlockObject { + resourceAttributes := BuildResourceAttributeMap(a.Attributes) + resourceBlocks := BuildResourceBlockMap(a.Blocks) + + return schema.NestedBlockObject{ + Attributes: resourceAttributes, + Blocks: resourceBlocks, + } +} diff --git a/internal/providers/pluginfw/tfschema/single_nested_attribute.go b/internal/providers/pluginfw/tfschema/single_nested_attribute.go index ee234db903..bd2a960557 100644 --- a/internal/providers/pluginfw/tfschema/single_nested_attribute.go +++ b/internal/providers/pluginfw/tfschema/single_nested_attribute.go @@ -44,7 +44,7 @@ func (a SingleNestedAttributeBuilder) BuildResourceAttribute() schema.Attribute } } -func (a SingleNestedAttributeBuilder) SetOptional() AttributeBuilder { +func (a SingleNestedAttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -53,7 +53,7 @@ func (a SingleNestedAttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a SingleNestedAttributeBuilder) SetRequired() AttributeBuilder { +func (a SingleNestedAttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -62,7 +62,7 @@ func (a SingleNestedAttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a SingleNestedAttributeBuilder) SetSensitive() AttributeBuilder { +func (a SingleNestedAttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -70,7 +70,7 @@ func (a SingleNestedAttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a SingleNestedAttributeBuilder) SetComputed() AttributeBuilder { +func (a SingleNestedAttributeBuilder) SetComputed() BaseSchemaBuilder { if a.Computed { panic("attribute is already computed") } @@ -78,7 +78,7 @@ func (a SingleNestedAttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a SingleNestedAttributeBuilder) SetReadOnly() AttributeBuilder { +func (a SingleNestedAttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -88,17 +88,17 @@ func (a SingleNestedAttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a SingleNestedAttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a SingleNestedAttributeBuilder) SetDeprecated(msg string) 
BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a SingleNestedAttributeBuilder) AddValidator(v validator.Object) AttributeBuilder { +func (a SingleNestedAttributeBuilder) AddValidator(v validator.Object) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a SingleNestedAttributeBuilder) AddPlanModifier(v planmodifier.Object) AttributeBuilder { +func (a SingleNestedAttributeBuilder) AddPlanModifier(v planmodifier.Object) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/single_nested_block.go b/internal/providers/pluginfw/tfschema/single_nested_block.go new file mode 100644 index 0000000000..aace19618b --- /dev/null +++ b/internal/providers/pluginfw/tfschema/single_nested_block.go @@ -0,0 +1,109 @@ +package tfschema + +import ( + "fmt" + + "github.com/databricks/terraform-provider-databricks/common" + dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +// SingleNestedBlockBuilder represents a single nested complex (non-primitive) type. +type SingleNestedBlockBuilder struct { + NestedObject NestedBlockObject + Optional bool + Required bool + Sensitive bool + Computed bool + DeprecationMessage string + Validators []validator.Object + PlanModifiers []planmodifier.Object +} + +func (a SingleNestedBlockBuilder) BuildDataSourceAttribute() dataschema.Attribute { + panic(fmt.Errorf("BuildDataSourceBlock should never be called for SingleNestedBlockBuilder. %s", common.TerraformBugErrorMessage)) +} + +func (a SingleNestedBlockBuilder) BuildResourceAttribute() schema.Attribute { + panic(fmt.Errorf("BuildResourceBlock should never be called for SingleNestedBlockBuilder. 
%s", common.TerraformBugErrorMessage)) +} + +func (a SingleNestedBlockBuilder) BuildDataSourceBlock() dataschema.Block { + return dataschema.SingleNestedBlock{ + Attributes: a.NestedObject.BuildDataSourceAttribute().Attributes, + Blocks: a.NestedObject.BuildDataSourceAttribute().Blocks, + DeprecationMessage: a.DeprecationMessage, + Validators: a.Validators, + } +} + +func (a SingleNestedBlockBuilder) BuildResourceBlock() schema.Block { + return schema.SingleNestedBlock{ + Attributes: a.NestedObject.BuildResourceAttribute().Attributes, + Blocks: a.NestedObject.BuildResourceAttribute().Blocks, + DeprecationMessage: a.DeprecationMessage, + Validators: a.Validators, + PlanModifiers: a.PlanModifiers, + } +} + +func (a SingleNestedBlockBuilder) SetOptional() BaseSchemaBuilder { + if a.Optional && !a.Required { + panic("attribute is already optional") + } + a.Optional = true + a.Required = false + return a +} + +func (a SingleNestedBlockBuilder) SetRequired() BaseSchemaBuilder { + if !a.Optional && a.Required { + panic("attribute is already required") + } + a.Optional = false + a.Required = true + return a +} + +func (a SingleNestedBlockBuilder) SetSensitive() BaseSchemaBuilder { + if a.Sensitive { + panic("attribute is already sensitive") + } + a.Sensitive = true + return a +} + +func (a SingleNestedBlockBuilder) SetComputed() BaseSchemaBuilder { + if a.Computed { + panic("attribute is already computed") + } + a.Computed = true + return a +} + +func (a SingleNestedBlockBuilder) SetReadOnly() BaseSchemaBuilder { + if a.Computed && !a.Optional && !a.Required { + panic("attribute is already read only") + } + a.Computed = true + a.Optional = false + a.Required = false + return a +} + +func (a SingleNestedBlockBuilder) SetDeprecated(msg string) BaseSchemaBuilder { + a.DeprecationMessage = msg + return a +} + +func (a SingleNestedBlockBuilder) AddValidator(v validator.Object) BaseSchemaBuilder { + a.Validators = append(a.Validators, v) + return a +} + +func (a SingleNestedBlockBuilder) AddPlanModifier(v planmodifier.Object) BaseSchemaBuilder { + a.PlanModifiers = append(a.PlanModifiers, v) + return a +} diff --git a/internal/providers/pluginfw/tfschema/string_attribute.go b/internal/providers/pluginfw/tfschema/string_attribute.go index 6b81b939f6..e08be60c06 100644 --- a/internal/providers/pluginfw/tfschema/string_attribute.go +++ b/internal/providers/pluginfw/tfschema/string_attribute.go @@ -40,7 +40,7 @@ func (a StringAttributeBuilder) BuildResourceAttribute() schema.Attribute { } } -func (a StringAttributeBuilder) SetOptional() AttributeBuilder { +func (a StringAttributeBuilder) SetOptional() BaseSchemaBuilder { if a.Optional && !a.Required { panic("attribute is already optional") } @@ -49,7 +49,7 @@ func (a StringAttributeBuilder) SetOptional() AttributeBuilder { return a } -func (a StringAttributeBuilder) SetRequired() AttributeBuilder { +func (a StringAttributeBuilder) SetRequired() BaseSchemaBuilder { if !a.Optional && a.Required { panic("attribute is already required") } @@ -58,7 +58,7 @@ func (a StringAttributeBuilder) SetRequired() AttributeBuilder { return a } -func (a StringAttributeBuilder) SetSensitive() AttributeBuilder { +func (a StringAttributeBuilder) SetSensitive() BaseSchemaBuilder { if a.Sensitive { panic("attribute is already sensitive") } @@ -66,7 +66,7 @@ func (a StringAttributeBuilder) SetSensitive() AttributeBuilder { return a } -func (a StringAttributeBuilder) SetComputed() AttributeBuilder { +func (a StringAttributeBuilder) SetComputed() BaseSchemaBuilder { if a.Computed { 
panic("attribute is already computed") } @@ -74,7 +74,7 @@ func (a StringAttributeBuilder) SetComputed() AttributeBuilder { return a } -func (a StringAttributeBuilder) SetReadOnly() AttributeBuilder { +func (a StringAttributeBuilder) SetReadOnly() BaseSchemaBuilder { if a.Computed && !a.Optional && !a.Required { panic("attribute is already read only") } @@ -84,17 +84,17 @@ func (a StringAttributeBuilder) SetReadOnly() AttributeBuilder { return a } -func (a StringAttributeBuilder) SetDeprecated(msg string) AttributeBuilder { +func (a StringAttributeBuilder) SetDeprecated(msg string) BaseSchemaBuilder { a.DeprecationMessage = msg return a } -func (a StringAttributeBuilder) AddValidator(v validator.String) AttributeBuilder { +func (a StringAttributeBuilder) AddValidator(v validator.String) BaseSchemaBuilder { a.Validators = append(a.Validators, v) return a } -func (a StringAttributeBuilder) AddPlanModifier(v planmodifier.String) AttributeBuilder { +func (a StringAttributeBuilder) AddPlanModifier(v planmodifier.String) BaseSchemaBuilder { a.PlanModifiers = append(a.PlanModifiers, v) return a } diff --git a/internal/providers/pluginfw/tfschema/struct_to_schema.go b/internal/providers/pluginfw/tfschema/struct_to_schema.go index 2ac1303e19..aa473e3014 100644 --- a/internal/providers/pluginfw/tfschema/struct_to_schema.go +++ b/internal/providers/pluginfw/tfschema/struct_to_schema.go @@ -12,8 +12,9 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -func typeToSchema(v reflect.Value) map[string]AttributeBuilder { - scm := map[string]AttributeBuilder{} +func typeToSchema(v reflect.Value) NestedBlockObject { + scmAttr := map[string]AttributeBuilder{} + scmBlock := map[string]BlockBuilder{} rk := v.Kind() if rk == reflect.Ptr { v = v.Elem() @@ -49,28 +50,28 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { } switch elemType { case reflect.TypeOf(types.Bool{}): - scm[fieldName] = ListAttributeBuilder{ + scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.BoolType, Optional: isOptional, Required: !isOptional, Computed: isComputed, } case reflect.TypeOf(types.Int64{}): - scm[fieldName] = ListAttributeBuilder{ + scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.Int64Type, Optional: isOptional, Required: !isOptional, Computed: isComputed, } case reflect.TypeOf(types.Float64{}): - scm[fieldName] = ListAttributeBuilder{ + scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.Float64Type, Optional: isOptional, Required: !isOptional, Computed: isComputed, } case reflect.TypeOf(types.String{}): - scm[fieldName] = ListAttributeBuilder{ + scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.StringType, Optional: isOptional, Required: !isOptional, @@ -79,9 +80,10 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { default: // Nested struct nestedScm := typeToSchema(reflect.New(elemType).Elem()) - scm[fieldName] = ListNestedAttributeBuilder{ - NestedObject: NestedAttributeObject{ - Attributes: nestedScm, + scmBlock[fieldName] = ListNestedBlockBuilder{ + NestedObject: NestedBlockObject{ + Attributes: nestedScm.Attributes, + Blocks: nestedScm.Blocks, }, Optional: isOptional, Required: !isOptional, @@ -98,28 +100,28 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { } switch elemType { case reflect.TypeOf(types.Bool{}): - scm[fieldName] = MapAttributeBuilder{ + scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.BoolType, Optional: isOptional, Required: !isOptional, Computed: isComputed, } case 
reflect.TypeOf(types.Int64{}): - scm[fieldName] = MapAttributeBuilder{ + scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.Int64Type, Optional: isOptional, Required: !isOptional, Computed: isComputed, } case reflect.TypeOf(types.Float64{}): - scm[fieldName] = MapAttributeBuilder{ + scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.Float64Type, Optional: isOptional, Required: !isOptional, Computed: isComputed, } case reflect.TypeOf(types.String{}): - scm[fieldName] = MapAttributeBuilder{ + scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.StringType, Optional: isOptional, Required: !isOptional, @@ -128,9 +130,9 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { default: // Nested struct nestedScm := typeToSchema(reflect.New(elemType).Elem()) - scm[fieldName] = MapNestedAttributeBuilder{ + scmAttr[fieldName] = MapNestedAttributeBuilder{ NestedObject: NestedAttributeObject{ - Attributes: nestedScm, + Attributes: nestedScm.Attributes, }, Optional: isOptional, Required: !isOptional, @@ -140,25 +142,25 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { } else if kind == reflect.Struct { switch value.Interface().(type) { case types.Bool: - scm[fieldName] = BoolAttributeBuilder{ + scmAttr[fieldName] = BoolAttributeBuilder{ Optional: isOptional, Required: !isOptional, Computed: isComputed, } case types.Int64: - scm[fieldName] = Int64AttributeBuilder{ + scmAttr[fieldName] = Int64AttributeBuilder{ Optional: isOptional, Required: !isOptional, Computed: isComputed, } case types.Float64: - scm[fieldName] = Float64AttributeBuilder{ + scmAttr[fieldName] = Float64AttributeBuilder{ Optional: isOptional, Required: !isOptional, Computed: isComputed, } case types.String: - scm[fieldName] = StringAttributeBuilder{ + scmAttr[fieldName] = StringAttributeBuilder{ Optional: isOptional, Required: !isOptional, Computed: isComputed, @@ -172,18 +174,18 @@ func typeToSchema(v reflect.Value) map[string]AttributeBuilder { elem := typeFieldType sv := reflect.New(elem) nestedScm := typeToSchema(sv) - scm[fieldName] = SingleNestedAttributeBuilder{ - Attributes: nestedScm, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + scmBlock[fieldName] = ListNestedBlockBuilder{ + NestedObject: nestedScm, + Optional: isOptional, + Required: !isOptional, + Computed: isComputed, } } } else { panic(fmt.Errorf("unknown type for field: %s. %s", typeField.Name, common.TerraformBugErrorMessage)) } } - return scm + return NestedBlockObject{Attributes: scmAttr, Blocks: scmBlock} } func fieldIsComputed(field reflect.StructField) bool { @@ -198,36 +200,36 @@ func fieldIsOptional(field reflect.StructField) bool { // ResourceStructToSchema builds a resource schema from a tfsdk struct, with custoimzations applied. func ResourceStructToSchema(v any, customizeSchema func(CustomizableSchema) CustomizableSchema) schema.Schema { - attributes := ResourceStructToSchemaMap(v, customizeSchema) - return schema.Schema{Attributes: attributes} + attributes, blocks := ResourceStructToSchemaMap(v, customizeSchema) + return schema.Schema{Attributes: attributes, Blocks: blocks} } // DataSourceStructToSchema builds a data source schema from a tfsdk struct, with custoimzations applied. 
func DataSourceStructToSchema(v any, customizeSchema func(CustomizableSchema) CustomizableSchema) dataschema.Schema { - attributes := DataSourceStructToSchemaMap(v, customizeSchema) - return dataschema.Schema{Attributes: attributes} + attributes, blocks := DataSourceStructToSchemaMap(v, customizeSchema) + return dataschema.Schema{Attributes: attributes, Blocks: blocks} } -// ResourceStructToSchemaMap returns a map from string to resource schema attributes using a tfsdk struct, with custoimzations applied. -func ResourceStructToSchemaMap(v any, customizeSchema func(CustomizableSchema) CustomizableSchema) map[string]schema.Attribute { - attributes := typeToSchema(reflect.ValueOf(v)) +// ResourceStructToSchemaMap returns two maps from string to resource schema attributes and blocks using a tfsdk struct, with customizations applied. +func ResourceStructToSchemaMap(v any, customizeSchema func(CustomizableSchema) CustomizableSchema) (map[string]schema.Attribute, map[string]schema.Block) { + nestedBlockObj := typeToSchema(reflect.ValueOf(v)) if customizeSchema != nil { - cs := customizeSchema(*ConstructCustomizableSchema(attributes)) - return BuildResourceAttributeMap(cs.ToAttributeMap()) + cs := customizeSchema(*ConstructCustomizableSchema(nestedBlockObj)) + return BuildResourceAttributeMap(cs.ToNestedBlockObject().Attributes), BuildResourceBlockMap(cs.ToNestedBlockObject().Blocks) } else { - return BuildResourceAttributeMap(attributes) + return BuildResourceAttributeMap(nestedBlockObj.Attributes), BuildResourceBlockMap(nestedBlockObj.Blocks) } } -// DataSourceStructToSchemaMap returns a map from string to data source schema attributes using a tfsdk struct, with custoimzations applied. -func DataSourceStructToSchemaMap(v any, customizeSchema func(CustomizableSchema) CustomizableSchema) map[string]dataschema.Attribute { - attributes := typeToSchema(reflect.ValueOf(v)) +// DataSourceStructToSchemaMap returns two maps from string to data source schema attributes and blocks using a tfsdk struct, with customizations applied. 
+func DataSourceStructToSchemaMap(v any, customizeSchema func(CustomizableSchema) CustomizableSchema) (map[string]dataschema.Attribute, map[string]dataschema.Block) { + nestedBlockObj := typeToSchema(reflect.ValueOf(v)) if customizeSchema != nil { - cs := customizeSchema(*ConstructCustomizableSchema(attributes)) - return BuildDataSourceAttributeMap(cs.ToAttributeMap()) + cs := customizeSchema(*ConstructCustomizableSchema(nestedBlockObj)) + return BuildDataSourceAttributeMap(cs.ToNestedBlockObject().Attributes), BuildDataSourceBlockMap(cs.ToNestedBlockObject().Blocks) } else { - return BuildDataSourceAttributeMap(attributes) + return BuildDataSourceAttributeMap(nestedBlockObj.Attributes), BuildDataSourceBlockMap(nestedBlockObj.Blocks) } } diff --git a/internal/providers/pluginfw/tfschema/struct_to_schema_test.go b/internal/providers/pluginfw/tfschema/struct_to_schema_test.go index f566c5feab..a6005844c0 100644 --- a/internal/providers/pluginfw/tfschema/struct_to_schema_test.go +++ b/internal/providers/pluginfw/tfschema/struct_to_schema_test.go @@ -52,7 +52,7 @@ type DummyNested struct { } type DummyDoubleNested struct { - Nested *DummyNested `tfsdk:"nested" tf:"optional"` + Nested []*DummyNested `tfsdk:"nested" tf:"optional"` } type TestNestedMapTfSdk struct { @@ -60,11 +60,11 @@ type TestNestedMapTfSdk struct { } type TestPointerTfSdk struct { - Nested *DummyNested `tfsdk:"nested" tf:"optional"` + Nested *[]DummyNested `tfsdk:"nested" tf:"optional"` } type TestNestedPointerTfSdk struct { - Nested DummyDoubleNested `tfsdk:"nested" tf:"optional"` + Nested []DummyDoubleNested `tfsdk:"nested" tf:"optional"` } var tests = []struct { @@ -124,19 +124,25 @@ var tests = []struct { { "pointer to a struct conversion", TestPointerTfSdk{ - &DummyNested{ - Name: types.StringValue("def"), - Enabled: types.BoolValue(true), + &[]DummyNested{ + { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, }, }, }, { "nested pointer to a struct conversion", TestNestedPointerTfSdk{ - DummyDoubleNested{ - Nested: &DummyNested{ - Name: types.StringValue("def"), - Enabled: types.BoolValue(true), + []DummyDoubleNested{ + { + Nested: []*DummyNested{ + { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + }, }, }, }, diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 41a6990157..ab9b6220dc 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -16,11 +16,11 @@ import ( type App struct { // The active deployment of the app. - ActiveDeployment *AppDeployment `tfsdk:"active_deployment" tf:"optional"` + ActiveDeployment []AppDeployment `tfsdk:"active_deployment" tf:"optional"` - AppStatus *ApplicationStatus `tfsdk:"app_status" tf:"optional"` + AppStatus []ApplicationStatus `tfsdk:"app_status" tf:"optional"` - ComputeStatus *ComputeStatus `tfsdk:"compute_status" tf:"optional"` + ComputeStatus []ComputeStatus `tfsdk:"compute_status" tf:"optional"` // The creation time of the app. Formatted timestamp in ISO 6801. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The email of the user that created the app. @@ -35,7 +35,7 @@ type App struct { // characters and hyphens. It must be unique within the workspace. Name types.String `tfsdk:"name" tf:""` // The pending deployment of the app. - PendingDeployment *AppDeployment `tfsdk:"pending_deployment" tf:"optional"` + PendingDeployment []AppDeployment `tfsdk:"pending_deployment" tf:"optional"` // Resources for the app. 
Resources []AppResource `tfsdk:"resources" tf:"optional"` @@ -80,7 +80,7 @@ type AppDeployment struct { // The email of the user creates the deployment. Creator types.String `tfsdk:"creator" tf:"optional"` // The deployment artifacts for an app. - DeploymentArtifacts *AppDeploymentArtifacts `tfsdk:"deployment_artifacts" tf:"optional"` + DeploymentArtifacts []AppDeploymentArtifacts `tfsdk:"deployment_artifacts" tf:"optional"` // The unique id of the deployment. DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"` // The mode of which the deployment will manage the source code. @@ -94,7 +94,7 @@ type AppDeployment struct { // the deployment. SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` // Status and status message of the deployment - Status *AppDeploymentStatus `tfsdk:"status" tf:"optional"` + Status []AppDeploymentStatus `tfsdk:"status" tf:"optional"` // The update time of the deployment. Formatted timestamp in ISO 6801. UpdateTime types.String `tfsdk:"update_time" tf:"optional"` } @@ -144,15 +144,15 @@ type AppResource struct { // Description of the App Resource. Description types.String `tfsdk:"description" tf:"optional"` - Job *AppResourceJob `tfsdk:"job" tf:"optional"` + Job []AppResourceJob `tfsdk:"job" tf:"optional"` // Name of the App Resource. Name types.String `tfsdk:"name" tf:""` - Secret *AppResourceSecret `tfsdk:"secret" tf:"optional"` + Secret []AppResourceSecret `tfsdk:"secret" tf:"optional"` - ServingEndpoint *AppResourceServingEndpoint `tfsdk:"serving_endpoint" tf:"optional"` + ServingEndpoint []AppResourceServingEndpoint `tfsdk:"serving_endpoint" tf:"optional"` - SqlWarehouse *AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional"` + SqlWarehouse []AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional"` } type AppResourceJob struct { diff --git a/internal/service/billing_tf/model.go b/internal/service/billing_tf/model.go index 75c5062896..8eba23e7e3 100755 --- a/internal/service/billing_tf/model.go +++ b/internal/service/billing_tf/model.go @@ -60,7 +60,7 @@ type BudgetConfiguration struct { // usage to limit the scope of what is considered for this budget. Leave // empty to include all usage for this account. All provided filters must be // matched for usage to be included. - Filter *BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` // Update time of this budget configuration. UpdateTime types.Int64 `tfsdk:"update_time" tf:"optional"` } @@ -71,7 +71,7 @@ type BudgetConfigurationFilter struct { // be entered exactly as they appear in your usage data. Tags []BudgetConfigurationFilterTagClause `tfsdk:"tags" tf:"optional"` // If provided, usage must match with the provided Databricks workspace IDs. - WorkspaceId *BudgetConfigurationFilterWorkspaceIdClause `tfsdk:"workspace_id" tf:"optional"` + WorkspaceId []BudgetConfigurationFilterWorkspaceIdClause `tfsdk:"workspace_id" tf:"optional"` } type BudgetConfigurationFilterClause struct { @@ -83,7 +83,7 @@ type BudgetConfigurationFilterClause struct { type BudgetConfigurationFilterTagClause struct { Key types.String `tfsdk:"key" tf:"optional"` - Value *BudgetConfigurationFilterClause `tfsdk:"value" tf:"optional"` + Value []BudgetConfigurationFilterClause `tfsdk:"value" tf:"optional"` } type BudgetConfigurationFilterWorkspaceIdClause struct { @@ -119,7 +119,7 @@ type CreateBudgetConfigurationBudget struct { // usage to limit the scope of what is considered for this budget. 
Leave // empty to include all usage for this account. All provided filters must be // matched for usage to be included. - Filter *BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` } type CreateBudgetConfigurationBudgetActionConfigurations struct { @@ -148,12 +148,12 @@ type CreateBudgetConfigurationBudgetAlertConfigurations struct { type CreateBudgetConfigurationRequest struct { // Properties of the new budget configuration. - Budget CreateBudgetConfigurationBudget `tfsdk:"budget" tf:""` + Budget []CreateBudgetConfigurationBudget `tfsdk:"budget" tf:""` } type CreateBudgetConfigurationResponse struct { // The created budget configuration. - Budget *BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` } type CreateLogDeliveryConfigurationParams struct { @@ -280,7 +280,7 @@ type GetBudgetConfigurationRequest struct { } type GetBudgetConfigurationResponse struct { - Budget *BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` } // Get log delivery configuration @@ -342,7 +342,7 @@ type LogDeliveryConfiguration struct { // available for usage before March 2019 (`2019-03`). DeliveryStartTime types.String `tfsdk:"delivery_start_time" tf:"optional"` // Databricks log delivery status. - LogDeliveryStatus *LogDeliveryStatus `tfsdk:"log_delivery_status" tf:"optional"` + LogDeliveryStatus []LogDeliveryStatus `tfsdk:"log_delivery_status" tf:"optional"` // Log delivery type. Supported values are: // // * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the @@ -438,20 +438,20 @@ type UpdateBudgetConfigurationBudget struct { // usage to limit the scope of what is considered for this budget. Leave // empty to include all usage for this account. All provided filters must be // matched for usage to be included. - Filter *BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` } type UpdateBudgetConfigurationRequest struct { // The updated budget. This will overwrite the budget specified by the // budget ID. - Budget UpdateBudgetConfigurationBudget `tfsdk:"budget" tf:""` + Budget []UpdateBudgetConfigurationBudget `tfsdk:"budget" tf:""` // The Databricks budget configuration ID. BudgetId types.String `tfsdk:"-"` } type UpdateBudgetConfigurationResponse struct { // The updated budget. 
- Budget *BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` } type UpdateLogDeliveryConfigurationStatusRequest struct { @@ -466,11 +466,11 @@ type UpdateLogDeliveryConfigurationStatusRequest struct { } type WrappedCreateLogDeliveryConfiguration struct { - LogDeliveryConfiguration *CreateLogDeliveryConfigurationParams `tfsdk:"log_delivery_configuration" tf:"optional"` + LogDeliveryConfiguration []CreateLogDeliveryConfigurationParams `tfsdk:"log_delivery_configuration" tf:"optional"` } type WrappedLogDeliveryConfiguration struct { - LogDeliveryConfiguration *LogDeliveryConfiguration `tfsdk:"log_delivery_configuration" tf:"optional"` + LogDeliveryConfiguration []LogDeliveryConfiguration `tfsdk:"log_delivery_configuration" tf:"optional"` } type WrappedLogDeliveryConfigurations struct { diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index 358885d57d..dea0528dfb 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -15,11 +15,11 @@ import ( ) type AccountsCreateMetastore struct { - MetastoreInfo *CreateMetastore `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []CreateMetastore `tfsdk:"metastore_info" tf:"optional"` } type AccountsCreateMetastoreAssignment struct { - MetastoreAssignment *CreateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []CreateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Workspace ID. @@ -27,32 +27,32 @@ type AccountsCreateMetastoreAssignment struct { } type AccountsCreateStorageCredential struct { - CredentialInfo *CreateStorageCredential `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []CreateStorageCredential `tfsdk:"credential_info" tf:"optional"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` } type AccountsMetastoreAssignment struct { - MetastoreAssignment *MetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []MetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` } type AccountsMetastoreInfo struct { - MetastoreInfo *MetastoreInfo `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []MetastoreInfo `tfsdk:"metastore_info" tf:"optional"` } type AccountsStorageCredentialInfo struct { - CredentialInfo *StorageCredentialInfo `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []StorageCredentialInfo `tfsdk:"credential_info" tf:"optional"` } type AccountsUpdateMetastore struct { // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` - MetastoreInfo *UpdateMetastore `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []UpdateMetastore `tfsdk:"metastore_info" tf:"optional"` } type AccountsUpdateMetastoreAssignment struct { - MetastoreAssignment *UpdateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []UpdateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Workspace ID. @@ -60,7 +60,7 @@ type AccountsUpdateMetastoreAssignment struct { } type AccountsUpdateStorageCredential struct { - CredentialInfo *UpdateStorageCredential `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []UpdateStorageCredential `tfsdk:"credential_info" tf:"optional"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Name of the storage credential. 
@@ -194,7 +194,7 @@ type CatalogInfo struct { // Username of catalog creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` - EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` @@ -219,7 +219,7 @@ type CatalogInfo struct { // remote sharing server. ProviderName types.String `tfsdk:"provider_name" tf:"optional"` // Status of an asynchronously provisioned resource. - ProvisioningInfo *ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional"` + ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional"` // Kind of catalog securable. SecurableKind types.String `tfsdk:"securable_kind" tf:"optional"` @@ -249,7 +249,7 @@ type ColumnInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` - Mask *ColumnMask `tfsdk:"mask" tf:"optional"` + Mask []ColumnMask `tfsdk:"mask" tf:"optional"` // Name of Column. Name types.String `tfsdk:"name" tf:"optional"` // Whether field may be Null (default: true). @@ -309,7 +309,7 @@ type ConnectionInfo struct { // connection. Properties map[string]types.String `tfsdk:"properties" tf:"optional"` // Status of an asynchronously provisioned resource. - ProvisioningInfo *ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional"` + ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional"` // If the connection is read only. ReadOnly types.Bool `tfsdk:"read_only" tf:"optional"` // Kind of connection securable. @@ -328,7 +328,7 @@ type ConnectionInfo struct { // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. type ContinuousUpdateStatus struct { // Progress of the initial data synchronization. - InitialPipelineSyncProgress *PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional"` + InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional"` // The last source table Delta version that was synced to the online table. // Note that this Delta version may not be completely synced to the online // table yet. @@ -384,7 +384,7 @@ type CreateExternalLocation struct { // Name of the storage credential used with this location. CredentialName types.String `tfsdk:"credential_name" tf:""` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. @@ -414,7 +414,7 @@ type CreateFunction struct { // Pretty printed function data type. FullDataType types.String `tfsdk:"full_data_type" tf:""` - InputParams FunctionParameterInfos `tfsdk:"input_params" tf:""` + InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:""` // Whether the function is deterministic. IsDeterministic types.Bool `tfsdk:"is_deterministic" tf:""` // Function null call. 
@@ -426,7 +426,7 @@ type CreateFunction struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties types.String `tfsdk:"properties" tf:"optional"` // Table function return parameters. - ReturnParams *FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` + ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` // Function language. When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -436,7 +436,7 @@ type CreateFunction struct { // Function body. RoutineDefinition types.String `tfsdk:"routine_definition" tf:""` // Function dependencies. - RoutineDependencies *DependencyList `tfsdk:"routine_dependencies" tf:"optional"` + RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:""` // Function security type. @@ -451,7 +451,7 @@ type CreateFunction struct { type CreateFunctionRequest struct { // Partial __FunctionInfo__ specifying the function to be created. - FunctionInfo CreateFunction `tfsdk:"function_info" tf:""` + FunctionInfo []CreateFunction `tfsdk:"function_info" tf:""` } type CreateMetastore struct { @@ -489,15 +489,15 @@ type CreateMonitor struct { // drift metrics (comparing metrics across time windows). CustomMetrics []MonitorMetric `tfsdk:"custom_metrics" tf:"optional"` // The data classification config for the monitor. - DataClassificationConfig *MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` // Configuration for monitoring inference logs. - InferenceLog *MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` // The notification settings for the monitor. - Notifications *MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule *MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` // Whether to skip creating a default dashboard summarizing data quality // metrics. SkipBuiltinDashboard types.Bool `tfsdk:"skip_builtin_dashboard" tf:"optional"` @@ -508,11 +508,11 @@ type CreateMonitor struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot *MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` // Full name of the table. TableName types.String `tfsdk:"-"` // Configuration for monitoring time series tables. - TimeSeries *MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` // Optional argument to specify the warehouse for dashboard creation. If not // specified, the first running warehouse will be used. 
WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` @@ -523,7 +523,7 @@ type CreateOnlineTableRequest struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"name" tf:"optional"` // Specification of the online table. - Spec *OnlineTableSpec `tfsdk:"spec" tf:"optional"` + Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional"` } type CreateRegisteredModelRequest struct { @@ -558,17 +558,17 @@ type CreateSchema struct { type CreateStorageCredential struct { // The AWS IAM role configuration. - AwsIamRole *AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` // The Azure managed identity configuration. - AzureManagedIdentity *AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional"` // The Azure service principal configuration. - AzureServicePrincipal *AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` // The Cloudflare API token configuration. - CloudflareApiToken *CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` // The Databricks managed GCP service account configuration. - DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` // The credential name. The name must be unique within the metastore. Name types.String `tfsdk:"name" tf:""` // Whether the storage credential is only usable for read operations. @@ -582,7 +582,7 @@ type CreateTableConstraint struct { // A table constraint, as defined by *one* of the following fields being // set: __primary_key_constraint__, __foreign_key_constraint__, // __named_table_constraint__. - Constraint TableConstraint `tfsdk:"constraint" tf:""` + Constraint []TableConstraint `tfsdk:"constraint" tf:""` // The full name of the table referenced by the constraint. FullNameArg types.String `tfsdk:"full_name_arg" tf:""` } @@ -778,9 +778,9 @@ type DeltaRuntimePropertiesKvPairs struct { // field must be defined. type Dependency struct { // A function that is dependent on a SQL object. - Function *FunctionDependency `tfsdk:"function" tf:"optional"` + Function []FunctionDependency `tfsdk:"function" tf:"optional"` // A table that is dependent on a SQL object. - Table *TableDependency `tfsdk:"table" tf:"optional"` + Table []TableDependency `tfsdk:"table" tf:"optional"` } // A list of dependencies. @@ -853,7 +853,7 @@ type EnableResponse struct { // Encryption options that apply to clients connecting to cloud storage. type EncryptionDetails struct { // Server-Side Encryption properties for clients communicating with AWS s3. - SseEncryptionDetails *SseEncryptionDetails `tfsdk:"sse_encryption_details" tf:"optional"` + SseEncryptionDetails []SseEncryptionDetails `tfsdk:"sse_encryption_details" tf:"optional"` } // Get boolean reflecting if table exists @@ -880,7 +880,7 @@ type ExternalLocationInfo struct { // Name of the storage credential used with this location. 
CredentialName types.String `tfsdk:"credential_name" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. @@ -964,7 +964,7 @@ type FunctionInfo struct { // Id of Function, relative to parent schema. FunctionId types.String `tfsdk:"function_id" tf:"optional"` - InputParams *FunctionParameterInfos `tfsdk:"input_params" tf:"optional"` + InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:"optional"` // Whether the function is deterministic. IsDeterministic types.Bool `tfsdk:"is_deterministic" tf:"optional"` // Function null call. @@ -980,7 +980,7 @@ type FunctionInfo struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties types.String `tfsdk:"properties" tf:"optional"` // Table function return parameters. - ReturnParams *FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` + ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` // Function language. When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -990,7 +990,7 @@ type FunctionInfo struct { // Function body. RoutineDefinition types.String `tfsdk:"routine_definition" tf:"optional"` // Function dependencies. - RoutineDependencies *DependencyList `tfsdk:"routine_dependencies" tf:"optional"` + RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:"optional"` // Function security type. @@ -1058,19 +1058,19 @@ type GenerateTemporaryTableCredentialRequest struct { type GenerateTemporaryTableCredentialResponse struct { // AWS temporary credentials for API authentication. Read more at // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. - AwsTempCredentials *AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional"` + AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional"` // Azure temporary credentials for API authentication. Read more at // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas - AzureUserDelegationSas *AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional"` + AzureUserDelegationSas []AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional"` // Server time when the credential will expire, in epoch milliseconds. The // API client is advised to cache the credential given this expiration time. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // GCP temporary credentials for API authentication. Read more at // https://developers.google.com/identity/protocols/oauth2/service-account - GcpOauthToken *GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional"` + GcpOauthToken []GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional"` // R2 temporary credentials for API authentication. Read more at // https://developers.cloudflare.com/r2/api/s3/tokens/. 
- R2TempCredentials *R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional"` + R2TempCredentials []R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional"` // The URL of the storage path accessible by the temporary credential. Url types.String `tfsdk:"url" tf:"optional"` } @@ -1276,7 +1276,7 @@ type GetQuotaRequest struct { type GetQuotaResponse struct { // The returned QuotaInfo. - QuotaInfo *QuotaInfo `tfsdk:"quota_info" tf:"optional"` + QuotaInfo []QuotaInfo `tfsdk:"quota_info" tf:"optional"` } // Get refresh @@ -1811,7 +1811,7 @@ type ModelVersionInfo struct { // parent schema ModelName types.String `tfsdk:"model_name" tf:"optional"` // Model version dependencies, for feature-store packaged models - ModelVersionDependencies *DependencyList `tfsdk:"model_version_dependencies" tf:"optional"` + ModelVersionDependencies []DependencyList `tfsdk:"model_version_dependencies" tf:"optional"` // MLflow run ID used when creating the model version, if ``source`` was // generated by an experiment run stored in an MLflow tracking server RunId types.String `tfsdk:"run_id" tf:"optional"` @@ -1910,26 +1910,26 @@ type MonitorInfo struct { // if the monitor is in PENDING state. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` // The data classification config for the monitor. - DataClassificationConfig *MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` // The full name of the drift metrics table. Format: // __catalog_name__.__schema_name__.__table_name__. DriftMetricsTableName types.String `tfsdk:"drift_metrics_table_name" tf:""` // Configuration for monitoring inference logs. - InferenceLog *MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` // The latest failure message of the monitor (if any). LatestMonitorFailureMsg types.String `tfsdk:"latest_monitor_failure_msg" tf:"optional"` // The version of the monitor config (e.g. 1,2,3). If negative, the monitor // may be corrupted. MonitorVersion types.String `tfsdk:"monitor_version" tf:""` // The notification settings for the monitor. - Notifications *MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:"optional"` // The full name of the profile metrics table. Format: // __catalog_name__.__schema_name__.__table_name__. ProfileMetricsTableName types.String `tfsdk:"profile_metrics_table_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule *MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` // List of column expressions to slice data with for targeted analysis. The // data is grouped by each expression independently, resulting in a separate // slice for each predicate and its complements. For high-cardinality @@ -1937,14 +1937,14 @@ type MonitorInfo struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot *MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` // The status of the monitor. Status types.String `tfsdk:"status" tf:""` // The full name of the table to monitor. 
Format: // __catalog_name__.__schema_name__.__table_name__. TableName types.String `tfsdk:"table_name" tf:""` // Configuration for monitoring time series tables. - TimeSeries *MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` } type MonitorMetric struct { @@ -1976,10 +1976,10 @@ type MonitorMetric struct { type MonitorNotifications struct { // Who to send notifications to on monitor failure. - OnFailure *MonitorDestination `tfsdk:"on_failure" tf:"optional"` + OnFailure []MonitorDestination `tfsdk:"on_failure" tf:"optional"` // Who to send notifications to when new data classification tags are // detected. - OnNewClassificationTagDetected *MonitorDestination `tfsdk:"on_new_classification_tag_detected" tf:"optional"` + OnNewClassificationTagDetected []MonitorDestination `tfsdk:"on_new_classification_tag_detected" tf:"optional"` } type MonitorRefreshInfo struct { @@ -2033,9 +2033,9 @@ type OnlineTable struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"name" tf:"optional"` // Specification of the online table. - Spec *OnlineTableSpec `tfsdk:"spec" tf:"optional"` + Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional"` // Online Table status - Status *OnlineTableStatus `tfsdk:"status" tf:"optional"` + Status []OnlineTableStatus `tfsdk:"status" tf:"optional"` // Data serving REST API URL for this table TableServingUrl types.String `tfsdk:"table_serving_url" tf:"optional"` } @@ -2056,10 +2056,10 @@ type OnlineTableSpec struct { // Primary Key columns to be used for data insert/update in the destination. PrimaryKeyColumns []types.String `tfsdk:"primary_key_columns" tf:"optional"` // Pipeline runs continuously after generating the initial data. - RunContinuously *OnlineTableSpecContinuousSchedulingPolicy `tfsdk:"run_continuously" tf:"optional"` + RunContinuously []OnlineTableSpecContinuousSchedulingPolicy `tfsdk:"run_continuously" tf:"optional"` // Pipeline stops after generating the initial data and can be triggered // later (manually, through a cron job or through data triggers) - RunTriggered *OnlineTableSpecTriggeredSchedulingPolicy `tfsdk:"run_triggered" tf:"optional"` + RunTriggered []OnlineTableSpecTriggeredSchedulingPolicy `tfsdk:"run_triggered" tf:"optional"` // Three-part (catalog, schema, table) name of the source Delta table. SourceTableFullName types.String `tfsdk:"source_table_full_name" tf:"optional"` // Time series key to deduplicate (tie-break) rows with the same primary @@ -2077,21 +2077,21 @@ type OnlineTableSpecTriggeredSchedulingPolicy struct { type OnlineTableStatus struct { // Detailed status of an online table. Shown if the online table is in the // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. - ContinuousUpdateStatus *ContinuousUpdateStatus `tfsdk:"continuous_update_status" tf:"optional"` + ContinuousUpdateStatus []ContinuousUpdateStatus `tfsdk:"continuous_update_status" tf:"optional"` // The state of the online table. DetailedState types.String `tfsdk:"detailed_state" tf:"optional"` // Detailed status of an online table. Shown if the online table is in the // OFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state. - FailedStatus *FailedStatus `tfsdk:"failed_status" tf:"optional"` + FailedStatus []FailedStatus `tfsdk:"failed_status" tf:"optional"` // A text description of the current state of the online table. Message types.String `tfsdk:"message" tf:"optional"` // Detailed status of an online table. 
Shown if the online table is in the // PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT // state. - ProvisioningStatus *ProvisioningStatus `tfsdk:"provisioning_status" tf:"optional"` + ProvisioningStatus []ProvisioningStatus `tfsdk:"provisioning_status" tf:"optional"` // Detailed status of an online table. Shown if the online table is in the // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. - TriggeredUpdateStatus *TriggeredUpdateStatus `tfsdk:"triggered_update_status" tf:"optional"` + TriggeredUpdateStatus []TriggeredUpdateStatus `tfsdk:"triggered_update_status" tf:"optional"` } type PermissionsChange struct { @@ -2148,7 +2148,7 @@ type ProvisioningInfo struct { type ProvisioningStatus struct { // Details about initial data synchronization. Only populated when in the // PROVISIONING_INITIAL_SNAPSHOT state. - InitialPipelineSyncProgress *PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional"` + InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional"` } type QuotaInfo struct { @@ -2268,7 +2268,7 @@ type SchemaInfo struct { // Username of schema creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` - EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` @@ -2321,13 +2321,13 @@ type SseEncryptionDetails struct { type StorageCredentialInfo struct { // The AWS IAM role configuration. - AwsIamRole *AwsIamRoleResponse `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleResponse `tfsdk:"aws_iam_role" tf:"optional"` // The Azure managed identity configuration. - AzureManagedIdentity *AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional"` // The Azure service principal configuration. - AzureServicePrincipal *AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` // The Cloudflare API token configuration. - CloudflareApiToken *CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` // Time at which this Credential was created, in epoch milliseconds. @@ -2335,7 +2335,7 @@ type StorageCredentialInfo struct { // Username of credential creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` // The Databricks managed GCP service account configuration. - DatabricksGcpServiceAccount *DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account" tf:"optional"` // The unique identifier of the credential. 
Id types.String `tfsdk:"id" tf:"optional"` // Whether the current securable is accessible from all workspaces or a @@ -2370,11 +2370,11 @@ type SystemSchemaInfo struct { // __primary_key_constraint__, __foreign_key_constraint__, // __named_table_constraint__. type TableConstraint struct { - ForeignKeyConstraint *ForeignKeyConstraint `tfsdk:"foreign_key_constraint" tf:"optional"` + ForeignKeyConstraint []ForeignKeyConstraint `tfsdk:"foreign_key_constraint" tf:"optional"` - NamedTableConstraint *NamedTableConstraint `tfsdk:"named_table_constraint" tf:"optional"` + NamedTableConstraint []NamedTableConstraint `tfsdk:"named_table_constraint" tf:"optional"` - PrimaryKeyConstraint *PrimaryKeyConstraint `tfsdk:"primary_key_constraint" tf:"optional"` + PrimaryKeyConstraint []PrimaryKeyConstraint `tfsdk:"primary_key_constraint" tf:"optional"` } // A table that is dependent on a SQL object. @@ -2414,14 +2414,14 @@ type TableInfo struct { // omitted if table is not deleted. DeletedAt types.Int64 `tfsdk:"deleted_at" tf:"optional"` // Information pertaining to current state of the delta table. - DeltaRuntimePropertiesKvpairs *DeltaRuntimePropertiesKvPairs `tfsdk:"delta_runtime_properties_kvpairs" tf:"optional"` + DeltaRuntimePropertiesKvpairs []DeltaRuntimePropertiesKvPairs `tfsdk:"delta_runtime_properties_kvpairs" tf:"optional"` - EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` // Full name of table, in form of // __catalog_name__.__schema_name__.__table_name__ FullName types.String `tfsdk:"full_name" tf:"optional"` @@ -2437,7 +2437,7 @@ type TableInfo struct { // A map of key-value properties attached to the securable. Properties map[string]types.String `tfsdk:"properties" tf:"optional"` - RowFilter *TableRowFilter `tfsdk:"row_filter" tf:"optional"` + RowFilter []TableRowFilter `tfsdk:"row_filter" tf:"optional"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:"optional"` // List of schemes whose objects can be referenced without qualification. @@ -2466,7 +2466,7 @@ type TableInfo struct { // provided; - when DependencyList is an empty list, the dependency is // provided but is empty; - when DependencyList is not an empty list, // dependencies are provided and recorded. - ViewDependencies *DependencyList `tfsdk:"view_dependencies" tf:"optional"` + ViewDependencies []DependencyList `tfsdk:"view_dependencies" tf:"optional"` } type TableRowFilter struct { @@ -2496,7 +2496,7 @@ type TriggeredUpdateStatus struct { // table to the online table. Timestamp types.String `tfsdk:"timestamp" tf:"optional"` // Progress of the active data synchronization pipeline. 
- TriggeredUpdateProgress *PipelineProgress `tfsdk:"triggered_update_progress" tf:"optional"` + TriggeredUpdateProgress []PipelineProgress `tfsdk:"triggered_update_progress" tf:"optional"` } // Delete an assignment @@ -2551,7 +2551,7 @@ type UpdateExternalLocation struct { // Name of the storage credential used with this location. CredentialName types.String `tfsdk:"credential_name" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. @@ -2639,15 +2639,15 @@ type UpdateMonitor struct { // if the monitor is in PENDING state. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` // The data classification config for the monitor. - DataClassificationConfig *MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` // Configuration for monitoring inference logs. - InferenceLog *MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` // The notification settings for the monitor. - Notifications *MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule *MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` // List of column expressions to slice data with for targeted analysis. The // data is grouped by each expression independently, resulting in a separate // slice for each predicate and its complements. For high-cardinality @@ -2655,11 +2655,11 @@ type UpdateMonitor struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot *MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` // Full name of the table. TableName types.String `tfsdk:"-"` // Configuration for monitoring time series tables. - TimeSeries *MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` } type UpdatePermissions struct { @@ -2703,17 +2703,17 @@ type UpdateSchema struct { type UpdateStorageCredential struct { // The AWS IAM role configuration. - AwsIamRole *AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` // The Azure managed identity configuration. - AzureManagedIdentity *AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional"` // The Azure service principal configuration. 
- AzureServicePrincipal *AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` // The Cloudflare API token configuration. - CloudflareApiToken *CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` // The Databricks managed GCP service account configuration. - DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` // Force update even if there are dependent external locations or external // tables. Force types.Bool `tfsdk:"force" tf:"optional"` @@ -2774,15 +2774,15 @@ type UpdateWorkspaceBindingsParameters struct { type ValidateStorageCredential struct { // The AWS IAM role configuration. - AwsIamRole *AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` // The Azure managed identity configuration. - AzureManagedIdentity *AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional"` // The Azure service principal configuration. - AzureServicePrincipal *AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` // The Cloudflare API token configuration. - CloudflareApiToken *CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` // The Databricks created GCP service account configuration. - DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` // The name of an existing external location to validate. ExternalLocationName types.String `tfsdk:"external_location_name" tf:"optional"` // Whether the storage credential is only usable for read operations. @@ -2825,7 +2825,7 @@ type VolumeInfo struct { // The identifier of the user who created the volume CreatedBy types.String `tfsdk:"created_by" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` // The three-level (fully qualified) name of the volume FullName types.String `tfsdk:"full_name" tf:"optional"` // The unique identifier of the metastore diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index 223ba1cb66..91eef81ce0 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -166,7 +166,7 @@ type AzureAttributes struct { // mutated over the lifetime of a cluster. 
FirstOnDemand types.Int64 `tfsdk:"first_on_demand" tf:"optional"` // Defines values necessary to configure and run Azure Log Analytics agent - LogAnalyticsInfo *LogAnalyticsInfo `tfsdk:"log_analytics_info" tf:"optional"` + LogAnalyticsInfo []LogAnalyticsInfo `tfsdk:"log_analytics_info" tf:"optional"` // The max bid price to be used for Azure spot instances. The Max price for // the bid cannot be higher than the on-demand price of the instance. If not // specified, the default value is -1, which specifies that the instance @@ -245,17 +245,17 @@ type ClusterAttributes struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes *AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes *AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only // one destination can be specified for one cluster. If the conf is given, // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -292,7 +292,7 @@ type ClusterAttributes struct { // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage *DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -310,7 +310,7 @@ type ClusterAttributes struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. - GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -366,7 +366,7 @@ type ClusterAttributes struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. 
SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType *WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` } type ClusterCompliance struct { @@ -386,7 +386,7 @@ type ClusterDetails struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale *AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -395,10 +395,10 @@ type ClusterDetails struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes *AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes *AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // Number of CPU cores available for this cluster. Note that this can be // fractional, e.g. 7.5 cores, since certain node types are configured to // share cores between Spark nodes on the same instance. @@ -412,9 +412,9 @@ type ClusterDetails struct { // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` // Cluster log delivery status. - ClusterLogStatus *LogSyncStatus `tfsdk:"cluster_log_status" tf:"optional"` + ClusterLogStatus []LogSyncStatus `tfsdk:"cluster_log_status" tf:"optional"` // Total amount of cluster memory, in megabytes ClusterMemoryMb types.Int64 `tfsdk:"cluster_memory_mb" tf:"optional"` // Cluster name requested by the user. This doesn't have to be unique. If @@ -473,11 +473,11 @@ type ClusterDetails struct { // - Name: DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"` - DockerImage *DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` // Node on which the Spark driver resides. The driver node contains the // Spark master and the Databricks application that manages the per-notebook // Spark REPLs. - Driver *SparkNode `tfsdk:"driver" tf:"optional"` + Driver []SparkNode `tfsdk:"driver" tf:"optional"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -497,7 +497,7 @@ type ClusterDetails struct { Executors []SparkNode `tfsdk:"executors" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. 
- GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -575,7 +575,7 @@ type ClusterDetails struct { // or edit this cluster. The contents of `spec` can be used in the body of a // create cluster request. This field might not be populated for older // clusters. Note: not included in the response of the ListClusters API. - Spec *ClusterSpec `tfsdk:"spec" tf:"optional"` + Spec []ClusterSpec `tfsdk:"spec" tf:"optional"` // SSH public key contents that will be added to each Spark node in this // cluster. The corresponding private keys can be used to login with the // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. @@ -593,18 +593,18 @@ type ClusterDetails struct { TerminatedTime types.Int64 `tfsdk:"terminated_time" tf:"optional"` // Information about why the cluster was terminated. This field only appears // when the cluster is in a `TERMINATING` or `TERMINATED` state. - TerminationReason *TerminationReason `tfsdk:"termination_reason" tf:"optional"` + TerminationReason []TerminationReason `tfsdk:"termination_reason" tf:"optional"` - WorkloadType *WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` } type ClusterEvent struct { // ClusterId types.String `tfsdk:"cluster_id" tf:""` // - DataPlaneEventDetails *DataPlaneEventDetails `tfsdk:"data_plane_event_details" tf:"optional"` + DataPlaneEventDetails []DataPlaneEventDetails `tfsdk:"data_plane_event_details" tf:"optional"` // - Details *EventDetails `tfsdk:"details" tf:"optional"` + Details []EventDetails `tfsdk:"details" tf:"optional"` // The timestamp when the event occurred, stored as the number of // milliseconds since the Unix epoch. If not provided, this will be assigned // by the Timeline service. @@ -623,13 +623,13 @@ type ClusterLibraryStatuses struct { type ClusterLogConf struct { // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : // "dbfs:/home/cluster_log" } }` - Dbfs *DbfsStorageInfo `tfsdk:"dbfs" tf:"optional"` + Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional"` // destination and either the region or endpoint need to be provided. e.g. // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : // "us-west-2" } }` Cluster iam role is used to access s3, please make sure // the cluster iam role in `instance_profile_arn` has permission to write // data to the s3 destination. - S3 *S3StorageInfo `tfsdk:"s3" tf:"optional"` + S3 []S3StorageInfo `tfsdk:"s3" tf:"optional"` } type ClusterPermission struct { @@ -733,7 +733,7 @@ type ClusterSize struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale *AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` // Number of worker nodes that this cluster should have. A cluster has one // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 // Spark nodes. @@ -755,7 +755,7 @@ type ClusterSpec struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. 
- Autoscale *AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -764,17 +764,17 @@ type ClusterSpec struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes *AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes *AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only // one destination can be specified for one cluster. If the conf is given, // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -811,7 +811,7 @@ type ClusterSpec struct { // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage *DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -829,7 +829,7 @@ type ClusterSpec struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. - GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -896,7 +896,7 @@ type ClusterSpec struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. 
SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType *WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` } // Get status @@ -928,7 +928,7 @@ type CommandStatusRequest struct { type CommandStatusResponse struct { Id types.String `tfsdk:"id" tf:"optional"` - Results *Results `tfsdk:"results" tf:"optional"` + Results []Results `tfsdk:"results" tf:"optional"` Status types.String `tfsdk:"status" tf:"optional"` } @@ -954,7 +954,7 @@ type CreateCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale *AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -963,20 +963,20 @@ type CreateCluster struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes *AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes *AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // When specified, this clones libraries from a source cluster during the // creation of a new cluster. - CloneFrom *CloneCluster `tfsdk:"clone_from" tf:"optional"` + CloneFrom []CloneCluster `tfsdk:"clone_from" tf:"optional"` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only // one destination can be specified for one cluster. If the conf is given, // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -1013,7 +1013,7 @@ type CreateCluster struct { // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage *DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -1031,7 +1031,7 @@ type CreateCluster struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. 
If not // specified at cluster creation, a set of default values will be used. - GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -1098,7 +1098,7 @@ type CreateCluster struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType *WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` } type CreateClusterResponse struct { @@ -1115,10 +1115,10 @@ type CreateContext struct { type CreateInstancePool struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. - AwsAttributes *InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to instance pools running on Azure. If not specified // at pool creation, a set of default values will be used. - AzureAttributes *InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // Additional tags for pool resources. Databricks will tag all pool // resources (e.g., AWS instances and EBS volumes) with these tags in // addition to `default_tags`. Notes: @@ -1127,7 +1127,7 @@ type CreateInstancePool struct { CustomTags map[string]types.String `tfsdk:"custom_tags" tf:"optional"` // Defines the specification of the disks that will be attached to all spark // containers. - DiskSpec *DiskSpec `tfsdk:"disk_spec" tf:"optional"` + DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"` // Autoscaling Local Storage: when enabled, this instances in this pool will // dynamically acquire additional disk space when its Spark workers are // running low on disk space. In AWS, this feature requires specific AWS @@ -1136,7 +1136,7 @@ type CreateInstancePool struct { EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"` // Attributes related to instance pools running on Google Cloud Platform. If // not specified at pool creation, a set of default values will be used. - GcpAttributes *InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // Automatically terminates the extra instances in the pool cache after they // are inactive for this time in minutes if min_idle_instances requirement // is already met. If not set, the extra pool instances will be @@ -1314,7 +1314,7 @@ type DiskSpec struct { DiskThroughput types.Int64 `tfsdk:"disk_throughput" tf:"optional"` // The type of disks that will be launched with this cluster. - DiskType *DiskType `tfsdk:"disk_type" tf:"optional"` + DiskType []DiskType `tfsdk:"disk_type" tf:"optional"` } type DiskType struct { @@ -1331,7 +1331,7 @@ type DockerBasicAuth struct { } type DockerImage struct { - BasicAuth *DockerBasicAuth `tfsdk:"basic_auth" tf:"optional"` + BasicAuth []DockerBasicAuth `tfsdk:"basic_auth" tf:"optional"` // URL of the docker image. 
Url types.String `tfsdk:"url" tf:"optional"` } @@ -1344,7 +1344,7 @@ type EditCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale *AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -1353,10 +1353,10 @@ type EditCluster struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes *AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes *AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // ID of the cluser ClusterId types.String `tfsdk:"cluster_id" tf:""` // The configuration for delivering spark logs to a long-term storage @@ -1365,7 +1365,7 @@ type EditCluster struct { // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -1402,7 +1402,7 @@ type EditCluster struct { // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage *DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -1420,7 +1420,7 @@ type EditCluster struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. - GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -1487,7 +1487,7 @@ type EditCluster struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. 
SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType *WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` } type EditClusterResponse struct { @@ -1612,11 +1612,11 @@ type Environment struct { type EventDetails struct { // * For created clusters, the attributes of the cluster. * For edited // clusters, the new attributes of the cluster. - Attributes *ClusterAttributes `tfsdk:"attributes" tf:"optional"` + Attributes []ClusterAttributes `tfsdk:"attributes" tf:"optional"` // The cause of a change in target size. Cause types.String `tfsdk:"cause" tf:"optional"` // The actual cluster size that was set in the cluster creation or edit. - ClusterSize *ClusterSize `tfsdk:"cluster_size" tf:"optional"` + ClusterSize []ClusterSize `tfsdk:"cluster_size" tf:"optional"` // The current number of vCPUs in the cluster. CurrentNumVcpus types.Int64 `tfsdk:"current_num_vcpus" tf:"optional"` // The current number of nodes in the cluster. @@ -1634,7 +1634,7 @@ type EventDetails struct { FreeSpace types.Int64 `tfsdk:"free_space" tf:"optional"` // List of global and cluster init scripts associated with this cluster // event. - InitScripts *InitScriptEventDetails `tfsdk:"init_scripts" tf:"optional"` + InitScripts []InitScriptEventDetails `tfsdk:"init_scripts" tf:"optional"` // Instance Id where the event originated from InstanceId types.String `tfsdk:"instance_id" tf:"optional"` // Unique identifier of the specific job run associated with this cluster @@ -1642,15 +1642,15 @@ type EventDetails struct { // cluster name JobRunName types.String `tfsdk:"job_run_name" tf:"optional"` // The cluster attributes before a cluster was edited. - PreviousAttributes *ClusterAttributes `tfsdk:"previous_attributes" tf:"optional"` + PreviousAttributes []ClusterAttributes `tfsdk:"previous_attributes" tf:"optional"` // The size of the cluster before an edit or resize. - PreviousClusterSize *ClusterSize `tfsdk:"previous_cluster_size" tf:"optional"` + PreviousClusterSize []ClusterSize `tfsdk:"previous_cluster_size" tf:"optional"` // Previous disk size in bytes PreviousDiskSize types.Int64 `tfsdk:"previous_disk_size" tf:"optional"` // A termination reason: * On a TERMINATED event, this is the reason of the // termination. * On a RESIZE_COMPLETE event, this indicates the reason that // we failed to acquire some nodes. - Reason *TerminationReason `tfsdk:"reason" tf:"optional"` + Reason []TerminationReason `tfsdk:"reason" tf:"optional"` // The targeted number of vCPUs in the cluster. TargetNumVcpus types.Int64 `tfsdk:"target_num_vcpus" tf:"optional"` // The targeted number of nodes in the cluster. @@ -1791,7 +1791,7 @@ type GetEventsResponse struct { Events []ClusterEvent `tfsdk:"events" tf:"optional"` // The parameters required to retrieve the next page of events. Omitted if // there are no more events to read. - NextPage *GetEvents `tfsdk:"next_page" tf:"optional"` + NextPage []GetEvents `tfsdk:"next_page" tf:"optional"` // The total number of events filtered by the start_time, end_time, and // event_types. TotalCount types.Int64 `tfsdk:"total_count" tf:"optional"` @@ -1806,10 +1806,10 @@ type GetGlobalInitScriptRequest struct { type GetInstancePool struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. 
- AwsAttributes *InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to instance pools running on Azure. If not specified // at pool creation, a set of default values will be used. - AzureAttributes *InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // Additional tags for pool resources. Databricks will tag all pool // resources (e.g., AWS instances and EBS volumes) with these tags in // addition to `default_tags`. Notes: @@ -1829,7 +1829,7 @@ type GetInstancePool struct { DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"` // Defines the specification of the disks that will be attached to all spark // containers. - DiskSpec *DiskSpec `tfsdk:"disk_spec" tf:"optional"` + DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"` // Autoscaling Local Storage: when enabled, this instances in this pool will // dynamically acquire additional disk space when its Spark workers are // running low on disk space. In AWS, this feature requires specific AWS @@ -1838,7 +1838,7 @@ type GetInstancePool struct { EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"` // Attributes related to instance pools running on Google Cloud Platform. If // not specified at pool creation, a set of default values will be used. - GcpAttributes *InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // Automatically terminates the extra instances in the pool cache after they // are inactive for this time in minutes if min_idle_instances requirement // is already met. If not set, the extra pool instances will be @@ -1874,9 +1874,9 @@ type GetInstancePool struct { // Current state of the instance pool. State types.String `tfsdk:"state" tf:"optional"` // Usage statistics about the instance pool. - Stats *InstancePoolStats `tfsdk:"stats" tf:"optional"` + Stats []InstancePoolStats `tfsdk:"stats" tf:"optional"` // Status of failed pending instances in the pool. - Status *InstancePoolStatus `tfsdk:"status" tf:"optional"` + Status []InstancePoolStatus `tfsdk:"status" tf:"optional"` } // Get instance pool permission levels @@ -2029,35 +2029,35 @@ type InitScriptInfo struct { // destination needs to be provided. e.g. `{ "abfss" : { "destination" : // "abfss://@.dfs.core.windows.net/" // } } - Abfss *Adlsgen2Info `tfsdk:"abfss" tf:"optional"` + Abfss []Adlsgen2Info `tfsdk:"abfss" tf:"optional"` // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : // "dbfs:/home/cluster_log" } }` - Dbfs *DbfsStorageInfo `tfsdk:"dbfs" tf:"optional"` + Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional"` // destination needs to be provided. e.g. `{ "file" : { "destination" : // "file:/my/local/file.sh" } }` - File *LocalFileInfo `tfsdk:"file" tf:"optional"` + File []LocalFileInfo `tfsdk:"file" tf:"optional"` // destination needs to be provided. e.g. `{ "gcs": { "destination": // "gs://my-bucket/file.sh" } }` - Gcs *GcsStorageInfo `tfsdk:"gcs" tf:"optional"` + Gcs []GcsStorageInfo `tfsdk:"gcs" tf:"optional"` // destination and either the region or endpoint need to be provided. e.g. 
// `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : // "us-west-2" } }` Cluster iam role is used to access s3, please make sure // the cluster iam role in `instance_profile_arn` has permission to write // data to the s3 destination. - S3 *S3StorageInfo `tfsdk:"s3" tf:"optional"` + S3 []S3StorageInfo `tfsdk:"s3" tf:"optional"` // destination needs to be provided. e.g. `{ "volumes" : { "destination" : // "/Volumes/my-init.sh" } }` - Volumes *VolumesStorageInfo `tfsdk:"volumes" tf:"optional"` + Volumes []VolumesStorageInfo `tfsdk:"volumes" tf:"optional"` // destination needs to be provided. e.g. `{ "workspace" : { "destination" : // "/Users/user1@databricks.com/my-init.sh" } }` - Workspace *WorkspaceStorageInfo `tfsdk:"workspace" tf:"optional"` + Workspace []WorkspaceStorageInfo `tfsdk:"workspace" tf:"optional"` } type InitScriptInfoAndExecutionDetails struct { // Details about the script - ExecutionDetails *InitScriptExecutionDetails `tfsdk:"execution_details" tf:"optional"` + ExecutionDetails []InitScriptExecutionDetails `tfsdk:"execution_details" tf:"optional"` // The script - Script *InitScriptInfo `tfsdk:"script" tf:"optional"` + Script []InitScriptInfo `tfsdk:"script" tf:"optional"` } type InstallLibraries struct { @@ -2097,10 +2097,10 @@ type InstancePoolAccessControlResponse struct { type InstancePoolAndStats struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. - AwsAttributes *InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to instance pools running on Azure. If not specified // at pool creation, a set of default values will be used. - AzureAttributes *InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // Additional tags for pool resources. Databricks will tag all pool // resources (e.g., AWS instances and EBS volumes) with these tags in // addition to `default_tags`. Notes: @@ -2120,7 +2120,7 @@ type InstancePoolAndStats struct { DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"` // Defines the specification of the disks that will be attached to all spark // containers. - DiskSpec *DiskSpec `tfsdk:"disk_spec" tf:"optional"` + DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"` // Autoscaling Local Storage: when enabled, this instances in this pool will // dynamically acquire additional disk space when its Spark workers are // running low on disk space. In AWS, this feature requires specific AWS @@ -2129,7 +2129,7 @@ type InstancePoolAndStats struct { EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"` // Attributes related to instance pools running on Google Cloud Platform. If // not specified at pool creation, a set of default values will be used. - GcpAttributes *InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // Automatically terminates the extra instances in the pool cache after they // are inactive for this time in minutes if min_idle_instances requirement // is already met. If not set, the extra pool instances will be @@ -2165,9 +2165,9 @@ type InstancePoolAndStats struct { // Current state of the instance pool. 
State types.String `tfsdk:"state" tf:"optional"` // Usage statistics about the instance pool. - Stats *InstancePoolStats `tfsdk:"stats" tf:"optional"` + Stats []InstancePoolStats `tfsdk:"stats" tf:"optional"` // Status of failed pending instances in the pool. - Status *InstancePoolStatus `tfsdk:"status" tf:"optional"` + Status []InstancePoolStatus `tfsdk:"status" tf:"optional"` } type InstancePoolAwsAttributes struct { @@ -2314,7 +2314,7 @@ type InstanceProfile struct { type Library struct { // Specification of a CRAN library to be installed as part of the library - Cran *RCranLibrary `tfsdk:"cran" tf:"optional"` + Cran []RCranLibrary `tfsdk:"cran" tf:"optional"` // Deprecated. URI of the egg library to install. Installing Python egg // files is deprecated and is not supported in Databricks Runtime 14.0 and // above. @@ -2329,10 +2329,10 @@ type Library struct { Jar types.String `tfsdk:"jar" tf:"optional"` // Specification of a maven library to be installed. For example: `{ // "coordinates": "org.jsoup:jsoup:1.7.2" }` - Maven *MavenLibrary `tfsdk:"maven" tf:"optional"` + Maven []MavenLibrary `tfsdk:"maven" tf:"optional"` // Specification of a PyPi library to be installed. For example: `{ // "package": "simplejson" }` - Pypi *PythonPyPiLibrary `tfsdk:"pypi" tf:"optional"` + Pypi []PythonPyPiLibrary `tfsdk:"pypi" tf:"optional"` // URI of the requirements.txt file to install. Only Workspace paths and // Unity Catalog Volumes paths are supported. For example: `{ // "requirements": "/Workspace/path/to/requirements.txt" }` or `{ @@ -2354,7 +2354,7 @@ type LibraryFullStatus struct { // libraries UI. IsLibraryForAllClusters types.Bool `tfsdk:"is_library_for_all_clusters" tf:"optional"` // Unique identifier for the library. - Library *Library `tfsdk:"library" tf:"optional"` + Library []Library `tfsdk:"library" tf:"optional"` // All the info and warning messages that have occurred so far for this // library. Messages []types.String `tfsdk:"messages" tf:"optional"` @@ -2425,7 +2425,7 @@ type ListClustersFilterBy struct { // List clusters type ListClustersRequest struct { // Filters to apply to the list of clusters. - FilterBy *ListClustersFilterBy `tfsdk:"-"` + FilterBy []ListClustersFilterBy `tfsdk:"-"` // Use this field to specify the maximum number of results to be returned by // the server. The server may further constrain the maximum number of // results returned in a single page. @@ -2434,7 +2434,7 @@ type ListClustersRequest struct { // to list the next or previous page of clusters respectively. PageToken types.String `tfsdk:"-"` // Sort the list of clusters by a specific criteria. - SortBy *ListClustersSortBy `tfsdk:"-"` + SortBy []ListClustersSortBy `tfsdk:"-"` } type ListClustersResponse struct { @@ -2568,9 +2568,9 @@ type NodeType struct { // Memory (in MB) available for this node type. MemoryMb types.Int64 `tfsdk:"memory_mb" tf:""` - NodeInfo *CloudProviderNodeInfo `tfsdk:"node_info" tf:"optional"` + NodeInfo []CloudProviderNodeInfo `tfsdk:"node_info" tf:"optional"` - NodeInstanceType *NodeInstanceType `tfsdk:"node_instance_type" tf:"optional"` + NodeInstanceType []NodeInstanceType `tfsdk:"node_instance_type" tf:"optional"` // Unique identifier for this node type. NodeTypeId types.String `tfsdk:"node_type_id" tf:""` // Number of CPU cores available for this node type. Note that this can be @@ -2709,7 +2709,7 @@ type ResizeCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. 
Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale *AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` // The cluster to be resized. ClusterId types.String `tfsdk:"cluster_id" tf:""` // Number of worker nodes that this cluster should have. A cluster has one @@ -2801,7 +2801,7 @@ type SparkNode struct { // Globally unique identifier for the host instance from the cloud provider. InstanceId types.String `tfsdk:"instance_id" tf:"optional"` // Attributes specific to AWS for a Spark node. - NodeAwsAttributes *SparkNodeAwsAttributes `tfsdk:"node_aws_attributes" tf:"optional"` + NodeAwsAttributes []SparkNodeAwsAttributes `tfsdk:"node_aws_attributes" tf:"optional"` // Globally unique identifier for this node. NodeId types.String `tfsdk:"node_id" tf:"optional"` // Private IP address (typically a 10.x.x.x address) of the Spark node. Note @@ -2877,7 +2877,7 @@ type UnpinClusterResponse struct { type UpdateCluster struct { // The cluster to be updated. - Cluster *UpdateClusterResource `tfsdk:"cluster" tf:"optional"` + Cluster []UpdateClusterResource `tfsdk:"cluster" tf:"optional"` // ID of the cluster. ClusterId types.String `tfsdk:"cluster_id" tf:""` // Specifies which fields of the cluster will be updated. This is required @@ -2892,7 +2892,7 @@ type UpdateClusterResource struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale *AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -2901,17 +2901,17 @@ type UpdateClusterResource struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes *AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes *AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only // one destination can be specified for one cluster. If the conf is given, // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -2948,7 +2948,7 @@ type UpdateClusterResource struct { // mode provides a way that doesn’t have UC nor passthrough enabled. 
DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage *DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -2966,7 +2966,7 @@ type UpdateClusterResource struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. - GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -3033,7 +3033,7 @@ type UpdateClusterResource struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType *WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` } type UpdateClusterResponse struct { @@ -3049,7 +3049,7 @@ type VolumesStorageInfo struct { type WorkloadType struct { // defined what type of clients can use the cluster. E.g. Notebooks, Jobs - Clients ClientsTypes `tfsdk:"clients" tf:""` + Clients []ClientsTypes `tfsdk:"clients" tf:""` } type WorkspaceStorageInfo struct { diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index 2fcdbdc14c..876787f5d9 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -37,7 +37,7 @@ type CreateDashboardRequest struct { type CreateScheduleRequest struct { // The cron expression describing the frequency of the periodic refresh for // this schedule. - CronSchedule CronSchedule `tfsdk:"cron_schedule" tf:""` + CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""` // UUID identifying the dashboard to which the schedule belongs. DashboardId types.String `tfsdk:"-"` // The display name for schedule. @@ -53,7 +53,7 @@ type CreateSubscriptionRequest struct { ScheduleId types.String `tfsdk:"-"` // Subscriber details for users and destinations to be added as subscribers // to the schedule. - Subscriber Subscriber `tfsdk:"subscriber" tf:""` + Subscriber []Subscriber `tfsdk:"subscriber" tf:""` } type CronSchedule struct { @@ -147,9 +147,9 @@ type ExecuteMessageQueryRequest struct { // Genie AI Response type GenieAttachment struct { - Query *QueryAttachment `tfsdk:"query" tf:"optional"` + Query []QueryAttachment `tfsdk:"query" tf:"optional"` - Text *TextAttachment `tfsdk:"text" tf:"optional"` + Text []TextAttachment `tfsdk:"text" tf:"optional"` } type GenieConversation struct { @@ -201,7 +201,7 @@ type GenieGetMessageQueryResultRequest struct { type GenieGetMessageQueryResultResponse struct { // SQL Statement Execution response. See [Get status, manifest, and result // first chunk](:method:statementexecution/getstatement) for more details. 
- StatementResponse *sql.StatementResponse `tfsdk:"statement_response" tf:"optional"` + StatementResponse sql.StatementResponse `tfsdk:"statement_response" tf:"optional"` } type GenieMessage struct { @@ -214,13 +214,13 @@ type GenieMessage struct { // Timestamp when the message was created CreatedTimestamp types.Int64 `tfsdk:"created_timestamp" tf:"optional"` // Error message if AI failed to respond to the message - Error *MessageError `tfsdk:"error" tf:"optional"` + Error []MessageError `tfsdk:"error" tf:"optional"` // Message ID Id types.String `tfsdk:"id" tf:""` // Timestamp when the message was last updated LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp" tf:"optional"` // The result of SQL query if the message has a query attachment - QueryResult *Result `tfsdk:"query_result" tf:"optional"` + QueryResult []Result `tfsdk:"query_result" tf:"optional"` // Genie space ID SpaceId types.String `tfsdk:"space_id" tf:""` // MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching @@ -253,11 +253,11 @@ type GenieStartConversationMessageRequest struct { } type GenieStartConversationResponse struct { - Conversation *GenieConversation `tfsdk:"conversation" tf:"optional"` + Conversation []GenieConversation `tfsdk:"conversation" tf:"optional"` // Conversation ID ConversationId types.String `tfsdk:"conversation_id" tf:""` - Message *GenieMessage `tfsdk:"message" tf:"optional"` + Message []GenieMessage `tfsdk:"message" tf:"optional"` // Message ID MessageId types.String `tfsdk:"message_id" tf:""` } @@ -427,7 +427,7 @@ type Schedule struct { CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The cron expression describing the frequency of the periodic refresh for // this schedule. - CronSchedule CronSchedule `tfsdk:"cron_schedule" tf:""` + CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""` // UUID identifying the dashboard to which the schedule belongs. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` // The display name for schedule. @@ -447,10 +447,10 @@ type Schedule struct { type Subscriber struct { // The destination to receive the subscription email. This parameter is // mutually exclusive with `user_subscriber`. - DestinationSubscriber *SubscriptionSubscriberDestination `tfsdk:"destination_subscriber" tf:"optional"` + DestinationSubscriber []SubscriptionSubscriberDestination `tfsdk:"destination_subscriber" tf:"optional"` // The user to receive the subscription email. This parameter is mutually // exclusive with `destination_subscriber`. - UserSubscriber *SubscriptionSubscriberUser `tfsdk:"user_subscriber" tf:"optional"` + UserSubscriber []SubscriptionSubscriberUser `tfsdk:"user_subscriber" tf:"optional"` } type Subscription struct { @@ -469,7 +469,7 @@ type Subscription struct { ScheduleId types.String `tfsdk:"schedule_id" tf:"optional"` // Subscriber details for users and destinations to be added as subscribers // to the schedule. - Subscriber Subscriber `tfsdk:"subscriber" tf:""` + Subscriber []Subscriber `tfsdk:"subscriber" tf:""` // UUID identifying the subscription. SubscriptionId types.String `tfsdk:"subscription_id" tf:"optional"` // A timestamp indicating when the subscription was last updated. @@ -536,7 +536,7 @@ type UpdateDashboardRequest struct { type UpdateScheduleRequest struct { // The cron expression describing the frequency of the periodic refresh for // this schedule. 
- CronSchedule CronSchedule `tfsdk:"cron_schedule" tf:""` + CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""` // UUID identifying the dashboard to which the schedule belongs. DashboardId types.String `tfsdk:"-"` // The display name for schedule. diff --git a/internal/service/iam_tf/model.go b/internal/service/iam_tf/model.go index 0d056da55b..a590d05629 100755 --- a/internal/service/iam_tf/model.go +++ b/internal/service/iam_tf/model.go @@ -263,7 +263,7 @@ type Group struct { Members []ComplexValue `tfsdk:"members" tf:"optional"` // Container for the group identifier. Workspace local versus account. - Meta *ResourceMeta `tfsdk:"meta" tf:"optional"` + Meta []ResourceMeta `tfsdk:"meta" tf:"optional"` // Corresponds to AWS instance profile/arn role. Roles []ComplexValue `tfsdk:"roles" tf:"optional"` // The schema of the group. @@ -584,7 +584,7 @@ type PermissionAssignment struct { // The permissions level of the principal. Permissions []types.String `tfsdk:"permissions" tf:"optional"` // Information about the principal assigned to the workspace. - Principal *PrincipalOutput `tfsdk:"principal" tf:"optional"` + Principal []PrincipalOutput `tfsdk:"principal" tf:"optional"` } type PermissionAssignments struct { @@ -694,7 +694,7 @@ type UpdateRuleSetRequest struct { // Name of the rule set. Name types.String `tfsdk:"name" tf:""` - RuleSet RuleSetUpdateRequest `tfsdk:"rule_set" tf:""` + RuleSet []RuleSetUpdateRequest `tfsdk:"rule_set" tf:""` } type UpdateWorkspaceAssignments struct { @@ -736,7 +736,7 @@ type User struct { // provided by the client will be ignored. Id types.String `tfsdk:"id" tf:"optional"` - Name *Name `tfsdk:"name" tf:"optional"` + Name []Name `tfsdk:"name" tf:"optional"` // Corresponds to AWS instance profile/arn role. Roles []ComplexValue `tfsdk:"roles" tf:"optional"` // The schema of the user. diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 2699dc2286..71c3096a41 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -26,7 +26,7 @@ type BaseJob struct { JobId types.Int64 `tfsdk:"job_id" tf:"optional"` // Settings for this job and all of its runs. These settings can be updated // using the `resetJob` method. - Settings *JobSettings `tfsdk:"settings" tf:"optional"` + Settings []JobSettings `tfsdk:"settings" tf:"optional"` } type BaseRun struct { @@ -47,10 +47,10 @@ type BaseRun struct { // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. - ClusterInstance *ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` + ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` // A snapshot of the job’s cluster specification when this run was // created. - ClusterSpec *ClusterSpec `tfsdk:"cluster_spec" tf:"optional"` + ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` @@ -77,7 +77,7 @@ type BaseRun struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource *GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. 
Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. @@ -98,7 +98,7 @@ type BaseRun struct { // run_id of the original attempt; otherwise, it is the same as the run_id. OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"` // The parameters used for this run. - OverridingParameters *RunParameters `tfsdk:"overriding_parameters" tf:"optional"` + OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` // The repair history of the run. @@ -123,7 +123,7 @@ type BaseRun struct { RunType types.String `tfsdk:"run_type" tf:"optional"` // The cron schedule that triggered this run if it was triggered by the // periodic scheduler. - Schedule *CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task @@ -138,9 +138,9 @@ type BaseRun struct { // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State *RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional"` // The current status of the run - Status *RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run resutls. Tasks []RunTask `tfsdk:"tasks" tf:"optional"` @@ -156,7 +156,7 @@ type BaseRun struct { // arrival. * `TABLE`: Indicates a run that is triggered by a table update. Trigger types.String `tfsdk:"trigger" tf:"optional"` // Additional details about what triggered the run - TriggerInfo *TriggerInfo `tfsdk:"trigger_info" tf:"optional"` + TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional"` } type CancelAllRuns struct { @@ -210,10 +210,10 @@ type ClusterSpec struct { JobClusterKey types.String `tfsdk:"job_cluster_key" tf:"optional"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. - Libraries []compute.Library `tfsdk:"library" tf:"optional"` + Libraries compute.Library `tfsdk:"library" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster *compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` } type ConditionTask struct { @@ -248,9 +248,9 @@ type CreateJob struct { // An optional continuous property for this job. The continuous property // will ensure that there is always one run executing. Only one of // `schedule` and `continuous` can be used. - Continuous *Continuous `tfsdk:"continuous" tf:"optional"` + Continuous []Continuous `tfsdk:"continuous" tf:"optional"` // Deployment information for jobs managed by external sources. - Deployment *JobDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []JobDeployment `tfsdk:"deployment" tf:"optional"` // An optional description for the job. The maximum length is 27700 // characters in UTF-8 encoding. 
Description types.String `tfsdk:"description" tf:"optional"` @@ -261,7 +261,7 @@ type CreateJob struct { EditMode types.String `tfsdk:"edit_mode" tf:"optional"` // An optional set of email addresses that is notified when runs of this job // begin or complete as well as when this job is deleted. - EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be // referenced by serverless tasks of this job. An environment is required to // be present for serverless tasks. For serverless notebook tasks, the @@ -283,9 +283,9 @@ type CreateJob struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource *GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional"` // An optional set of health rules that can be defined for this job. - Health *JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. @@ -308,22 +308,22 @@ type CreateJob struct { // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // job. - NotificationSettings *JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` // Job-level parameter definitions Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. - Queue *QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional"` // Write-only setting. Specifies the user, service principal or group that // the job/pipeline runs as. If not specified, the job/pipeline runs as the // user who created the job/pipeline. // // Exactly one of `user_name`, `service_principal_name`, `group_name` should // be specified. If not, an error is thrown. - RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` + RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI // or sending an API request to `runNow`. - Schedule *CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` // A map of tags associated with the job. These are forwarded to the cluster // as cluster tags for jobs clusters, and are subject to the same // limitations as cluster tags. A maximum of 25 tags can be added to the @@ -337,10 +337,10 @@ type CreateJob struct { // A configuration to trigger a run when certain conditions are met. The // default behavior is that the job runs only when triggered by clicking // “Run Now” in the Jobs UI or sending an API request to `runNow`. - Trigger *TriggerSettings `tfsdk:"trigger" tf:"optional"` + Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional"` // A collection of system notification IDs to notify when runs of this job // begin or complete. 
- WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` } // Job was created successfully @@ -467,7 +467,7 @@ type EnforcePolicyComplianceResponse struct { // clusters. Updated job settings are derived by applying policy default // values to the existing job clusters in order to satisfy policy // requirements. - Settings *JobSettings `tfsdk:"settings" tf:"optional"` + Settings []JobSettings `tfsdk:"settings" tf:"optional"` } // Run was exported successfully. @@ -507,7 +507,7 @@ type ForEachStats struct { // Sample of 3 most common error messages occurred during the iteration. ErrorMessageStats []ForEachTaskErrorMessageStats `tfsdk:"error_message_stats" tf:"optional"` // Describes stats of the iteration. Only latest retries are considered. - TaskRunStats *ForEachTaskTaskRunStats `tfsdk:"task_run_stats" tf:"optional"` + TaskRunStats []ForEachTaskTaskRunStats `tfsdk:"task_run_stats" tf:"optional"` } type ForEachTask struct { @@ -519,7 +519,7 @@ type ForEachTask struct { // an array parameter. Inputs types.String `tfsdk:"inputs" tf:""` // Configuration for the task that will be run for each element in the array - Task Task `tfsdk:"task" tf:""` + Task []Task `tfsdk:"task" tf:""` } type ForEachTaskErrorMessageStats struct { @@ -644,7 +644,7 @@ type GitSource struct { GitProvider types.String `tfsdk:"git_provider" tf:""` // Read-only state of the remote repository at the time the job was run. // This field is only included on job runs. - GitSnapshot *GitSnapshot `tfsdk:"git_snapshot" tf:"optional"` + GitSnapshot []GitSnapshot `tfsdk:"git_snapshot" tf:"optional"` // Name of the tag to be checked out and used by this job. This field cannot // be specified in conjunction with git_branch or git_commit. GitTag types.String `tfsdk:"tag" tf:"optional"` @@ -652,7 +652,7 @@ type GitSource struct { GitUrl types.String `tfsdk:"url" tf:""` // The source of the job specification in the remote repository when the job // is source controlled. - JobSource *JobSource `tfsdk:"job_source" tf:"optional"` + JobSource []JobSource `tfsdk:"job_source" tf:"optional"` } // Job was retrieved successfully. @@ -675,7 +675,7 @@ type Job struct { RunAsUserName types.String `tfsdk:"run_as_user_name" tf:"optional"` // Settings for this job and all of its runs. These settings can be updated // using the `resetJob` method. - Settings *JobSettings `tfsdk:"settings" tf:"optional"` + Settings []JobSettings `tfsdk:"settings" tf:"optional"` } type JobAccessControlRequest struct { @@ -775,7 +775,7 @@ type JobEnvironment struct { // The environment entity used to preserve serverless environment side panel // and jobs' environment for non-notebook task. In this minimal environment // spec, only pip dependencies are supported. - Spec *compute.Environment `tfsdk:"spec" tf:"optional"` + Spec compute.Environment `tfsdk:"spec" tf:"optional"` } type JobNotificationSettings struct { @@ -851,9 +851,9 @@ type JobSettings struct { // An optional continuous property for this job. The continuous property // will ensure that there is always one run executing. Only one of // `schedule` and `continuous` can be used. - Continuous *Continuous `tfsdk:"continuous" tf:"optional"` + Continuous []Continuous `tfsdk:"continuous" tf:"optional"` // Deployment information for jobs managed by external sources. 
- Deployment *JobDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []JobDeployment `tfsdk:"deployment" tf:"optional"` // An optional description for the job. The maximum length is 27700 // characters in UTF-8 encoding. Description types.String `tfsdk:"description" tf:"optional"` @@ -864,7 +864,7 @@ type JobSettings struct { EditMode types.String `tfsdk:"edit_mode" tf:"optional"` // An optional set of email addresses that is notified when runs of this job // begin or complete as well as when this job is deleted. - EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be // referenced by serverless tasks of this job. An environment is required to // be present for serverless tasks. For serverless notebook tasks, the @@ -886,9 +886,9 @@ type JobSettings struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource *GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional"` // An optional set of health rules that can be defined for this job. - Health *JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. @@ -911,22 +911,22 @@ type JobSettings struct { // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // job. - NotificationSettings *JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` // Job-level parameter definitions Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. - Queue *QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional"` // Write-only setting. Specifies the user, service principal or group that // the job/pipeline runs as. If not specified, the job/pipeline runs as the // user who created the job/pipeline. // // Exactly one of `user_name`, `service_principal_name`, `group_name` should // be specified. If not, an error is thrown. - RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` + RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI // or sending an API request to `runNow`. - Schedule *CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` // A map of tags associated with the job. These are forwarded to the cluster // as cluster tags for jobs clusters, and are subject to the same // limitations as cluster tags. A maximum of 25 tags can be added to the @@ -940,10 +940,10 @@ type JobSettings struct { // A configuration to trigger a run when certain conditions are met. The // default behavior is that the job runs only when triggered by clicking // “Run Now” in the Jobs UI or sending an API request to `runNow`. 
- Trigger *TriggerSettings `tfsdk:"trigger" tf:"optional"` + Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional"` // A collection of system notification IDs to notify when runs of this job // begin or complete. - WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` } // The source of the job specification in the remote repository when the job is @@ -1221,9 +1221,9 @@ type RepairHistoryItem struct { // The start time of the (repaired) run. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State *RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional"` // The current status of the run - Status *RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional"` // The run IDs of the task runs that ran as part of this repair history // item. TaskRunIds []types.Int64 `tfsdk:"task_run_ids" tf:"optional"` @@ -1276,7 +1276,7 @@ type RepairRun struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1348,7 +1348,7 @@ type ResetJob struct { // // Changes to the field `JobBaseSettings.timeout_seconds` are applied to // active runs. Changes to other fields are applied to future runs only. 
- NewSettings JobSettings `tfsdk:"new_settings" tf:""` + NewSettings []JobSettings `tfsdk:"new_settings" tf:""` } type ResetResponse struct { @@ -1389,25 +1389,25 @@ type ResolvedStringParamsValues struct { } type ResolvedValues struct { - ConditionTask *ResolvedConditionTaskValues `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ResolvedConditionTaskValues `tfsdk:"condition_task" tf:"optional"` - DbtTask *ResolvedDbtTaskValues `tfsdk:"dbt_task" tf:"optional"` + DbtTask []ResolvedDbtTaskValues `tfsdk:"dbt_task" tf:"optional"` - NotebookTask *ResolvedNotebookTaskValues `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []ResolvedNotebookTaskValues `tfsdk:"notebook_task" tf:"optional"` - PythonWheelTask *ResolvedPythonWheelTaskValues `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []ResolvedPythonWheelTaskValues `tfsdk:"python_wheel_task" tf:"optional"` - RunJobTask *ResolvedRunJobTaskValues `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []ResolvedRunJobTaskValues `tfsdk:"run_job_task" tf:"optional"` - SimulationTask *ResolvedParamPairValues `tfsdk:"simulation_task" tf:"optional"` + SimulationTask []ResolvedParamPairValues `tfsdk:"simulation_task" tf:"optional"` - SparkJarTask *ResolvedStringParamsValues `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []ResolvedStringParamsValues `tfsdk:"spark_jar_task" tf:"optional"` - SparkPythonTask *ResolvedStringParamsValues `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []ResolvedStringParamsValues `tfsdk:"spark_python_task" tf:"optional"` - SparkSubmitTask *ResolvedStringParamsValues `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []ResolvedStringParamsValues `tfsdk:"spark_submit_task" tf:"optional"` - SqlTask *ResolvedParamPairValues `tfsdk:"sql_task" tf:"optional"` + SqlTask []ResolvedParamPairValues `tfsdk:"sql_task" tf:"optional"` } // Run was retrieved successfully @@ -1429,10 +1429,10 @@ type Run struct { // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. - ClusterInstance *ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` + ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` // A snapshot of the job’s cluster specification when this run was // created. - ClusterSpec *ClusterSpec `tfsdk:"cluster_spec" tf:"optional"` + ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` @@ -1459,7 +1459,7 @@ type Run struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource *GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional"` // Only populated by for-each iterations. The parent for-each task is // located in tasks array. Iterations []RunTask `tfsdk:"iterations" tf:"optional"` @@ -1485,7 +1485,7 @@ type Run struct { // run_id of the original attempt; otherwise, it is the same as the run_id. OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"` // The parameters used for this run. 
- OverridingParameters *RunParameters `tfsdk:"overriding_parameters" tf:"optional"` + OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional"` // A token that can be used to list the previous page of sub-resources. PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` // The time in milliseconds that the run has spent in the queue. @@ -1512,7 +1512,7 @@ type Run struct { RunType types.String `tfsdk:"run_type" tf:"optional"` // The cron schedule that triggered this run if it was triggered by the // periodic scheduler. - Schedule *CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task @@ -1527,9 +1527,9 @@ type Run struct { // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State *RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional"` // The current status of the run - Status *RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run resutls. Tasks []RunTask `tfsdk:"tasks" tf:"optional"` @@ -1545,7 +1545,7 @@ type Run struct { // arrival. * `TABLE`: Indicates a run that is triggered by a table update. Trigger types.String `tfsdk:"trigger" tf:"optional"` // Additional details about what triggered the run - TriggerInfo *TriggerInfo `tfsdk:"trigger_info" tf:"optional"` + TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional"` } type RunConditionTask struct { @@ -1581,9 +1581,9 @@ type RunForEachTask struct { Inputs types.String `tfsdk:"inputs" tf:""` // Read only field. 
Populated for GetRun and ListRuns RPC calls and stores // the execution stats of an For each task - Stats *ForEachStats `tfsdk:"stats" tf:"optional"` + Stats []ForEachStats `tfsdk:"stats" tf:"optional"` // Configuration for the task that will be run for each element in the array - Task Task `tfsdk:"task" tf:""` + Task []Task `tfsdk:"task" tf:""` } type RunJobOutput struct { @@ -1632,7 +1632,7 @@ type RunJobTask struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1735,7 +1735,7 @@ type RunNow struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1757,7 +1757,7 @@ type RunNow struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables PythonParams []types.String `tfsdk:"python_params" tf:"optional"` // The queue settings of the run. - Queue *QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional"` // A list of parameters for jobs with spark submit task, for example // `"spark_submit_params": ["--class", // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to @@ -1795,7 +1795,7 @@ type RunNowResponse struct { // Run output was retrieved successfully. type RunOutput struct { // The output of a dbt task, if available. - DbtOutput *DbtOutput `tfsdk:"dbt_output" tf:"optional"` + DbtOutput []DbtOutput `tfsdk:"dbt_output" tf:"optional"` // An error message indicating why a task failed or why output is not // available. The message is unstructured, and its exact format is subject // to change. @@ -1816,7 +1816,7 @@ type RunOutput struct { // Whether the logs are truncated. LogsTruncated types.Bool `tfsdk:"logs_truncated" tf:"optional"` // All details of the run except for its output. - Metadata *Run `tfsdk:"metadata" tf:"optional"` + Metadata []Run `tfsdk:"metadata" tf:"optional"` // The output of a notebook task, if available. A notebook task that // terminates (either successfully or with a failure) without calling // `dbutils.notebook.exit()` is considered to have an empty output. This @@ -1825,11 +1825,11 @@ type RunOutput struct { // the [ClusterLogConf] field to configure log storage for the job cluster. 
// // [ClusterLogConf]: https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlogconf - NotebookOutput *NotebookOutput `tfsdk:"notebook_output" tf:"optional"` + NotebookOutput []NotebookOutput `tfsdk:"notebook_output" tf:"optional"` // The output of a run job task, if available - RunJobOutput *RunJobOutput `tfsdk:"run_job_output" tf:"optional"` + RunJobOutput []RunJobOutput `tfsdk:"run_job_output" tf:"optional"` // The output of a SQL task, if available. - SqlOutput *SqlOutput `tfsdk:"sql_output" tf:"optional"` + SqlOutput []SqlOutput `tfsdk:"sql_output" tf:"optional"` } type RunParameters struct { @@ -1869,7 +1869,7 @@ type RunParameters struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1936,12 +1936,12 @@ type RunState struct { // The current status of the run type RunStatus struct { // If the run was queued, details about the reason for queuing the run. - QueueDetails *QueueDetails `tfsdk:"queue_details" tf:"optional"` + QueueDetails []QueueDetails `tfsdk:"queue_details" tf:"optional"` // The current state of the run. State types.String `tfsdk:"state" tf:"optional"` // If the run is in a TERMINATING or TERMINATED state, details about the // reason for terminating the run. - TerminationDetails *TerminationDetails `tfsdk:"termination_details" tf:"optional"` + TerminationDetails []TerminationDetails `tfsdk:"termination_details" tf:"optional"` } // Used when outputting a child run, in GetRun or ListRuns. @@ -1963,15 +1963,15 @@ type RunTask struct { // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. - ClusterInstance *ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` + ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. - ConditionTask *RunConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []RunConditionTask `tfsdk:"condition_task" tf:"optional"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask *DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name @@ -1981,7 +1981,7 @@ type RunTask struct { Description types.String `tfsdk:"description" tf:"optional"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. 
- EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` @@ -2004,7 +2004,7 @@ type RunTask struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask *RunForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []RunForEachTask `tfsdk:"for_each_task" tf:"optional"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. If `git_source` is set, @@ -2013,31 +2013,31 @@ type RunTask struct { // `WORKSPACE` on the task. Note: dbt and SQL File tasks support only // version-controlled sources. If dbt or SQL File tasks are used, // `git_source` must be defined on the job. - GitSource *GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey types.String `tfsdk:"job_cluster_key" tf:"optional"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. - Libraries []compute.Library `tfsdk:"library" tf:"optional"` + Libraries compute.Library `tfsdk:"library" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster *compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. - NotebookTask *NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. - NotificationSettings *TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` // If pipeline_task, indicates that this task must execute a Pipeline. - PipelineTask *PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask *PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` // Parameter values including resolved references - ResolvedValues *ResolvedValues `tfsdk:"resolved_values" tf:"optional"` + ResolvedValues []ResolvedValues `tfsdk:"resolved_values" tf:"optional"` // The time in milliseconds it took the job run and all of its repairs to // finish. RunDuration types.Int64 `tfsdk:"run_duration" tf:"optional"` @@ -2049,7 +2049,7 @@ type RunTask struct { // possible values. 
RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask *RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` RunPageUrl types.String `tfsdk:"run_page_url" tf:"optional"` // The time in milliseconds it took to set up the cluster. For runs that run @@ -2061,9 +2061,9 @@ type RunTask struct { // `run_duration` field. SetupDuration types.Int64 `tfsdk:"setup_duration" tf:"optional"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask *SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask *SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2081,18 +2081,18 @@ type RunTask struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask *SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask *SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State *RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional"` // The current status of the run - Status *RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2104,7 +2104,7 @@ type RunTask struct { // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. // Task webhooks respect the task notification settings. - WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` } type SparkJarTask struct { @@ -2191,7 +2191,7 @@ type SqlDashboardWidgetOutput struct { // Time (in epoch milliseconds) when execution of the SQL widget ends. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` // The information about the error when execution fails. - Error *SqlOutputError `tfsdk:"error" tf:"optional"` + Error []SqlOutputError `tfsdk:"error" tf:"optional"` // The link to find the output results. OutputLink types.String `tfsdk:"output_link" tf:"optional"` // Time (in epoch milliseconds) when execution of the SQL widget starts. @@ -2206,11 +2206,11 @@ type SqlDashboardWidgetOutput struct { type SqlOutput struct { // The output of a SQL alert task, if available. 
- AlertOutput *SqlAlertOutput `tfsdk:"alert_output" tf:"optional"` + AlertOutput []SqlAlertOutput `tfsdk:"alert_output" tf:"optional"` // The output of a SQL dashboard task, if available. - DashboardOutput *SqlDashboardOutput `tfsdk:"dashboard_output" tf:"optional"` + DashboardOutput []SqlDashboardOutput `tfsdk:"dashboard_output" tf:"optional"` // The output of a SQL query task, if available. - QueryOutput *SqlQueryOutput `tfsdk:"query_output" tf:"optional"` + QueryOutput []SqlQueryOutput `tfsdk:"query_output" tf:"optional"` } type SqlOutputError struct { @@ -2238,17 +2238,17 @@ type SqlStatementOutput struct { type SqlTask struct { // If alert, indicates that this job must refresh a SQL alert. - Alert *SqlTaskAlert `tfsdk:"alert" tf:"optional"` + Alert []SqlTaskAlert `tfsdk:"alert" tf:"optional"` // If dashboard, indicates that this job must refresh a SQL dashboard. - Dashboard *SqlTaskDashboard `tfsdk:"dashboard" tf:"optional"` + Dashboard []SqlTaskDashboard `tfsdk:"dashboard" tf:"optional"` // If file, indicates that this job runs a SQL file in a remote Git // repository. - File *SqlTaskFile `tfsdk:"file" tf:"optional"` + File []SqlTaskFile `tfsdk:"file" tf:"optional"` // Parameters to be used for each run of this job. The SQL alert task does // not support custom parameters. Parameters map[string]types.String `tfsdk:"parameters" tf:"optional"` // If query, indicates that this job must execute a SQL query. - Query *SqlTaskQuery `tfsdk:"query" tf:"optional"` + Query []SqlTaskQuery `tfsdk:"query" tf:"optional"` // The canonical identifier of the SQL warehouse. Recommended to use with // serverless or pro SQL warehouses. Classic SQL warehouses are only // supported for SQL alert, dashboard and query tasks and are limited to @@ -2314,7 +2314,7 @@ type SubmitRun struct { AccessControlList []JobAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // An optional set of email addresses notified when the run begins or // completes. - EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be // referenced by tasks of this run. Environments []JobEnvironment `tfsdk:"environments" tf:"optional"` @@ -2328,9 +2328,9 @@ type SubmitRun struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource *GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional"` // An optional set of health rules that can be defined for this job. - Health *JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional"` // An optional token that can be used to guarantee the idempotency of job // run requests. If a run with the provided token already exists, the // request does not create a new run but returns the ID of the existing run @@ -2350,12 +2350,12 @@ type SubmitRun struct { // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // run. - NotificationSettings *JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` // The queue settings of the one-time run. 
- Queue *QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional"` // Specifies the user or service principal that the job runs as. If not // specified, the job runs as the user who submits the request. - RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` + RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional name for the run. The default value is `Untitled`. RunName types.String `tfsdk:"run_name" tf:"optional"` @@ -2365,7 +2365,7 @@ type SubmitRun struct { TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds" tf:"optional"` // A collection of system notification IDs to notify when the run begins or // completes. - WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` } // Run was created and started successfully. @@ -2378,11 +2378,11 @@ type SubmitTask struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. - ConditionTask *ConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask *DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name @@ -2392,7 +2392,7 @@ type SubmitTask struct { Description types.String `tfsdk:"description" tf:"optional"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. - EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // The key that references an environment spec in a job. This field is // required for Python script, Python wheel and dbt tasks when using // serverless compute. @@ -2404,37 +2404,37 @@ type SubmitTask struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask *ForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional"` // An optional set of health rules that can be defined for this job. - Health *JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. - Libraries []compute.Library `tfsdk:"library" tf:"optional"` + Libraries compute.Library `tfsdk:"library" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster *compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. 
- NotebookTask *NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. - NotificationSettings *TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` // If pipeline_task, indicates that this task must execute a Pipeline. - PipelineTask *PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask *PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask *RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask *SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask *SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2452,9 +2452,9 @@ type SubmitTask struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask *SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask *SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2466,7 +2466,7 @@ type SubmitTask struct { // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. // Task webhooks respect the task notification settings. - WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` } type TableUpdateTriggerConfiguration struct { @@ -2490,11 +2490,11 @@ type Task struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. 
- ConditionTask *ConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask *DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete before executing this // task. The task will run only if the `run_if` condition is true. The key @@ -2507,7 +2507,7 @@ type Task struct { // An optional set of email addresses that is notified when runs of this // task begin or complete as well as when this task is deleted. The default // behavior is to not send any emails. - EmailNotifications *TaskEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []TaskEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // The key that references an environment spec in a job. This field is // required for Python script, Python wheel and dbt tasks when using // serverless compute. @@ -2519,15 +2519,15 @@ type Task struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask *ForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional"` // An optional set of health rules that can be defined for this job. - Health *JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey types.String `tfsdk:"job_cluster_key" tf:"optional"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. - Libraries []compute.Library `tfsdk:"library" tf:"optional"` + Libraries compute.Library `tfsdk:"library" tf:"optional"` // An optional maximum number of times to retry an unsuccessful run. A run // is considered to be unsuccessful if it completes with the `FAILED` // result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means @@ -2539,18 +2539,18 @@ type Task struct { MinRetryIntervalMillis types.Int64 `tfsdk:"min_retry_interval_millis" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster *compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. - NotebookTask *NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task. - NotificationSettings *TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` // If pipeline_task, indicates that this task must execute a Pipeline. 
- PipelineTask *PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask *PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` // An optional policy to specify whether to retry a job when it times out. // The default behavior is to not retry on timeout. RetryOnTimeout types.Bool `tfsdk:"retry_on_timeout" tf:"optional"` @@ -2565,11 +2565,11 @@ type Task struct { // dependencies have failed RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask *RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask *SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask *SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2587,9 +2587,9 @@ type Task struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask *SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask *SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2601,7 +2601,7 @@ type Task struct { // A collection of system notification IDs to notify when runs of this task // begin or complete. The default behavior is to not send any system // notifications. - WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` } type TaskDependency struct { @@ -2730,15 +2730,15 @@ type TriggerInfo struct { type TriggerSettings struct { // File arrival trigger settings. - FileArrival *FileArrivalTriggerConfiguration `tfsdk:"file_arrival" tf:"optional"` + FileArrival []FileArrivalTriggerConfiguration `tfsdk:"file_arrival" tf:"optional"` // Whether this trigger is paused or not. PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` // Periodic trigger settings. - Periodic *PeriodicTriggerConfiguration `tfsdk:"periodic" tf:"optional"` + Periodic []PeriodicTriggerConfiguration `tfsdk:"periodic" tf:"optional"` // Old table trigger settings name. Deprecated in favor of `table_update`. 
- Table *TableUpdateTriggerConfiguration `tfsdk:"table" tf:"optional"` + Table []TableUpdateTriggerConfiguration `tfsdk:"table" tf:"optional"` - TableUpdate *TableUpdateTriggerConfiguration `tfsdk:"table_update" tf:"optional"` + TableUpdate []TableUpdateTriggerConfiguration `tfsdk:"table_update" tf:"optional"` } type UpdateJob struct { @@ -2759,7 +2759,7 @@ type UpdateJob struct { // // Changes to the field `JobSettings.timeout_seconds` are applied to active // runs. Changes to other fields are applied to future runs only. - NewSettings *JobSettings `tfsdk:"new_settings" tf:"optional"` + NewSettings []JobSettings `tfsdk:"new_settings" tf:"optional"` } type UpdateResponse struct { diff --git a/internal/service/marketplace_tf/model.go b/internal/service/marketplace_tf/model.go index a2aad3e3d1..692e1c80d5 100755 --- a/internal/service/marketplace_tf/model.go +++ b/internal/service/marketplace_tf/model.go @@ -21,7 +21,7 @@ type AddExchangeForListingRequest struct { } type AddExchangeForListingResponse struct { - ExchangeForListing *ExchangeListing `tfsdk:"exchange_for_listing" tf:"optional"` + ExchangeForListing []ExchangeListing `tfsdk:"exchange_for_listing" tf:"optional"` } // Get one batch of listings. One may specify up to 50 IDs per request. @@ -59,7 +59,7 @@ type ContactInfo struct { } type CreateExchangeFilterRequest struct { - Filter ExchangeFilter `tfsdk:"filter" tf:""` + Filter []ExchangeFilter `tfsdk:"filter" tf:""` } type CreateExchangeFilterResponse struct { @@ -67,7 +67,7 @@ type CreateExchangeFilterResponse struct { } type CreateExchangeRequest struct { - Exchange Exchange `tfsdk:"exchange" tf:""` + Exchange []Exchange `tfsdk:"exchange" tf:""` } type CreateExchangeResponse struct { @@ -77,7 +77,7 @@ type CreateExchangeResponse struct { type CreateFileRequest struct { DisplayName types.String `tfsdk:"display_name" tf:"optional"` - FileParent FileParent `tfsdk:"file_parent" tf:""` + FileParent []FileParent `tfsdk:"file_parent" tf:""` MarketplaceFileType types.String `tfsdk:"marketplace_file_type" tf:""` @@ -85,13 +85,13 @@ type CreateFileRequest struct { } type CreateFileResponse struct { - FileInfo *FileInfo `tfsdk:"file_info" tf:"optional"` + FileInfo []FileInfo `tfsdk:"file_info" tf:"optional"` // Pre-signed POST URL to blob storage UploadUrl types.String `tfsdk:"upload_url" tf:"optional"` } type CreateInstallationRequest struct { - AcceptedConsumerTerms *ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"optional"` + AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"optional"` CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` @@ -99,13 +99,13 @@ type CreateInstallationRequest struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` // for git repo installations - RepoDetail *RepoInstallation `tfsdk:"repo_detail" tf:"optional"` + RepoDetail []RepoInstallation `tfsdk:"repo_detail" tf:"optional"` ShareName types.String `tfsdk:"share_name" tf:"optional"` } type CreateListingRequest struct { - Listing Listing `tfsdk:"listing" tf:""` + Listing []Listing `tfsdk:"listing" tf:""` } type CreateListingResponse struct { @@ -114,7 +114,7 @@ type CreateListingResponse struct { // Data request messages also creates a lead (maybe) type CreatePersonalizationRequest struct { - AcceptedConsumerTerms ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:""` + AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:""` Comment types.String `tfsdk:"comment" tf:"optional"` @@ -138,7 +138,7 @@ type 
CreatePersonalizationRequestResponse struct { } type CreateProviderRequest struct { - Provider ProviderInfo `tfsdk:"provider" tf:""` + Provider []ProviderInfo `tfsdk:"provider" tf:""` } type CreateProviderResponse struct { @@ -264,7 +264,7 @@ type FileInfo struct { DownloadLink types.String `tfsdk:"download_link" tf:"optional"` - FileParent *FileParent `tfsdk:"file_parent" tf:"optional"` + FileParent []FileParent `tfsdk:"file_parent" tf:"optional"` Id types.String `tfsdk:"id" tf:"optional"` @@ -292,7 +292,7 @@ type GetExchangeRequest struct { } type GetExchangeResponse struct { - Exchange *Exchange `tfsdk:"exchange" tf:"optional"` + Exchange []Exchange `tfsdk:"exchange" tf:"optional"` } // Get a file @@ -301,7 +301,7 @@ type GetFileRequest struct { } type GetFileResponse struct { - FileInfo *FileInfo `tfsdk:"file_info" tf:"optional"` + FileInfo []FileInfo `tfsdk:"file_info" tf:"optional"` } type GetLatestVersionProviderAnalyticsDashboardResponse struct { @@ -330,7 +330,7 @@ type GetListingRequest struct { } type GetListingResponse struct { - Listing *Listing `tfsdk:"listing" tf:"optional"` + Listing []Listing `tfsdk:"listing" tf:"optional"` } // List listings @@ -361,11 +361,11 @@ type GetProviderRequest struct { } type GetProviderResponse struct { - Provider *ProviderInfo `tfsdk:"provider" tf:"optional"` + Provider []ProviderInfo `tfsdk:"provider" tf:"optional"` } type Installation struct { - Installation *InstallationDetail `tfsdk:"installation" tf:"optional"` + Installation []InstallationDetail `tfsdk:"installation" tf:"optional"` } type InstallationDetail struct { @@ -391,7 +391,7 @@ type InstallationDetail struct { Status types.String `tfsdk:"status" tf:"optional"` - TokenDetail *TokenDetail `tfsdk:"token_detail" tf:"optional"` + TokenDetail []TokenDetail `tfsdk:"token_detail" tf:"optional"` Tokens []TokenInfo `tfsdk:"tokens" tf:"optional"` } @@ -467,7 +467,7 @@ type ListExchangesResponse struct { // List files type ListFilesRequest struct { - FileParent FileParent `tfsdk:"-"` + FileParent []FileParent `tfsdk:"-"` PageSize types.Int64 `tfsdk:"-"` @@ -578,11 +578,11 @@ type ListProvidersResponse struct { } type Listing struct { - Detail *ListingDetail `tfsdk:"detail" tf:"optional"` + Detail []ListingDetail `tfsdk:"detail" tf:"optional"` Id types.String `tfsdk:"id" tf:"optional"` // Next Number: 26 - Summary ListingSummary `tfsdk:"summary" tf:""` + Summary []ListingSummary `tfsdk:"summary" tf:""` } type ListingDetail struct { @@ -594,7 +594,7 @@ type ListingDetail struct { // The starting date timestamp for when the data spans CollectionDateStart types.Int64 `tfsdk:"collection_date_start" tf:"optional"` // Smallest unit of time in the dataset - CollectionGranularity *DataRefreshInfo `tfsdk:"collection_granularity" tf:"optional"` + CollectionGranularity []DataRefreshInfo `tfsdk:"collection_granularity" tf:"optional"` // Whether the dataset is free or paid Cost types.String `tfsdk:"cost" tf:"optional"` // Where/how the data is sourced @@ -633,7 +633,7 @@ type ListingDetail struct { TermsOfService types.String `tfsdk:"terms_of_service" tf:"optional"` // How often data is updated - UpdateFrequency *DataRefreshInfo `tfsdk:"update_frequency" tf:"optional"` + UpdateFrequency []DataRefreshInfo `tfsdk:"update_frequency" tf:"optional"` } type ListingFulfillment struct { @@ -643,9 +643,9 @@ type ListingFulfillment struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` - RepoInfo *RepoInfo `tfsdk:"repo_info" tf:"optional"` + RepoInfo []RepoInfo `tfsdk:"repo_info" tf:"optional"` 
- ShareInfo *ShareInfo `tfsdk:"share_info" tf:"optional"` + ShareInfo []ShareInfo `tfsdk:"share_info" tf:"optional"` } type ListingSetting struct { @@ -665,7 +665,7 @@ type ListingSummary struct { ExchangeIds []types.String `tfsdk:"exchange_ids" tf:"optional"` // if a git repo is being created, a listing will be initialized with this // field as opposed to a share - GitRepo *RepoInfo `tfsdk:"git_repo" tf:"optional"` + GitRepo []RepoInfo `tfsdk:"git_repo" tf:"optional"` ListingType types.String `tfsdk:"listingType" tf:""` @@ -673,15 +673,15 @@ type ListingSummary struct { ProviderId types.String `tfsdk:"provider_id" tf:"optional"` - ProviderRegion *RegionInfo `tfsdk:"provider_region" tf:"optional"` + ProviderRegion []RegionInfo `tfsdk:"provider_region" tf:"optional"` PublishedAt types.Int64 `tfsdk:"published_at" tf:"optional"` PublishedBy types.String `tfsdk:"published_by" tf:"optional"` - Setting *ListingSetting `tfsdk:"setting" tf:"optional"` + Setting []ListingSetting `tfsdk:"setting" tf:"optional"` - Share *ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional"` // Enums Status types.String `tfsdk:"status" tf:"optional"` @@ -705,10 +705,10 @@ type ListingTag struct { type PersonalizationRequest struct { Comment types.String `tfsdk:"comment" tf:"optional"` - ConsumerRegion RegionInfo `tfsdk:"consumer_region" tf:""` + ConsumerRegion []RegionInfo `tfsdk:"consumer_region" tf:""` // contact info for the consumer requesting data or performing a listing // installation - ContactInfo *ContactInfo `tfsdk:"contact_info" tf:"optional"` + ContactInfo []ContactInfo `tfsdk:"contact_info" tf:"optional"` CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` @@ -728,7 +728,7 @@ type PersonalizationRequest struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` - Share *ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional"` Status types.String `tfsdk:"status" tf:"optional"` @@ -870,27 +870,27 @@ type TokenInfo struct { } type UpdateExchangeFilterRequest struct { - Filter ExchangeFilter `tfsdk:"filter" tf:""` + Filter []ExchangeFilter `tfsdk:"filter" tf:""` Id types.String `tfsdk:"-"` } type UpdateExchangeFilterResponse struct { - Filter *ExchangeFilter `tfsdk:"filter" tf:"optional"` + Filter []ExchangeFilter `tfsdk:"filter" tf:"optional"` } type UpdateExchangeRequest struct { - Exchange Exchange `tfsdk:"exchange" tf:""` + Exchange []Exchange `tfsdk:"exchange" tf:""` Id types.String `tfsdk:"-"` } type UpdateExchangeResponse struct { - Exchange *Exchange `tfsdk:"exchange" tf:"optional"` + Exchange []Exchange `tfsdk:"exchange" tf:"optional"` } type UpdateInstallationRequest struct { - Installation InstallationDetail `tfsdk:"installation" tf:""` + Installation []InstallationDetail `tfsdk:"installation" tf:""` InstallationId types.String `tfsdk:"-"` @@ -900,17 +900,17 @@ type UpdateInstallationRequest struct { } type UpdateInstallationResponse struct { - Installation *InstallationDetail `tfsdk:"installation" tf:"optional"` + Installation []InstallationDetail `tfsdk:"installation" tf:"optional"` } type UpdateListingRequest struct { Id types.String `tfsdk:"-"` - Listing Listing `tfsdk:"listing" tf:""` + Listing []Listing `tfsdk:"listing" tf:""` } type UpdateListingResponse struct { - Listing *Listing `tfsdk:"listing" tf:"optional"` + Listing []Listing `tfsdk:"listing" tf:"optional"` } type UpdatePersonalizationRequestRequest struct { @@ -920,13 +920,13 @@ type UpdatePersonalizationRequestRequest struct { RequestId 
types.String `tfsdk:"-"` - Share *ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional"` Status types.String `tfsdk:"status" tf:""` } type UpdatePersonalizationRequestResponse struct { - Request *PersonalizationRequest `tfsdk:"request" tf:"optional"` + Request []PersonalizationRequest `tfsdk:"request" tf:"optional"` } type UpdateProviderAnalyticsDashboardRequest struct { @@ -950,9 +950,9 @@ type UpdateProviderAnalyticsDashboardResponse struct { type UpdateProviderRequest struct { Id types.String `tfsdk:"-"` - Provider ProviderInfo `tfsdk:"provider" tf:""` + Provider []ProviderInfo `tfsdk:"provider" tf:""` } type UpdateProviderResponse struct { - Provider *ProviderInfo `tfsdk:"provider" tf:"optional"` + Provider []ProviderInfo `tfsdk:"provider" tf:"optional"` } diff --git a/internal/service/ml_tf/model.go b/internal/service/ml_tf/model.go index e8735e8d51..71e0ecbc1e 100755 --- a/internal/service/ml_tf/model.go +++ b/internal/service/ml_tf/model.go @@ -94,7 +94,7 @@ type ApproveTransitionRequest struct { type ApproveTransitionRequestResponse struct { // Activity recorded for the action. - Activity *Activity `tfsdk:"activity" tf:"optional"` + Activity []Activity `tfsdk:"activity" tf:"optional"` } // Comment details. @@ -124,7 +124,7 @@ type CreateComment struct { type CreateCommentResponse struct { // Comment details. - Comment *CommentObject `tfsdk:"comment" tf:"optional"` + Comment []CommentObject `tfsdk:"comment" tf:"optional"` } type CreateExperiment struct { @@ -156,7 +156,7 @@ type CreateModelRequest struct { } type CreateModelResponse struct { - RegisteredModel *Model `tfsdk:"registered_model" tf:"optional"` + RegisteredModel []Model `tfsdk:"registered_model" tf:"optional"` } type CreateModelVersionRequest struct { @@ -178,7 +178,7 @@ type CreateModelVersionRequest struct { type CreateModelVersionResponse struct { // Return new version number generated for this model in registry. - ModelVersion *ModelVersion `tfsdk:"model_version" tf:"optional"` + ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional"` } type CreateRegistryWebhook struct { @@ -219,9 +219,9 @@ type CreateRegistryWebhook struct { // version be archived. Events []types.String `tfsdk:"events" tf:""` - HttpUrlSpec *HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"` + HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"` - JobSpec *JobSpec `tfsdk:"job_spec" tf:"optional"` + JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional"` // Name of the model whose events would trigger this webhook. ModelName types.String `tfsdk:"model_name" tf:"optional"` // Enable or disable triggering the webhook, or put the webhook into test @@ -250,7 +250,7 @@ type CreateRun struct { type CreateRunResponse struct { // The newly created run. - Run *Run `tfsdk:"run" tf:"optional"` + Run []Run `tfsdk:"run" tf:"optional"` } type CreateTransitionRequest struct { @@ -274,11 +274,11 @@ type CreateTransitionRequest struct { type CreateTransitionRequestResponse struct { // Transition request details. - Request *TransitionRequest `tfsdk:"request" tf:"optional"` + Request []TransitionRequest `tfsdk:"request" tf:"optional"` } type CreateWebhookResponse struct { - Webhook *RegistryWebhook `tfsdk:"webhook" tf:"optional"` + Webhook []RegistryWebhook `tfsdk:"webhook" tf:"optional"` } type Dataset struct { @@ -306,7 +306,7 @@ type Dataset struct { type DatasetInput struct { // The dataset being used as a Run input. 
- Dataset *Dataset `tfsdk:"dataset" tf:"optional"` + Dataset []Dataset `tfsdk:"dataset" tf:"optional"` // A list of tags for the dataset input, e.g. a “context” tag with value // “training” Tags []InputTag `tfsdk:"tags" tf:"optional"` @@ -562,7 +562,7 @@ type GetExperimentRequest struct { type GetExperimentResponse struct { // Experiment details. - Experiment *Experiment `tfsdk:"experiment" tf:"optional"` + Experiment []Experiment `tfsdk:"experiment" tf:"optional"` } // Get history of a given metric within a run @@ -611,7 +611,7 @@ type GetModelRequest struct { } type GetModelResponse struct { - RegisteredModelDatabricks *ModelDatabricks `tfsdk:"registered_model_databricks" tf:"optional"` + RegisteredModelDatabricks []ModelDatabricks `tfsdk:"registered_model_databricks" tf:"optional"` } // Get a model version URI @@ -636,7 +636,7 @@ type GetModelVersionRequest struct { } type GetModelVersionResponse struct { - ModelVersion *ModelVersion `tfsdk:"model_version" tf:"optional"` + ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional"` } // Get registered model permission levels @@ -668,7 +668,7 @@ type GetRunRequest struct { type GetRunResponse struct { // Run metadata (name, start time, etc) and data (metrics, params, and // tags). - Run *Run `tfsdk:"run" tf:"optional"` + Run []Run `tfsdk:"run" tf:"optional"` } type HttpUrlSpec struct { @@ -1152,11 +1152,11 @@ type RegistryWebhook struct { // version be archived. Events []types.String `tfsdk:"events" tf:"optional"` - HttpUrlSpec *HttpUrlSpecWithoutSecret `tfsdk:"http_url_spec" tf:"optional"` + HttpUrlSpec []HttpUrlSpecWithoutSecret `tfsdk:"http_url_spec" tf:"optional"` // Webhook ID Id types.String `tfsdk:"id" tf:"optional"` - JobSpec *JobSpecWithoutSecret `tfsdk:"job_spec" tf:"optional"` + JobSpec []JobSpecWithoutSecret `tfsdk:"job_spec" tf:"optional"` // Time of the object at last update, as a Unix timestamp in milliseconds. LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp" tf:"optional"` // Name of the model whose events would trigger this webhook. @@ -1193,7 +1193,7 @@ type RejectTransitionRequest struct { type RejectTransitionRequestResponse struct { // Activity recorded for the action. - Activity *Activity `tfsdk:"activity" tf:"optional"` + Activity []Activity `tfsdk:"activity" tf:"optional"` } type RenameModelRequest struct { @@ -1204,7 +1204,7 @@ type RenameModelRequest struct { } type RenameModelResponse struct { - RegisteredModel *Model `tfsdk:"registered_model" tf:"optional"` + RegisteredModel []Model `tfsdk:"registered_model" tf:"optional"` } type RestoreExperiment struct { @@ -1242,11 +1242,11 @@ type RestoreRunsResponse struct { type Run struct { // Run data. - Data *RunData `tfsdk:"data" tf:"optional"` + Data []RunData `tfsdk:"data" tf:"optional"` // Run metadata. - Info *RunInfo `tfsdk:"info" tf:"optional"` + Info []RunInfo `tfsdk:"info" tf:"optional"` // Run inputs. - Inputs *RunInputs `tfsdk:"inputs" tf:"optional"` + Inputs []RunInputs `tfsdk:"inputs" tf:"optional"` } type RunData struct { @@ -1497,7 +1497,7 @@ type TestRegistryWebhookRequest struct { type TestRegistryWebhookResponse struct { // Test webhook response object. 
- Webhook *TestRegistryWebhook `tfsdk:"webhook" tf:"optional"` + Webhook []TestRegistryWebhook `tfsdk:"webhook" tf:"optional"` } type TransitionModelVersionStageDatabricks struct { @@ -1546,7 +1546,7 @@ type TransitionRequest struct { } type TransitionStageResponse struct { - ModelVersion *ModelVersionDatabricks `tfsdk:"model_version" tf:"optional"` + ModelVersion []ModelVersionDatabricks `tfsdk:"model_version" tf:"optional"` } type UpdateComment struct { @@ -1558,7 +1558,7 @@ type UpdateComment struct { type UpdateCommentResponse struct { // Comment details. - Comment *CommentObject `tfsdk:"comment" tf:"optional"` + Comment []CommentObject `tfsdk:"comment" tf:"optional"` } type UpdateExperiment struct { @@ -1632,11 +1632,11 @@ type UpdateRegistryWebhook struct { // version be archived. Events []types.String `tfsdk:"events" tf:"optional"` - HttpUrlSpec *HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"` + HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"` // Webhook ID Id types.String `tfsdk:"id" tf:""` - JobSpec *JobSpec `tfsdk:"job_spec" tf:"optional"` + JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional"` // Enable or disable triggering the webhook, or put the webhook into test // mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an // associated event happens. @@ -1662,7 +1662,7 @@ type UpdateRun struct { type UpdateRunResponse struct { // Updated metadata of the run. - RunInfo *RunInfo `tfsdk:"run_info" tf:"optional"` + RunInfo []RunInfo `tfsdk:"run_info" tf:"optional"` } type UpdateWebhookResponse struct { diff --git a/internal/service/oauth2_tf/model.go b/internal/service/oauth2_tf/model.go index 611a5c1ef1..3efb4b2b43 100755 --- a/internal/service/oauth2_tf/model.go +++ b/internal/service/oauth2_tf/model.go @@ -26,7 +26,7 @@ type CreateCustomAppIntegration struct { // offline_access, openid, profile, email. 
Scopes []types.String `tfsdk:"scopes" tf:"optional"` // Token access policy - TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` } type CreateCustomAppIntegrationOutput struct { @@ -44,7 +44,7 @@ type CreatePublishedAppIntegration struct { // tableau-deskop AppId types.String `tfsdk:"app_id" tf:"optional"` // Token access policy - TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` } type CreatePublishedAppIntegrationOutput struct { @@ -128,7 +128,7 @@ type GetCustomAppIntegrationOutput struct { Scopes []types.String `tfsdk:"scopes" tf:"optional"` // Token access policy - TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` } // Get OAuth Custom App Integration @@ -155,7 +155,7 @@ type GetPublishedAppIntegrationOutput struct { // Display name of the published OAuth app Name types.String `tfsdk:"name" tf:"optional"` // Token access policy - TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` } // Get OAuth Published App Integration @@ -258,7 +258,7 @@ type UpdateCustomAppIntegration struct { // integration RedirectUrls []types.String `tfsdk:"redirect_urls" tf:"optional"` // Token access policy to be updated in the custom OAuth app integration - TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` } type UpdateCustomAppIntegrationOutput struct { @@ -267,7 +267,7 @@ type UpdateCustomAppIntegrationOutput struct { type UpdatePublishedAppIntegration struct { IntegrationId types.String `tfsdk:"-"` // Token access policy to be updated in the published OAuth app integration - TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` } type UpdatePublishedAppIntegrationOutput struct { diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index 1740d0ee0b..68ee17d592 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -36,7 +36,7 @@ type CreatePipeline struct { // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous types.Bool `tfsdk:"continuous" tf:"optional"` // Deployment type of this pipeline. - Deployment *PipelineDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"` // Whether the pipeline is in Development mode. Defaults to false. Development types.Bool `tfsdk:"development" tf:"optional"` @@ -44,14 +44,14 @@ type CreatePipeline struct { // Pipeline product edition. Edition types.String `tfsdk:"edition" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. - Filters *Filters `tfsdk:"filters" tf:"optional"` + Filters []Filters `tfsdk:"filters" tf:"optional"` // The definition of a gateway pipeline to support CDC. 
- GatewayDefinition *IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` + GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` // The configuration for a managed ingestion pipeline. These settings cannot // be used with the 'libraries', 'target' or 'catalog' settings. - IngestionDefinition *IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` + IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"` // Friendly identifier for this pipeline. @@ -73,12 +73,12 @@ type CreatePipeline struct { // To publish to Unity Catalog, also specify `catalog`. Target types.String `tfsdk:"target" tf:"optional"` // Which pipeline trigger to use. Deprecated: Use `continuous` instead. - Trigger *PipelineTrigger `tfsdk:"trigger" tf:"optional"` + Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"` } type CreatePipelineResponse struct { // Only returned when dry_run is true. - EffectiveSettings *PipelineSpec `tfsdk:"effective_settings" tf:"optional"` + EffectiveSettings []PipelineSpec `tfsdk:"effective_settings" tf:"optional"` // The unique identifier for the newly created pipeline. Only returned when // dry_run is false. PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` @@ -126,7 +126,7 @@ type EditPipeline struct { // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous types.Bool `tfsdk:"continuous" tf:"optional"` // Deployment type of this pipeline. - Deployment *PipelineDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"` // Whether the pipeline is in Development mode. Defaults to false. Development types.Bool `tfsdk:"development" tf:"optional"` // Pipeline product edition. @@ -136,14 +136,14 @@ type EditPipeline struct { // will fail with a conflict. ExpectedLastModified types.Int64 `tfsdk:"expected_last_modified" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. - Filters *Filters `tfsdk:"filters" tf:"optional"` + Filters []Filters `tfsdk:"filters" tf:"optional"` // The definition of a gateway pipeline to support CDC. - GatewayDefinition *IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` + GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` // The configuration for a managed ingestion pipeline. These settings cannot // be used with the 'libraries', 'target' or 'catalog' settings. - IngestionDefinition *IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` + IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"` // Friendly identifier for this pipeline. @@ -167,7 +167,7 @@ type EditPipeline struct { // To publish to Unity Catalog, also specify `catalog`. Target types.String `tfsdk:"target" tf:"optional"` // Which pipeline trigger to use. Deprecated: Use `continuous` instead. 
- Trigger *PipelineTrigger `tfsdk:"trigger" tf:"optional"` + Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"` } type EditPipelineResponse struct { @@ -238,7 +238,7 @@ type GetPipelineResponse struct { RunAsUserName types.String `tfsdk:"run_as_user_name" tf:"optional"` // The pipeline specification. This field is not returned when called by // `ListPipelines`. - Spec *PipelineSpec `tfsdk:"spec" tf:"optional"` + Spec []PipelineSpec `tfsdk:"spec" tf:"optional"` // The pipeline state. State types.String `tfsdk:"state" tf:"optional"` } @@ -253,14 +253,14 @@ type GetUpdateRequest struct { type GetUpdateResponse struct { // The current update info. - Update *UpdateInfo `tfsdk:"update" tf:"optional"` + Update []UpdateInfo `tfsdk:"update" tf:"optional"` } type IngestionConfig struct { // Select tables from a specific source schema. - Schema *SchemaSpec `tfsdk:"schema" tf:"optional"` + Schema []SchemaSpec `tfsdk:"schema" tf:"optional"` // Select tables from a specific source table. - Table *TableSpec `tfsdk:"table" tf:"optional"` + Table []TableSpec `tfsdk:"table" tf:"optional"` } type IngestionGatewayPipelineDefinition struct { @@ -294,7 +294,7 @@ type IngestionPipelineDefinition struct { Objects []IngestionConfig `tfsdk:"objects" tf:"optional"` // Configuration settings to control the ingestion of tables. These settings // are applied to all tables in the pipeline. - TableConfiguration *TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` + TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` } // List pipeline events @@ -478,20 +478,20 @@ type PipelineCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale *PipelineClusterAutoscale `tfsdk:"autoscale" tf:"optional"` + Autoscale []PipelineClusterAutoscale `tfsdk:"autoscale" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes *compute.AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes compute.AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes *compute.AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes compute.AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` // The configuration for delivering spark logs to a long-term storage // destination. Only dbfs destinations are supported. Only one destination // can be specified for one cluster. If the conf is given, the logs will be // delivered to the destination every `5 mins`. The destination of driver // logs is `$destination/$clusterId/driver`, while the destination of // executor logs is `$destination/$clusterId/executor`. - ClusterLogConf *compute.ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf compute.ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` // Additional tags for cluster resources. Databricks will tag all cluster // resources (e.g., AWS instances and EBS volumes) with these tags in // addition to `default_tags`. Notes: @@ -513,12 +513,12 @@ type PipelineCluster struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. 
If not // specified at cluster creation, a set of default values will be used. - GcpAttributes *compute.GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes compute.GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent // to `//init_scripts`. - InitScripts []compute.InitScriptInfo `tfsdk:"init_scripts" tf:"optional"` + InitScripts compute.InitScriptInfo `tfsdk:"init_scripts" tf:"optional"` // The optional ID of the instance pool to which the cluster belongs. InstancePoolId types.String `tfsdk:"instance_pool_id" tf:"optional"` // A label for the cluster specification, either `default` to configure the @@ -593,7 +593,7 @@ type PipelineDeployment struct { type PipelineEvent struct { // Information about an error captured by the event. - Error *ErrorDetail `tfsdk:"error" tf:"optional"` + Error []ErrorDetail `tfsdk:"error" tf:"optional"` // The event type. Should always correspond to the details EventType types.String `tfsdk:"event_type" tf:"optional"` // A time-based, globally unique id. @@ -605,9 +605,9 @@ type PipelineEvent struct { // The display message associated with the event. Message types.String `tfsdk:"message" tf:"optional"` // Describes where the event originates from. - Origin *Origin `tfsdk:"origin" tf:"optional"` + Origin []Origin `tfsdk:"origin" tf:"optional"` // A sequencing object to identify and order events. - Sequence *Sequencing `tfsdk:"sequence" tf:"optional"` + Sequence []Sequencing `tfsdk:"sequence" tf:"optional"` // The time of the event. Timestamp types.String `tfsdk:"timestamp" tf:"optional"` } @@ -615,14 +615,14 @@ type PipelineEvent struct { type PipelineLibrary struct { // The path to a file that defines a pipeline and is stored in the // Databricks Repos. - File *FileLibrary `tfsdk:"file" tf:"optional"` + File []FileLibrary `tfsdk:"file" tf:"optional"` // URI of the jar to be installed. Currently only DBFS is supported. Jar types.String `tfsdk:"jar" tf:"optional"` // Specification of a maven library to be installed. - Maven *compute.MavenLibrary `tfsdk:"maven" tf:"optional"` + Maven compute.MavenLibrary `tfsdk:"maven" tf:"optional"` // The path to a notebook that defines a pipeline and is stored in the // Databricks workspace. - Notebook *NotebookLibrary `tfsdk:"notebook" tf:"optional"` + Notebook []NotebookLibrary `tfsdk:"notebook" tf:"optional"` // URI of the whl to be installed. Whl types.String `tfsdk:"whl" tf:"optional"` } @@ -673,20 +673,20 @@ type PipelineSpec struct { // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous types.Bool `tfsdk:"continuous" tf:"optional"` // Deployment type of this pipeline. - Deployment *PipelineDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"` // Whether the pipeline is in Development mode. Defaults to false. Development types.Bool `tfsdk:"development" tf:"optional"` // Pipeline product edition. Edition types.String `tfsdk:"edition" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. - Filters *Filters `tfsdk:"filters" tf:"optional"` + Filters []Filters `tfsdk:"filters" tf:"optional"` // The definition of a gateway pipeline to support CDC. 
- GatewayDefinition *IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` + GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` // The configuration for a managed ingestion pipeline. These settings cannot // be used with the 'libraries', 'target' or 'catalog' settings. - IngestionDefinition *IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` + IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"` // Friendly identifier for this pipeline. @@ -708,7 +708,7 @@ type PipelineSpec struct { // To publish to Unity Catalog, also specify `catalog`. Target types.String `tfsdk:"target" tf:"optional"` // Which pipeline trigger to use. Deprecated: Use `continuous` instead. - Trigger *PipelineTrigger `tfsdk:"trigger" tf:"optional"` + Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"` } type PipelineStateInfo struct { @@ -733,9 +733,9 @@ type PipelineStateInfo struct { } type PipelineTrigger struct { - Cron *CronTrigger `tfsdk:"cron" tf:"optional"` + Cron []CronTrigger `tfsdk:"cron" tf:"optional"` - Manual *ManualTrigger `tfsdk:"manual" tf:"optional"` + Manual []ManualTrigger `tfsdk:"manual" tf:"optional"` } type SchemaSpec struct { @@ -753,14 +753,14 @@ type SchemaSpec struct { // Configuration settings to control the ingestion of tables. These settings // are applied to all tables in this schema and override the // table_configuration defined in the IngestionPipelineDefinition object. - TableConfiguration *TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` + TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` } type Sequencing struct { // A sequence number, unique and increasing within the control plane. ControlPlaneSeqNo types.Int64 `tfsdk:"control_plane_seq_no" tf:"optional"` // the ID assigned by the data plane. - DataPlaneId *DataPlaneId `tfsdk:"data_plane_id" tf:"optional"` + DataPlaneId []DataPlaneId `tfsdk:"data_plane_id" tf:"optional"` } type SerializedException struct { @@ -834,7 +834,7 @@ type TableSpec struct { // Configuration settings to control the ingestion of tables. These settings // override the table_configuration defined in the // IngestionPipelineDefinition object and the SchemaSpec. - TableConfiguration *TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` + TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` } type TableSpecificConfig struct { @@ -854,7 +854,7 @@ type UpdateInfo struct { ClusterId types.String `tfsdk:"cluster_id" tf:"optional"` // The pipeline configuration with system defaults applied where unspecified // by the user. Not returned by ListUpdates. - Config *PipelineSpec `tfsdk:"config" tf:"optional"` + Config []PipelineSpec `tfsdk:"config" tf:"optional"` // The time when this update was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // If true, this update will reset all tables before running. 
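// A minimal illustrative sketch of the change applied throughout this diff: optional
// nested message fields in the generated tfsdk models move from pointers (*T) to
// single-element slices ([]T), so an absent block is an empty slice rather than a nil
// pointer. The type and field names below (examplePolicy, exampleIntegrationOld,
// exampleIntegrationNew) are hypothetical and only mirror the pattern; they are not
// part of the generated code in this patch.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

type examplePolicy struct {
	AccessTokenTtlInMinutes types.Int64 `tfsdk:"access_token_ttl_in_minutes" tf:"optional"`
}

// Before: an optional nested object held as a pointer.
type exampleIntegrationOld struct {
	TokenAccessPolicy *examplePolicy `tfsdk:"token_access_policy" tf:"optional"`
}

// After: the same object held as a slice with zero or one element.
type exampleIntegrationNew struct {
	TokenAccessPolicy []examplePolicy `tfsdk:"token_access_policy" tf:"optional"`
}

func main() {
	v := exampleIntegrationNew{
		TokenAccessPolicy: []examplePolicy{
			{AccessTokenTtlInMinutes: types.Int64Value(60)},
		},
	}
	// "Unset" is represented by an empty slice instead of a nil pointer.
	fmt.Println(len(v.TokenAccessPolicy) == 1)
}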
diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index 691c118d6d..8880df8f4c 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -15,7 +15,7 @@ import ( ) type AwsCredentials struct { - StsRole *StsRole `tfsdk:"sts_role" tf:"optional"` + StsRole []StsRole `tfsdk:"sts_role" tf:"optional"` } type AwsKeyInfo struct { @@ -42,7 +42,7 @@ type AzureWorkspaceInfo struct { // The general workspace configurations that are specific to cloud providers. type CloudResourceContainer struct { // The general workspace configurations that are specific to Google Cloud. - Gcp *CustomerFacingGcpCloudResourceContainer `tfsdk:"gcp" tf:"optional"` + Gcp []CustomerFacingGcpCloudResourceContainer `tfsdk:"gcp" tf:"optional"` } type CreateAwsKeyInfo struct { @@ -59,11 +59,11 @@ type CreateAwsKeyInfo struct { } type CreateCredentialAwsCredentials struct { - StsRole *CreateCredentialStsRole `tfsdk:"sts_role" tf:"optional"` + StsRole []CreateCredentialStsRole `tfsdk:"sts_role" tf:"optional"` } type CreateCredentialRequest struct { - AwsCredentials CreateCredentialAwsCredentials `tfsdk:"aws_credentials" tf:""` + AwsCredentials []CreateCredentialAwsCredentials `tfsdk:"aws_credentials" tf:""` // The human-readable name of the credential configuration object. CredentialsName types.String `tfsdk:"credentials_name" tf:""` } @@ -74,9 +74,9 @@ type CreateCredentialStsRole struct { } type CreateCustomerManagedKeyRequest struct { - AwsKeyInfo *CreateAwsKeyInfo `tfsdk:"aws_key_info" tf:"optional"` + AwsKeyInfo []CreateAwsKeyInfo `tfsdk:"aws_key_info" tf:"optional"` - GcpKeyInfo *CreateGcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional"` + GcpKeyInfo []CreateGcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional"` // The cases that the key can be used for. UseCases []types.String `tfsdk:"use_cases" tf:""` } @@ -89,7 +89,7 @@ type CreateGcpKeyInfo struct { type CreateNetworkRequest struct { // The Google Cloud specific information for this network (for example, the // VPC ID, subnet ID, and secondary IP ranges). - GcpNetworkInfo *GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional"` + GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional"` // The human-readable name of the network configuration. NetworkName types.String `tfsdk:"network_name" tf:""` // IDs of one to five security groups associated with this network. Security @@ -102,7 +102,7 @@ type CreateNetworkRequest struct { // communication from this VPC over [AWS PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ - VpcEndpoints *NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional"` + VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional"` // The ID of the VPC associated with this network. VPC IDs can be used in // multiple network configurations. VpcId types.String `tfsdk:"vpc_id" tf:"optional"` @@ -110,7 +110,7 @@ type CreateNetworkRequest struct { type CreateStorageConfigurationRequest struct { // Root S3 bucket information. - RootBucketInfo RootBucketInfo `tfsdk:"root_bucket_info" tf:""` + RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:""` // The human-readable name of the storage configuration. StorageConfigurationName types.String `tfsdk:"storage_configuration_name" tf:""` } @@ -120,7 +120,7 @@ type CreateVpcEndpointRequest struct { AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id" tf:"optional"` // The Google Cloud specific information for this Private Service Connect // endpoint. 
- GcpVpcEndpointInfo *GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional"` + GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional"` // The AWS region in which this VPC endpoint object exists. Region types.String `tfsdk:"region" tf:"optional"` // The human-readable name of the storage configuration. @@ -135,7 +135,7 @@ type CreateWorkspaceRequest struct { Cloud types.String `tfsdk:"cloud" tf:"optional"` // The general workspace configurations that are specific to cloud // providers. - CloudResourceContainer *CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional"` + CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional"` // ID of the workspace's credential configuration object. CredentialsId types.String `tfsdk:"credentials_id" tf:"optional"` // The custom tags key-value pairing that is attached to this workspace. The @@ -196,9 +196,9 @@ type CreateWorkspaceRequest struct { // for a new workspace]. // // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html - GcpManagedNetworkConfig *GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional"` + GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional"` // The configurations for the GKE cluster of a Databricks workspace. - GkeConfig *GkeConfig `tfsdk:"gke_config" tf:"optional"` + GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional"` // The Google Cloud region of the workspace data plane in your Google // account. For example, `us-east4`. Location types.String `tfsdk:"location" tf:"optional"` @@ -242,7 +242,7 @@ type Credential struct { // The Databricks account ID that hosts the credential. AccountId types.String `tfsdk:"account_id" tf:"optional"` - AwsCredentials *AwsCredentials `tfsdk:"aws_credentials" tf:"optional"` + AwsCredentials []AwsCredentials `tfsdk:"aws_credentials" tf:"optional"` // Time in epoch milliseconds when the credential was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // Databricks credential configuration ID. @@ -262,13 +262,13 @@ type CustomerManagedKey struct { // The Databricks account ID that holds the customer-managed key. AccountId types.String `tfsdk:"account_id" tf:"optional"` - AwsKeyInfo *AwsKeyInfo `tfsdk:"aws_key_info" tf:"optional"` + AwsKeyInfo []AwsKeyInfo `tfsdk:"aws_key_info" tf:"optional"` // Time in epoch milliseconds when the customer key was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // ID of the encryption key configuration object. CustomerManagedKeyId types.String `tfsdk:"customer_managed_key_id" tf:"optional"` - GcpKeyInfo *GcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional"` + GcpKeyInfo []GcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional"` // The cases that the key can be used for. UseCases []types.String `tfsdk:"use_cases" tf:"optional"` } @@ -466,7 +466,7 @@ type Network struct { ErrorMessages []NetworkHealth `tfsdk:"error_messages" tf:"optional"` // The Google Cloud specific information for this network (for example, the // VPC ID, subnet ID, and secondary IP ranges). - GcpNetworkInfo *GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional"` + GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional"` // The Databricks network configuration ID. NetworkId types.String `tfsdk:"network_id" tf:"optional"` // The human-readable name of the network configuration. 
@@ -479,7 +479,7 @@ type Network struct { // communication from this VPC over [AWS PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ - VpcEndpoints *NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional"` + VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional"` // The ID of the VPC associated with this network configuration. VPC IDs can // be used in multiple networks. VpcId types.String `tfsdk:"vpc_id" tf:"optional"` @@ -564,7 +564,7 @@ type StorageConfiguration struct { // Time in epoch milliseconds when the storage configuration was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // Root S3 bucket information. - RootBucketInfo *RootBucketInfo `tfsdk:"root_bucket_info" tf:"optional"` + RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"optional"` // Databricks storage configuration ID. StorageConfigurationId types.String `tfsdk:"storage_configuration_id" tf:"optional"` // The human-readable name of the storage configuration. @@ -668,7 +668,7 @@ type VpcEndpoint struct { AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id" tf:"optional"` // The Google Cloud specific information for this Private Service Connect // endpoint. - GcpVpcEndpointInfo *GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional"` + GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional"` // The AWS region in which this VPC endpoint object exists. Region types.String `tfsdk:"region" tf:"optional"` // The current state (such as `available` or `rejected`) of the VPC @@ -696,12 +696,12 @@ type Workspace struct { // The AWS region of the workspace data plane (for example, `us-west-2`). AwsRegion types.String `tfsdk:"aws_region" tf:"optional"` - AzureWorkspaceInfo *AzureWorkspaceInfo `tfsdk:"azure_workspace_info" tf:"optional"` + AzureWorkspaceInfo []AzureWorkspaceInfo `tfsdk:"azure_workspace_info" tf:"optional"` // The cloud name. This field always has the value `gcp`. Cloud types.String `tfsdk:"cloud" tf:"optional"` // The general workspace configurations that are specific to cloud // providers. - CloudResourceContainer *CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional"` + CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional"` // Time in epoch milliseconds when the workspace was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // ID of the workspace's credential configuration object. @@ -741,9 +741,9 @@ type Workspace struct { // for a new workspace]. // // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html - GcpManagedNetworkConfig *GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional"` + GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional"` // The configurations for the GKE cluster of a Databricks workspace. - GkeConfig *GkeConfig `tfsdk:"gke_config" tf:"optional"` + GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional"` // The Google Cloud region of the workspace data plane in your Google // account (for example, `us-east4`). 
Location types.String `tfsdk:"location" tf:"optional"` diff --git a/internal/service/serving_tf/model.go b/internal/service/serving_tf/model.go index b22dc911a7..e34dcc1037 100755 --- a/internal/service/serving_tf/model.go +++ b/internal/service/serving_tf/model.go @@ -33,17 +33,17 @@ type Ai21LabsConfig struct { type AiGatewayConfig struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. - Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` // Configuration for payload logging using inference tables. Use these // tables to monitor and audit data being sent to and received from model // APIs and to improve model quality. - InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` // Configuration for rate limits which can be set to limit endpoint traffic. RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` // Configuration to enable usage tracking using system tables. These tables // allow you to monitor operational usage on endpoints and their associated // costs. - UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` + UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` } type AiGatewayGuardrailParameters struct { @@ -51,7 +51,7 @@ type AiGatewayGuardrailParameters struct { // decide if the keyword exists in the request or response content. InvalidKeywords []types.String `tfsdk:"invalid_keywords" tf:"optional"` // Configuration for guardrail PII filter. - Pii *AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional"` + Pii []AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional"` // Indicates whether the safety filter is enabled. Safety types.Bool `tfsdk:"safety" tf:"optional"` // The list of allowed topics. Given a chat request, this guardrail flags @@ -71,9 +71,9 @@ type AiGatewayGuardrailPiiBehavior struct { type AiGatewayGuardrails struct { // Configuration for input guardrail filters. - Input *AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional"` + Input []AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional"` // Configuration for output guardrail filters. - Output *AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional"` + Output []AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional"` } type AiGatewayInferenceTableConfig struct { @@ -178,13 +178,13 @@ type AutoCaptureConfigOutput struct { // The name of the schema in Unity Catalog. SchemaName types.String `tfsdk:"schema_name" tf:"optional"` - State *AutoCaptureState `tfsdk:"state" tf:"optional"` + State []AutoCaptureState `tfsdk:"state" tf:"optional"` // The prefix of the table in Unity Catalog. TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"` } type AutoCaptureState struct { - PayloadTable *PayloadTable `tfsdk:"payload_table" tf:"optional"` + PayloadTable []PayloadTable `tfsdk:"payload_table" tf:"optional"` } // Get build logs for a served model @@ -228,9 +228,9 @@ type CohereConfig struct { type CreateServingEndpoint struct { // The AI Gateway configuration for the serving endpoint. NOTE: only // external model endpoints are supported as of now. 
- AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` + AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The core config of the serving endpoint. - Config EndpointCoreConfigInput `tfsdk:"config" tf:""` + Config []EndpointCoreConfigInput `tfsdk:"config" tf:""` // The name of the serving endpoint. This field is required and must be // unique across a Databricks workspace. An endpoint name can consist of // alphanumeric characters, dashes, and underscores. @@ -293,7 +293,7 @@ type EmbeddingsV1ResponseEmbeddingElement struct { type EndpointCoreConfigInput struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. - AutoCaptureConfig *AutoCaptureConfigInput `tfsdk:"auto_capture_config" tf:"optional"` + AutoCaptureConfig []AutoCaptureConfigInput `tfsdk:"auto_capture_config" tf:"optional"` // The name of the serving endpoint to update. This field is required. Name types.String `tfsdk:"-"` // A list of served entities for the endpoint to serve. A serving endpoint @@ -304,13 +304,13 @@ type EndpointCoreConfigInput struct { ServedModels []ServedModelInput `tfsdk:"served_models" tf:"optional"` // The traffic config defining how invocations to the serving endpoint // should be routed. - TrafficConfig *TrafficConfig `tfsdk:"traffic_config" tf:"optional"` + TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"` } type EndpointCoreConfigOutput struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. - AutoCaptureConfig *AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional"` + AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional"` // The config version that the serving endpoint is currently serving. ConfigVersion types.Int64 `tfsdk:"config_version" tf:"optional"` // The list of served entities under the serving endpoint config. @@ -319,7 +319,7 @@ type EndpointCoreConfigOutput struct { // the serving endpoint config. ServedModels []ServedModelOutput `tfsdk:"served_models" tf:"optional"` // The traffic configuration associated with the serving endpoint config. - TrafficConfig *TrafficConfig `tfsdk:"traffic_config" tf:"optional"` + TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"` } type EndpointCoreConfigSummary struct { @@ -333,7 +333,7 @@ type EndpointCoreConfigSummary struct { type EndpointPendingConfig struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. - AutoCaptureConfig *AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional"` + AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional"` // The config version that the serving endpoint is currently serving. ConfigVersion types.Int64 `tfsdk:"config_version" tf:"optional"` // The list of served entities belonging to the last issued update to the @@ -346,7 +346,7 @@ type EndpointPendingConfig struct { StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // The traffic config defining how invocations to the serving endpoint // should be routed. - TrafficConfig *TrafficConfig `tfsdk:"traffic_config" tf:"optional"` + TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"` } type EndpointState struct { @@ -383,25 +383,25 @@ type ExportMetricsResponse struct { type ExternalModel struct { // AI21Labs Config. Only required if the provider is 'ai21labs'. 
- Ai21labsConfig *Ai21LabsConfig `tfsdk:"ai21labs_config" tf:"optional"` + Ai21labsConfig []Ai21LabsConfig `tfsdk:"ai21labs_config" tf:"optional"` // Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'. - AmazonBedrockConfig *AmazonBedrockConfig `tfsdk:"amazon_bedrock_config" tf:"optional"` + AmazonBedrockConfig []AmazonBedrockConfig `tfsdk:"amazon_bedrock_config" tf:"optional"` // Anthropic Config. Only required if the provider is 'anthropic'. - AnthropicConfig *AnthropicConfig `tfsdk:"anthropic_config" tf:"optional"` + AnthropicConfig []AnthropicConfig `tfsdk:"anthropic_config" tf:"optional"` // Cohere Config. Only required if the provider is 'cohere'. - CohereConfig *CohereConfig `tfsdk:"cohere_config" tf:"optional"` + CohereConfig []CohereConfig `tfsdk:"cohere_config" tf:"optional"` // Databricks Model Serving Config. Only required if the provider is // 'databricks-model-serving'. - DatabricksModelServingConfig *DatabricksModelServingConfig `tfsdk:"databricks_model_serving_config" tf:"optional"` + DatabricksModelServingConfig []DatabricksModelServingConfig `tfsdk:"databricks_model_serving_config" tf:"optional"` // Google Cloud Vertex AI Config. Only required if the provider is // 'google-cloud-vertex-ai'. - GoogleCloudVertexAiConfig *GoogleCloudVertexAiConfig `tfsdk:"google_cloud_vertex_ai_config" tf:"optional"` + GoogleCloudVertexAiConfig []GoogleCloudVertexAiConfig `tfsdk:"google_cloud_vertex_ai_config" tf:"optional"` // The name of the external model. Name types.String `tfsdk:"name" tf:""` // OpenAI Config. Only required if the provider is 'openai'. - OpenaiConfig *OpenAiConfig `tfsdk:"openai_config" tf:"optional"` + OpenaiConfig []OpenAiConfig `tfsdk:"openai_config" tf:"optional"` // PaLM Config. Only required if the provider is 'palm'. - PalmConfig *PaLmConfig `tfsdk:"palm_config" tf:"optional"` + PalmConfig []PaLmConfig `tfsdk:"palm_config" tf:"optional"` // The name of the provider for the external model. Currently, the supported // providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', // 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and @@ -513,7 +513,7 @@ type LogsRequest struct { type ModelDataPlaneInfo struct { // Information required to query DataPlane API 'query' endpoint. - QueryInfo *oauth2.DataPlaneInfo `tfsdk:"query_info" tf:"optional"` + QueryInfo oauth2.DataPlaneInfo `tfsdk:"query_info" tf:"optional"` } type OpenAiConfig struct { @@ -606,11 +606,11 @@ type PayloadTable struct { type PutAiGatewayRequest struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. - Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` // Configuration for payload logging using inference tables. Use these // tables to monitor and audit data being sent to and received from model // APIs and to improve model quality. - InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` // The name of the serving endpoint whose AI Gateway is being updated. This // field is required. Name types.String `tfsdk:"-"` @@ -619,23 +619,23 @@ type PutAiGatewayRequest struct { // Configuration to enable usage tracking using system tables. These tables // allow you to monitor operational usage on endpoints and their associated // costs. 
- UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` + UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` } type PutAiGatewayResponse struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. - Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` // Configuration for payload logging using inference tables. Use these // tables to monitor and audit data being sent to and received from model // APIs and to improve model quality . - InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` // Configuration for rate limits which can be set to limit endpoint traffic. RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` // Configuration to enable usage tracking using system tables. These tables // allow you to monitor operational usage on endpoints and their associated // costs. - UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` + UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` } // Update rate limits of a serving endpoint @@ -656,7 +656,7 @@ type QueryEndpointInput struct { // Pandas Dataframe input in the records orientation. DataframeRecords []any `tfsdk:"dataframe_records" tf:"optional"` // Pandas Dataframe input in the split orientation. - DataframeSplit *DataframeSplitInput `tfsdk:"dataframe_split" tf:"optional"` + DataframeSplit []DataframeSplitInput `tfsdk:"dataframe_split" tf:"optional"` // The extra parameters field used ONLY for __completions, chat,__ and // __embeddings external & foundation model__ serving endpoints. This is a // map of strings and should only be used with other external/foundation @@ -732,7 +732,7 @@ type QueryEndpointResponse struct { // The usage object that may be returned by the __external/foundation // model__ serving endpoint. This contains information about the number of // tokens used in the prompt and response. - Usage *ExternalModelUsageElement `tfsdk:"usage" tf:"optional"` + Usage []ExternalModelUsageElement `tfsdk:"usage" tf:"optional"` } type RateLimit struct { @@ -781,7 +781,7 @@ type ServedEntityInput struct { // endpoint without external_model. If the endpoint is created without // external_model, users cannot update it to add external_model later. The // task type of all external models within an endpoint must be the same. - ExternalModel *ExternalModel `tfsdk:"external_model" tf:"optional"` + ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"` // ARN of the instance profile that the served entity uses to access AWS // resources. InstanceProfileArn types.String `tfsdk:"instance_profile_arn" tf:"optional"` @@ -842,12 +842,12 @@ type ServedEntityOutput struct { // foundation_model, and (entity_name, entity_version, workload_size, // workload_type, and scale_to_zero_enabled) is returned based on the // endpoint type. - ExternalModel *ExternalModel `tfsdk:"external_model" tf:"optional"` + ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"` // The foundation model that is served. 
NOTE: Only one of foundation_model, // external_model, and (entity_name, entity_version, workload_size, // workload_type, and scale_to_zero_enabled) is returned based on the // endpoint type. - FoundationModel *FoundationModel `tfsdk:"foundation_model" tf:"optional"` + FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional"` // ARN of the instance profile that the served entity uses to access AWS // resources. InstanceProfileArn types.String `tfsdk:"instance_profile_arn" tf:"optional"` @@ -861,7 +861,7 @@ type ServedEntityOutput struct { // zero. ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled" tf:"optional"` // Information corresponding to the state of the served entity. - State *ServedModelState `tfsdk:"state" tf:"optional"` + State []ServedModelState `tfsdk:"state" tf:"optional"` // The workload size of the served entity. The workload size corresponds to // a range of provisioned concurrency that the compute autoscales between. A // single unit of provisioned concurrency can process one request at a time. @@ -893,11 +893,11 @@ type ServedEntitySpec struct { // The external model that is served. NOTE: Only one of external_model, // foundation_model, and (entity_name, entity_version) is returned based on // the endpoint type. - ExternalModel *ExternalModel `tfsdk:"external_model" tf:"optional"` + ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"` // The foundation model that is served. NOTE: Only one of foundation_model, // external_model, and (entity_name, entity_version) is returned based on // the endpoint type. - FoundationModel *FoundationModel `tfsdk:"foundation_model" tf:"optional"` + FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional"` // The name of the served entity. Name types.String `tfsdk:"name" tf:"optional"` } @@ -977,7 +977,7 @@ type ServedModelOutput struct { // zero. ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled" tf:"optional"` // Information corresponding to the state of the Served Model. - State *ServedModelState `tfsdk:"state" tf:"optional"` + State []ServedModelState `tfsdk:"state" tf:"optional"` // The workload size of the served model. The workload size corresponds to a // range of provisioned concurrency that the compute will autoscale between. // A single unit of provisioned concurrency can process one request at a @@ -1034,9 +1034,9 @@ type ServerLogsResponse struct { type ServingEndpoint struct { // The AI Gateway configuration for the serving endpoint. NOTE: Only // external model endpoints are currently supported. - AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` + AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The config that is currently being served by the endpoint. - Config *EndpointCoreConfigSummary `tfsdk:"config" tf:"optional"` + Config []EndpointCoreConfigSummary `tfsdk:"config" tf:"optional"` // The timestamp when the endpoint was created in Unix time. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` // The email of the user who created the serving endpoint. @@ -1049,7 +1049,7 @@ type ServingEndpoint struct { // The name of the serving endpoint. Name types.String `tfsdk:"name" tf:"optional"` // Information corresponding to the state of the serving endpoint. - State *EndpointState `tfsdk:"state" tf:"optional"` + State []EndpointState `tfsdk:"state" tf:"optional"` // Tags attached to the serving endpoint. Tags []EndpointTag `tfsdk:"tags" tf:"optional"` // The task type of the serving endpoint. 
@@ -1083,15 +1083,15 @@ type ServingEndpointAccessControlResponse struct { type ServingEndpointDetailed struct { // The AI Gateway configuration for the serving endpoint. NOTE: Only // external model endpoints are currently supported. - AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` + AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The config that is currently being served by the endpoint. - Config *EndpointCoreConfigOutput `tfsdk:"config" tf:"optional"` + Config []EndpointCoreConfigOutput `tfsdk:"config" tf:"optional"` // The timestamp when the endpoint was created in Unix time. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` // The email of the user who created the serving endpoint. Creator types.String `tfsdk:"creator" tf:"optional"` // Information required to query DataPlane APIs. - DataPlaneInfo *ModelDataPlaneInfo `tfsdk:"data_plane_info" tf:"optional"` + DataPlaneInfo []ModelDataPlaneInfo `tfsdk:"data_plane_info" tf:"optional"` // Endpoint invocation url if route optimization is enabled for endpoint EndpointUrl types.String `tfsdk:"endpoint_url" tf:"optional"` // System-generated ID of the endpoint. This is used to refer to the @@ -1102,14 +1102,14 @@ type ServingEndpointDetailed struct { // The name of the serving endpoint. Name types.String `tfsdk:"name" tf:"optional"` // The config that the endpoint is attempting to update to. - PendingConfig *EndpointPendingConfig `tfsdk:"pending_config" tf:"optional"` + PendingConfig []EndpointPendingConfig `tfsdk:"pending_config" tf:"optional"` // The permission level of the principal making the request. PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` // Boolean representing if route optimization has been enabled for the // endpoint RouteOptimized types.Bool `tfsdk:"route_optimized" tf:"optional"` // Information corresponding to the state of the serving endpoint. - State *EndpointState `tfsdk:"state" tf:"optional"` + State []EndpointState `tfsdk:"state" tf:"optional"` // Tags attached to the serving endpoint. Tags []EndpointTag `tfsdk:"tags" tf:"optional"` // The task type of the serving endpoint. @@ -1157,7 +1157,7 @@ type V1ResponseChoiceElement struct { // The logprobs returned only by the __completions__ endpoint. Logprobs types.Int64 `tfsdk:"logprobs" tf:"optional"` // The message response from the __chat__ endpoint. - Message *ChatMessage `tfsdk:"message" tf:"optional"` + Message []ChatMessage `tfsdk:"message" tf:"optional"` // The text response from the __completions__ endpoint. Text types.String `tfsdk:"text" tf:"optional"` } diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 117cf8d113..1ee6dcc0a1 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -15,7 +15,7 @@ import ( ) type AutomaticClusterUpdateSetting struct { - AutomaticClusterUpdateWorkspace ClusterAutoRestartMessage `tfsdk:"automatic_cluster_update_workspace" tf:""` + AutomaticClusterUpdateWorkspace []ClusterAutoRestartMessage `tfsdk:"automatic_cluster_update_workspace" tf:""` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -46,9 +46,9 @@ type ClusterAutoRestartMessage struct { // intended to use only for purposes like showing an error message to the // customer with the additional details. 
For example, using these details we // can check why exactly the feature is disabled for this customer. - EnablementDetails *ClusterAutoRestartMessageEnablementDetails `tfsdk:"enablement_details" tf:"optional"` + EnablementDetails []ClusterAutoRestartMessageEnablementDetails `tfsdk:"enablement_details" tf:"optional"` - MaintenanceWindow *ClusterAutoRestartMessageMaintenanceWindow `tfsdk:"maintenance_window" tf:"optional"` + MaintenanceWindow []ClusterAutoRestartMessageMaintenanceWindow `tfsdk:"maintenance_window" tf:"optional"` RestartEvenIfNoUpdatesAvailable types.Bool `tfsdk:"restart_even_if_no_updates_available" tf:"optional"` } @@ -70,7 +70,7 @@ type ClusterAutoRestartMessageEnablementDetails struct { } type ClusterAutoRestartMessageMaintenanceWindow struct { - WeekDayBasedSchedule *ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule `tfsdk:"week_day_based_schedule" tf:"optional"` + WeekDayBasedSchedule []ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule `tfsdk:"week_day_based_schedule" tf:"optional"` } type ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule struct { @@ -78,7 +78,7 @@ type ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule struct { Frequency types.String `tfsdk:"frequency" tf:"optional"` - WindowStartTime *ClusterAutoRestartMessageMaintenanceWindowWindowStartTime `tfsdk:"window_start_time" tf:"optional"` + WindowStartTime []ClusterAutoRestartMessageMaintenanceWindowWindowStartTime `tfsdk:"window_start_time" tf:"optional"` } type ClusterAutoRestartMessageMaintenanceWindowWindowStartTime struct { @@ -97,7 +97,7 @@ type ComplianceSecurityProfile struct { type ComplianceSecurityProfileSetting struct { // SHIELD feature: CSP - ComplianceSecurityProfileWorkspace ComplianceSecurityProfile `tfsdk:"compliance_security_profile_workspace" tf:""` + ComplianceSecurityProfileWorkspace []ComplianceSecurityProfile `tfsdk:"compliance_security_profile_workspace" tf:""` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -115,15 +115,15 @@ type ComplianceSecurityProfileSetting struct { } type Config struct { - Email *EmailConfig `tfsdk:"email" tf:"optional"` + Email []EmailConfig `tfsdk:"email" tf:"optional"` - GenericWebhook *GenericWebhookConfig `tfsdk:"generic_webhook" tf:"optional"` + GenericWebhook []GenericWebhookConfig `tfsdk:"generic_webhook" tf:"optional"` - MicrosoftTeams *MicrosoftTeamsConfig `tfsdk:"microsoft_teams" tf:"optional"` + MicrosoftTeams []MicrosoftTeamsConfig `tfsdk:"microsoft_teams" tf:"optional"` - Pagerduty *PagerdutyConfig `tfsdk:"pagerduty" tf:"optional"` + Pagerduty []PagerdutyConfig `tfsdk:"pagerduty" tf:"optional"` - Slack *SlackConfig `tfsdk:"slack" tf:"optional"` + Slack []SlackConfig `tfsdk:"slack" tf:"optional"` } // Details required to configure a block list or allow list. @@ -143,7 +143,7 @@ type CreateIpAccessList struct { // An IP access list was successfully created. type CreateIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList *IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` } type CreateNetworkConnectivityConfigRequest struct { @@ -161,7 +161,7 @@ type CreateNetworkConnectivityConfigRequest struct { type CreateNotificationDestinationRequest struct { // The configuration for the notification destination. 
Must wrap EXACTLY one // of the nested configs. - Config *Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional"` // The display name for the notification destination. DisplayName types.String `tfsdk:"display_name" tf:"optional"` } @@ -178,7 +178,7 @@ type CreateOboTokenRequest struct { // An on-behalf token was successfully created for the service principal. type CreateOboTokenResponse struct { - TokenInfo *TokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional"` // Value of the token. TokenValue types.String `tfsdk:"token_value" tf:"optional"` } @@ -205,7 +205,7 @@ type CreateTokenRequest struct { type CreateTokenResponse struct { // The information for the new token. - TokenInfo *PublicTokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []PublicTokenInfo `tfsdk:"token_info" tf:"optional"` // The value of the new token. TokenValue types.String `tfsdk:"token_value" tf:"optional"` } @@ -221,7 +221,7 @@ type CspEnablementAccount struct { type CspEnablementAccountSetting struct { // Account level policy for CSP - CspEnablementAccount CspEnablementAccount `tfsdk:"csp_enablement_account" tf:""` + CspEnablementAccount []CspEnablementAccount `tfsdk:"csp_enablement_account" tf:""` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -257,7 +257,7 @@ type DefaultNamespaceSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - Namespace StringMessage `tfsdk:"namespace" tf:""` + Namespace []StringMessage `tfsdk:"namespace" tf:""` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. The setting name in the path parameter will be respected instead. @@ -430,7 +430,7 @@ type DeleteTokenManagementRequest struct { } type DisableLegacyAccess struct { - DisableLegacyAccess BooleanMessage `tfsdk:"disable_legacy_access" tf:""` + DisableLegacyAccess []BooleanMessage `tfsdk:"disable_legacy_access" tf:""` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -448,7 +448,7 @@ type DisableLegacyAccess struct { } type DisableLegacyFeatures struct { - DisableLegacyFeatures BooleanMessage `tfsdk:"disable_legacy_features" tf:""` + DisableLegacyFeatures []BooleanMessage `tfsdk:"disable_legacy_features" tf:""` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -480,7 +480,7 @@ type EnhancedSecurityMonitoring struct { type EnhancedSecurityMonitoringSetting struct { // SHIELD feature: ESM - EnhancedSecurityMonitoringWorkspace EnhancedSecurityMonitoring `tfsdk:"enhanced_security_monitoring_workspace" tf:""` + EnhancedSecurityMonitoringWorkspace []EnhancedSecurityMonitoring `tfsdk:"enhanced_security_monitoring_workspace" tf:""` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. 
It @@ -504,7 +504,7 @@ type EsmEnablementAccount struct { type EsmEnablementAccountSetting struct { // Account level policy for ESM - EsmEnablementAccount EsmEnablementAccount `tfsdk:"esm_enablement_account" tf:""` + EsmEnablementAccount []EsmEnablementAccount `tfsdk:"esm_enablement_account" tf:""` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -539,7 +539,7 @@ type ExchangeToken struct { // Exchange a token with the IdP type ExchangeTokenRequest struct { // The partition of Credentials store - PartitionId PartitionId `tfsdk:"partitionId" tf:""` + PartitionId []PartitionId `tfsdk:"partitionId" tf:""` // Array of scopes for the token request. Scopes []types.String `tfsdk:"scopes" tf:""` // A list of token types being requested @@ -554,7 +554,7 @@ type ExchangeTokenResponse struct { // An IP access list was successfully returned. type FetchIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList *IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` } type GenericWebhookConfig struct { @@ -682,7 +682,7 @@ type GetIpAccessListRequest struct { type GetIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList *IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` } // IP access lists were successfully returned. @@ -751,7 +751,7 @@ type GetTokenPermissionLevelsResponse struct { // Token with specified Token ID was successfully returned. type GetTokenResponse struct { - TokenInfo *TokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional"` } // Definition of an IP Access list @@ -931,10 +931,10 @@ type NccEgressConfig struct { // The network connectivity rules that are applied by default without // resource specific configurations. You can find the stable network // information of your serverless compute resources here. - DefaultRules *NccEgressDefaultRules `tfsdk:"default_rules" tf:"optional"` + DefaultRules []NccEgressDefaultRules `tfsdk:"default_rules" tf:"optional"` // The network connectivity rules that configured for each destinations. // These rules override default rules. - TargetRules *NccEgressTargetRules `tfsdk:"target_rules" tf:"optional"` + TargetRules []NccEgressTargetRules `tfsdk:"target_rules" tf:"optional"` } // The network connectivity rules that are applied by default without resource @@ -944,11 +944,11 @@ type NccEgressDefaultRules struct { // The stable AWS IP CIDR blocks. You can use these to configure the // firewall of your resources to allow traffic from your Databricks // workspace. - AwsStableIpRule *NccAwsStableIpRule `tfsdk:"aws_stable_ip_rule" tf:"optional"` + AwsStableIpRule []NccAwsStableIpRule `tfsdk:"aws_stable_ip_rule" tf:"optional"` // The stable Azure service endpoints. You can configure the firewall of // your Azure resources to allow traffic from your Databricks serverless // compute resources. - AzureServiceEndpointRule *NccAzureServiceEndpointRule `tfsdk:"azure_service_endpoint_rule" tf:"optional"` + AzureServiceEndpointRule []NccAzureServiceEndpointRule `tfsdk:"azure_service_endpoint_rule" tf:"optional"` } // The network connectivity rules that configured for each destinations. 
These @@ -964,7 +964,7 @@ type NetworkConnectivityConfiguration struct { CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // The network connectivity rules that apply to network traffic from your // serverless compute resources. - EgressConfig *NccEgressConfig `tfsdk:"egress_config" tf:"optional"` + EgressConfig []NccEgressConfig `tfsdk:"egress_config" tf:"optional"` // The name of the network connectivity configuration. The name can contain // alphanumeric characters, hyphens, and underscores. The length must be // between 3 and 30 characters. The name must match the regular expression @@ -984,7 +984,7 @@ type NotificationDestination struct { // The configuration for the notification destination. Will be exactly one // of the nested configs. Only returns for users with workspace admin // permissions. - Config *Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional"` // [Output-only] The type of the notification destination. The type can not // be changed once set. DestinationType types.String `tfsdk:"destination_type" tf:"optional"` @@ -1028,7 +1028,7 @@ type PersonalComputeSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - PersonalCompute PersonalComputeMessage `tfsdk:"personal_compute" tf:""` + PersonalCompute []PersonalComputeMessage `tfsdk:"personal_compute" tf:""` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. The setting name in the path parameter will be respected instead. @@ -1085,7 +1085,7 @@ type RestrictWorkspaceAdminsSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - RestrictWorkspaceAdmins RestrictWorkspaceAdminsMessage `tfsdk:"restrict_workspace_admins" tf:""` + RestrictWorkspaceAdmins []RestrictWorkspaceAdminsMessage `tfsdk:"restrict_workspace_admins" tf:""` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. The setting name in the path parameter will be respected instead. @@ -1198,7 +1198,7 @@ type UpdateAutomaticClusterUpdateSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting AutomaticClusterUpdateSetting `tfsdk:"setting" tf:""` + Setting []AutomaticClusterUpdateSetting `tfsdk:"setting" tf:""` } // Details required to update a setting. @@ -1212,7 +1212,7 @@ type UpdateComplianceSecurityProfileSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting ComplianceSecurityProfileSetting `tfsdk:"setting" tf:""` + Setting []ComplianceSecurityProfileSetting `tfsdk:"setting" tf:""` } // Details required to update a setting. @@ -1226,7 +1226,7 @@ type UpdateCspEnablementAccountSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting CspEnablementAccountSetting `tfsdk:"setting" tf:""` + Setting []CspEnablementAccountSetting `tfsdk:"setting" tf:""` } // Details required to update a setting. @@ -1248,7 +1248,7 @@ type UpdateDefaultNamespaceSettingRequest struct { // assumed). This setting requires a restart of clusters and SQL warehouses // to take effect. 
Additionally, the default namespace only applies when // using Unity Catalog-enabled compute. - Setting DefaultNamespaceSetting `tfsdk:"setting" tf:""` + Setting []DefaultNamespaceSetting `tfsdk:"setting" tf:""` } // Details required to update a setting. @@ -1262,7 +1262,7 @@ type UpdateDisableLegacyAccessRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting DisableLegacyAccess `tfsdk:"setting" tf:""` + Setting []DisableLegacyAccess `tfsdk:"setting" tf:""` } // Details required to update a setting. @@ -1276,7 +1276,7 @@ type UpdateDisableLegacyFeaturesRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting DisableLegacyFeatures `tfsdk:"setting" tf:""` + Setting []DisableLegacyFeatures `tfsdk:"setting" tf:""` } // Details required to update a setting. @@ -1290,7 +1290,7 @@ type UpdateEnhancedSecurityMonitoringSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting EnhancedSecurityMonitoringSetting `tfsdk:"setting" tf:""` + Setting []EnhancedSecurityMonitoringSetting `tfsdk:"setting" tf:""` } // Details required to update a setting. @@ -1304,7 +1304,7 @@ type UpdateEsmEnablementAccountSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting EsmEnablementAccountSetting `tfsdk:"setting" tf:""` + Setting []EsmEnablementAccountSetting `tfsdk:"setting" tf:""` } // Details required to update an IP access list. @@ -1329,7 +1329,7 @@ type UpdateIpAccessList struct { type UpdateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. - Config *Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional"` // The display name for the notification destination. DisplayName types.String `tfsdk:"display_name" tf:"optional"` @@ -1347,7 +1347,7 @@ type UpdatePersonalComputeSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting PersonalComputeSetting `tfsdk:"setting" tf:""` + Setting []PersonalComputeSetting `tfsdk:"setting" tf:""` } type UpdateResponse struct { @@ -1364,5 +1364,5 @@ type UpdateRestrictWorkspaceAdminsSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting RestrictWorkspaceAdminsSetting `tfsdk:"setting" tf:""` + Setting []RestrictWorkspaceAdminsSetting `tfsdk:"setting" tf:""` } diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index 13229078de..210f466b32 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -22,7 +22,7 @@ type CentralCleanRoomInfo struct { // All collaborators who are in the clean room. Collaborators []CleanRoomCollaboratorInfo `tfsdk:"collaborators" tf:"optional"` // The collaborator who created the clean room. - Creator *CleanRoomCollaboratorInfo `tfsdk:"creator" tf:"optional"` + Creator []CleanRoomCollaboratorInfo `tfsdk:"creator" tf:"optional"` // The cloud where clean room tasks will be run. StationCloud types.String `tfsdk:"station_cloud" tf:"optional"` // The region where clean room tasks will be run. 
@@ -33,11 +33,11 @@ type CleanRoomAssetInfo struct { // Time at which this asset was added, in epoch milliseconds. AddedAt types.Int64 `tfsdk:"added_at" tf:"optional"` // Details about the notebook asset. - NotebookInfo *CleanRoomNotebookInfo `tfsdk:"notebook_info" tf:"optional"` + NotebookInfo []CleanRoomNotebookInfo `tfsdk:"notebook_info" tf:"optional"` // The collaborator who owns the asset. - Owner *CleanRoomCollaboratorInfo `tfsdk:"owner" tf:"optional"` + Owner []CleanRoomCollaboratorInfo `tfsdk:"owner" tf:"optional"` // Details about the table asset. - TableInfo *CleanRoomTableInfo `tfsdk:"table_info" tf:"optional"` + TableInfo []CleanRoomTableInfo `tfsdk:"table_info" tf:"optional"` // Time at which this asset was updated, in epoch milliseconds. UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` } @@ -55,7 +55,7 @@ type CleanRoomCatalogUpdate struct { // The name of the catalog to update assets. CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` // The updates to the assets in the catalog. - Updates *SharedDataObjectUpdate `tfsdk:"updates" tf:"optional"` + Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional"` } type CleanRoomCollaboratorInfo struct { @@ -83,7 +83,7 @@ type CleanRoomInfo struct { // Username of current owner of clean room. Owner types.String `tfsdk:"owner" tf:"optional"` // Central clean room details. - RemoteDetailedInfo *CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"optional"` + RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"optional"` // Time at which this clean room was updated, in epoch milliseconds. UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` // Username of clean room updater. @@ -115,7 +115,7 @@ type ColumnInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` - Mask *ColumnMask `tfsdk:"mask" tf:"optional"` + Mask []ColumnMask `tfsdk:"mask" tf:"optional"` // Name of Column. Name types.String `tfsdk:"name" tf:"optional"` // Whether field may be Null (default: true). @@ -154,7 +154,7 @@ type CreateCleanRoom struct { // Name of the clean room. Name types.String `tfsdk:"name" tf:""` // Central clean room details. - RemoteDetailedInfo CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:""` + RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:""` } type CreateProvider struct { @@ -182,13 +182,13 @@ type CreateRecipient struct { // Expiration timestamp of the token, in epoch milliseconds. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // IP Access List - IpAccessList *IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` // Name of Recipient. Name types.String `tfsdk:"name" tf:""` // Username of the recipient owner. Owner types.String `tfsdk:"owner" tf:"optional"` // Recipient properties as map of string key-value pairs. - PropertiesKvpairs *SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` // The one-time sharing code provided by the data recipient. This field is // required when the __authentication_type__ is **DATABRICKS**. SharingCode types.String `tfsdk:"sharing_code" tf:"optional"` @@ -447,7 +447,7 @@ type ProviderInfo struct { Owner types.String `tfsdk:"owner" tf:"optional"` // The recipient profile. This field is only present when the // authentication_type is `TOKEN`. 
- RecipientProfile *RecipientProfile `tfsdk:"recipient_profile" tf:"optional"` + RecipientProfile []RecipientProfile `tfsdk:"recipient_profile" tf:"optional"` // This field is only present when the authentication_type is `TOKEN` or not // provided. RecipientProfileStr types.String `tfsdk:"recipient_profile_str" tf:"optional"` @@ -489,7 +489,7 @@ type RecipientInfo struct { // __cloud__:__region__:__metastore-uuid__. DataRecipientGlobalMetastoreId types.String `tfsdk:"data_recipient_global_metastore_id" tf:"optional"` // IP Access List - IpAccessList *IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` // Unique identifier of recipient's Unity Catalog metastore. This field is // only present when the __authentication_type__ is **DATABRICKS** MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` @@ -498,7 +498,7 @@ type RecipientInfo struct { // Username of the recipient owner. Owner types.String `tfsdk:"owner" tf:"optional"` // Recipient properties as map of string key-value pairs. - PropertiesKvpairs *SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` // Cloud region of the recipient's Unity Catalog Metstore. This field is // only present when the __authentication_type__ is **DATABRICKS**. Region types.String `tfsdk:"region" tf:"optional"` @@ -676,7 +676,7 @@ type SharedDataObjectUpdate struct { // One of: **ADD**, **REMOVE**, **UPDATE**. Action types.String `tfsdk:"action" tf:"optional"` // The data object that is being added, removed, or updated. - DataObject *SharedDataObject `tfsdk:"data_object" tf:"optional"` + DataObject []SharedDataObject `tfsdk:"data_object" tf:"optional"` } type UpdateCleanRoom struct { @@ -713,7 +713,7 @@ type UpdateRecipient struct { // Expiration timestamp of the token, in epoch milliseconds. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // IP Access List - IpAccessList *IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` // Name of the recipient. Name types.String `tfsdk:"-"` // New name for the recipient. @@ -724,7 +724,7 @@ type UpdateRecipient struct { // update request, the specified properties will override the existing // properties. To add and remove properties, one would need to perform a // read-modify-write. - PropertiesKvpairs *SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` } type UpdateResponse struct { @@ -747,7 +747,7 @@ type UpdateShare struct { type UpdateSharePermissions struct { // Array of permission changes. - Changes []catalog.PermissionsChange `tfsdk:"changes" tf:"optional"` + Changes catalog.PermissionsChange `tfsdk:"changes" tf:"optional"` // Maximum number of permissions to return. - when set to 0, the page length // is set to a server configured value (recommended); - when set to a value // greater than 0, the page length is the minimum of this value and a server diff --git a/internal/service/sql_tf/model.go b/internal/service/sql_tf/model.go index e912363c30..f7b5567a20 100755 --- a/internal/service/sql_tf/model.go +++ b/internal/service/sql_tf/model.go @@ -25,7 +25,7 @@ type AccessControl struct { type Alert struct { // Trigger conditions of the alert. 
- Condition *AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional"` // The timestamp indicating when the alert was created. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // Custom body of alert notification, if it exists. See [here] for custom @@ -74,17 +74,17 @@ type AlertCondition struct { Op types.String `tfsdk:"op" tf:"optional"` // Name of the column from the query result to use for comparison in alert // evaluation. - Operand *AlertConditionOperand `tfsdk:"operand" tf:"optional"` + Operand []AlertConditionOperand `tfsdk:"operand" tf:"optional"` // Threshold value used for comparison in alert evaluation. - Threshold *AlertConditionThreshold `tfsdk:"threshold" tf:"optional"` + Threshold []AlertConditionThreshold `tfsdk:"threshold" tf:"optional"` } type AlertConditionOperand struct { - Column *AlertOperandColumn `tfsdk:"column" tf:"optional"` + Column []AlertOperandColumn `tfsdk:"column" tf:"optional"` } type AlertConditionThreshold struct { - Value *AlertOperandValue `tfsdk:"value" tf:"optional"` + Value []AlertOperandValue `tfsdk:"value" tf:"optional"` } type AlertOperandColumn struct { @@ -158,7 +158,7 @@ type AlertQuery struct { // on the query page. Name types.String `tfsdk:"name" tf:"optional"` - Options *QueryOptions `tfsdk:"options" tf:"optional"` + Options []QueryOptions `tfsdk:"options" tf:"optional"` // The text of the query to be run. Query types.String `tfsdk:"query" tf:"optional"` @@ -234,7 +234,7 @@ type CreateAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:""` // Alert configuration options. - Options AlertOptions `tfsdk:"options" tf:""` + Options []AlertOptions `tfsdk:"options" tf:""` // The identifier of the workspace folder containing the object. Parent types.String `tfsdk:"parent" tf:"optional"` // Query ID. @@ -246,12 +246,12 @@ type CreateAlert struct { } type CreateAlertRequest struct { - Alert *CreateAlertRequestAlert `tfsdk:"alert" tf:"optional"` + Alert []CreateAlertRequestAlert `tfsdk:"alert" tf:"optional"` } type CreateAlertRequestAlert struct { // Trigger conditions of the alert. - Condition *AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional"` // Custom body of alert notification, if it exists. See [here] for custom // templating instructions. // @@ -276,7 +276,7 @@ type CreateAlertRequestAlert struct { } type CreateQueryRequest struct { - Query *CreateQueryRequestQuery `tfsdk:"query" tf:"optional"` + Query []CreateQueryRequestQuery `tfsdk:"query" tf:"optional"` } type CreateQueryRequestQuery struct { @@ -325,7 +325,7 @@ type CreateQueryVisualizationsLegacyRequest struct { } type CreateVisualizationRequest struct { - Visualization *CreateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []CreateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional"` } type CreateVisualizationRequestVisualization struct { @@ -356,7 +356,7 @@ type CreateWarehouseRequest struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel *Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. 
@@ -402,7 +402,7 @@ type CreateWarehouseRequest struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags *EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -420,7 +420,7 @@ type CreateWidget struct { // Widget ID returned by :method:dashboardwidgets/create Id types.String `tfsdk:"-"` - Options WidgetOptions `tfsdk:"options" tf:""` + Options []WidgetOptions `tfsdk:"options" tf:""` // If this is a textbox widget, the application displays this text. This // field is ignored if the widget contains a visualization in the // `visualization` field. @@ -459,7 +459,7 @@ type Dashboard struct { // the dashboard page. Name types.String `tfsdk:"name" tf:"optional"` - Options *DashboardOptions `tfsdk:"options" tf:"optional"` + Options []DashboardOptions `tfsdk:"options" tf:"optional"` // The identifier of the workspace folder containing the object. Parent types.String `tfsdk:"parent" tf:"optional"` // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * @@ -473,7 +473,7 @@ type Dashboard struct { // Timestamp when this dashboard was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User *User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional"` // The ID of the user who owns the dashboard. UserId types.Int64 `tfsdk:"user_id" tf:"optional"` @@ -555,7 +555,7 @@ type DateRange struct { type DateRangeValue struct { // Manually specified date-time range value. - DateRangeValue *DateRange `tfsdk:"date_range_value" tf:"optional"` + DateRangeValue []DateRange `tfsdk:"date_range_value" tf:"optional"` // Dynamic date-time range value based on current date-time. DynamicDateRangeValue types.String `tfsdk:"dynamic_date_range_value" tf:"optional"` // Date-time precision to format the value into when the query is run. @@ -624,7 +624,7 @@ type EditAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:""` // Alert configuration options. - Options AlertOptions `tfsdk:"options" tf:""` + Options []AlertOptions `tfsdk:"options" tf:""` // Query ID. QueryId types.String `tfsdk:"query_id" tf:""` // Number of seconds after being triggered before the alert rearms itself @@ -642,7 +642,7 @@ type EditWarehouseRequest struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel *Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -690,7 +690,7 @@ type EditWarehouseRequest struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags *EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -716,7 +716,7 @@ type EndpointHealth struct { Details types.String `tfsdk:"details" tf:"optional"` // The reason for failure to bring up clusters for this warehouse. 
This is // available when status is 'FAILED' and sometimes when it is DEGRADED. - FailureReason *TerminationReason `tfsdk:"failure_reason" tf:"optional"` + FailureReason []TerminationReason `tfsdk:"failure_reason" tf:"optional"` // Deprecated. split into summary and details for security Message types.String `tfsdk:"message" tf:"optional"` // Health status of the warehouse. @@ -735,7 +735,7 @@ type EndpointInfo struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel *Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -753,7 +753,7 @@ type EndpointInfo struct { EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute" tf:"optional"` // Optional health status. Assume the warehouse is healthy if this field is // not set. - Health *EndpointHealth `tfsdk:"health" tf:"optional"` + Health []EndpointHealth `tfsdk:"health" tf:"optional"` // unique identifier for warehouse Id types.String `tfsdk:"id" tf:"optional"` // Deprecated. Instance profile used to pass IAM role to the cluster @@ -787,7 +787,7 @@ type EndpointInfo struct { // current number of clusters running for the service NumClusters types.Int64 `tfsdk:"num_clusters" tf:"optional"` // ODBC parameters for the SQL warehouse - OdbcParams *OdbcParams `tfsdk:"odbc_params" tf:"optional"` + OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional"` // Configurations whether the warehouse should use spot instances. SpotInstancePolicy types.String `tfsdk:"spot_instance_policy" tf:"optional"` // State of the warehouse @@ -796,7 +796,7 @@ type EndpointInfo struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags *EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -817,7 +817,7 @@ type EnumValue struct { // List of valid query parameter values, newline delimited. EnumOptions types.String `tfsdk:"enum_options" tf:"optional"` // If specified, allows multiple values to be selected for this parameter. - MultiValuesOptions *MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional"` // List of selected query parameter values. Values []types.String `tfsdk:"values" tf:"optional"` } @@ -1069,7 +1069,7 @@ type GetWarehouseResponse struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel *Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -1087,7 +1087,7 @@ type GetWarehouseResponse struct { EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute" tf:"optional"` // Optional health status. Assume the warehouse is healthy if this field is // not set. 
- Health *EndpointHealth `tfsdk:"health" tf:"optional"` + Health []EndpointHealth `tfsdk:"health" tf:"optional"` // unique identifier for warehouse Id types.String `tfsdk:"id" tf:"optional"` // Deprecated. Instance profile used to pass IAM role to the cluster @@ -1121,7 +1121,7 @@ type GetWarehouseResponse struct { // current number of clusters running for the service NumClusters types.Int64 `tfsdk:"num_clusters" tf:"optional"` // ODBC parameters for the SQL warehouse - OdbcParams *OdbcParams `tfsdk:"odbc_params" tf:"optional"` + OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional"` // Configurations whether the warehouse should use spot instances. SpotInstancePolicy types.String `tfsdk:"spot_instance_policy" tf:"optional"` // State of the warehouse @@ -1130,7 +1130,7 @@ type GetWarehouseResponse struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags *EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -1139,9 +1139,9 @@ type GetWarehouseResponse struct { type GetWorkspaceWarehouseConfigResponse struct { // Optional: Channel selection details - Channel *Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional"` // Deprecated: Use sql_configuration_parameters - ConfigParam *RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional"` + ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional"` // Spark confs for external hive metastore configuration JSON serialized // size must be less than <= 512K DataAccessConfig []EndpointConfPair `tfsdk:"data_access_config" tf:"optional"` @@ -1153,7 +1153,7 @@ type GetWorkspaceWarehouseConfigResponse struct { // specific type availability in the warehouse create and edit form UI. EnabledWarehouseTypes []WarehouseTypePair `tfsdk:"enabled_warehouse_types" tf:"optional"` // Deprecated: Use sql_configuration_parameters - GlobalParam *RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional"` + GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional"` // GCP only: Google Service Account used to pass to cluster to access Google // Cloud Storage GoogleServiceAccount types.String `tfsdk:"google_service_account" tf:"optional"` @@ -1162,7 +1162,7 @@ type GetWorkspaceWarehouseConfigResponse struct { // Security policy for warehouses SecurityPolicy types.String `tfsdk:"security_policy" tf:"optional"` // SQL configuration parameters - SqlConfigurationParameters *RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional"` + SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional"` } type LegacyAlert struct { @@ -1175,11 +1175,11 @@ type LegacyAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:"optional"` // Alert configuration options. - Options *AlertOptions `tfsdk:"options" tf:"optional"` + Options []AlertOptions `tfsdk:"options" tf:"optional"` // The identifier of the workspace folder containing the object. Parent types.String `tfsdk:"parent" tf:"optional"` - Query *AlertQuery `tfsdk:"query" tf:"optional"` + Query []AlertQuery `tfsdk:"query" tf:"optional"` // Number of seconds after being triggered before the alert rearms itself // and can be triggered again. If `null`, alert will never be triggered // again. 
@@ -1191,7 +1191,7 @@ type LegacyAlert struct { // Timestamp when the alert was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User *User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional"` } type LegacyQuery struct { @@ -1228,7 +1228,7 @@ type LegacyQuery struct { // type parameters are handled safely. IsSafe types.Bool `tfsdk:"is_safe" tf:"optional"` - LastModifiedBy *User `tfsdk:"last_modified_by" tf:"optional"` + LastModifiedBy []User `tfsdk:"last_modified_by" tf:"optional"` // The ID of the user who last saved changes to this query. LastModifiedById types.Int64 `tfsdk:"last_modified_by_id" tf:"optional"` // If there is a cached result for this query and user, this field includes @@ -1239,7 +1239,7 @@ type LegacyQuery struct { // on the query page. Name types.String `tfsdk:"name" tf:"optional"` - Options *QueryOptions `tfsdk:"options" tf:"optional"` + Options []QueryOptions `tfsdk:"options" tf:"optional"` // The identifier of the workspace folder containing the object. Parent types.String `tfsdk:"parent" tf:"optional"` // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * @@ -1258,7 +1258,7 @@ type LegacyQuery struct { // The timestamp at which this query was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User *User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional"` // The ID of the user who owns the query. UserId types.Int64 `tfsdk:"user_id" tf:"optional"` @@ -1285,7 +1285,7 @@ type LegacyVisualization struct { // settings in JSON. Options any `tfsdk:"options" tf:"optional"` - Query *LegacyQuery `tfsdk:"query" tf:"optional"` + Query []LegacyQuery `tfsdk:"query" tf:"optional"` // The type of visualization: chart, table, pivot table, and so on. Type types.String `tfsdk:"type" tf:"optional"` @@ -1307,7 +1307,7 @@ type ListAlertsResponse struct { type ListAlertsResponseAlert struct { // Trigger conditions of the alert. - Condition *AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional"` // The timestamp indicating when the alert was created. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // Custom body of alert notification, if it exists. See [here] for custom @@ -1403,7 +1403,7 @@ type ListQueriesResponse struct { // List Queries type ListQueryHistoryRequest struct { // A filter to limit query history results. This field is optional. - FilterBy *QueryFilter `tfsdk:"-"` + FilterBy []QueryFilter `tfsdk:"-"` // Whether to include the query metrics with each query. Only use this for a // small subset of queries (max_results). Defaults to false. IncludeMetrics types.Bool `tfsdk:"-"` @@ -1528,7 +1528,7 @@ type Parameter struct { EnumOptions types.String `tfsdk:"enumOptions" tf:"optional"` // If specified, allows multiple values to be selected for this parameter. // Only applies to dropdown list and query-based dropdown list parameters. - MultiValuesOptions *MultiValuesOptions `tfsdk:"multiValuesOptions" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multiValuesOptions" tf:"optional"` // The literal parameter marker that appears between double curly braces in // the query text. Name types.String `tfsdk:"name" tf:"optional"` @@ -1584,7 +1584,7 @@ type Query struct { type QueryBackedValue struct { // If specified, allows multiple values to be selected for this parameter. 
- MultiValuesOptions *MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional"` // UUID of the query that provides the parameter values. QueryId types.String `tfsdk:"query_id" tf:"optional"` // List of selected query parameter values. @@ -1622,7 +1622,7 @@ type QueryEditContent struct { type QueryFilter struct { // A range filter for query submitted time. The time range must be <= 30 // days. - QueryStartTimeRange *TimeRange `tfsdk:"query_start_time_range" tf:"optional"` + QueryStartTimeRange []TimeRange `tfsdk:"query_start_time_range" tf:"optional"` // A list of statement IDs. StatementIds []types.String `tfsdk:"statement_ids" tf:"optional"` @@ -1635,7 +1635,7 @@ type QueryFilter struct { type QueryInfo struct { // SQL Warehouse channel information at the time of query execution - ChannelUsed *ChannelInfo `tfsdk:"channel_used" tf:"optional"` + ChannelUsed []ChannelInfo `tfsdk:"channel_used" tf:"optional"` // Total execution time of the statement ( excluding result fetch time ). Duration types.Int64 `tfsdk:"duration" tf:"optional"` // Alias for `warehouse_id`. @@ -1654,7 +1654,7 @@ type QueryInfo struct { // A key that can be used to look up query details. LookupKey types.String `tfsdk:"lookup_key" tf:"optional"` // Metrics about query execution. - Metrics *QueryMetrics `tfsdk:"metrics" tf:"optional"` + Metrics []QueryMetrics `tfsdk:"metrics" tf:"optional"` // Whether plans exist for the execution, or the reason why they are missing PlansState types.String `tfsdk:"plans_state" tf:"optional"` // The time the query ended. @@ -1773,21 +1773,21 @@ type QueryOptions struct { type QueryParameter struct { // Date-range query parameter value. Can only specify one of // `dynamic_date_range_value` or `date_range_value`. - DateRangeValue *DateRangeValue `tfsdk:"date_range_value" tf:"optional"` + DateRangeValue []DateRangeValue `tfsdk:"date_range_value" tf:"optional"` // Date query parameter value. Can only specify one of `dynamic_date_value` // or `date_value`. - DateValue *DateValue `tfsdk:"date_value" tf:"optional"` + DateValue []DateValue `tfsdk:"date_value" tf:"optional"` // Dropdown query parameter value. - EnumValue *EnumValue `tfsdk:"enum_value" tf:"optional"` + EnumValue []EnumValue `tfsdk:"enum_value" tf:"optional"` // Literal parameter marker that appears between double curly braces in the // query text. Name types.String `tfsdk:"name" tf:"optional"` // Numeric query parameter value. - NumericValue *NumericValue `tfsdk:"numeric_value" tf:"optional"` + NumericValue []NumericValue `tfsdk:"numeric_value" tf:"optional"` // Query-based dropdown query parameter value. - QueryBackedValue *QueryBackedValue `tfsdk:"query_backed_value" tf:"optional"` + QueryBackedValue []QueryBackedValue `tfsdk:"query_backed_value" tf:"optional"` // Text query parameter value. - TextValue *TextValue `tfsdk:"text_value" tf:"optional"` + TextValue []TextValue `tfsdk:"text_value" tf:"optional"` // Text displayed in the user-facing parameter widget in the UI. Title types.String `tfsdk:"title" tf:"optional"` } @@ -1874,7 +1874,7 @@ type ResultManifest struct { Format types.String `tfsdk:"format" tf:"optional"` // The schema is an ordered list of column descriptions. - Schema *ResultSchema `tfsdk:"schema" tf:"optional"` + Schema []ResultSchema `tfsdk:"schema" tf:"optional"` // The total number of bytes in the result set. This field is not available // when using `INLINE` disposition. 
TotalByteCount types.Int64 `tfsdk:"total_byte_count" tf:"optional"` @@ -1920,9 +1920,9 @@ type SetResponse struct { type SetWorkspaceWarehouseConfigRequest struct { // Optional: Channel selection details - Channel *Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional"` // Deprecated: Use sql_configuration_parameters - ConfigParam *RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional"` + ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional"` // Spark confs for external hive metastore configuration JSON serialized // size must be less than <= 512K DataAccessConfig []EndpointConfPair `tfsdk:"data_access_config" tf:"optional"` @@ -1934,7 +1934,7 @@ type SetWorkspaceWarehouseConfigRequest struct { // specific type availability in the warehouse create and edit form UI. EnabledWarehouseTypes []WarehouseTypePair `tfsdk:"enabled_warehouse_types" tf:"optional"` // Deprecated: Use sql_configuration_parameters - GlobalParam *RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional"` + GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional"` // GCP only: Google Service Account used to pass to cluster to access Google // Cloud Storage GoogleServiceAccount types.String `tfsdk:"google_service_account" tf:"optional"` @@ -1943,7 +1943,7 @@ type SetWorkspaceWarehouseConfigRequest struct { // Security policy for warehouses SecurityPolicy types.String `tfsdk:"security_policy" tf:"optional"` // SQL configuration parameters - SqlConfigurationParameters *RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional"` + SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional"` } type SetWorkspaceWarehouseConfigResponse struct { @@ -1976,21 +1976,21 @@ type StatementParameterListItem struct { type StatementResponse struct { // The result manifest provides schema and metadata for the result set. - Manifest *ResultManifest `tfsdk:"manifest" tf:"optional"` + Manifest []ResultManifest `tfsdk:"manifest" tf:"optional"` - Result *ResultData `tfsdk:"result" tf:"optional"` + Result []ResultData `tfsdk:"result" tf:"optional"` // The statement ID is returned upon successfully submitting a SQL // statement, and is a required reference for all subsequent calls. StatementId types.String `tfsdk:"statement_id" tf:"optional"` // The status response includes execution state and if relevant, error // information. - Status *StatementStatus `tfsdk:"status" tf:"optional"` + Status []StatementStatus `tfsdk:"status" tf:"optional"` } // The status response includes execution state and if relevant, error // information. type StatementStatus struct { - Error *ServiceError `tfsdk:"error" tf:"optional"` + Error []ServiceError `tfsdk:"error" tf:"optional"` // Statement execution state: - `PENDING`: waiting for warehouse - // `RUNNING`: running - `SUCCEEDED`: execution was successful, result data // available for fetch - `FAILED`: execution failed; reason for failure @@ -2045,7 +2045,7 @@ type TransferOwnershipRequest struct { // Email address for the new owner, who must exist in the workspace. NewOwner types.String `tfsdk:"new_owner" tf:"optional"` // The ID of the object on which to change ownership. - ObjectId TransferOwnershipObjectId `tfsdk:"-"` + ObjectId []TransferOwnershipObjectId `tfsdk:"-"` // The type of object on which to change ownership. 
ObjectType types.String `tfsdk:"-"` } @@ -2061,7 +2061,7 @@ type TrashQueryRequest struct { } type UpdateAlertRequest struct { - Alert *UpdateAlertRequestAlert `tfsdk:"alert" tf:"optional"` + Alert []UpdateAlertRequestAlert `tfsdk:"alert" tf:"optional"` Id types.String `tfsdk:"-"` // Field mask is required to be passed into the PATCH request. Field mask @@ -2073,7 +2073,7 @@ type UpdateAlertRequest struct { type UpdateAlertRequestAlert struct { // Trigger conditions of the alert. - Condition *AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional"` // Custom body of alert notification, if it exists. See [here] for custom // templating instructions. // @@ -2101,7 +2101,7 @@ type UpdateAlertRequestAlert struct { type UpdateQueryRequest struct { Id types.String `tfsdk:"-"` - Query *UpdateQueryRequestQuery `tfsdk:"query" tf:"optional"` + Query []UpdateQueryRequestQuery `tfsdk:"query" tf:"optional"` // Field mask is required to be passed into the PATCH request. Field mask // specifies which fields of the setting payload will be updated. The field // mask needs to be supplied as single string. To specify multiple fields in @@ -2147,7 +2147,7 @@ type UpdateVisualizationRequest struct { // the field mask, use comma as the separator (no space). UpdateMask types.String `tfsdk:"update_mask" tf:""` - Visualization *UpdateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []UpdateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional"` } type UpdateVisualizationRequestVisualization struct { @@ -2260,13 +2260,13 @@ type Widget struct { // The unique ID for this widget. Id types.String `tfsdk:"id" tf:"optional"` - Options *WidgetOptions `tfsdk:"options" tf:"optional"` + Options []WidgetOptions `tfsdk:"options" tf:"optional"` // The visualization description API changes frequently and is unsupported. // You can duplicate a visualization by copying description objects received // _from the API_ and then using them to create a new one with a POST // request to the same endpoint. Databricks does not recommend constructing // ad-hoc visualizations entirely in JSON. - Visualization *LegacyVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []LegacyVisualization `tfsdk:"visualization" tf:"optional"` // Unused field. Width types.Int64 `tfsdk:"width" tf:"optional"` } @@ -2284,7 +2284,7 @@ type WidgetOptions struct { ParameterMappings any `tfsdk:"parameterMappings" tf:"optional"` // Coordinates of this widget on a dashboard. This portion of the API // changes frequently and is unsupported. - Position *WidgetPosition `tfsdk:"position" tf:"optional"` + Position []WidgetPosition `tfsdk:"position" tf:"optional"` // Custom title of the widget Title types.String `tfsdk:"title" tf:"optional"` // Timestamp of the last time this object was updated. diff --git a/internal/service/vectorsearch_tf/model.go b/internal/service/vectorsearch_tf/model.go index 494b51cbc6..11f4179394 100755 --- a/internal/service/vectorsearch_tf/model.go +++ b/internal/service/vectorsearch_tf/model.go @@ -29,10 +29,10 @@ type CreateEndpoint struct { type CreateVectorIndexRequest struct { // Specification for Delta Sync Index. Required if `index_type` is // `DELTA_SYNC`. - DeltaSyncIndexSpec *DeltaSyncVectorIndexSpecRequest `tfsdk:"delta_sync_index_spec" tf:"optional"` + DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecRequest `tfsdk:"delta_sync_index_spec" tf:"optional"` // Specification for Direct Vector Access Index. 
Required if `index_type` is // `DIRECT_ACCESS`. - DirectAccessIndexSpec *DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional"` + DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional"` // Name of the endpoint to be used for serving the index EndpointName types.String `tfsdk:"endpoint_name" tf:""` // There are 2 types of Vector Search indexes: @@ -50,7 +50,7 @@ type CreateVectorIndexRequest struct { } type CreateVectorIndexResponse struct { - VectorIndex *VectorIndex `tfsdk:"vector_index" tf:"optional"` + VectorIndex []VectorIndex `tfsdk:"vector_index" tf:"optional"` } // Result of the upsert or delete operation. @@ -73,7 +73,7 @@ type DeleteDataVectorIndexRequest struct { // Response to a delete data vector index request. type DeleteDataVectorIndexResponse struct { // Result of the upsert or delete operation. - Result *DeleteDataResult `tfsdk:"result" tf:"optional"` + Result []DeleteDataResult `tfsdk:"result" tf:"optional"` // Status of the delete operation. Status types.String `tfsdk:"status" tf:"optional"` } @@ -181,7 +181,7 @@ type EndpointInfo struct { // Creator of the endpoint Creator types.String `tfsdk:"creator" tf:"optional"` // Current status of the endpoint - EndpointStatus *EndpointStatus `tfsdk:"endpoint_status" tf:"optional"` + EndpointStatus []EndpointStatus `tfsdk:"endpoint_status" tf:"optional"` // Type of endpoint. EndpointType types.String `tfsdk:"endpoint_type" tf:"optional"` // Unique identifier of the endpoint @@ -255,7 +255,7 @@ type MapStringValueEntry struct { // Column name. Key types.String `tfsdk:"key" tf:"optional"` // Column value, nullable. - Value *Value `tfsdk:"value" tf:"optional"` + Value []Value `tfsdk:"value" tf:"optional"` } type MiniVectorIndex struct { @@ -315,13 +315,13 @@ type QueryVectorIndexRequest struct { type QueryVectorIndexResponse struct { // Metadata about the result set. - Manifest *ResultManifest `tfsdk:"manifest" tf:"optional"` + Manifest []ResultManifest `tfsdk:"manifest" tf:"optional"` // [Optional] Token that can be used in `QueryVectorIndexNextPage` API to // get next page of results. If more than 1000 results satisfy the query, // they are returned in groups of 1000. Empty value means no more results. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` // Data returned in the query result. - Result *ResultData `tfsdk:"result" tf:"optional"` + Result []ResultData `tfsdk:"result" tf:"optional"` } // Data returned in the query result. @@ -392,7 +392,7 @@ type UpsertDataVectorIndexRequest struct { // Response to an upsert data vector index request. type UpsertDataVectorIndexResponse struct { // Result of the upsert or delete operation. - Result *UpsertDataResult `tfsdk:"result" tf:"optional"` + Result []UpsertDataResult `tfsdk:"result" tf:"optional"` // Status of the upsert operation. Status types.String `tfsdk:"status" tf:"optional"` } @@ -400,7 +400,7 @@ type UpsertDataVectorIndexResponse struct { type Value struct { BoolValue types.Bool `tfsdk:"bool_value" tf:"optional"` - ListValue *ListValue `tfsdk:"list_value" tf:"optional"` + ListValue []ListValue `tfsdk:"list_value" tf:"optional"` NullValue types.String `tfsdk:"null_value" tf:"optional"` @@ -408,16 +408,16 @@ type Value struct { StringValue types.String `tfsdk:"string_value" tf:"optional"` - StructValue *Struct `tfsdk:"struct_value" tf:"optional"` + StructValue []Struct `tfsdk:"struct_value" tf:"optional"` } type VectorIndex struct { // The user who created the index. 
Creator types.String `tfsdk:"creator" tf:"optional"` - DeltaSyncIndexSpec *DeltaSyncVectorIndexSpecResponse `tfsdk:"delta_sync_index_spec" tf:"optional"` + DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecResponse `tfsdk:"delta_sync_index_spec" tf:"optional"` - DirectAccessIndexSpec *DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional"` + DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional"` // Name of the endpoint associated with the index EndpointName types.String `tfsdk:"endpoint_name" tf:"optional"` // There are 2 types of Vector Search indexes: @@ -433,7 +433,7 @@ type VectorIndex struct { // Primary key of the index PrimaryKey types.String `tfsdk:"primary_key" tf:"optional"` - Status *VectorIndexStatus `tfsdk:"status" tf:"optional"` + Status []VectorIndexStatus `tfsdk:"status" tf:"optional"` } type VectorIndexStatus struct { diff --git a/internal/service/workspace_tf/model.go b/internal/service/workspace_tf/model.go index fe451acf89..7564d08cce 100755 --- a/internal/service/workspace_tf/model.go +++ b/internal/service/workspace_tf/model.go @@ -73,7 +73,7 @@ type CreateRepoRequest struct { Provider types.String `tfsdk:"provider" tf:""` // If specified, the repo will be created with sparse checkout enabled. You // cannot enable/disable sparse checkout after the repo is created. - SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` // URL of the Git repository to be linked. Url types.String `tfsdk:"url" tf:""` } @@ -91,14 +91,14 @@ type CreateRepoResponse struct { // Git provider of the linked Git repository. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout settings for the Git folder (repo). - SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` // URL of the linked Git repository. Url types.String `tfsdk:"url" tf:"optional"` } type CreateScope struct { // The metadata for the secret scope if the type is `AZURE_KEYVAULT` - BackendAzureKeyvault *AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional"` + BackendAzureKeyvault []AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional"` // The principal that is initially granted `MANAGE` permission to the // created scope. InitialManagePrincipal types.String `tfsdk:"initial_manage_principal" tf:"optional"` @@ -270,7 +270,7 @@ type GetRepoResponse struct { // Git provider of the linked Git repository. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout settings for the Git folder (repo). - SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` // URL of the linked Git repository. Url types.String `tfsdk:"url" tf:"optional"` } @@ -516,7 +516,7 @@ type RepoInfo struct { // Git provider of the remote git repository, e.g. `gitHub`. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout config for the git folder (repo). - SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` // URL of the remote git repository. Url types.String `tfsdk:"url" tf:"optional"` } @@ -560,7 +560,7 @@ type SecretScope struct { // The type of secret scope backend. 
BackendType types.String `tfsdk:"backend_type" tf:"optional"` // The metadata for the secret scope if the type is `AZURE_KEYVAULT` - KeyvaultMetadata *AzureKeyVaultSecretScopeMetadata `tfsdk:"keyvault_metadata" tf:"optional"` + KeyvaultMetadata []AzureKeyVaultSecretScopeMetadata `tfsdk:"keyvault_metadata" tf:"optional"` // A unique name to identify the secret scope. Name types.String `tfsdk:"name" tf:"optional"` } @@ -617,7 +617,7 @@ type UpdateRepoRequest struct { RepoId types.Int64 `tfsdk:"-"` // If specified, update the sparse checkout settings. The update will fail // if sparse checkout is not enabled for the repo. - SparseCheckout *SparseCheckoutUpdate `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckoutUpdate `tfsdk:"sparse_checkout" tf:"optional"` // Tag that the local version of the repo is checked out to. Updating the // repo to a tag puts the repo in a detached HEAD state. Before committing // new changes, you must update the repo to a branch instead of the detached From 2ad3f1b7a6fd0866d9d253105928507f7c0e7579 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Thu, 10 Oct 2024 14:42:51 +0200 Subject: [PATCH 39/99] [Internal] Support adding context in resources and data sources (#4085) ## Changes Set `resource` and `data` context for each resource in their respective CRUD operations. Two methods are defined to make setting resources / datasources as default easier: `GetDatabricksProductionName` and `GetDatabricksStagingName`. Note: Library update isn't supported and hence we shouldn't inject useragent there. Every resource has: ````` ctx = pluginfwcommon.SetResourceNameInContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } ````` We could avoid the repetition by creating a common base struct but since this is going to be autogenerated soon, I think it's fine to have these as is due to return statements / clarity of reading the code. ## Tests Unit tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- internal/providers/pluginfw/common/common.go | 10 +++++++ .../providers/pluginfw/common/common_test.go | 21 +++++++++++++++ .../providers/pluginfw/context/context.go | 15 +++++++++++ .../pluginfw/context/context_test.go | 27 +++++++++++++++++++ .../resources/cluster/data_cluster.go | 6 ++++- .../resources/library/resource_library.go | 7 ++++- .../resource_quality_monitor.go | 9 ++++++- .../pluginfw/resources/volume/data_volumes.go | 6 ++++- 8 files changed, 97 insertions(+), 4 deletions(-) create mode 100644 internal/providers/pluginfw/common/common_test.go create mode 100644 internal/providers/pluginfw/context/context.go create mode 100644 internal/providers/pluginfw/context/context_test.go diff --git a/internal/providers/pluginfw/common/common.go b/internal/providers/pluginfw/common/common.go index 4351a5416c..f8dd957b9f 100644 --- a/internal/providers/pluginfw/common/common.go +++ b/internal/providers/pluginfw/common/common.go @@ -45,3 +45,13 @@ func ConfigureResource(req resource.ConfigureRequest, resp *resource.ConfigureRe } return client } + +// GetDatabricksStagingName returns the resource name for a given resource with _pluginframework suffix. 
+// Once a migrated resource is ready to be used as default, the Metadata method for that resource should be updated to use GetDatabricksProductionName. +func GetDatabricksStagingName(name string) string { + return fmt.Sprintf("databricks_%s_pluginframework", name) +} + +func GetDatabricksProductionName(name string) string { + return fmt.Sprintf("databricks_%s", name) +} diff --git a/internal/providers/pluginfw/common/common_test.go b/internal/providers/pluginfw/common/common_test.go new file mode 100644 index 0000000000..dca9729ac8 --- /dev/null +++ b/internal/providers/pluginfw/common/common_test.go @@ -0,0 +1,21 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetDatabricksStagingName(t *testing.T) { + resourceName := "test" + expected := "databricks_test_pluginframework" + result := GetDatabricksStagingName(resourceName) + assert.Equal(t, expected, result, "GetDatabricksStagingName should return the expected staging name") +} + +func TestGetDatabricksProductionName(t *testing.T) { + resourceName := "test" + expected := "databricks_test" + result := GetDatabricksProductionName(resourceName) + assert.Equal(t, expected, result, "GetDatabricksProductionName should return the expected production name") +} diff --git a/internal/providers/pluginfw/context/context.go b/internal/providers/pluginfw/context/context.go new file mode 100644 index 0000000000..fc59432e46 --- /dev/null +++ b/internal/providers/pluginfw/context/context.go @@ -0,0 +1,15 @@ +package common + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/useragent" +) + +func SetResourceNameInContext(ctx context.Context, resourceName string) context.Context { + return useragent.InContext(ctx, "resource", resourceName) +} + +func SetDataSourceNameInContext(ctx context.Context, dataSourceName string) context.Context { + return useragent.InContext(ctx, "data", dataSourceName) +} diff --git a/internal/providers/pluginfw/context/context_test.go b/internal/providers/pluginfw/context/context_test.go new file mode 100644 index 0000000000..378c26787e --- /dev/null +++ b/internal/providers/pluginfw/context/context_test.go @@ -0,0 +1,27 @@ +package common + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/useragent" + "github.com/stretchr/testify/assert" +) + +func TestSetResourceNameInContext(t *testing.T) { + ctx := context.Background() + resourceKey := "resource" + resourceName := "test-resource" + actualContext := SetResourceNameInContext(ctx, resourceName) + expectedContext := useragent.InContext(ctx, resourceKey, resourceName) + assert.Equal(t, expectedContext, actualContext) +} + +func TestSetDataSourceNameInContext(t *testing.T) { + ctx := context.Background() + dataSourceKey := "data" + dataSourceName := "test-datasource" + actualContext := SetDataSourceNameInContext(ctx, dataSourceName) + expectedContext := useragent.InContext(ctx, dataSourceKey, dataSourceName) + assert.Equal(t, expectedContext, actualContext) +} diff --git a/internal/providers/pluginfw/resources/cluster/data_cluster.go b/internal/providers/pluginfw/resources/cluster/data_cluster.go index 1fae8670df..76b7035f3c 100644 --- a/internal/providers/pluginfw/resources/cluster/data_cluster.go +++ b/internal/providers/pluginfw/resources/cluster/data_cluster.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/terraform-provider-databricks/common" pluginfwcommon 
"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + pluginfwcontext "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/context" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" "github.com/databricks/terraform-provider-databricks/internal/service/compute_tf" @@ -18,6 +19,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) +const dataSourceName = "cluster" + func DataSourceCluster() datasource.DataSource { return &ClusterDataSource{} } @@ -35,7 +38,7 @@ type ClusterInfo struct { } func (d *ClusterDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = "databricks_cluster_pluginframework" + resp.TypeName = pluginfwcommon.GetDatabricksStagingName(dataSourceName) } func (d *ClusterDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { @@ -67,6 +70,7 @@ func validateClustersList(ctx context.Context, clusters []compute_tf.ClusterDeta } func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + ctx = pluginfwcontext.SetDataSourceNameInContext(ctx, dataSourceName) w, diags := d.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { diff --git a/internal/providers/pluginfw/resources/library/resource_library.go b/internal/providers/pluginfw/resources/library/resource_library.go index 21e4e0f897..ccfc2204a1 100644 --- a/internal/providers/pluginfw/resources/library/resource_library.go +++ b/internal/providers/pluginfw/resources/library/resource_library.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/terraform-provider-databricks/clusters" "github.com/databricks/terraform-provider-databricks/common" pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + pluginfwcontext "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/context" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" "github.com/databricks/terraform-provider-databricks/internal/service/compute_tf" @@ -25,6 +26,7 @@ import ( "github.com/databricks/databricks-sdk-go" ) +const resourceName = "library" const libraryDefaultInstallationTimeout = 15 * time.Minute var _ resource.ResourceWithConfigure = &LibraryResource{} @@ -67,7 +69,7 @@ type LibraryResource struct { } func (r *LibraryResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = "databricks_library_pluginframework" + resp.TypeName = pluginfwcommon.GetDatabricksStagingName(resourceName) } func (r *LibraryResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { @@ -102,6 +104,7 @@ func (r *LibraryResource) Configure(ctx context.Context, req resource.ConfigureR } func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { @@ -144,6 +147,7 @@ func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest } func (r *LibraryResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -181,6 +185,7 @@ func (r *LibraryResource) Update(ctx context.Context, req resource.UpdateRequest } func (r *LibraryResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go index d1e455f5b4..28642dfcae 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/common" pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + pluginfwcontext "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/context" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" "github.com/databricks/terraform-provider-databricks/internal/service/catalog_tf" @@ -21,6 +22,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) +const resourceName = "quality_monitor" + const qualityMonitorDefaultProvisionTimeout = 15 * time.Minute var _ resource.ResourceWithConfigure = &QualityMonitorResource{} @@ -62,7 +65,7 @@ type QualityMonitorResource struct { } func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = "databricks_quality_monitor_pluginframework" + resp.TypeName = pluginfwcommon.GetDatabricksStagingName(resourceName) } func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { @@ -95,6 +98,7 @@ func (d *QualityMonitorResource) ImportState(ctx context.Context, req resource.I } func (r *QualityMonitorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -131,6 +135,7 @@ func (r *QualityMonitorResource) Create(ctx context.Context, req resource.Create } func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { @@ -161,6 +166,7 @@ func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequ } func (r *QualityMonitorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -203,6 +209,7 @@ func (r *QualityMonitorResource) Update(ctx context.Context, req resource.Update } func (r *QualityMonitorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { diff --git a/internal/providers/pluginfw/resources/volume/data_volumes.go b/internal/providers/pluginfw/resources/volume/data_volumes.go index 73290bb80c..d98144590b 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/common" pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + pluginfwcontext "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/context" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" "github.com/hashicorp/terraform-plugin-framework/datasource" @@ -15,6 +16,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) +const dataSourceName = "volumes" + func DataSourceVolumes() datasource.DataSource { return &VolumesDataSource{} } @@ -32,7 +35,7 @@ type VolumesList struct { } func (d *VolumesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = "databricks_volumes_pluginframework" + resp.TypeName = pluginfwcommon.GetDatabricksStagingName(dataSourceName) } func (d *VolumesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { @@ -50,6 +53,7 @@ func (d *VolumesDataSource) Configure(_ context.Context, req datasource.Configur } func (d *VolumesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + ctx = pluginfwcontext.SetDataSourceNameInContext(ctx, dataSourceName) w, diags := d.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { From 2c5dc8a9d56fa1cc6b27204cf6111788bb5b335e Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:03:04 +0200 Subject: [PATCH 40/99] [Internal] Set SDK used in the useragent in context (#4092) ## Changes There are 2 SDKs -- sdkv2 and pluginframework. 
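As a minimal sketch (based on the context helpers and tests added in this patch; the `"volumes"` name is only an illustrative data source), the new wrappers compose the `sdk` key with the existing `resource`/`data` keys:

```go
// Both forms are expected to be equivalent: the helper wraps
// useragent.InContext and tags the context with sdk=pluginframework
// plus the data source name.
ctx = pluginfwcontext.SetUserAgentInDataSourceContext(ctx, "volumes")

// roughly the same as:
ctx = useragent.InContext(ctx, "sdk", "pluginframework")
ctx = useragent.InContext(ctx, "data", "volumes")
```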
We set this information while setting useragent in the context for a resource and data source Context: https://github.com/databricks/terraform-provider-databricks/pull/4085#pullrequestreview-2359698343 ## Tests Unit tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- common/context.go | 9 +++++++-- common/context_test.go | 1 + common/version.go | 2 ++ internal/providers/common/common.go | 12 +++++++++++- internal/providers/pluginfw/context/context.go | 11 ++++++++--- .../providers/pluginfw/context/context_test.go | 16 +++++++++------- .../pluginfw/resources/cluster/data_cluster.go | 2 +- .../resources/library/resource_library.go | 6 +++--- .../qualitymonitor/resource_quality_monitor.go | 8 ++++---- .../pluginfw/resources/volume/data_volumes.go | 2 +- 10 files changed, 47 insertions(+), 22 deletions(-) diff --git a/common/context.go b/common/context.go index 9c00b9492e..9e6326e4e2 100644 --- a/common/context.go +++ b/common/context.go @@ -5,15 +5,18 @@ import ( "strings" "github.com/databricks/databricks-sdk-go/useragent" + "github.com/databricks/terraform-provider-databricks/internal/providers/common" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) +const sdkName = "sdkv2" + // AddContextToAllResources ... func AddContextToAllResources(p *schema.Provider, prefix string) { for k, r := range p.DataSourcesMap { name := strings.ReplaceAll(k, prefix+"_", "") - wrap := op(r.ReadContext).addContext(ResourceName, name).addContext(IsData, "yes") + wrap := op(r.ReadContext).addContext(ResourceName, name).addContext(IsData, "yes").addContext(Sdk, sdkName) r.ReadContext = schema.ReadContextFunc(wrap) } for k, r := range p.ResourcesMap { @@ -33,6 +36,8 @@ func (f op) addContext(k contextKey, v string) op { ctx = useragent.InContext(ctx, "resource", v) case IsData: ctx = useragent.InContext(ctx, "data", v) + case Sdk: + ctx = common.SetSDKInContext(ctx, v) } ctx = context.WithValue(ctx, k, v) return f(ctx, d, m) @@ -41,7 +46,7 @@ func (f op) addContext(k contextKey, v string) op { func addContextToResource(name string, r *schema.Resource) { addName := func(a op) func(ctx context.Context, d *schema.ResourceData, m any) diag.Diagnostics { - return a.addContext(ResourceName, name) + return a.addContext(ResourceName, name).addContext(Sdk, sdkName) } if r.CreateContext != nil { r.CreateContext = addName(op(r.CreateContext)) diff --git a/common/context_test.go b/common/context_test.go index 792067fb9f..c1dc9e4db3 100644 --- a/common/context_test.go +++ b/common/context_test.go @@ -12,6 +12,7 @@ import ( func TestAddContextToAllResources(t *testing.T) { check := func(ctx context.Context, rd *schema.ResourceData, i any) diag.Diagnostics { assert.Equal(t, "bar", ResourceName.GetOrUnknown(ctx)) + assert.Equal(t, "sdkv2", Sdk.GetOrUnknown(ctx)) return nil } p := &schema.Provider{ diff --git a/common/version.go b/common/version.go index 86d57fba64..592603882a 100644 --- a/common/version.go +++ b/common/version.go @@ -14,6 +14,8 @@ var ( IsData contextKey = 4 // apiVersion Api contextKey = 5 + // SDK used + Sdk contextKey = 6 ) type contextKey int diff --git a/internal/providers/common/common.go b/internal/providers/common/common.go index 7be08e9302..106420febc 100644 --- a/internal/providers/common/common.go +++ b/internal/providers/common/common.go @@ -3,6 +3,16 @@ // Note: This is different from 
internal/providers which contains the changes that *depends* on both: // internal/providers/sdkv2 and internal/providers/pluginfw packages. Whereas, internal/providers/common package contains // the changes *used* by both internal/providers/sdkv2 and internal/providers/pluginfw packages. -package internal +package common + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/useragent" +) const ProviderName = "databricks-tf-provider" + +func SetSDKInContext(ctx context.Context, sdkUsed string) context.Context { + return useragent.InContext(ctx, "sdk", sdkUsed) +} diff --git a/internal/providers/pluginfw/context/context.go b/internal/providers/pluginfw/context/context.go index fc59432e46..c5c13c18b9 100644 --- a/internal/providers/pluginfw/context/context.go +++ b/internal/providers/pluginfw/context/context.go @@ -1,15 +1,20 @@ -package common +package context import ( "context" "github.com/databricks/databricks-sdk-go/useragent" + "github.com/databricks/terraform-provider-databricks/internal/providers/common" ) -func SetResourceNameInContext(ctx context.Context, resourceName string) context.Context { +const sdkName = "pluginframework" + +func SetUserAgentInResourceContext(ctx context.Context, resourceName string) context.Context { + ctx = common.SetSDKInContext(ctx, sdkName) return useragent.InContext(ctx, "resource", resourceName) } -func SetDataSourceNameInContext(ctx context.Context, dataSourceName string) context.Context { +func SetUserAgentInDataSourceContext(ctx context.Context, dataSourceName string) context.Context { + ctx = common.SetSDKInContext(ctx, sdkName) return useragent.InContext(ctx, "data", dataSourceName) } diff --git a/internal/providers/pluginfw/context/context_test.go b/internal/providers/pluginfw/context/context_test.go index 378c26787e..790c27727e 100644 --- a/internal/providers/pluginfw/context/context_test.go +++ b/internal/providers/pluginfw/context/context_test.go @@ -1,4 +1,4 @@ -package common +package context import ( "context" @@ -8,20 +8,22 @@ import ( "github.com/stretchr/testify/assert" ) -func TestSetResourceNameInContext(t *testing.T) { +func TestSetUserAgentInResourceContext(t *testing.T) { ctx := context.Background() resourceKey := "resource" resourceName := "test-resource" - actualContext := SetResourceNameInContext(ctx, resourceName) - expectedContext := useragent.InContext(ctx, resourceKey, resourceName) + actualContext := SetUserAgentInResourceContext(ctx, resourceName) + expectedContext := useragent.InContext(ctx, "sdk", "pluginframework") + expectedContext = useragent.InContext(expectedContext, resourceKey, resourceName) assert.Equal(t, expectedContext, actualContext) } -func TestSetDataSourceNameInContext(t *testing.T) { +func TestSetUserAgentInDataSourceContext(t *testing.T) { ctx := context.Background() dataSourceKey := "data" dataSourceName := "test-datasource" - actualContext := SetDataSourceNameInContext(ctx, dataSourceName) - expectedContext := useragent.InContext(ctx, dataSourceKey, dataSourceName) + actualContext := SetUserAgentInDataSourceContext(ctx, dataSourceName) + expectedContext := useragent.InContext(ctx, "sdk", "pluginframework") + expectedContext = useragent.InContext(expectedContext, dataSourceKey, dataSourceName) assert.Equal(t, expectedContext, actualContext) } diff --git a/internal/providers/pluginfw/resources/cluster/data_cluster.go b/internal/providers/pluginfw/resources/cluster/data_cluster.go index 76b7035f3c..b497d0e8f2 100644 --- a/internal/providers/pluginfw/resources/cluster/data_cluster.go +++ 
b/internal/providers/pluginfw/resources/cluster/data_cluster.go @@ -70,7 +70,7 @@ func validateClustersList(ctx context.Context, clusters []compute_tf.ClusterDeta } func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - ctx = pluginfwcontext.SetDataSourceNameInContext(ctx, dataSourceName) + ctx = pluginfwcontext.SetUserAgentInDataSourceContext(ctx, dataSourceName) w, diags := d.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { diff --git a/internal/providers/pluginfw/resources/library/resource_library.go b/internal/providers/pluginfw/resources/library/resource_library.go index ccfc2204a1..1c999bd2ed 100644 --- a/internal/providers/pluginfw/resources/library/resource_library.go +++ b/internal/providers/pluginfw/resources/library/resource_library.go @@ -104,7 +104,7 @@ func (r *LibraryResource) Configure(ctx context.Context, req resource.ConfigureR } func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -147,7 +147,7 @@ func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest } func (r *LibraryResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -185,7 +185,7 @@ func (r *LibraryResource) Update(ctx context.Context, req resource.UpdateRequest } func (r *LibraryResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go index 28642dfcae..c0047d55cc 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go @@ -98,7 +98,7 @@ func (d *QualityMonitorResource) ImportState(ctx context.Context, req resource.I } func (r *QualityMonitorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { @@ -135,7 +135,7 @@ func (r *QualityMonitorResource) Create(ctx context.Context, req resource.Create } func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -166,7 +166,7 @@ func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequ } func (r *QualityMonitorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -209,7 +209,7 @@ func (r *QualityMonitorResource) Update(ctx context.Context, req resource.Update } func (r *QualityMonitorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - ctx = pluginfwcontext.SetResourceNameInContext(ctx, resourceName) + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) w, diags := r.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { diff --git a/internal/providers/pluginfw/resources/volume/data_volumes.go b/internal/providers/pluginfw/resources/volume/data_volumes.go index d98144590b..54eccf7bde 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes.go @@ -53,7 +53,7 @@ func (d *VolumesDataSource) Configure(_ context.Context, req datasource.Configur } func (d *VolumesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - ctx = pluginfwcontext.SetDataSourceNameInContext(ctx, dataSourceName) + ctx = pluginfwcontext.SetUserAgentInDataSourceContext(ctx, dataSourceName) w, diags := d.Client.GetWorkspaceClient() resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { From 300949a0299a543d803e869f7cc313d13c9df9b7 Mon Sep 17 00:00:00 2001 From: ryan-gord-db <60911136+ryan-gord-db@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:34:17 -0400 Subject: [PATCH 41/99] [Doc] Updates to resource examples (#4093) ## Changes Updated a few code examples to reflect syntax for the current provider version and to address referential integrity issues ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/data-sources/aws_bucket_policy.md | 6 ++---- docs/data-sources/aws_unity_catalog_policy.md | 2 +- docs/data-sources/cluster.md | 2 +- docs/data-sources/instance_pool.md | 2 +- docs/data-sources/metastore.md | 4 ++++ docs/data-sources/sql_warehouse.md | 4 ++-- docs/data-sources/views.md | 2 +- docs/data-sources/volume.md | 4 ++-- docs/resources/cluster_policy.md | 2 +- docs/resources/lakehouse_monitor.md | 1 - docs/resources/mws_credentials.md | 10 +++++++--- docs/resources/mws_customer_managed_keys.md | 2 ++ docs/resources/permissions.md | 2 ++ docs/resources/quality_monitor.md | 1 - 14 files changed, 26 insertions(+), 18 deletions(-) diff --git a/docs/data-sources/aws_bucket_policy.md b/docs/data-sources/aws_bucket_policy.md index d3e6bded1c..e42949e06a 100644 --- a/docs/data-sources/aws_bucket_policy.md +++ b/docs/data-sources/aws_bucket_policy.md @@ -10,12 +10,11 @@ This datasource configures a simple access policy for AWS S3 buckets, so that Da ```hcl resource "aws_s3_bucket" "this" { bucket = "" - acl = "private" force_destroy = true } -data "databricks_aws_bucket_policy" "stuff" { - bucket_name = aws_s3_bucket.this.bucket +data "databricks_aws_bucket_policy" "this" { + bucket = aws_s3_bucket.this.bucket } resource "aws_s3_bucket_policy" "this" { @@ -29,7 +28,6 @@ Bucket policy with full access: ```hcl resource "aws_s3_bucket" "ds" { bucket = "${var.prefix}-ds" - acl = "private" force_destroy = true tags = merge(var.tags, { Name = "${var.prefix}-ds" diff --git a/docs/data-sources/aws_unity_catalog_policy.md b/docs/data-sources/aws_unity_catalog_policy.md index 6a929b70d2..3804b1d5fa 100644 --- a/docs/data-sources/aws_unity_catalog_policy.md +++ b/docs/data-sources/aws_unity_catalog_policy.md @@ -30,7 +30,7 @@ resource "aws_iam_policy" "unity_metastore" { resource "aws_iam_role" "metastore_data_access" { name = "${var.prefix}-uc-access" - assume_role_policy = data.aws_iam_policy_document.this.json + assume_role_policy = data.databricks_aws_unity_catalog_assume_role_policy.this.json managed_policy_arns = [aws_iam_policy.unity_metastore.arn] } ``` diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index 7e5cdbba2b..73146d05ac 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -16,7 +16,7 @@ data "databricks_clusters" "all" { } data "databricks_cluster" "all" { - for_each = data.databricks_clusters.ids + for_each = data.databricks_clusters.all.ids cluster_id = each.value } diff --git a/docs/data-sources/instance_pool.md b/docs/data-sources/instance_pool.md index 052e4dea28..72f0a6f50a 100644 --- a/docs/data-sources/instance_pool.md +++ b/docs/data-sources/instance_pool.md @@ -13,7 +13,7 @@ Retrieves information about [databricks_instance_pool](../resources/instance_poo Referring to an instance pool by name: ```hcl -data "databricks_instance_pool" "Pool" { +data "databricks_instance_pool" "pool" { name = "All spot" } diff --git 
a/docs/data-sources/metastore.md b/docs/data-sources/metastore.md index db59fc7488..33a6060f15 100644 --- a/docs/data-sources/metastore.md +++ b/docs/data-sources/metastore.md @@ -14,6 +14,10 @@ Retrieves information about metastore for a given id of [databricks_metastore](. MetastoreInfo response for a given metastore id ```hcl +resource "aws_s3_bucket" "metastore" { + bucket = "${var.prefix}-metastore" + force_destroy = true +} resource "databricks_metastore" "this" { provider = databricks.workspace diff --git a/docs/data-sources/sql_warehouse.md b/docs/data-sources/sql_warehouse.md index b930545f89..28080d6518 100644 --- a/docs/data-sources/sql_warehouse.md +++ b/docs/data-sources/sql_warehouse.md @@ -15,8 +15,8 @@ Retrieves information about a [databricks_sql_warehouse](../resources/sql_endpoi data "databricks_sql_warehouses" "all" { } -data "databricks_sql_warehouse" "all" { - for_each = data.databricks_sql.warehouses.ids +data "databricks_sql_warehouse" "this" { + for_each = data.databricks_sql_warehouses.all.ids id = each.value } ``` diff --git a/docs/data-sources/views.md b/docs/data-sources/views.md index df076fbc4d..d36315438a 100644 --- a/docs/data-sources/views.md +++ b/docs/data-sources/views.md @@ -20,7 +20,7 @@ data "databricks_views" "things" { resource "databricks_grants" "things" { for_each = data.databricks_views.things.ids - view = each.value + table = each.value grant { principal = "sensitive" diff --git a/docs/data-sources/volume.md b/docs/data-sources/volume.md index 3a6ebeba3f..5a4bcc0e4d 100644 --- a/docs/data-sources/volume.md +++ b/docs/data-sources/volume.md @@ -16,8 +16,8 @@ data "databricks_volumes" "all" { schema_name = "things" } -data "databricks_volume" { - for_each = data.datatbricks_volumes.all.ids +data "databricks_volume" "this" { + for_each = data.databricks_volumes.all.ids name = each.value } ``` diff --git a/docs/resources/cluster_policy.md b/docs/resources/cluster_policy.md index 186c04cd7f..df94d849b6 100644 --- a/docs/resources/cluster_policy.md +++ b/docs/resources/cluster_policy.md @@ -132,7 +132,7 @@ locals { resource "databricks_cluster_policy" "personal_vm" { policy_family_id = "personal-vm" - policy_family_definition_overrides = jsonencode(personal_vm_override) + policy_family_definition_overrides = jsonencode(local.personal_vm_override) name = "Personal Compute" } ``` diff --git a/docs/resources/lakehouse_monitor.md b/docs/resources/lakehouse_monitor.md index d526f6fbc1..635399dcc9 100644 --- a/docs/resources/lakehouse_monitor.md +++ b/docs/resources/lakehouse_monitor.md @@ -38,7 +38,6 @@ resource "databricks_sql_table" "myTestTable" { column { name = "timestamp" - position = 1 type = "int" } } diff --git a/docs/resources/mws_credentials.md b/docs/resources/mws_credentials.md index 0a5c69daae..1676d89d6b 100644 --- a/docs/resources/mws_credentials.md +++ b/docs/resources/mws_credentials.md @@ -16,12 +16,16 @@ variable "databricks_account_id" { description = "Account Id that could be found in the top right corner of https://accounts.cloud.databricks.com/" } +variable "prefix" { + description = "Names of created resources will be prefixed with this value" +} + data "databricks_aws_assume_role_policy" "this" { external_id = var.databricks_account_id } resource "aws_iam_role" "cross_account_role" { - name = "${local.prefix}-crossaccount" + name = "${var.prefix}-crossaccount" assume_role_policy = data.databricks_aws_assume_role_policy.this.json tags = var.tags } @@ -30,14 +34,14 @@ data "databricks_aws_crossaccount_policy" "this" { } resource 
"aws_iam_role_policy" "this" { - name = "${local.prefix}-policy" + name = "${var.prefix}-policy" role = aws_iam_role.cross_account_role.id policy = data.databricks_aws_crossaccount_policy.this.json } resource "databricks_mws_credentials" "this" { provider = databricks.mws - credentials_name = "${local.prefix}-creds" + credentials_name = "${var.prefix}-creds" role_arn = aws_iam_role.cross_account_role.arn } ``` diff --git a/docs/resources/mws_customer_managed_keys.md b/docs/resources/mws_customer_managed_keys.md index 206158766d..cc48954402 100644 --- a/docs/resources/mws_customer_managed_keys.md +++ b/docs/resources/mws_customer_managed_keys.md @@ -110,6 +110,8 @@ variable "databricks_cross_account_role" { description = "AWS ARN for the Databricks cross account role" } +data "aws_caller_identity" "current" {} + data "aws_iam_policy_document" "databricks_storage_cmk" { version = "2012-10-17" statement { diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md index 8e2e236dfe..868e2aa835 100644 --- a/docs/resources/permissions.md +++ b/docs/resources/permissions.md @@ -232,6 +232,8 @@ There are four assignable [permission levels](https://docs.databricks.com/securi - Read [main documentation](https://docs.databricks.com/security/access-control/dlt-acl.html) for additional detail. ```hcl +data "databricks_current_user" "me" {} + resource "databricks_group" "eng" { display_name = "Engineering" } diff --git a/docs/resources/quality_monitor.md b/docs/resources/quality_monitor.md index b01208c80e..71613a6e0d 100644 --- a/docs/resources/quality_monitor.md +++ b/docs/resources/quality_monitor.md @@ -36,7 +36,6 @@ resource "databricks_sql_table" "myTestTable" { column { name = "timestamp" - position = 1 type = "int" } } From e1c683fd4b1000452a18dd279401f1590ee9a70a Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Fri, 11 Oct 2024 07:34:13 -0400 Subject: [PATCH 42/99] [Fix] Fix databricks_cluster_pluginframework data source (#4097) ## Changes After https://github.com/databricks/terraform-provider-databricks/commit/bac842d603465ddbf3582e2dab2d9189c17e3113 , the autogenerated structures used for interacting with state/config/plan always use Lists for nested structures, even if there is only ever at most one, for compatibility with older versions of the TF provider. The cluster data source on the plugin framework is handwritten and also needs to be converted to use lists instead of a pointer as well. ## Tests Ran the `TestAccDataSourceClusterByID` and `TestAccDataSourceClusterByName` integration tests, which now pass. 
- [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .../pluginfw/resources/cluster/data_cluster.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/providers/pluginfw/resources/cluster/data_cluster.go b/internal/providers/pluginfw/resources/cluster/data_cluster.go index b497d0e8f2..8d0499ccb8 100644 --- a/internal/providers/pluginfw/resources/cluster/data_cluster.go +++ b/internal/providers/pluginfw/resources/cluster/data_cluster.go @@ -32,9 +32,9 @@ type ClusterDataSource struct { } type ClusterInfo struct { - ClusterId types.String `tfsdk:"cluster_id" tf:"optional,computed"` - Name types.String `tfsdk:"cluster_name" tf:"optional,computed"` - ClusterInfo *compute_tf.ClusterDetails `tfsdk:"cluster_info" tf:"optional,computed"` + ClusterId types.String `tfsdk:"cluster_id" tf:"optional,computed"` + Name types.String `tfsdk:"cluster_name" tf:"optional,computed"` + ClusterInfo []compute_tf.ClusterDetails `tfsdk:"cluster_info" tf:"optional,computed"` } func (d *ClusterDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { @@ -109,7 +109,7 @@ func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest if resp.Diagnostics.HasError() { return } - clusterInfo.ClusterInfo = &namedClusters[0] + clusterInfo.ClusterInfo = namedClusters[0:1] } else if clusterId != "" { cluster, err := w.Clusters.GetByClusterId(ctx, clusterId) if err != nil { @@ -124,12 +124,12 @@ func (d *ClusterDataSource) Read(ctx context.Context, req datasource.ReadRequest if resp.Diagnostics.HasError() { return } - clusterInfo.ClusterInfo = &clusterDetails + clusterInfo.ClusterInfo = []compute_tf.ClusterDetails{clusterDetails} } else { resp.Diagnostics.AddError("you need to specify either `cluster_name` or `cluster_id`", "") return } - clusterInfo.ClusterId = clusterInfo.ClusterInfo.ClusterId - clusterInfo.Name = clusterInfo.ClusterInfo.ClusterName + clusterInfo.ClusterId = clusterInfo.ClusterInfo[0].ClusterId + clusterInfo.Name = clusterInfo.ClusterInfo[0].ClusterName resp.Diagnostics.Append(resp.State.Set(ctx, clusterInfo)...) 
} From 4a70e64a4732a0adfc34c710d95dd81b76293aa9 Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Fri, 11 Oct 2024 10:57:09 -0700 Subject: [PATCH 43/99] [Internal] Add maxItem=1 validator for object types in plugin framework schema (#4094) ## Changes - Added `object` tag for object types in tfsdk struct - Added `maxItem=1` validator for object types in the plugin framework schema ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- .codegen/model.go.tmpl | 2 +- go.mod | 1 + go.sum | 2 + .../tfschema/customizable_schema_test.go | 15 +- .../pluginfw/tfschema/struct_to_schema.go | 119 +++---- internal/service/apps_tf/model.go | 20 +- internal/service/billing_tf/model.go | 26 +- internal/service/catalog_tf/model.go | 190 +++++------ internal/service/compute_tf/model.go | 194 ++++++------ internal/service/dashboards_tf/model.go | 28 +- internal/service/iam_tf/model.go | 8 +- internal/service/jobs_tf/model.go | 294 +++++++++--------- internal/service/marketplace_tf/model.go | 84 ++--- internal/service/ml_tf/model.go | 54 ++-- internal/service/oauth2_tf/model.go | 12 +- internal/service/pipelines_tf/model.go | 76 ++--- internal/service/provisioning_tf/model.go | 48 +-- internal/service/serving_tf/model.go | 98 +++--- internal/service/settings_tf/model.go | 88 +++--- internal/service/sharing_tf/model.go | 32 +- internal/service/sql_tf/model.go | 138 ++++---- internal/service/vectorsearch_tf/model.go | 28 +- internal/service/workspace_tf/model.go | 14 +- 23 files changed, 797 insertions(+), 774 deletions(-) diff --git a/.codegen/model.go.tmpl b/.codegen/model.go.tmpl index 52a478f71b..40eae0d683 100644 --- a/.codegen/model.go.tmpl +++ b/.codegen/model.go.tmpl @@ -31,7 +31,7 @@ type {{.PascalName}} struct { {{end}} {{- define "field-tag" -}} - {{if .IsJson}}tfsdk:"{{if and (ne .Entity.Terraform nil) (ne .Entity.Terraform.Alias "") }}{{.Entity.Terraform.Alias}}{{else}}{{.Name}}{{end}}" tf:"{{if not .Required}}optional{{end}}"{{else}}tfsdk:"-"{{end -}} + {{if .IsJson}}tfsdk:"{{if and (ne .Entity.Terraform nil) (ne .Entity.Terraform.Alias "") }}{{.Entity.Terraform.Alias}}{{else}}{{.Name}}{{end}}" tf:"{{- $first := true -}}{{- if not .Required -}}{{- if not $first -}},{{end}}optional{{- $first = false -}}{{- end -}}{{- if .Entity.IsObject -}}{{- if not $first -}},{{end}}object{{- $first = false -}}{{- end -}}"{{else}}tfsdk:"-"{{end -}} {{- end -}} {{- define "type" -}} diff --git a/go.mod b/go.mod index e01145f07e..1fc08e53ba 100644 --- a/go.mod +++ b/go.mod @@ -52,6 +52,7 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect + github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect diff --git a/go.sum b/go.sum index dfd13d335a..c323c71e4c 100644 --- a/go.sum +++ b/go.sum @@ -134,6 +134,8 @@ github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7 github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= github.com/hashicorp/terraform-plugin-framework v1.11.0 h1:M7+9zBArexHFXDx/pKTxjE6n/2UCXY6b8FIq9ZYhwfE= 
github.com/hashicorp/terraform-plugin-framework v1.11.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0/go.mod h1:wGeI02gEhj9nPANU62F2jCaHjXulejm/X+af4PdZaNo= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= diff --git a/internal/providers/pluginfw/tfschema/customizable_schema_test.go b/internal/providers/pluginfw/tfschema/customizable_schema_test.go index d86be910b3..ff949d9daf 100644 --- a/internal/providers/pluginfw/tfschema/customizable_schema_test.go +++ b/internal/providers/pluginfw/tfschema/customizable_schema_test.go @@ -13,9 +13,10 @@ import ( ) type TestTfSdk struct { - Description types.String `tfsdk:"description" tf:""` - Nested *NestedTfSdk `tfsdk:"nested" tf:"optional"` - Map map[string]types.String `tfsdk:"map" tf:"optional"` + Description types.String `tfsdk:"description" tf:""` + Nested *NestedTfSdk `tfsdk:"nested" tf:"optional"` + NestedSliceObject []NestedTfSdk `tfsdk:"nested_slice_object" tf:"optional,object"` + Map map[string]types.String `tfsdk:"map" tf:"optional"` } type NestedTfSdk struct { @@ -121,3 +122,11 @@ func TestCustomizeSchemaAddPlanModifier(t *testing.T) { assert.True(t, len(scm.Attributes["description"].(schema.StringAttribute).PlanModifiers) == 1) } + +func TestCustomizeSchemaObjectTypeValidatorAdded(t *testing.T) { + scm := ResourceStructToSchema(TestTfSdk{}, func(c CustomizableSchema) CustomizableSchema { + return c + }) + + assert.True(t, len(scm.Blocks["nested_slice_object"].(schema.ListNestedBlock).Validators) == 1) +} diff --git a/internal/providers/pluginfw/tfschema/struct_to_schema.go b/internal/providers/pluginfw/tfschema/struct_to_schema.go index aa473e3014..206af67fb2 100644 --- a/internal/providers/pluginfw/tfschema/struct_to_schema.go +++ b/internal/providers/pluginfw/tfschema/struct_to_schema.go @@ -7,11 +7,19 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/internal/tfreflect" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" dataschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" ) +type structTag struct { + optional bool + computed bool + singleObject bool +} + func typeToSchema(v reflect.Value) NestedBlockObject { scmAttr := map[string]AttributeBuilder{} scmBlock := map[string]BlockBuilder{} @@ -30,8 +38,7 @@ func typeToSchema(v reflect.Value) NestedBlockObject { if fieldName == "-" { continue } - isOptional := fieldIsOptional(typeField) - isComputed := fieldIsComputed(typeField) + structTag := getStructTag(typeField) kind := typeField.Type.Kind() value := field.Value typeFieldType := typeField.Type @@ -52,42 +59,47 @@ func typeToSchema(v reflect.Value) NestedBlockObject { case reflect.TypeOf(types.Bool{}): scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.BoolType, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: 
!structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.Int64{}): scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.Int64Type, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.Float64{}): scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.Float64Type, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.String{}): scmAttr[fieldName] = ListAttributeBuilder{ ElementType: types.StringType, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } default: // Nested struct nestedScm := typeToSchema(reflect.New(elemType).Elem()) + var validators []validator.List + if structTag.singleObject { + validators = append(validators, listvalidator.SizeAtMost(1)) + } scmBlock[fieldName] = ListNestedBlockBuilder{ NestedObject: NestedBlockObject{ Attributes: nestedScm.Attributes, Blocks: nestedScm.Blocks, }, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, + Validators: validators, } } } else if kind == reflect.Map { @@ -102,30 +114,30 @@ func typeToSchema(v reflect.Value) NestedBlockObject { case reflect.TypeOf(types.Bool{}): scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.BoolType, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.Int64{}): scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.Int64Type, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.Float64{}): scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.Float64Type, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case reflect.TypeOf(types.String{}): scmAttr[fieldName] = MapAttributeBuilder{ ElementType: types.StringType, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } default: // Nested struct @@ -134,36 +146,36 @@ func typeToSchema(v reflect.Value) NestedBlockObject { NestedObject: NestedAttributeObject{ Attributes: nestedScm.Attributes, }, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } } } else if kind == reflect.Struct { switch value.Interface().(type) { case types.Bool: scmAttr[fieldName] = BoolAttributeBuilder{ - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case types.Int64: scmAttr[fieldName] = Int64AttributeBuilder{ - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + 
Computed: structTag.computed, } case types.Float64: scmAttr[fieldName] = Float64AttributeBuilder{ - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case types.String: scmAttr[fieldName] = StringAttributeBuilder{ - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } case types.List: panic(fmt.Errorf("types.List should never be used in tfsdk structs. %s", common.TerraformBugErrorMessage)) @@ -176,9 +188,9 @@ func typeToSchema(v reflect.Value) NestedBlockObject { nestedScm := typeToSchema(sv) scmBlock[fieldName] = ListNestedBlockBuilder{ NestedObject: nestedScm, - Optional: isOptional, - Required: !isOptional, - Computed: isComputed, + Optional: structTag.optional, + Required: !structTag.optional, + Computed: structTag.computed, } } } else { @@ -188,14 +200,13 @@ func typeToSchema(v reflect.Value) NestedBlockObject { return NestedBlockObject{Attributes: scmAttr, Blocks: scmBlock} } -func fieldIsComputed(field reflect.StructField) bool { +func getStructTag(field reflect.StructField) structTag { tagValue := field.Tag.Get("tf") - return strings.Contains(tagValue, "computed") -} - -func fieldIsOptional(field reflect.StructField) bool { - tagValue := field.Tag.Get("tf") - return strings.Contains(tagValue, "optional") + return structTag{ + optional: strings.Contains(tagValue, "optional"), + computed: strings.Contains(tagValue, "computed"), + singleObject: strings.Contains(tagValue, "object"), + } } // ResourceStructToSchema builds a resource schema from a tfsdk struct, with custoimzations applied. diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index ab9b6220dc..0b4c6101e2 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -16,11 +16,11 @@ import ( type App struct { // The active deployment of the app. - ActiveDeployment []AppDeployment `tfsdk:"active_deployment" tf:"optional"` + ActiveDeployment []AppDeployment `tfsdk:"active_deployment" tf:"optional,object"` - AppStatus []ApplicationStatus `tfsdk:"app_status" tf:"optional"` + AppStatus []ApplicationStatus `tfsdk:"app_status" tf:"optional,object"` - ComputeStatus []ComputeStatus `tfsdk:"compute_status" tf:"optional"` + ComputeStatus []ComputeStatus `tfsdk:"compute_status" tf:"optional,object"` // The creation time of the app. Formatted timestamp in ISO 6801. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The email of the user that created the app. @@ -35,7 +35,7 @@ type App struct { // characters and hyphens. It must be unique within the workspace. Name types.String `tfsdk:"name" tf:""` // The pending deployment of the app. - PendingDeployment []AppDeployment `tfsdk:"pending_deployment" tf:"optional"` + PendingDeployment []AppDeployment `tfsdk:"pending_deployment" tf:"optional,object"` // Resources for the app. Resources []AppResource `tfsdk:"resources" tf:"optional"` @@ -80,7 +80,7 @@ type AppDeployment struct { // The email of the user creates the deployment. Creator types.String `tfsdk:"creator" tf:"optional"` // The deployment artifacts for an app. - DeploymentArtifacts []AppDeploymentArtifacts `tfsdk:"deployment_artifacts" tf:"optional"` + DeploymentArtifacts []AppDeploymentArtifacts `tfsdk:"deployment_artifacts" tf:"optional,object"` // The unique id of the deployment. 
DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"` // The mode of which the deployment will manage the source code. @@ -94,7 +94,7 @@ type AppDeployment struct { // the deployment. SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` // Status and status message of the deployment - Status []AppDeploymentStatus `tfsdk:"status" tf:"optional"` + Status []AppDeploymentStatus `tfsdk:"status" tf:"optional,object"` // The update time of the deployment. Formatted timestamp in ISO 6801. UpdateTime types.String `tfsdk:"update_time" tf:"optional"` } @@ -144,15 +144,15 @@ type AppResource struct { // Description of the App Resource. Description types.String `tfsdk:"description" tf:"optional"` - Job []AppResourceJob `tfsdk:"job" tf:"optional"` + Job []AppResourceJob `tfsdk:"job" tf:"optional,object"` // Name of the App Resource. Name types.String `tfsdk:"name" tf:""` - Secret []AppResourceSecret `tfsdk:"secret" tf:"optional"` + Secret []AppResourceSecret `tfsdk:"secret" tf:"optional,object"` - ServingEndpoint []AppResourceServingEndpoint `tfsdk:"serving_endpoint" tf:"optional"` + ServingEndpoint []AppResourceServingEndpoint `tfsdk:"serving_endpoint" tf:"optional,object"` - SqlWarehouse []AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional"` + SqlWarehouse []AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional,object"` } type AppResourceJob struct { diff --git a/internal/service/billing_tf/model.go b/internal/service/billing_tf/model.go index 8eba23e7e3..f2a63fde2b 100755 --- a/internal/service/billing_tf/model.go +++ b/internal/service/billing_tf/model.go @@ -60,7 +60,7 @@ type BudgetConfiguration struct { // usage to limit the scope of what is considered for this budget. Leave // empty to include all usage for this account. All provided filters must be // matched for usage to be included. - Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional,object"` // Update time of this budget configuration. UpdateTime types.Int64 `tfsdk:"update_time" tf:"optional"` } @@ -71,7 +71,7 @@ type BudgetConfigurationFilter struct { // be entered exactly as they appear in your usage data. Tags []BudgetConfigurationFilterTagClause `tfsdk:"tags" tf:"optional"` // If provided, usage must match with the provided Databricks workspace IDs. - WorkspaceId []BudgetConfigurationFilterWorkspaceIdClause `tfsdk:"workspace_id" tf:"optional"` + WorkspaceId []BudgetConfigurationFilterWorkspaceIdClause `tfsdk:"workspace_id" tf:"optional,object"` } type BudgetConfigurationFilterClause struct { @@ -83,7 +83,7 @@ type BudgetConfigurationFilterClause struct { type BudgetConfigurationFilterTagClause struct { Key types.String `tfsdk:"key" tf:"optional"` - Value []BudgetConfigurationFilterClause `tfsdk:"value" tf:"optional"` + Value []BudgetConfigurationFilterClause `tfsdk:"value" tf:"optional,object"` } type BudgetConfigurationFilterWorkspaceIdClause struct { @@ -119,7 +119,7 @@ type CreateBudgetConfigurationBudget struct { // usage to limit the scope of what is considered for this budget. Leave // empty to include all usage for this account. All provided filters must be // matched for usage to be included. 
- Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional,object"` } type CreateBudgetConfigurationBudgetActionConfigurations struct { @@ -148,12 +148,12 @@ type CreateBudgetConfigurationBudgetAlertConfigurations struct { type CreateBudgetConfigurationRequest struct { // Properties of the new budget configuration. - Budget []CreateBudgetConfigurationBudget `tfsdk:"budget" tf:""` + Budget []CreateBudgetConfigurationBudget `tfsdk:"budget" tf:"object"` } type CreateBudgetConfigurationResponse struct { // The created budget configuration. - Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } type CreateLogDeliveryConfigurationParams struct { @@ -280,7 +280,7 @@ type GetBudgetConfigurationRequest struct { } type GetBudgetConfigurationResponse struct { - Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } // Get log delivery configuration @@ -342,7 +342,7 @@ type LogDeliveryConfiguration struct { // available for usage before March 2019 (`2019-03`). DeliveryStartTime types.String `tfsdk:"delivery_start_time" tf:"optional"` // Databricks log delivery status. - LogDeliveryStatus []LogDeliveryStatus `tfsdk:"log_delivery_status" tf:"optional"` + LogDeliveryStatus []LogDeliveryStatus `tfsdk:"log_delivery_status" tf:"optional,object"` // Log delivery type. Supported values are: // // * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the @@ -438,20 +438,20 @@ type UpdateBudgetConfigurationBudget struct { // usage to limit the scope of what is considered for this budget. Leave // empty to include all usage for this account. All provided filters must be // matched for usage to be included. - Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional"` + Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional,object"` } type UpdateBudgetConfigurationRequest struct { // The updated budget. This will overwrite the budget specified by the // budget ID. - Budget []UpdateBudgetConfigurationBudget `tfsdk:"budget" tf:""` + Budget []UpdateBudgetConfigurationBudget `tfsdk:"budget" tf:"object"` // The Databricks budget configuration ID. BudgetId types.String `tfsdk:"-"` } type UpdateBudgetConfigurationResponse struct { // The updated budget. 
- Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional"` + Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } type UpdateLogDeliveryConfigurationStatusRequest struct { @@ -466,11 +466,11 @@ type UpdateLogDeliveryConfigurationStatusRequest struct { } type WrappedCreateLogDeliveryConfiguration struct { - LogDeliveryConfiguration []CreateLogDeliveryConfigurationParams `tfsdk:"log_delivery_configuration" tf:"optional"` + LogDeliveryConfiguration []CreateLogDeliveryConfigurationParams `tfsdk:"log_delivery_configuration" tf:"optional,object"` } type WrappedLogDeliveryConfiguration struct { - LogDeliveryConfiguration []LogDeliveryConfiguration `tfsdk:"log_delivery_configuration" tf:"optional"` + LogDeliveryConfiguration []LogDeliveryConfiguration `tfsdk:"log_delivery_configuration" tf:"optional,object"` } type WrappedLogDeliveryConfigurations struct { diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index dea0528dfb..25fa29c013 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -15,11 +15,11 @@ import ( ) type AccountsCreateMetastore struct { - MetastoreInfo []CreateMetastore `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []CreateMetastore `tfsdk:"metastore_info" tf:"optional,object"` } type AccountsCreateMetastoreAssignment struct { - MetastoreAssignment []CreateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []CreateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Workspace ID. @@ -27,32 +27,32 @@ type AccountsCreateMetastoreAssignment struct { } type AccountsCreateStorageCredential struct { - CredentialInfo []CreateStorageCredential `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []CreateStorageCredential `tfsdk:"credential_info" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` } type AccountsMetastoreAssignment struct { - MetastoreAssignment []MetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []MetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` } type AccountsMetastoreInfo struct { - MetastoreInfo []MetastoreInfo `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []MetastoreInfo `tfsdk:"metastore_info" tf:"optional,object"` } type AccountsStorageCredentialInfo struct { - CredentialInfo []StorageCredentialInfo `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []StorageCredentialInfo `tfsdk:"credential_info" tf:"optional,object"` } type AccountsUpdateMetastore struct { // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` - MetastoreInfo []UpdateMetastore `tfsdk:"metastore_info" tf:"optional"` + MetastoreInfo []UpdateMetastore `tfsdk:"metastore_info" tf:"optional,object"` } type AccountsUpdateMetastoreAssignment struct { - MetastoreAssignment []UpdateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional"` + MetastoreAssignment []UpdateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Workspace ID. 
@@ -60,7 +60,7 @@ type AccountsUpdateMetastoreAssignment struct { } type AccountsUpdateStorageCredential struct { - CredentialInfo []UpdateStorageCredential `tfsdk:"credential_info" tf:"optional"` + CredentialInfo []UpdateStorageCredential `tfsdk:"credential_info" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` // Name of the storage credential. @@ -194,7 +194,7 @@ type CatalogInfo struct { // Username of catalog creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` - EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional,object"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` @@ -219,7 +219,7 @@ type CatalogInfo struct { // remote sharing server. ProviderName types.String `tfsdk:"provider_name" tf:"optional"` // Status of an asynchronously provisioned resource. - ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional"` + ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional,object"` // Kind of catalog securable. SecurableKind types.String `tfsdk:"securable_kind" tf:"optional"` @@ -249,7 +249,7 @@ type ColumnInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` - Mask []ColumnMask `tfsdk:"mask" tf:"optional"` + Mask []ColumnMask `tfsdk:"mask" tf:"optional,object"` // Name of Column. Name types.String `tfsdk:"name" tf:"optional"` // Whether field may be Null (default: true). @@ -309,7 +309,7 @@ type ConnectionInfo struct { // connection. Properties map[string]types.String `tfsdk:"properties" tf:"optional"` // Status of an asynchronously provisioned resource. - ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional"` + ProvisioningInfo []ProvisioningInfo `tfsdk:"provisioning_info" tf:"optional,object"` // If the connection is read only. ReadOnly types.Bool `tfsdk:"read_only" tf:"optional"` // Kind of connection securable. @@ -328,7 +328,7 @@ type ConnectionInfo struct { // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. type ContinuousUpdateStatus struct { // Progress of the initial data synchronization. - InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional"` + InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional,object"` // The last source table Delta version that was synced to the online table. // Note that this Delta version may not be completely synced to the online // table yet. @@ -384,7 +384,7 @@ type CreateExternalLocation struct { // Name of the storage credential used with this location. CredentialName types.String `tfsdk:"credential_name" tf:""` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. 
@@ -414,7 +414,7 @@ type CreateFunction struct { // Pretty printed function data type. FullDataType types.String `tfsdk:"full_data_type" tf:""` - InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:""` + InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:"object"` // Whether the function is deterministic. IsDeterministic types.Bool `tfsdk:"is_deterministic" tf:""` // Function null call. @@ -426,7 +426,7 @@ type CreateFunction struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties types.String `tfsdk:"properties" tf:"optional"` // Table function return parameters. - ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` + ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional,object"` // Function language. When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -436,7 +436,7 @@ type CreateFunction struct { // Function body. RoutineDefinition types.String `tfsdk:"routine_definition" tf:""` // Function dependencies. - RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional"` + RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional,object"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:""` // Function security type. @@ -451,7 +451,7 @@ type CreateFunction struct { type CreateFunctionRequest struct { // Partial __FunctionInfo__ specifying the function to be created. - FunctionInfo []CreateFunction `tfsdk:"function_info" tf:""` + FunctionInfo []CreateFunction `tfsdk:"function_info" tf:"object"` } type CreateMetastore struct { @@ -489,15 +489,15 @@ type CreateMonitor struct { // drift metrics (comparing metrics across time windows). CustomMetrics []MonitorMetric `tfsdk:"custom_metrics" tf:"optional"` // The data classification config for the monitor. - DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional,object"` // Configuration for monitoring inference logs. - InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional,object"` // The notification settings for the monitor. - Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional,object"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional,object"` // Whether to skip creating a default dashboard summarizing data quality // metrics. SkipBuiltinDashboard types.Bool `tfsdk:"skip_builtin_dashboard" tf:"optional"` @@ -508,11 +508,11 @@ type CreateMonitor struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional,object"` // Full name of the table. 
TableName types.String `tfsdk:"-"` // Configuration for monitoring time series tables. - TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional,object"` // Optional argument to specify the warehouse for dashboard creation. If not // specified, the first running warehouse will be used. WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` @@ -523,7 +523,7 @@ type CreateOnlineTableRequest struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"name" tf:"optional"` // Specification of the online table. - Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional"` + Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` } type CreateRegisteredModelRequest struct { @@ -558,17 +558,17 @@ type CreateSchema struct { type CreateStorageCredential struct { // The AWS IAM role configuration. - AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. - AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional,object"` // The Azure service principal configuration. - AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // The Cloudflare API token configuration. - CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional,object"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` // The Databricks managed GCP service account configuration. - DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // The credential name. The name must be unique within the metastore. Name types.String `tfsdk:"name" tf:""` // Whether the storage credential is only usable for read operations. @@ -582,7 +582,7 @@ type CreateTableConstraint struct { // A table constraint, as defined by *one* of the following fields being // set: __primary_key_constraint__, __foreign_key_constraint__, // __named_table_constraint__. - Constraint []TableConstraint `tfsdk:"constraint" tf:""` + Constraint []TableConstraint `tfsdk:"constraint" tf:"object"` // The full name of the table referenced by the constraint. FullNameArg types.String `tfsdk:"full_name_arg" tf:""` } @@ -778,9 +778,9 @@ type DeltaRuntimePropertiesKvPairs struct { // field must be defined. type Dependency struct { // A function that is dependent on a SQL object. - Function []FunctionDependency `tfsdk:"function" tf:"optional"` + Function []FunctionDependency `tfsdk:"function" tf:"optional,object"` // A table that is dependent on a SQL object. - Table []TableDependency `tfsdk:"table" tf:"optional"` + Table []TableDependency `tfsdk:"table" tf:"optional,object"` } // A list of dependencies. @@ -853,7 +853,7 @@ type EnableResponse struct { // Encryption options that apply to clients connecting to cloud storage. 
type EncryptionDetails struct { // Server-Side Encryption properties for clients communicating with AWS s3. - SseEncryptionDetails []SseEncryptionDetails `tfsdk:"sse_encryption_details" tf:"optional"` + SseEncryptionDetails []SseEncryptionDetails `tfsdk:"sse_encryption_details" tf:"optional,object"` } // Get boolean reflecting if table exists @@ -880,7 +880,7 @@ type ExternalLocationInfo struct { // Name of the storage credential used with this location. CredentialName types.String `tfsdk:"credential_name" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. @@ -964,7 +964,7 @@ type FunctionInfo struct { // Id of Function, relative to parent schema. FunctionId types.String `tfsdk:"function_id" tf:"optional"` - InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:"optional"` + InputParams []FunctionParameterInfos `tfsdk:"input_params" tf:"optional,object"` // Whether the function is deterministic. IsDeterministic types.Bool `tfsdk:"is_deterministic" tf:"optional"` // Function null call. @@ -980,7 +980,7 @@ type FunctionInfo struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties types.String `tfsdk:"properties" tf:"optional"` // Table function return parameters. - ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` + ReturnParams []FunctionParameterInfos `tfsdk:"return_params" tf:"optional,object"` // Function language. When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -990,7 +990,7 @@ type FunctionInfo struct { // Function body. RoutineDefinition types.String `tfsdk:"routine_definition" tf:"optional"` // Function dependencies. - RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional"` + RoutineDependencies []DependencyList `tfsdk:"routine_dependencies" tf:"optional,object"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:"optional"` // Function security type. @@ -1058,19 +1058,19 @@ type GenerateTemporaryTableCredentialRequest struct { type GenerateTemporaryTableCredentialResponse struct { // AWS temporary credentials for API authentication. Read more at // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. - AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional"` + AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional,object"` // Azure temporary credentials for API authentication. Read more at // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas - AzureUserDelegationSas []AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional"` + AzureUserDelegationSas []AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional,object"` // Server time when the credential will expire, in epoch milliseconds. The // API client is advised to cache the credential given this expiration time. 
ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // GCP temporary credentials for API authentication. Read more at // https://developers.google.com/identity/protocols/oauth2/service-account - GcpOauthToken []GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional"` + GcpOauthToken []GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional,object"` // R2 temporary credentials for API authentication. Read more at // https://developers.cloudflare.com/r2/api/s3/tokens/. - R2TempCredentials []R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional"` + R2TempCredentials []R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional,object"` // The URL of the storage path accessible by the temporary credential. Url types.String `tfsdk:"url" tf:"optional"` } @@ -1276,7 +1276,7 @@ type GetQuotaRequest struct { type GetQuotaResponse struct { // The returned QuotaInfo. - QuotaInfo []QuotaInfo `tfsdk:"quota_info" tf:"optional"` + QuotaInfo []QuotaInfo `tfsdk:"quota_info" tf:"optional,object"` } // Get refresh @@ -1811,7 +1811,7 @@ type ModelVersionInfo struct { // parent schema ModelName types.String `tfsdk:"model_name" tf:"optional"` // Model version dependencies, for feature-store packaged models - ModelVersionDependencies []DependencyList `tfsdk:"model_version_dependencies" tf:"optional"` + ModelVersionDependencies []DependencyList `tfsdk:"model_version_dependencies" tf:"optional,object"` // MLflow run ID used when creating the model version, if ``source`` was // generated by an experiment run stored in an MLflow tracking server RunId types.String `tfsdk:"run_id" tf:"optional"` @@ -1910,26 +1910,26 @@ type MonitorInfo struct { // if the monitor is in PENDING state. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` // The data classification config for the monitor. - DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional,object"` // The full name of the drift metrics table. Format: // __catalog_name__.__schema_name__.__table_name__. DriftMetricsTableName types.String `tfsdk:"drift_metrics_table_name" tf:""` // Configuration for monitoring inference logs. - InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional,object"` // The latest failure message of the monitor (if any). LatestMonitorFailureMsg types.String `tfsdk:"latest_monitor_failure_msg" tf:"optional"` // The version of the monitor config (e.g. 1,2,3). If negative, the monitor // may be corrupted. MonitorVersion types.String `tfsdk:"monitor_version" tf:""` // The notification settings for the monitor. - Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional,object"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:"optional"` // The full name of the profile metrics table. Format: // __catalog_name__.__schema_name__.__table_name__. ProfileMetricsTableName types.String `tfsdk:"profile_metrics_table_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional,object"` // List of column expressions to slice data with for targeted analysis. 
The // data is grouped by each expression independently, resulting in a separate // slice for each predicate and its complements. For high-cardinality @@ -1937,14 +1937,14 @@ type MonitorInfo struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional,object"` // The status of the monitor. Status types.String `tfsdk:"status" tf:""` // The full name of the table to monitor. Format: // __catalog_name__.__schema_name__.__table_name__. TableName types.String `tfsdk:"table_name" tf:""` // Configuration for monitoring time series tables. - TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional,object"` } type MonitorMetric struct { @@ -1976,10 +1976,10 @@ type MonitorMetric struct { type MonitorNotifications struct { // Who to send notifications to on monitor failure. - OnFailure []MonitorDestination `tfsdk:"on_failure" tf:"optional"` + OnFailure []MonitorDestination `tfsdk:"on_failure" tf:"optional,object"` // Who to send notifications to when new data classification tags are // detected. - OnNewClassificationTagDetected []MonitorDestination `tfsdk:"on_new_classification_tag_detected" tf:"optional"` + OnNewClassificationTagDetected []MonitorDestination `tfsdk:"on_new_classification_tag_detected" tf:"optional,object"` } type MonitorRefreshInfo struct { @@ -2033,9 +2033,9 @@ type OnlineTable struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"name" tf:"optional"` // Specification of the online table. - Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional"` + Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` // Online Table status - Status []OnlineTableStatus `tfsdk:"status" tf:"optional"` + Status []OnlineTableStatus `tfsdk:"status" tf:"optional,object"` // Data serving REST API URL for this table TableServingUrl types.String `tfsdk:"table_serving_url" tf:"optional"` } @@ -2056,10 +2056,10 @@ type OnlineTableSpec struct { // Primary Key columns to be used for data insert/update in the destination. PrimaryKeyColumns []types.String `tfsdk:"primary_key_columns" tf:"optional"` // Pipeline runs continuously after generating the initial data. - RunContinuously []OnlineTableSpecContinuousSchedulingPolicy `tfsdk:"run_continuously" tf:"optional"` + RunContinuously []OnlineTableSpecContinuousSchedulingPolicy `tfsdk:"run_continuously" tf:"optional,object"` // Pipeline stops after generating the initial data and can be triggered // later (manually, through a cron job or through data triggers) - RunTriggered []OnlineTableSpecTriggeredSchedulingPolicy `tfsdk:"run_triggered" tf:"optional"` + RunTriggered []OnlineTableSpecTriggeredSchedulingPolicy `tfsdk:"run_triggered" tf:"optional,object"` // Three-part (catalog, schema, table) name of the source Delta table. SourceTableFullName types.String `tfsdk:"source_table_full_name" tf:"optional"` // Time series key to deduplicate (tie-break) rows with the same primary @@ -2077,21 +2077,21 @@ type OnlineTableSpecTriggeredSchedulingPolicy struct { type OnlineTableStatus struct { // Detailed status of an online table. Shown if the online table is in the // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. 
- ContinuousUpdateStatus []ContinuousUpdateStatus `tfsdk:"continuous_update_status" tf:"optional"` + ContinuousUpdateStatus []ContinuousUpdateStatus `tfsdk:"continuous_update_status" tf:"optional,object"` // The state of the online table. DetailedState types.String `tfsdk:"detailed_state" tf:"optional"` // Detailed status of an online table. Shown if the online table is in the // OFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state. - FailedStatus []FailedStatus `tfsdk:"failed_status" tf:"optional"` + FailedStatus []FailedStatus `tfsdk:"failed_status" tf:"optional,object"` // A text description of the current state of the online table. Message types.String `tfsdk:"message" tf:"optional"` // Detailed status of an online table. Shown if the online table is in the // PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT // state. - ProvisioningStatus []ProvisioningStatus `tfsdk:"provisioning_status" tf:"optional"` + ProvisioningStatus []ProvisioningStatus `tfsdk:"provisioning_status" tf:"optional,object"` // Detailed status of an online table. Shown if the online table is in the // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. - TriggeredUpdateStatus []TriggeredUpdateStatus `tfsdk:"triggered_update_status" tf:"optional"` + TriggeredUpdateStatus []TriggeredUpdateStatus `tfsdk:"triggered_update_status" tf:"optional,object"` } type PermissionsChange struct { @@ -2148,7 +2148,7 @@ type ProvisioningInfo struct { type ProvisioningStatus struct { // Details about initial data synchronization. Only populated when in the // PROVISIONING_INITIAL_SNAPSHOT state. - InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional"` + InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional,object"` } type QuotaInfo struct { @@ -2268,7 +2268,7 @@ type SchemaInfo struct { // Username of schema creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` - EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional,object"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` @@ -2321,13 +2321,13 @@ type SseEncryptionDetails struct { type StorageCredentialInfo struct { // The AWS IAM role configuration. - AwsIamRole []AwsIamRoleResponse `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleResponse `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. - AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional,object"` // The Azure service principal configuration. - AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // The Cloudflare API token configuration. - CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional,object"` // Comment associated with the credential. 
Comment types.String `tfsdk:"comment" tf:"optional"` // Time at which this Credential was created, in epoch milliseconds. @@ -2335,7 +2335,7 @@ type StorageCredentialInfo struct { // Username of credential creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` // The Databricks managed GCP service account configuration. - DatabricksGcpServiceAccount []DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // The unique identifier of the credential. Id types.String `tfsdk:"id" tf:"optional"` // Whether the current securable is accessible from all workspaces or a @@ -2370,11 +2370,11 @@ type SystemSchemaInfo struct { // __primary_key_constraint__, __foreign_key_constraint__, // __named_table_constraint__. type TableConstraint struct { - ForeignKeyConstraint []ForeignKeyConstraint `tfsdk:"foreign_key_constraint" tf:"optional"` + ForeignKeyConstraint []ForeignKeyConstraint `tfsdk:"foreign_key_constraint" tf:"optional,object"` - NamedTableConstraint []NamedTableConstraint `tfsdk:"named_table_constraint" tf:"optional"` + NamedTableConstraint []NamedTableConstraint `tfsdk:"named_table_constraint" tf:"optional,object"` - PrimaryKeyConstraint []PrimaryKeyConstraint `tfsdk:"primary_key_constraint" tf:"optional"` + PrimaryKeyConstraint []PrimaryKeyConstraint `tfsdk:"primary_key_constraint" tf:"optional,object"` } // A table that is dependent on a SQL object. @@ -2414,14 +2414,14 @@ type TableInfo struct { // omitted if table is not deleted. DeletedAt types.Int64 `tfsdk:"deleted_at" tf:"optional"` // Information pertaining to current state of the delta table. - DeltaRuntimePropertiesKvpairs []DeltaRuntimePropertiesKvPairs `tfsdk:"delta_runtime_properties_kvpairs" tf:"optional"` + DeltaRuntimePropertiesKvpairs []DeltaRuntimePropertiesKvPairs `tfsdk:"delta_runtime_properties_kvpairs" tf:"optional,object"` - EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional"` + EffectivePredictiveOptimizationFlag []EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag" tf:"optional,object"` // Whether predictive optimization should be enabled for this object and // objects under it. EnablePredictiveOptimization types.String `tfsdk:"enable_predictive_optimization" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // Full name of table, in form of // __catalog_name__.__schema_name__.__table_name__ FullName types.String `tfsdk:"full_name" tf:"optional"` @@ -2437,7 +2437,7 @@ type TableInfo struct { // A map of key-value properties attached to the securable. Properties map[string]types.String `tfsdk:"properties" tf:"optional"` - RowFilter []TableRowFilter `tfsdk:"row_filter" tf:"optional"` + RowFilter []TableRowFilter `tfsdk:"row_filter" tf:"optional,object"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:"optional"` // List of schemes whose objects can be referenced without qualification. 
@@ -2466,7 +2466,7 @@ type TableInfo struct { // provided; - when DependencyList is an empty list, the dependency is // provided but is empty; - when DependencyList is not an empty list, // dependencies are provided and recorded. - ViewDependencies []DependencyList `tfsdk:"view_dependencies" tf:"optional"` + ViewDependencies []DependencyList `tfsdk:"view_dependencies" tf:"optional,object"` } type TableRowFilter struct { @@ -2496,7 +2496,7 @@ type TriggeredUpdateStatus struct { // table to the online table. Timestamp types.String `tfsdk:"timestamp" tf:"optional"` // Progress of the active data synchronization pipeline. - TriggeredUpdateProgress []PipelineProgress `tfsdk:"triggered_update_progress" tf:"optional"` + TriggeredUpdateProgress []PipelineProgress `tfsdk:"triggered_update_progress" tf:"optional,object"` } // Delete an assignment @@ -2551,7 +2551,7 @@ type UpdateExternalLocation struct { // Name of the storage credential used with this location. CredentialName types.String `tfsdk:"credential_name" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. - EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // Indicates whether fallback mode is enabled for this external location. // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. @@ -2639,15 +2639,15 @@ type UpdateMonitor struct { // if the monitor is in PENDING state. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` // The data classification config for the monitor. - DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional"` + DataClassificationConfig []MonitorDataClassificationConfig `tfsdk:"data_classification_config" tf:"optional,object"` // Configuration for monitoring inference logs. - InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional"` + InferenceLog []MonitorInferenceLog `tfsdk:"inference_log" tf:"optional,object"` // The notification settings for the monitor. - Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional"` + Notifications []MonitorNotifications `tfsdk:"notifications" tf:"optional,object"` // Schema where output metric tables are created. OutputSchemaName types.String `tfsdk:"output_schema_name" tf:""` // The schedule for automatically updating and refreshing metric tables. - Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []MonitorCronSchedule `tfsdk:"schedule" tf:"optional,object"` // List of column expressions to slice data with for targeted analysis. The // data is grouped by each expression independently, resulting in a separate // slice for each predicate and its complements. For high-cardinality @@ -2655,11 +2655,11 @@ type UpdateMonitor struct { // slices. SlicingExprs []types.String `tfsdk:"slicing_exprs" tf:"optional"` // Configuration for monitoring snapshot tables. - Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional"` + Snapshot []MonitorSnapshot `tfsdk:"snapshot" tf:"optional,object"` // Full name of the table. TableName types.String `tfsdk:"-"` // Configuration for monitoring time series tables. 
- TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional"` + TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional,object"` } type UpdatePermissions struct { @@ -2703,17 +2703,17 @@ type UpdateSchema struct { type UpdateStorageCredential struct { // The AWS IAM role configuration. - AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. - AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityResponse `tfsdk:"azure_managed_identity" tf:"optional,object"` // The Azure service principal configuration. - AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // The Cloudflare API token configuration. - CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional,object"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` // The Databricks managed GCP service account configuration. - DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // Force update even if there are dependent external locations or external // tables. Force types.Bool `tfsdk:"force" tf:"optional"` @@ -2774,15 +2774,15 @@ type UpdateWorkspaceBindingsParameters struct { type ValidateStorageCredential struct { // The AWS IAM role configuration. - AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional"` + AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. - AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional"` + AzureManagedIdentity []AzureManagedIdentityRequest `tfsdk:"azure_managed_identity" tf:"optional,object"` // The Azure service principal configuration. - AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional"` + AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // The Cloudflare API token configuration. - CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional"` + CloudflareApiToken []CloudflareApiToken `tfsdk:"cloudflare_api_token" tf:"optional,object"` // The Databricks created GCP service account configuration. - DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional"` + DatabricksGcpServiceAccount []DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // The name of an existing external location to validate. ExternalLocationName types.String `tfsdk:"external_location_name" tf:"optional"` // Whether the storage credential is only usable for read operations. @@ -2825,7 +2825,7 @@ type VolumeInfo struct { // The identifier of the user who created the volume CreatedBy types.String `tfsdk:"created_by" tf:"optional"` // Encryption options that apply to clients connecting to cloud storage. 
- EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional"` + EncryptionDetails []EncryptionDetails `tfsdk:"encryption_details" tf:"optional,object"` // The three-level (fully qualified) name of the volume FullName types.String `tfsdk:"full_name" tf:"optional"` // The unique identifier of the metastore diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index 91eef81ce0..dcc16fd50f 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -166,7 +166,7 @@ type AzureAttributes struct { // mutated over the lifetime of a cluster. FirstOnDemand types.Int64 `tfsdk:"first_on_demand" tf:"optional"` // Defines values necessary to configure and run Azure Log Analytics agent - LogAnalyticsInfo []LogAnalyticsInfo `tfsdk:"log_analytics_info" tf:"optional"` + LogAnalyticsInfo []LogAnalyticsInfo `tfsdk:"log_analytics_info" tf:"optional,object"` // The max bid price to be used for Azure spot instances. The Max price for // the bid cannot be higher than the on-demand price of the instance. If not // specified, the default value is -1, which specifies that the instance @@ -245,17 +245,17 @@ type ClusterAttributes struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only // one destination can be specified for one cluster. If the conf is given, // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -292,7 +292,7 @@ type ClusterAttributes struct { // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -310,7 +310,7 @@ type ClusterAttributes struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. 
- GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -366,7 +366,7 @@ type ClusterAttributes struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } type ClusterCompliance struct { @@ -386,7 +386,7 @@ type ClusterDetails struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -395,10 +395,10 @@ type ClusterDetails struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // Number of CPU cores available for this cluster. Note that this can be // fractional, e.g. 7.5 cores, since certain node types are configured to // share cores between Spark nodes on the same instance. @@ -412,9 +412,9 @@ type ClusterDetails struct { // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"` // Cluster log delivery status. - ClusterLogStatus []LogSyncStatus `tfsdk:"cluster_log_status" tf:"optional"` + ClusterLogStatus []LogSyncStatus `tfsdk:"cluster_log_status" tf:"optional,object"` // Total amount of cluster memory, in megabytes ClusterMemoryMb types.Int64 `tfsdk:"cluster_memory_mb" tf:"optional"` // Cluster name requested by the user. This doesn't have to be unique. If @@ -473,11 +473,11 @@ type ClusterDetails struct { // - Name: DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"` - DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"` // Node on which the Spark driver resides. The driver node contains the // Spark master and the Databricks application that manages the per-notebook // Spark REPLs. 
- Driver []SparkNode `tfsdk:"driver" tf:"optional"` + Driver []SparkNode `tfsdk:"driver" tf:"optional,object"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -497,7 +497,7 @@ type ClusterDetails struct { Executors []SparkNode `tfsdk:"executors" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. - GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -575,7 +575,7 @@ type ClusterDetails struct { // or edit this cluster. The contents of `spec` can be used in the body of a // create cluster request. This field might not be populated for older // clusters. Note: not included in the response of the ListClusters API. - Spec []ClusterSpec `tfsdk:"spec" tf:"optional"` + Spec []ClusterSpec `tfsdk:"spec" tf:"optional,object"` // SSH public key contents that will be added to each Spark node in this // cluster. The corresponding private keys can be used to login with the // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. @@ -593,18 +593,18 @@ type ClusterDetails struct { TerminatedTime types.Int64 `tfsdk:"terminated_time" tf:"optional"` // Information about why the cluster was terminated. This field only appears // when the cluster is in a `TERMINATING` or `TERMINATED` state. - TerminationReason []TerminationReason `tfsdk:"termination_reason" tf:"optional"` + TerminationReason []TerminationReason `tfsdk:"termination_reason" tf:"optional,object"` - WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } type ClusterEvent struct { // ClusterId types.String `tfsdk:"cluster_id" tf:""` // - DataPlaneEventDetails []DataPlaneEventDetails `tfsdk:"data_plane_event_details" tf:"optional"` + DataPlaneEventDetails []DataPlaneEventDetails `tfsdk:"data_plane_event_details" tf:"optional,object"` // - Details []EventDetails `tfsdk:"details" tf:"optional"` + Details []EventDetails `tfsdk:"details" tf:"optional,object"` // The timestamp when the event occurred, stored as the number of // milliseconds since the Unix epoch. If not provided, this will be assigned // by the Timeline service. @@ -623,13 +623,13 @@ type ClusterLibraryStatuses struct { type ClusterLogConf struct { // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : // "dbfs:/home/cluster_log" } }` - Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional"` + Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional,object"` // destination and either the region or endpoint need to be provided. e.g. // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : // "us-west-2" } }` Cluster iam role is used to access s3, please make sure // the cluster iam role in `instance_profile_arn` has permission to write // data to the s3 destination. 
- S3 []S3StorageInfo `tfsdk:"s3" tf:"optional"` + S3 []S3StorageInfo `tfsdk:"s3" tf:"optional,object"` } type ClusterPermission struct { @@ -733,7 +733,7 @@ type ClusterSize struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"` // Number of worker nodes that this cluster should have. A cluster has one // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 // Spark nodes. @@ -755,7 +755,7 @@ type ClusterSpec struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -764,17 +764,17 @@ type ClusterSpec struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only // one destination can be specified for one cluster. If the conf is given, // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -811,7 +811,7 @@ type ClusterSpec struct { // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -829,7 +829,7 @@ type ClusterSpec struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. 
- GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -896,7 +896,7 @@ type ClusterSpec struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } // Get status @@ -928,7 +928,7 @@ type CommandStatusRequest struct { type CommandStatusResponse struct { Id types.String `tfsdk:"id" tf:"optional"` - Results []Results `tfsdk:"results" tf:"optional"` + Results []Results `tfsdk:"results" tf:"optional,object"` Status types.String `tfsdk:"status" tf:"optional"` } @@ -954,7 +954,7 @@ type CreateCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -963,20 +963,20 @@ type CreateCluster struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // When specified, this clones libraries from a source cluster during the // creation of a new cluster. - CloneFrom []CloneCluster `tfsdk:"clone_from" tf:"optional"` + CloneFrom []CloneCluster `tfsdk:"clone_from" tf:"optional,object"` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only // one destination can be specified for one cluster. If the conf is given, // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -1013,7 +1013,7 @@ type CreateCluster struct { // mode provides a way that doesn’t have UC nor passthrough enabled. 
DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -1031,7 +1031,7 @@ type CreateCluster struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. - GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -1098,7 +1098,7 @@ type CreateCluster struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } type CreateClusterResponse struct { @@ -1115,10 +1115,10 @@ type CreateContext struct { type CreateInstancePool struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. - AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to instance pools running on Azure. If not specified // at pool creation, a set of default values will be used. - AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // Additional tags for pool resources. Databricks will tag all pool // resources (e.g., AWS instances and EBS volumes) with these tags in // addition to `default_tags`. Notes: @@ -1127,7 +1127,7 @@ type CreateInstancePool struct { CustomTags map[string]types.String `tfsdk:"custom_tags" tf:"optional"` // Defines the specification of the disks that will be attached to all spark // containers. - DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"` + DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional,object"` // Autoscaling Local Storage: when enabled, this instances in this pool will // dynamically acquire additional disk space when its Spark workers are // running low on disk space. In AWS, this feature requires specific AWS @@ -1136,7 +1136,7 @@ type CreateInstancePool struct { EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"` // Attributes related to instance pools running on Google Cloud Platform. If // not specified at pool creation, a set of default values will be used. - GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // Automatically terminates the extra instances in the pool cache after they // are inactive for this time in minutes if min_idle_instances requirement // is already met. 
If not set, the extra pool instances will be @@ -1314,7 +1314,7 @@ type DiskSpec struct { DiskThroughput types.Int64 `tfsdk:"disk_throughput" tf:"optional"` // The type of disks that will be launched with this cluster. - DiskType []DiskType `tfsdk:"disk_type" tf:"optional"` + DiskType []DiskType `tfsdk:"disk_type" tf:"optional,object"` } type DiskType struct { @@ -1331,7 +1331,7 @@ type DockerBasicAuth struct { } type DockerImage struct { - BasicAuth []DockerBasicAuth `tfsdk:"basic_auth" tf:"optional"` + BasicAuth []DockerBasicAuth `tfsdk:"basic_auth" tf:"optional,object"` // URL of the docker image. Url types.String `tfsdk:"url" tf:"optional"` } @@ -1344,7 +1344,7 @@ type EditCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -1353,10 +1353,10 @@ type EditCluster struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // ID of the cluser ClusterId types.String `tfsdk:"cluster_id" tf:""` // The configuration for delivering spark logs to a long-term storage @@ -1365,7 +1365,7 @@ type EditCluster struct { // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -1402,7 +1402,7 @@ type EditCluster struct { // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -1420,7 +1420,7 @@ type EditCluster struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. 
- GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -1487,7 +1487,7 @@ type EditCluster struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } type EditClusterResponse struct { @@ -1612,11 +1612,11 @@ type Environment struct { type EventDetails struct { // * For created clusters, the attributes of the cluster. * For edited // clusters, the new attributes of the cluster. - Attributes []ClusterAttributes `tfsdk:"attributes" tf:"optional"` + Attributes []ClusterAttributes `tfsdk:"attributes" tf:"optional,object"` // The cause of a change in target size. Cause types.String `tfsdk:"cause" tf:"optional"` // The actual cluster size that was set in the cluster creation or edit. - ClusterSize []ClusterSize `tfsdk:"cluster_size" tf:"optional"` + ClusterSize []ClusterSize `tfsdk:"cluster_size" tf:"optional,object"` // The current number of vCPUs in the cluster. CurrentNumVcpus types.Int64 `tfsdk:"current_num_vcpus" tf:"optional"` // The current number of nodes in the cluster. @@ -1634,7 +1634,7 @@ type EventDetails struct { FreeSpace types.Int64 `tfsdk:"free_space" tf:"optional"` // List of global and cluster init scripts associated with this cluster // event. - InitScripts []InitScriptEventDetails `tfsdk:"init_scripts" tf:"optional"` + InitScripts []InitScriptEventDetails `tfsdk:"init_scripts" tf:"optional,object"` // Instance Id where the event originated from InstanceId types.String `tfsdk:"instance_id" tf:"optional"` // Unique identifier of the specific job run associated with this cluster @@ -1642,15 +1642,15 @@ type EventDetails struct { // cluster name JobRunName types.String `tfsdk:"job_run_name" tf:"optional"` // The cluster attributes before a cluster was edited. - PreviousAttributes []ClusterAttributes `tfsdk:"previous_attributes" tf:"optional"` + PreviousAttributes []ClusterAttributes `tfsdk:"previous_attributes" tf:"optional,object"` // The size of the cluster before an edit or resize. - PreviousClusterSize []ClusterSize `tfsdk:"previous_cluster_size" tf:"optional"` + PreviousClusterSize []ClusterSize `tfsdk:"previous_cluster_size" tf:"optional,object"` // Previous disk size in bytes PreviousDiskSize types.Int64 `tfsdk:"previous_disk_size" tf:"optional"` // A termination reason: * On a TERMINATED event, this is the reason of the // termination. * On a RESIZE_COMPLETE event, this indicates the reason that // we failed to acquire some nodes. - Reason []TerminationReason `tfsdk:"reason" tf:"optional"` + Reason []TerminationReason `tfsdk:"reason" tf:"optional,object"` // The targeted number of vCPUs in the cluster. TargetNumVcpus types.Int64 `tfsdk:"target_num_vcpus" tf:"optional"` // The targeted number of nodes in the cluster. @@ -1791,7 +1791,7 @@ type GetEventsResponse struct { Events []ClusterEvent `tfsdk:"events" tf:"optional"` // The parameters required to retrieve the next page of events. Omitted if // there are no more events to read. 
- NextPage []GetEvents `tfsdk:"next_page" tf:"optional"` + NextPage []GetEvents `tfsdk:"next_page" tf:"optional,object"` // The total number of events filtered by the start_time, end_time, and // event_types. TotalCount types.Int64 `tfsdk:"total_count" tf:"optional"` @@ -1806,10 +1806,10 @@ type GetGlobalInitScriptRequest struct { type GetInstancePool struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. - AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to instance pools running on Azure. If not specified // at pool creation, a set of default values will be used. - AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // Additional tags for pool resources. Databricks will tag all pool // resources (e.g., AWS instances and EBS volumes) with these tags in // addition to `default_tags`. Notes: @@ -1829,7 +1829,7 @@ type GetInstancePool struct { DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"` // Defines the specification of the disks that will be attached to all spark // containers. - DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"` + DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional,object"` // Autoscaling Local Storage: when enabled, this instances in this pool will // dynamically acquire additional disk space when its Spark workers are // running low on disk space. In AWS, this feature requires specific AWS @@ -1838,7 +1838,7 @@ type GetInstancePool struct { EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"` // Attributes related to instance pools running on Google Cloud Platform. If // not specified at pool creation, a set of default values will be used. - GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // Automatically terminates the extra instances in the pool cache after they // are inactive for this time in minutes if min_idle_instances requirement // is already met. If not set, the extra pool instances will be @@ -1874,9 +1874,9 @@ type GetInstancePool struct { // Current state of the instance pool. State types.String `tfsdk:"state" tf:"optional"` // Usage statistics about the instance pool. - Stats []InstancePoolStats `tfsdk:"stats" tf:"optional"` + Stats []InstancePoolStats `tfsdk:"stats" tf:"optional,object"` // Status of failed pending instances in the pool. - Status []InstancePoolStatus `tfsdk:"status" tf:"optional"` + Status []InstancePoolStatus `tfsdk:"status" tf:"optional,object"` } // Get instance pool permission levels @@ -2029,35 +2029,35 @@ type InitScriptInfo struct { // destination needs to be provided. e.g. `{ "abfss" : { "destination" : // "abfss://@.dfs.core.windows.net/" // } } - Abfss []Adlsgen2Info `tfsdk:"abfss" tf:"optional"` + Abfss []Adlsgen2Info `tfsdk:"abfss" tf:"optional,object"` // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : // "dbfs:/home/cluster_log" } }` - Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional"` + Dbfs []DbfsStorageInfo `tfsdk:"dbfs" tf:"optional,object"` // destination needs to be provided. e.g. 
`{ "file" : { "destination" : // "file:/my/local/file.sh" } }` - File []LocalFileInfo `tfsdk:"file" tf:"optional"` + File []LocalFileInfo `tfsdk:"file" tf:"optional,object"` // destination needs to be provided. e.g. `{ "gcs": { "destination": // "gs://my-bucket/file.sh" } }` - Gcs []GcsStorageInfo `tfsdk:"gcs" tf:"optional"` + Gcs []GcsStorageInfo `tfsdk:"gcs" tf:"optional,object"` // destination and either the region or endpoint need to be provided. e.g. // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : // "us-west-2" } }` Cluster iam role is used to access s3, please make sure // the cluster iam role in `instance_profile_arn` has permission to write // data to the s3 destination. - S3 []S3StorageInfo `tfsdk:"s3" tf:"optional"` + S3 []S3StorageInfo `tfsdk:"s3" tf:"optional,object"` // destination needs to be provided. e.g. `{ "volumes" : { "destination" : // "/Volumes/my-init.sh" } }` - Volumes []VolumesStorageInfo `tfsdk:"volumes" tf:"optional"` + Volumes []VolumesStorageInfo `tfsdk:"volumes" tf:"optional,object"` // destination needs to be provided. e.g. `{ "workspace" : { "destination" : // "/Users/user1@databricks.com/my-init.sh" } }` - Workspace []WorkspaceStorageInfo `tfsdk:"workspace" tf:"optional"` + Workspace []WorkspaceStorageInfo `tfsdk:"workspace" tf:"optional,object"` } type InitScriptInfoAndExecutionDetails struct { // Details about the script - ExecutionDetails []InitScriptExecutionDetails `tfsdk:"execution_details" tf:"optional"` + ExecutionDetails []InitScriptExecutionDetails `tfsdk:"execution_details" tf:"optional,object"` // The script - Script []InitScriptInfo `tfsdk:"script" tf:"optional"` + Script []InitScriptInfo `tfsdk:"script" tf:"optional,object"` } type InstallLibraries struct { @@ -2097,10 +2097,10 @@ type InstancePoolAccessControlResponse struct { type InstancePoolAndStats struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. - AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []InstancePoolAwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to instance pools running on Azure. If not specified // at pool creation, a set of default values will be used. - AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []InstancePoolAzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // Additional tags for pool resources. Databricks will tag all pool // resources (e.g., AWS instances and EBS volumes) with these tags in // addition to `default_tags`. Notes: @@ -2120,7 +2120,7 @@ type InstancePoolAndStats struct { DefaultTags map[string]types.String `tfsdk:"default_tags" tf:"optional"` // Defines the specification of the disks that will be attached to all spark // containers. - DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional"` + DiskSpec []DiskSpec `tfsdk:"disk_spec" tf:"optional,object"` // Autoscaling Local Storage: when enabled, this instances in this pool will // dynamically acquire additional disk space when its Spark workers are // running low on disk space. In AWS, this feature requires specific AWS @@ -2129,7 +2129,7 @@ type InstancePoolAndStats struct { EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk" tf:"optional"` // Attributes related to instance pools running on Google Cloud Platform. If // not specified at pool creation, a set of default values will be used. 
- GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []InstancePoolGcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // Automatically terminates the extra instances in the pool cache after they // are inactive for this time in minutes if min_idle_instances requirement // is already met. If not set, the extra pool instances will be @@ -2165,9 +2165,9 @@ type InstancePoolAndStats struct { // Current state of the instance pool. State types.String `tfsdk:"state" tf:"optional"` // Usage statistics about the instance pool. - Stats []InstancePoolStats `tfsdk:"stats" tf:"optional"` + Stats []InstancePoolStats `tfsdk:"stats" tf:"optional,object"` // Status of failed pending instances in the pool. - Status []InstancePoolStatus `tfsdk:"status" tf:"optional"` + Status []InstancePoolStatus `tfsdk:"status" tf:"optional,object"` } type InstancePoolAwsAttributes struct { @@ -2314,7 +2314,7 @@ type InstanceProfile struct { type Library struct { // Specification of a CRAN library to be installed as part of the library - Cran []RCranLibrary `tfsdk:"cran" tf:"optional"` + Cran []RCranLibrary `tfsdk:"cran" tf:"optional,object"` // Deprecated. URI of the egg library to install. Installing Python egg // files is deprecated and is not supported in Databricks Runtime 14.0 and // above. @@ -2329,10 +2329,10 @@ type Library struct { Jar types.String `tfsdk:"jar" tf:"optional"` // Specification of a maven library to be installed. For example: `{ // "coordinates": "org.jsoup:jsoup:1.7.2" }` - Maven []MavenLibrary `tfsdk:"maven" tf:"optional"` + Maven []MavenLibrary `tfsdk:"maven" tf:"optional,object"` // Specification of a PyPi library to be installed. For example: `{ // "package": "simplejson" }` - Pypi []PythonPyPiLibrary `tfsdk:"pypi" tf:"optional"` + Pypi []PythonPyPiLibrary `tfsdk:"pypi" tf:"optional,object"` // URI of the requirements.txt file to install. Only Workspace paths and // Unity Catalog Volumes paths are supported. For example: `{ // "requirements": "/Workspace/path/to/requirements.txt" }` or `{ @@ -2354,7 +2354,7 @@ type LibraryFullStatus struct { // libraries UI. IsLibraryForAllClusters types.Bool `tfsdk:"is_library_for_all_clusters" tf:"optional"` // Unique identifier for the library. - Library []Library `tfsdk:"library" tf:"optional"` + Library []Library `tfsdk:"library" tf:"optional,object"` // All the info and warning messages that have occurred so far for this // library. Messages []types.String `tfsdk:"messages" tf:"optional"` @@ -2568,9 +2568,9 @@ type NodeType struct { // Memory (in MB) available for this node type. MemoryMb types.Int64 `tfsdk:"memory_mb" tf:""` - NodeInfo []CloudProviderNodeInfo `tfsdk:"node_info" tf:"optional"` + NodeInfo []CloudProviderNodeInfo `tfsdk:"node_info" tf:"optional,object"` - NodeInstanceType []NodeInstanceType `tfsdk:"node_instance_type" tf:"optional"` + NodeInstanceType []NodeInstanceType `tfsdk:"node_instance_type" tf:"optional,object"` // Unique identifier for this node type. NodeTypeId types.String `tfsdk:"node_type_id" tf:""` // Number of CPU cores available for this node type. Note that this can be @@ -2709,7 +2709,7 @@ type ResizeCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"` // The cluster to be resized. 
ClusterId types.String `tfsdk:"cluster_id" tf:""` // Number of worker nodes that this cluster should have. A cluster has one @@ -2801,7 +2801,7 @@ type SparkNode struct { // Globally unique identifier for the host instance from the cloud provider. InstanceId types.String `tfsdk:"instance_id" tf:"optional"` // Attributes specific to AWS for a Spark node. - NodeAwsAttributes []SparkNodeAwsAttributes `tfsdk:"node_aws_attributes" tf:"optional"` + NodeAwsAttributes []SparkNodeAwsAttributes `tfsdk:"node_aws_attributes" tf:"optional,object"` // Globally unique identifier for this node. NodeId types.String `tfsdk:"node_id" tf:"optional"` // Private IP address (typically a 10.x.x.x address) of the Spark node. Note @@ -2877,7 +2877,7 @@ type UnpinClusterResponse struct { type UpdateCluster struct { // The cluster to be updated. - Cluster []UpdateClusterResource `tfsdk:"cluster" tf:"optional"` + Cluster []UpdateClusterResource `tfsdk:"cluster" tf:"optional,object"` // ID of the cluster. ClusterId types.String `tfsdk:"cluster_id" tf:""` // Specifies which fields of the cluster will be updated. This is required @@ -2892,7 +2892,7 @@ type UpdateClusterResource struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional"` + Autoscale []AutoScale `tfsdk:"autoscale" tf:"optional,object"` // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically // terminated. If specified, the threshold must be between 10 and 10000 @@ -2901,17 +2901,17 @@ type UpdateClusterResource struct { AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes" tf:"optional"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes []AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only // one destination can be specified for one cluster. If the conf is given, // the logs will be delivered to the destination every `5 mins`. The // destination of driver logs is `$destination/$clusterId/driver`, while the // destination of executor logs is `$destination/$clusterId/executor`. - ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf []ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. ClusterName types.String `tfsdk:"cluster_name" tf:"optional"` @@ -2948,7 +2948,7 @@ type UpdateClusterResource struct { // mode provides a way that doesn’t have UC nor passthrough enabled. 
DataSecurityMode types.String `tfsdk:"data_security_mode" tf:"optional"` - DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional"` + DockerImage []DockerImage `tfsdk:"docker_image" tf:"optional,object"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id // (instance_pool_id) if the driver pool is not assigned. @@ -2966,7 +2966,7 @@ type UpdateClusterResource struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. - GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes []GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -3033,7 +3033,7 @@ type UpdateClusterResource struct { // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` - WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional"` + WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } type UpdateClusterResponse struct { @@ -3049,7 +3049,7 @@ type VolumesStorageInfo struct { type WorkloadType struct { // defined what type of clients can use the cluster. E.g. Notebooks, Jobs - Clients []ClientsTypes `tfsdk:"clients" tf:""` + Clients []ClientsTypes `tfsdk:"clients" tf:"object"` } type WorkspaceStorageInfo struct { diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index 876787f5d9..d0035a99d3 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -37,7 +37,7 @@ type CreateDashboardRequest struct { type CreateScheduleRequest struct { // The cron expression describing the frequency of the periodic refresh for // this schedule. - CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""` + CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` // UUID identifying the dashboard to which the schedule belongs. DashboardId types.String `tfsdk:"-"` // The display name for schedule. @@ -53,7 +53,7 @@ type CreateSubscriptionRequest struct { ScheduleId types.String `tfsdk:"-"` // Subscriber details for users and destinations to be added as subscribers // to the schedule. - Subscriber []Subscriber `tfsdk:"subscriber" tf:""` + Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"` } type CronSchedule struct { @@ -147,9 +147,9 @@ type ExecuteMessageQueryRequest struct { // Genie AI Response type GenieAttachment struct { - Query []QueryAttachment `tfsdk:"query" tf:"optional"` + Query []QueryAttachment `tfsdk:"query" tf:"optional,object"` - Text []TextAttachment `tfsdk:"text" tf:"optional"` + Text []TextAttachment `tfsdk:"text" tf:"optional,object"` } type GenieConversation struct { @@ -201,7 +201,7 @@ type GenieGetMessageQueryResultRequest struct { type GenieGetMessageQueryResultResponse struct { // SQL Statement Execution response. See [Get status, manifest, and result // first chunk](:method:statementexecution/getstatement) for more details. 
- StatementResponse sql.StatementResponse `tfsdk:"statement_response" tf:"optional"` + StatementResponse sql.StatementResponse `tfsdk:"statement_response" tf:"optional,object"` } type GenieMessage struct { @@ -214,13 +214,13 @@ type GenieMessage struct { // Timestamp when the message was created CreatedTimestamp types.Int64 `tfsdk:"created_timestamp" tf:"optional"` // Error message if AI failed to respond to the message - Error []MessageError `tfsdk:"error" tf:"optional"` + Error []MessageError `tfsdk:"error" tf:"optional,object"` // Message ID Id types.String `tfsdk:"id" tf:""` // Timestamp when the message was last updated LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp" tf:"optional"` // The result of SQL query if the message has a query attachment - QueryResult []Result `tfsdk:"query_result" tf:"optional"` + QueryResult []Result `tfsdk:"query_result" tf:"optional,object"` // Genie space ID SpaceId types.String `tfsdk:"space_id" tf:""` // MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching @@ -253,11 +253,11 @@ type GenieStartConversationMessageRequest struct { } type GenieStartConversationResponse struct { - Conversation []GenieConversation `tfsdk:"conversation" tf:"optional"` + Conversation []GenieConversation `tfsdk:"conversation" tf:"optional,object"` // Conversation ID ConversationId types.String `tfsdk:"conversation_id" tf:""` - Message []GenieMessage `tfsdk:"message" tf:"optional"` + Message []GenieMessage `tfsdk:"message" tf:"optional,object"` // Message ID MessageId types.String `tfsdk:"message_id" tf:""` } @@ -427,7 +427,7 @@ type Schedule struct { CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The cron expression describing the frequency of the periodic refresh for // this schedule. - CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""` + CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` // UUID identifying the dashboard to which the schedule belongs. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` // The display name for schedule. @@ -447,10 +447,10 @@ type Schedule struct { type Subscriber struct { // The destination to receive the subscription email. This parameter is // mutually exclusive with `user_subscriber`. - DestinationSubscriber []SubscriptionSubscriberDestination `tfsdk:"destination_subscriber" tf:"optional"` + DestinationSubscriber []SubscriptionSubscriberDestination `tfsdk:"destination_subscriber" tf:"optional,object"` // The user to receive the subscription email. This parameter is mutually // exclusive with `destination_subscriber`. - UserSubscriber []SubscriptionSubscriberUser `tfsdk:"user_subscriber" tf:"optional"` + UserSubscriber []SubscriptionSubscriberUser `tfsdk:"user_subscriber" tf:"optional,object"` } type Subscription struct { @@ -469,7 +469,7 @@ type Subscription struct { ScheduleId types.String `tfsdk:"schedule_id" tf:"optional"` // Subscriber details for users and destinations to be added as subscribers // to the schedule. - Subscriber []Subscriber `tfsdk:"subscriber" tf:""` + Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"` // UUID identifying the subscription. SubscriptionId types.String `tfsdk:"subscription_id" tf:"optional"` // A timestamp indicating when the subscription was last updated. @@ -536,7 +536,7 @@ type UpdateDashboardRequest struct { type UpdateScheduleRequest struct { // The cron expression describing the frequency of the periodic refresh for // this schedule. 
- CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:""` + CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` // UUID identifying the dashboard to which the schedule belongs. DashboardId types.String `tfsdk:"-"` // The display name for schedule. diff --git a/internal/service/iam_tf/model.go b/internal/service/iam_tf/model.go index a590d05629..7eee548409 100755 --- a/internal/service/iam_tf/model.go +++ b/internal/service/iam_tf/model.go @@ -263,7 +263,7 @@ type Group struct { Members []ComplexValue `tfsdk:"members" tf:"optional"` // Container for the group identifier. Workspace local versus account. - Meta []ResourceMeta `tfsdk:"meta" tf:"optional"` + Meta []ResourceMeta `tfsdk:"meta" tf:"optional,object"` // Corresponds to AWS instance profile/arn role. Roles []ComplexValue `tfsdk:"roles" tf:"optional"` // The schema of the group. @@ -584,7 +584,7 @@ type PermissionAssignment struct { // The permissions level of the principal. Permissions []types.String `tfsdk:"permissions" tf:"optional"` // Information about the principal assigned to the workspace. - Principal []PrincipalOutput `tfsdk:"principal" tf:"optional"` + Principal []PrincipalOutput `tfsdk:"principal" tf:"optional,object"` } type PermissionAssignments struct { @@ -694,7 +694,7 @@ type UpdateRuleSetRequest struct { // Name of the rule set. Name types.String `tfsdk:"name" tf:""` - RuleSet []RuleSetUpdateRequest `tfsdk:"rule_set" tf:""` + RuleSet []RuleSetUpdateRequest `tfsdk:"rule_set" tf:"object"` } type UpdateWorkspaceAssignments struct { @@ -736,7 +736,7 @@ type User struct { // provided by the client will be ignored. Id types.String `tfsdk:"id" tf:"optional"` - Name []Name `tfsdk:"name" tf:"optional"` + Name []Name `tfsdk:"name" tf:"optional,object"` // Corresponds to AWS instance profile/arn role. Roles []ComplexValue `tfsdk:"roles" tf:"optional"` // The schema of the user. diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 71c3096a41..d2544ac0d0 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -26,7 +26,7 @@ type BaseJob struct { JobId types.Int64 `tfsdk:"job_id" tf:"optional"` // Settings for this job and all of its runs. These settings can be updated // using the `resetJob` method. - Settings []JobSettings `tfsdk:"settings" tf:"optional"` + Settings []JobSettings `tfsdk:"settings" tf:"optional,object"` } type BaseRun struct { @@ -47,10 +47,10 @@ type BaseRun struct { // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. - ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` + ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"` // A snapshot of the job’s cluster specification when this run was // created. - ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional"` + ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional,object"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` @@ -77,7 +77,7 @@ type BaseRun struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. 
- GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. @@ -98,7 +98,7 @@ type BaseRun struct { // run_id of the original attempt; otherwise, it is the same as the run_id. OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"` // The parameters used for this run. - OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional"` + OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional,object"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` // The repair history of the run. @@ -123,7 +123,7 @@ type BaseRun struct { RunType types.String `tfsdk:"run_type" tf:"optional"` // The cron schedule that triggered this run if it was triggered by the // periodic scheduler. - Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task @@ -138,9 +138,9 @@ type BaseRun struct { // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State []RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional,object"` // The current status of the run - Status []RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional,object"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run resutls. Tasks []RunTask `tfsdk:"tasks" tf:"optional"` @@ -156,7 +156,7 @@ type BaseRun struct { // arrival. * `TABLE`: Indicates a run that is triggered by a table update. Trigger types.String `tfsdk:"trigger" tf:"optional"` // Additional details about what triggered the run - TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional"` + TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional,object"` } type CancelAllRuns struct { @@ -213,7 +213,7 @@ type ClusterSpec struct { Libraries compute.Library `tfsdk:"library" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` } type ConditionTask struct { @@ -248,9 +248,9 @@ type CreateJob struct { // An optional continuous property for this job. The continuous property // will ensure that there is always one run executing. Only one of // `schedule` and `continuous` can be used. - Continuous []Continuous `tfsdk:"continuous" tf:"optional"` + Continuous []Continuous `tfsdk:"continuous" tf:"optional,object"` // Deployment information for jobs managed by external sources. - Deployment []JobDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []JobDeployment `tfsdk:"deployment" tf:"optional,object"` // An optional description for the job. The maximum length is 27700 // characters in UTF-8 encoding. 
Description types.String `tfsdk:"description" tf:"optional"` @@ -261,7 +261,7 @@ type CreateJob struct { EditMode types.String `tfsdk:"edit_mode" tf:"optional"` // An optional set of email addresses that is notified when runs of this job // begin or complete as well as when this job is deleted. - EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // A list of task execution environment specifications that can be // referenced by serverless tasks of this job. An environment is required to // be present for serverless tasks. For serverless notebook tasks, the @@ -283,9 +283,9 @@ type CreateJob struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. @@ -308,22 +308,22 @@ type CreateJob struct { // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // job. - NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // Job-level parameter definitions Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. - Queue []QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` // Write-only setting. Specifies the user, service principal or group that // the job/pipeline runs as. If not specified, the job/pipeline runs as the // user who created the job/pipeline. // // Exactly one of `user_name`, `service_principal_name`, `group_name` should // be specified. If not, an error is thrown. - RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"` + RunAs []JobRunAs `tfsdk:"run_as" tf:"optional,object"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI // or sending an API request to `runNow`. - Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"` // A map of tags associated with the job. These are forwarded to the cluster // as cluster tags for jobs clusters, and are subject to the same // limitations as cluster tags. A maximum of 25 tags can be added to the @@ -337,10 +337,10 @@ type CreateJob struct { // A configuration to trigger a run when certain conditions are met. The // default behavior is that the job runs only when triggered by clicking // “Run Now” in the Jobs UI or sending an API request to `runNow`. - Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional"` + Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional,object"` // A collection of system notification IDs to notify when runs of this job // begin or complete. 
- WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } // Job was created successfully @@ -467,7 +467,7 @@ type EnforcePolicyComplianceResponse struct { // clusters. Updated job settings are derived by applying policy default // values to the existing job clusters in order to satisfy policy // requirements. - Settings []JobSettings `tfsdk:"settings" tf:"optional"` + Settings []JobSettings `tfsdk:"settings" tf:"optional,object"` } // Run was exported successfully. @@ -507,7 +507,7 @@ type ForEachStats struct { // Sample of 3 most common error messages occurred during the iteration. ErrorMessageStats []ForEachTaskErrorMessageStats `tfsdk:"error_message_stats" tf:"optional"` // Describes stats of the iteration. Only latest retries are considered. - TaskRunStats []ForEachTaskTaskRunStats `tfsdk:"task_run_stats" tf:"optional"` + TaskRunStats []ForEachTaskTaskRunStats `tfsdk:"task_run_stats" tf:"optional,object"` } type ForEachTask struct { @@ -519,7 +519,7 @@ type ForEachTask struct { // an array parameter. Inputs types.String `tfsdk:"inputs" tf:""` // Configuration for the task that will be run for each element in the array - Task []Task `tfsdk:"task" tf:""` + Task []Task `tfsdk:"task" tf:"object"` } type ForEachTaskErrorMessageStats struct { @@ -644,7 +644,7 @@ type GitSource struct { GitProvider types.String `tfsdk:"git_provider" tf:""` // Read-only state of the remote repository at the time the job was run. // This field is only included on job runs. - GitSnapshot []GitSnapshot `tfsdk:"git_snapshot" tf:"optional"` + GitSnapshot []GitSnapshot `tfsdk:"git_snapshot" tf:"optional,object"` // Name of the tag to be checked out and used by this job. This field cannot // be specified in conjunction with git_branch or git_commit. GitTag types.String `tfsdk:"tag" tf:"optional"` @@ -652,7 +652,7 @@ type GitSource struct { GitUrl types.String `tfsdk:"url" tf:""` // The source of the job specification in the remote repository when the job // is source controlled. - JobSource []JobSource `tfsdk:"job_source" tf:"optional"` + JobSource []JobSource `tfsdk:"job_source" tf:"optional,object"` } // Job was retrieved successfully. @@ -675,7 +675,7 @@ type Job struct { RunAsUserName types.String `tfsdk:"run_as_user_name" tf:"optional"` // Settings for this job and all of its runs. These settings can be updated // using the `resetJob` method. - Settings []JobSettings `tfsdk:"settings" tf:"optional"` + Settings []JobSettings `tfsdk:"settings" tf:"optional,object"` } type JobAccessControlRequest struct { @@ -708,7 +708,7 @@ type JobCluster struct { // determine which cluster to launch for the task execution. JobClusterKey types.String `tfsdk:"job_cluster_key" tf:""` // If new_cluster, a description of a cluster that is created for each task. - NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:""` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"object"` } type JobCompliance struct { @@ -775,7 +775,7 @@ type JobEnvironment struct { // The environment entity used to preserve serverless environment side panel // and jobs' environment for non-notebook task. In this minimal environment // spec, only pip dependencies are supported. 
- Spec compute.Environment `tfsdk:"spec" tf:"optional"` + Spec compute.Environment `tfsdk:"spec" tf:"optional,object"` } type JobNotificationSettings struct { @@ -851,9 +851,9 @@ type JobSettings struct { // An optional continuous property for this job. The continuous property // will ensure that there is always one run executing. Only one of // `schedule` and `continuous` can be used. - Continuous []Continuous `tfsdk:"continuous" tf:"optional"` + Continuous []Continuous `tfsdk:"continuous" tf:"optional,object"` // Deployment information for jobs managed by external sources. - Deployment []JobDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []JobDeployment `tfsdk:"deployment" tf:"optional,object"` // An optional description for the job. The maximum length is 27700 // characters in UTF-8 encoding. Description types.String `tfsdk:"description" tf:"optional"` @@ -864,7 +864,7 @@ type JobSettings struct { EditMode types.String `tfsdk:"edit_mode" tf:"optional"` // An optional set of email addresses that is notified when runs of this job // begin or complete as well as when this job is deleted. - EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // A list of task execution environment specifications that can be // referenced by serverless tasks of this job. An environment is required to // be present for serverless tasks. For serverless notebook tasks, the @@ -886,9 +886,9 @@ type JobSettings struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. // You must declare dependent libraries in task settings. @@ -911,22 +911,22 @@ type JobSettings struct { // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // job. - NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // Job-level parameter definitions Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. - Queue []QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` // Write-only setting. Specifies the user, service principal or group that // the job/pipeline runs as. If not specified, the job/pipeline runs as the // user who created the job/pipeline. // // Exactly one of `user_name`, `service_principal_name`, `group_name` should // be specified. If not, an error is thrown. - RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"` + RunAs []JobRunAs `tfsdk:"run_as" tf:"optional,object"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI // or sending an API request to `runNow`. 
- Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"` // A map of tags associated with the job. These are forwarded to the cluster // as cluster tags for jobs clusters, and are subject to the same // limitations as cluster tags. A maximum of 25 tags can be added to the @@ -940,10 +940,10 @@ type JobSettings struct { // A configuration to trigger a run when certain conditions are met. The // default behavior is that the job runs only when triggered by clicking // “Run Now” in the Jobs UI or sending an API request to `runNow`. - Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional"` + Trigger []TriggerSettings `tfsdk:"trigger" tf:"optional,object"` // A collection of system notification IDs to notify when runs of this job // begin or complete. - WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } // The source of the job specification in the remote repository when the job is @@ -1221,9 +1221,9 @@ type RepairHistoryItem struct { // The start time of the (repaired) run. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State []RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional,object"` // The current status of the run - Status []RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional,object"` // The run IDs of the task runs that ran as part of this repair history // item. TaskRunIds []types.Int64 `tfsdk:"task_run_ids" tf:"optional"` @@ -1276,7 +1276,7 @@ type RepairRun struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1348,7 +1348,7 @@ type ResetJob struct { // // Changes to the field `JobBaseSettings.timeout_seconds` are applied to // active runs. Changes to other fields are applied to future runs only. 
- NewSettings []JobSettings `tfsdk:"new_settings" tf:""` + NewSettings []JobSettings `tfsdk:"new_settings" tf:"object"` } type ResetResponse struct { @@ -1389,25 +1389,25 @@ type ResolvedStringParamsValues struct { } type ResolvedValues struct { - ConditionTask []ResolvedConditionTaskValues `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ResolvedConditionTaskValues `tfsdk:"condition_task" tf:"optional,object"` - DbtTask []ResolvedDbtTaskValues `tfsdk:"dbt_task" tf:"optional"` + DbtTask []ResolvedDbtTaskValues `tfsdk:"dbt_task" tf:"optional,object"` - NotebookTask []ResolvedNotebookTaskValues `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []ResolvedNotebookTaskValues `tfsdk:"notebook_task" tf:"optional,object"` - PythonWheelTask []ResolvedPythonWheelTaskValues `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []ResolvedPythonWheelTaskValues `tfsdk:"python_wheel_task" tf:"optional,object"` - RunJobTask []ResolvedRunJobTaskValues `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []ResolvedRunJobTaskValues `tfsdk:"run_job_task" tf:"optional,object"` - SimulationTask []ResolvedParamPairValues `tfsdk:"simulation_task" tf:"optional"` + SimulationTask []ResolvedParamPairValues `tfsdk:"simulation_task" tf:"optional,object"` - SparkJarTask []ResolvedStringParamsValues `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []ResolvedStringParamsValues `tfsdk:"spark_jar_task" tf:"optional,object"` - SparkPythonTask []ResolvedStringParamsValues `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []ResolvedStringParamsValues `tfsdk:"spark_python_task" tf:"optional,object"` - SparkSubmitTask []ResolvedStringParamsValues `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []ResolvedStringParamsValues `tfsdk:"spark_submit_task" tf:"optional,object"` - SqlTask []ResolvedParamPairValues `tfsdk:"sql_task" tf:"optional"` + SqlTask []ResolvedParamPairValues `tfsdk:"sql_task" tf:"optional,object"` } // Run was retrieved successfully @@ -1429,10 +1429,10 @@ type Run struct { // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. - ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` + ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"` // A snapshot of the job’s cluster specification when this run was // created. - ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional"` + ClusterSpec []ClusterSpec `tfsdk:"cluster_spec" tf:"optional,object"` // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` @@ -1459,7 +1459,7 @@ type Run struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // Only populated by for-each iterations. The parent for-each task is // located in tasks array. Iterations []RunTask `tfsdk:"iterations" tf:"optional"` @@ -1485,7 +1485,7 @@ type Run struct { // run_id of the original attempt; otherwise, it is the same as the run_id. OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"` // The parameters used for this run. 
- OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional"` + OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional,object"` // A token that can be used to list the previous page of sub-resources. PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` // The time in milliseconds that the run has spent in the queue. @@ -1512,7 +1512,7 @@ type Run struct { RunType types.String `tfsdk:"run_type" tf:"optional"` // The cron schedule that triggered this run if it was triggered by the // periodic scheduler. - Schedule []CronSchedule `tfsdk:"schedule" tf:"optional"` + Schedule []CronSchedule `tfsdk:"schedule" tf:"optional,object"` // The time in milliseconds it took to set up the cluster. For runs that run // on new clusters this is the cluster creation time, for runs that run on // existing clusters this time should be very short. The duration of a task @@ -1527,9 +1527,9 @@ type Run struct { // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State []RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional,object"` // The current status of the run - Status []RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional,object"` // The list of tasks performed by the run. Each task has its own `run_id` // which you can use to call `JobsGetOutput` to retrieve the run resutls. Tasks []RunTask `tfsdk:"tasks" tf:"optional"` @@ -1545,7 +1545,7 @@ type Run struct { // arrival. * `TABLE`: Indicates a run that is triggered by a table update. Trigger types.String `tfsdk:"trigger" tf:"optional"` // Additional details about what triggered the run - TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional"` + TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional,object"` } type RunConditionTask struct { @@ -1581,9 +1581,9 @@ type RunForEachTask struct { Inputs types.String `tfsdk:"inputs" tf:""` // Read only field. 
Populated for GetRun and ListRuns RPC calls and stores // the execution stats of an For each task - Stats []ForEachStats `tfsdk:"stats" tf:"optional"` + Stats []ForEachStats `tfsdk:"stats" tf:"optional,object"` // Configuration for the task that will be run for each element in the array - Task []Task `tfsdk:"task" tf:""` + Task []Task `tfsdk:"task" tf:"object"` } type RunJobOutput struct { @@ -1632,7 +1632,7 @@ type RunJobTask struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1735,7 +1735,7 @@ type RunNow struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1757,7 +1757,7 @@ type RunNow struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables PythonParams []types.String `tfsdk:"python_params" tf:"optional"` // The queue settings of the run. - Queue []QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` // A list of parameters for jobs with spark submit task, for example // `"spark_submit_params": ["--class", // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to @@ -1795,7 +1795,7 @@ type RunNowResponse struct { // Run output was retrieved successfully. type RunOutput struct { // The output of a dbt task, if available. - DbtOutput []DbtOutput `tfsdk:"dbt_output" tf:"optional"` + DbtOutput []DbtOutput `tfsdk:"dbt_output" tf:"optional,object"` // An error message indicating why a task failed or why output is not // available. The message is unstructured, and its exact format is subject // to change. @@ -1816,7 +1816,7 @@ type RunOutput struct { // Whether the logs are truncated. LogsTruncated types.Bool `tfsdk:"logs_truncated" tf:"optional"` // All details of the run except for its output. - Metadata []Run `tfsdk:"metadata" tf:"optional"` + Metadata []Run `tfsdk:"metadata" tf:"optional,object"` // The output of a notebook task, if available. A notebook task that // terminates (either successfully or with a failure) without calling // `dbutils.notebook.exit()` is considered to have an empty output. This @@ -1825,11 +1825,11 @@ type RunOutput struct { // the [ClusterLogConf] field to configure log storage for the job cluster. 
// // [ClusterLogConf]: https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlogconf - NotebookOutput []NotebookOutput `tfsdk:"notebook_output" tf:"optional"` + NotebookOutput []NotebookOutput `tfsdk:"notebook_output" tf:"optional,object"` // The output of a run job task, if available - RunJobOutput []RunJobOutput `tfsdk:"run_job_output" tf:"optional"` + RunJobOutput []RunJobOutput `tfsdk:"run_job_output" tf:"optional,object"` // The output of a SQL task, if available. - SqlOutput []SqlOutput `tfsdk:"sql_output" tf:"optional"` + SqlOutput []SqlOutput `tfsdk:"sql_output" tf:"optional,object"` } type RunParameters struct { @@ -1869,7 +1869,7 @@ type RunParameters struct { // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` // Controls whether the pipeline should perform a full refresh - PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional"` + PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` // A list of parameters for jobs with Python tasks, for example @@ -1936,12 +1936,12 @@ type RunState struct { // The current status of the run type RunStatus struct { // If the run was queued, details about the reason for queuing the run. - QueueDetails []QueueDetails `tfsdk:"queue_details" tf:"optional"` + QueueDetails []QueueDetails `tfsdk:"queue_details" tf:"optional,object"` // The current state of the run. State types.String `tfsdk:"state" tf:"optional"` // If the run is in a TERMINATING or TERMINATED state, details about the // reason for terminating the run. - TerminationDetails []TerminationDetails `tfsdk:"termination_details" tf:"optional"` + TerminationDetails []TerminationDetails `tfsdk:"termination_details" tf:"optional,object"` } // Used when outputting a child run, in GetRun or ListRuns. @@ -1963,15 +1963,15 @@ type RunTask struct { // The cluster used for this run. If the run is specified to use a new // cluster, this field is set once the Jobs service has requested a cluster // for the run. - ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional"` + ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"` // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. - ConditionTask []RunConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []RunConditionTask `tfsdk:"condition_task" tf:"optional,object"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name @@ -1981,7 +1981,7 @@ type RunTask struct { Description types.String `tfsdk:"description" tf:"optional"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. 
- EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` @@ -2004,7 +2004,7 @@ type RunTask struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask []RunForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []RunForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. If `git_source` is set, @@ -2013,7 +2013,7 @@ type RunTask struct { // `WORKSPACE` on the task. Note: dbt and SQL File tasks support only // version-controlled sources. If dbt or SQL File tasks are used, // `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey types.String `tfsdk:"job_cluster_key" tf:"optional"` @@ -2022,22 +2022,22 @@ type RunTask struct { Libraries compute.Library `tfsdk:"library" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. - NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. - NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // If pipeline_task, indicates that this task must execute a Pipeline. - PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` // Parameter values including resolved references - ResolvedValues []ResolvedValues `tfsdk:"resolved_values" tf:"optional"` + ResolvedValues []ResolvedValues `tfsdk:"resolved_values" tf:"optional,object"` // The time in milliseconds it took the job run and all of its repairs to // finish. RunDuration types.Int64 `tfsdk:"run_duration" tf:"optional"` @@ -2049,7 +2049,7 @@ type RunTask struct { // possible values. 
RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` RunPageUrl types.String `tfsdk:"run_page_url" tf:"optional"` // The time in milliseconds it took to set up the cluster. For runs that run @@ -2061,9 +2061,9 @@ type RunTask struct { // `run_duration` field. SetupDuration types.Int64 `tfsdk:"setup_duration" tf:"optional"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2081,18 +2081,18 @@ type RunTask struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job // task starts executing, for example, if the job is scheduled to run on a // new cluster, this is the time the cluster creation call is issued. StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // Deprecated. Please use the `status` field instead. - State []RunState `tfsdk:"state" tf:"optional"` + State []RunState `tfsdk:"state" tf:"optional,object"` // The current status of the run - Status []RunStatus `tfsdk:"status" tf:"optional"` + Status []RunStatus `tfsdk:"status" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2104,7 +2104,7 @@ type RunTask struct { // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. // Task webhooks respect the task notification settings. - WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } type SparkJarTask struct { @@ -2191,7 +2191,7 @@ type SqlDashboardWidgetOutput struct { // Time (in epoch milliseconds) when execution of the SQL widget ends. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` // The information about the error when execution fails. - Error []SqlOutputError `tfsdk:"error" tf:"optional"` + Error []SqlOutputError `tfsdk:"error" tf:"optional,object"` // The link to find the output results. OutputLink types.String `tfsdk:"output_link" tf:"optional"` // Time (in epoch milliseconds) when execution of the SQL widget starts. 
@@ -2206,11 +2206,11 @@ type SqlDashboardWidgetOutput struct { type SqlOutput struct { // The output of a SQL alert task, if available. - AlertOutput []SqlAlertOutput `tfsdk:"alert_output" tf:"optional"` + AlertOutput []SqlAlertOutput `tfsdk:"alert_output" tf:"optional,object"` // The output of a SQL dashboard task, if available. - DashboardOutput []SqlDashboardOutput `tfsdk:"dashboard_output" tf:"optional"` + DashboardOutput []SqlDashboardOutput `tfsdk:"dashboard_output" tf:"optional,object"` // The output of a SQL query task, if available. - QueryOutput []SqlQueryOutput `tfsdk:"query_output" tf:"optional"` + QueryOutput []SqlQueryOutput `tfsdk:"query_output" tf:"optional,object"` } type SqlOutputError struct { @@ -2238,17 +2238,17 @@ type SqlStatementOutput struct { type SqlTask struct { // If alert, indicates that this job must refresh a SQL alert. - Alert []SqlTaskAlert `tfsdk:"alert" tf:"optional"` + Alert []SqlTaskAlert `tfsdk:"alert" tf:"optional,object"` // If dashboard, indicates that this job must refresh a SQL dashboard. - Dashboard []SqlTaskDashboard `tfsdk:"dashboard" tf:"optional"` + Dashboard []SqlTaskDashboard `tfsdk:"dashboard" tf:"optional,object"` // If file, indicates that this job runs a SQL file in a remote Git // repository. - File []SqlTaskFile `tfsdk:"file" tf:"optional"` + File []SqlTaskFile `tfsdk:"file" tf:"optional,object"` // Parameters to be used for each run of this job. The SQL alert task does // not support custom parameters. Parameters map[string]types.String `tfsdk:"parameters" tf:"optional"` // If query, indicates that this job must execute a SQL query. - Query []SqlTaskQuery `tfsdk:"query" tf:"optional"` + Query []SqlTaskQuery `tfsdk:"query" tf:"optional,object"` // The canonical identifier of the SQL warehouse. Recommended to use with // serverless or pro SQL warehouses. Classic SQL warehouses are only // supported for SQL alert, dashboard and query tasks and are limited to @@ -2314,7 +2314,7 @@ type SubmitRun struct { AccessControlList []JobAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // An optional set of email addresses notified when the run begins or // completes. - EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // A list of task execution environment specifications that can be // referenced by tasks of this run. Environments []JobEnvironment `tfsdk:"environments" tf:"optional"` @@ -2328,9 +2328,9 @@ type SubmitRun struct { // // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. - GitSource []GitSource `tfsdk:"git_source" tf:"optional"` + GitSource []GitSource `tfsdk:"git_source" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // An optional token that can be used to guarantee the idempotency of job // run requests. If a run with the provided token already exists, the // request does not create a new run but returns the ID of the existing run @@ -2350,12 +2350,12 @@ type SubmitRun struct { // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // run. 
- NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []JobNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // The queue settings of the one-time run. - Queue []QueueSettings `tfsdk:"queue" tf:"optional"` + Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` // Specifies the user or service principal that the job runs as. If not // specified, the job runs as the user who submits the request. - RunAs []JobRunAs `tfsdk:"run_as" tf:"optional"` + RunAs []JobRunAs `tfsdk:"run_as" tf:"optional,object"` // An optional name for the run. The default value is `Untitled`. RunName types.String `tfsdk:"run_name" tf:"optional"` @@ -2365,7 +2365,7 @@ type SubmitRun struct { TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds" tf:"optional"` // A collection of system notification IDs to notify when the run begins or // completes. - WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } // Run was created and started successfully. @@ -2378,11 +2378,11 @@ type SubmitTask struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. - ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name @@ -2392,7 +2392,7 @@ type SubmitTask struct { Description types.String `tfsdk:"description" tf:"optional"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. - EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // The key that references an environment spec in a job. This field is // required for Python script, Python wheel and dbt tasks when using // serverless compute. @@ -2404,37 +2404,37 @@ type SubmitTask struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // An optional list of libraries to be installed on the cluster. The default // value is an empty list. Libraries compute.Library `tfsdk:"library" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. 
- NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. - NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. - NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // If pipeline_task, indicates that this task must execute a Pipeline. - PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2452,9 +2452,9 @@ type SubmitTask struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2466,7 +2466,7 @@ type SubmitTask struct { // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. // Task webhooks respect the task notification settings. 
- WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } type TableUpdateTriggerConfiguration struct { @@ -2490,11 +2490,11 @@ type Task struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. - ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional"` + ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` // If dbt_task, indicates that this must execute a dbt task. It requires // both Databricks SQL and the ability to use a serverless or a pro SQL // warehouse. - DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional"` + DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete before executing this // task. The task will run only if the `run_if` condition is true. The key @@ -2507,7 +2507,7 @@ type Task struct { // An optional set of email addresses that is notified when runs of this // task begin or complete as well as when this task is deleted. The default // behavior is to not send any emails. - EmailNotifications []TaskEmailNotifications `tfsdk:"email_notifications" tf:"optional"` + EmailNotifications []TaskEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` // The key that references an environment spec in a job. This field is // required for Python script, Python wheel and dbt tasks when using // serverless compute. @@ -2519,9 +2519,9 @@ type Task struct { ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` // If for_each_task, indicates that this task must execute the nested task // within it. - ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional"` + ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. - Health []JobsHealthRules `tfsdk:"health" tf:"optional"` + Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` // If job_cluster_key, this task is executed reusing the cluster specified // in `job.settings.job_clusters`. JobClusterKey types.String `tfsdk:"job_cluster_key" tf:"optional"` @@ -2539,18 +2539,18 @@ type Task struct { MinRetryIntervalMillis types.Int64 `tfsdk:"min_retry_interval_millis" tf:"optional"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional"` + NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` // If notebook_task, indicates that this task must run a notebook. This // field may not be specified in conjunction with spark_jar_task. - NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional"` + NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task. - NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional"` + NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` // If pipeline_task, indicates that this task must execute a Pipeline. 
- PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional"` + PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional"` + PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional policy to specify whether to retry a job when it times out. // The default behavior is to not retry on timeout. RetryOnTimeout types.Bool `tfsdk:"retry_on_timeout" tf:"optional"` @@ -2565,11 +2565,11 @@ type Task struct { // dependencies have failed RunIf types.String `tfsdk:"run_if" tf:"optional"` // If run_job_task, indicates that this task must execute another job. - RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional"` + RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional"` + SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional"` + SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` // If `spark_submit_task`, indicates that this task must be launched by the // spark submit script. This task can run only on new clusters. // @@ -2587,9 +2587,9 @@ type Task struct { // // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. - SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional"` + SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` // If sql_task, indicates that this job must execute a SQL task. - SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional"` + SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent // job. On Update or Reset, this field is used to reference the tasks to be @@ -2601,7 +2601,7 @@ type Task struct { // A collection of system notification IDs to notify when runs of this task // begin or complete. The default behavior is to not send any system // notifications. - WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional"` + WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } type TaskDependency struct { @@ -2730,15 +2730,15 @@ type TriggerInfo struct { type TriggerSettings struct { // File arrival trigger settings. - FileArrival []FileArrivalTriggerConfiguration `tfsdk:"file_arrival" tf:"optional"` + FileArrival []FileArrivalTriggerConfiguration `tfsdk:"file_arrival" tf:"optional,object"` // Whether this trigger is paused or not. PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` // Periodic trigger settings. - Periodic []PeriodicTriggerConfiguration `tfsdk:"periodic" tf:"optional"` + Periodic []PeriodicTriggerConfiguration `tfsdk:"periodic" tf:"optional,object"` // Old table trigger settings name. Deprecated in favor of `table_update`. 
- Table []TableUpdateTriggerConfiguration `tfsdk:"table" tf:"optional"` + Table []TableUpdateTriggerConfiguration `tfsdk:"table" tf:"optional,object"` - TableUpdate []TableUpdateTriggerConfiguration `tfsdk:"table_update" tf:"optional"` + TableUpdate []TableUpdateTriggerConfiguration `tfsdk:"table_update" tf:"optional,object"` } type UpdateJob struct { @@ -2759,7 +2759,7 @@ type UpdateJob struct { // // Changes to the field `JobSettings.timeout_seconds` are applied to active // runs. Changes to other fields are applied to future runs only. - NewSettings []JobSettings `tfsdk:"new_settings" tf:"optional"` + NewSettings []JobSettings `tfsdk:"new_settings" tf:"optional,object"` } type UpdateResponse struct { diff --git a/internal/service/marketplace_tf/model.go b/internal/service/marketplace_tf/model.go index 692e1c80d5..40648fd8c2 100755 --- a/internal/service/marketplace_tf/model.go +++ b/internal/service/marketplace_tf/model.go @@ -21,7 +21,7 @@ type AddExchangeForListingRequest struct { } type AddExchangeForListingResponse struct { - ExchangeForListing []ExchangeListing `tfsdk:"exchange_for_listing" tf:"optional"` + ExchangeForListing []ExchangeListing `tfsdk:"exchange_for_listing" tf:"optional,object"` } // Get one batch of listings. One may specify up to 50 IDs per request. @@ -59,7 +59,7 @@ type ContactInfo struct { } type CreateExchangeFilterRequest struct { - Filter []ExchangeFilter `tfsdk:"filter" tf:""` + Filter []ExchangeFilter `tfsdk:"filter" tf:"object"` } type CreateExchangeFilterResponse struct { @@ -67,7 +67,7 @@ type CreateExchangeFilterResponse struct { } type CreateExchangeRequest struct { - Exchange []Exchange `tfsdk:"exchange" tf:""` + Exchange []Exchange `tfsdk:"exchange" tf:"object"` } type CreateExchangeResponse struct { @@ -77,7 +77,7 @@ type CreateExchangeResponse struct { type CreateFileRequest struct { DisplayName types.String `tfsdk:"display_name" tf:"optional"` - FileParent []FileParent `tfsdk:"file_parent" tf:""` + FileParent []FileParent `tfsdk:"file_parent" tf:"object"` MarketplaceFileType types.String `tfsdk:"marketplace_file_type" tf:""` @@ -85,13 +85,13 @@ type CreateFileRequest struct { } type CreateFileResponse struct { - FileInfo []FileInfo `tfsdk:"file_info" tf:"optional"` + FileInfo []FileInfo `tfsdk:"file_info" tf:"optional,object"` // Pre-signed POST URL to blob storage UploadUrl types.String `tfsdk:"upload_url" tf:"optional"` } type CreateInstallationRequest struct { - AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"optional"` + AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"optional,object"` CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` @@ -99,13 +99,13 @@ type CreateInstallationRequest struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` // for git repo installations - RepoDetail []RepoInstallation `tfsdk:"repo_detail" tf:"optional"` + RepoDetail []RepoInstallation `tfsdk:"repo_detail" tf:"optional,object"` ShareName types.String `tfsdk:"share_name" tf:"optional"` } type CreateListingRequest struct { - Listing []Listing `tfsdk:"listing" tf:""` + Listing []Listing `tfsdk:"listing" tf:"object"` } type CreateListingResponse struct { @@ -114,7 +114,7 @@ type CreateListingResponse struct { // Data request messages also creates a lead (maybe) type CreatePersonalizationRequest struct { - AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:""` + AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"object"` Comment 
types.String `tfsdk:"comment" tf:"optional"` @@ -138,7 +138,7 @@ type CreatePersonalizationRequestResponse struct { } type CreateProviderRequest struct { - Provider []ProviderInfo `tfsdk:"provider" tf:""` + Provider []ProviderInfo `tfsdk:"provider" tf:"object"` } type CreateProviderResponse struct { @@ -264,7 +264,7 @@ type FileInfo struct { DownloadLink types.String `tfsdk:"download_link" tf:"optional"` - FileParent []FileParent `tfsdk:"file_parent" tf:"optional"` + FileParent []FileParent `tfsdk:"file_parent" tf:"optional,object"` Id types.String `tfsdk:"id" tf:"optional"` @@ -292,7 +292,7 @@ type GetExchangeRequest struct { } type GetExchangeResponse struct { - Exchange []Exchange `tfsdk:"exchange" tf:"optional"` + Exchange []Exchange `tfsdk:"exchange" tf:"optional,object"` } // Get a file @@ -301,7 +301,7 @@ type GetFileRequest struct { } type GetFileResponse struct { - FileInfo []FileInfo `tfsdk:"file_info" tf:"optional"` + FileInfo []FileInfo `tfsdk:"file_info" tf:"optional,object"` } type GetLatestVersionProviderAnalyticsDashboardResponse struct { @@ -330,7 +330,7 @@ type GetListingRequest struct { } type GetListingResponse struct { - Listing []Listing `tfsdk:"listing" tf:"optional"` + Listing []Listing `tfsdk:"listing" tf:"optional,object"` } // List listings @@ -361,11 +361,11 @@ type GetProviderRequest struct { } type GetProviderResponse struct { - Provider []ProviderInfo `tfsdk:"provider" tf:"optional"` + Provider []ProviderInfo `tfsdk:"provider" tf:"optional,object"` } type Installation struct { - Installation []InstallationDetail `tfsdk:"installation" tf:"optional"` + Installation []InstallationDetail `tfsdk:"installation" tf:"optional,object"` } type InstallationDetail struct { @@ -391,7 +391,7 @@ type InstallationDetail struct { Status types.String `tfsdk:"status" tf:"optional"` - TokenDetail []TokenDetail `tfsdk:"token_detail" tf:"optional"` + TokenDetail []TokenDetail `tfsdk:"token_detail" tf:"optional,object"` Tokens []TokenInfo `tfsdk:"tokens" tf:"optional"` } @@ -578,11 +578,11 @@ type ListProvidersResponse struct { } type Listing struct { - Detail []ListingDetail `tfsdk:"detail" tf:"optional"` + Detail []ListingDetail `tfsdk:"detail" tf:"optional,object"` Id types.String `tfsdk:"id" tf:"optional"` // Next Number: 26 - Summary []ListingSummary `tfsdk:"summary" tf:""` + Summary []ListingSummary `tfsdk:"summary" tf:"object"` } type ListingDetail struct { @@ -594,7 +594,7 @@ type ListingDetail struct { // The starting date timestamp for when the data spans CollectionDateStart types.Int64 `tfsdk:"collection_date_start" tf:"optional"` // Smallest unit of time in the dataset - CollectionGranularity []DataRefreshInfo `tfsdk:"collection_granularity" tf:"optional"` + CollectionGranularity []DataRefreshInfo `tfsdk:"collection_granularity" tf:"optional,object"` // Whether the dataset is free or paid Cost types.String `tfsdk:"cost" tf:"optional"` // Where/how the data is sourced @@ -633,7 +633,7 @@ type ListingDetail struct { TermsOfService types.String `tfsdk:"terms_of_service" tf:"optional"` // How often data is updated - UpdateFrequency []DataRefreshInfo `tfsdk:"update_frequency" tf:"optional"` + UpdateFrequency []DataRefreshInfo `tfsdk:"update_frequency" tf:"optional,object"` } type ListingFulfillment struct { @@ -643,9 +643,9 @@ type ListingFulfillment struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` - RepoInfo []RepoInfo `tfsdk:"repo_info" tf:"optional"` + RepoInfo []RepoInfo `tfsdk:"repo_info" tf:"optional,object"` - ShareInfo []ShareInfo 
`tfsdk:"share_info" tf:"optional"` + ShareInfo []ShareInfo `tfsdk:"share_info" tf:"optional,object"` } type ListingSetting struct { @@ -665,7 +665,7 @@ type ListingSummary struct { ExchangeIds []types.String `tfsdk:"exchange_ids" tf:"optional"` // if a git repo is being created, a listing will be initialized with this // field as opposed to a share - GitRepo []RepoInfo `tfsdk:"git_repo" tf:"optional"` + GitRepo []RepoInfo `tfsdk:"git_repo" tf:"optional,object"` ListingType types.String `tfsdk:"listingType" tf:""` @@ -673,15 +673,15 @@ type ListingSummary struct { ProviderId types.String `tfsdk:"provider_id" tf:"optional"` - ProviderRegion []RegionInfo `tfsdk:"provider_region" tf:"optional"` + ProviderRegion []RegionInfo `tfsdk:"provider_region" tf:"optional,object"` PublishedAt types.Int64 `tfsdk:"published_at" tf:"optional"` PublishedBy types.String `tfsdk:"published_by" tf:"optional"` - Setting []ListingSetting `tfsdk:"setting" tf:"optional"` + Setting []ListingSetting `tfsdk:"setting" tf:"optional,object"` - Share []ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional,object"` // Enums Status types.String `tfsdk:"status" tf:"optional"` @@ -705,10 +705,10 @@ type ListingTag struct { type PersonalizationRequest struct { Comment types.String `tfsdk:"comment" tf:"optional"` - ConsumerRegion []RegionInfo `tfsdk:"consumer_region" tf:""` + ConsumerRegion []RegionInfo `tfsdk:"consumer_region" tf:"object"` // contact info for the consumer requesting data or performing a listing // installation - ContactInfo []ContactInfo `tfsdk:"contact_info" tf:"optional"` + ContactInfo []ContactInfo `tfsdk:"contact_info" tf:"optional,object"` CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` @@ -728,7 +728,7 @@ type PersonalizationRequest struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` - Share []ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional,object"` Status types.String `tfsdk:"status" tf:"optional"` @@ -870,27 +870,27 @@ type TokenInfo struct { } type UpdateExchangeFilterRequest struct { - Filter []ExchangeFilter `tfsdk:"filter" tf:""` + Filter []ExchangeFilter `tfsdk:"filter" tf:"object"` Id types.String `tfsdk:"-"` } type UpdateExchangeFilterResponse struct { - Filter []ExchangeFilter `tfsdk:"filter" tf:"optional"` + Filter []ExchangeFilter `tfsdk:"filter" tf:"optional,object"` } type UpdateExchangeRequest struct { - Exchange []Exchange `tfsdk:"exchange" tf:""` + Exchange []Exchange `tfsdk:"exchange" tf:"object"` Id types.String `tfsdk:"-"` } type UpdateExchangeResponse struct { - Exchange []Exchange `tfsdk:"exchange" tf:"optional"` + Exchange []Exchange `tfsdk:"exchange" tf:"optional,object"` } type UpdateInstallationRequest struct { - Installation []InstallationDetail `tfsdk:"installation" tf:""` + Installation []InstallationDetail `tfsdk:"installation" tf:"object"` InstallationId types.String `tfsdk:"-"` @@ -900,17 +900,17 @@ type UpdateInstallationRequest struct { } type UpdateInstallationResponse struct { - Installation []InstallationDetail `tfsdk:"installation" tf:"optional"` + Installation []InstallationDetail `tfsdk:"installation" tf:"optional,object"` } type UpdateListingRequest struct { Id types.String `tfsdk:"-"` - Listing []Listing `tfsdk:"listing" tf:""` + Listing []Listing `tfsdk:"listing" tf:"object"` } type UpdateListingResponse struct { - Listing []Listing `tfsdk:"listing" tf:"optional"` + Listing []Listing `tfsdk:"listing" tf:"optional,object"` } type 
UpdatePersonalizationRequestRequest struct { @@ -920,13 +920,13 @@ type UpdatePersonalizationRequestRequest struct { RequestId types.String `tfsdk:"-"` - Share []ShareInfo `tfsdk:"share" tf:"optional"` + Share []ShareInfo `tfsdk:"share" tf:"optional,object"` Status types.String `tfsdk:"status" tf:""` } type UpdatePersonalizationRequestResponse struct { - Request []PersonalizationRequest `tfsdk:"request" tf:"optional"` + Request []PersonalizationRequest `tfsdk:"request" tf:"optional,object"` } type UpdateProviderAnalyticsDashboardRequest struct { @@ -950,9 +950,9 @@ type UpdateProviderAnalyticsDashboardResponse struct { type UpdateProviderRequest struct { Id types.String `tfsdk:"-"` - Provider []ProviderInfo `tfsdk:"provider" tf:""` + Provider []ProviderInfo `tfsdk:"provider" tf:"object"` } type UpdateProviderResponse struct { - Provider []ProviderInfo `tfsdk:"provider" tf:"optional"` + Provider []ProviderInfo `tfsdk:"provider" tf:"optional,object"` } diff --git a/internal/service/ml_tf/model.go b/internal/service/ml_tf/model.go index 71e0ecbc1e..e3e52c78a5 100755 --- a/internal/service/ml_tf/model.go +++ b/internal/service/ml_tf/model.go @@ -94,7 +94,7 @@ type ApproveTransitionRequest struct { type ApproveTransitionRequestResponse struct { // Activity recorded for the action. - Activity []Activity `tfsdk:"activity" tf:"optional"` + Activity []Activity `tfsdk:"activity" tf:"optional,object"` } // Comment details. @@ -124,7 +124,7 @@ type CreateComment struct { type CreateCommentResponse struct { // Comment details. - Comment []CommentObject `tfsdk:"comment" tf:"optional"` + Comment []CommentObject `tfsdk:"comment" tf:"optional,object"` } type CreateExperiment struct { @@ -156,7 +156,7 @@ type CreateModelRequest struct { } type CreateModelResponse struct { - RegisteredModel []Model `tfsdk:"registered_model" tf:"optional"` + RegisteredModel []Model `tfsdk:"registered_model" tf:"optional,object"` } type CreateModelVersionRequest struct { @@ -178,7 +178,7 @@ type CreateModelVersionRequest struct { type CreateModelVersionResponse struct { // Return new version number generated for this model in registry. - ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional"` + ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional,object"` } type CreateRegistryWebhook struct { @@ -219,9 +219,9 @@ type CreateRegistryWebhook struct { // version be archived. Events []types.String `tfsdk:"events" tf:""` - HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"` + HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional,object"` - JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional"` + JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional,object"` // Name of the model whose events would trigger this webhook. ModelName types.String `tfsdk:"model_name" tf:"optional"` // Enable or disable triggering the webhook, or put the webhook into test @@ -250,7 +250,7 @@ type CreateRun struct { type CreateRunResponse struct { // The newly created run. - Run []Run `tfsdk:"run" tf:"optional"` + Run []Run `tfsdk:"run" tf:"optional,object"` } type CreateTransitionRequest struct { @@ -274,11 +274,11 @@ type CreateTransitionRequest struct { type CreateTransitionRequestResponse struct { // Transition request details. 
- Request []TransitionRequest `tfsdk:"request" tf:"optional"` + Request []TransitionRequest `tfsdk:"request" tf:"optional,object"` } type CreateWebhookResponse struct { - Webhook []RegistryWebhook `tfsdk:"webhook" tf:"optional"` + Webhook []RegistryWebhook `tfsdk:"webhook" tf:"optional,object"` } type Dataset struct { @@ -306,7 +306,7 @@ type Dataset struct { type DatasetInput struct { // The dataset being used as a Run input. - Dataset []Dataset `tfsdk:"dataset" tf:"optional"` + Dataset []Dataset `tfsdk:"dataset" tf:"optional,object"` // A list of tags for the dataset input, e.g. a “context” tag with value // “training” Tags []InputTag `tfsdk:"tags" tf:"optional"` @@ -562,7 +562,7 @@ type GetExperimentRequest struct { type GetExperimentResponse struct { // Experiment details. - Experiment []Experiment `tfsdk:"experiment" tf:"optional"` + Experiment []Experiment `tfsdk:"experiment" tf:"optional,object"` } // Get history of a given metric within a run @@ -611,7 +611,7 @@ type GetModelRequest struct { } type GetModelResponse struct { - RegisteredModelDatabricks []ModelDatabricks `tfsdk:"registered_model_databricks" tf:"optional"` + RegisteredModelDatabricks []ModelDatabricks `tfsdk:"registered_model_databricks" tf:"optional,object"` } // Get a model version URI @@ -636,7 +636,7 @@ type GetModelVersionRequest struct { } type GetModelVersionResponse struct { - ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional"` + ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional,object"` } // Get registered model permission levels @@ -668,7 +668,7 @@ type GetRunRequest struct { type GetRunResponse struct { // Run metadata (name, start time, etc) and data (metrics, params, and // tags). - Run []Run `tfsdk:"run" tf:"optional"` + Run []Run `tfsdk:"run" tf:"optional,object"` } type HttpUrlSpec struct { @@ -1152,11 +1152,11 @@ type RegistryWebhook struct { // version be archived. Events []types.String `tfsdk:"events" tf:"optional"` - HttpUrlSpec []HttpUrlSpecWithoutSecret `tfsdk:"http_url_spec" tf:"optional"` + HttpUrlSpec []HttpUrlSpecWithoutSecret `tfsdk:"http_url_spec" tf:"optional,object"` // Webhook ID Id types.String `tfsdk:"id" tf:"optional"` - JobSpec []JobSpecWithoutSecret `tfsdk:"job_spec" tf:"optional"` + JobSpec []JobSpecWithoutSecret `tfsdk:"job_spec" tf:"optional,object"` // Time of the object at last update, as a Unix timestamp in milliseconds. LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp" tf:"optional"` // Name of the model whose events would trigger this webhook. @@ -1193,7 +1193,7 @@ type RejectTransitionRequest struct { type RejectTransitionRequestResponse struct { // Activity recorded for the action. - Activity []Activity `tfsdk:"activity" tf:"optional"` + Activity []Activity `tfsdk:"activity" tf:"optional,object"` } type RenameModelRequest struct { @@ -1204,7 +1204,7 @@ type RenameModelRequest struct { } type RenameModelResponse struct { - RegisteredModel []Model `tfsdk:"registered_model" tf:"optional"` + RegisteredModel []Model `tfsdk:"registered_model" tf:"optional,object"` } type RestoreExperiment struct { @@ -1242,11 +1242,11 @@ type RestoreRunsResponse struct { type Run struct { // Run data. - Data []RunData `tfsdk:"data" tf:"optional"` + Data []RunData `tfsdk:"data" tf:"optional,object"` // Run metadata. - Info []RunInfo `tfsdk:"info" tf:"optional"` + Info []RunInfo `tfsdk:"info" tf:"optional,object"` // Run inputs. 
- Inputs []RunInputs `tfsdk:"inputs" tf:"optional"` + Inputs []RunInputs `tfsdk:"inputs" tf:"optional,object"` } type RunData struct { @@ -1497,7 +1497,7 @@ type TestRegistryWebhookRequest struct { type TestRegistryWebhookResponse struct { // Test webhook response object. - Webhook []TestRegistryWebhook `tfsdk:"webhook" tf:"optional"` + Webhook []TestRegistryWebhook `tfsdk:"webhook" tf:"optional,object"` } type TransitionModelVersionStageDatabricks struct { @@ -1546,7 +1546,7 @@ type TransitionRequest struct { } type TransitionStageResponse struct { - ModelVersion []ModelVersionDatabricks `tfsdk:"model_version" tf:"optional"` + ModelVersion []ModelVersionDatabricks `tfsdk:"model_version" tf:"optional,object"` } type UpdateComment struct { @@ -1558,7 +1558,7 @@ type UpdateComment struct { type UpdateCommentResponse struct { // Comment details. - Comment []CommentObject `tfsdk:"comment" tf:"optional"` + Comment []CommentObject `tfsdk:"comment" tf:"optional,object"` } type UpdateExperiment struct { @@ -1632,11 +1632,11 @@ type UpdateRegistryWebhook struct { // version be archived. Events []types.String `tfsdk:"events" tf:"optional"` - HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional"` + HttpUrlSpec []HttpUrlSpec `tfsdk:"http_url_spec" tf:"optional,object"` // Webhook ID Id types.String `tfsdk:"id" tf:""` - JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional"` + JobSpec []JobSpec `tfsdk:"job_spec" tf:"optional,object"` // Enable or disable triggering the webhook, or put the webhook into test // mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an // associated event happens. @@ -1662,7 +1662,7 @@ type UpdateRun struct { type UpdateRunResponse struct { // Updated metadata of the run. - RunInfo []RunInfo `tfsdk:"run_info" tf:"optional"` + RunInfo []RunInfo `tfsdk:"run_info" tf:"optional,object"` } type UpdateWebhookResponse struct { diff --git a/internal/service/oauth2_tf/model.go b/internal/service/oauth2_tf/model.go index 3efb4b2b43..f7959bfcf5 100755 --- a/internal/service/oauth2_tf/model.go +++ b/internal/service/oauth2_tf/model.go @@ -26,7 +26,7 @@ type CreateCustomAppIntegration struct { // offline_access, openid, profile, email. 
Scopes []types.String `tfsdk:"scopes" tf:"optional"` // Token access policy - TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } type CreateCustomAppIntegrationOutput struct { @@ -44,7 +44,7 @@ type CreatePublishedAppIntegration struct { // tableau-deskop AppId types.String `tfsdk:"app_id" tf:"optional"` // Token access policy - TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } type CreatePublishedAppIntegrationOutput struct { @@ -128,7 +128,7 @@ type GetCustomAppIntegrationOutput struct { Scopes []types.String `tfsdk:"scopes" tf:"optional"` // Token access policy - TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } // Get OAuth Custom App Integration @@ -155,7 +155,7 @@ type GetPublishedAppIntegrationOutput struct { // Display name of the published OAuth app Name types.String `tfsdk:"name" tf:"optional"` // Token access policy - TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } // Get OAuth Published App Integration @@ -258,7 +258,7 @@ type UpdateCustomAppIntegration struct { // integration RedirectUrls []types.String `tfsdk:"redirect_urls" tf:"optional"` // Token access policy to be updated in the custom OAuth app integration - TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } type UpdateCustomAppIntegrationOutput struct { @@ -267,7 +267,7 @@ type UpdateCustomAppIntegrationOutput struct { type UpdatePublishedAppIntegration struct { IntegrationId types.String `tfsdk:"-"` // Token access policy to be updated in the published OAuth app integration - TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional"` + TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } type UpdatePublishedAppIntegrationOutput struct { diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index 68ee17d592..c4ad05458b 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -36,7 +36,7 @@ type CreatePipeline struct { // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous types.Bool `tfsdk:"continuous" tf:"optional"` // Deployment type of this pipeline. - Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional,object"` // Whether the pipeline is in Development mode. Defaults to false. Development types.Bool `tfsdk:"development" tf:"optional"` @@ -44,14 +44,14 @@ type CreatePipeline struct { // Pipeline product edition. Edition types.String `tfsdk:"edition" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. - Filters []Filters `tfsdk:"filters" tf:"optional"` + Filters []Filters `tfsdk:"filters" tf:"optional,object"` // The definition of a gateway pipeline to support CDC. 
- GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` + GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` // The configuration for a managed ingestion pipeline. These settings cannot // be used with the 'libraries', 'target' or 'catalog' settings. - IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` + IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional,object"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"` // Friendly identifier for this pipeline. @@ -73,12 +73,12 @@ type CreatePipeline struct { // To publish to Unity Catalog, also specify `catalog`. Target types.String `tfsdk:"target" tf:"optional"` // Which pipeline trigger to use. Deprecated: Use `continuous` instead. - Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"` + Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"` } type CreatePipelineResponse struct { // Only returned when dry_run is true. - EffectiveSettings []PipelineSpec `tfsdk:"effective_settings" tf:"optional"` + EffectiveSettings []PipelineSpec `tfsdk:"effective_settings" tf:"optional,object"` // The unique identifier for the newly created pipeline. Only returned when // dry_run is false. PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` @@ -126,7 +126,7 @@ type EditPipeline struct { // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous types.Bool `tfsdk:"continuous" tf:"optional"` // Deployment type of this pipeline. - Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional,object"` // Whether the pipeline is in Development mode. Defaults to false. Development types.Bool `tfsdk:"development" tf:"optional"` // Pipeline product edition. @@ -136,14 +136,14 @@ type EditPipeline struct { // will fail with a conflict. ExpectedLastModified types.Int64 `tfsdk:"expected_last_modified" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. - Filters []Filters `tfsdk:"filters" tf:"optional"` + Filters []Filters `tfsdk:"filters" tf:"optional,object"` // The definition of a gateway pipeline to support CDC. - GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` + GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` // The configuration for a managed ingestion pipeline. These settings cannot // be used with the 'libraries', 'target' or 'catalog' settings. - IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` + IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional,object"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"` // Friendly identifier for this pipeline. @@ -167,7 +167,7 @@ type EditPipeline struct { // To publish to Unity Catalog, also specify `catalog`. Target types.String `tfsdk:"target" tf:"optional"` // Which pipeline trigger to use. Deprecated: Use `continuous` instead. 
- Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"` + Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"` } type EditPipelineResponse struct { @@ -238,7 +238,7 @@ type GetPipelineResponse struct { RunAsUserName types.String `tfsdk:"run_as_user_name" tf:"optional"` // The pipeline specification. This field is not returned when called by // `ListPipelines`. - Spec []PipelineSpec `tfsdk:"spec" tf:"optional"` + Spec []PipelineSpec `tfsdk:"spec" tf:"optional,object"` // The pipeline state. State types.String `tfsdk:"state" tf:"optional"` } @@ -253,14 +253,14 @@ type GetUpdateRequest struct { type GetUpdateResponse struct { // The current update info. - Update []UpdateInfo `tfsdk:"update" tf:"optional"` + Update []UpdateInfo `tfsdk:"update" tf:"optional,object"` } type IngestionConfig struct { // Select tables from a specific source schema. - Schema []SchemaSpec `tfsdk:"schema" tf:"optional"` + Schema []SchemaSpec `tfsdk:"schema" tf:"optional,object"` // Select tables from a specific source table. - Table []TableSpec `tfsdk:"table" tf:"optional"` + Table []TableSpec `tfsdk:"table" tf:"optional,object"` } type IngestionGatewayPipelineDefinition struct { @@ -294,7 +294,7 @@ type IngestionPipelineDefinition struct { Objects []IngestionConfig `tfsdk:"objects" tf:"optional"` // Configuration settings to control the ingestion of tables. These settings // are applied to all tables in the pipeline. - TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` + TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"` } // List pipeline events @@ -478,20 +478,20 @@ type PipelineCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 // or later. - Autoscale []PipelineClusterAutoscale `tfsdk:"autoscale" tf:"optional"` + Autoscale []PipelineClusterAutoscale `tfsdk:"autoscale" tf:"optional,object"` // Attributes related to clusters running on Amazon Web Services. If not // specified at cluster creation, a set of default values will be used. - AwsAttributes compute.AwsAttributes `tfsdk:"aws_attributes" tf:"optional"` + AwsAttributes compute.AwsAttributes `tfsdk:"aws_attributes" tf:"optional,object"` // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. - AzureAttributes compute.AzureAttributes `tfsdk:"azure_attributes" tf:"optional"` + AzureAttributes compute.AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` // The configuration for delivering spark logs to a long-term storage // destination. Only dbfs destinations are supported. Only one destination // can be specified for one cluster. If the conf is given, the logs will be // delivered to the destination every `5 mins`. The destination of driver // logs is `$destination/$clusterId/driver`, while the destination of // executor logs is `$destination/$clusterId/executor`. - ClusterLogConf compute.ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional"` + ClusterLogConf compute.ClusterLogConf `tfsdk:"cluster_log_conf" tf:"optional,object"` // Additional tags for cluster resources. Databricks will tag all cluster // resources (e.g., AWS instances and EBS volumes) with these tags in // addition to `default_tags`. 
Notes: @@ -513,7 +513,7 @@ type PipelineCluster struct { EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption" tf:"optional"` // Attributes related to clusters running on Google Cloud Platform. If not // specified at cluster creation, a set of default values will be used. - GcpAttributes compute.GcpAttributes `tfsdk:"gcp_attributes" tf:"optional"` + GcpAttributes compute.GcpAttributes `tfsdk:"gcp_attributes" tf:"optional,object"` // The configuration for storing init scripts. Any number of destinations // can be specified. The scripts are executed sequentially in the order // provided. If `cluster_log_conf` is specified, init script logs are sent @@ -593,7 +593,7 @@ type PipelineDeployment struct { type PipelineEvent struct { // Information about an error captured by the event. - Error []ErrorDetail `tfsdk:"error" tf:"optional"` + Error []ErrorDetail `tfsdk:"error" tf:"optional,object"` // The event type. Should always correspond to the details EventType types.String `tfsdk:"event_type" tf:"optional"` // A time-based, globally unique id. @@ -605,9 +605,9 @@ type PipelineEvent struct { // The display message associated with the event. Message types.String `tfsdk:"message" tf:"optional"` // Describes where the event originates from. - Origin []Origin `tfsdk:"origin" tf:"optional"` + Origin []Origin `tfsdk:"origin" tf:"optional,object"` // A sequencing object to identify and order events. - Sequence []Sequencing `tfsdk:"sequence" tf:"optional"` + Sequence []Sequencing `tfsdk:"sequence" tf:"optional,object"` // The time of the event. Timestamp types.String `tfsdk:"timestamp" tf:"optional"` } @@ -615,14 +615,14 @@ type PipelineEvent struct { type PipelineLibrary struct { // The path to a file that defines a pipeline and is stored in the // Databricks Repos. - File []FileLibrary `tfsdk:"file" tf:"optional"` + File []FileLibrary `tfsdk:"file" tf:"optional,object"` // URI of the jar to be installed. Currently only DBFS is supported. Jar types.String `tfsdk:"jar" tf:"optional"` // Specification of a maven library to be installed. - Maven compute.MavenLibrary `tfsdk:"maven" tf:"optional"` + Maven compute.MavenLibrary `tfsdk:"maven" tf:"optional,object"` // The path to a notebook that defines a pipeline and is stored in the // Databricks workspace. - Notebook []NotebookLibrary `tfsdk:"notebook" tf:"optional"` + Notebook []NotebookLibrary `tfsdk:"notebook" tf:"optional,object"` // URI of the whl to be installed. Whl types.String `tfsdk:"whl" tf:"optional"` } @@ -673,20 +673,20 @@ type PipelineSpec struct { // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous types.Bool `tfsdk:"continuous" tf:"optional"` // Deployment type of this pipeline. - Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional"` + Deployment []PipelineDeployment `tfsdk:"deployment" tf:"optional,object"` // Whether the pipeline is in Development mode. Defaults to false. Development types.Bool `tfsdk:"development" tf:"optional"` // Pipeline product edition. Edition types.String `tfsdk:"edition" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. - Filters []Filters `tfsdk:"filters" tf:"optional"` + Filters []Filters `tfsdk:"filters" tf:"optional,object"` // The definition of a gateway pipeline to support CDC. 
- GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional"` + GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` // The configuration for a managed ingestion pipeline. These settings cannot // be used with the 'libraries', 'target' or 'catalog' settings. - IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional"` + IngestionDefinition []IngestionPipelineDefinition `tfsdk:"ingestion_definition" tf:"optional,object"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `tfsdk:"libraries" tf:"optional"` // Friendly identifier for this pipeline. @@ -708,7 +708,7 @@ type PipelineSpec struct { // To publish to Unity Catalog, also specify `catalog`. Target types.String `tfsdk:"target" tf:"optional"` // Which pipeline trigger to use. Deprecated: Use `continuous` instead. - Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional"` + Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"` } type PipelineStateInfo struct { @@ -733,9 +733,9 @@ type PipelineStateInfo struct { } type PipelineTrigger struct { - Cron []CronTrigger `tfsdk:"cron" tf:"optional"` + Cron []CronTrigger `tfsdk:"cron" tf:"optional,object"` - Manual []ManualTrigger `tfsdk:"manual" tf:"optional"` + Manual []ManualTrigger `tfsdk:"manual" tf:"optional,object"` } type SchemaSpec struct { @@ -753,14 +753,14 @@ type SchemaSpec struct { // Configuration settings to control the ingestion of tables. These settings // are applied to all tables in this schema and override the // table_configuration defined in the IngestionPipelineDefinition object. - TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` + TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"` } type Sequencing struct { // A sequence number, unique and increasing within the control plane. ControlPlaneSeqNo types.Int64 `tfsdk:"control_plane_seq_no" tf:"optional"` // the ID assigned by the data plane. - DataPlaneId []DataPlaneId `tfsdk:"data_plane_id" tf:"optional"` + DataPlaneId []DataPlaneId `tfsdk:"data_plane_id" tf:"optional,object"` } type SerializedException struct { @@ -834,7 +834,7 @@ type TableSpec struct { // Configuration settings to control the ingestion of tables. These settings // override the table_configuration defined in the // IngestionPipelineDefinition object and the SchemaSpec. - TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional"` + TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"` } type TableSpecificConfig struct { @@ -854,7 +854,7 @@ type UpdateInfo struct { ClusterId types.String `tfsdk:"cluster_id" tf:"optional"` // The pipeline configuration with system defaults applied where unspecified // by the user. Not returned by ListUpdates. - Config []PipelineSpec `tfsdk:"config" tf:"optional"` + Config []PipelineSpec `tfsdk:"config" tf:"optional,object"` // The time when this update was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // If true, this update will reset all tables before running. 
diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index 8880df8f4c..502b806409 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -15,7 +15,7 @@ import ( ) type AwsCredentials struct { - StsRole []StsRole `tfsdk:"sts_role" tf:"optional"` + StsRole []StsRole `tfsdk:"sts_role" tf:"optional,object"` } type AwsKeyInfo struct { @@ -42,7 +42,7 @@ type AzureWorkspaceInfo struct { // The general workspace configurations that are specific to cloud providers. type CloudResourceContainer struct { // The general workspace configurations that are specific to Google Cloud. - Gcp []CustomerFacingGcpCloudResourceContainer `tfsdk:"gcp" tf:"optional"` + Gcp []CustomerFacingGcpCloudResourceContainer `tfsdk:"gcp" tf:"optional,object"` } type CreateAwsKeyInfo struct { @@ -59,11 +59,11 @@ type CreateAwsKeyInfo struct { } type CreateCredentialAwsCredentials struct { - StsRole []CreateCredentialStsRole `tfsdk:"sts_role" tf:"optional"` + StsRole []CreateCredentialStsRole `tfsdk:"sts_role" tf:"optional,object"` } type CreateCredentialRequest struct { - AwsCredentials []CreateCredentialAwsCredentials `tfsdk:"aws_credentials" tf:""` + AwsCredentials []CreateCredentialAwsCredentials `tfsdk:"aws_credentials" tf:"object"` // The human-readable name of the credential configuration object. CredentialsName types.String `tfsdk:"credentials_name" tf:""` } @@ -74,9 +74,9 @@ type CreateCredentialStsRole struct { } type CreateCustomerManagedKeyRequest struct { - AwsKeyInfo []CreateAwsKeyInfo `tfsdk:"aws_key_info" tf:"optional"` + AwsKeyInfo []CreateAwsKeyInfo `tfsdk:"aws_key_info" tf:"optional,object"` - GcpKeyInfo []CreateGcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional"` + GcpKeyInfo []CreateGcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional,object"` // The cases that the key can be used for. UseCases []types.String `tfsdk:"use_cases" tf:""` } @@ -89,7 +89,7 @@ type CreateGcpKeyInfo struct { type CreateNetworkRequest struct { // The Google Cloud specific information for this network (for example, the // VPC ID, subnet ID, and secondary IP ranges). - GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional"` + GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional,object"` // The human-readable name of the network configuration. NetworkName types.String `tfsdk:"network_name" tf:""` // IDs of one to five security groups associated with this network. Security @@ -102,7 +102,7 @@ type CreateNetworkRequest struct { // communication from this VPC over [AWS PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ - VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional"` + VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional,object"` // The ID of the VPC associated with this network. VPC IDs can be used in // multiple network configurations. VpcId types.String `tfsdk:"vpc_id" tf:"optional"` @@ -110,7 +110,7 @@ type CreateNetworkRequest struct { type CreateStorageConfigurationRequest struct { // Root S3 bucket information. - RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:""` + RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"object"` // The human-readable name of the storage configuration. 
StorageConfigurationName types.String `tfsdk:"storage_configuration_name" tf:""` } @@ -120,7 +120,7 @@ type CreateVpcEndpointRequest struct { AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id" tf:"optional"` // The Google Cloud specific information for this Private Service Connect // endpoint. - GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional"` + GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional,object"` // The AWS region in which this VPC endpoint object exists. Region types.String `tfsdk:"region" tf:"optional"` // The human-readable name of the storage configuration. @@ -135,7 +135,7 @@ type CreateWorkspaceRequest struct { Cloud types.String `tfsdk:"cloud" tf:"optional"` // The general workspace configurations that are specific to cloud // providers. - CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional"` + CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional,object"` // ID of the workspace's credential configuration object. CredentialsId types.String `tfsdk:"credentials_id" tf:"optional"` // The custom tags key-value pairing that is attached to this workspace. The @@ -196,9 +196,9 @@ type CreateWorkspaceRequest struct { // for a new workspace]. // // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html - GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional"` + GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"` // The configurations for the GKE cluster of a Databricks workspace. - GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional"` + GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"` // The Google Cloud region of the workspace data plane in your Google // account. For example, `us-east4`. Location types.String `tfsdk:"location" tf:"optional"` @@ -242,7 +242,7 @@ type Credential struct { // The Databricks account ID that hosts the credential. AccountId types.String `tfsdk:"account_id" tf:"optional"` - AwsCredentials []AwsCredentials `tfsdk:"aws_credentials" tf:"optional"` + AwsCredentials []AwsCredentials `tfsdk:"aws_credentials" tf:"optional,object"` // Time in epoch milliseconds when the credential was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // Databricks credential configuration ID. @@ -262,13 +262,13 @@ type CustomerManagedKey struct { // The Databricks account ID that holds the customer-managed key. AccountId types.String `tfsdk:"account_id" tf:"optional"` - AwsKeyInfo []AwsKeyInfo `tfsdk:"aws_key_info" tf:"optional"` + AwsKeyInfo []AwsKeyInfo `tfsdk:"aws_key_info" tf:"optional,object"` // Time in epoch milliseconds when the customer key was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // ID of the encryption key configuration object. CustomerManagedKeyId types.String `tfsdk:"customer_managed_key_id" tf:"optional"` - GcpKeyInfo []GcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional"` + GcpKeyInfo []GcpKeyInfo `tfsdk:"gcp_key_info" tf:"optional,object"` // The cases that the key can be used for. 
UseCases []types.String `tfsdk:"use_cases" tf:"optional"` } @@ -466,7 +466,7 @@ type Network struct { ErrorMessages []NetworkHealth `tfsdk:"error_messages" tf:"optional"` // The Google Cloud specific information for this network (for example, the // VPC ID, subnet ID, and secondary IP ranges). - GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional"` + GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional,object"` // The Databricks network configuration ID. NetworkId types.String `tfsdk:"network_id" tf:"optional"` // The human-readable name of the network configuration. @@ -479,7 +479,7 @@ type Network struct { // communication from this VPC over [AWS PrivateLink]. // // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ - VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional"` + VpcEndpoints []NetworkVpcEndpoints `tfsdk:"vpc_endpoints" tf:"optional,object"` // The ID of the VPC associated with this network configuration. VPC IDs can // be used in multiple networks. VpcId types.String `tfsdk:"vpc_id" tf:"optional"` @@ -564,7 +564,7 @@ type StorageConfiguration struct { // Time in epoch milliseconds when the storage configuration was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // Root S3 bucket information. - RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"optional"` + RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"optional,object"` // Databricks storage configuration ID. StorageConfigurationId types.String `tfsdk:"storage_configuration_id" tf:"optional"` // The human-readable name of the storage configuration. @@ -668,7 +668,7 @@ type VpcEndpoint struct { AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id" tf:"optional"` // The Google Cloud specific information for this Private Service Connect // endpoint. - GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional"` + GcpVpcEndpointInfo []GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info" tf:"optional,object"` // The AWS region in which this VPC endpoint object exists. Region types.String `tfsdk:"region" tf:"optional"` // The current state (such as `available` or `rejected`) of the VPC @@ -696,12 +696,12 @@ type Workspace struct { // The AWS region of the workspace data plane (for example, `us-west-2`). AwsRegion types.String `tfsdk:"aws_region" tf:"optional"` - AzureWorkspaceInfo []AzureWorkspaceInfo `tfsdk:"azure_workspace_info" tf:"optional"` + AzureWorkspaceInfo []AzureWorkspaceInfo `tfsdk:"azure_workspace_info" tf:"optional,object"` // The cloud name. This field always has the value `gcp`. Cloud types.String `tfsdk:"cloud" tf:"optional"` // The general workspace configurations that are specific to cloud // providers. - CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional"` + CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional,object"` // Time in epoch milliseconds when the workspace was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // ID of the workspace's credential configuration object. @@ -741,9 +741,9 @@ type Workspace struct { // for a new workspace]. 
// // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html - GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional"` + GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"` // The configurations for the GKE cluster of a Databricks workspace. - GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional"` + GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"` // The Google Cloud region of the workspace data plane in your Google // account (for example, `us-east4`). Location types.String `tfsdk:"location" tf:"optional"` diff --git a/internal/service/serving_tf/model.go b/internal/service/serving_tf/model.go index e34dcc1037..7e6bdee0a1 100755 --- a/internal/service/serving_tf/model.go +++ b/internal/service/serving_tf/model.go @@ -33,17 +33,17 @@ type Ai21LabsConfig struct { type AiGatewayConfig struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. - Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional,object"` // Configuration for payload logging using inference tables. Use these // tables to monitor and audit data being sent to and received from model // APIs and to improve model quality. - InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional,object"` // Configuration for rate limits which can be set to limit endpoint traffic. RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` // Configuration to enable usage tracking using system tables. These tables // allow you to monitor operational usage on endpoints and their associated // costs. - UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` + UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"` } type AiGatewayGuardrailParameters struct { @@ -51,7 +51,7 @@ type AiGatewayGuardrailParameters struct { // decide if the keyword exists in the request or response content. InvalidKeywords []types.String `tfsdk:"invalid_keywords" tf:"optional"` // Configuration for guardrail PII filter. - Pii []AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional"` + Pii []AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional,object"` // Indicates whether the safety filter is enabled. Safety types.Bool `tfsdk:"safety" tf:"optional"` // The list of allowed topics. Given a chat request, this guardrail flags @@ -71,9 +71,9 @@ type AiGatewayGuardrailPiiBehavior struct { type AiGatewayGuardrails struct { // Configuration for input guardrail filters. - Input []AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional"` + Input []AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional,object"` // Configuration for output guardrail filters. - Output []AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional"` + Output []AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional,object"` } type AiGatewayInferenceTableConfig struct { @@ -178,13 +178,13 @@ type AutoCaptureConfigOutput struct { // The name of the schema in Unity Catalog. 
SchemaName types.String `tfsdk:"schema_name" tf:"optional"` - State []AutoCaptureState `tfsdk:"state" tf:"optional"` + State []AutoCaptureState `tfsdk:"state" tf:"optional,object"` // The prefix of the table in Unity Catalog. TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"` } type AutoCaptureState struct { - PayloadTable []PayloadTable `tfsdk:"payload_table" tf:"optional"` + PayloadTable []PayloadTable `tfsdk:"payload_table" tf:"optional,object"` } // Get build logs for a served model @@ -228,9 +228,9 @@ type CohereConfig struct { type CreateServingEndpoint struct { // The AI Gateway configuration for the serving endpoint. NOTE: only // external model endpoints are supported as of now. - AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` + AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional,object"` // The core config of the serving endpoint. - Config []EndpointCoreConfigInput `tfsdk:"config" tf:""` + Config []EndpointCoreConfigInput `tfsdk:"config" tf:"object"` // The name of the serving endpoint. This field is required and must be // unique across a Databricks workspace. An endpoint name can consist of // alphanumeric characters, dashes, and underscores. @@ -293,7 +293,7 @@ type EmbeddingsV1ResponseEmbeddingElement struct { type EndpointCoreConfigInput struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. - AutoCaptureConfig []AutoCaptureConfigInput `tfsdk:"auto_capture_config" tf:"optional"` + AutoCaptureConfig []AutoCaptureConfigInput `tfsdk:"auto_capture_config" tf:"optional,object"` // The name of the serving endpoint to update. This field is required. Name types.String `tfsdk:"-"` // A list of served entities for the endpoint to serve. A serving endpoint @@ -304,13 +304,13 @@ type EndpointCoreConfigInput struct { ServedModels []ServedModelInput `tfsdk:"served_models" tf:"optional"` // The traffic config defining how invocations to the serving endpoint // should be routed. - TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"` + TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"` } type EndpointCoreConfigOutput struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. - AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional"` + AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional,object"` // The config version that the serving endpoint is currently serving. ConfigVersion types.Int64 `tfsdk:"config_version" tf:"optional"` // The list of served entities under the serving endpoint config. @@ -319,7 +319,7 @@ type EndpointCoreConfigOutput struct { // the serving endpoint config. ServedModels []ServedModelOutput `tfsdk:"served_models" tf:"optional"` // The traffic configuration associated with the serving endpoint config. - TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"` + TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"` } type EndpointCoreConfigSummary struct { @@ -333,7 +333,7 @@ type EndpointCoreConfigSummary struct { type EndpointPendingConfig struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. 
- AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional"` + AutoCaptureConfig []AutoCaptureConfigOutput `tfsdk:"auto_capture_config" tf:"optional,object"` // The config version that the serving endpoint is currently serving. ConfigVersion types.Int64 `tfsdk:"config_version" tf:"optional"` // The list of served entities belonging to the last issued update to the @@ -346,7 +346,7 @@ type EndpointPendingConfig struct { StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` // The traffic config defining how invocations to the serving endpoint // should be routed. - TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional"` + TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"` } type EndpointState struct { @@ -383,25 +383,25 @@ type ExportMetricsResponse struct { type ExternalModel struct { // AI21Labs Config. Only required if the provider is 'ai21labs'. - Ai21labsConfig []Ai21LabsConfig `tfsdk:"ai21labs_config" tf:"optional"` + Ai21labsConfig []Ai21LabsConfig `tfsdk:"ai21labs_config" tf:"optional,object"` // Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'. - AmazonBedrockConfig []AmazonBedrockConfig `tfsdk:"amazon_bedrock_config" tf:"optional"` + AmazonBedrockConfig []AmazonBedrockConfig `tfsdk:"amazon_bedrock_config" tf:"optional,object"` // Anthropic Config. Only required if the provider is 'anthropic'. - AnthropicConfig []AnthropicConfig `tfsdk:"anthropic_config" tf:"optional"` + AnthropicConfig []AnthropicConfig `tfsdk:"anthropic_config" tf:"optional,object"` // Cohere Config. Only required if the provider is 'cohere'. - CohereConfig []CohereConfig `tfsdk:"cohere_config" tf:"optional"` + CohereConfig []CohereConfig `tfsdk:"cohere_config" tf:"optional,object"` // Databricks Model Serving Config. Only required if the provider is // 'databricks-model-serving'. - DatabricksModelServingConfig []DatabricksModelServingConfig `tfsdk:"databricks_model_serving_config" tf:"optional"` + DatabricksModelServingConfig []DatabricksModelServingConfig `tfsdk:"databricks_model_serving_config" tf:"optional,object"` // Google Cloud Vertex AI Config. Only required if the provider is // 'google-cloud-vertex-ai'. - GoogleCloudVertexAiConfig []GoogleCloudVertexAiConfig `tfsdk:"google_cloud_vertex_ai_config" tf:"optional"` + GoogleCloudVertexAiConfig []GoogleCloudVertexAiConfig `tfsdk:"google_cloud_vertex_ai_config" tf:"optional,object"` // The name of the external model. Name types.String `tfsdk:"name" tf:""` // OpenAI Config. Only required if the provider is 'openai'. - OpenaiConfig []OpenAiConfig `tfsdk:"openai_config" tf:"optional"` + OpenaiConfig []OpenAiConfig `tfsdk:"openai_config" tf:"optional,object"` // PaLM Config. Only required if the provider is 'palm'. - PalmConfig []PaLmConfig `tfsdk:"palm_config" tf:"optional"` + PalmConfig []PaLmConfig `tfsdk:"palm_config" tf:"optional,object"` // The name of the provider for the external model. Currently, the supported // providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', // 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and @@ -513,7 +513,7 @@ type LogsRequest struct { type ModelDataPlaneInfo struct { // Information required to query DataPlane API 'query' endpoint. 
- QueryInfo oauth2.DataPlaneInfo `tfsdk:"query_info" tf:"optional"` + QueryInfo oauth2.DataPlaneInfo `tfsdk:"query_info" tf:"optional,object"` } type OpenAiConfig struct { @@ -606,11 +606,11 @@ type PayloadTable struct { type PutAiGatewayRequest struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. - Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional,object"` // Configuration for payload logging using inference tables. Use these // tables to monitor and audit data being sent to and received from model // APIs and to improve model quality. - InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional,object"` // The name of the serving endpoint whose AI Gateway is being updated. This // field is required. Name types.String `tfsdk:"-"` @@ -619,23 +619,23 @@ type PutAiGatewayRequest struct { // Configuration to enable usage tracking using system tables. These tables // allow you to monitor operational usage on endpoints and their associated // costs. - UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` + UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"` } type PutAiGatewayResponse struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. - Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + Guardrails []AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional,object"` // Configuration for payload logging using inference tables. Use these // tables to monitor and audit data being sent to and received from model // APIs and to improve model quality . - InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + InferenceTableConfig []AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional,object"` // Configuration for rate limits which can be set to limit endpoint traffic. RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` // Configuration to enable usage tracking using system tables. These tables // allow you to monitor operational usage on endpoints and their associated // costs. - UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` + UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"` } // Update rate limits of a serving endpoint @@ -656,7 +656,7 @@ type QueryEndpointInput struct { // Pandas Dataframe input in the records orientation. DataframeRecords []any `tfsdk:"dataframe_records" tf:"optional"` // Pandas Dataframe input in the split orientation. - DataframeSplit []DataframeSplitInput `tfsdk:"dataframe_split" tf:"optional"` + DataframeSplit []DataframeSplitInput `tfsdk:"dataframe_split" tf:"optional,object"` // The extra parameters field used ONLY for __completions, chat,__ and // __embeddings external & foundation model__ serving endpoints. This is a // map of strings and should only be used with other external/foundation @@ -732,7 +732,7 @@ type QueryEndpointResponse struct { // The usage object that may be returned by the __external/foundation // model__ serving endpoint. This contains information about the number of // tokens used in the prompt and response. 
- Usage []ExternalModelUsageElement `tfsdk:"usage" tf:"optional"` + Usage []ExternalModelUsageElement `tfsdk:"usage" tf:"optional,object"` } type RateLimit struct { @@ -781,7 +781,7 @@ type ServedEntityInput struct { // endpoint without external_model. If the endpoint is created without // external_model, users cannot update it to add external_model later. The // task type of all external models within an endpoint must be the same. - ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"` + ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional,object"` // ARN of the instance profile that the served entity uses to access AWS // resources. InstanceProfileArn types.String `tfsdk:"instance_profile_arn" tf:"optional"` @@ -842,12 +842,12 @@ type ServedEntityOutput struct { // foundation_model, and (entity_name, entity_version, workload_size, // workload_type, and scale_to_zero_enabled) is returned based on the // endpoint type. - ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"` + ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional,object"` // The foundation model that is served. NOTE: Only one of foundation_model, // external_model, and (entity_name, entity_version, workload_size, // workload_type, and scale_to_zero_enabled) is returned based on the // endpoint type. - FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional"` + FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional,object"` // ARN of the instance profile that the served entity uses to access AWS // resources. InstanceProfileArn types.String `tfsdk:"instance_profile_arn" tf:"optional"` @@ -861,7 +861,7 @@ type ServedEntityOutput struct { // zero. ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled" tf:"optional"` // Information corresponding to the state of the served entity. - State []ServedModelState `tfsdk:"state" tf:"optional"` + State []ServedModelState `tfsdk:"state" tf:"optional,object"` // The workload size of the served entity. The workload size corresponds to // a range of provisioned concurrency that the compute autoscales between. A // single unit of provisioned concurrency can process one request at a time. @@ -893,11 +893,11 @@ type ServedEntitySpec struct { // The external model that is served. NOTE: Only one of external_model, // foundation_model, and (entity_name, entity_version) is returned based on // the endpoint type. - ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional"` + ExternalModel []ExternalModel `tfsdk:"external_model" tf:"optional,object"` // The foundation model that is served. NOTE: Only one of foundation_model, // external_model, and (entity_name, entity_version) is returned based on // the endpoint type. - FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional"` + FoundationModel []FoundationModel `tfsdk:"foundation_model" tf:"optional,object"` // The name of the served entity. Name types.String `tfsdk:"name" tf:"optional"` } @@ -977,7 +977,7 @@ type ServedModelOutput struct { // zero. ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled" tf:"optional"` // Information corresponding to the state of the Served Model. - State []ServedModelState `tfsdk:"state" tf:"optional"` + State []ServedModelState `tfsdk:"state" tf:"optional,object"` // The workload size of the served model. The workload size corresponds to a // range of provisioned concurrency that the compute will autoscale between. 
// A single unit of provisioned concurrency can process one request at a @@ -1034,9 +1034,9 @@ type ServerLogsResponse struct { type ServingEndpoint struct { // The AI Gateway configuration for the serving endpoint. NOTE: Only // external model endpoints are currently supported. - AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` + AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional,object"` // The config that is currently being served by the endpoint. - Config []EndpointCoreConfigSummary `tfsdk:"config" tf:"optional"` + Config []EndpointCoreConfigSummary `tfsdk:"config" tf:"optional,object"` // The timestamp when the endpoint was created in Unix time. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` // The email of the user who created the serving endpoint. @@ -1049,7 +1049,7 @@ type ServingEndpoint struct { // The name of the serving endpoint. Name types.String `tfsdk:"name" tf:"optional"` // Information corresponding to the state of the serving endpoint. - State []EndpointState `tfsdk:"state" tf:"optional"` + State []EndpointState `tfsdk:"state" tf:"optional,object"` // Tags attached to the serving endpoint. Tags []EndpointTag `tfsdk:"tags" tf:"optional"` // The task type of the serving endpoint. @@ -1083,15 +1083,15 @@ type ServingEndpointAccessControlResponse struct { type ServingEndpointDetailed struct { // The AI Gateway configuration for the serving endpoint. NOTE: Only // external model endpoints are currently supported. - AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` + AiGateway []AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional,object"` // The config that is currently being served by the endpoint. - Config []EndpointCoreConfigOutput `tfsdk:"config" tf:"optional"` + Config []EndpointCoreConfigOutput `tfsdk:"config" tf:"optional,object"` // The timestamp when the endpoint was created in Unix time. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` // The email of the user who created the serving endpoint. Creator types.String `tfsdk:"creator" tf:"optional"` // Information required to query DataPlane APIs. - DataPlaneInfo []ModelDataPlaneInfo `tfsdk:"data_plane_info" tf:"optional"` + DataPlaneInfo []ModelDataPlaneInfo `tfsdk:"data_plane_info" tf:"optional,object"` // Endpoint invocation url if route optimization is enabled for endpoint EndpointUrl types.String `tfsdk:"endpoint_url" tf:"optional"` // System-generated ID of the endpoint. This is used to refer to the @@ -1102,14 +1102,14 @@ type ServingEndpointDetailed struct { // The name of the serving endpoint. Name types.String `tfsdk:"name" tf:"optional"` // The config that the endpoint is attempting to update to. - PendingConfig []EndpointPendingConfig `tfsdk:"pending_config" tf:"optional"` + PendingConfig []EndpointPendingConfig `tfsdk:"pending_config" tf:"optional,object"` // The permission level of the principal making the request. PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` // Boolean representing if route optimization has been enabled for the // endpoint RouteOptimized types.Bool `tfsdk:"route_optimized" tf:"optional"` // Information corresponding to the state of the serving endpoint. - State []EndpointState `tfsdk:"state" tf:"optional"` + State []EndpointState `tfsdk:"state" tf:"optional,object"` // Tags attached to the serving endpoint. Tags []EndpointTag `tfsdk:"tags" tf:"optional"` // The task type of the serving endpoint. 
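// A minimal, self-contained sketch of the struct-tag convention used in the
// generated models above: nested API messages are held in single-element
// slices, and this change adds an "object" token to the tf tag
// (tf:"optional,object", or tf:"object" for required fields) so that the field
// can be treated as a single nested object rather than a list. The names
// Nested, Example, and splitTFTag below are illustrative only and are not the
// provider's actual schema converter.
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Nested stands in for any generated message type.
type Nested struct {
	Value string `tfsdk:"value" tf:"optional"`
}

// Example mirrors the generated pattern shown in the diff.
type Example struct {
	Name   string   `tfsdk:"name" tf:"optional"`
	Config []Nested `tfsdk:"config" tf:"optional,object"`
}

// splitTFTag reports whether a field is optional and whether it should be
// rendered as a single nested object.
func splitTFTag(tag string) (optional, object bool) {
	for _, part := range strings.Split(tag, ",") {
		switch part {
		case "optional":
			optional = true
		case "object":
			object = true
		}
	}
	return optional, object
}

func main() {
	t := reflect.TypeOf(Example{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		optional, object := splitTFTag(f.Tag.Get("tf"))
		fmt.Printf("%s: optional=%v object=%v\n", f.Name, optional, object)
	}
	// Prints:
	// Name: optional=true object=false
	// Config: optional=true object=true
}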
@@ -1157,7 +1157,7 @@ type V1ResponseChoiceElement struct { // The logprobs returned only by the __completions__ endpoint. Logprobs types.Int64 `tfsdk:"logprobs" tf:"optional"` // The message response from the __chat__ endpoint. - Message []ChatMessage `tfsdk:"message" tf:"optional"` + Message []ChatMessage `tfsdk:"message" tf:"optional,object"` // The text response from the __completions__ endpoint. Text types.String `tfsdk:"text" tf:"optional"` } diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 1ee6dcc0a1..a3fad58cb7 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -15,7 +15,7 @@ import ( ) type AutomaticClusterUpdateSetting struct { - AutomaticClusterUpdateWorkspace []ClusterAutoRestartMessage `tfsdk:"automatic_cluster_update_workspace" tf:""` + AutomaticClusterUpdateWorkspace []ClusterAutoRestartMessage `tfsdk:"automatic_cluster_update_workspace" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -46,9 +46,9 @@ type ClusterAutoRestartMessage struct { // intended to use only for purposes like showing an error message to the // customer with the additional details. For example, using these details we // can check why exactly the feature is disabled for this customer. - EnablementDetails []ClusterAutoRestartMessageEnablementDetails `tfsdk:"enablement_details" tf:"optional"` + EnablementDetails []ClusterAutoRestartMessageEnablementDetails `tfsdk:"enablement_details" tf:"optional,object"` - MaintenanceWindow []ClusterAutoRestartMessageMaintenanceWindow `tfsdk:"maintenance_window" tf:"optional"` + MaintenanceWindow []ClusterAutoRestartMessageMaintenanceWindow `tfsdk:"maintenance_window" tf:"optional,object"` RestartEvenIfNoUpdatesAvailable types.Bool `tfsdk:"restart_even_if_no_updates_available" tf:"optional"` } @@ -70,7 +70,7 @@ type ClusterAutoRestartMessageEnablementDetails struct { } type ClusterAutoRestartMessageMaintenanceWindow struct { - WeekDayBasedSchedule []ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule `tfsdk:"week_day_based_schedule" tf:"optional"` + WeekDayBasedSchedule []ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule `tfsdk:"week_day_based_schedule" tf:"optional,object"` } type ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule struct { @@ -78,7 +78,7 @@ type ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule struct { Frequency types.String `tfsdk:"frequency" tf:"optional"` - WindowStartTime []ClusterAutoRestartMessageMaintenanceWindowWindowStartTime `tfsdk:"window_start_time" tf:"optional"` + WindowStartTime []ClusterAutoRestartMessageMaintenanceWindowWindowStartTime `tfsdk:"window_start_time" tf:"optional,object"` } type ClusterAutoRestartMessageMaintenanceWindowWindowStartTime struct { @@ -97,7 +97,7 @@ type ComplianceSecurityProfile struct { type ComplianceSecurityProfileSetting struct { // SHIELD feature: CSP - ComplianceSecurityProfileWorkspace []ComplianceSecurityProfile `tfsdk:"compliance_security_profile_workspace" tf:""` + ComplianceSecurityProfileWorkspace []ComplianceSecurityProfile `tfsdk:"compliance_security_profile_workspace" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. 
This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -115,15 +115,15 @@ type ComplianceSecurityProfileSetting struct { } type Config struct { - Email []EmailConfig `tfsdk:"email" tf:"optional"` + Email []EmailConfig `tfsdk:"email" tf:"optional,object"` - GenericWebhook []GenericWebhookConfig `tfsdk:"generic_webhook" tf:"optional"` + GenericWebhook []GenericWebhookConfig `tfsdk:"generic_webhook" tf:"optional,object"` - MicrosoftTeams []MicrosoftTeamsConfig `tfsdk:"microsoft_teams" tf:"optional"` + MicrosoftTeams []MicrosoftTeamsConfig `tfsdk:"microsoft_teams" tf:"optional,object"` - Pagerduty []PagerdutyConfig `tfsdk:"pagerduty" tf:"optional"` + Pagerduty []PagerdutyConfig `tfsdk:"pagerduty" tf:"optional,object"` - Slack []SlackConfig `tfsdk:"slack" tf:"optional"` + Slack []SlackConfig `tfsdk:"slack" tf:"optional,object"` } // Details required to configure a block list or allow list. @@ -143,7 +143,7 @@ type CreateIpAccessList struct { // An IP access list was successfully created. type CreateIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } type CreateNetworkConnectivityConfigRequest struct { @@ -161,7 +161,7 @@ type CreateNetworkConnectivityConfigRequest struct { type CreateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. - Config []Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional,object"` // The display name for the notification destination. DisplayName types.String `tfsdk:"display_name" tf:"optional"` } @@ -178,7 +178,7 @@ type CreateOboTokenRequest struct { // An on-behalf token was successfully created for the service principal. type CreateOboTokenResponse struct { - TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional,object"` // Value of the token. TokenValue types.String `tfsdk:"token_value" tf:"optional"` } @@ -205,7 +205,7 @@ type CreateTokenRequest struct { type CreateTokenResponse struct { // The information for the new token. - TokenInfo []PublicTokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []PublicTokenInfo `tfsdk:"token_info" tf:"optional,object"` // The value of the new token. TokenValue types.String `tfsdk:"token_value" tf:"optional"` } @@ -221,7 +221,7 @@ type CspEnablementAccount struct { type CspEnablementAccountSetting struct { // Account level policy for CSP - CspEnablementAccount []CspEnablementAccount `tfsdk:"csp_enablement_account" tf:""` + CspEnablementAccount []CspEnablementAccount `tfsdk:"csp_enablement_account" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -257,7 +257,7 @@ type DefaultNamespaceSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - Namespace []StringMessage `tfsdk:"namespace" tf:""` + Namespace []StringMessage `tfsdk:"namespace" tf:"object"` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. 
The setting name in the path parameter will be respected instead. @@ -430,7 +430,7 @@ type DeleteTokenManagementRequest struct { } type DisableLegacyAccess struct { - DisableLegacyAccess []BooleanMessage `tfsdk:"disable_legacy_access" tf:""` + DisableLegacyAccess []BooleanMessage `tfsdk:"disable_legacy_access" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -448,7 +448,7 @@ type DisableLegacyAccess struct { } type DisableLegacyFeatures struct { - DisableLegacyFeatures []BooleanMessage `tfsdk:"disable_legacy_features" tf:""` + DisableLegacyFeatures []BooleanMessage `tfsdk:"disable_legacy_features" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -480,7 +480,7 @@ type EnhancedSecurityMonitoring struct { type EnhancedSecurityMonitoringSetting struct { // SHIELD feature: ESM - EnhancedSecurityMonitoringWorkspace []EnhancedSecurityMonitoring `tfsdk:"enhanced_security_monitoring_workspace" tf:""` + EnhancedSecurityMonitoringWorkspace []EnhancedSecurityMonitoring `tfsdk:"enhanced_security_monitoring_workspace" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -504,7 +504,7 @@ type EsmEnablementAccount struct { type EsmEnablementAccountSetting struct { // Account level policy for ESM - EsmEnablementAccount []EsmEnablementAccount `tfsdk:"esm_enablement_account" tf:""` + EsmEnablementAccount []EsmEnablementAccount `tfsdk:"esm_enablement_account" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to // help prevent simultaneous writes of a setting overwriting each other. It @@ -539,7 +539,7 @@ type ExchangeToken struct { // Exchange a token with the IdP type ExchangeTokenRequest struct { // The partition of Credentials store - PartitionId []PartitionId `tfsdk:"partitionId" tf:""` + PartitionId []PartitionId `tfsdk:"partitionId" tf:"object"` // Array of scopes for the token request. Scopes []types.String `tfsdk:"scopes" tf:""` // A list of token types being requested @@ -554,7 +554,7 @@ type ExchangeTokenResponse struct { // An IP access list was successfully returned. type FetchIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } type GenericWebhookConfig struct { @@ -682,7 +682,7 @@ type GetIpAccessListRequest struct { type GetIpAccessListResponse struct { // Definition of an IP Access list - IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } // IP access lists were successfully returned. @@ -751,7 +751,7 @@ type GetTokenPermissionLevelsResponse struct { // Token with specified Token ID was successfully returned. 
type GetTokenResponse struct { - TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional"` + TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional,object"` } // Definition of an IP Access list @@ -931,10 +931,10 @@ type NccEgressConfig struct { // The network connectivity rules that are applied by default without // resource specific configurations. You can find the stable network // information of your serverless compute resources here. - DefaultRules []NccEgressDefaultRules `tfsdk:"default_rules" tf:"optional"` + DefaultRules []NccEgressDefaultRules `tfsdk:"default_rules" tf:"optional,object"` // The network connectivity rules that configured for each destinations. // These rules override default rules. - TargetRules []NccEgressTargetRules `tfsdk:"target_rules" tf:"optional"` + TargetRules []NccEgressTargetRules `tfsdk:"target_rules" tf:"optional,object"` } // The network connectivity rules that are applied by default without resource @@ -944,11 +944,11 @@ type NccEgressDefaultRules struct { // The stable AWS IP CIDR blocks. You can use these to configure the // firewall of your resources to allow traffic from your Databricks // workspace. - AwsStableIpRule []NccAwsStableIpRule `tfsdk:"aws_stable_ip_rule" tf:"optional"` + AwsStableIpRule []NccAwsStableIpRule `tfsdk:"aws_stable_ip_rule" tf:"optional,object"` // The stable Azure service endpoints. You can configure the firewall of // your Azure resources to allow traffic from your Databricks serverless // compute resources. - AzureServiceEndpointRule []NccAzureServiceEndpointRule `tfsdk:"azure_service_endpoint_rule" tf:"optional"` + AzureServiceEndpointRule []NccAzureServiceEndpointRule `tfsdk:"azure_service_endpoint_rule" tf:"optional,object"` } // The network connectivity rules that configured for each destinations. These @@ -964,7 +964,7 @@ type NetworkConnectivityConfiguration struct { CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // The network connectivity rules that apply to network traffic from your // serverless compute resources. - EgressConfig []NccEgressConfig `tfsdk:"egress_config" tf:"optional"` + EgressConfig []NccEgressConfig `tfsdk:"egress_config" tf:"optional,object"` // The name of the network connectivity configuration. The name can contain // alphanumeric characters, hyphens, and underscores. The length must be // between 3 and 30 characters. The name must match the regular expression @@ -984,7 +984,7 @@ type NotificationDestination struct { // The configuration for the notification destination. Will be exactly one // of the nested configs. Only returns for users with workspace admin // permissions. - Config []Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional,object"` // [Output-only] The type of the notification destination. The type can not // be changed once set. DestinationType types.String `tfsdk:"destination_type" tf:"optional"` @@ -1028,7 +1028,7 @@ type PersonalComputeSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - PersonalCompute []PersonalComputeMessage `tfsdk:"personal_compute" tf:""` + PersonalCompute []PersonalComputeMessage `tfsdk:"personal_compute" tf:"object"` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. The setting name in the path parameter will be respected instead. 
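// A hedged usage sketch for the settings_tf models: with tf:"optional,object",
// callers still populate the Go field as a slice but provide at most one
// element. The import paths below are assumptions based on this repository's
// layout (internal/service/settings_tf) and on the HashiCorp
// terraform-plugin-framework types package.
package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/settings_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	// CreateNotificationDestinationRequest.Config is tagged tf:"optional,object",
	// so it wraps exactly one nested Config value when it is set at all.
	req := settings_tf.CreateNotificationDestinationRequest{
		DisplayName: types.StringValue("ops-alerts"),
		Config: []settings_tf.Config{{
			// Config must wrap exactly one of its nested configs; EmailConfig's
			// own fields are not part of this diff, so the literal is left empty.
			Email: []settings_tf.EmailConfig{{}},
		}},
	}
	fmt.Printf("%+v\n", req)
}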
@@ -1085,7 +1085,7 @@ type RestrictWorkspaceAdminsSetting struct { // PATCH request to identify the setting version you are updating. Etag types.String `tfsdk:"etag" tf:"optional"` - RestrictWorkspaceAdmins []RestrictWorkspaceAdminsMessage `tfsdk:"restrict_workspace_admins" tf:""` + RestrictWorkspaceAdmins []RestrictWorkspaceAdminsMessage `tfsdk:"restrict_workspace_admins" tf:"object"` // Name of the corresponding setting. This field is populated in the // response, but it will not be respected even if it's set in the request // body. The setting name in the path parameter will be respected instead. @@ -1198,7 +1198,7 @@ type UpdateAutomaticClusterUpdateSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []AutomaticClusterUpdateSetting `tfsdk:"setting" tf:""` + Setting []AutomaticClusterUpdateSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1212,7 +1212,7 @@ type UpdateComplianceSecurityProfileSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []ComplianceSecurityProfileSetting `tfsdk:"setting" tf:""` + Setting []ComplianceSecurityProfileSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1226,7 +1226,7 @@ type UpdateCspEnablementAccountSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []CspEnablementAccountSetting `tfsdk:"setting" tf:""` + Setting []CspEnablementAccountSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1248,7 +1248,7 @@ type UpdateDefaultNamespaceSettingRequest struct { // assumed). This setting requires a restart of clusters and SQL warehouses // to take effect. Additionally, the default namespace only applies when // using Unity Catalog-enabled compute. - Setting []DefaultNamespaceSetting `tfsdk:"setting" tf:""` + Setting []DefaultNamespaceSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1262,7 +1262,7 @@ type UpdateDisableLegacyAccessRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []DisableLegacyAccess `tfsdk:"setting" tf:""` + Setting []DisableLegacyAccess `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1276,7 +1276,7 @@ type UpdateDisableLegacyFeaturesRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []DisableLegacyFeatures `tfsdk:"setting" tf:""` + Setting []DisableLegacyFeatures `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1290,7 +1290,7 @@ type UpdateEnhancedSecurityMonitoringSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []EnhancedSecurityMonitoringSetting `tfsdk:"setting" tf:""` + Setting []EnhancedSecurityMonitoringSetting `tfsdk:"setting" tf:"object"` } // Details required to update a setting. @@ -1304,7 +1304,7 @@ type UpdateEsmEnablementAccountSettingRequest struct { // the field mask, use comma as the separator (no space). 
FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []EsmEnablementAccountSetting `tfsdk:"setting" tf:""` + Setting []EsmEnablementAccountSetting `tfsdk:"setting" tf:"object"` } // Details required to update an IP access list. @@ -1329,7 +1329,7 @@ type UpdateIpAccessList struct { type UpdateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. - Config []Config `tfsdk:"config" tf:"optional"` + Config []Config `tfsdk:"config" tf:"optional,object"` // The display name for the notification destination. DisplayName types.String `tfsdk:"display_name" tf:"optional"` @@ -1347,7 +1347,7 @@ type UpdatePersonalComputeSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []PersonalComputeSetting `tfsdk:"setting" tf:""` + Setting []PersonalComputeSetting `tfsdk:"setting" tf:"object"` } type UpdateResponse struct { @@ -1364,5 +1364,5 @@ type UpdateRestrictWorkspaceAdminsSettingRequest struct { // the field mask, use comma as the separator (no space). FieldMask types.String `tfsdk:"field_mask" tf:""` - Setting []RestrictWorkspaceAdminsSetting `tfsdk:"setting" tf:""` + Setting []RestrictWorkspaceAdminsSetting `tfsdk:"setting" tf:"object"` } diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index 210f466b32..d83c38ff9b 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -22,7 +22,7 @@ type CentralCleanRoomInfo struct { // All collaborators who are in the clean room. Collaborators []CleanRoomCollaboratorInfo `tfsdk:"collaborators" tf:"optional"` // The collaborator who created the clean room. - Creator []CleanRoomCollaboratorInfo `tfsdk:"creator" tf:"optional"` + Creator []CleanRoomCollaboratorInfo `tfsdk:"creator" tf:"optional,object"` // The cloud where clean room tasks will be run. StationCloud types.String `tfsdk:"station_cloud" tf:"optional"` // The region where clean room tasks will be run. @@ -33,11 +33,11 @@ type CleanRoomAssetInfo struct { // Time at which this asset was added, in epoch milliseconds. AddedAt types.Int64 `tfsdk:"added_at" tf:"optional"` // Details about the notebook asset. - NotebookInfo []CleanRoomNotebookInfo `tfsdk:"notebook_info" tf:"optional"` + NotebookInfo []CleanRoomNotebookInfo `tfsdk:"notebook_info" tf:"optional,object"` // The collaborator who owns the asset. - Owner []CleanRoomCollaboratorInfo `tfsdk:"owner" tf:"optional"` + Owner []CleanRoomCollaboratorInfo `tfsdk:"owner" tf:"optional,object"` // Details about the table asset. - TableInfo []CleanRoomTableInfo `tfsdk:"table_info" tf:"optional"` + TableInfo []CleanRoomTableInfo `tfsdk:"table_info" tf:"optional,object"` // Time at which this asset was updated, in epoch milliseconds. UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` } @@ -55,7 +55,7 @@ type CleanRoomCatalogUpdate struct { // The name of the catalog to update assets. CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` // The updates to the assets in the catalog. - Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional"` + Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional,object"` } type CleanRoomCollaboratorInfo struct { @@ -83,7 +83,7 @@ type CleanRoomInfo struct { // Username of current owner of clean room. Owner types.String `tfsdk:"owner" tf:"optional"` // Central clean room details. 
- RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"optional"` + RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"optional,object"` // Time at which this clean room was updated, in epoch milliseconds. UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` // Username of clean room updater. @@ -115,7 +115,7 @@ type ColumnInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` - Mask []ColumnMask `tfsdk:"mask" tf:"optional"` + Mask []ColumnMask `tfsdk:"mask" tf:"optional,object"` // Name of Column. Name types.String `tfsdk:"name" tf:"optional"` // Whether field may be Null (default: true). @@ -154,7 +154,7 @@ type CreateCleanRoom struct { // Name of the clean room. Name types.String `tfsdk:"name" tf:""` // Central clean room details. - RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:""` + RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"object"` } type CreateProvider struct { @@ -182,13 +182,13 @@ type CreateRecipient struct { // Expiration timestamp of the token, in epoch milliseconds. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // IP Access List - IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional,object"` // Name of Recipient. Name types.String `tfsdk:"name" tf:""` // Username of the recipient owner. Owner types.String `tfsdk:"owner" tf:"optional"` // Recipient properties as map of string key-value pairs. - PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional,object"` // The one-time sharing code provided by the data recipient. This field is // required when the __authentication_type__ is **DATABRICKS**. SharingCode types.String `tfsdk:"sharing_code" tf:"optional"` @@ -447,7 +447,7 @@ type ProviderInfo struct { Owner types.String `tfsdk:"owner" tf:"optional"` // The recipient profile. This field is only present when the // authentication_type is `TOKEN`. - RecipientProfile []RecipientProfile `tfsdk:"recipient_profile" tf:"optional"` + RecipientProfile []RecipientProfile `tfsdk:"recipient_profile" tf:"optional,object"` // This field is only present when the authentication_type is `TOKEN` or not // provided. RecipientProfileStr types.String `tfsdk:"recipient_profile_str" tf:"optional"` @@ -489,7 +489,7 @@ type RecipientInfo struct { // __cloud__:__region__:__metastore-uuid__. DataRecipientGlobalMetastoreId types.String `tfsdk:"data_recipient_global_metastore_id" tf:"optional"` // IP Access List - IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional,object"` // Unique identifier of recipient's Unity Catalog metastore. This field is // only present when the __authentication_type__ is **DATABRICKS** MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` @@ -498,7 +498,7 @@ type RecipientInfo struct { // Username of the recipient owner. Owner types.String `tfsdk:"owner" tf:"optional"` // Recipient properties as map of string key-value pairs. - PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional,object"` // Cloud region of the recipient's Unity Catalog Metstore. 
This field is // only present when the __authentication_type__ is **DATABRICKS**. Region types.String `tfsdk:"region" tf:"optional"` @@ -676,7 +676,7 @@ type SharedDataObjectUpdate struct { // One of: **ADD**, **REMOVE**, **UPDATE**. Action types.String `tfsdk:"action" tf:"optional"` // The data object that is being added, removed, or updated. - DataObject []SharedDataObject `tfsdk:"data_object" tf:"optional"` + DataObject []SharedDataObject `tfsdk:"data_object" tf:"optional,object"` } type UpdateCleanRoom struct { @@ -713,7 +713,7 @@ type UpdateRecipient struct { // Expiration timestamp of the token, in epoch milliseconds. ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` // IP Access List - IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional"` + IpAccessList []IpAccessList `tfsdk:"ip_access_list" tf:"optional,object"` // Name of the recipient. Name types.String `tfsdk:"-"` // New name for the recipient. @@ -724,7 +724,7 @@ type UpdateRecipient struct { // update request, the specified properties will override the existing // properties. To add and remove properties, one would need to perform a // read-modify-write. - PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional"` + PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional,object"` } type UpdateResponse struct { diff --git a/internal/service/sql_tf/model.go b/internal/service/sql_tf/model.go index f7b5567a20..18cf637b87 100755 --- a/internal/service/sql_tf/model.go +++ b/internal/service/sql_tf/model.go @@ -25,7 +25,7 @@ type AccessControl struct { type Alert struct { // Trigger conditions of the alert. - Condition []AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` // The timestamp indicating when the alert was created. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // Custom body of alert notification, if it exists. See [here] for custom @@ -74,17 +74,17 @@ type AlertCondition struct { Op types.String `tfsdk:"op" tf:"optional"` // Name of the column from the query result to use for comparison in alert // evaluation. - Operand []AlertConditionOperand `tfsdk:"operand" tf:"optional"` + Operand []AlertConditionOperand `tfsdk:"operand" tf:"optional,object"` // Threshold value used for comparison in alert evaluation. - Threshold []AlertConditionThreshold `tfsdk:"threshold" tf:"optional"` + Threshold []AlertConditionThreshold `tfsdk:"threshold" tf:"optional,object"` } type AlertConditionOperand struct { - Column []AlertOperandColumn `tfsdk:"column" tf:"optional"` + Column []AlertOperandColumn `tfsdk:"column" tf:"optional,object"` } type AlertConditionThreshold struct { - Value []AlertOperandValue `tfsdk:"value" tf:"optional"` + Value []AlertOperandValue `tfsdk:"value" tf:"optional,object"` } type AlertOperandColumn struct { @@ -158,7 +158,7 @@ type AlertQuery struct { // on the query page. Name types.String `tfsdk:"name" tf:"optional"` - Options []QueryOptions `tfsdk:"options" tf:"optional"` + Options []QueryOptions `tfsdk:"options" tf:"optional,object"` // The text of the query to be run. Query types.String `tfsdk:"query" tf:"optional"` @@ -234,7 +234,7 @@ type CreateAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:""` // Alert configuration options. - Options []AlertOptions `tfsdk:"options" tf:""` + Options []AlertOptions `tfsdk:"options" tf:"object"` // The identifier of the workspace folder containing the object. 
Parent types.String `tfsdk:"parent" tf:"optional"` // Query ID. @@ -246,12 +246,12 @@ type CreateAlert struct { } type CreateAlertRequest struct { - Alert []CreateAlertRequestAlert `tfsdk:"alert" tf:"optional"` + Alert []CreateAlertRequestAlert `tfsdk:"alert" tf:"optional,object"` } type CreateAlertRequestAlert struct { // Trigger conditions of the alert. - Condition []AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` // Custom body of alert notification, if it exists. See [here] for custom // templating instructions. // @@ -276,7 +276,7 @@ type CreateAlertRequestAlert struct { } type CreateQueryRequest struct { - Query []CreateQueryRequestQuery `tfsdk:"query" tf:"optional"` + Query []CreateQueryRequestQuery `tfsdk:"query" tf:"optional,object"` } type CreateQueryRequestQuery struct { @@ -325,7 +325,7 @@ type CreateQueryVisualizationsLegacyRequest struct { } type CreateVisualizationRequest struct { - Visualization []CreateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []CreateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional,object"` } type CreateVisualizationRequestVisualization struct { @@ -356,7 +356,7 @@ type CreateWarehouseRequest struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -402,7 +402,7 @@ type CreateWarehouseRequest struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags []EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional,object"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -420,7 +420,7 @@ type CreateWidget struct { // Widget ID returned by :method:dashboardwidgets/create Id types.String `tfsdk:"-"` - Options []WidgetOptions `tfsdk:"options" tf:""` + Options []WidgetOptions `tfsdk:"options" tf:"object"` // If this is a textbox widget, the application displays this text. This // field is ignored if the widget contains a visualization in the // `visualization` field. @@ -459,7 +459,7 @@ type Dashboard struct { // the dashboard page. Name types.String `tfsdk:"name" tf:"optional"` - Options []DashboardOptions `tfsdk:"options" tf:"optional"` + Options []DashboardOptions `tfsdk:"options" tf:"optional,object"` // The identifier of the workspace folder containing the object. Parent types.String `tfsdk:"parent" tf:"optional"` // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * @@ -473,7 +473,7 @@ type Dashboard struct { // Timestamp when this dashboard was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User []User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional,object"` // The ID of the user who owns the dashboard. UserId types.Int64 `tfsdk:"user_id" tf:"optional"` @@ -555,7 +555,7 @@ type DateRange struct { type DateRangeValue struct { // Manually specified date-time range value. 
- DateRangeValue []DateRange `tfsdk:"date_range_value" tf:"optional"` + DateRangeValue []DateRange `tfsdk:"date_range_value" tf:"optional,object"` // Dynamic date-time range value based on current date-time. DynamicDateRangeValue types.String `tfsdk:"dynamic_date_range_value" tf:"optional"` // Date-time precision to format the value into when the query is run. @@ -624,7 +624,7 @@ type EditAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:""` // Alert configuration options. - Options []AlertOptions `tfsdk:"options" tf:""` + Options []AlertOptions `tfsdk:"options" tf:"object"` // Query ID. QueryId types.String `tfsdk:"query_id" tf:""` // Number of seconds after being triggered before the alert rearms itself @@ -642,7 +642,7 @@ type EditWarehouseRequest struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -690,7 +690,7 @@ type EditWarehouseRequest struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags []EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional,object"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -716,7 +716,7 @@ type EndpointHealth struct { Details types.String `tfsdk:"details" tf:"optional"` // The reason for failure to bring up clusters for this warehouse. This is // available when status is 'FAILED' and sometimes when it is DEGRADED. - FailureReason []TerminationReason `tfsdk:"failure_reason" tf:"optional"` + FailureReason []TerminationReason `tfsdk:"failure_reason" tf:"optional,object"` // Deprecated. split into summary and details for security Message types.String `tfsdk:"message" tf:"optional"` // Health status of the warehouse. @@ -735,7 +735,7 @@ type EndpointInfo struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -753,7 +753,7 @@ type EndpointInfo struct { EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute" tf:"optional"` // Optional health status. Assume the warehouse is healthy if this field is // not set. - Health []EndpointHealth `tfsdk:"health" tf:"optional"` + Health []EndpointHealth `tfsdk:"health" tf:"optional,object"` // unique identifier for warehouse Id types.String `tfsdk:"id" tf:"optional"` // Deprecated. 
Instance profile used to pass IAM role to the cluster @@ -787,7 +787,7 @@ type EndpointInfo struct { // current number of clusters running for the service NumClusters types.Int64 `tfsdk:"num_clusters" tf:"optional"` // ODBC parameters for the SQL warehouse - OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional"` + OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional,object"` // Configurations whether the warehouse should use spot instances. SpotInstancePolicy types.String `tfsdk:"spot_instance_policy" tf:"optional"` // State of the warehouse @@ -796,7 +796,7 @@ type EndpointInfo struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags []EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional,object"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. @@ -817,7 +817,7 @@ type EnumValue struct { // List of valid query parameter values, newline delimited. EnumOptions types.String `tfsdk:"enum_options" tf:"optional"` // If specified, allows multiple values to be selected for this parameter. - MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional,object"` // List of selected query parameter values. Values []types.String `tfsdk:"values" tf:"optional"` } @@ -1069,7 +1069,7 @@ type GetWarehouseResponse struct { // Defaults to 120 mins AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"` // Channel Details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Size of the clusters allocated for this warehouse. Increasing the size of // a spark cluster allows you to run larger queries on it. If you want to // increase the number of concurrent queries, please tune max_num_clusters. @@ -1087,7 +1087,7 @@ type GetWarehouseResponse struct { EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute" tf:"optional"` // Optional health status. Assume the warehouse is healthy if this field is // not set. - Health []EndpointHealth `tfsdk:"health" tf:"optional"` + Health []EndpointHealth `tfsdk:"health" tf:"optional,object"` // unique identifier for warehouse Id types.String `tfsdk:"id" tf:"optional"` // Deprecated. Instance profile used to pass IAM role to the cluster @@ -1121,7 +1121,7 @@ type GetWarehouseResponse struct { // current number of clusters running for the service NumClusters types.Int64 `tfsdk:"num_clusters" tf:"optional"` // ODBC parameters for the SQL warehouse - OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional"` + OdbcParams []OdbcParams `tfsdk:"odbc_params" tf:"optional,object"` // Configurations whether the warehouse should use spot instances. SpotInstancePolicy types.String `tfsdk:"spot_instance_policy" tf:"optional"` // State of the warehouse @@ -1130,7 +1130,7 @@ type GetWarehouseResponse struct { // instances and EBS volumes) associated with this SQL warehouse. // // Supported values: - Number of tags < 45. - Tags []EndpointTags `tfsdk:"tags" tf:"optional"` + Tags []EndpointTags `tfsdk:"tags" tf:"optional,object"` // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless // compute, you must set to `PRO` and also set the field // `enable_serverless_compute` to `true`. 
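// A hedged usage sketch for the sql_tf models, which follow the same
// convention: QueryParameter (further down in this file's diff) nests EnumValue
// via a field tagged tf:"optional,object", so the dropdown configuration is a
// single nested object held in a one-element slice. Import paths are
// assumptions mirroring this repository's layout (internal/service/sql_tf).
package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/sql_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	param := sql_tf.QueryParameter{
		Name:  types.StringValue("env"),
		Title: types.StringValue("Environment"),
		// EnumValue carries tf:"optional,object": at most one element, with the
		// allowed options newline-delimited as documented on EnumOptions.
		EnumValue: []sql_tf.EnumValue{{
			EnumOptions: types.StringValue("dev\nstaging\nprod"),
			Values:      []types.String{types.StringValue("dev")},
		}},
	}
	fmt.Printf("%+v\n", param)
}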
@@ -1139,9 +1139,9 @@ type GetWarehouseResponse struct { type GetWorkspaceWarehouseConfigResponse struct { // Optional: Channel selection details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Deprecated: Use sql_configuration_parameters - ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional"` + ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional,object"` // Spark confs for external hive metastore configuration JSON serialized // size must be less than <= 512K DataAccessConfig []EndpointConfPair `tfsdk:"data_access_config" tf:"optional"` @@ -1153,7 +1153,7 @@ type GetWorkspaceWarehouseConfigResponse struct { // specific type availability in the warehouse create and edit form UI. EnabledWarehouseTypes []WarehouseTypePair `tfsdk:"enabled_warehouse_types" tf:"optional"` // Deprecated: Use sql_configuration_parameters - GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional"` + GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional,object"` // GCP only: Google Service Account used to pass to cluster to access Google // Cloud Storage GoogleServiceAccount types.String `tfsdk:"google_service_account" tf:"optional"` @@ -1162,7 +1162,7 @@ type GetWorkspaceWarehouseConfigResponse struct { // Security policy for warehouses SecurityPolicy types.String `tfsdk:"security_policy" tf:"optional"` // SQL configuration parameters - SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional"` + SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional,object"` } type LegacyAlert struct { @@ -1175,11 +1175,11 @@ type LegacyAlert struct { // Name of the alert. Name types.String `tfsdk:"name" tf:"optional"` // Alert configuration options. - Options []AlertOptions `tfsdk:"options" tf:"optional"` + Options []AlertOptions `tfsdk:"options" tf:"optional,object"` // The identifier of the workspace folder containing the object. Parent types.String `tfsdk:"parent" tf:"optional"` - Query []AlertQuery `tfsdk:"query" tf:"optional"` + Query []AlertQuery `tfsdk:"query" tf:"optional,object"` // Number of seconds after being triggered before the alert rearms itself // and can be triggered again. If `null`, alert will never be triggered // again. @@ -1191,7 +1191,7 @@ type LegacyAlert struct { // Timestamp when the alert was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User []User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional,object"` } type LegacyQuery struct { @@ -1228,7 +1228,7 @@ type LegacyQuery struct { // type parameters are handled safely. IsSafe types.Bool `tfsdk:"is_safe" tf:"optional"` - LastModifiedBy []User `tfsdk:"last_modified_by" tf:"optional"` + LastModifiedBy []User `tfsdk:"last_modified_by" tf:"optional,object"` // The ID of the user who last saved changes to this query. LastModifiedById types.Int64 `tfsdk:"last_modified_by_id" tf:"optional"` // If there is a cached result for this query and user, this field includes @@ -1239,7 +1239,7 @@ type LegacyQuery struct { // on the query page. Name types.String `tfsdk:"name" tf:"optional"` - Options []QueryOptions `tfsdk:"options" tf:"optional"` + Options []QueryOptions `tfsdk:"options" tf:"optional,object"` // The identifier of the workspace folder containing the object. 
Parent types.String `tfsdk:"parent" tf:"optional"` // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * @@ -1258,7 +1258,7 @@ type LegacyQuery struct { // The timestamp at which this query was last updated. UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` - User []User `tfsdk:"user" tf:"optional"` + User []User `tfsdk:"user" tf:"optional,object"` // The ID of the user who owns the query. UserId types.Int64 `tfsdk:"user_id" tf:"optional"` @@ -1285,7 +1285,7 @@ type LegacyVisualization struct { // settings in JSON. Options any `tfsdk:"options" tf:"optional"` - Query []LegacyQuery `tfsdk:"query" tf:"optional"` + Query []LegacyQuery `tfsdk:"query" tf:"optional,object"` // The type of visualization: chart, table, pivot table, and so on. Type types.String `tfsdk:"type" tf:"optional"` @@ -1307,7 +1307,7 @@ type ListAlertsResponse struct { type ListAlertsResponseAlert struct { // Trigger conditions of the alert. - Condition []AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` // The timestamp indicating when the alert was created. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // Custom body of alert notification, if it exists. See [here] for custom @@ -1528,7 +1528,7 @@ type Parameter struct { EnumOptions types.String `tfsdk:"enumOptions" tf:"optional"` // If specified, allows multiple values to be selected for this parameter. // Only applies to dropdown list and query-based dropdown list parameters. - MultiValuesOptions []MultiValuesOptions `tfsdk:"multiValuesOptions" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multiValuesOptions" tf:"optional,object"` // The literal parameter marker that appears between double curly braces in // the query text. Name types.String `tfsdk:"name" tf:"optional"` @@ -1584,7 +1584,7 @@ type Query struct { type QueryBackedValue struct { // If specified, allows multiple values to be selected for this parameter. - MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional"` + MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional,object"` // UUID of the query that provides the parameter values. QueryId types.String `tfsdk:"query_id" tf:"optional"` // List of selected query parameter values. @@ -1622,7 +1622,7 @@ type QueryEditContent struct { type QueryFilter struct { // A range filter for query submitted time. The time range must be <= 30 // days. - QueryStartTimeRange []TimeRange `tfsdk:"query_start_time_range" tf:"optional"` + QueryStartTimeRange []TimeRange `tfsdk:"query_start_time_range" tf:"optional,object"` // A list of statement IDs. StatementIds []types.String `tfsdk:"statement_ids" tf:"optional"` @@ -1635,7 +1635,7 @@ type QueryFilter struct { type QueryInfo struct { // SQL Warehouse channel information at the time of query execution - ChannelUsed []ChannelInfo `tfsdk:"channel_used" tf:"optional"` + ChannelUsed []ChannelInfo `tfsdk:"channel_used" tf:"optional,object"` // Total execution time of the statement ( excluding result fetch time ). Duration types.Int64 `tfsdk:"duration" tf:"optional"` // Alias for `warehouse_id`. @@ -1654,7 +1654,7 @@ type QueryInfo struct { // A key that can be used to look up query details. LookupKey types.String `tfsdk:"lookup_key" tf:"optional"` // Metrics about query execution. 
- Metrics []QueryMetrics `tfsdk:"metrics" tf:"optional"` + Metrics []QueryMetrics `tfsdk:"metrics" tf:"optional,object"` // Whether plans exist for the execution, or the reason why they are missing PlansState types.String `tfsdk:"plans_state" tf:"optional"` // The time the query ended. @@ -1773,21 +1773,21 @@ type QueryOptions struct { type QueryParameter struct { // Date-range query parameter value. Can only specify one of // `dynamic_date_range_value` or `date_range_value`. - DateRangeValue []DateRangeValue `tfsdk:"date_range_value" tf:"optional"` + DateRangeValue []DateRangeValue `tfsdk:"date_range_value" tf:"optional,object"` // Date query parameter value. Can only specify one of `dynamic_date_value` // or `date_value`. - DateValue []DateValue `tfsdk:"date_value" tf:"optional"` + DateValue []DateValue `tfsdk:"date_value" tf:"optional,object"` // Dropdown query parameter value. - EnumValue []EnumValue `tfsdk:"enum_value" tf:"optional"` + EnumValue []EnumValue `tfsdk:"enum_value" tf:"optional,object"` // Literal parameter marker that appears between double curly braces in the // query text. Name types.String `tfsdk:"name" tf:"optional"` // Numeric query parameter value. - NumericValue []NumericValue `tfsdk:"numeric_value" tf:"optional"` + NumericValue []NumericValue `tfsdk:"numeric_value" tf:"optional,object"` // Query-based dropdown query parameter value. - QueryBackedValue []QueryBackedValue `tfsdk:"query_backed_value" tf:"optional"` + QueryBackedValue []QueryBackedValue `tfsdk:"query_backed_value" tf:"optional,object"` // Text query parameter value. - TextValue []TextValue `tfsdk:"text_value" tf:"optional"` + TextValue []TextValue `tfsdk:"text_value" tf:"optional,object"` // Text displayed in the user-facing parameter widget in the UI. Title types.String `tfsdk:"title" tf:"optional"` } @@ -1874,7 +1874,7 @@ type ResultManifest struct { Format types.String `tfsdk:"format" tf:"optional"` // The schema is an ordered list of column descriptions. - Schema []ResultSchema `tfsdk:"schema" tf:"optional"` + Schema []ResultSchema `tfsdk:"schema" tf:"optional,object"` // The total number of bytes in the result set. This field is not available // when using `INLINE` disposition. TotalByteCount types.Int64 `tfsdk:"total_byte_count" tf:"optional"` @@ -1920,9 +1920,9 @@ type SetResponse struct { type SetWorkspaceWarehouseConfigRequest struct { // Optional: Channel selection details - Channel []Channel `tfsdk:"channel" tf:"optional"` + Channel []Channel `tfsdk:"channel" tf:"optional,object"` // Deprecated: Use sql_configuration_parameters - ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional"` + ConfigParam []RepeatedEndpointConfPairs `tfsdk:"config_param" tf:"optional,object"` // Spark confs for external hive metastore configuration JSON serialized // size must be less than <= 512K DataAccessConfig []EndpointConfPair `tfsdk:"data_access_config" tf:"optional"` @@ -1934,7 +1934,7 @@ type SetWorkspaceWarehouseConfigRequest struct { // specific type availability in the warehouse create and edit form UI. 
EnabledWarehouseTypes []WarehouseTypePair `tfsdk:"enabled_warehouse_types" tf:"optional"` // Deprecated: Use sql_configuration_parameters - GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional"` + GlobalParam []RepeatedEndpointConfPairs `tfsdk:"global_param" tf:"optional,object"` // GCP only: Google Service Account used to pass to cluster to access Google // Cloud Storage GoogleServiceAccount types.String `tfsdk:"google_service_account" tf:"optional"` @@ -1943,7 +1943,7 @@ type SetWorkspaceWarehouseConfigRequest struct { // Security policy for warehouses SecurityPolicy types.String `tfsdk:"security_policy" tf:"optional"` // SQL configuration parameters - SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional"` + SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional,object"` } type SetWorkspaceWarehouseConfigResponse struct { @@ -1976,21 +1976,21 @@ type StatementParameterListItem struct { type StatementResponse struct { // The result manifest provides schema and metadata for the result set. - Manifest []ResultManifest `tfsdk:"manifest" tf:"optional"` + Manifest []ResultManifest `tfsdk:"manifest" tf:"optional,object"` - Result []ResultData `tfsdk:"result" tf:"optional"` + Result []ResultData `tfsdk:"result" tf:"optional,object"` // The statement ID is returned upon successfully submitting a SQL // statement, and is a required reference for all subsequent calls. StatementId types.String `tfsdk:"statement_id" tf:"optional"` // The status response includes execution state and if relevant, error // information. - Status []StatementStatus `tfsdk:"status" tf:"optional"` + Status []StatementStatus `tfsdk:"status" tf:"optional,object"` } // The status response includes execution state and if relevant, error // information. type StatementStatus struct { - Error []ServiceError `tfsdk:"error" tf:"optional"` + Error []ServiceError `tfsdk:"error" tf:"optional,object"` // Statement execution state: - `PENDING`: waiting for warehouse - // `RUNNING`: running - `SUCCEEDED`: execution was successful, result data // available for fetch - `FAILED`: execution failed; reason for failure @@ -2061,7 +2061,7 @@ type TrashQueryRequest struct { } type UpdateAlertRequest struct { - Alert []UpdateAlertRequestAlert `tfsdk:"alert" tf:"optional"` + Alert []UpdateAlertRequestAlert `tfsdk:"alert" tf:"optional,object"` Id types.String `tfsdk:"-"` // Field mask is required to be passed into the PATCH request. Field mask @@ -2073,7 +2073,7 @@ type UpdateAlertRequest struct { type UpdateAlertRequestAlert struct { // Trigger conditions of the alert. - Condition []AlertCondition `tfsdk:"condition" tf:"optional"` + Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` // Custom body of alert notification, if it exists. See [here] for custom // templating instructions. // @@ -2101,7 +2101,7 @@ type UpdateAlertRequestAlert struct { type UpdateQueryRequest struct { Id types.String `tfsdk:"-"` - Query []UpdateQueryRequestQuery `tfsdk:"query" tf:"optional"` + Query []UpdateQueryRequestQuery `tfsdk:"query" tf:"optional,object"` // Field mask is required to be passed into the PATCH request. Field mask // specifies which fields of the setting payload will be updated. The field // mask needs to be supplied as single string. To specify multiple fields in @@ -2147,7 +2147,7 @@ type UpdateVisualizationRequest struct { // the field mask, use comma as the separator (no space). 
UpdateMask types.String `tfsdk:"update_mask" tf:""` - Visualization []UpdateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []UpdateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional,object"` } type UpdateVisualizationRequestVisualization struct { @@ -2260,13 +2260,13 @@ type Widget struct { // The unique ID for this widget. Id types.String `tfsdk:"id" tf:"optional"` - Options []WidgetOptions `tfsdk:"options" tf:"optional"` + Options []WidgetOptions `tfsdk:"options" tf:"optional,object"` // The visualization description API changes frequently and is unsupported. // You can duplicate a visualization by copying description objects received // _from the API_ and then using them to create a new one with a POST // request to the same endpoint. Databricks does not recommend constructing // ad-hoc visualizations entirely in JSON. - Visualization []LegacyVisualization `tfsdk:"visualization" tf:"optional"` + Visualization []LegacyVisualization `tfsdk:"visualization" tf:"optional,object"` // Unused field. Width types.Int64 `tfsdk:"width" tf:"optional"` } @@ -2284,7 +2284,7 @@ type WidgetOptions struct { ParameterMappings any `tfsdk:"parameterMappings" tf:"optional"` // Coordinates of this widget on a dashboard. This portion of the API // changes frequently and is unsupported. - Position []WidgetPosition `tfsdk:"position" tf:"optional"` + Position []WidgetPosition `tfsdk:"position" tf:"optional,object"` // Custom title of the widget Title types.String `tfsdk:"title" tf:"optional"` // Timestamp of the last time this object was updated. diff --git a/internal/service/vectorsearch_tf/model.go b/internal/service/vectorsearch_tf/model.go index 11f4179394..e0590e7ad9 100755 --- a/internal/service/vectorsearch_tf/model.go +++ b/internal/service/vectorsearch_tf/model.go @@ -29,10 +29,10 @@ type CreateEndpoint struct { type CreateVectorIndexRequest struct { // Specification for Delta Sync Index. Required if `index_type` is // `DELTA_SYNC`. - DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecRequest `tfsdk:"delta_sync_index_spec" tf:"optional"` + DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecRequest `tfsdk:"delta_sync_index_spec" tf:"optional,object"` // Specification for Direct Vector Access Index. Required if `index_type` is // `DIRECT_ACCESS`. - DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional"` + DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional,object"` // Name of the endpoint to be used for serving the index EndpointName types.String `tfsdk:"endpoint_name" tf:""` // There are 2 types of Vector Search indexes: @@ -50,7 +50,7 @@ type CreateVectorIndexRequest struct { } type CreateVectorIndexResponse struct { - VectorIndex []VectorIndex `tfsdk:"vector_index" tf:"optional"` + VectorIndex []VectorIndex `tfsdk:"vector_index" tf:"optional,object"` } // Result of the upsert or delete operation. @@ -73,7 +73,7 @@ type DeleteDataVectorIndexRequest struct { // Response to a delete data vector index request. type DeleteDataVectorIndexResponse struct { // Result of the upsert or delete operation. - Result []DeleteDataResult `tfsdk:"result" tf:"optional"` + Result []DeleteDataResult `tfsdk:"result" tf:"optional,object"` // Status of the delete operation. 
Status types.String `tfsdk:"status" tf:"optional"` } @@ -181,7 +181,7 @@ type EndpointInfo struct { // Creator of the endpoint Creator types.String `tfsdk:"creator" tf:"optional"` // Current status of the endpoint - EndpointStatus []EndpointStatus `tfsdk:"endpoint_status" tf:"optional"` + EndpointStatus []EndpointStatus `tfsdk:"endpoint_status" tf:"optional,object"` // Type of endpoint. EndpointType types.String `tfsdk:"endpoint_type" tf:"optional"` // Unique identifier of the endpoint @@ -255,7 +255,7 @@ type MapStringValueEntry struct { // Column name. Key types.String `tfsdk:"key" tf:"optional"` // Column value, nullable. - Value []Value `tfsdk:"value" tf:"optional"` + Value []Value `tfsdk:"value" tf:"optional,object"` } type MiniVectorIndex struct { @@ -315,13 +315,13 @@ type QueryVectorIndexRequest struct { type QueryVectorIndexResponse struct { // Metadata about the result set. - Manifest []ResultManifest `tfsdk:"manifest" tf:"optional"` + Manifest []ResultManifest `tfsdk:"manifest" tf:"optional,object"` // [Optional] Token that can be used in `QueryVectorIndexNextPage` API to // get next page of results. If more than 1000 results satisfy the query, // they are returned in groups of 1000. Empty value means no more results. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` // Data returned in the query result. - Result []ResultData `tfsdk:"result" tf:"optional"` + Result []ResultData `tfsdk:"result" tf:"optional,object"` } // Data returned in the query result. @@ -392,7 +392,7 @@ type UpsertDataVectorIndexRequest struct { // Response to an upsert data vector index request. type UpsertDataVectorIndexResponse struct { // Result of the upsert or delete operation. - Result []UpsertDataResult `tfsdk:"result" tf:"optional"` + Result []UpsertDataResult `tfsdk:"result" tf:"optional,object"` // Status of the upsert operation. Status types.String `tfsdk:"status" tf:"optional"` } @@ -400,7 +400,7 @@ type UpsertDataVectorIndexResponse struct { type Value struct { BoolValue types.Bool `tfsdk:"bool_value" tf:"optional"` - ListValue []ListValue `tfsdk:"list_value" tf:"optional"` + ListValue []ListValue `tfsdk:"list_value" tf:"optional,object"` NullValue types.String `tfsdk:"null_value" tf:"optional"` @@ -408,16 +408,16 @@ type Value struct { StringValue types.String `tfsdk:"string_value" tf:"optional"` - StructValue []Struct `tfsdk:"struct_value" tf:"optional"` + StructValue []Struct `tfsdk:"struct_value" tf:"optional,object"` } type VectorIndex struct { // The user who created the index. 
Creator types.String `tfsdk:"creator" tf:"optional"` - DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecResponse `tfsdk:"delta_sync_index_spec" tf:"optional"` + DeltaSyncIndexSpec []DeltaSyncVectorIndexSpecResponse `tfsdk:"delta_sync_index_spec" tf:"optional,object"` - DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional"` + DirectAccessIndexSpec []DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec" tf:"optional,object"` // Name of the endpoint associated with the index EndpointName types.String `tfsdk:"endpoint_name" tf:"optional"` // There are 2 types of Vector Search indexes: @@ -433,7 +433,7 @@ type VectorIndex struct { // Primary key of the index PrimaryKey types.String `tfsdk:"primary_key" tf:"optional"` - Status []VectorIndexStatus `tfsdk:"status" tf:"optional"` + Status []VectorIndexStatus `tfsdk:"status" tf:"optional,object"` } type VectorIndexStatus struct { diff --git a/internal/service/workspace_tf/model.go b/internal/service/workspace_tf/model.go index 7564d08cce..6845913417 100755 --- a/internal/service/workspace_tf/model.go +++ b/internal/service/workspace_tf/model.go @@ -73,7 +73,7 @@ type CreateRepoRequest struct { Provider types.String `tfsdk:"provider" tf:""` // If specified, the repo will be created with sparse checkout enabled. You // cannot enable/disable sparse checkout after the repo is created. - SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional,object"` // URL of the Git repository to be linked. Url types.String `tfsdk:"url" tf:""` } @@ -91,14 +91,14 @@ type CreateRepoResponse struct { // Git provider of the linked Git repository. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout settings for the Git folder (repo). - SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional,object"` // URL of the linked Git repository. Url types.String `tfsdk:"url" tf:"optional"` } type CreateScope struct { // The metadata for the secret scope if the type is `AZURE_KEYVAULT` - BackendAzureKeyvault []AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional"` + BackendAzureKeyvault []AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional,object"` // The principal that is initially granted `MANAGE` permission to the // created scope. InitialManagePrincipal types.String `tfsdk:"initial_manage_principal" tf:"optional"` @@ -270,7 +270,7 @@ type GetRepoResponse struct { // Git provider of the linked Git repository. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout settings for the Git folder (repo). - SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional,object"` // URL of the linked Git repository. Url types.String `tfsdk:"url" tf:"optional"` } @@ -516,7 +516,7 @@ type RepoInfo struct { // Git provider of the remote git repository, e.g. `gitHub`. Provider types.String `tfsdk:"provider" tf:"optional"` // Sparse checkout config for the git folder (repo). - SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + SparseCheckout []SparseCheckout `tfsdk:"sparse_checkout" tf:"optional,object"` // URL of the remote git repository. Url types.String `tfsdk:"url" tf:"optional"` } @@ -560,7 +560,7 @@ type SecretScope struct { // The type of secret scope backend. 
	BackendType types.String `tfsdk:"backend_type" tf:"optional"`
	// The metadata for the secret scope if the type is `AZURE_KEYVAULT`
-	KeyvaultMetadata []AzureKeyVaultSecretScopeMetadata `tfsdk:"keyvault_metadata" tf:"optional"`
+	KeyvaultMetadata []AzureKeyVaultSecretScopeMetadata `tfsdk:"keyvault_metadata" tf:"optional,object"`
	// A unique name to identify the secret scope.
	Name types.String `tfsdk:"name" tf:"optional"`
 }
@@ -617,7 +617,7 @@ type UpdateRepoRequest struct {
	RepoId types.Int64 `tfsdk:"-"`
	// If specified, update the sparse checkout settings. The update will fail
	// if sparse checkout is not enabled for the repo.
-	SparseCheckout []SparseCheckoutUpdate `tfsdk:"sparse_checkout" tf:"optional"`
+	SparseCheckout []SparseCheckoutUpdate `tfsdk:"sparse_checkout" tf:"optional,object"`
	// Tag that the local version of the repo is checked out to. Updating the
	// repo to a tag puts the repo in a detached HEAD state. Before committing
	// new changes, you must update the repo to a branch instead of the detached

From e46ad2b9e88402e5f33a22903074c0de2abdd4cb Mon Sep 17 00:00:00 2001
From: Alex Ott
Date: Sat, 12 Oct 2024 02:26:26 -0400
Subject: [PATCH 44/99] [Feature] Add `databricks_registered_model` data source (#4033)

## Changes

## Tests
- [x] `make test` run locally
- [x] relevant change in `docs/` folder
- [x] covered with integration tests in `internal/acceptance`
- [x] relevant acceptance tests are passing
- [x] using Go SDK

---------

Co-authored-by: Miles Yucht
---
 docs/data-sources/registered_model.md         | 53 +++++++++++
 internal/acceptance/registered_model_test.go  |  7 ++
 internal/providers/pluginfw/pluginfw.go       |  2 +
 .../registered_model/data_registered_model.go | 90 +++++++++++++++++++
 4 files changed, 152 insertions(+)
 create mode 100644 docs/data-sources/registered_model.md
 create mode 100644 internal/providers/pluginfw/resources/registered_model/data_registered_model.go

diff --git a/docs/data-sources/registered_model.md b/docs/data-sources/registered_model.md
new file mode 100644
index 0000000000..065396d0c2
--- /dev/null
+++ b/docs/data-sources/registered_model.md
@@ -0,0 +1,53 @@
+---
+subcategory: "Unity Catalog"
+---
+# databricks_registered_model Data Source
+
+-> This data source can only be used with a workspace-level provider!
+
+This data source allows you to get information about a [Model in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks.
+
+## Example Usage
+
+```hcl
+data "databricks_registered_model" "this" {
+  full_name = "main.default.my_model"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `full_name` - (Required, String) The fully-qualified name of the registered model (`catalog_name.schema_name.name`).
+* `include_aliases` - (Optional, Boolean) flag to specify whether the list of aliases should be included in the output.
+* `include_browse` - (Optional, Boolean) flag to specify whether to include registered models for which the principal can only access selective metadata.
+
+## Attribute Reference
+
+The following attributes are exported:
+
+* `model_info` - block with information about the model in Unity Catalog:
+  * `aliases` - the list of aliases associated with this model. Each item is an object consisting of the following attributes:
+    * `alias_name` - string with the name of the alias
+    * `version_num` - associated model version
+  * `catalog_name` - The name of the catalog where the schema and the registered model reside.
+  * `comment` - The comment attached to the registered model.
+ * `created_at` - the Unix timestamp at the model's creation + * `created_by` - the identifier of the user who created the model + * `full_name` - The fully-qualified name of the registered model (`catalog_name.schema_name.name`). + * `metastore_id` - the unique identifier of the metastore + * `name` - The name of the registered model. + * `owner` - Name of the registered model owner. + * `schema_name` - The name of the schema where the registered model resides. + * `storage_location` - The storage location under which model version data files are stored. + * `updated_at` - the timestamp of the last time changes were made to the model + * `updated_by` - the identifier of the user who updated the model last time + +## Related Resources + +The following resources are often used in the same context: + +* [databricks_registered_model](../resources/schema.md) resource to manage models within Unity Catalog. +* [databricks_model_serving](../resources/model_serving.md) to serve this model on a Databricks serving endpoint. +* [databricks_mlflow_experiment](../resources/mlflow_experiment.md) to manage [MLflow experiments](https://docs.databricks.com/data/data-sources/mlflow-experiment.html) in Databricks. diff --git a/internal/acceptance/registered_model_test.go b/internal/acceptance/registered_model_test.go index f4293b3f98..b35bd288db 100644 --- a/internal/acceptance/registered_model_test.go +++ b/internal/acceptance/registered_model_test.go @@ -43,6 +43,13 @@ func TestUcAccRegisteredModel(t *testing.T) { owner = "account users" comment = "new comment" } + data "databricks_registered_model" "model" { + full_name = databricks_registered_model.model.id + include_model_versions = true + } + output "model" { + value = data.databricks_registered_model.model + } `, }, ) diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index 71e91dccd8..cc51975133 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -19,6 +19,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/registered_model" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" "github.com/hashicorp/terraform-plugin-framework/datasource" @@ -52,6 +53,7 @@ func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []f return []func() datasource.DataSource{ cluster.DataSourceCluster, volume.DataSourceVolumes, + registered_model.DataSourceRegisteredModel, } } diff --git a/internal/providers/pluginfw/resources/registered_model/data_registered_model.go b/internal/providers/pluginfw/resources/registered_model/data_registered_model.go new file mode 100644 index 0000000000..64ed516e51 --- /dev/null +++ b/internal/providers/pluginfw/resources/registered_model/data_registered_model.go @@ -0,0 +1,90 @@ +package registered_model + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + 
"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/catalog_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func DataSourceRegisteredModel() datasource.DataSource { + return &RegisteredModelDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &RegisteredModelDataSource{} + +type RegisteredModelDataSource struct { + Client *common.DatabricksClient +} + +type RegisteredModelData struct { + FullName types.String `tfsdk:"full_name"` + IncludeAliases types.Bool `tfsdk:"include_aliases" tf:"optional"` + IncludeBrowse types.Bool `tfsdk:"include_browse" tf:"optional"` + ModelInfo []catalog_tf.RegisteredModelInfo `tfsdk:"model_info" tf:"optional,computed"` +} + +func (d *RegisteredModelDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "databricks_registered_model" +} + +func (d *RegisteredModelDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(RegisteredModelData{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *RegisteredModelDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *RegisteredModelDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var registeredModel RegisteredModelData + diags = req.Config.Get(ctx, ®isteredModel) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + modelFullName := registeredModel.FullName.ValueString() + modelInfoSdk, err := w.RegisteredModels.Get(ctx, catalog.GetRegisteredModelRequest{ + FullName: modelFullName, + IncludeAliases: registeredModel.IncludeAliases.ValueBool(), + IncludeBrowse: registeredModel.IncludeBrowse.ValueBool(), + }) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + } + resp.Diagnostics.AddError(fmt.Sprintf("failed to get registered model %s", modelFullName), err.Error()) + return + } + var modelInfo catalog_tf.RegisteredModelInfo + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, modelInfoSdk, &modelInfo)...) + if resp.Diagnostics.HasError() { + return + } + if modelInfo.Aliases == nil { + modelInfo.Aliases = []catalog_tf.RegisteredModelAlias{} + } + registeredModel.ModelInfo = append(registeredModel.ModelInfo, modelInfo) + resp.Diagnostics.Append(resp.State.Set(ctx, registeredModel)...) 
+} From 45ff6680d275deea9a0dff76c0ef2625fa29f2a7 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Sat, 12 Oct 2024 06:23:24 -0400 Subject: [PATCH 45/99] [Doc] Reformat code examples in documentation (#4081) ## Changes ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/data-sources/mlflow_models.md | 6 +++--- docs/guides/azure-authenticate-with-oidc.md | 16 ++++++++-------- docs/resources/budget.md | 20 ++++++++++---------- docs/resources/sql_table.md | 4 ++-- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/docs/data-sources/mlflow_models.md b/docs/data-sources/mlflow_models.md index b8b67c9096..d62c640bf9 100644 --- a/docs/data-sources/mlflow_models.md +++ b/docs/data-sources/mlflow_models.md @@ -22,14 +22,14 @@ data "databricks_mlflow_models" "this" {} check "model_list_not_empty" { assert { - condition = length(data.databricks_mlflow_models.this.names) != 0 + condition = length(data.databricks_mlflow_models.this.names) != 0 error_message = "Model list is empty." } } check "model_list_contains_model" { assert { - condition = contains(data.databricks_mlflow_models.this.names, "model_1") + condition = contains(data.databricks_mlflow_models.this.names, "model_1") error_message = "model_1 is missing in model list." } } @@ -39,4 +39,4 @@ check "model_list_contains_model" { This data source exports the following attributes: -* `names` - List of names of [databricks_mlflow_model](./mlflow_model.md) \ No newline at end of file +* `names` - List of names of [databricks_mlflow_model](./mlflow_model.md) diff --git a/docs/guides/azure-authenticate-with-oidc.md b/docs/guides/azure-authenticate-with-oidc.md index 85015a77cb..1aad2f6f03 100644 --- a/docs/guides/azure-authenticate-with-oidc.md +++ b/docs/guides/azure-authenticate-with-oidc.md @@ -45,9 +45,9 @@ Finally, grant the service principal access to the workspace. ```hcl resource "azurerm_role_assignment" "example" { - scope = "/subscriptions//resourceGroups//providers/Microsoft.Databricks/workspaces/" - role_definition_name = "Contributor" - principal_id = azuread_service_principal.example.id + scope = "/subscriptions//resourceGroups//providers/Microsoft.Databricks/workspaces/" + role_definition_name = "Contributor" + principal_id = azuread_service_principal.example.id } ``` @@ -59,7 +59,7 @@ In your Terraform configuration, configure the Databricks provider to use the se provider "databricks" { azure_client_id = "" azure_tenant_id = "" - host = "https://" + host = "https://" } ``` @@ -140,9 +140,9 @@ Finally, grant the service principal access to the workspace. 
```hcl resource "azurerm_role_assignment" "example" { - scope = "/subscriptions//resourceGroups//providers/Microsoft.Databricks/workspaces/" - role_definition_name = "Contributor" - principal_id = azuread_service_principal.example.id + scope = "/subscriptions//resourceGroups//providers/Microsoft.Databricks/workspaces/" + role_definition_name = "Contributor" + principal_id = azuread_service_principal.example.id } ``` @@ -156,7 +156,7 @@ In your Terraform configuration, configure the Databricks provider to use the se provider "databricks" { azure_client_id = "" azure_tenant_id = "" - host = "https://" + host = "https://" } ``` diff --git a/docs/resources/budget.md b/docs/resources/budget.md index 31378d4254..37ce71c75d 100644 --- a/docs/resources/budget.md +++ b/docs/resources/budget.md @@ -16,10 +16,10 @@ resource "databricks_budget" "this" { display_name = "databricks-workspace-budget" alert_configurations { - time_period = "MONTH" - trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" - quantity_type = "LIST_PRICE_DOLLARS_USD" - quantity_threshold = "840" + time_period = "MONTH" + trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" + quantity_type = "LIST_PRICE_DOLLARS_USD" + quantity_threshold = "840" action_configurations { action_type = "EMAIL_NOTIFICATION" @@ -30,24 +30,24 @@ resource "databricks_budget" "this" { filter { workspace_id { operator = "IN" - values = [ + values = [ 1234567890098765 ] } - + tags { - key = "Team" + key = "Team" value { operator = "IN" - values = ["Data Science"] + values = ["Data Science"] } } tags { - key = "Environment" + key = "Environment" value { operator = "IN" - values = ["Development"] + values = ["Development"] } } } diff --git a/docs/resources/sql_table.md b/docs/resources/sql_table.md index 67483248d6..87b2d11533 100644 --- a/docs/resources/sql_table.md +++ b/docs/resources/sql_table.md @@ -136,8 +136,8 @@ resource "databricks_sql_table" "thing" { data_source_format = "DELTA" storage_location = "" column { - name = "id" - type = "bigint" + name = "id" + type = "bigint" identity = "default" } column { From ef27ec511d51906b1535cb2534e2454e87477eb6 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Tue, 15 Oct 2024 03:36:20 -0400 Subject: [PATCH 46/99] [Internal] Fix acceptance test for `databricks_registered_model` data source (#4105) ## Changes Remove unsupported attribute `include_model_versions` ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [ ] using Go SDK --- internal/acceptance/registered_model_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/acceptance/registered_model_test.go b/internal/acceptance/registered_model_test.go index b35bd288db..adbaf9b11e 100644 --- a/internal/acceptance/registered_model_test.go +++ b/internal/acceptance/registered_model_test.go @@ -45,7 +45,6 @@ func TestUcAccRegisteredModel(t *testing.T) { } data "databricks_registered_model" "model" { full_name = databricks_registered_model.model.id - include_model_versions = true } output "model" { value = data.databricks_registered_model.model From 1f59bfdad60cb5e8c16ad24d054bc15f3cb309d9 Mon Sep 17 00:00:00 2001 From: Omer Lachish <289488+rauchy@users.noreply.github.com> Date: Tue, 15 Oct 2024 12:21:14 +0200 Subject: [PATCH 47/99] [Internal] Generate Effective Fields (#4057) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR introduces changes to the TFSDK generator to support “effective 
fields” for server-provided values. When fields are marked with the new proto annotation `ServerProposedIfEmpty` (name pending final decision, until then the `computed` annotation is used to indicate these fields, while specific fields are excluded), the generator will create an additional computed field (e.g., `Effective`) and add two sync functions to ensure proper handling of user-provided and server-determined values. ### Generated Struct: ``` type ResourceModel struct { OriginalField types.String `tfsdk:"original_field" tf:"optional"` EffectiveField types.String `tfsdk:"effective_field" tf:"computed"` } ``` ### Sync Functions: ``` func (newState *ResourceModel) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResourceModel) { newState.EffectiveField = newState.OriginalField newState.OriginalField = plan.OriginalField } func (newState *ResourceModel) SyncEffectiveFieldsDuringRead(existingState ResourceModel) { if existingState.EffectiveField.ValueString() == newState.OriginalField.ValueString() { newState.OriginalField = existingState.OriginalField } } ``` ## Changes ## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --------- Co-authored-by: Omer Lachish --- .codegen/model.go.tmpl | 58 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/.codegen/model.go.tmpl b/.codegen/model.go.tmpl index 40eae0d683..7d20bea4e7 100644 --- a/.codegen/model.go.tmpl +++ b/.codegen/model.go.tmpl @@ -18,20 +18,74 @@ import ( "github.com/databricks/databricks-sdk-go/marshal" "github.com/hashicorp/terraform-plugin-framework/types" ) +{{- $excluded := dict "ShareInfo" (list "CreatedAt" "CreatedBy" "UpdatedAt" "UpdatedBy") + "SharedDataObject" (list "AddedAt" "AddedBy" "Status") -}} {{range .Types}} {{- if or .Fields .IsEmpty}} {{.Comment "// " 80}} type {{.PascalName}} struct { + {{- $excluded := getOrDefault $excluded .PascalName (list) -}} {{- range .Fields}} {{.Comment " // " 80}} - {{.PascalName}} {{template "type" .Entity}} `{{template "field-tag" . }}`{{end}} + {{- $data := dict "field" . "excluded" $excluded }} + {{template "field" $data}}{{if and .Entity.IsComputed (not (in $excluded .PascalName))}}{{ $data := dict "field" . 
"excluded" $excluded "effective" true }}{{printf "\n"}}{{template "field" $data}}{{end}}{{end}} +} + +func (newState *{{.PascalName}}) SyncEffectiveFieldsDuringCreateOrUpdate(plan {{.PascalName}}) { + {{- range .Fields -}} + {{- if and .Entity.IsComputed (or .Entity.IsString .Entity.IsBool .Entity.IsInt64 .Entity.IsFloat64 .Entity.IsInt .Entity.Enum) -}} + {{- if not (in $excluded .PascalName)}} + newState.Effective{{.PascalName}} = newState.{{.PascalName}} + newState.{{.PascalName}} = plan.{{.PascalName}} + {{- end}} + {{- end}} + {{- end}} +} + +func (newState *{{.PascalName}}) SyncEffectiveFieldsDuringRead(existingState {{.PascalName}}) { + {{- range .Fields -}} + {{- if and .Entity.IsComputed (or .Entity.IsString .Entity.IsBool .Entity.IsInt64 .Entity.IsFloat64 .Entity.IsInt .Entity.Enum) -}} + {{- if not (in $excluded .PascalName) -}} + {{- $type := "" -}} + {{- if .Entity.IsString}}{{$type = "String"}}{{end}} + {{- if .Entity.IsBool}}{{$type = "Bool"}}{{end}} + {{- if .Entity.IsInt64}}{{$type = "Int64"}}{{end}} + {{- if .Entity.IsFloat64}}{{$type = "Float64"}}{{end}} + {{- if .Entity.IsInt}}{{$type = "Int64"}}{{end}} + {{- if .Entity.Enum}}{{$type = "String"}}{{end}} + if existingState.Effective{{.PascalName}}.Value{{$type}}() == newState.{{.PascalName}}.Value{{$type}}() { + newState.{{.PascalName}} = existingState.{{.PascalName}} + } + {{- end}} + {{- end}} + {{- end}} } {{end}} {{end}} +{{- define "field" -}} +{{if .effective}}Effective{{end}}{{.field.PascalName}} {{template "type" .field.Entity}} `{{template "field-tag" . }}` +{{- end -}} + {{- define "field-tag" -}} - {{if .IsJson}}tfsdk:"{{if and (ne .Entity.Terraform nil) (ne .Entity.Terraform.Alias "") }}{{.Entity.Terraform.Alias}}{{else}}{{.Name}}{{end}}" tf:"{{- $first := true -}}{{- if not .Required -}}{{- if not $first -}},{{end}}optional{{- $first = false -}}{{- end -}}{{- if .Entity.IsObject -}}{{- if not $first -}},{{end}}object{{- $first = false -}}{{- end -}}"{{else}}tfsdk:"-"{{end -}} + {{- $annotations := "" -}} + {{- if in .excluded .field.PascalName -}} + {{- $annotations = (printf "%scomputed,optional," $annotations) -}} + {{- else if .effective -}} + {{- $annotations = (printf "%scomputed,optional," $annotations) -}} + {{- else -}} + {{- if not .field.Required -}} + {{- $annotations = (printf "%soptional," $annotations) -}} + {{- end -}} + {{- if .field.Entity.IsObject -}} + {{- $annotations = (printf "%sobject," $annotations) -}} + {{- end -}} + {{- end -}} + {{- if gt (len $annotations) 0 -}} + {{- $annotations = (printf "%s" (trimSuffix "," $annotations)) -}} + {{- end -}} + {{if .field.IsJson}}tfsdk:"{{if and (ne .field.Entity.Terraform nil) (ne .field.Entity.Terraform.Alias "") }}{{.field.Entity.Terraform.Alias}}{{else}}{{if .effective}}effective_{{end}}{{.field.Name}}{{end}}" tf:"{{$annotations}}"{{else}}tfsdk:"-"{{end -}} {{- end -}} {{- define "type" -}} From a84e3c7378887b9fbb1a97b1b912a57abd17c84d Mon Sep 17 00:00:00 2001 From: Omer Lachish <289488+rauchy@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:59:11 +0200 Subject: [PATCH 48/99] [Internal] Generate Effective Fields (#4112) ## Changes This pull request creates the most recent effective fields from OpenAPI. It's a move towards migrating the share resource to the plugin framework. 
## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --------- Co-authored-by: Omer Lachish --- .codegen/_openapi_sha | 2 +- go.mod | 4 +- go.sum | 4 +- internal/service/apps_tf/model.go | 302 ++++- internal/service/billing_tf/model.go | 222 ++++ internal/service/catalog_tf/model.go | 1337 ++++++++++++++++++++- internal/service/compute_tf/model.go | 1074 +++++++++++++++++ internal/service/dashboards_tf/model.go | 692 ++++++++++- internal/service/files_tf/model.go | 216 ++++ internal/service/iam_tf/model.go | 408 +++++++ internal/service/jobs_tf/model.go | 819 +++++++++++++ internal/service/marketplace_tf/model.go | 678 +++++++++++ internal/service/ml_tf/model.go | 966 +++++++++++++++ internal/service/oauth2_tf/model.go | 192 +++ internal/service/pipelines_tf/model.go | 372 +++++- internal/service/provisioning_tf/model.go | 384 +++++- internal/service/serving_tf/model.go | 444 +++++++ internal/service/settings_tf/model.go | 955 ++++++++++++++- internal/service/sharing_tf/model.go | 428 ++++++- internal/service/sql_tf/model.go | 896 ++++++++++++++ internal/service/vectorsearch_tf/model.go | 258 ++++ internal/service/workspace_tf/model.go | 462 +++++++ 22 files changed, 11002 insertions(+), 113 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 303c785539..2d9cb6d86d 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -0c86ea6dbd9a730c24ff0d4e509603e476955ac5 \ No newline at end of file +cf9c61453990df0f9453670f2fe68e1b128647a2 \ No newline at end of file diff --git a/go.mod b/go.mod index 1fc08e53ba..4e97cc0d23 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,13 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.48.0 + github.com/databricks/databricks-sdk-go v0.49.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl/v2 v2.22.0 github.com/hashicorp/terraform-plugin-framework v1.11.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 github.com/hashicorp/terraform-plugin-go v0.23.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-mux v0.16.0 @@ -52,7 +53,6 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect - github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect diff --git a/go.sum b/go.sum index c323c71e4c..e95a0ffe39 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.48.0 h1:46KtsnRo+FGhC3izUXbpL0PXBNomvsdignYDhJZlm9s= -github.com/databricks/databricks-sdk-go v0.48.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= 
+github.com/databricks/databricks-sdk-go v0.49.0 h1:VBTeZZMLIuBSM4kxOCfUcW9z4FUQZY2QeNRD5qm9FUQ= +github.com/databricks/databricks-sdk-go v0.49.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 0b4c6101e2..2c5594a1ec 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -15,16 +15,19 @@ import ( ) type App struct { - // The active deployment of the app. + // The active deployment of the app. A deployment is considered active when + // it has been deployed to the app compute. ActiveDeployment []AppDeployment `tfsdk:"active_deployment" tf:"optional,object"` AppStatus []ApplicationStatus `tfsdk:"app_status" tf:"optional,object"` ComputeStatus []ComputeStatus `tfsdk:"compute_status" tf:"optional,object"` // The creation time of the app. Formatted timestamp in ISO 6801. - CreateTime types.String `tfsdk:"create_time" tf:"optional"` + CreateTime types.String `tfsdk:"create_time" tf:"optional"` + EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` // The email of the user that created the app. - Creator types.String `tfsdk:"creator" tf:"optional"` + Creator types.String `tfsdk:"creator" tf:"optional"` + EffectiveCreator types.String `tfsdk:"effective_creator" tf:"computed,optional"` // The default workspace file system path of the source code from which app // deployment are created. This field tracks the workspace source code path // of the last active deployment. @@ -34,20 +37,67 @@ type App struct { // The name of the app. The name must contain only lowercase alphanumeric // characters and hyphens. It must be unique within the workspace. Name types.String `tfsdk:"name" tf:""` - // The pending deployment of the app. + // The pending deployment of the app. A deployment is considered pending + // when it is being prepared for deployment to the app compute. PendingDeployment []AppDeployment `tfsdk:"pending_deployment" tf:"optional,object"` // Resources for the app. Resources []AppResource `tfsdk:"resources" tf:"optional"` - ServicePrincipalId types.Int64 `tfsdk:"service_principal_id" tf:"optional"` + ServicePrincipalId types.Int64 `tfsdk:"service_principal_id" tf:"optional"` + EffectiveServicePrincipalId types.Int64 `tfsdk:"effective_service_principal_id" tf:"computed,optional"` - ServicePrincipalName types.String `tfsdk:"service_principal_name" tf:"optional"` + ServicePrincipalName types.String `tfsdk:"service_principal_name" tf:"optional"` + EffectiveServicePrincipalName types.String `tfsdk:"effective_service_principal_name" tf:"computed,optional"` // The update time of the app. Formatted timestamp in ISO 6801. - UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + EffectiveUpdateTime types.String `tfsdk:"effective_update_time" tf:"computed,optional"` // The email of the user that last updated the app. - Updater types.String `tfsdk:"updater" tf:"optional"` + Updater types.String `tfsdk:"updater" tf:"optional"` + EffectiveUpdater types.String `tfsdk:"effective_updater" tf:"computed,optional"` // The URL of the app once it is deployed. 
- Url types.String `tfsdk:"url" tf:"optional"` + Url types.String `tfsdk:"url" tf:"optional"` + EffectiveUrl types.String `tfsdk:"effective_url" tf:"computed,optional"` +} + +func (newState *App) SyncEffectiveFieldsDuringCreateOrUpdate(plan App) { + newState.EffectiveCreateTime = newState.CreateTime + newState.CreateTime = plan.CreateTime + newState.EffectiveCreator = newState.Creator + newState.Creator = plan.Creator + newState.EffectiveServicePrincipalId = newState.ServicePrincipalId + newState.ServicePrincipalId = plan.ServicePrincipalId + newState.EffectiveServicePrincipalName = newState.ServicePrincipalName + newState.ServicePrincipalName = plan.ServicePrincipalName + newState.EffectiveUpdateTime = newState.UpdateTime + newState.UpdateTime = plan.UpdateTime + newState.EffectiveUpdater = newState.Updater + newState.Updater = plan.Updater + newState.EffectiveUrl = newState.Url + newState.Url = plan.Url +} + +func (newState *App) SyncEffectiveFieldsDuringRead(existingState App) { + if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { + newState.CreateTime = existingState.CreateTime + } + if existingState.EffectiveCreator.ValueString() == newState.Creator.ValueString() { + newState.Creator = existingState.Creator + } + if existingState.EffectiveServicePrincipalId.ValueInt64() == newState.ServicePrincipalId.ValueInt64() { + newState.ServicePrincipalId = existingState.ServicePrincipalId + } + if existingState.EffectiveServicePrincipalName.ValueString() == newState.ServicePrincipalName.ValueString() { + newState.ServicePrincipalName = existingState.ServicePrincipalName + } + if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { + newState.UpdateTime = existingState.UpdateTime + } + if existingState.EffectiveUpdater.ValueString() == newState.Updater.ValueString() { + newState.Updater = existingState.Updater + } + if existingState.EffectiveUrl.ValueString() == newState.Url.ValueString() { + newState.Url = existingState.Url + } } type AppAccessControlRequest struct { @@ -61,6 +111,12 @@ type AppAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *AppAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppAccessControlRequest) { +} + +func (newState *AppAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState AppAccessControlRequest) { +} + type AppAccessControlResponse struct { // All permissions. AllPermissions []AppPermission `tfsdk:"all_permissions" tf:"optional"` @@ -74,11 +130,19 @@ type AppAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *AppAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppAccessControlResponse) { +} + +func (newState *AppAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState AppAccessControlResponse) { +} + type AppDeployment struct { // The creation time of the deployment. Formatted timestamp in ISO 6801. - CreateTime types.String `tfsdk:"create_time" tf:"optional"` + CreateTime types.String `tfsdk:"create_time" tf:"optional"` + EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` // The email of the user creates the deployment. - Creator types.String `tfsdk:"creator" tf:"optional"` + Creator types.String `tfsdk:"creator" tf:"optional"` + EffectiveCreator types.String `tfsdk:"effective_creator" tf:"computed,optional"` // The deployment artifacts for an app. 
DeploymentArtifacts []AppDeploymentArtifacts `tfsdk:"deployment_artifacts" tf:"optional,object"` // The unique id of the deployment. @@ -96,7 +160,29 @@ type AppDeployment struct { // Status and status message of the deployment Status []AppDeploymentStatus `tfsdk:"status" tf:"optional,object"` // The update time of the deployment. Formatted timestamp in ISO 6801. - UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + EffectiveUpdateTime types.String `tfsdk:"effective_update_time" tf:"computed,optional"` +} + +func (newState *AppDeployment) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppDeployment) { + newState.EffectiveCreateTime = newState.CreateTime + newState.CreateTime = plan.CreateTime + newState.EffectiveCreator = newState.Creator + newState.Creator = plan.Creator + newState.EffectiveUpdateTime = newState.UpdateTime + newState.UpdateTime = plan.UpdateTime +} + +func (newState *AppDeployment) SyncEffectiveFieldsDuringRead(existingState AppDeployment) { + if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { + newState.CreateTime = existingState.CreateTime + } + if existingState.EffectiveCreator.ValueString() == newState.Creator.ValueString() { + newState.Creator = existingState.Creator + } + if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { + newState.UpdateTime = existingState.UpdateTime + } } type AppDeploymentArtifacts struct { @@ -105,13 +191,31 @@ type AppDeploymentArtifacts struct { SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` } +func (newState *AppDeploymentArtifacts) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppDeploymentArtifacts) { +} + +func (newState *AppDeploymentArtifacts) SyncEffectiveFieldsDuringRead(existingState AppDeploymentArtifacts) { +} + type AppDeploymentStatus struct { // Message corresponding with the deployment state. - Message types.String `tfsdk:"message" tf:"optional"` + Message types.String `tfsdk:"message" tf:"optional"` + EffectiveMessage types.String `tfsdk:"effective_message" tf:"computed,optional"` // State of the deployment. 
State types.String `tfsdk:"state" tf:"optional"` } +func (newState *AppDeploymentStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppDeploymentStatus) { + newState.EffectiveMessage = newState.Message + newState.Message = plan.Message +} + +func (newState *AppDeploymentStatus) SyncEffectiveFieldsDuringRead(existingState AppDeploymentStatus) { + if existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { + newState.Message = existingState.Message + } +} + type AppPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -120,6 +224,12 @@ type AppPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *AppPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppPermission) { +} + +func (newState *AppPermission) SyncEffectiveFieldsDuringRead(existingState AppPermission) { +} + type AppPermissions struct { AccessControlList []AppAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -128,18 +238,36 @@ type AppPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *AppPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppPermissions) { +} + +func (newState *AppPermissions) SyncEffectiveFieldsDuringRead(existingState AppPermissions) { +} + type AppPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *AppPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppPermissionsDescription) { +} + +func (newState *AppPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState AppPermissionsDescription) { +} + type AppPermissionsRequest struct { AccessControlList []AppAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The app for which to get or manage permissions. AppName types.String `tfsdk:"-"` } +func (newState *AppPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppPermissionsRequest) { +} + +func (newState *AppPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState AppPermissionsRequest) { +} + type AppResource struct { // Description of the App Resource. Description types.String `tfsdk:"description" tf:"optional"` @@ -155,6 +283,12 @@ type AppResource struct { SqlWarehouse []AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional,object"` } +func (newState *AppResource) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppResource) { +} + +func (newState *AppResource) SyncEffectiveFieldsDuringRead(existingState AppResource) { +} + type AppResourceJob struct { // Id of the job to grant permission on. Id types.String `tfsdk:"id" tf:""` @@ -163,6 +297,12 @@ type AppResourceJob struct { Permission types.String `tfsdk:"permission" tf:""` } +func (newState *AppResourceJob) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppResourceJob) { +} + +func (newState *AppResourceJob) SyncEffectiveFieldsDuringRead(existingState AppResourceJob) { +} + type AppResourceSecret struct { // Key of the secret to grant permission on. 
Key types.String `tfsdk:"key" tf:""` @@ -173,6 +313,12 @@ type AppResourceSecret struct { Scope types.String `tfsdk:"scope" tf:""` } +func (newState *AppResourceSecret) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppResourceSecret) { +} + +func (newState *AppResourceSecret) SyncEffectiveFieldsDuringRead(existingState AppResourceSecret) { +} + type AppResourceServingEndpoint struct { // Name of the serving endpoint to grant permission on. Name types.String `tfsdk:"name" tf:""` @@ -181,6 +327,12 @@ type AppResourceServingEndpoint struct { Permission types.String `tfsdk:"permission" tf:""` } +func (newState *AppResourceServingEndpoint) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppResourceServingEndpoint) { +} + +func (newState *AppResourceServingEndpoint) SyncEffectiveFieldsDuringRead(existingState AppResourceServingEndpoint) { +} + type AppResourceSqlWarehouse struct { // Id of the SQL warehouse to grant permission on. Id types.String `tfsdk:"id" tf:""` @@ -189,20 +341,50 @@ type AppResourceSqlWarehouse struct { Permission types.String `tfsdk:"permission" tf:""` } +func (newState *AppResourceSqlWarehouse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppResourceSqlWarehouse) { +} + +func (newState *AppResourceSqlWarehouse) SyncEffectiveFieldsDuringRead(existingState AppResourceSqlWarehouse) { +} + type ApplicationStatus struct { // Application status message - Message types.String `tfsdk:"message" tf:"optional"` + Message types.String `tfsdk:"message" tf:"optional"` + EffectiveMessage types.String `tfsdk:"effective_message" tf:"computed,optional"` // State of the application. State types.String `tfsdk:"state" tf:"optional"` } +func (newState *ApplicationStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan ApplicationStatus) { + newState.EffectiveMessage = newState.Message + newState.Message = plan.Message +} + +func (newState *ApplicationStatus) SyncEffectiveFieldsDuringRead(existingState ApplicationStatus) { + if existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { + newState.Message = existingState.Message + } +} + type ComputeStatus struct { // Compute status message - Message types.String `tfsdk:"message" tf:"optional"` + Message types.String `tfsdk:"message" tf:"optional"` + EffectiveMessage types.String `tfsdk:"effective_message" tf:"computed,optional"` // State of the app compute. State types.String `tfsdk:"state" tf:"optional"` } +func (newState *ComputeStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan ComputeStatus) { + newState.EffectiveMessage = newState.Message + newState.Message = plan.Message +} + +func (newState *ComputeStatus) SyncEffectiveFieldsDuringRead(existingState ComputeStatus) { + if existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { + newState.Message = existingState.Message + } +} + type CreateAppDeploymentRequest struct { // The name of the app. AppName types.String `tfsdk:"-"` @@ -220,6 +402,12 @@ type CreateAppDeploymentRequest struct { SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` } +func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppDeploymentRequest) { +} + +func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppDeploymentRequest) { +} + type CreateAppRequest struct { // The description of the app. 
Description types.String `tfsdk:"description" tf:"optional"` @@ -230,12 +418,24 @@ type CreateAppRequest struct { Resources []AppResource `tfsdk:"resources" tf:"optional"` } +func (newState *CreateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppRequest) { +} + +func (newState *CreateAppRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppRequest) { +} + // Delete an app type DeleteAppRequest struct { // The name of the app. Name types.String `tfsdk:"-"` } +func (newState *DeleteAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAppRequest) { +} + +func (newState *DeleteAppRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAppRequest) { +} + // Get an app deployment type GetAppDeploymentRequest struct { // The name of the app. @@ -244,29 +444,59 @@ type GetAppDeploymentRequest struct { DeploymentId types.String `tfsdk:"-"` } +func (newState *GetAppDeploymentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAppDeploymentRequest) { +} + +func (newState *GetAppDeploymentRequest) SyncEffectiveFieldsDuringRead(existingState GetAppDeploymentRequest) { +} + // Get app permission levels type GetAppPermissionLevelsRequest struct { // The app for which to get or manage permissions. AppName types.String `tfsdk:"-"` } +func (newState *GetAppPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAppPermissionLevelsRequest) { +} + +func (newState *GetAppPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetAppPermissionLevelsRequest) { +} + type GetAppPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []AppPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetAppPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAppPermissionLevelsResponse) { +} + +func (newState *GetAppPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetAppPermissionLevelsResponse) { +} + // Get app permissions type GetAppPermissionsRequest struct { // The app for which to get or manage permissions. AppName types.String `tfsdk:"-"` } +func (newState *GetAppPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAppPermissionsRequest) { +} + +func (newState *GetAppPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetAppPermissionsRequest) { +} + // Get an app type GetAppRequest struct { // The name of the app. Name types.String `tfsdk:"-"` } +func (newState *GetAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAppRequest) { +} + +func (newState *GetAppRequest) SyncEffectiveFieldsDuringRead(existingState GetAppRequest) { +} + // List app deployments type ListAppDeploymentsRequest struct { // The name of the app. @@ -278,6 +508,12 @@ type ListAppDeploymentsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListAppDeploymentsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAppDeploymentsRequest) { +} + +func (newState *ListAppDeploymentsRequest) SyncEffectiveFieldsDuringRead(existingState ListAppDeploymentsRequest) { +} + type ListAppDeploymentsResponse struct { // Deployment history of the app. 
AppDeployments []AppDeployment `tfsdk:"app_deployments" tf:"optional"` @@ -285,6 +521,12 @@ type ListAppDeploymentsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListAppDeploymentsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAppDeploymentsResponse) { +} + +func (newState *ListAppDeploymentsResponse) SyncEffectiveFieldsDuringRead(existingState ListAppDeploymentsResponse) { +} + // List apps type ListAppsRequest struct { // Upper bound for items returned. @@ -294,22 +536,46 @@ type ListAppsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListAppsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAppsRequest) { +} + +func (newState *ListAppsRequest) SyncEffectiveFieldsDuringRead(existingState ListAppsRequest) { +} + type ListAppsResponse struct { Apps []App `tfsdk:"apps" tf:"optional"` // Pagination token to request the next page of apps. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListAppsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAppsResponse) { +} + +func (newState *ListAppsResponse) SyncEffectiveFieldsDuringRead(existingState ListAppsResponse) { +} + type StartAppRequest struct { // The name of the app. Name types.String `tfsdk:"-"` } +func (newState *StartAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan StartAppRequest) { +} + +func (newState *StartAppRequest) SyncEffectiveFieldsDuringRead(existingState StartAppRequest) { +} + type StopAppRequest struct { // The name of the app. Name types.String `tfsdk:"-"` } +func (newState *StopAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan StopAppRequest) { +} + +func (newState *StopAppRequest) SyncEffectiveFieldsDuringRead(existingState StopAppRequest) { +} + type UpdateAppRequest struct { // The description of the app. Description types.String `tfsdk:"description" tf:"optional"` @@ -319,3 +585,9 @@ type UpdateAppRequest struct { // Resources for the app. Resources []AppResource `tfsdk:"resources" tf:"optional"` } + +func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAppRequest) { +} + +func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAppRequest) { +} diff --git a/internal/service/billing_tf/model.go b/internal/service/billing_tf/model.go index f2a63fde2b..4aecb90bb0 100755 --- a/internal/service/billing_tf/model.go +++ b/internal/service/billing_tf/model.go @@ -25,6 +25,12 @@ type ActionConfiguration struct { Target types.String `tfsdk:"target" tf:"optional"` } +func (newState *ActionConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan ActionConfiguration) { +} + +func (newState *ActionConfiguration) SyncEffectiveFieldsDuringRead(existingState ActionConfiguration) { +} + type AlertConfiguration struct { // Configured actions for this alert. These define what happens when an // alert enters a triggered state. @@ -44,6 +50,12 @@ type AlertConfiguration struct { TriggerType types.String `tfsdk:"trigger_type" tf:"optional"` } +func (newState *AlertConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan AlertConfiguration) { +} + +func (newState *AlertConfiguration) SyncEffectiveFieldsDuringRead(existingState AlertConfiguration) { +} + type BudgetConfiguration struct { // Databricks account ID. 
AccountId types.String `tfsdk:"account_id" tf:"optional"` @@ -65,6 +77,12 @@ type BudgetConfiguration struct { UpdateTime types.Int64 `tfsdk:"update_time" tf:"optional"` } +func (newState *BudgetConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan BudgetConfiguration) { +} + +func (newState *BudgetConfiguration) SyncEffectiveFieldsDuringRead(existingState BudgetConfiguration) { +} + type BudgetConfigurationFilter struct { // A list of tag keys and values that will limit the budget to usage that // includes those specific custom tags. Tags are case-sensitive and should @@ -74,24 +92,48 @@ type BudgetConfigurationFilter struct { WorkspaceId []BudgetConfigurationFilterWorkspaceIdClause `tfsdk:"workspace_id" tf:"optional,object"` } +func (newState *BudgetConfigurationFilter) SyncEffectiveFieldsDuringCreateOrUpdate(plan BudgetConfigurationFilter) { +} + +func (newState *BudgetConfigurationFilter) SyncEffectiveFieldsDuringRead(existingState BudgetConfigurationFilter) { +} + type BudgetConfigurationFilterClause struct { Operator types.String `tfsdk:"operator" tf:"optional"` Values []types.String `tfsdk:"values" tf:"optional"` } +func (newState *BudgetConfigurationFilterClause) SyncEffectiveFieldsDuringCreateOrUpdate(plan BudgetConfigurationFilterClause) { +} + +func (newState *BudgetConfigurationFilterClause) SyncEffectiveFieldsDuringRead(existingState BudgetConfigurationFilterClause) { +} + type BudgetConfigurationFilterTagClause struct { Key types.String `tfsdk:"key" tf:"optional"` Value []BudgetConfigurationFilterClause `tfsdk:"value" tf:"optional,object"` } +func (newState *BudgetConfigurationFilterTagClause) SyncEffectiveFieldsDuringCreateOrUpdate(plan BudgetConfigurationFilterTagClause) { +} + +func (newState *BudgetConfigurationFilterTagClause) SyncEffectiveFieldsDuringRead(existingState BudgetConfigurationFilterTagClause) { +} + type BudgetConfigurationFilterWorkspaceIdClause struct { Operator types.String `tfsdk:"operator" tf:"optional"` Values []types.Int64 `tfsdk:"values" tf:"optional"` } +func (newState *BudgetConfigurationFilterWorkspaceIdClause) SyncEffectiveFieldsDuringCreateOrUpdate(plan BudgetConfigurationFilterWorkspaceIdClause) { +} + +func (newState *BudgetConfigurationFilterWorkspaceIdClause) SyncEffectiveFieldsDuringRead(existingState BudgetConfigurationFilterWorkspaceIdClause) { +} + type CreateBillingUsageDashboardRequest struct { // Workspace level usage dashboard shows usage data for the specified // workspace ID. Global level usage dashboard shows usage data for all @@ -102,11 +144,23 @@ type CreateBillingUsageDashboardRequest struct { WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:"optional"` } +func (newState *CreateBillingUsageDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateBillingUsageDashboardRequest) { +} + +func (newState *CreateBillingUsageDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateBillingUsageDashboardRequest) { +} + type CreateBillingUsageDashboardResponse struct { // The unique id of the usage dashboard. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` } +func (newState *CreateBillingUsageDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateBillingUsageDashboardResponse) { +} + +func (newState *CreateBillingUsageDashboardResponse) SyncEffectiveFieldsDuringRead(existingState CreateBillingUsageDashboardResponse) { +} + type CreateBudgetConfigurationBudget struct { // Databricks account ID. 
AccountId types.String `tfsdk:"account_id" tf:"optional"` @@ -122,6 +176,12 @@ type CreateBudgetConfigurationBudget struct { Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional,object"` } +func (newState *CreateBudgetConfigurationBudget) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateBudgetConfigurationBudget) { +} + +func (newState *CreateBudgetConfigurationBudget) SyncEffectiveFieldsDuringRead(existingState CreateBudgetConfigurationBudget) { +} + type CreateBudgetConfigurationBudgetActionConfigurations struct { // The type of the action. ActionType types.String `tfsdk:"action_type" tf:"optional"` @@ -129,6 +189,12 @@ type CreateBudgetConfigurationBudgetActionConfigurations struct { Target types.String `tfsdk:"target" tf:"optional"` } +func (newState *CreateBudgetConfigurationBudgetActionConfigurations) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateBudgetConfigurationBudgetActionConfigurations) { +} + +func (newState *CreateBudgetConfigurationBudgetActionConfigurations) SyncEffectiveFieldsDuringRead(existingState CreateBudgetConfigurationBudgetActionConfigurations) { +} + type CreateBudgetConfigurationBudgetAlertConfigurations struct { // Configured actions for this alert. These define what happens when an // alert enters a triggered state. @@ -146,16 +212,34 @@ type CreateBudgetConfigurationBudgetAlertConfigurations struct { TriggerType types.String `tfsdk:"trigger_type" tf:"optional"` } +func (newState *CreateBudgetConfigurationBudgetAlertConfigurations) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateBudgetConfigurationBudgetAlertConfigurations) { +} + +func (newState *CreateBudgetConfigurationBudgetAlertConfigurations) SyncEffectiveFieldsDuringRead(existingState CreateBudgetConfigurationBudgetAlertConfigurations) { +} + type CreateBudgetConfigurationRequest struct { // Properties of the new budget configuration. Budget []CreateBudgetConfigurationBudget `tfsdk:"budget" tf:"object"` } +func (newState *CreateBudgetConfigurationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateBudgetConfigurationRequest) { +} + +func (newState *CreateBudgetConfigurationRequest) SyncEffectiveFieldsDuringRead(existingState CreateBudgetConfigurationRequest) { +} + type CreateBudgetConfigurationResponse struct { // The created budget configuration. Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } +func (newState *CreateBudgetConfigurationResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateBudgetConfigurationResponse) { +} + +func (newState *CreateBudgetConfigurationResponse) SyncEffectiveFieldsDuringRead(existingState CreateBudgetConfigurationResponse) { +} + type CreateLogDeliveryConfigurationParams struct { // The optional human-readable name of the log delivery configuration. // Defaults to empty. @@ -228,15 +312,33 @@ type CreateLogDeliveryConfigurationParams struct { WorkspaceIdsFilter []types.Int64 `tfsdk:"workspace_ids_filter" tf:"optional"` } +func (newState *CreateLogDeliveryConfigurationParams) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateLogDeliveryConfigurationParams) { +} + +func (newState *CreateLogDeliveryConfigurationParams) SyncEffectiveFieldsDuringRead(existingState CreateLogDeliveryConfigurationParams) { +} + // Delete budget type DeleteBudgetConfigurationRequest struct { // The Databricks budget configuration ID. 
BudgetId types.String `tfsdk:"-"` } +func (newState *DeleteBudgetConfigurationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteBudgetConfigurationRequest) { +} + +func (newState *DeleteBudgetConfigurationRequest) SyncEffectiveFieldsDuringRead(existingState DeleteBudgetConfigurationRequest) { +} + type DeleteBudgetConfigurationResponse struct { } +func (newState *DeleteBudgetConfigurationResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteBudgetConfigurationResponse) { +} + +func (newState *DeleteBudgetConfigurationResponse) SyncEffectiveFieldsDuringRead(existingState DeleteBudgetConfigurationResponse) { +} + // Return billable usage logs type DownloadRequest struct { // Format: `YYYY-MM`. Last month to return billable usage logs for. This @@ -251,10 +353,22 @@ type DownloadRequest struct { StartMonth types.String `tfsdk:"-"` } +func (newState *DownloadRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DownloadRequest) { +} + +func (newState *DownloadRequest) SyncEffectiveFieldsDuringRead(existingState DownloadRequest) { +} + type DownloadResponse struct { Contents io.ReadCloser `tfsdk:"-"` } +func (newState *DownloadResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DownloadResponse) { +} + +func (newState *DownloadResponse) SyncEffectiveFieldsDuringRead(existingState DownloadResponse) { +} + // Get usage dashboard type GetBillingUsageDashboardRequest struct { // Workspace level usage dashboard shows usage data for the specified @@ -266,6 +380,12 @@ type GetBillingUsageDashboardRequest struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *GetBillingUsageDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetBillingUsageDashboardRequest) { +} + +func (newState *GetBillingUsageDashboardRequest) SyncEffectiveFieldsDuringRead(existingState GetBillingUsageDashboardRequest) { +} + type GetBillingUsageDashboardResponse struct { // The unique id of the usage dashboard. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` @@ -273,22 +393,46 @@ type GetBillingUsageDashboardResponse struct { DashboardUrl types.String `tfsdk:"dashboard_url" tf:"optional"` } +func (newState *GetBillingUsageDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetBillingUsageDashboardResponse) { +} + +func (newState *GetBillingUsageDashboardResponse) SyncEffectiveFieldsDuringRead(existingState GetBillingUsageDashboardResponse) { +} + // Get budget type GetBudgetConfigurationRequest struct { // The Databricks budget configuration ID. 
BudgetId types.String `tfsdk:"-"` } +func (newState *GetBudgetConfigurationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetBudgetConfigurationRequest) { +} + +func (newState *GetBudgetConfigurationRequest) SyncEffectiveFieldsDuringRead(existingState GetBudgetConfigurationRequest) { +} + type GetBudgetConfigurationResponse struct { Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } +func (newState *GetBudgetConfigurationResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetBudgetConfigurationResponse) { +} + +func (newState *GetBudgetConfigurationResponse) SyncEffectiveFieldsDuringRead(existingState GetBudgetConfigurationResponse) { +} + // Get log delivery configuration type GetLogDeliveryRequest struct { // Databricks log delivery configuration ID LogDeliveryConfigurationId types.String `tfsdk:"-"` } +func (newState *GetLogDeliveryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetLogDeliveryRequest) { +} + +func (newState *GetLogDeliveryRequest) SyncEffectiveFieldsDuringRead(existingState GetLogDeliveryRequest) { +} + // Get all budgets type ListBudgetConfigurationsRequest struct { // A page token received from a previous get all budget configurations call. @@ -297,6 +441,12 @@ type ListBudgetConfigurationsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListBudgetConfigurationsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListBudgetConfigurationsRequest) { +} + +func (newState *ListBudgetConfigurationsRequest) SyncEffectiveFieldsDuringRead(existingState ListBudgetConfigurationsRequest) { +} + type ListBudgetConfigurationsResponse struct { Budgets []BudgetConfiguration `tfsdk:"budgets" tf:"optional"` // Token which can be sent as `page_token` to retrieve the next page of @@ -304,6 +454,12 @@ type ListBudgetConfigurationsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListBudgetConfigurationsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListBudgetConfigurationsResponse) { +} + +func (newState *ListBudgetConfigurationsResponse) SyncEffectiveFieldsDuringRead(existingState ListBudgetConfigurationsResponse) { +} + // Get all log delivery configurations type ListLogDeliveryRequest struct { // Filter by credential configuration ID. @@ -314,6 +470,12 @@ type ListLogDeliveryRequest struct { StorageConfigurationId types.String `tfsdk:"-"` } +func (newState *ListLogDeliveryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListLogDeliveryRequest) { +} + +func (newState *ListLogDeliveryRequest) SyncEffectiveFieldsDuringRead(existingState ListLogDeliveryRequest) { +} + type LogDeliveryConfiguration struct { // The Databricks account ID that hosts the log delivery configuration. AccountId types.String `tfsdk:"account_id" tf:"optional"` @@ -398,6 +560,12 @@ type LogDeliveryConfiguration struct { WorkspaceIdsFilter []types.Int64 `tfsdk:"workspace_ids_filter" tf:"optional"` } +func (newState *LogDeliveryConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogDeliveryConfiguration) { +} + +func (newState *LogDeliveryConfiguration) SyncEffectiveFieldsDuringRead(existingState LogDeliveryConfiguration) { +} + // Databricks log delivery status. type LogDeliveryStatus struct { // The UTC time for the latest log delivery attempt. 
@@ -421,9 +589,21 @@ type LogDeliveryStatus struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *LogDeliveryStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogDeliveryStatus) { +} + +func (newState *LogDeliveryStatus) SyncEffectiveFieldsDuringRead(existingState LogDeliveryStatus) { +} + type PatchStatusResponse struct { } +func (newState *PatchStatusResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PatchStatusResponse) { +} + +func (newState *PatchStatusResponse) SyncEffectiveFieldsDuringRead(existingState PatchStatusResponse) { +} + type UpdateBudgetConfigurationBudget struct { // Databricks account ID. AccountId types.String `tfsdk:"account_id" tf:"optional"` @@ -441,6 +621,12 @@ type UpdateBudgetConfigurationBudget struct { Filter []BudgetConfigurationFilter `tfsdk:"filter" tf:"optional,object"` } +func (newState *UpdateBudgetConfigurationBudget) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateBudgetConfigurationBudget) { +} + +func (newState *UpdateBudgetConfigurationBudget) SyncEffectiveFieldsDuringRead(existingState UpdateBudgetConfigurationBudget) { +} + type UpdateBudgetConfigurationRequest struct { // The updated budget. This will overwrite the budget specified by the // budget ID. @@ -449,11 +635,23 @@ type UpdateBudgetConfigurationRequest struct { BudgetId types.String `tfsdk:"-"` } +func (newState *UpdateBudgetConfigurationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateBudgetConfigurationRequest) { +} + +func (newState *UpdateBudgetConfigurationRequest) SyncEffectiveFieldsDuringRead(existingState UpdateBudgetConfigurationRequest) { +} + type UpdateBudgetConfigurationResponse struct { // The updated budget. Budget []BudgetConfiguration `tfsdk:"budget" tf:"optional,object"` } +func (newState *UpdateBudgetConfigurationResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateBudgetConfigurationResponse) { +} + +func (newState *UpdateBudgetConfigurationResponse) SyncEffectiveFieldsDuringRead(existingState UpdateBudgetConfigurationResponse) { +} + type UpdateLogDeliveryConfigurationStatusRequest struct { // Databricks log delivery configuration ID LogDeliveryConfigurationId types.String `tfsdk:"-"` @@ -465,14 +663,38 @@ type UpdateLogDeliveryConfigurationStatusRequest struct { Status types.String `tfsdk:"status" tf:""` } +func (newState *UpdateLogDeliveryConfigurationStatusRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateLogDeliveryConfigurationStatusRequest) { +} + +func (newState *UpdateLogDeliveryConfigurationStatusRequest) SyncEffectiveFieldsDuringRead(existingState UpdateLogDeliveryConfigurationStatusRequest) { +} + type WrappedCreateLogDeliveryConfiguration struct { LogDeliveryConfiguration []CreateLogDeliveryConfigurationParams `tfsdk:"log_delivery_configuration" tf:"optional,object"` } +func (newState *WrappedCreateLogDeliveryConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan WrappedCreateLogDeliveryConfiguration) { +} + +func (newState *WrappedCreateLogDeliveryConfiguration) SyncEffectiveFieldsDuringRead(existingState WrappedCreateLogDeliveryConfiguration) { +} + type WrappedLogDeliveryConfiguration struct { LogDeliveryConfiguration []LogDeliveryConfiguration `tfsdk:"log_delivery_configuration" tf:"optional,object"` } +func (newState *WrappedLogDeliveryConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan WrappedLogDeliveryConfiguration) { +} + +func (newState *WrappedLogDeliveryConfiguration) SyncEffectiveFieldsDuringRead(existingState WrappedLogDeliveryConfiguration) { +} + type 
WrappedLogDeliveryConfigurations struct { LogDeliveryConfigurations []LogDeliveryConfiguration `tfsdk:"log_delivery_configurations" tf:"optional"` } + +func (newState *WrappedLogDeliveryConfigurations) SyncEffectiveFieldsDuringCreateOrUpdate(plan WrappedLogDeliveryConfigurations) { +} + +func (newState *WrappedLogDeliveryConfigurations) SyncEffectiveFieldsDuringRead(existingState WrappedLogDeliveryConfigurations) { +} diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index 25fa29c013..d064881534 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -18,6 +18,12 @@ type AccountsCreateMetastore struct { MetastoreInfo []CreateMetastore `tfsdk:"metastore_info" tf:"optional,object"` } +func (newState *AccountsCreateMetastore) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsCreateMetastore) { +} + +func (newState *AccountsCreateMetastore) SyncEffectiveFieldsDuringRead(existingState AccountsCreateMetastore) { +} + type AccountsCreateMetastoreAssignment struct { MetastoreAssignment []CreateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` // Unity Catalog metastore ID @@ -26,24 +32,54 @@ type AccountsCreateMetastoreAssignment struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *AccountsCreateMetastoreAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsCreateMetastoreAssignment) { +} + +func (newState *AccountsCreateMetastoreAssignment) SyncEffectiveFieldsDuringRead(existingState AccountsCreateMetastoreAssignment) { +} + type AccountsCreateStorageCredential struct { CredentialInfo []CreateStorageCredential `tfsdk:"credential_info" tf:"optional,object"` // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` } +func (newState *AccountsCreateStorageCredential) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsCreateStorageCredential) { +} + +func (newState *AccountsCreateStorageCredential) SyncEffectiveFieldsDuringRead(existingState AccountsCreateStorageCredential) { +} + type AccountsMetastoreAssignment struct { MetastoreAssignment []MetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` } +func (newState *AccountsMetastoreAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsMetastoreAssignment) { +} + +func (newState *AccountsMetastoreAssignment) SyncEffectiveFieldsDuringRead(existingState AccountsMetastoreAssignment) { +} + type AccountsMetastoreInfo struct { MetastoreInfo []MetastoreInfo `tfsdk:"metastore_info" tf:"optional,object"` } +func (newState *AccountsMetastoreInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsMetastoreInfo) { +} + +func (newState *AccountsMetastoreInfo) SyncEffectiveFieldsDuringRead(existingState AccountsMetastoreInfo) { +} + type AccountsStorageCredentialInfo struct { CredentialInfo []StorageCredentialInfo `tfsdk:"credential_info" tf:"optional,object"` } +func (newState *AccountsStorageCredentialInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsStorageCredentialInfo) { +} + +func (newState *AccountsStorageCredentialInfo) SyncEffectiveFieldsDuringRead(existingState AccountsStorageCredentialInfo) { +} + type AccountsUpdateMetastore struct { // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` @@ -51,6 +87,12 @@ type AccountsUpdateMetastore struct { MetastoreInfo []UpdateMetastore `tfsdk:"metastore_info" tf:"optional,object"` } +func (newState *AccountsUpdateMetastore) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsUpdateMetastore) { +} + +func (newState 
*AccountsUpdateMetastore) SyncEffectiveFieldsDuringRead(existingState AccountsUpdateMetastore) { +} + type AccountsUpdateMetastoreAssignment struct { MetastoreAssignment []UpdateMetastoreAssignment `tfsdk:"metastore_assignment" tf:"optional,object"` // Unity Catalog metastore ID @@ -59,6 +101,12 @@ type AccountsUpdateMetastoreAssignment struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *AccountsUpdateMetastoreAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsUpdateMetastoreAssignment) { +} + +func (newState *AccountsUpdateMetastoreAssignment) SyncEffectiveFieldsDuringRead(existingState AccountsUpdateMetastoreAssignment) { +} + type AccountsUpdateStorageCredential struct { CredentialInfo []UpdateStorageCredential `tfsdk:"credential_info" tf:"optional,object"` // Unity Catalog metastore ID @@ -67,6 +115,12 @@ type AccountsUpdateStorageCredential struct { StorageCredentialName types.String `tfsdk:"-"` } +func (newState *AccountsUpdateStorageCredential) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccountsUpdateStorageCredential) { +} + +func (newState *AccountsUpdateStorageCredential) SyncEffectiveFieldsDuringRead(existingState AccountsUpdateStorageCredential) { +} + type ArtifactAllowlistInfo struct { // A list of allowed artifact match patterns. ArtifactMatchers []ArtifactMatcher `tfsdk:"artifact_matchers" tf:"optional"` @@ -78,6 +132,12 @@ type ArtifactAllowlistInfo struct { MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` } +func (newState *ArtifactAllowlistInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ArtifactAllowlistInfo) { +} + +func (newState *ArtifactAllowlistInfo) SyncEffectiveFieldsDuringRead(existingState ArtifactAllowlistInfo) { +} + type ArtifactMatcher struct { // The artifact path or maven coordinate Artifact types.String `tfsdk:"artifact" tf:""` @@ -85,9 +145,21 @@ type ArtifactMatcher struct { MatchType types.String `tfsdk:"match_type" tf:""` } +func (newState *ArtifactMatcher) SyncEffectiveFieldsDuringCreateOrUpdate(plan ArtifactMatcher) { +} + +func (newState *ArtifactMatcher) SyncEffectiveFieldsDuringRead(existingState ArtifactMatcher) { +} + type AssignResponse struct { } +func (newState *AssignResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AssignResponse) { +} + +func (newState *AssignResponse) SyncEffectiveFieldsDuringRead(existingState AssignResponse) { +} + // AWS temporary credentials for API authentication. Read more at // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. type AwsCredentials struct { @@ -103,11 +175,23 @@ type AwsCredentials struct { SessionToken types.String `tfsdk:"session_token" tf:"optional"` } +func (newState *AwsCredentials) SyncEffectiveFieldsDuringCreateOrUpdate(plan AwsCredentials) { +} + +func (newState *AwsCredentials) SyncEffectiveFieldsDuringRead(existingState AwsCredentials) { +} + type AwsIamRoleRequest struct { // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access. RoleArn types.String `tfsdk:"role_arn" tf:""` } +func (newState *AwsIamRoleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan AwsIamRoleRequest) { +} + +func (newState *AwsIamRoleRequest) SyncEffectiveFieldsDuringRead(existingState AwsIamRoleRequest) { +} + type AwsIamRoleResponse struct { // The external ID used in role assumption to prevent confused deputy // problem.. 
@@ -119,6 +203,12 @@ type AwsIamRoleResponse struct { UnityCatalogIamArn types.String `tfsdk:"unity_catalog_iam_arn" tf:"optional"` } +func (newState *AwsIamRoleResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AwsIamRoleResponse) { +} + +func (newState *AwsIamRoleResponse) SyncEffectiveFieldsDuringRead(existingState AwsIamRoleResponse) { +} + type AzureManagedIdentityRequest struct { // The Azure resource ID of the Azure Databricks Access Connector. Use the // format @@ -133,6 +223,12 @@ type AzureManagedIdentityRequest struct { ManagedIdentityId types.String `tfsdk:"managed_identity_id" tf:"optional"` } +func (newState *AzureManagedIdentityRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureManagedIdentityRequest) { +} + +func (newState *AzureManagedIdentityRequest) SyncEffectiveFieldsDuringRead(existingState AzureManagedIdentityRequest) { +} + type AzureManagedIdentityResponse struct { // The Azure resource ID of the Azure Databricks Access Connector. Use the // format @@ -149,6 +245,12 @@ type AzureManagedIdentityResponse struct { ManagedIdentityId types.String `tfsdk:"managed_identity_id" tf:"optional"` } +func (newState *AzureManagedIdentityResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureManagedIdentityResponse) { +} + +func (newState *AzureManagedIdentityResponse) SyncEffectiveFieldsDuringRead(existingState AzureManagedIdentityResponse) { +} + type AzureServicePrincipal struct { // The application ID of the application registration within the referenced // AAD tenant. @@ -160,6 +262,12 @@ type AzureServicePrincipal struct { DirectoryId types.String `tfsdk:"directory_id" tf:""` } +func (newState *AzureServicePrincipal) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureServicePrincipal) { +} + +func (newState *AzureServicePrincipal) SyncEffectiveFieldsDuringRead(existingState AzureServicePrincipal) { +} + // Azure temporary credentials for API authentication. Read more at // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas type AzureUserDelegationSas struct { @@ -167,6 +275,12 @@ type AzureUserDelegationSas struct { SasToken types.String `tfsdk:"sas_token" tf:"optional"` } +func (newState *AzureUserDelegationSas) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureUserDelegationSas) { +} + +func (newState *AzureUserDelegationSas) SyncEffectiveFieldsDuringRead(existingState AzureUserDelegationSas) { +} + // Cancel refresh type CancelRefreshRequest struct { // ID of the refresh. 
@@ -175,9 +289,21 @@ type CancelRefreshRequest struct { TableName types.String `tfsdk:"-"` } +func (newState *CancelRefreshRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelRefreshRequest) { +} + +func (newState *CancelRefreshRequest) SyncEffectiveFieldsDuringRead(existingState CancelRefreshRequest) { +} + type CancelRefreshResponse struct { } +func (newState *CancelRefreshResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelRefreshResponse) { +} + +func (newState *CancelRefreshResponse) SyncEffectiveFieldsDuringRead(existingState CancelRefreshResponse) { +} + type CatalogInfo struct { // Indicates whether the principal is limited to retrieving metadata for the // associated object through the BROWSE privilege when include_browse is @@ -236,6 +362,12 @@ type CatalogInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *CatalogInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CatalogInfo) { +} + +func (newState *CatalogInfo) SyncEffectiveFieldsDuringRead(existingState CatalogInfo) { +} + type CloudflareApiToken struct { // The Cloudflare access key id of the token. AccessKeyId types.String `tfsdk:"access_key_id" tf:""` @@ -245,6 +377,12 @@ type CloudflareApiToken struct { SecretAccessKey types.String `tfsdk:"secret_access_key" tf:""` } +func (newState *CloudflareApiToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan CloudflareApiToken) { +} + +func (newState *CloudflareApiToken) SyncEffectiveFieldsDuringRead(existingState CloudflareApiToken) { +} + type ColumnInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -272,6 +410,12 @@ type ColumnInfo struct { TypeText types.String `tfsdk:"type_text" tf:"optional"` } +func (newState *ColumnInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnInfo) { +} + +func (newState *ColumnInfo) SyncEffectiveFieldsDuringRead(existingState ColumnInfo) { +} + type ColumnMask struct { // The full name of the column mask SQL UDF. FunctionName types.String `tfsdk:"function_name" tf:"optional"` @@ -282,6 +426,12 @@ type ColumnMask struct { UsingColumnNames []types.String `tfsdk:"using_column_names" tf:"optional"` } +func (newState *ColumnMask) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnMask) { +} + +func (newState *ColumnMask) SyncEffectiveFieldsDuringRead(existingState ColumnMask) { +} + type ConnectionInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -324,6 +474,12 @@ type ConnectionInfo struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *ConnectionInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ConnectionInfo) { +} + +func (newState *ConnectionInfo) SyncEffectiveFieldsDuringRead(existingState ConnectionInfo) { +} + // Detailed status of an online table. Shown if the online table is in the // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. type ContinuousUpdateStatus struct { @@ -338,6 +494,12 @@ type ContinuousUpdateStatus struct { Timestamp types.String `tfsdk:"timestamp" tf:"optional"` } +func (newState *ContinuousUpdateStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan ContinuousUpdateStatus) { +} + +func (newState *ContinuousUpdateStatus) SyncEffectiveFieldsDuringRead(existingState ContinuousUpdateStatus) { +} + type CreateCatalog struct { // User-provided free-form text description. 
Comment types.String `tfsdk:"comment" tf:"optional"` @@ -360,6 +522,12 @@ type CreateCatalog struct { StorageRoot types.String `tfsdk:"storage_root" tf:"optional"` } +func (newState *CreateCatalog) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCatalog) { +} + +func (newState *CreateCatalog) SyncEffectiveFieldsDuringRead(existingState CreateCatalog) { +} + type CreateConnection struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -376,6 +544,12 @@ type CreateConnection struct { ReadOnly types.Bool `tfsdk:"read_only" tf:"optional"` } +func (newState *CreateConnection) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateConnection) { +} + +func (newState *CreateConnection) SyncEffectiveFieldsDuringRead(existingState CreateConnection) { +} + type CreateExternalLocation struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -400,6 +574,12 @@ type CreateExternalLocation struct { Url types.String `tfsdk:"url" tf:""` } +func (newState *CreateExternalLocation) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateExternalLocation) { +} + +func (newState *CreateExternalLocation) SyncEffectiveFieldsDuringRead(existingState CreateExternalLocation) { +} + type CreateFunction struct { // Name of parent catalog. CatalogName types.String `tfsdk:"catalog_name" tf:""` @@ -449,11 +629,23 @@ type CreateFunction struct { SqlPath types.String `tfsdk:"sql_path" tf:"optional"` } +func (newState *CreateFunction) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateFunction) { +} + +func (newState *CreateFunction) SyncEffectiveFieldsDuringRead(existingState CreateFunction) { +} + type CreateFunctionRequest struct { // Partial __FunctionInfo__ specifying the function to be created. FunctionInfo []CreateFunction `tfsdk:"function_info" tf:"object"` } +func (newState *CreateFunctionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateFunctionRequest) { +} + +func (newState *CreateFunctionRequest) SyncEffectiveFieldsDuringRead(existingState CreateFunctionRequest) { +} + type CreateMetastore struct { // The user-specified name of the metastore. Name types.String `tfsdk:"name" tf:""` @@ -466,6 +658,12 @@ type CreateMetastore struct { StorageRoot types.String `tfsdk:"storage_root" tf:"optional"` } +func (newState *CreateMetastore) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateMetastore) { +} + +func (newState *CreateMetastore) SyncEffectiveFieldsDuringRead(existingState CreateMetastore) { +} + type CreateMetastoreAssignment struct { // The name of the default catalog in the metastore. This field is // depracted. Please use "Default Namespace API" to configure the default @@ -477,6 +675,12 @@ type CreateMetastoreAssignment struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *CreateMetastoreAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateMetastoreAssignment) { +} + +func (newState *CreateMetastoreAssignment) SyncEffectiveFieldsDuringRead(existingState CreateMetastoreAssignment) { +} + type CreateMonitor struct { // The directory to store monitoring assets (e.g. dashboard, metric tables). 
AssetsDir types.String `tfsdk:"assets_dir" tf:""` @@ -518,6 +722,12 @@ type CreateMonitor struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *CreateMonitor) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateMonitor) { +} + +func (newState *CreateMonitor) SyncEffectiveFieldsDuringRead(existingState CreateMonitor) { +} + // Online Table information. type CreateOnlineTableRequest struct { // Full three-part (catalog, schema, table) name of the table. @@ -526,6 +736,12 @@ type CreateOnlineTableRequest struct { Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` } +func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateOnlineTableRequest) { +} + +func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringRead(existingState CreateOnlineTableRequest) { +} + type CreateRegisteredModelRequest struct { // The name of the catalog where the schema and the registered model reside CatalogName types.String `tfsdk:"catalog_name" tf:""` @@ -540,9 +756,21 @@ type CreateRegisteredModelRequest struct { StorageLocation types.String `tfsdk:"storage_location" tf:"optional"` } +func (newState *CreateRegisteredModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateRegisteredModelRequest) { +} + +func (newState *CreateRegisteredModelRequest) SyncEffectiveFieldsDuringRead(existingState CreateRegisteredModelRequest) { +} + type CreateResponse struct { } +func (newState *CreateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateResponse) { +} + +func (newState *CreateResponse) SyncEffectiveFieldsDuringRead(existingState CreateResponse) { +} + type CreateSchema struct { // Name of parent catalog. CatalogName types.String `tfsdk:"catalog_name" tf:""` @@ -556,6 +784,12 @@ type CreateSchema struct { StorageRoot types.String `tfsdk:"storage_root" tf:"optional"` } +func (newState *CreateSchema) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateSchema) { +} + +func (newState *CreateSchema) SyncEffectiveFieldsDuringRead(existingState CreateSchema) { +} + type CreateStorageCredential struct { // The AWS IAM role configuration. 
AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` @@ -578,6 +812,12 @@ type CreateStorageCredential struct { SkipValidation types.Bool `tfsdk:"skip_validation" tf:"optional"` } +func (newState *CreateStorageCredential) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateStorageCredential) { +} + +func (newState *CreateStorageCredential) SyncEffectiveFieldsDuringRead(existingState CreateStorageCredential) { +} + type CreateTableConstraint struct { // A table constraint, as defined by *one* of the following fields being // set: __primary_key_constraint__, __foreign_key_constraint__, @@ -587,6 +827,12 @@ type CreateTableConstraint struct { FullNameArg types.String `tfsdk:"full_name_arg" tf:""` } +func (newState *CreateTableConstraint) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateTableConstraint) { +} + +func (newState *CreateTableConstraint) SyncEffectiveFieldsDuringRead(existingState CreateTableConstraint) { +} + type CreateVolumeRequestContent struct { // The name of the catalog where the schema and the volume are CatalogName types.String `tfsdk:"catalog_name" tf:""` @@ -602,15 +848,33 @@ type CreateVolumeRequestContent struct { VolumeType types.String `tfsdk:"volume_type" tf:""` } +func (newState *CreateVolumeRequestContent) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateVolumeRequestContent) { +} + +func (newState *CreateVolumeRequestContent) SyncEffectiveFieldsDuringRead(existingState CreateVolumeRequestContent) { +} + // Currently assigned workspaces type CurrentWorkspaceBindings struct { // A list of workspace IDs. Workspaces []types.Int64 `tfsdk:"workspaces" tf:"optional"` } +func (newState *CurrentWorkspaceBindings) SyncEffectiveFieldsDuringCreateOrUpdate(plan CurrentWorkspaceBindings) { +} + +func (newState *CurrentWorkspaceBindings) SyncEffectiveFieldsDuringRead(existingState CurrentWorkspaceBindings) { +} + type DatabricksGcpServiceAccountRequest struct { } +func (newState *DatabricksGcpServiceAccountRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DatabricksGcpServiceAccountRequest) { +} + +func (newState *DatabricksGcpServiceAccountRequest) SyncEffectiveFieldsDuringRead(existingState DatabricksGcpServiceAccountRequest) { +} + type DatabricksGcpServiceAccountResponse struct { // The Databricks internal ID that represents this service account. This is // an output-only field. @@ -619,6 +883,12 @@ type DatabricksGcpServiceAccountResponse struct { Email types.String `tfsdk:"email" tf:"optional"` } +func (newState *DatabricksGcpServiceAccountResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DatabricksGcpServiceAccountResponse) { +} + +func (newState *DatabricksGcpServiceAccountResponse) SyncEffectiveFieldsDuringRead(existingState DatabricksGcpServiceAccountResponse) { +} + // Delete a metastore assignment type DeleteAccountMetastoreAssignmentRequest struct { // Unity Catalog metastore ID @@ -627,6 +897,12 @@ type DeleteAccountMetastoreAssignmentRequest struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *DeleteAccountMetastoreAssignmentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAccountMetastoreAssignmentRequest) { +} + +func (newState *DeleteAccountMetastoreAssignmentRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAccountMetastoreAssignmentRequest) { +} + // Delete a metastore type DeleteAccountMetastoreRequest struct { // Force deletion even if the metastore is not empty. Default is false. 
@@ -635,6 +911,12 @@ type DeleteAccountMetastoreRequest struct { MetastoreId types.String `tfsdk:"-"` } +func (newState *DeleteAccountMetastoreRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAccountMetastoreRequest) { +} + +func (newState *DeleteAccountMetastoreRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAccountMetastoreRequest) { +} + // Delete a storage credential type DeleteAccountStorageCredentialRequest struct { // Force deletion even if the Storage Credential is not empty. Default is @@ -646,6 +928,12 @@ type DeleteAccountStorageCredentialRequest struct { StorageCredentialName types.String `tfsdk:"-"` } +func (newState *DeleteAccountStorageCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAccountStorageCredentialRequest) { +} + +func (newState *DeleteAccountStorageCredentialRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAccountStorageCredentialRequest) { +} + // Delete a Registered Model Alias type DeleteAliasRequest struct { // The name of the alias @@ -654,9 +942,21 @@ type DeleteAliasRequest struct { FullName types.String `tfsdk:"-"` } +func (newState *DeleteAliasRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAliasRequest) { +} + +func (newState *DeleteAliasRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAliasRequest) { +} + type DeleteAliasResponse struct { } +func (newState *DeleteAliasResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAliasResponse) { +} + +func (newState *DeleteAliasResponse) SyncEffectiveFieldsDuringRead(existingState DeleteAliasResponse) { +} + // Delete a catalog type DeleteCatalogRequest struct { // Force deletion even if the catalog is not empty. @@ -665,12 +965,24 @@ type DeleteCatalogRequest struct { Name types.String `tfsdk:"-"` } +func (newState *DeleteCatalogRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCatalogRequest) { +} + +func (newState *DeleteCatalogRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCatalogRequest) { +} + // Delete a connection type DeleteConnectionRequest struct { // The name of the connection to be deleted. Name types.String `tfsdk:"-"` } +func (newState *DeleteConnectionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteConnectionRequest) { +} + +func (newState *DeleteConnectionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteConnectionRequest) { +} + // Delete an external location type DeleteExternalLocationRequest struct { // Force deletion even if there are dependent external tables or mounts. @@ -679,6 +991,12 @@ type DeleteExternalLocationRequest struct { Name types.String `tfsdk:"-"` } +func (newState *DeleteExternalLocationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteExternalLocationRequest) { +} + +func (newState *DeleteExternalLocationRequest) SyncEffectiveFieldsDuringRead(existingState DeleteExternalLocationRequest) { +} + // Delete a function type DeleteFunctionRequest struct { // Force deletion even if the function is notempty. @@ -688,6 +1006,12 @@ type DeleteFunctionRequest struct { Name types.String `tfsdk:"-"` } +func (newState *DeleteFunctionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteFunctionRequest) { +} + +func (newState *DeleteFunctionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteFunctionRequest) { +} + // Delete a metastore type DeleteMetastoreRequest struct { // Force deletion even if the metastore is not empty. Default is false. 
@@ -696,6 +1020,12 @@ type DeleteMetastoreRequest struct { Id types.String `tfsdk:"-"` } +func (newState *DeleteMetastoreRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteMetastoreRequest) { +} + +func (newState *DeleteMetastoreRequest) SyncEffectiveFieldsDuringRead(existingState DeleteMetastoreRequest) { +} + // Delete a Model Version type DeleteModelVersionRequest struct { // The three-level (fully qualified) name of the model version @@ -704,27 +1034,57 @@ type DeleteModelVersionRequest struct { Version types.Int64 `tfsdk:"-"` } +func (newState *DeleteModelVersionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelVersionRequest) { +} + +func (newState *DeleteModelVersionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteModelVersionRequest) { +} + // Delete an Online Table type DeleteOnlineTableRequest struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"-"` } +func (newState *DeleteOnlineTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteOnlineTableRequest) { +} + +func (newState *DeleteOnlineTableRequest) SyncEffectiveFieldsDuringRead(existingState DeleteOnlineTableRequest) { +} + // Delete a table monitor type DeleteQualityMonitorRequest struct { // Full name of the table. TableName types.String `tfsdk:"-"` } +func (newState *DeleteQualityMonitorRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteQualityMonitorRequest) { +} + +func (newState *DeleteQualityMonitorRequest) SyncEffectiveFieldsDuringRead(existingState DeleteQualityMonitorRequest) { +} + // Delete a Registered Model type DeleteRegisteredModelRequest struct { // The three-level (fully qualified) name of the registered model FullName types.String `tfsdk:"-"` } +func (newState *DeleteRegisteredModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRegisteredModelRequest) { +} + +func (newState *DeleteRegisteredModelRequest) SyncEffectiveFieldsDuringRead(existingState DeleteRegisteredModelRequest) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + // Delete a schema type DeleteSchemaRequest struct { // Force deletion even if the schema is not empty. @@ -733,6 +1093,12 @@ type DeleteSchemaRequest struct { FullName types.String `tfsdk:"-"` } +func (newState *DeleteSchemaRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteSchemaRequest) { +} + +func (newState *DeleteSchemaRequest) SyncEffectiveFieldsDuringRead(existingState DeleteSchemaRequest) { +} + // Delete a credential type DeleteStorageCredentialRequest struct { // Force deletion even if there are dependent external locations or external @@ -742,6 +1108,12 @@ type DeleteStorageCredentialRequest struct { Name types.String `tfsdk:"-"` } +func (newState *DeleteStorageCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteStorageCredentialRequest) { +} + +func (newState *DeleteStorageCredentialRequest) SyncEffectiveFieldsDuringRead(existingState DeleteStorageCredentialRequest) { +} + // Delete a table constraint type DeleteTableConstraintRequest struct { // If true, try deleting all child constraints of the current constraint. 
If @@ -754,18 +1126,36 @@ type DeleteTableConstraintRequest struct { FullName types.String `tfsdk:"-"` } +func (newState *DeleteTableConstraintRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteTableConstraintRequest) { +} + +func (newState *DeleteTableConstraintRequest) SyncEffectiveFieldsDuringRead(existingState DeleteTableConstraintRequest) { +} + // Delete a table type DeleteTableRequest struct { // Full name of the table. FullName types.String `tfsdk:"-"` } +func (newState *DeleteTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteTableRequest) { +} + +func (newState *DeleteTableRequest) SyncEffectiveFieldsDuringRead(existingState DeleteTableRequest) { +} + // Delete a Volume type DeleteVolumeRequest struct { // The three-level (fully qualified) name of the volume Name types.String `tfsdk:"-"` } +func (newState *DeleteVolumeRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteVolumeRequest) { +} + +func (newState *DeleteVolumeRequest) SyncEffectiveFieldsDuringRead(existingState DeleteVolumeRequest) { +} + // Properties pertaining to the current state of the delta table as given by the // commit server. This does not contain **delta.*** (input) properties in // __TableInfo.properties__. @@ -774,6 +1164,12 @@ type DeltaRuntimePropertiesKvPairs struct { DeltaRuntimeProperties map[string]types.String `tfsdk:"delta_runtime_properties" tf:""` } +func (newState *DeltaRuntimePropertiesKvPairs) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeltaRuntimePropertiesKvPairs) { +} + +func (newState *DeltaRuntimePropertiesKvPairs) SyncEffectiveFieldsDuringRead(existingState DeltaRuntimePropertiesKvPairs) { +} + // A dependency of a SQL object. Either the __table__ field or the __function__ // field must be defined. type Dependency struct { @@ -783,12 +1179,24 @@ type Dependency struct { Table []TableDependency `tfsdk:"table" tf:"optional,object"` } +func (newState *Dependency) SyncEffectiveFieldsDuringCreateOrUpdate(plan Dependency) { +} + +func (newState *Dependency) SyncEffectiveFieldsDuringRead(existingState Dependency) { +} + // A list of dependencies. type DependencyList struct { // Array of dependencies. Dependencies []Dependency `tfsdk:"dependencies" tf:"optional"` } +func (newState *DependencyList) SyncEffectiveFieldsDuringCreateOrUpdate(plan DependencyList) { +} + +func (newState *DependencyList) SyncEffectiveFieldsDuringRead(existingState DependencyList) { +} + // Disable a system schema type DisableRequest struct { // The metastore ID under which the system schema lives. 
@@ -797,15 +1205,33 @@ type DisableRequest struct { SchemaName types.String `tfsdk:"-"` } +func (newState *DisableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DisableRequest) { +} + +func (newState *DisableRequest) SyncEffectiveFieldsDuringRead(existingState DisableRequest) { +} + type DisableResponse struct { } +func (newState *DisableResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DisableResponse) { +} + +func (newState *DisableResponse) SyncEffectiveFieldsDuringRead(existingState DisableResponse) { +} + type EffectivePermissionsList struct { // The privileges conveyed to each principal (either directly or via // inheritance) PrivilegeAssignments []EffectivePrivilegeAssignment `tfsdk:"privilege_assignments" tf:"optional"` } +func (newState *EffectivePermissionsList) SyncEffectiveFieldsDuringCreateOrUpdate(plan EffectivePermissionsList) { +} + +func (newState *EffectivePermissionsList) SyncEffectiveFieldsDuringRead(existingState EffectivePermissionsList) { +} + type EffectivePredictiveOptimizationFlag struct { // The name of the object from which the flag was inherited. If there was no // inheritance, this field is left blank. @@ -818,6 +1244,12 @@ type EffectivePredictiveOptimizationFlag struct { Value types.String `tfsdk:"value" tf:""` } +func (newState *EffectivePredictiveOptimizationFlag) SyncEffectiveFieldsDuringCreateOrUpdate(plan EffectivePredictiveOptimizationFlag) { +} + +func (newState *EffectivePredictiveOptimizationFlag) SyncEffectiveFieldsDuringRead(existingState EffectivePredictiveOptimizationFlag) { +} + type EffectivePrivilege struct { // The full name of the object that conveys this privilege via inheritance. // This field is omitted when privilege is not inherited (it's assigned to @@ -831,6 +1263,12 @@ type EffectivePrivilege struct { Privilege types.String `tfsdk:"privilege" tf:"optional"` } +func (newState *EffectivePrivilege) SyncEffectiveFieldsDuringCreateOrUpdate(plan EffectivePrivilege) { +} + +func (newState *EffectivePrivilege) SyncEffectiveFieldsDuringRead(existingState EffectivePrivilege) { +} + type EffectivePrivilegeAssignment struct { // The principal (user email address or group name). Principal types.String `tfsdk:"principal" tf:"optional"` @@ -839,6 +1277,12 @@ type EffectivePrivilegeAssignment struct { Privileges []EffectivePrivilege `tfsdk:"privileges" tf:"optional"` } +func (newState *EffectivePrivilegeAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan EffectivePrivilegeAssignment) { +} + +func (newState *EffectivePrivilegeAssignment) SyncEffectiveFieldsDuringRead(existingState EffectivePrivilegeAssignment) { +} + // Enable a system schema type EnableRequest struct { // The metastore ID under which the system schema lives. @@ -847,21 +1291,45 @@ type EnableRequest struct { SchemaName types.String `tfsdk:"-"` } +func (newState *EnableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnableRequest) { +} + +func (newState *EnableRequest) SyncEffectiveFieldsDuringRead(existingState EnableRequest) { +} + type EnableResponse struct { } +func (newState *EnableResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnableResponse) { +} + +func (newState *EnableResponse) SyncEffectiveFieldsDuringRead(existingState EnableResponse) { +} + // Encryption options that apply to clients connecting to cloud storage. type EncryptionDetails struct { // Server-Side Encryption properties for clients communicating with AWS s3. 
SseEncryptionDetails []SseEncryptionDetails `tfsdk:"sse_encryption_details" tf:"optional,object"` } +func (newState *EncryptionDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan EncryptionDetails) { +} + +func (newState *EncryptionDetails) SyncEffectiveFieldsDuringRead(existingState EncryptionDetails) { +} + // Get boolean reflecting if table exists type ExistsRequest struct { // Full name of the table. FullName types.String `tfsdk:"-"` } +func (newState *ExistsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExistsRequest) { +} + +func (newState *ExistsRequest) SyncEffectiveFieldsDuringRead(existingState ExistsRequest) { +} + type ExternalLocationInfo struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -905,6 +1373,12 @@ type ExternalLocationInfo struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *ExternalLocationInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExternalLocationInfo) { +} + +func (newState *ExternalLocationInfo) SyncEffectiveFieldsDuringRead(existingState ExternalLocationInfo) { +} + // Detailed status of an online table. Shown if the online table is in the // OFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state. type FailedStatus struct { @@ -919,6 +1393,12 @@ type FailedStatus struct { Timestamp types.String `tfsdk:"timestamp" tf:"optional"` } +func (newState *FailedStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan FailedStatus) { +} + +func (newState *FailedStatus) SyncEffectiveFieldsDuringRead(existingState FailedStatus) { +} + type ForeignKeyConstraint struct { // Column names for this constraint. ChildColumns []types.String `tfsdk:"child_columns" tf:""` @@ -930,6 +1410,12 @@ type ForeignKeyConstraint struct { ParentTable types.String `tfsdk:"parent_table" tf:""` } +func (newState *ForeignKeyConstraint) SyncEffectiveFieldsDuringCreateOrUpdate(plan ForeignKeyConstraint) { +} + +func (newState *ForeignKeyConstraint) SyncEffectiveFieldsDuringRead(existingState ForeignKeyConstraint) { +} + // A function that is dependent on a SQL object. type FunctionDependency struct { // Full name of the dependent function, in the form of @@ -937,6 +1423,12 @@ type FunctionDependency struct { FunctionFullName types.String `tfsdk:"function_full_name" tf:""` } +func (newState *FunctionDependency) SyncEffectiveFieldsDuringCreateOrUpdate(plan FunctionDependency) { +} + +func (newState *FunctionDependency) SyncEffectiveFieldsDuringRead(existingState FunctionDependency) { +} + type FunctionInfo struct { // Indicates whether the principal is limited to retrieving metadata for the // associated object through the BROWSE privilege when include_browse is @@ -1007,6 +1499,12 @@ type FunctionInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *FunctionInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan FunctionInfo) { +} + +func (newState *FunctionInfo) SyncEffectiveFieldsDuringRead(existingState FunctionInfo) { +} + type FunctionParameterInfo struct { // User-provided free-form text description. 
Comment types.String `tfsdk:"comment" tf:"optional"` @@ -1034,18 +1532,36 @@ type FunctionParameterInfo struct { TypeText types.String `tfsdk:"type_text" tf:""` } +func (newState *FunctionParameterInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan FunctionParameterInfo) { +} + +func (newState *FunctionParameterInfo) SyncEffectiveFieldsDuringRead(existingState FunctionParameterInfo) { +} + type FunctionParameterInfos struct { // The array of __FunctionParameterInfo__ definitions of the function's // parameters. Parameters []FunctionParameterInfo `tfsdk:"parameters" tf:"optional"` } +func (newState *FunctionParameterInfos) SyncEffectiveFieldsDuringCreateOrUpdate(plan FunctionParameterInfos) { +} + +func (newState *FunctionParameterInfos) SyncEffectiveFieldsDuringRead(existingState FunctionParameterInfos) { +} + // GCP temporary credentials for API authentication. Read more at // https://developers.google.com/identity/protocols/oauth2/service-account type GcpOauthToken struct { OauthToken types.String `tfsdk:"oauth_token" tf:"optional"` } +func (newState *GcpOauthToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpOauthToken) { +} + +func (newState *GcpOauthToken) SyncEffectiveFieldsDuringRead(existingState GcpOauthToken) { +} + type GenerateTemporaryTableCredentialRequest struct { // The operation performed against the table data, either READ or // READ_WRITE. If READ_WRITE is specified, the credentials returned will @@ -1055,6 +1571,12 @@ type GenerateTemporaryTableCredentialRequest struct { TableId types.String `tfsdk:"table_id" tf:"optional"` } +func (newState *GenerateTemporaryTableCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenerateTemporaryTableCredentialRequest) { +} + +func (newState *GenerateTemporaryTableCredentialRequest) SyncEffectiveFieldsDuringRead(existingState GenerateTemporaryTableCredentialRequest) { +} + type GenerateTemporaryTableCredentialResponse struct { // AWS temporary credentials for API authentication. Read more at // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. @@ -1075,18 +1597,36 @@ type GenerateTemporaryTableCredentialResponse struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *GenerateTemporaryTableCredentialResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenerateTemporaryTableCredentialResponse) { +} + +func (newState *GenerateTemporaryTableCredentialResponse) SyncEffectiveFieldsDuringRead(existingState GenerateTemporaryTableCredentialResponse) { +} + // Gets the metastore assignment for a workspace type GetAccountMetastoreAssignmentRequest struct { // Workspace ID. 
WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *GetAccountMetastoreAssignmentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAccountMetastoreAssignmentRequest) { +} + +func (newState *GetAccountMetastoreAssignmentRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountMetastoreAssignmentRequest) { +} + // Get a metastore type GetAccountMetastoreRequest struct { // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` } +func (newState *GetAccountMetastoreRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAccountMetastoreRequest) { +} + +func (newState *GetAccountMetastoreRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountMetastoreRequest) { +} + // Gets the named storage credential type GetAccountStorageCredentialRequest struct { // Unity Catalog metastore ID @@ -1095,12 +1635,24 @@ type GetAccountStorageCredentialRequest struct { StorageCredentialName types.String `tfsdk:"-"` } +func (newState *GetAccountStorageCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAccountStorageCredentialRequest) { +} + +func (newState *GetAccountStorageCredentialRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountStorageCredentialRequest) { +} + // Get an artifact allowlist type GetArtifactAllowlistRequest struct { // The artifact type of the allowlist. ArtifactType types.String `tfsdk:"-"` } +func (newState *GetArtifactAllowlistRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetArtifactAllowlistRequest) { +} + +func (newState *GetArtifactAllowlistRequest) SyncEffectiveFieldsDuringRead(existingState GetArtifactAllowlistRequest) { +} + // Get securable workspace bindings type GetBindingsRequest struct { // Maximum number of workspace bindings to return. - When set to 0, the page @@ -1118,6 +1670,12 @@ type GetBindingsRequest struct { SecurableType types.String `tfsdk:"-"` } +func (newState *GetBindingsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetBindingsRequest) { +} + +func (newState *GetBindingsRequest) SyncEffectiveFieldsDuringRead(existingState GetBindingsRequest) { +} + // Get Model Version By Alias type GetByAliasRequest struct { // The name of the alias @@ -1129,6 +1687,12 @@ type GetByAliasRequest struct { IncludeAliases types.Bool `tfsdk:"-"` } +func (newState *GetByAliasRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetByAliasRequest) { +} + +func (newState *GetByAliasRequest) SyncEffectiveFieldsDuringRead(existingState GetByAliasRequest) { +} + // Get a catalog type GetCatalogRequest struct { // Whether to include catalogs in the response for which the principal can @@ -1138,12 +1702,24 @@ type GetCatalogRequest struct { Name types.String `tfsdk:"-"` } +func (newState *GetCatalogRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCatalogRequest) { +} + +func (newState *GetCatalogRequest) SyncEffectiveFieldsDuringRead(existingState GetCatalogRequest) { +} + // Get a connection type GetConnectionRequest struct { // Name of the connection. Name types.String `tfsdk:"-"` } +func (newState *GetConnectionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetConnectionRequest) { +} + +func (newState *GetConnectionRequest) SyncEffectiveFieldsDuringRead(existingState GetConnectionRequest) { +} + // Get effective permissions type GetEffectiveRequest struct { // Full name of securable. 
@@ -1155,6 +1731,12 @@ type GetEffectiveRequest struct { SecurableType types.String `tfsdk:"-"` } +func (newState *GetEffectiveRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetEffectiveRequest) { +} + +func (newState *GetEffectiveRequest) SyncEffectiveFieldsDuringRead(existingState GetEffectiveRequest) { +} + // Get an external location type GetExternalLocationRequest struct { // Whether to include external locations in the response for which the @@ -1164,6 +1746,12 @@ type GetExternalLocationRequest struct { Name types.String `tfsdk:"-"` } +func (newState *GetExternalLocationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetExternalLocationRequest) { +} + +func (newState *GetExternalLocationRequest) SyncEffectiveFieldsDuringRead(existingState GetExternalLocationRequest) { +} + // Get a function type GetFunctionRequest struct { // Whether to include functions in the response for which the principal can @@ -1174,6 +1762,12 @@ type GetFunctionRequest struct { Name types.String `tfsdk:"-"` } +func (newState *GetFunctionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetFunctionRequest) { +} + +func (newState *GetFunctionRequest) SyncEffectiveFieldsDuringRead(existingState GetFunctionRequest) { +} + // Get permissions type GetGrantRequest struct { // Full name of securable. @@ -1185,12 +1779,24 @@ type GetGrantRequest struct { SecurableType types.String `tfsdk:"-"` } +func (newState *GetGrantRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetGrantRequest) { +} + +func (newState *GetGrantRequest) SyncEffectiveFieldsDuringRead(existingState GetGrantRequest) { +} + // Get a metastore type GetMetastoreRequest struct { // Unique ID of the metastore. Id types.String `tfsdk:"-"` } +func (newState *GetMetastoreRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetMetastoreRequest) { +} + +func (newState *GetMetastoreRequest) SyncEffectiveFieldsDuringRead(existingState GetMetastoreRequest) { +} + type GetMetastoreSummaryResponse struct { // Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`). Cloud types.String `tfsdk:"cloud" tf:"optional"` @@ -1236,6 +1842,12 @@ type GetMetastoreSummaryResponse struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *GetMetastoreSummaryResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetMetastoreSummaryResponse) { +} + +func (newState *GetMetastoreSummaryResponse) SyncEffectiveFieldsDuringRead(existingState GetMetastoreSummaryResponse) { +} + // Get a Model Version type GetModelVersionRequest struct { // The three-level (fully qualified) name of the model version @@ -1250,18 +1862,36 @@ type GetModelVersionRequest struct { Version types.Int64 `tfsdk:"-"` } +func (newState *GetModelVersionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetModelVersionRequest) { +} + +func (newState *GetModelVersionRequest) SyncEffectiveFieldsDuringRead(existingState GetModelVersionRequest) { +} + // Get an Online Table type GetOnlineTableRequest struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"-"` } +func (newState *GetOnlineTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetOnlineTableRequest) { +} + +func (newState *GetOnlineTableRequest) SyncEffectiveFieldsDuringRead(existingState GetOnlineTableRequest) { +} + // Get a table monitor type GetQualityMonitorRequest struct { // Full name of the table. 
TableName types.String `tfsdk:"-"` } +func (newState *GetQualityMonitorRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetQualityMonitorRequest) { +} + +func (newState *GetQualityMonitorRequest) SyncEffectiveFieldsDuringRead(existingState GetQualityMonitorRequest) { +} + // Get information for a single resource quota. type GetQuotaRequest struct { // Full name of the parent resource. Provide the metastore ID if the parent @@ -1274,11 +1904,23 @@ type GetQuotaRequest struct { QuotaName types.String `tfsdk:"-"` } +func (newState *GetQuotaRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetQuotaRequest) { +} + +func (newState *GetQuotaRequest) SyncEffectiveFieldsDuringRead(existingState GetQuotaRequest) { +} + type GetQuotaResponse struct { // The returned QuotaInfo. QuotaInfo []QuotaInfo `tfsdk:"quota_info" tf:"optional,object"` } +func (newState *GetQuotaResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetQuotaResponse) { +} + +func (newState *GetQuotaResponse) SyncEffectiveFieldsDuringRead(existingState GetQuotaResponse) { +} + // Get refresh type GetRefreshRequest struct { // ID of the refresh. @@ -1287,6 +1929,12 @@ type GetRefreshRequest struct { TableName types.String `tfsdk:"-"` } +func (newState *GetRefreshRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRefreshRequest) { +} + +func (newState *GetRefreshRequest) SyncEffectiveFieldsDuringRead(existingState GetRefreshRequest) { +} + // Get a Registered Model type GetRegisteredModelRequest struct { // The three-level (fully qualified) name of the registered model @@ -1298,6 +1946,12 @@ type GetRegisteredModelRequest struct { IncludeBrowse types.Bool `tfsdk:"-"` } +func (newState *GetRegisteredModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRegisteredModelRequest) { +} + +func (newState *GetRegisteredModelRequest) SyncEffectiveFieldsDuringRead(existingState GetRegisteredModelRequest) { +} + // Get a schema type GetSchemaRequest struct { // Full name of the schema. @@ -1307,12 +1961,24 @@ type GetSchemaRequest struct { IncludeBrowse types.Bool `tfsdk:"-"` } +func (newState *GetSchemaRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetSchemaRequest) { +} + +func (newState *GetSchemaRequest) SyncEffectiveFieldsDuringRead(existingState GetSchemaRequest) { +} + // Get a credential type GetStorageCredentialRequest struct { // Name of the storage credential. Name types.String `tfsdk:"-"` } +func (newState *GetStorageCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetStorageCredentialRequest) { +} + +func (newState *GetStorageCredentialRequest) SyncEffectiveFieldsDuringRead(existingState GetStorageCredentialRequest) { +} + // Get a table type GetTableRequest struct { // Full name of the table. @@ -1326,34 +1992,70 @@ type GetTableRequest struct { IncludeManifestCapabilities types.Bool `tfsdk:"-"` } +func (newState *GetTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetTableRequest) { +} + +func (newState *GetTableRequest) SyncEffectiveFieldsDuringRead(existingState GetTableRequest) { +} + // Get catalog workspace bindings type GetWorkspaceBindingRequest struct { // The name of the catalog. 
Name types.String `tfsdk:"-"` } +func (newState *GetWorkspaceBindingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWorkspaceBindingRequest) { +} + +func (newState *GetWorkspaceBindingRequest) SyncEffectiveFieldsDuringRead(existingState GetWorkspaceBindingRequest) { +} + // Get all workspaces assigned to a metastore type ListAccountMetastoreAssignmentsRequest struct { // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` } +func (newState *ListAccountMetastoreAssignmentsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAccountMetastoreAssignmentsRequest) { +} + +func (newState *ListAccountMetastoreAssignmentsRequest) SyncEffectiveFieldsDuringRead(existingState ListAccountMetastoreAssignmentsRequest) { +} + // The list of workspaces to which the given metastore is assigned. type ListAccountMetastoreAssignmentsResponse struct { WorkspaceIds []types.Int64 `tfsdk:"workspace_ids" tf:"optional"` } +func (newState *ListAccountMetastoreAssignmentsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAccountMetastoreAssignmentsResponse) { +} + +func (newState *ListAccountMetastoreAssignmentsResponse) SyncEffectiveFieldsDuringRead(existingState ListAccountMetastoreAssignmentsResponse) { +} + // Get all storage credentials assigned to a metastore type ListAccountStorageCredentialsRequest struct { // Unity Catalog metastore ID MetastoreId types.String `tfsdk:"-"` } +func (newState *ListAccountStorageCredentialsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAccountStorageCredentialsRequest) { +} + +func (newState *ListAccountStorageCredentialsRequest) SyncEffectiveFieldsDuringRead(existingState ListAccountStorageCredentialsRequest) { +} + type ListAccountStorageCredentialsResponse struct { // An array of metastore storage credentials. StorageCredentials []StorageCredentialInfo `tfsdk:"storage_credentials" tf:"optional"` } +func (newState *ListAccountStorageCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAccountStorageCredentialsResponse) { +} + +func (newState *ListAccountStorageCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState ListAccountStorageCredentialsResponse) { +} + // List catalogs type ListCatalogsRequest struct { // Whether to include catalogs in the response for which the principal can @@ -1373,6 +2075,12 @@ type ListCatalogsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListCatalogsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCatalogsRequest) { +} + +func (newState *ListCatalogsRequest) SyncEffectiveFieldsDuringRead(existingState ListCatalogsRequest) { +} + type ListCatalogsResponse struct { // An array of catalog information objects. Catalogs []CatalogInfo `tfsdk:"catalogs" tf:"optional"` @@ -1382,6 +2090,12 @@ type ListCatalogsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListCatalogsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCatalogsResponse) { +} + +func (newState *ListCatalogsResponse) SyncEffectiveFieldsDuringRead(existingState ListCatalogsResponse) { +} + // List connections type ListConnectionsRequest struct { // Maximum number of connections to return. 
- If not set, all connections @@ -1395,6 +2109,12 @@ type ListConnectionsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListConnectionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListConnectionsRequest) { +} + +func (newState *ListConnectionsRequest) SyncEffectiveFieldsDuringRead(existingState ListConnectionsRequest) { +} + type ListConnectionsResponse struct { // An array of connection information objects. Connections []ConnectionInfo `tfsdk:"connections" tf:"optional"` @@ -1404,6 +2124,12 @@ type ListConnectionsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListConnectionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListConnectionsResponse) { +} + +func (newState *ListConnectionsResponse) SyncEffectiveFieldsDuringRead(existingState ListConnectionsResponse) { +} + // List external locations type ListExternalLocationsRequest struct { // Whether to include external locations in the response for which the @@ -1420,6 +2146,12 @@ type ListExternalLocationsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListExternalLocationsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExternalLocationsRequest) { +} + +func (newState *ListExternalLocationsRequest) SyncEffectiveFieldsDuringRead(existingState ListExternalLocationsRequest) { +} + type ListExternalLocationsResponse struct { // An array of external locations. ExternalLocations []ExternalLocationInfo `tfsdk:"external_locations" tf:"optional"` @@ -1429,6 +2161,12 @@ type ListExternalLocationsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListExternalLocationsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExternalLocationsResponse) { +} + +func (newState *ListExternalLocationsResponse) SyncEffectiveFieldsDuringRead(existingState ListExternalLocationsResponse) { +} + // List functions type ListFunctionsRequest struct { // Name of parent catalog for functions of interest. @@ -1449,6 +2187,12 @@ type ListFunctionsRequest struct { SchemaName types.String `tfsdk:"-"` } +func (newState *ListFunctionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListFunctionsRequest) { +} + +func (newState *ListFunctionsRequest) SyncEffectiveFieldsDuringRead(existingState ListFunctionsRequest) { +} + type ListFunctionsResponse struct { // An array of function information objects. Functions []FunctionInfo `tfsdk:"functions" tf:"optional"` @@ -1458,11 +2202,23 @@ type ListFunctionsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListFunctionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListFunctionsResponse) { +} + +func (newState *ListFunctionsResponse) SyncEffectiveFieldsDuringRead(existingState ListFunctionsResponse) { +} + type ListMetastoresResponse struct { // An array of metastore information objects. 
Metastores []MetastoreInfo `tfsdk:"metastores" tf:"optional"` } +func (newState *ListMetastoresResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListMetastoresResponse) { +} + +func (newState *ListMetastoresResponse) SyncEffectiveFieldsDuringRead(existingState ListMetastoresResponse) { +} + // List Model Versions type ListModelVersionsRequest struct { // The full three-level name of the registered model under which to list @@ -1483,6 +2239,12 @@ type ListModelVersionsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListModelVersionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListModelVersionsRequest) { +} + +func (newState *ListModelVersionsRequest) SyncEffectiveFieldsDuringRead(existingState ListModelVersionsRequest) { +} + type ListModelVersionsResponse struct { ModelVersions []ModelVersionInfo `tfsdk:"model_versions" tf:"optional"` // Opaque token to retrieve the next page of results. Absent if there are no @@ -1491,6 +2253,12 @@ type ListModelVersionsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListModelVersionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListModelVersionsResponse) { +} + +func (newState *ListModelVersionsResponse) SyncEffectiveFieldsDuringRead(existingState ListModelVersionsResponse) { +} + // List all resource quotas under a metastore. type ListQuotasRequest struct { // The number of quotas to return. @@ -1499,6 +2267,12 @@ type ListQuotasRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListQuotasRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListQuotasRequest) { +} + +func (newState *ListQuotasRequest) SyncEffectiveFieldsDuringRead(existingState ListQuotasRequest) { +} + type ListQuotasResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -1508,12 +2282,24 @@ type ListQuotasResponse struct { Quotas []QuotaInfo `tfsdk:"quotas" tf:"optional"` } +func (newState *ListQuotasResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListQuotasResponse) { +} + +func (newState *ListQuotasResponse) SyncEffectiveFieldsDuringRead(existingState ListQuotasResponse) { +} + // List refreshes type ListRefreshesRequest struct { // Full name of the table. TableName types.String `tfsdk:"-"` } +func (newState *ListRefreshesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListRefreshesRequest) { +} + +func (newState *ListRefreshesRequest) SyncEffectiveFieldsDuringRead(existingState ListRefreshesRequest) { +} + // List Registered Models type ListRegisteredModelsRequest struct { // The identifier of the catalog under which to list registered models. If @@ -1547,6 +2333,12 @@ type ListRegisteredModelsRequest struct { SchemaName types.String `tfsdk:"-"` } +func (newState *ListRegisteredModelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListRegisteredModelsRequest) { +} + +func (newState *ListRegisteredModelsRequest) SyncEffectiveFieldsDuringRead(existingState ListRegisteredModelsRequest) { +} + type ListRegisteredModelsResponse struct { // Opaque token for pagination. Omitted if there are no more results. // page_token should be set to this value for fetching the next page. 
@@ -1555,6 +2347,12 @@ type ListRegisteredModelsResponse struct { RegisteredModels []RegisteredModelInfo `tfsdk:"registered_models" tf:"optional"` } +func (newState *ListRegisteredModelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListRegisteredModelsResponse) { +} + +func (newState *ListRegisteredModelsResponse) SyncEffectiveFieldsDuringRead(existingState ListRegisteredModelsResponse) { +} + // List schemas type ListSchemasRequest struct { // Parent catalog for schemas of interest. @@ -1573,6 +2371,12 @@ type ListSchemasRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListSchemasRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSchemasRequest) { +} + +func (newState *ListSchemasRequest) SyncEffectiveFieldsDuringRead(existingState ListSchemasRequest) { +} + type ListSchemasResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -1582,6 +2386,12 @@ type ListSchemasResponse struct { Schemas []SchemaInfo `tfsdk:"schemas" tf:"optional"` } +func (newState *ListSchemasResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSchemasResponse) { +} + +func (newState *ListSchemasResponse) SyncEffectiveFieldsDuringRead(existingState ListSchemasResponse) { +} + // List credentials type ListStorageCredentialsRequest struct { // Maximum number of storage credentials to return. If not set, all the @@ -1595,6 +2405,12 @@ type ListStorageCredentialsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListStorageCredentialsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListStorageCredentialsRequest) { +} + +func (newState *ListStorageCredentialsRequest) SyncEffectiveFieldsDuringRead(existingState ListStorageCredentialsRequest) { +} + type ListStorageCredentialsResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -1604,6 +2420,12 @@ type ListStorageCredentialsResponse struct { StorageCredentials []StorageCredentialInfo `tfsdk:"storage_credentials" tf:"optional"` } +func (newState *ListStorageCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListStorageCredentialsResponse) { +} + +func (newState *ListStorageCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState ListStorageCredentialsResponse) { +} + // List table summaries type ListSummariesRequest struct { // Name of parent catalog for tables of interest. @@ -1628,6 +2450,12 @@ type ListSummariesRequest struct { TableNamePattern types.String `tfsdk:"-"` } +func (newState *ListSummariesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSummariesRequest) { +} + +func (newState *ListSummariesRequest) SyncEffectiveFieldsDuringRead(existingState ListSummariesRequest) { +} + // List system schemas type ListSystemSchemasRequest struct { // Maximum number of schemas to return. - When set to 0, the page length is @@ -1643,6 +2471,12 @@ type ListSystemSchemasRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListSystemSchemasRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSystemSchemasRequest) { +} + +func (newState *ListSystemSchemasRequest) SyncEffectiveFieldsDuringRead(existingState ListSystemSchemasRequest) { +} + type ListSystemSchemasResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. 
__page_token__ should be set to this value for the next @@ -1652,6 +2486,12 @@ type ListSystemSchemasResponse struct { Schemas []SystemSchemaInfo `tfsdk:"schemas" tf:"optional"` } +func (newState *ListSystemSchemasResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSystemSchemasResponse) { +} + +func (newState *ListSystemSchemasResponse) SyncEffectiveFieldsDuringRead(existingState ListSystemSchemasResponse) { +} + type ListTableSummariesResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -1661,6 +2501,12 @@ type ListTableSummariesResponse struct { Tables []TableSummary `tfsdk:"tables" tf:"optional"` } +func (newState *ListTableSummariesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListTableSummariesResponse) { +} + +func (newState *ListTableSummariesResponse) SyncEffectiveFieldsDuringRead(existingState ListTableSummariesResponse) { +} + // List tables type ListTablesRequest struct { // Name of parent catalog for tables of interest. @@ -1689,6 +2535,12 @@ type ListTablesRequest struct { SchemaName types.String `tfsdk:"-"` } +func (newState *ListTablesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListTablesRequest) { +} + +func (newState *ListTablesRequest) SyncEffectiveFieldsDuringRead(existingState ListTablesRequest) { +} + type ListTablesResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -1698,6 +2550,12 @@ type ListTablesResponse struct { Tables []TableInfo `tfsdk:"tables" tf:"optional"` } +func (newState *ListTablesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListTablesResponse) { +} + +func (newState *ListTablesResponse) SyncEffectiveFieldsDuringRead(existingState ListTablesResponse) { +} + // List Volumes type ListVolumesRequest struct { // The identifier of the catalog @@ -1725,6 +2583,12 @@ type ListVolumesRequest struct { SchemaName types.String `tfsdk:"-"` } +func (newState *ListVolumesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListVolumesRequest) { +} + +func (newState *ListVolumesRequest) SyncEffectiveFieldsDuringRead(existingState ListVolumesRequest) { +} + type ListVolumesResponseContent struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -1734,6 +2598,12 @@ type ListVolumesResponseContent struct { Volumes []VolumeInfo `tfsdk:"volumes" tf:"optional"` } +func (newState *ListVolumesResponseContent) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListVolumesResponseContent) { +} + +func (newState *ListVolumesResponseContent) SyncEffectiveFieldsDuringRead(existingState ListVolumesResponseContent) { +} + type MetastoreAssignment struct { // The name of the default catalog in the metastore. DefaultCatalogName types.String `tfsdk:"default_catalog_name" tf:"optional"` @@ -1743,6 +2613,12 @@ type MetastoreAssignment struct { WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:""` } +func (newState *MetastoreAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan MetastoreAssignment) { +} + +func (newState *MetastoreAssignment) SyncEffectiveFieldsDuringRead(existingState MetastoreAssignment) { +} + type MetastoreInfo struct { // Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`). 
Cloud types.String `tfsdk:"cloud" tf:"optional"` @@ -1788,6 +2664,12 @@ type MetastoreInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *MetastoreInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan MetastoreInfo) { +} + +func (newState *MetastoreInfo) SyncEffectiveFieldsDuringRead(existingState MetastoreInfo) { +} + type ModelVersionInfo struct { // List of aliases associated with the model version Aliases []RegisteredModelAlias `tfsdk:"aliases" tf:"optional"` @@ -1841,6 +2723,12 @@ type ModelVersionInfo struct { Version types.Int64 `tfsdk:"version" tf:"optional"` } +func (newState *ModelVersionInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ModelVersionInfo) { +} + +func (newState *ModelVersionInfo) SyncEffectiveFieldsDuringRead(existingState ModelVersionInfo) { +} + type MonitorCronSchedule struct { // Read only field that indicates whether a schedule is paused or not. PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` @@ -1853,17 +2741,35 @@ type MonitorCronSchedule struct { TimezoneId types.String `tfsdk:"timezone_id" tf:""` } +func (newState *MonitorCronSchedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorCronSchedule) { +} + +func (newState *MonitorCronSchedule) SyncEffectiveFieldsDuringRead(existingState MonitorCronSchedule) { +} + type MonitorDataClassificationConfig struct { // Whether data classification is enabled. Enabled types.Bool `tfsdk:"enabled" tf:"optional"` } +func (newState *MonitorDataClassificationConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorDataClassificationConfig) { +} + +func (newState *MonitorDataClassificationConfig) SyncEffectiveFieldsDuringRead(existingState MonitorDataClassificationConfig) { +} + type MonitorDestination struct { // The list of email addresses to send the notification to. A maximum of 5 // email addresses is supported. EmailAddresses []types.String `tfsdk:"email_addresses" tf:"optional"` } +func (newState *MonitorDestination) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorDestination) { +} + +func (newState *MonitorDestination) SyncEffectiveFieldsDuringRead(existingState MonitorDestination) { +} + type MonitorInferenceLog struct { // Granularities for aggregating data into time windows based on their // timestamp. Currently the following static granularities are supported: @@ -1895,6 +2801,12 @@ type MonitorInferenceLog struct { TimestampCol types.String `tfsdk:"timestamp_col" tf:""` } +func (newState *MonitorInferenceLog) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorInferenceLog) { +} + +func (newState *MonitorInferenceLog) SyncEffectiveFieldsDuringRead(existingState MonitorInferenceLog) { +} + type MonitorInfo struct { // The directory to store monitoring assets (e.g. dashboard, metric tables). AssetsDir types.String `tfsdk:"assets_dir" tf:"optional"` @@ -1947,6 +2859,12 @@ type MonitorInfo struct { TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional,object"` } +func (newState *MonitorInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorInfo) { +} + +func (newState *MonitorInfo) SyncEffectiveFieldsDuringRead(existingState MonitorInfo) { +} + type MonitorMetric struct { // Jinja template for a SQL expression that specifies how to compute the // metric. See [create metric definition]. 
@@ -1974,6 +2892,12 @@ type MonitorMetric struct { Type types.String `tfsdk:"type" tf:""` } +func (newState *MonitorMetric) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorMetric) { +} + +func (newState *MonitorMetric) SyncEffectiveFieldsDuringRead(existingState MonitorMetric) { +} + type MonitorNotifications struct { // Who to send notifications to on monitor failure. OnFailure []MonitorDestination `tfsdk:"on_failure" tf:"optional,object"` @@ -1982,6 +2906,12 @@ type MonitorNotifications struct { OnNewClassificationTagDetected []MonitorDestination `tfsdk:"on_new_classification_tag_detected" tf:"optional,object"` } +func (newState *MonitorNotifications) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorNotifications) { +} + +func (newState *MonitorNotifications) SyncEffectiveFieldsDuringRead(existingState MonitorNotifications) { +} + type MonitorRefreshInfo struct { // Time at which refresh operation completed (milliseconds since 1/1/1970 // UTC). @@ -2000,14 +2930,32 @@ type MonitorRefreshInfo struct { Trigger types.String `tfsdk:"trigger" tf:"optional"` } +func (newState *MonitorRefreshInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorRefreshInfo) { +} + +func (newState *MonitorRefreshInfo) SyncEffectiveFieldsDuringRead(existingState MonitorRefreshInfo) { +} + type MonitorRefreshListResponse struct { // List of refreshes. Refreshes []MonitorRefreshInfo `tfsdk:"refreshes" tf:"optional"` } +func (newState *MonitorRefreshListResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorRefreshListResponse) { +} + +func (newState *MonitorRefreshListResponse) SyncEffectiveFieldsDuringRead(existingState MonitorRefreshListResponse) { +} + type MonitorSnapshot struct { } +func (newState *MonitorSnapshot) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorSnapshot) { +} + +func (newState *MonitorSnapshot) SyncEffectiveFieldsDuringRead(existingState MonitorSnapshot) { +} + type MonitorTimeSeries struct { // Granularities for aggregating data into time windows based on their // timestamp. Currently the following static granularities are supported: @@ -2023,21 +2971,50 @@ type MonitorTimeSeries struct { TimestampCol types.String `tfsdk:"timestamp_col" tf:""` } +func (newState *MonitorTimeSeries) SyncEffectiveFieldsDuringCreateOrUpdate(plan MonitorTimeSeries) { +} + +func (newState *MonitorTimeSeries) SyncEffectiveFieldsDuringRead(existingState MonitorTimeSeries) { +} + type NamedTableConstraint struct { // The name of the constraint. Name types.String `tfsdk:"name" tf:""` } +func (newState *NamedTableConstraint) SyncEffectiveFieldsDuringCreateOrUpdate(plan NamedTableConstraint) { +} + +func (newState *NamedTableConstraint) SyncEffectiveFieldsDuringRead(existingState NamedTableConstraint) { +} + // Online Table information. type OnlineTable struct { // Full three-part (catalog, schema, table) name of the table. Name types.String `tfsdk:"name" tf:"optional"` // Specification of the online table. Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` - // Online Table status + // Online Table data synchronization status Status []OnlineTableStatus `tfsdk:"status" tf:"optional,object"` // Data serving REST API URL for this table - TableServingUrl types.String `tfsdk:"table_serving_url" tf:"optional"` + TableServingUrl types.String `tfsdk:"table_serving_url" tf:"optional"` + EffectiveTableServingUrl types.String `tfsdk:"effective_table_serving_url" tf:"computed,optional"` + // The provisioning state of the online table entity in Unity Catalog. 
This + // is distinct from the state of the data synchronization pipeline (i.e. the + // table may be in "ACTIVE" but the pipeline may be in "PROVISIONING" as it + // runs asynchronously). + UnityCatalogProvisioningState types.String `tfsdk:"unity_catalog_provisioning_state" tf:"optional"` +} + +func (newState *OnlineTable) SyncEffectiveFieldsDuringCreateOrUpdate(plan OnlineTable) { + newState.EffectiveTableServingUrl = newState.TableServingUrl + newState.TableServingUrl = plan.TableServingUrl +} + +func (newState *OnlineTable) SyncEffectiveFieldsDuringRead(existingState OnlineTable) { + if existingState.EffectiveTableServingUrl.ValueString() == newState.TableServingUrl.ValueString() { + newState.TableServingUrl = existingState.TableServingUrl + } } // Specification of an online table. @@ -2052,7 +3029,8 @@ type OnlineTableSpec struct { PerformFullCopy types.Bool `tfsdk:"perform_full_copy" tf:"optional"` // ID of the associated pipeline. Generated by the server - cannot be set by // the caller. - PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` + PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` + EffectivePipelineId types.String `tfsdk:"effective_pipeline_id" tf:"computed,optional"` // Primary Key columns to be used for data insert/update in the destination. PrimaryKeyColumns []types.String `tfsdk:"primary_key_columns" tf:"optional"` // Pipeline runs continuously after generating the initial data. @@ -2067,12 +3045,35 @@ type OnlineTableSpec struct { TimeseriesKey types.String `tfsdk:"timeseries_key" tf:"optional"` } +func (newState *OnlineTableSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan OnlineTableSpec) { + newState.EffectivePipelineId = newState.PipelineId + newState.PipelineId = plan.PipelineId +} + +func (newState *OnlineTableSpec) SyncEffectiveFieldsDuringRead(existingState OnlineTableSpec) { + if existingState.EffectivePipelineId.ValueString() == newState.PipelineId.ValueString() { + newState.PipelineId = existingState.PipelineId + } +} + type OnlineTableSpecContinuousSchedulingPolicy struct { } +func (newState *OnlineTableSpecContinuousSchedulingPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan OnlineTableSpecContinuousSchedulingPolicy) { +} + +func (newState *OnlineTableSpecContinuousSchedulingPolicy) SyncEffectiveFieldsDuringRead(existingState OnlineTableSpecContinuousSchedulingPolicy) { +} + type OnlineTableSpecTriggeredSchedulingPolicy struct { } +func (newState *OnlineTableSpecTriggeredSchedulingPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan OnlineTableSpecTriggeredSchedulingPolicy) { +} + +func (newState *OnlineTableSpecTriggeredSchedulingPolicy) SyncEffectiveFieldsDuringRead(existingState OnlineTableSpecTriggeredSchedulingPolicy) { +} + // Status of an online table. type OnlineTableStatus struct { // Detailed status of an online table. Shown if the online table is in the @@ -2094,6 +3095,12 @@ type OnlineTableStatus struct { TriggeredUpdateStatus []TriggeredUpdateStatus `tfsdk:"triggered_update_status" tf:"optional,object"` } +func (newState *OnlineTableStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan OnlineTableStatus) { +} + +func (newState *OnlineTableStatus) SyncEffectiveFieldsDuringRead(existingState OnlineTableStatus) { +} + type PermissionsChange struct { // The set of privileges to add. 
Add []types.String `tfsdk:"add" tf:"optional"` @@ -2103,11 +3110,23 @@ type PermissionsChange struct { Remove []types.String `tfsdk:"remove" tf:"optional"` } +func (newState *PermissionsChange) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermissionsChange) { +} + +func (newState *PermissionsChange) SyncEffectiveFieldsDuringRead(existingState PermissionsChange) { +} + type PermissionsList struct { // The privileges assigned to each principal PrivilegeAssignments []PrivilegeAssignment `tfsdk:"privilege_assignments" tf:"optional"` } +func (newState *PermissionsList) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermissionsList) { +} + +func (newState *PermissionsList) SyncEffectiveFieldsDuringRead(existingState PermissionsList) { +} + // Progress information of the Online Table data synchronization pipeline. type PipelineProgress struct { // The estimated time remaining to complete this update in seconds. @@ -2124,6 +3143,12 @@ type PipelineProgress struct { TotalRowCount types.Int64 `tfsdk:"total_row_count" tf:"optional"` } +func (newState *PipelineProgress) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineProgress) { +} + +func (newState *PipelineProgress) SyncEffectiveFieldsDuringRead(existingState PipelineProgress) { +} + type PrimaryKeyConstraint struct { // Column names for this constraint. ChildColumns []types.String `tfsdk:"child_columns" tf:""` @@ -2131,6 +3156,12 @@ type PrimaryKeyConstraint struct { Name types.String `tfsdk:"name" tf:""` } +func (newState *PrimaryKeyConstraint) SyncEffectiveFieldsDuringCreateOrUpdate(plan PrimaryKeyConstraint) { +} + +func (newState *PrimaryKeyConstraint) SyncEffectiveFieldsDuringRead(existingState PrimaryKeyConstraint) { +} + type PrivilegeAssignment struct { // The principal (user email address or group name). Principal types.String `tfsdk:"principal" tf:"optional"` @@ -2138,11 +3169,23 @@ type PrivilegeAssignment struct { Privileges []types.String `tfsdk:"privileges" tf:"optional"` } +func (newState *PrivilegeAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan PrivilegeAssignment) { +} + +func (newState *PrivilegeAssignment) SyncEffectiveFieldsDuringRead(existingState PrivilegeAssignment) { +} + // Status of an asynchronously provisioned resource. type ProvisioningInfo struct { State types.String `tfsdk:"state" tf:"optional"` } +func (newState *ProvisioningInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ProvisioningInfo) { +} + +func (newState *ProvisioningInfo) SyncEffectiveFieldsDuringRead(existingState ProvisioningInfo) { +} + // Detailed status of an online table. Shown if the online table is in the // PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state. type ProvisioningStatus struct { @@ -2151,6 +3194,12 @@ type ProvisioningStatus struct { InitialPipelineSyncProgress []PipelineProgress `tfsdk:"initial_pipeline_sync_progress" tf:"optional,object"` } +func (newState *ProvisioningStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan ProvisioningStatus) { +} + +func (newState *ProvisioningStatus) SyncEffectiveFieldsDuringRead(existingState ProvisioningStatus) { +} + type QuotaInfo struct { // The timestamp that indicates when the quota count was last updated. 
LastRefreshedAt types.Int64 `tfsdk:"last_refreshed_at" tf:"optional"` @@ -2167,6 +3216,12 @@ type QuotaInfo struct { QuotaName types.String `tfsdk:"quota_name" tf:"optional"` } +func (newState *QuotaInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan QuotaInfo) { +} + +func (newState *QuotaInfo) SyncEffectiveFieldsDuringRead(existingState QuotaInfo) { +} + // R2 temporary credentials for API authentication. Read more at // https://developers.cloudflare.com/r2/api/s3/tokens/. type R2Credentials struct { @@ -2178,6 +3233,12 @@ type R2Credentials struct { SessionToken types.String `tfsdk:"session_token" tf:"optional"` } +func (newState *R2Credentials) SyncEffectiveFieldsDuringCreateOrUpdate(plan R2Credentials) { +} + +func (newState *R2Credentials) SyncEffectiveFieldsDuringRead(existingState R2Credentials) { +} + // Get a Volume type ReadVolumeRequest struct { // Whether to include volumes in the response for which the principal can @@ -2187,6 +3248,12 @@ type ReadVolumeRequest struct { Name types.String `tfsdk:"-"` } +func (newState *ReadVolumeRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReadVolumeRequest) { +} + +func (newState *ReadVolumeRequest) SyncEffectiveFieldsDuringRead(existingState ReadVolumeRequest) { +} + type RegenerateDashboardRequest struct { // Full name of the table. TableName types.String `tfsdk:"-"` @@ -2195,6 +3262,12 @@ type RegenerateDashboardRequest struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *RegenerateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegenerateDashboardRequest) { +} + +func (newState *RegenerateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState RegenerateDashboardRequest) { +} + type RegenerateDashboardResponse struct { // Id of the regenerated monitoring dashboard. DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` @@ -2202,6 +3275,12 @@ type RegenerateDashboardResponse struct { ParentFolder types.String `tfsdk:"parent_folder" tf:"optional"` } +func (newState *RegenerateDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegenerateDashboardResponse) { +} + +func (newState *RegenerateDashboardResponse) SyncEffectiveFieldsDuringRead(existingState RegenerateDashboardResponse) { +} + // Registered model alias. type RegisteredModelAlias struct { // Name of the alias, e.g. 'champion' or 'latest_stable' @@ -2210,6 +3289,12 @@ type RegisteredModelAlias struct { VersionNum types.Int64 `tfsdk:"version_num" tf:"optional"` } +func (newState *RegisteredModelAlias) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegisteredModelAlias) { +} + +func (newState *RegisteredModelAlias) SyncEffectiveFieldsDuringRead(existingState RegisteredModelAlias) { +} + type RegisteredModelInfo struct { // List of aliases associated with the registered model Aliases []RegisteredModelAlias `tfsdk:"aliases" tf:"optional"` @@ -2246,12 +3331,24 @@ type RegisteredModelInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *RegisteredModelInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegisteredModelInfo) { +} + +func (newState *RegisteredModelInfo) SyncEffectiveFieldsDuringRead(existingState RegisteredModelInfo) { +} + // Queue a metric refresh for a monitor type RunRefreshRequest struct { // Full name of the table. 
TableName types.String `tfsdk:"-"` } +func (newState *RunRefreshRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunRefreshRequest) { +} + +func (newState *RunRefreshRequest) SyncEffectiveFieldsDuringRead(existingState RunRefreshRequest) { +} + type SchemaInfo struct { // Indicates whether the principal is limited to retrieving metadata for the // associated object through the BROWSE privilege when include_browse is @@ -2294,6 +3391,12 @@ type SchemaInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *SchemaInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan SchemaInfo) { +} + +func (newState *SchemaInfo) SyncEffectiveFieldsDuringRead(existingState SchemaInfo) { +} + type SetArtifactAllowlist struct { // A list of allowed artifact match patterns. ArtifactMatchers []ArtifactMatcher `tfsdk:"artifact_matchers" tf:""` @@ -2301,6 +3404,12 @@ type SetArtifactAllowlist struct { ArtifactType types.String `tfsdk:"-"` } +func (newState *SetArtifactAllowlist) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetArtifactAllowlist) { +} + +func (newState *SetArtifactAllowlist) SyncEffectiveFieldsDuringRead(existingState SetArtifactAllowlist) { +} + type SetRegisteredModelAliasRequest struct { // The name of the alias Alias types.String `tfsdk:"alias" tf:""` @@ -2310,6 +3419,12 @@ type SetRegisteredModelAliasRequest struct { VersionNum types.Int64 `tfsdk:"version_num" tf:""` } +func (newState *SetRegisteredModelAliasRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetRegisteredModelAliasRequest) { +} + +func (newState *SetRegisteredModelAliasRequest) SyncEffectiveFieldsDuringRead(existingState SetRegisteredModelAliasRequest) { +} + // Server-Side Encryption properties for clients communicating with AWS s3. type SseEncryptionDetails struct { // The type of key encryption to use (affects headers from s3 client). @@ -2319,6 +3434,12 @@ type SseEncryptionDetails struct { AwsKmsKeyArn types.String `tfsdk:"aws_kms_key_arn" tf:"optional"` } +func (newState *SseEncryptionDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan SseEncryptionDetails) { +} + +func (newState *SseEncryptionDetails) SyncEffectiveFieldsDuringRead(existingState SseEncryptionDetails) { +} + type StorageCredentialInfo struct { // The AWS IAM role configuration. AwsIamRole []AwsIamRoleResponse `tfsdk:"aws_iam_role" tf:"optional,object"` @@ -2358,6 +3479,12 @@ type StorageCredentialInfo struct { UsedForManagedStorage types.Bool `tfsdk:"used_for_managed_storage" tf:"optional"` } +func (newState *StorageCredentialInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan StorageCredentialInfo) { +} + +func (newState *StorageCredentialInfo) SyncEffectiveFieldsDuringRead(existingState StorageCredentialInfo) { +} + type SystemSchemaInfo struct { // Name of the system schema. Schema types.String `tfsdk:"schema" tf:"optional"` @@ -2366,6 +3493,12 @@ type SystemSchemaInfo struct { State types.String `tfsdk:"state" tf:"optional"` } +func (newState *SystemSchemaInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan SystemSchemaInfo) { +} + +func (newState *SystemSchemaInfo) SyncEffectiveFieldsDuringRead(existingState SystemSchemaInfo) { +} + // A table constraint, as defined by *one* of the following fields being set: // __primary_key_constraint__, __foreign_key_constraint__, // __named_table_constraint__. 
@@ -2377,6 +3510,12 @@ type TableConstraint struct { PrimaryKeyConstraint []PrimaryKeyConstraint `tfsdk:"primary_key_constraint" tf:"optional,object"` } +func (newState *TableConstraint) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableConstraint) { +} + +func (newState *TableConstraint) SyncEffectiveFieldsDuringRead(existingState TableConstraint) { +} + // A table that is dependent on a SQL object. type TableDependency struct { // Full name of the dependent table, in the form of @@ -2384,11 +3523,23 @@ type TableDependency struct { TableFullName types.String `tfsdk:"table_full_name" tf:""` } +func (newState *TableDependency) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableDependency) { +} + +func (newState *TableDependency) SyncEffectiveFieldsDuringRead(existingState TableDependency) { +} + type TableExistsResponse struct { // Whether the table exists or not. TableExists types.Bool `tfsdk:"table_exists" tf:"optional"` } +func (newState *TableExistsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableExistsResponse) { +} + +func (newState *TableExistsResponse) SyncEffectiveFieldsDuringRead(existingState TableExistsResponse) { +} + type TableInfo struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -2469,6 +3620,12 @@ type TableInfo struct { ViewDependencies []DependencyList `tfsdk:"view_dependencies" tf:"optional,object"` } +func (newState *TableInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableInfo) { +} + +func (newState *TableInfo) SyncEffectiveFieldsDuringRead(existingState TableInfo) { +} + type TableRowFilter struct { // The full name of the row filter SQL UDF. FunctionName types.String `tfsdk:"function_name" tf:""` @@ -2478,6 +3635,12 @@ type TableRowFilter struct { InputColumnNames []types.String `tfsdk:"input_column_names" tf:""` } +func (newState *TableRowFilter) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableRowFilter) { +} + +func (newState *TableRowFilter) SyncEffectiveFieldsDuringRead(existingState TableRowFilter) { +} + type TableSummary struct { // The full name of the table. FullName types.String `tfsdk:"full_name" tf:"optional"` @@ -2485,6 +3648,12 @@ type TableSummary struct { TableType types.String `tfsdk:"table_type" tf:"optional"` } +func (newState *TableSummary) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableSummary) { +} + +func (newState *TableSummary) SyncEffectiveFieldsDuringRead(existingState TableSummary) { +} + // Detailed status of an online table. Shown if the online table is in the // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. type TriggeredUpdateStatus struct { @@ -2499,6 +3668,12 @@ type TriggeredUpdateStatus struct { TriggeredUpdateProgress []PipelineProgress `tfsdk:"triggered_update_progress" tf:"optional,object"` } +func (newState *TriggeredUpdateStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan TriggeredUpdateStatus) { +} + +func (newState *TriggeredUpdateStatus) SyncEffectiveFieldsDuringRead(existingState TriggeredUpdateStatus) { +} + // Delete an assignment type UnassignRequest struct { // Query for the ID of the metastore to delete. 
@@ -2507,12 +3682,30 @@ type UnassignRequest struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *UnassignRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UnassignRequest) { +} + +func (newState *UnassignRequest) SyncEffectiveFieldsDuringRead(existingState UnassignRequest) { +} + type UnassignResponse struct { } +func (newState *UnassignResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UnassignResponse) { +} + +func (newState *UnassignResponse) SyncEffectiveFieldsDuringRead(existingState UnassignResponse) { +} + type UpdateAssignmentResponse struct { } +func (newState *UpdateAssignmentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAssignmentResponse) { +} + +func (newState *UpdateAssignmentResponse) SyncEffectiveFieldsDuringRead(existingState UpdateAssignmentResponse) { +} + type UpdateCatalog struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -2532,6 +3725,12 @@ type UpdateCatalog struct { Properties map[string]types.String `tfsdk:"properties" tf:"optional"` } +func (newState *UpdateCatalog) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCatalog) { +} + +func (newState *UpdateCatalog) SyncEffectiveFieldsDuringRead(existingState UpdateCatalog) { +} + type UpdateConnection struct { // Name of the connection. Name types.String `tfsdk:"-"` @@ -2543,6 +3742,12 @@ type UpdateConnection struct { Owner types.String `tfsdk:"owner" tf:"optional"` } +func (newState *UpdateConnection) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateConnection) { +} + +func (newState *UpdateConnection) SyncEffectiveFieldsDuringRead(existingState UpdateConnection) { +} + type UpdateExternalLocation struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -2577,6 +3782,12 @@ type UpdateExternalLocation struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *UpdateExternalLocation) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateExternalLocation) { +} + +func (newState *UpdateExternalLocation) SyncEffectiveFieldsDuringRead(existingState UpdateExternalLocation) { +} + type UpdateFunction struct { // The fully-qualified name of the function (of the form // __catalog_name__.__schema_name__.__function__name__). @@ -2585,6 +3796,12 @@ type UpdateFunction struct { Owner types.String `tfsdk:"owner" tf:"optional"` } +func (newState *UpdateFunction) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateFunction) { +} + +func (newState *UpdateFunction) SyncEffectiveFieldsDuringRead(existingState UpdateFunction) { +} + type UpdateMetastore struct { // The organization name of a Delta Sharing entity, to be used in // Databricks-to-Databricks Delta Sharing as the official name. @@ -2606,6 +3823,12 @@ type UpdateMetastore struct { StorageRootCredentialId types.String `tfsdk:"storage_root_credential_id" tf:"optional"` } +func (newState *UpdateMetastore) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateMetastore) { +} + +func (newState *UpdateMetastore) SyncEffectiveFieldsDuringRead(existingState UpdateMetastore) { +} + type UpdateMetastoreAssignment struct { // The name of the default catalog in the metastore. This field is // depracted. 
Please use "Default Namespace API" to configure the default @@ -2617,6 +3840,12 @@ type UpdateMetastoreAssignment struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *UpdateMetastoreAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateMetastoreAssignment) { +} + +func (newState *UpdateMetastoreAssignment) SyncEffectiveFieldsDuringRead(existingState UpdateMetastoreAssignment) { +} + type UpdateModelVersionRequest struct { // The comment attached to the model version Comment types.String `tfsdk:"comment" tf:"optional"` @@ -2626,6 +3855,12 @@ type UpdateModelVersionRequest struct { Version types.Int64 `tfsdk:"-"` } +func (newState *UpdateModelVersionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateModelVersionRequest) { +} + +func (newState *UpdateModelVersionRequest) SyncEffectiveFieldsDuringRead(existingState UpdateModelVersionRequest) { +} + type UpdateMonitor struct { // Name of the baseline table from which drift metrics are computed from. // Columns in the monitored table should also be present in the baseline @@ -2662,6 +3897,12 @@ type UpdateMonitor struct { TimeSeries []MonitorTimeSeries `tfsdk:"time_series" tf:"optional,object"` } +func (newState *UpdateMonitor) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateMonitor) { +} + +func (newState *UpdateMonitor) SyncEffectiveFieldsDuringRead(existingState UpdateMonitor) { +} + type UpdatePermissions struct { // Array of permissions change objects. Changes []PermissionsChange `tfsdk:"changes" tf:"optional"` @@ -2671,6 +3912,12 @@ type UpdatePermissions struct { SecurableType types.String `tfsdk:"-"` } +func (newState *UpdatePermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdatePermissions) { +} + +func (newState *UpdatePermissions) SyncEffectiveFieldsDuringRead(existingState UpdatePermissions) { +} + type UpdateRegisteredModelRequest struct { // The comment attached to the registered model Comment types.String `tfsdk:"comment" tf:"optional"` @@ -2682,9 +3929,21 @@ type UpdateRegisteredModelRequest struct { Owner types.String `tfsdk:"owner" tf:"optional"` } +func (newState *UpdateRegisteredModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRegisteredModelRequest) { +} + +func (newState *UpdateRegisteredModelRequest) SyncEffectiveFieldsDuringRead(existingState UpdateRegisteredModelRequest) { +} + type UpdateResponse struct { } +func (newState *UpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateResponse) { +} + +func (newState *UpdateResponse) SyncEffectiveFieldsDuringRead(existingState UpdateResponse) { +} + type UpdateSchema struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -2701,6 +3960,12 @@ type UpdateSchema struct { Properties map[string]types.String `tfsdk:"properties" tf:"optional"` } +func (newState *UpdateSchema) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateSchema) { +} + +func (newState *UpdateSchema) SyncEffectiveFieldsDuringRead(existingState UpdateSchema) { +} + type UpdateStorageCredential struct { // The AWS IAM role configuration. AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` @@ -2733,6 +3998,12 @@ type UpdateStorageCredential struct { SkipValidation types.Bool `tfsdk:"skip_validation" tf:"optional"` } +func (newState *UpdateStorageCredential) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateStorageCredential) { +} + +func (newState *UpdateStorageCredential) SyncEffectiveFieldsDuringRead(existingState UpdateStorageCredential) { +} + // Update a table owner. 
type UpdateTableRequest struct { // Full name of the table. @@ -2741,6 +4012,12 @@ type UpdateTableRequest struct { Owner types.String `tfsdk:"owner" tf:"optional"` } +func (newState *UpdateTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateTableRequest) { +} + +func (newState *UpdateTableRequest) SyncEffectiveFieldsDuringRead(existingState UpdateTableRequest) { +} + type UpdateVolumeRequestContent struct { // The comment attached to the volume Comment types.String `tfsdk:"comment" tf:"optional"` @@ -2752,6 +4029,12 @@ type UpdateVolumeRequestContent struct { Owner types.String `tfsdk:"owner" tf:"optional"` } +func (newState *UpdateVolumeRequestContent) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateVolumeRequestContent) { +} + +func (newState *UpdateVolumeRequestContent) SyncEffectiveFieldsDuringRead(existingState UpdateVolumeRequestContent) { +} + type UpdateWorkspaceBindings struct { // A list of workspace IDs. AssignWorkspaces []types.Int64 `tfsdk:"assign_workspaces" tf:"optional"` @@ -2761,6 +4044,12 @@ type UpdateWorkspaceBindings struct { UnassignWorkspaces []types.Int64 `tfsdk:"unassign_workspaces" tf:"optional"` } +func (newState *UpdateWorkspaceBindings) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateWorkspaceBindings) { +} + +func (newState *UpdateWorkspaceBindings) SyncEffectiveFieldsDuringRead(existingState UpdateWorkspaceBindings) { +} + type UpdateWorkspaceBindingsParameters struct { // List of workspace bindings Add []WorkspaceBinding `tfsdk:"add" tf:"optional"` @@ -2772,6 +4061,12 @@ type UpdateWorkspaceBindingsParameters struct { SecurableType types.String `tfsdk:"-"` } +func (newState *UpdateWorkspaceBindingsParameters) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateWorkspaceBindingsParameters) { +} + +func (newState *UpdateWorkspaceBindingsParameters) SyncEffectiveFieldsDuringRead(existingState UpdateWorkspaceBindingsParameters) { +} + type ValidateStorageCredential struct { // The AWS IAM role configuration. AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` @@ -2793,6 +4088,12 @@ type ValidateStorageCredential struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *ValidateStorageCredential) SyncEffectiveFieldsDuringCreateOrUpdate(plan ValidateStorageCredential) { +} + +func (newState *ValidateStorageCredential) SyncEffectiveFieldsDuringRead(existingState ValidateStorageCredential) { +} + type ValidateStorageCredentialResponse struct { // Whether the tested location is a directory in cloud storage. IsDir types.Bool `tfsdk:"isDir" tf:"optional"` @@ -2800,6 +4101,12 @@ type ValidateStorageCredentialResponse struct { Results []ValidationResult `tfsdk:"results" tf:"optional"` } +func (newState *ValidateStorageCredentialResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ValidateStorageCredentialResponse) { +} + +func (newState *ValidateStorageCredentialResponse) SyncEffectiveFieldsDuringRead(existingState ValidateStorageCredentialResponse) { +} + type ValidationResult struct { // Error message would exist when the result does not equal to **PASS**. 
Message types.String `tfsdk:"message" tf:"optional"` @@ -2809,6 +4116,12 @@ type ValidationResult struct { Result types.String `tfsdk:"result" tf:"optional"` } +func (newState *ValidationResult) SyncEffectiveFieldsDuringCreateOrUpdate(plan ValidationResult) { +} + +func (newState *ValidationResult) SyncEffectiveFieldsDuringRead(existingState ValidationResult) { +} + type VolumeInfo struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -2848,12 +4161,24 @@ type VolumeInfo struct { VolumeType types.String `tfsdk:"volume_type" tf:"optional"` } +func (newState *VolumeInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan VolumeInfo) { +} + +func (newState *VolumeInfo) SyncEffectiveFieldsDuringRead(existingState VolumeInfo) { +} + type WorkspaceBinding struct { BindingType types.String `tfsdk:"binding_type" tf:"optional"` WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:"optional"` } +func (newState *WorkspaceBinding) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceBinding) { +} + +func (newState *WorkspaceBinding) SyncEffectiveFieldsDuringRead(existingState WorkspaceBinding) { +} + // Currently assigned workspace bindings type WorkspaceBindingsResponse struct { // List of workspace bindings @@ -2863,3 +4188,9 @@ type WorkspaceBindingsResponse struct { // request (for the next page of results). NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } + +func (newState *WorkspaceBindingsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceBindingsResponse) { +} + +func (newState *WorkspaceBindingsResponse) SyncEffectiveFieldsDuringRead(existingState WorkspaceBindingsResponse) { +} diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index dcc16fd50f..653cfec24f 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -43,15 +43,33 @@ type AddInstanceProfile struct { SkipValidation types.Bool `tfsdk:"skip_validation" tf:"optional"` } +func (newState *AddInstanceProfile) SyncEffectiveFieldsDuringCreateOrUpdate(plan AddInstanceProfile) { +} + +func (newState *AddInstanceProfile) SyncEffectiveFieldsDuringRead(existingState AddInstanceProfile) { +} + type AddResponse struct { } +func (newState *AddResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AddResponse) { +} + +func (newState *AddResponse) SyncEffectiveFieldsDuringRead(existingState AddResponse) { +} + type Adlsgen2Info struct { // abfss destination, e.g. // `abfss://@.dfs.core.windows.net/`. Destination types.String `tfsdk:"destination" tf:""` } +func (newState *Adlsgen2Info) SyncEffectiveFieldsDuringCreateOrUpdate(plan Adlsgen2Info) { +} + +func (newState *Adlsgen2Info) SyncEffectiveFieldsDuringRead(existingState Adlsgen2Info) { +} + type AutoScale struct { // The maximum number of workers to which the cluster can scale up when // overloaded. Note that `max_workers` must be strictly greater than @@ -63,6 +81,12 @@ type AutoScale struct { MinWorkers types.Int64 `tfsdk:"min_workers" tf:"optional"` } +func (newState *AutoScale) SyncEffectiveFieldsDuringCreateOrUpdate(plan AutoScale) { +} + +func (newState *AutoScale) SyncEffectiveFieldsDuringRead(existingState AutoScale) { +} + type AwsAttributes struct { // Availability type used for all subsequent nodes past the // `first_on_demand` ones. 
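Fields tagged `tf:""` are required, while `tf:"optional"` fields may stay null. A purely illustrative sketch of filling the required destination on `Adlsgen2Info` above (the storage account and container are invented):

```go
import "github.com/hashicorp/terraform-plugin-framework/types"

func exampleAdlsgen2Info() Adlsgen2Info {
	// Destination is tagged tf:"" (required), so a concrete value is expected.
	return Adlsgen2Info{
		Destination: types.StringValue("abfss://logs@myaccount.dfs.core.windows.net/init"),
	}
}
```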
@@ -149,6 +173,12 @@ type AwsAttributes struct { ZoneId types.String `tfsdk:"zone_id" tf:"optional"` } +func (newState *AwsAttributes) SyncEffectiveFieldsDuringCreateOrUpdate(plan AwsAttributes) { +} + +func (newState *AwsAttributes) SyncEffectiveFieldsDuringRead(existingState AwsAttributes) { +} + type AzureAttributes struct { // Availability type used for all subsequent nodes past the // `first_on_demand` ones. Note: If `first_on_demand` is zero (which only @@ -175,6 +205,12 @@ type AzureAttributes struct { SpotBidMaxPrice types.Float64 `tfsdk:"spot_bid_max_price" tf:"optional"` } +func (newState *AzureAttributes) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureAttributes) { +} + +func (newState *AzureAttributes) SyncEffectiveFieldsDuringRead(existingState AzureAttributes) { +} + type CancelCommand struct { ClusterId types.String `tfsdk:"clusterId" tf:"optional"` @@ -183,9 +219,21 @@ type CancelCommand struct { ContextId types.String `tfsdk:"contextId" tf:"optional"` } +func (newState *CancelCommand) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelCommand) { +} + +func (newState *CancelCommand) SyncEffectiveFieldsDuringRead(existingState CancelCommand) { +} + type CancelResponse struct { } +func (newState *CancelResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelResponse) { +} + +func (newState *CancelResponse) SyncEffectiveFieldsDuringRead(existingState CancelResponse) { +} + type ChangeClusterOwner struct { // ClusterId types.String `tfsdk:"cluster_id" tf:""` @@ -193,9 +241,21 @@ type ChangeClusterOwner struct { OwnerUsername types.String `tfsdk:"owner_username" tf:""` } +func (newState *ChangeClusterOwner) SyncEffectiveFieldsDuringCreateOrUpdate(plan ChangeClusterOwner) { +} + +func (newState *ChangeClusterOwner) SyncEffectiveFieldsDuringRead(existingState ChangeClusterOwner) { +} + type ChangeClusterOwnerResponse struct { } +func (newState *ChangeClusterOwnerResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ChangeClusterOwnerResponse) { +} + +func (newState *ChangeClusterOwnerResponse) SyncEffectiveFieldsDuringRead(existingState ChangeClusterOwnerResponse) { +} + type ClientsTypes struct { // With jobs set, the cluster can be used for jobs Jobs types.Bool `tfsdk:"jobs" tf:"optional"` @@ -203,15 +263,33 @@ type ClientsTypes struct { Notebooks types.Bool `tfsdk:"notebooks" tf:"optional"` } +func (newState *ClientsTypes) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClientsTypes) { +} + +func (newState *ClientsTypes) SyncEffectiveFieldsDuringRead(existingState ClientsTypes) { +} + type CloneCluster struct { // The cluster that is being cloned. 
SourceClusterId types.String `tfsdk:"source_cluster_id" tf:""` } +func (newState *CloneCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan CloneCluster) { +} + +func (newState *CloneCluster) SyncEffectiveFieldsDuringRead(existingState CloneCluster) { +} + type CloudProviderNodeInfo struct { Status []types.String `tfsdk:"status" tf:"optional"` } +func (newState *CloudProviderNodeInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CloudProviderNodeInfo) { +} + +func (newState *CloudProviderNodeInfo) SyncEffectiveFieldsDuringRead(existingState CloudProviderNodeInfo) { +} + type ClusterAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -223,6 +301,12 @@ type ClusterAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *ClusterAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterAccessControlRequest) { +} + +func (newState *ClusterAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState ClusterAccessControlRequest) { +} + type ClusterAccessControlResponse struct { // All permissions. AllPermissions []ClusterPermission `tfsdk:"all_permissions" tf:"optional"` @@ -236,6 +320,12 @@ type ClusterAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *ClusterAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterAccessControlResponse) { +} + +func (newState *ClusterAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState ClusterAccessControlResponse) { +} + type ClusterAttributes struct { // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically @@ -369,6 +459,12 @@ type ClusterAttributes struct { WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } +func (newState *ClusterAttributes) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterAttributes) { +} + +func (newState *ClusterAttributes) SyncEffectiveFieldsDuringRead(existingState ClusterAttributes) { +} + type ClusterCompliance struct { // Canonical unique identifier for a cluster. ClusterId types.String `tfsdk:"cluster_id" tf:""` @@ -382,6 +478,12 @@ type ClusterCompliance struct { Violations map[string]types.String `tfsdk:"violations" tf:"optional"` } +func (newState *ClusterCompliance) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterCompliance) { +} + +func (newState *ClusterCompliance) SyncEffectiveFieldsDuringRead(existingState ClusterCompliance) { +} + type ClusterDetails struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 @@ -598,6 +700,12 @@ type ClusterDetails struct { WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } +func (newState *ClusterDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterDetails) { +} + +func (newState *ClusterDetails) SyncEffectiveFieldsDuringRead(existingState ClusterDetails) { +} + type ClusterEvent struct { // ClusterId types.String `tfsdk:"cluster_id" tf:""` @@ -613,6 +721,12 @@ type ClusterEvent struct { Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *ClusterEvent) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterEvent) { +} + +func (newState *ClusterEvent) SyncEffectiveFieldsDuringRead(existingState ClusterEvent) { +} + type ClusterLibraryStatuses struct { // Unique identifier for the cluster. 
ClusterId types.String `tfsdk:"cluster_id" tf:"optional"` @@ -620,6 +734,12 @@ type ClusterLibraryStatuses struct { LibraryStatuses []LibraryFullStatus `tfsdk:"library_statuses" tf:"optional"` } +func (newState *ClusterLibraryStatuses) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterLibraryStatuses) { +} + +func (newState *ClusterLibraryStatuses) SyncEffectiveFieldsDuringRead(existingState ClusterLibraryStatuses) { +} + type ClusterLogConf struct { // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : // "dbfs:/home/cluster_log" } }` @@ -632,6 +752,12 @@ type ClusterLogConf struct { S3 []S3StorageInfo `tfsdk:"s3" tf:"optional,object"` } +func (newState *ClusterLogConf) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterLogConf) { +} + +func (newState *ClusterLogConf) SyncEffectiveFieldsDuringRead(existingState ClusterLogConf) { +} + type ClusterPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -640,6 +766,12 @@ type ClusterPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *ClusterPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPermission) { +} + +func (newState *ClusterPermission) SyncEffectiveFieldsDuringRead(existingState ClusterPermission) { +} + type ClusterPermissions struct { AccessControlList []ClusterAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -648,18 +780,36 @@ type ClusterPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *ClusterPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPermissions) { +} + +func (newState *ClusterPermissions) SyncEffectiveFieldsDuringRead(existingState ClusterPermissions) { +} + type ClusterPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *ClusterPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPermissionsDescription) { +} + +func (newState *ClusterPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState ClusterPermissionsDescription) { +} + type ClusterPermissionsRequest struct { AccessControlList []ClusterAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The cluster for which to get or manage permissions. ClusterId types.String `tfsdk:"-"` } +func (newState *ClusterPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPermissionsRequest) { +} + +func (newState *ClusterPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState ClusterPermissionsRequest) { +} + type ClusterPolicyAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -671,6 +821,12 @@ type ClusterPolicyAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *ClusterPolicyAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPolicyAccessControlRequest) { +} + +func (newState *ClusterPolicyAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState ClusterPolicyAccessControlRequest) { +} + type ClusterPolicyAccessControlResponse struct { // All permissions. 
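Nested blocks such as the log destinations above are represented as slices tagged `tf:"optional,object"`, populated with a single element. A hedged sketch using the `DbfsStorageInfo` struct defined further down in this file (the path is made up):

```go
import "github.com/hashicorp/terraform-plugin-framework/types"

func exampleClusterLogConf() ClusterLogConf {
	// Only one destination is provided here; the DBFS variant is used.
	return ClusterLogConf{
		Dbfs: []DbfsStorageInfo{
			{Destination: types.StringValue("dbfs:/cluster-logs/my-cluster")}, // hypothetical path
		},
	}
}
```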
AllPermissions []ClusterPolicyPermission `tfsdk:"all_permissions" tf:"optional"` @@ -684,6 +840,12 @@ type ClusterPolicyAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *ClusterPolicyAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPolicyAccessControlResponse) { +} + +func (newState *ClusterPolicyAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState ClusterPolicyAccessControlResponse) { +} + type ClusterPolicyPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -692,6 +854,12 @@ type ClusterPolicyPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *ClusterPolicyPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPolicyPermission) { +} + +func (newState *ClusterPolicyPermission) SyncEffectiveFieldsDuringRead(existingState ClusterPolicyPermission) { +} + type ClusterPolicyPermissions struct { AccessControlList []ClusterPolicyAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -700,18 +868,36 @@ type ClusterPolicyPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *ClusterPolicyPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPolicyPermissions) { +} + +func (newState *ClusterPolicyPermissions) SyncEffectiveFieldsDuringRead(existingState ClusterPolicyPermissions) { +} + type ClusterPolicyPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *ClusterPolicyPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPolicyPermissionsDescription) { +} + +func (newState *ClusterPolicyPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState ClusterPolicyPermissionsDescription) { +} + type ClusterPolicyPermissionsRequest struct { AccessControlList []ClusterPolicyAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The cluster policy for which to get or manage permissions. ClusterPolicyId types.String `tfsdk:"-"` } +func (newState *ClusterPolicyPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterPolicyPermissionsRequest) { +} + +func (newState *ClusterPolicyPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState ClusterPolicyPermissionsRequest) { +} + // Represents a change to the cluster settings required for the cluster to // become compliant with its policy. type ClusterSettingsChange struct { @@ -729,6 +915,12 @@ type ClusterSettingsChange struct { PreviousValue types.String `tfsdk:"previous_value" tf:"optional"` } +func (newState *ClusterSettingsChange) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterSettingsChange) { +} + +func (newState *ClusterSettingsChange) SyncEffectiveFieldsDuringRead(existingState ClusterSettingsChange) { +} + type ClusterSize struct { // Parameters needed in order to automatically scale clusters up and down // based on load. 
Note: autoscaling works best with DB runtime versions 3.0 @@ -747,6 +939,12 @@ type ClusterSize struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` } +func (newState *ClusterSize) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterSize) { +} + +func (newState *ClusterSize) SyncEffectiveFieldsDuringRead(existingState ClusterSize) { +} + type ClusterSpec struct { // When set to true, fixed and default values from the policy will be used // for fields that are omitted. When set to false, only fixed values from @@ -899,12 +1097,24 @@ type ClusterSpec struct { WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } +func (newState *ClusterSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterSpec) { +} + +func (newState *ClusterSpec) SyncEffectiveFieldsDuringRead(existingState ClusterSpec) { +} + // Get status type ClusterStatus struct { // Unique identifier of the cluster whose status should be retrieved. ClusterId types.String `tfsdk:"-"` } +func (newState *ClusterStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterStatus) { +} + +func (newState *ClusterStatus) SyncEffectiveFieldsDuringRead(existingState ClusterStatus) { +} + type Command struct { // Running cluster id ClusterId types.String `tfsdk:"clusterId" tf:"optional"` @@ -916,6 +1126,12 @@ type Command struct { Language types.String `tfsdk:"language" tf:"optional"` } +func (newState *Command) SyncEffectiveFieldsDuringCreateOrUpdate(plan Command) { +} + +func (newState *Command) SyncEffectiveFieldsDuringRead(existingState Command) { +} + // Get command info type CommandStatusRequest struct { ClusterId types.String `tfsdk:"-"` @@ -925,6 +1141,12 @@ type CommandStatusRequest struct { ContextId types.String `tfsdk:"-"` } +func (newState *CommandStatusRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CommandStatusRequest) { +} + +func (newState *CommandStatusRequest) SyncEffectiveFieldsDuringRead(existingState CommandStatusRequest) { +} + type CommandStatusResponse struct { Id types.String `tfsdk:"id" tf:"optional"` @@ -933,6 +1155,12 @@ type CommandStatusResponse struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *CommandStatusResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CommandStatusResponse) { +} + +func (newState *CommandStatusResponse) SyncEffectiveFieldsDuringRead(existingState CommandStatusResponse) { +} + // Get status type ContextStatusRequest struct { ClusterId types.String `tfsdk:"-"` @@ -940,12 +1168,24 @@ type ContextStatusRequest struct { ContextId types.String `tfsdk:"-"` } +func (newState *ContextStatusRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ContextStatusRequest) { +} + +func (newState *ContextStatusRequest) SyncEffectiveFieldsDuringRead(existingState ContextStatusRequest) { +} + type ContextStatusResponse struct { Id types.String `tfsdk:"id" tf:"optional"` Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *ContextStatusResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ContextStatusResponse) { +} + +func (newState *ContextStatusResponse) SyncEffectiveFieldsDuringRead(existingState ContextStatusResponse) { +} + type CreateCluster struct { // When set to true, fixed and default values from the policy will be used // for fields that are omitted. 
When set to false, only fixed values from @@ -1101,10 +1341,22 @@ type CreateCluster struct { WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } +func (newState *CreateCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCluster) { +} + +func (newState *CreateCluster) SyncEffectiveFieldsDuringRead(existingState CreateCluster) { +} + type CreateClusterResponse struct { ClusterId types.String `tfsdk:"cluster_id" tf:"optional"` } +func (newState *CreateClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateClusterResponse) { +} + +func (newState *CreateClusterResponse) SyncEffectiveFieldsDuringRead(existingState CreateClusterResponse) { +} + type CreateContext struct { // Running cluster id ClusterId types.String `tfsdk:"clusterId" tf:"optional"` @@ -1112,6 +1364,12 @@ type CreateContext struct { Language types.String `tfsdk:"language" tf:"optional"` } +func (newState *CreateContext) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateContext) { +} + +func (newState *CreateContext) SyncEffectiveFieldsDuringRead(existingState CreateContext) { +} + type CreateInstancePool struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. @@ -1169,11 +1427,23 @@ type CreateInstancePool struct { PreloadedSparkVersions []types.String `tfsdk:"preloaded_spark_versions" tf:"optional"` } +func (newState *CreateInstancePool) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateInstancePool) { +} + +func (newState *CreateInstancePool) SyncEffectiveFieldsDuringRead(existingState CreateInstancePool) { +} + type CreateInstancePoolResponse struct { // The ID of the created instance pool. InstancePoolId types.String `tfsdk:"instance_pool_id" tf:"optional"` } +func (newState *CreateInstancePoolResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateInstancePoolResponse) { +} + +func (newState *CreateInstancePoolResponse) SyncEffectiveFieldsDuringRead(existingState CreateInstancePoolResponse) { +} + type CreatePolicy struct { // Policy definition document expressed in [Databricks Cluster Policy // Definition Language]. @@ -1210,20 +1480,44 @@ type CreatePolicy struct { PolicyFamilyId types.String `tfsdk:"policy_family_id" tf:"optional"` } +func (newState *CreatePolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePolicy) { +} + +func (newState *CreatePolicy) SyncEffectiveFieldsDuringRead(existingState CreatePolicy) { +} + type CreatePolicyResponse struct { // Canonical unique identifier for the cluster policy. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` } +func (newState *CreatePolicyResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePolicyResponse) { +} + +func (newState *CreatePolicyResponse) SyncEffectiveFieldsDuringRead(existingState CreatePolicyResponse) { +} + type CreateResponse struct { // The global init script ID. 
ScriptId types.String `tfsdk:"script_id" tf:"optional"` } +func (newState *CreateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateResponse) { +} + +func (newState *CreateResponse) SyncEffectiveFieldsDuringRead(existingState CreateResponse) { +} + type Created struct { Id types.String `tfsdk:"id" tf:"optional"` } +func (newState *Created) SyncEffectiveFieldsDuringCreateOrUpdate(plan Created) { +} + +func (newState *Created) SyncEffectiveFieldsDuringRead(existingState Created) { +} + type DataPlaneEventDetails struct { // EventType types.String `tfsdk:"event_type" tf:"optional"` @@ -1235,53 +1529,125 @@ type DataPlaneEventDetails struct { Timestamp types.Int64 `tfsdk:"timestamp" tf:"optional"` } +func (newState *DataPlaneEventDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan DataPlaneEventDetails) { +} + +func (newState *DataPlaneEventDetails) SyncEffectiveFieldsDuringRead(existingState DataPlaneEventDetails) { +} + type DbfsStorageInfo struct { // dbfs destination, e.g. `dbfs:/my/path` Destination types.String `tfsdk:"destination" tf:""` } +func (newState *DbfsStorageInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan DbfsStorageInfo) { +} + +func (newState *DbfsStorageInfo) SyncEffectiveFieldsDuringRead(existingState DbfsStorageInfo) { +} + type DeleteCluster struct { // The cluster to be terminated. ClusterId types.String `tfsdk:"cluster_id" tf:""` } +func (newState *DeleteCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCluster) { +} + +func (newState *DeleteCluster) SyncEffectiveFieldsDuringRead(existingState DeleteCluster) { +} + type DeleteClusterResponse struct { } +func (newState *DeleteClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteClusterResponse) { +} + +func (newState *DeleteClusterResponse) SyncEffectiveFieldsDuringRead(existingState DeleteClusterResponse) { +} + // Delete init script type DeleteGlobalInitScriptRequest struct { // The ID of the global init script. ScriptId types.String `tfsdk:"-"` } +func (newState *DeleteGlobalInitScriptRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteGlobalInitScriptRequest) { +} + +func (newState *DeleteGlobalInitScriptRequest) SyncEffectiveFieldsDuringRead(existingState DeleteGlobalInitScriptRequest) { +} + type DeleteInstancePool struct { // The instance pool to be terminated. InstancePoolId types.String `tfsdk:"instance_pool_id" tf:""` } +func (newState *DeleteInstancePool) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteInstancePool) { +} + +func (newState *DeleteInstancePool) SyncEffectiveFieldsDuringRead(existingState DeleteInstancePool) { +} + type DeleteInstancePoolResponse struct { } +func (newState *DeleteInstancePoolResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteInstancePoolResponse) { +} + +func (newState *DeleteInstancePoolResponse) SyncEffectiveFieldsDuringRead(existingState DeleteInstancePoolResponse) { +} + type DeletePolicy struct { // The ID of the policy to delete. 
PolicyId types.String `tfsdk:"policy_id" tf:""` } +func (newState *DeletePolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePolicy) { +} + +func (newState *DeletePolicy) SyncEffectiveFieldsDuringRead(existingState DeletePolicy) { +} + type DeletePolicyResponse struct { } +func (newState *DeletePolicyResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePolicyResponse) { +} + +func (newState *DeletePolicyResponse) SyncEffectiveFieldsDuringRead(existingState DeletePolicyResponse) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + type DestroyContext struct { ClusterId types.String `tfsdk:"clusterId" tf:""` ContextId types.String `tfsdk:"contextId" tf:""` } +func (newState *DestroyContext) SyncEffectiveFieldsDuringCreateOrUpdate(plan DestroyContext) { +} + +func (newState *DestroyContext) SyncEffectiveFieldsDuringRead(existingState DestroyContext) { +} + type DestroyResponse struct { } +func (newState *DestroyResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DestroyResponse) { +} + +func (newState *DestroyResponse) SyncEffectiveFieldsDuringRead(existingState DestroyResponse) { +} + type DiskSpec struct { // The number of disks launched for each instance: - This feature is only // enabled for supported node types. - Users can choose up to the limit of @@ -1317,12 +1683,24 @@ type DiskSpec struct { DiskType []DiskType `tfsdk:"disk_type" tf:"optional,object"` } +func (newState *DiskSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan DiskSpec) { +} + +func (newState *DiskSpec) SyncEffectiveFieldsDuringRead(existingState DiskSpec) { +} + type DiskType struct { AzureDiskVolumeType types.String `tfsdk:"azure_disk_volume_type" tf:"optional"` EbsVolumeType types.String `tfsdk:"ebs_volume_type" tf:"optional"` } +func (newState *DiskType) SyncEffectiveFieldsDuringCreateOrUpdate(plan DiskType) { +} + +func (newState *DiskType) SyncEffectiveFieldsDuringRead(existingState DiskType) { +} + type DockerBasicAuth struct { // Password of the user Password types.String `tfsdk:"password" tf:"optional"` @@ -1330,12 +1708,24 @@ type DockerBasicAuth struct { Username types.String `tfsdk:"username" tf:"optional"` } +func (newState *DockerBasicAuth) SyncEffectiveFieldsDuringCreateOrUpdate(plan DockerBasicAuth) { +} + +func (newState *DockerBasicAuth) SyncEffectiveFieldsDuringRead(existingState DockerBasicAuth) { +} + type DockerImage struct { BasicAuth []DockerBasicAuth `tfsdk:"basic_auth" tf:"optional,object"` // URL of the docker image. Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *DockerImage) SyncEffectiveFieldsDuringCreateOrUpdate(plan DockerImage) { +} + +func (newState *DockerImage) SyncEffectiveFieldsDuringRead(existingState DockerImage) { +} + type EditCluster struct { // When set to true, fixed and default values from the policy will be used // for fields that are omitted. 
When set to false, only fixed values from @@ -1490,9 +1880,21 @@ type EditCluster struct { WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } +func (newState *EditCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditCluster) { +} + +func (newState *EditCluster) SyncEffectiveFieldsDuringRead(existingState EditCluster) { +} + type EditClusterResponse struct { } +func (newState *EditClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditClusterResponse) { +} + +func (newState *EditClusterResponse) SyncEffectiveFieldsDuringRead(existingState EditClusterResponse) { +} + type EditInstancePool struct { // Additional tags for pool resources. Databricks will tag all pool // resources (e.g., AWS instances and EBS volumes) with these tags in @@ -1527,9 +1929,21 @@ type EditInstancePool struct { NodeTypeId types.String `tfsdk:"node_type_id" tf:""` } +func (newState *EditInstancePool) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditInstancePool) { +} + +func (newState *EditInstancePool) SyncEffectiveFieldsDuringRead(existingState EditInstancePool) { +} + type EditInstancePoolResponse struct { } +func (newState *EditInstancePoolResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditInstancePoolResponse) { +} + +func (newState *EditInstancePoolResponse) SyncEffectiveFieldsDuringRead(existingState EditInstancePoolResponse) { +} + type EditPolicy struct { // Policy definition document expressed in [Databricks Cluster Policy // Definition Language]. @@ -1568,12 +1982,30 @@ type EditPolicy struct { PolicyId types.String `tfsdk:"policy_id" tf:""` } +func (newState *EditPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditPolicy) { +} + +func (newState *EditPolicy) SyncEffectiveFieldsDuringRead(existingState EditPolicy) { +} + type EditPolicyResponse struct { } +func (newState *EditPolicyResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditPolicyResponse) { +} + +func (newState *EditPolicyResponse) SyncEffectiveFieldsDuringRead(existingState EditPolicyResponse) { +} + type EditResponse struct { } +func (newState *EditResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditResponse) { +} + +func (newState *EditResponse) SyncEffectiveFieldsDuringRead(existingState EditResponse) { +} + type EnforceClusterComplianceRequest struct { // The ID of the cluster you want to enforce policy compliance on. ClusterId types.String `tfsdk:"cluster_id" tf:""` @@ -1582,6 +2014,12 @@ type EnforceClusterComplianceRequest struct { ValidateOnly types.Bool `tfsdk:"validate_only" tf:"optional"` } +func (newState *EnforceClusterComplianceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnforceClusterComplianceRequest) { +} + +func (newState *EnforceClusterComplianceRequest) SyncEffectiveFieldsDuringRead(existingState EnforceClusterComplianceRequest) { +} + type EnforceClusterComplianceResponse struct { // A list of changes that have been made to the cluster settings for the // cluster to become compliant with its policy. @@ -1591,6 +2029,12 @@ type EnforceClusterComplianceResponse struct { HasChanges types.Bool `tfsdk:"has_changes" tf:"optional"` } +func (newState *EnforceClusterComplianceResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnforceClusterComplianceResponse) { +} + +func (newState *EnforceClusterComplianceResponse) SyncEffectiveFieldsDuringRead(existingState EnforceClusterComplianceResponse) { +} + // The environment entity used to preserve serverless environment side panel and // jobs' environment for non-notebook task. 
In this minimal environment spec, // only pip dependencies are supported. @@ -1609,6 +2053,12 @@ type Environment struct { Dependencies []types.String `tfsdk:"dependencies" tf:"optional"` } +func (newState *Environment) SyncEffectiveFieldsDuringCreateOrUpdate(plan Environment) { +} + +func (newState *Environment) SyncEffectiveFieldsDuringRead(existingState Environment) { +} + type EventDetails struct { // * For created clusters, the attributes of the cluster. * For edited // clusters, the new attributes of the cluster. @@ -1660,6 +2110,12 @@ type EventDetails struct { User types.String `tfsdk:"user" tf:"optional"` } +func (newState *EventDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan EventDetails) { +} + +func (newState *EventDetails) SyncEffectiveFieldsDuringRead(existingState EventDetails) { +} + type GcpAttributes struct { // This field determines whether the instance pool will contain preemptible // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs @@ -1694,17 +2150,35 @@ type GcpAttributes struct { ZoneId types.String `tfsdk:"zone_id" tf:"optional"` } +func (newState *GcpAttributes) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpAttributes) { +} + +func (newState *GcpAttributes) SyncEffectiveFieldsDuringRead(existingState GcpAttributes) { +} + type GcsStorageInfo struct { // GCS destination/URI, e.g. `gs://my-bucket/some-prefix` Destination types.String `tfsdk:"destination" tf:""` } +func (newState *GcsStorageInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcsStorageInfo) { +} + +func (newState *GcsStorageInfo) SyncEffectiveFieldsDuringRead(existingState GcsStorageInfo) { +} + // Get cluster policy compliance type GetClusterComplianceRequest struct { // The ID of the cluster to get the compliance status ClusterId types.String `tfsdk:"-"` } +func (newState *GetClusterComplianceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterComplianceRequest) { +} + +func (newState *GetClusterComplianceRequest) SyncEffectiveFieldsDuringRead(existingState GetClusterComplianceRequest) { +} + type GetClusterComplianceResponse struct { // Whether the cluster is compliant with its policy or not. Clusters could // be out of compliance if the policy was updated after the cluster was last @@ -1717,52 +2191,106 @@ type GetClusterComplianceResponse struct { Violations map[string]types.String `tfsdk:"violations" tf:"optional"` } +func (newState *GetClusterComplianceResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterComplianceResponse) { +} + +func (newState *GetClusterComplianceResponse) SyncEffectiveFieldsDuringRead(existingState GetClusterComplianceResponse) { +} + // Get cluster permission levels type GetClusterPermissionLevelsRequest struct { // The cluster for which to get or manage permissions. 
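Per the comment above, the serverless `Environment` spec carries only pip requirements. A minimal, illustrative sketch of populating the dependency list (the package pins are hypothetical):

```go
import "github.com/hashicorp/terraform-plugin-framework/types"

func exampleEnvironment() Environment {
	return Environment{
		// Each entry follows pip requirement syntax, as described above.
		Dependencies: []types.String{
			types.StringValue("simplejson==3.8.0"),
			types.StringValue("./wheels/my_package-0.1.0-py3-none-any.whl"),
		},
	}
}
```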
ClusterId types.String `tfsdk:"-"` } +func (newState *GetClusterPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterPermissionLevelsRequest) { +} + +func (newState *GetClusterPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetClusterPermissionLevelsRequest) { +} + type GetClusterPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []ClusterPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetClusterPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterPermissionLevelsResponse) { +} + +func (newState *GetClusterPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetClusterPermissionLevelsResponse) { +} + // Get cluster permissions type GetClusterPermissionsRequest struct { // The cluster for which to get or manage permissions. ClusterId types.String `tfsdk:"-"` } +func (newState *GetClusterPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterPermissionsRequest) { +} + +func (newState *GetClusterPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetClusterPermissionsRequest) { +} + // Get cluster policy permission levels type GetClusterPolicyPermissionLevelsRequest struct { // The cluster policy for which to get or manage permissions. ClusterPolicyId types.String `tfsdk:"-"` } +func (newState *GetClusterPolicyPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterPolicyPermissionLevelsRequest) { +} + +func (newState *GetClusterPolicyPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetClusterPolicyPermissionLevelsRequest) { +} + type GetClusterPolicyPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []ClusterPolicyPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetClusterPolicyPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterPolicyPermissionLevelsResponse) { +} + +func (newState *GetClusterPolicyPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetClusterPolicyPermissionLevelsResponse) { +} + // Get cluster policy permissions type GetClusterPolicyPermissionsRequest struct { // The cluster policy for which to get or manage permissions. ClusterPolicyId types.String `tfsdk:"-"` } +func (newState *GetClusterPolicyPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterPolicyPermissionsRequest) { +} + +func (newState *GetClusterPolicyPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetClusterPolicyPermissionsRequest) { +} + // Get a cluster policy type GetClusterPolicyRequest struct { // Canonical unique identifier for the Cluster Policy. PolicyId types.String `tfsdk:"-"` } +func (newState *GetClusterPolicyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterPolicyRequest) { +} + +func (newState *GetClusterPolicyRequest) SyncEffectiveFieldsDuringRead(existingState GetClusterPolicyRequest) { +} + // Get cluster info type GetClusterRequest struct { // The cluster about which to retrieve information. ClusterId types.String `tfsdk:"-"` } +func (newState *GetClusterRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetClusterRequest) { +} + +func (newState *GetClusterRequest) SyncEffectiveFieldsDuringRead(existingState GetClusterRequest) { +} + type GetEvents struct { // The ID of the cluster to retrieve events about. 
ClusterId types.String `tfsdk:"cluster_id" tf:""` @@ -1786,6 +2314,12 @@ type GetEvents struct { StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` } +func (newState *GetEvents) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetEvents) { +} + +func (newState *GetEvents) SyncEffectiveFieldsDuringRead(existingState GetEvents) { +} + type GetEventsResponse struct { // Events []ClusterEvent `tfsdk:"events" tf:"optional"` @@ -1797,12 +2331,24 @@ type GetEventsResponse struct { TotalCount types.Int64 `tfsdk:"total_count" tf:"optional"` } +func (newState *GetEventsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetEventsResponse) { +} + +func (newState *GetEventsResponse) SyncEffectiveFieldsDuringRead(existingState GetEventsResponse) { +} + // Get an init script type GetGlobalInitScriptRequest struct { // The ID of the global init script. ScriptId types.String `tfsdk:"-"` } +func (newState *GetGlobalInitScriptRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetGlobalInitScriptRequest) { +} + +func (newState *GetGlobalInitScriptRequest) SyncEffectiveFieldsDuringRead(existingState GetGlobalInitScriptRequest) { +} + type GetInstancePool struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. @@ -1879,29 +2425,59 @@ type GetInstancePool struct { Status []InstancePoolStatus `tfsdk:"status" tf:"optional,object"` } +func (newState *GetInstancePool) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetInstancePool) { +} + +func (newState *GetInstancePool) SyncEffectiveFieldsDuringRead(existingState GetInstancePool) { +} + // Get instance pool permission levels type GetInstancePoolPermissionLevelsRequest struct { // The instance pool for which to get or manage permissions. InstancePoolId types.String `tfsdk:"-"` } +func (newState *GetInstancePoolPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetInstancePoolPermissionLevelsRequest) { +} + +func (newState *GetInstancePoolPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetInstancePoolPermissionLevelsRequest) { +} + type GetInstancePoolPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []InstancePoolPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetInstancePoolPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetInstancePoolPermissionLevelsResponse) { +} + +func (newState *GetInstancePoolPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetInstancePoolPermissionLevelsResponse) { +} + // Get instance pool permissions type GetInstancePoolPermissionsRequest struct { // The instance pool for which to get or manage permissions. InstancePoolId types.String `tfsdk:"-"` } +func (newState *GetInstancePoolPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetInstancePoolPermissionsRequest) { +} + +func (newState *GetInstancePoolPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetInstancePoolPermissionsRequest) { +} + // Get instance pool information type GetInstancePoolRequest struct { // The canonical unique identifier for the instance pool. 
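As a rough illustration of a request model, the events query above combines a required cluster ID with optional window and paging fields; everything here other than the struct and its tags is invented:

```go
import "github.com/hashicorp/terraform-plugin-framework/types"

func exampleGetEvents() GetEvents {
	return GetEvents{
		// cluster_id is tagged tf:"" and therefore required.
		ClusterId: types.StringValue("0123-456789-abcdefgh"), // hypothetical cluster ID
		// start_time is optional; the value here is an arbitrary lower bound.
		StartTime: types.Int64Value(1704067200000),
	}
}
```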
InstancePoolId types.String `tfsdk:"-"` } +func (newState *GetInstancePoolRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetInstancePoolRequest) { +} + +func (newState *GetInstancePoolRequest) SyncEffectiveFieldsDuringRead(existingState GetInstancePoolRequest) { +} + // Get policy family information type GetPolicyFamilyRequest struct { // The family ID about which to retrieve information. @@ -1911,11 +2487,23 @@ type GetPolicyFamilyRequest struct { Version types.Int64 `tfsdk:"-"` } +func (newState *GetPolicyFamilyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPolicyFamilyRequest) { +} + +func (newState *GetPolicyFamilyRequest) SyncEffectiveFieldsDuringRead(existingState GetPolicyFamilyRequest) { +} + type GetSparkVersionsResponse struct { // All the available Spark versions. Versions []SparkVersion `tfsdk:"versions" tf:"optional"` } +func (newState *GetSparkVersionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetSparkVersionsResponse) { +} + +func (newState *GetSparkVersionsResponse) SyncEffectiveFieldsDuringRead(existingState GetSparkVersionsResponse) { +} + type GlobalInitScriptCreateRequest struct { // Specifies whether the script is enabled. The script runs only if enabled. Enabled types.Bool `tfsdk:"enabled" tf:"optional"` @@ -1937,6 +2525,12 @@ type GlobalInitScriptCreateRequest struct { Script types.String `tfsdk:"script" tf:""` } +func (newState *GlobalInitScriptCreateRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GlobalInitScriptCreateRequest) { +} + +func (newState *GlobalInitScriptCreateRequest) SyncEffectiveFieldsDuringRead(existingState GlobalInitScriptCreateRequest) { +} + type GlobalInitScriptDetails struct { // Time when the script was created, represented as a Unix timestamp in // milliseconds. @@ -1959,6 +2553,12 @@ type GlobalInitScriptDetails struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *GlobalInitScriptDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan GlobalInitScriptDetails) { +} + +func (newState *GlobalInitScriptDetails) SyncEffectiveFieldsDuringRead(existingState GlobalInitScriptDetails) { +} + type GlobalInitScriptDetailsWithContent struct { // Time when the script was created, represented as a Unix timestamp in // milliseconds. @@ -1983,6 +2583,12 @@ type GlobalInitScriptDetailsWithContent struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *GlobalInitScriptDetailsWithContent) SyncEffectiveFieldsDuringCreateOrUpdate(plan GlobalInitScriptDetailsWithContent) { +} + +func (newState *GlobalInitScriptDetailsWithContent) SyncEffectiveFieldsDuringRead(existingState GlobalInitScriptDetailsWithContent) { +} + type GlobalInitScriptUpdateRequest struct { // Specifies whether the script is enabled. The script runs only if enabled. 
Enabled types.Bool `tfsdk:"enabled" tf:"optional"` @@ -2007,6 +2613,12 @@ type GlobalInitScriptUpdateRequest struct { ScriptId types.String `tfsdk:"-"` } +func (newState *GlobalInitScriptUpdateRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GlobalInitScriptUpdateRequest) { +} + +func (newState *GlobalInitScriptUpdateRequest) SyncEffectiveFieldsDuringRead(existingState GlobalInitScriptUpdateRequest) { +} + type InitScriptEventDetails struct { // The cluster scoped init scripts associated with this cluster event Cluster []InitScriptInfoAndExecutionDetails `tfsdk:"cluster" tf:"optional"` @@ -2016,6 +2628,12 @@ type InitScriptEventDetails struct { ReportedForNode types.String `tfsdk:"reported_for_node" tf:"optional"` } +func (newState *InitScriptEventDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan InitScriptEventDetails) { +} + +func (newState *InitScriptEventDetails) SyncEffectiveFieldsDuringRead(existingState InitScriptEventDetails) { +} + type InitScriptExecutionDetails struct { // Addition details regarding errors. ErrorMessage types.String `tfsdk:"error_message" tf:"optional"` @@ -2025,6 +2643,12 @@ type InitScriptExecutionDetails struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *InitScriptExecutionDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan InitScriptExecutionDetails) { +} + +func (newState *InitScriptExecutionDetails) SyncEffectiveFieldsDuringRead(existingState InitScriptExecutionDetails) { +} + type InitScriptInfo struct { // destination needs to be provided. e.g. `{ "abfss" : { "destination" : // "abfss://@.dfs.core.windows.net/" @@ -2053,6 +2677,12 @@ type InitScriptInfo struct { Workspace []WorkspaceStorageInfo `tfsdk:"workspace" tf:"optional,object"` } +func (newState *InitScriptInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan InitScriptInfo) { +} + +func (newState *InitScriptInfo) SyncEffectiveFieldsDuringRead(existingState InitScriptInfo) { +} + type InitScriptInfoAndExecutionDetails struct { // Details about the script ExecutionDetails []InitScriptExecutionDetails `tfsdk:"execution_details" tf:"optional,object"` @@ -2060,6 +2690,12 @@ type InitScriptInfoAndExecutionDetails struct { Script []InitScriptInfo `tfsdk:"script" tf:"optional,object"` } +func (newState *InitScriptInfoAndExecutionDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan InitScriptInfoAndExecutionDetails) { +} + +func (newState *InitScriptInfoAndExecutionDetails) SyncEffectiveFieldsDuringRead(existingState InitScriptInfoAndExecutionDetails) { +} + type InstallLibraries struct { // Unique identifier for the cluster on which to install these libraries. 
ClusterId types.String `tfsdk:"cluster_id" tf:""` @@ -2067,9 +2703,21 @@ type InstallLibraries struct { Libraries []Library `tfsdk:"libraries" tf:""` } +func (newState *InstallLibraries) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstallLibraries) { +} + +func (newState *InstallLibraries) SyncEffectiveFieldsDuringRead(existingState InstallLibraries) { +} + type InstallLibrariesResponse struct { } +func (newState *InstallLibrariesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstallLibrariesResponse) { +} + +func (newState *InstallLibrariesResponse) SyncEffectiveFieldsDuringRead(existingState InstallLibrariesResponse) { +} + type InstancePoolAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -2081,6 +2729,12 @@ type InstancePoolAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *InstancePoolAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolAccessControlRequest) { +} + +func (newState *InstancePoolAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState InstancePoolAccessControlRequest) { +} + type InstancePoolAccessControlResponse struct { // All permissions. AllPermissions []InstancePoolPermission `tfsdk:"all_permissions" tf:"optional"` @@ -2094,6 +2748,12 @@ type InstancePoolAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *InstancePoolAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolAccessControlResponse) { +} + +func (newState *InstancePoolAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState InstancePoolAccessControlResponse) { +} + type InstancePoolAndStats struct { // Attributes related to instance pools running on Amazon Web Services. If // not specified at pool creation, a set of default values will be used. @@ -2170,6 +2830,12 @@ type InstancePoolAndStats struct { Status []InstancePoolStatus `tfsdk:"status" tf:"optional,object"` } +func (newState *InstancePoolAndStats) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolAndStats) { +} + +func (newState *InstancePoolAndStats) SyncEffectiveFieldsDuringRead(existingState InstancePoolAndStats) { +} + type InstancePoolAwsAttributes struct { // Availability type used for the spot nodes. // @@ -2202,6 +2868,12 @@ type InstancePoolAwsAttributes struct { ZoneId types.String `tfsdk:"zone_id" tf:"optional"` } +func (newState *InstancePoolAwsAttributes) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolAwsAttributes) { +} + +func (newState *InstancePoolAwsAttributes) SyncEffectiveFieldsDuringRead(existingState InstancePoolAwsAttributes) { +} + type InstancePoolAzureAttributes struct { // Shows the Availability type used for the spot nodes. 
// @@ -2213,6 +2885,12 @@ type InstancePoolAzureAttributes struct { SpotBidMaxPrice types.Float64 `tfsdk:"spot_bid_max_price" tf:"optional"` } +func (newState *InstancePoolAzureAttributes) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolAzureAttributes) { +} + +func (newState *InstancePoolAzureAttributes) SyncEffectiveFieldsDuringRead(existingState InstancePoolAzureAttributes) { +} + type InstancePoolGcpAttributes struct { // This field determines whether the instance pool will contain preemptible // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs @@ -2244,6 +2922,12 @@ type InstancePoolGcpAttributes struct { ZoneId types.String `tfsdk:"zone_id" tf:"optional"` } +func (newState *InstancePoolGcpAttributes) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolGcpAttributes) { +} + +func (newState *InstancePoolGcpAttributes) SyncEffectiveFieldsDuringRead(existingState InstancePoolGcpAttributes) { +} + type InstancePoolPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -2252,6 +2936,12 @@ type InstancePoolPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *InstancePoolPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolPermission) { +} + +func (newState *InstancePoolPermission) SyncEffectiveFieldsDuringRead(existingState InstancePoolPermission) { +} + type InstancePoolPermissions struct { AccessControlList []InstancePoolAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -2260,18 +2950,36 @@ type InstancePoolPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *InstancePoolPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolPermissions) { +} + +func (newState *InstancePoolPermissions) SyncEffectiveFieldsDuringRead(existingState InstancePoolPermissions) { +} + type InstancePoolPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *InstancePoolPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolPermissionsDescription) { +} + +func (newState *InstancePoolPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState InstancePoolPermissionsDescription) { +} + type InstancePoolPermissionsRequest struct { AccessControlList []InstancePoolAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The instance pool for which to get or manage permissions. InstancePoolId types.String `tfsdk:"-"` } +func (newState *InstancePoolPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolPermissionsRequest) { +} + +func (newState *InstancePoolPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState InstancePoolPermissionsRequest) { +} + type InstancePoolStats struct { // Number of active instances in the pool that are NOT part of a cluster. IdleCount types.Int64 `tfsdk:"idle_count" tf:"optional"` @@ -2283,6 +2991,12 @@ type InstancePoolStats struct { UsedCount types.Int64 `tfsdk:"used_count" tf:"optional"` } +func (newState *InstancePoolStats) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolStats) { +} + +func (newState *InstancePoolStats) SyncEffectiveFieldsDuringRead(existingState InstancePoolStats) { +} + type InstancePoolStatus struct { // List of error messages for the failed pending instances. 
The // pending_instance_errors follows FIFO with maximum length of the min_idle @@ -2291,6 +3005,12 @@ type InstancePoolStatus struct { PendingInstanceErrors []PendingInstanceError `tfsdk:"pending_instance_errors" tf:"optional"` } +func (newState *InstancePoolStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstancePoolStatus) { +} + +func (newState *InstancePoolStatus) SyncEffectiveFieldsDuringRead(existingState InstancePoolStatus) { +} + type InstanceProfile struct { // The AWS IAM role ARN of the role associated with the instance profile. // This field is required if your role name and instance profile name do not @@ -2312,6 +3032,12 @@ type InstanceProfile struct { IsMetaInstanceProfile types.Bool `tfsdk:"is_meta_instance_profile" tf:"optional"` } +func (newState *InstanceProfile) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstanceProfile) { +} + +func (newState *InstanceProfile) SyncEffectiveFieldsDuringRead(existingState InstanceProfile) { +} + type Library struct { // Specification of a CRAN library to be installed as part of the library Cran []RCranLibrary `tfsdk:"cran" tf:"optional,object"` @@ -2348,6 +3074,12 @@ type Library struct { Whl types.String `tfsdk:"whl" tf:"optional"` } +func (newState *Library) SyncEffectiveFieldsDuringCreateOrUpdate(plan Library) { +} + +func (newState *Library) SyncEffectiveFieldsDuringRead(existingState Library) { +} + // The status of the library on a specific cluster. type LibraryFullStatus struct { // Whether the library was set to be installed on all clusters via the @@ -2362,11 +3094,23 @@ type LibraryFullStatus struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *LibraryFullStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan LibraryFullStatus) { +} + +func (newState *LibraryFullStatus) SyncEffectiveFieldsDuringRead(existingState LibraryFullStatus) { +} + type ListAllClusterLibraryStatusesResponse struct { // A list of cluster statuses. Statuses []ClusterLibraryStatuses `tfsdk:"statuses" tf:"optional"` } +func (newState *ListAllClusterLibraryStatusesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAllClusterLibraryStatusesResponse) { +} + +func (newState *ListAllClusterLibraryStatusesResponse) SyncEffectiveFieldsDuringRead(existingState ListAllClusterLibraryStatusesResponse) { +} + type ListAvailableZonesResponse struct { // The availability zone if no `zone_id` is provided in the cluster creation // request. @@ -2375,6 +3119,12 @@ type ListAvailableZonesResponse struct { Zones []types.String `tfsdk:"zones" tf:"optional"` } +func (newState *ListAvailableZonesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAvailableZonesResponse) { +} + +func (newState *ListAvailableZonesResponse) SyncEffectiveFieldsDuringRead(existingState ListAvailableZonesResponse) { +} + // List cluster policy compliance type ListClusterCompliancesRequest struct { // Use this field to specify the maximum number of results to be returned by @@ -2388,6 +3138,12 @@ type ListClusterCompliancesRequest struct { PolicyId types.String `tfsdk:"-"` } +func (newState *ListClusterCompliancesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListClusterCompliancesRequest) { +} + +func (newState *ListClusterCompliancesRequest) SyncEffectiveFieldsDuringRead(existingState ListClusterCompliancesRequest) { +} + type ListClusterCompliancesResponse struct { // A list of clusters and their policy compliance statuses. 
Clusters []ClusterCompliance `tfsdk:"clusters" tf:"optional"` @@ -2400,6 +3156,12 @@ type ListClusterCompliancesResponse struct { PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` } +func (newState *ListClusterCompliancesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListClusterCompliancesResponse) { +} + +func (newState *ListClusterCompliancesResponse) SyncEffectiveFieldsDuringRead(existingState ListClusterCompliancesResponse) { +} + // List cluster policies type ListClusterPoliciesRequest struct { // The cluster policy attribute to sort by. * `POLICY_CREATION_TIME` - Sort @@ -2411,6 +3173,12 @@ type ListClusterPoliciesRequest struct { SortOrder types.String `tfsdk:"-"` } +func (newState *ListClusterPoliciesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListClusterPoliciesRequest) { +} + +func (newState *ListClusterPoliciesRequest) SyncEffectiveFieldsDuringRead(existingState ListClusterPoliciesRequest) { +} + type ListClustersFilterBy struct { // The source of cluster creation. ClusterSources []types.String `tfsdk:"cluster_sources" tf:"optional"` @@ -2422,6 +3190,12 @@ type ListClustersFilterBy struct { PolicyId types.String `tfsdk:"policy_id" tf:"optional"` } +func (newState *ListClustersFilterBy) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListClustersFilterBy) { +} + +func (newState *ListClustersFilterBy) SyncEffectiveFieldsDuringRead(existingState ListClustersFilterBy) { +} + // List clusters type ListClustersRequest struct { // Filters to apply to the list of clusters. @@ -2437,6 +3211,12 @@ type ListClustersRequest struct { SortBy []ListClustersSortBy `tfsdk:"-"` } +func (newState *ListClustersRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListClustersRequest) { +} + +func (newState *ListClustersRequest) SyncEffectiveFieldsDuringRead(existingState ListClustersRequest) { +} + type ListClustersResponse struct { // Clusters []ClusterDetails `tfsdk:"clusters" tf:"optional"` @@ -2449,6 +3229,12 @@ type ListClustersResponse struct { PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` } +func (newState *ListClustersResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListClustersResponse) { +} + +func (newState *ListClustersResponse) SyncEffectiveFieldsDuringRead(existingState ListClustersResponse) { +} + type ListClustersSortBy struct { // The direction to sort by. Direction types.String `tfsdk:"direction" tf:"optional"` @@ -2458,29 +3244,65 @@ type ListClustersSortBy struct { Field types.String `tfsdk:"field" tf:"optional"` } +func (newState *ListClustersSortBy) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListClustersSortBy) { +} + +func (newState *ListClustersSortBy) SyncEffectiveFieldsDuringRead(existingState ListClustersSortBy) { +} + type ListGlobalInitScriptsResponse struct { Scripts []GlobalInitScriptDetails `tfsdk:"scripts" tf:"optional"` } +func (newState *ListGlobalInitScriptsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListGlobalInitScriptsResponse) { +} + +func (newState *ListGlobalInitScriptsResponse) SyncEffectiveFieldsDuringRead(existingState ListGlobalInitScriptsResponse) { +} + type ListInstancePools struct { InstancePools []InstancePoolAndStats `tfsdk:"instance_pools" tf:"optional"` } +func (newState *ListInstancePools) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListInstancePools) { +} + +func (newState *ListInstancePools) SyncEffectiveFieldsDuringRead(existingState ListInstancePools) { +} + type ListInstanceProfilesResponse struct { // A list of instance profiles that the user can access. 
InstanceProfiles []InstanceProfile `tfsdk:"instance_profiles" tf:"optional"` } +func (newState *ListInstanceProfilesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListInstanceProfilesResponse) { +} + +func (newState *ListInstanceProfilesResponse) SyncEffectiveFieldsDuringRead(existingState ListInstanceProfilesResponse) { +} + type ListNodeTypesResponse struct { // The list of available Spark node types. NodeTypes []NodeType `tfsdk:"node_types" tf:"optional"` } +func (newState *ListNodeTypesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListNodeTypesResponse) { +} + +func (newState *ListNodeTypesResponse) SyncEffectiveFieldsDuringRead(existingState ListNodeTypesResponse) { +} + type ListPoliciesResponse struct { // List of policies. Policies []Policy `tfsdk:"policies" tf:"optional"` } +func (newState *ListPoliciesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPoliciesResponse) { +} + +func (newState *ListPoliciesResponse) SyncEffectiveFieldsDuringRead(existingState ListPoliciesResponse) { +} + // List policy families type ListPolicyFamiliesRequest struct { // Maximum number of policy families to return. @@ -2489,6 +3311,12 @@ type ListPolicyFamiliesRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListPolicyFamiliesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPolicyFamiliesRequest) { +} + +func (newState *ListPolicyFamiliesRequest) SyncEffectiveFieldsDuringRead(existingState ListPolicyFamiliesRequest) { +} + type ListPolicyFamiliesResponse struct { // A token that can be used to get the next page of results. If not present, // there are no more results to show. @@ -2497,11 +3325,23 @@ type ListPolicyFamiliesResponse struct { PolicyFamilies []PolicyFamily `tfsdk:"policy_families" tf:"optional"` } +func (newState *ListPolicyFamiliesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPolicyFamiliesResponse) { +} + +func (newState *ListPolicyFamiliesResponse) SyncEffectiveFieldsDuringRead(existingState ListPolicyFamiliesResponse) { +} + type LocalFileInfo struct { // local file destination, e.g. `file:/my/local/file.sh` Destination types.String `tfsdk:"destination" tf:""` } +func (newState *LocalFileInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan LocalFileInfo) { +} + +func (newState *LocalFileInfo) SyncEffectiveFieldsDuringRead(existingState LocalFileInfo) { +} + type LogAnalyticsInfo struct { // LogAnalyticsPrimaryKey types.String `tfsdk:"log_analytics_primary_key" tf:"optional"` @@ -2509,6 +3349,12 @@ type LogAnalyticsInfo struct { LogAnalyticsWorkspaceId types.String `tfsdk:"log_analytics_workspace_id" tf:"optional"` } +func (newState *LogAnalyticsInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogAnalyticsInfo) { +} + +func (newState *LogAnalyticsInfo) SyncEffectiveFieldsDuringRead(existingState LogAnalyticsInfo) { +} + type LogSyncStatus struct { // The timestamp of last attempt. If the last attempt fails, // `last_exception` will contain the exception in the last attempt. @@ -2518,6 +3364,12 @@ type LogSyncStatus struct { LastException types.String `tfsdk:"last_exception" tf:"optional"` } +func (newState *LogSyncStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogSyncStatus) { +} + +func (newState *LogSyncStatus) SyncEffectiveFieldsDuringRead(existingState LogSyncStatus) { +} + type MavenLibrary struct { // Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". 
Coordinates types.String `tfsdk:"coordinates" tf:""` @@ -2532,6 +3384,12 @@ type MavenLibrary struct { Repo types.String `tfsdk:"repo" tf:"optional"` } +func (newState *MavenLibrary) SyncEffectiveFieldsDuringCreateOrUpdate(plan MavenLibrary) { +} + +func (newState *MavenLibrary) SyncEffectiveFieldsDuringRead(existingState MavenLibrary) { +} + type NodeInstanceType struct { InstanceTypeId types.String `tfsdk:"instance_type_id" tf:"optional"` @@ -2544,6 +3402,12 @@ type NodeInstanceType struct { LocalNvmeDisks types.Int64 `tfsdk:"local_nvme_disks" tf:"optional"` } +func (newState *NodeInstanceType) SyncEffectiveFieldsDuringCreateOrUpdate(plan NodeInstanceType) { +} + +func (newState *NodeInstanceType) SyncEffectiveFieldsDuringRead(existingState NodeInstanceType) { +} + type NodeType struct { Category types.String `tfsdk:"category" tf:"optional"` // A string description associated with this node type, e.g., "r3.xlarge". @@ -2594,28 +3458,64 @@ type NodeType struct { SupportsElasticDisk types.Bool `tfsdk:"supports_elastic_disk" tf:"optional"` } +func (newState *NodeType) SyncEffectiveFieldsDuringCreateOrUpdate(plan NodeType) { +} + +func (newState *NodeType) SyncEffectiveFieldsDuringRead(existingState NodeType) { +} + type PendingInstanceError struct { InstanceId types.String `tfsdk:"instance_id" tf:"optional"` Message types.String `tfsdk:"message" tf:"optional"` } +func (newState *PendingInstanceError) SyncEffectiveFieldsDuringCreateOrUpdate(plan PendingInstanceError) { +} + +func (newState *PendingInstanceError) SyncEffectiveFieldsDuringRead(existingState PendingInstanceError) { +} + type PermanentDeleteCluster struct { // The cluster to be deleted. ClusterId types.String `tfsdk:"cluster_id" tf:""` } +func (newState *PermanentDeleteCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermanentDeleteCluster) { +} + +func (newState *PermanentDeleteCluster) SyncEffectiveFieldsDuringRead(existingState PermanentDeleteCluster) { +} + type PermanentDeleteClusterResponse struct { } +func (newState *PermanentDeleteClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermanentDeleteClusterResponse) { +} + +func (newState *PermanentDeleteClusterResponse) SyncEffectiveFieldsDuringRead(existingState PermanentDeleteClusterResponse) { +} + type PinCluster struct { // ClusterId types.String `tfsdk:"cluster_id" tf:""` } +func (newState *PinCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan PinCluster) { +} + +func (newState *PinCluster) SyncEffectiveFieldsDuringRead(existingState PinCluster) { +} + type PinClusterResponse struct { } +func (newState *PinClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PinClusterResponse) { +} + +func (newState *PinClusterResponse) SyncEffectiveFieldsDuringRead(existingState PinClusterResponse) { +} + // Describes a Cluster Policy entity. type Policy struct { // Creation time. The timestamp (in millisecond) when this Cluster Policy @@ -2665,6 +3565,12 @@ type Policy struct { PolicyId types.String `tfsdk:"policy_id" tf:"optional"` } +func (newState *Policy) SyncEffectiveFieldsDuringCreateOrUpdate(plan Policy) { +} + +func (newState *Policy) SyncEffectiveFieldsDuringRead(existingState Policy) { +} + type PolicyFamily struct { // Policy definition document expressed in [Databricks Cluster Policy // Definition Language]. 
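The compute hunks above and below only add empty `SyncEffectiveFieldsDuringCreateOrUpdate`/`SyncEffectiveFieldsDuringRead` hooks: generated models with no server-computed fields still implement both methods so callers can invoke them uniformly. A minimal sketch of that shape, using a hypothetical `ClusterSpec` struct that is not part of the provider:

```go
package main

import "fmt"

// Hypothetical stand-in for a generated model without server-computed fields;
// ClusterSpec here is illustrative only and not a type from the provider.
type ClusterSpec struct {
	NumWorkers int64
}

// Generated no-op hooks: present on every model so resource code can call
// them without checking whether the struct carries any effective fields.
func (newState *ClusterSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterSpec) {
}

func (newState *ClusterSpec) SyncEffectiveFieldsDuringRead(existingState ClusterSpec) {
}

func main() {
	state := ClusterSpec{NumWorkers: 2}
	plan := ClusterSpec{NumWorkers: 4}
	state.SyncEffectiveFieldsDuringCreateOrUpdate(plan) // deliberately does nothing here
	fmt.Println(state.NumWorkers)                       // still 2
}
```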
@@ -2679,6 +3585,12 @@ type PolicyFamily struct { PolicyFamilyId types.String `tfsdk:"policy_family_id" tf:"optional"` } +func (newState *PolicyFamily) SyncEffectiveFieldsDuringCreateOrUpdate(plan PolicyFamily) { +} + +func (newState *PolicyFamily) SyncEffectiveFieldsDuringRead(existingState PolicyFamily) { +} + type PythonPyPiLibrary struct { // The name of the pypi package to install. An optional exact version // specification is also supported. Examples: "simplejson" and @@ -2689,6 +3601,12 @@ type PythonPyPiLibrary struct { Repo types.String `tfsdk:"repo" tf:"optional"` } +func (newState *PythonPyPiLibrary) SyncEffectiveFieldsDuringCreateOrUpdate(plan PythonPyPiLibrary) { +} + +func (newState *PythonPyPiLibrary) SyncEffectiveFieldsDuringRead(existingState PythonPyPiLibrary) { +} + type RCranLibrary struct { // The name of the CRAN package to install. Package types.String `tfsdk:"package" tf:""` @@ -2697,14 +3615,32 @@ type RCranLibrary struct { Repo types.String `tfsdk:"repo" tf:"optional"` } +func (newState *RCranLibrary) SyncEffectiveFieldsDuringCreateOrUpdate(plan RCranLibrary) { +} + +func (newState *RCranLibrary) SyncEffectiveFieldsDuringRead(existingState RCranLibrary) { +} + type RemoveInstanceProfile struct { // The ARN of the instance profile to remove. This field is required. InstanceProfileArn types.String `tfsdk:"instance_profile_arn" tf:""` } +func (newState *RemoveInstanceProfile) SyncEffectiveFieldsDuringCreateOrUpdate(plan RemoveInstanceProfile) { +} + +func (newState *RemoveInstanceProfile) SyncEffectiveFieldsDuringRead(existingState RemoveInstanceProfile) { +} + type RemoveResponse struct { } +func (newState *RemoveResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RemoveResponse) { +} + +func (newState *RemoveResponse) SyncEffectiveFieldsDuringRead(existingState RemoveResponse) { +} + type ResizeCluster struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 @@ -2725,9 +3661,21 @@ type ResizeCluster struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` } +func (newState *ResizeCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResizeCluster) { +} + +func (newState *ResizeCluster) SyncEffectiveFieldsDuringRead(existingState ResizeCluster) { +} + type ResizeClusterResponse struct { } +func (newState *ResizeClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResizeClusterResponse) { +} + +func (newState *ResizeClusterResponse) SyncEffectiveFieldsDuringRead(existingState ResizeClusterResponse) { +} + type RestartCluster struct { // The cluster to be started. 
ClusterId types.String `tfsdk:"cluster_id" tf:""` @@ -2735,9 +3683,21 @@ type RestartCluster struct { RestartUser types.String `tfsdk:"restart_user" tf:"optional"` } +func (newState *RestartCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestartCluster) { +} + +func (newState *RestartCluster) SyncEffectiveFieldsDuringRead(existingState RestartCluster) { +} + type RestartClusterResponse struct { } +func (newState *RestartClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestartClusterResponse) { +} + +func (newState *RestartClusterResponse) SyncEffectiveFieldsDuringRead(existingState RestartClusterResponse) { +} + type Results struct { // The cause of the error Cause types.String `tfsdk:"cause" tf:"optional"` @@ -2762,6 +3722,12 @@ type Results struct { Truncated types.Bool `tfsdk:"truncated" tf:"optional"` } +func (newState *Results) SyncEffectiveFieldsDuringCreateOrUpdate(plan Results) { +} + +func (newState *Results) SyncEffectiveFieldsDuringRead(existingState Results) { +} + type S3StorageInfo struct { // (Optional) Set canned access control list for the logs, e.g. // `bucket-owner-full-control`. If `canned_cal` is set, please make sure the @@ -2795,6 +3761,12 @@ type S3StorageInfo struct { Region types.String `tfsdk:"region" tf:"optional"` } +func (newState *S3StorageInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan S3StorageInfo) { +} + +func (newState *S3StorageInfo) SyncEffectiveFieldsDuringRead(existingState S3StorageInfo) { +} + type SparkNode struct { // The private IP address of the host instance. HostPrivateIp types.String `tfsdk:"host_private_ip" tf:"optional"` @@ -2823,11 +3795,23 @@ type SparkNode struct { StartTimestamp types.Int64 `tfsdk:"start_timestamp" tf:"optional"` } +func (newState *SparkNode) SyncEffectiveFieldsDuringCreateOrUpdate(plan SparkNode) { +} + +func (newState *SparkNode) SyncEffectiveFieldsDuringRead(existingState SparkNode) { +} + type SparkNodeAwsAttributes struct { // Whether this node is on an Amazon spot instance. IsSpot types.Bool `tfsdk:"is_spot" tf:"optional"` } +func (newState *SparkNodeAwsAttributes) SyncEffectiveFieldsDuringCreateOrUpdate(plan SparkNodeAwsAttributes) { +} + +func (newState *SparkNodeAwsAttributes) SyncEffectiveFieldsDuringRead(existingState SparkNodeAwsAttributes) { +} + type SparkVersion struct { // Spark version key, for example "2.1.x-scala2.11". This is the value which // should be provided as the "spark_version" when creating a new cluster. @@ -2839,14 +3823,32 @@ type SparkVersion struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *SparkVersion) SyncEffectiveFieldsDuringCreateOrUpdate(plan SparkVersion) { +} + +func (newState *SparkVersion) SyncEffectiveFieldsDuringRead(existingState SparkVersion) { +} + type StartCluster struct { // The cluster to be started. 
ClusterId types.String `tfsdk:"cluster_id" tf:""` } +func (newState *StartCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan StartCluster) { +} + +func (newState *StartCluster) SyncEffectiveFieldsDuringRead(existingState StartCluster) { +} + type StartClusterResponse struct { } +func (newState *StartClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan StartClusterResponse) { +} + +func (newState *StartClusterResponse) SyncEffectiveFieldsDuringRead(existingState StartClusterResponse) { +} + type TerminationReason struct { // status code indicating why the cluster was terminated Code types.String `tfsdk:"code" tf:"optional"` @@ -2857,6 +3859,12 @@ type TerminationReason struct { Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *TerminationReason) SyncEffectiveFieldsDuringCreateOrUpdate(plan TerminationReason) { +} + +func (newState *TerminationReason) SyncEffectiveFieldsDuringRead(existingState TerminationReason) { +} + type UninstallLibraries struct { // Unique identifier for the cluster on which to uninstall these libraries. ClusterId types.String `tfsdk:"cluster_id" tf:""` @@ -2864,17 +3872,41 @@ type UninstallLibraries struct { Libraries []Library `tfsdk:"libraries" tf:""` } +func (newState *UninstallLibraries) SyncEffectiveFieldsDuringCreateOrUpdate(plan UninstallLibraries) { +} + +func (newState *UninstallLibraries) SyncEffectiveFieldsDuringRead(existingState UninstallLibraries) { +} + type UninstallLibrariesResponse struct { } +func (newState *UninstallLibrariesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UninstallLibrariesResponse) { +} + +func (newState *UninstallLibrariesResponse) SyncEffectiveFieldsDuringRead(existingState UninstallLibrariesResponse) { +} + type UnpinCluster struct { // ClusterId types.String `tfsdk:"cluster_id" tf:""` } +func (newState *UnpinCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan UnpinCluster) { +} + +func (newState *UnpinCluster) SyncEffectiveFieldsDuringRead(existingState UnpinCluster) { +} + type UnpinClusterResponse struct { } +func (newState *UnpinClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UnpinClusterResponse) { +} + +func (newState *UnpinClusterResponse) SyncEffectiveFieldsDuringRead(existingState UnpinClusterResponse) { +} + type UpdateCluster struct { // The cluster to be updated. Cluster []UpdateClusterResource `tfsdk:"cluster" tf:"optional,object"` @@ -2888,6 +3920,12 @@ type UpdateCluster struct { UpdateMask types.String `tfsdk:"update_mask" tf:""` } +func (newState *UpdateCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCluster) { +} + +func (newState *UpdateCluster) SyncEffectiveFieldsDuringRead(existingState UpdateCluster) { +} + type UpdateClusterResource struct { // Parameters needed in order to automatically scale clusters up and down // based on load. 
Note: autoscaling works best with DB runtime versions 3.0 @@ -3036,24 +4074,60 @@ type UpdateClusterResource struct { WorkloadType []WorkloadType `tfsdk:"workload_type" tf:"optional,object"` } +func (newState *UpdateClusterResource) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateClusterResource) { +} + +func (newState *UpdateClusterResource) SyncEffectiveFieldsDuringRead(existingState UpdateClusterResource) { +} + type UpdateClusterResponse struct { } +func (newState *UpdateClusterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateClusterResponse) { +} + +func (newState *UpdateClusterResponse) SyncEffectiveFieldsDuringRead(existingState UpdateClusterResponse) { +} + type UpdateResponse struct { } +func (newState *UpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateResponse) { +} + +func (newState *UpdateResponse) SyncEffectiveFieldsDuringRead(existingState UpdateResponse) { +} + type VolumesStorageInfo struct { // Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh` Destination types.String `tfsdk:"destination" tf:""` } +func (newState *VolumesStorageInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan VolumesStorageInfo) { +} + +func (newState *VolumesStorageInfo) SyncEffectiveFieldsDuringRead(existingState VolumesStorageInfo) { +} + type WorkloadType struct { // defined what type of clients can use the cluster. E.g. Notebooks, Jobs Clients []ClientsTypes `tfsdk:"clients" tf:"object"` } +func (newState *WorkloadType) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkloadType) { +} + +func (newState *WorkloadType) SyncEffectiveFieldsDuringRead(existingState WorkloadType) { +} + type WorkspaceStorageInfo struct { // workspace files destination, e.g. // `/Users/user1@databricks.com/my-init.sh` Destination types.String `tfsdk:"destination" tf:""` } + +func (newState *WorkspaceStorageInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceStorageInfo) { +} + +func (newState *WorkspaceStorageInfo) SyncEffectiveFieldsDuringRead(existingState WorkspaceStorageInfo) { +} diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index d0035a99d3..85023fe5e2 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -21,7 +21,8 @@ type CreateDashboardRequest struct { // The workspace path of the folder containing the dashboard. Includes // leading slash and no trailing slash. This field is excluded in List // Dashboards responses. - ParentPath types.String `tfsdk:"parent_path" tf:"optional"` + ParentPath types.String `tfsdk:"parent_path" tf:"optional"` + EffectiveParentPath types.String `tfsdk:"effective_parent_path" tf:"computed,optional"` // The contents of the dashboard in serialized string form. This field is // excluded in List Dashboards responses. 
Use the [get dashboard API] to // retrieve an example response, which includes the `serialized_dashboard` @@ -34,28 +35,69 @@ type CreateDashboardRequest struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateDashboardRequest) { + newState.EffectiveParentPath = newState.ParentPath + newState.ParentPath = plan.ParentPath +} + +func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateDashboardRequest) { + if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { + newState.ParentPath = existingState.ParentPath + } +} + type CreateScheduleRequest struct { // The cron expression describing the frequency of the periodic refresh for // this schedule. CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // The display name for schedule. DisplayName types.String `tfsdk:"display_name" tf:"optional"` // The status indicates whether this schedule is paused or not. PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` } +func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateScheduleRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId +} + +func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState CreateScheduleRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } +} + type CreateSubscriptionRequest struct { // UUID identifying the dashboard to which the subscription belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // UUID identifying the schedule to which the subscription belongs. - ScheduleId types.String `tfsdk:"-"` + ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` // Subscriber details for users and destinations to be added as subscribers // to the schedule. Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"` } +func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateSubscriptionRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState CreateSubscriptionRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } +} + type CronSchedule struct { // A cron expression using quartz syntax. EX: `0 0 8 * * ?` represents // everyday at 8am. See [Cron Trigger] for details. 
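The dashboards models above pair each server-controlled field (e.g. `parent_path`, `dashboard_id`) with an `Effective...` counterpart, and the two hooks reconcile them: after create or update the server-returned value is stashed in the effective field and the planned value is restored, while on read the configured value is kept only if the server still reports the matching effective value. A minimal, self-contained sketch of this pattern, using a hypothetical `ExampleModel` that is not part of the provider:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

// Hypothetical model mirroring the generated pattern: a user-configurable
// field plus a computed effective_* twin.
type ExampleModel struct {
	ParentPath          types.String `tfsdk:"parent_path" tf:"optional"`
	EffectiveParentPath types.String `tfsdk:"effective_parent_path" tf:"computed,optional"`
}

// After create/update: stash what the server returned in the effective field,
// then restore the value the user actually planned.
func (newState *ExampleModel) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExampleModel) {
	newState.EffectiveParentPath = newState.ParentPath
	newState.ParentPath = plan.ParentPath
}

// On read: keep the previously stored user value only while the server still
// reports the matching effective value, i.e. nothing changed out of band.
func (newState *ExampleModel) SyncEffectiveFieldsDuringRead(existingState ExampleModel) {
	if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() {
		newState.ParentPath = existingState.ParentPath
	}
}

func main() {
	plan := ExampleModel{ParentPath: types.StringValue("/Workspace/demo")}
	// Pretend the backend echoed the path back after create.
	state := ExampleModel{ParentPath: types.StringValue("/Workspace/demo")}
	state.SyncEffectiveFieldsDuringCreateOrUpdate(plan)
	fmt.Println(state.ParentPath.ValueString(), state.EffectiveParentPath.ValueString())
}
```

In this sketch the practitioner-configured attribute stays stable in state while whatever the backend actually returned remains visible through the computed `effective_*` attribute.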
@@ -69,27 +111,38 @@ type CronSchedule struct { TimezoneId types.String `tfsdk:"timezone_id" tf:""` } +func (newState *CronSchedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan CronSchedule) { +} + +func (newState *CronSchedule) SyncEffectiveFieldsDuringRead(existingState CronSchedule) { +} + type Dashboard struct { // The timestamp of when the dashboard was created. - CreateTime types.String `tfsdk:"create_time" tf:"optional"` + CreateTime types.String `tfsdk:"create_time" tf:"optional"` + EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` // UUID identifying the dashboard. - DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` + DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` + EffectiveDashboardId types.String `tfsdk:"effective_dashboard_id" tf:"computed,optional"` // The display name of the dashboard. DisplayName types.String `tfsdk:"display_name" tf:"optional"` // The etag for the dashboard. Can be optionally provided on updates to // ensure that the dashboard has not been modified since the last read. This // field is excluded in List Dashboards responses. - Etag types.String `tfsdk:"etag" tf:"optional"` + Etag types.String `tfsdk:"etag" tf:"optional"` + EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` // The state of the dashboard resource. Used for tracking trashed status. LifecycleState types.String `tfsdk:"lifecycle_state" tf:"optional"` // The workspace path of the folder containing the dashboard. Includes // leading slash and no trailing slash. This field is excluded in List // Dashboards responses. - ParentPath types.String `tfsdk:"parent_path" tf:"optional"` + ParentPath types.String `tfsdk:"parent_path" tf:"optional"` + EffectiveParentPath types.String `tfsdk:"effective_parent_path" tf:"computed,optional"` // The workspace path of the dashboard asset, including the file name. // Exported dashboards always have the file extension `.lvdash.json`. This // field is excluded in List Dashboards responses. - Path types.String `tfsdk:"path" tf:"optional"` + Path types.String `tfsdk:"path" tf:"optional"` + EffectivePath types.String `tfsdk:"effective_path" tf:"computed,optional"` // The contents of the dashboard in serialized string form. This field is // excluded in List Dashboards responses. Use the [get dashboard API] to // retrieve an example response, which includes the `serialized_dashboard` @@ -100,41 +153,144 @@ type Dashboard struct { SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` // The timestamp of when the dashboard was last updated by the user. This // field is excluded in List Dashboards responses. - UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + EffectiveUpdateTime types.String `tfsdk:"effective_update_time" tf:"computed,optional"` // The warehouse ID used to run the dashboard. 
WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *Dashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan Dashboard) { + newState.EffectiveCreateTime = newState.CreateTime + newState.CreateTime = plan.CreateTime + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveEtag = newState.Etag + newState.Etag = plan.Etag + newState.EffectiveParentPath = newState.ParentPath + newState.ParentPath = plan.ParentPath + newState.EffectivePath = newState.Path + newState.Path = plan.Path + newState.EffectiveUpdateTime = newState.UpdateTime + newState.UpdateTime = plan.UpdateTime +} + +func (newState *Dashboard) SyncEffectiveFieldsDuringRead(existingState Dashboard) { + if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { + newState.CreateTime = existingState.CreateTime + } + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { + newState.Etag = existingState.Etag + } + if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { + newState.ParentPath = existingState.ParentPath + } + if existingState.EffectivePath.ValueString() == newState.Path.ValueString() { + newState.Path = existingState.Path + } + if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { + newState.UpdateTime = existingState.UpdateTime + } +} + // Delete dashboard schedule type DeleteScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // The etag for the schedule. Optionally, it can be provided to verify that // the schedule has not been modified from its last retrieval. - Etag types.String `tfsdk:"-"` + Etag types.String `tfsdk:"-"` + EffectiveEtag types.String `tfsdk:"-"` // UUID identifying the schedule. 
- ScheduleId types.String `tfsdk:"-"` + ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` +} + +func (newState *DeleteScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteScheduleRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveEtag = newState.Etag + newState.Etag = plan.Etag + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *DeleteScheduleRequest) SyncEffectiveFieldsDuringRead(existingState DeleteScheduleRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { + newState.Etag = existingState.Etag + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } } type DeleteScheduleResponse struct { } +func (newState *DeleteScheduleResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteScheduleResponse) { +} + +func (newState *DeleteScheduleResponse) SyncEffectiveFieldsDuringRead(existingState DeleteScheduleResponse) { +} + // Delete schedule subscription type DeleteSubscriptionRequest struct { // UUID identifying the dashboard which the subscription belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // The etag for the subscription. Can be optionally provided to ensure that // the subscription has not been modified since the last read. - Etag types.String `tfsdk:"-"` + Etag types.String `tfsdk:"-"` + EffectiveEtag types.String `tfsdk:"-"` // UUID identifying the schedule which the subscription belongs. - ScheduleId types.String `tfsdk:"-"` + ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` // UUID identifying the subscription. 
- SubscriptionId types.String `tfsdk:"-"` + SubscriptionId types.String `tfsdk:"-"` + EffectiveSubscriptionId types.String `tfsdk:"-"` +} + +func (newState *DeleteSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteSubscriptionRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveEtag = newState.Etag + newState.Etag = plan.Etag + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId + newState.EffectiveSubscriptionId = newState.SubscriptionId + newState.SubscriptionId = plan.SubscriptionId +} + +func (newState *DeleteSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteSubscriptionRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { + newState.Etag = existingState.Etag + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } + if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { + newState.SubscriptionId = existingState.SubscriptionId + } } type DeleteSubscriptionResponse struct { } +func (newState *DeleteSubscriptionResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteSubscriptionResponse) { +} + +func (newState *DeleteSubscriptionResponse) SyncEffectiveFieldsDuringRead(existingState DeleteSubscriptionResponse) { +} + // Execute SQL query in a conversation message type ExecuteMessageQueryRequest struct { // Conversation ID @@ -145,6 +301,12 @@ type ExecuteMessageQueryRequest struct { SpaceId types.String `tfsdk:"-"` } +func (newState *ExecuteMessageQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExecuteMessageQueryRequest) { +} + +func (newState *ExecuteMessageQueryRequest) SyncEffectiveFieldsDuringRead(existingState ExecuteMessageQueryRequest) { +} + // Genie AI Response type GenieAttachment struct { Query []QueryAttachment `tfsdk:"query" tf:"optional,object"` @@ -152,6 +314,12 @@ type GenieAttachment struct { Text []TextAttachment `tfsdk:"text" tf:"optional,object"` } +func (newState *GenieAttachment) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieAttachment) { +} + +func (newState *GenieAttachment) SyncEffectiveFieldsDuringRead(existingState GenieAttachment) { +} + type GenieConversation struct { // Timestamp when the message was created CreatedTimestamp types.Int64 `tfsdk:"created_timestamp" tf:"optional"` @@ -167,6 +335,12 @@ type GenieConversation struct { UserId types.Int64 `tfsdk:"user_id" tf:""` } +func (newState *GenieConversation) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieConversation) { +} + +func (newState *GenieConversation) SyncEffectiveFieldsDuringRead(existingState GenieConversation) { +} + type GenieCreateConversationMessageRequest struct { // User message content. 
Content types.String `tfsdk:"content" tf:""` @@ -176,6 +350,12 @@ type GenieCreateConversationMessageRequest struct { SpaceId types.String `tfsdk:"-"` } +func (newState *GenieCreateConversationMessageRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieCreateConversationMessageRequest) { +} + +func (newState *GenieCreateConversationMessageRequest) SyncEffectiveFieldsDuringRead(existingState GenieCreateConversationMessageRequest) { +} + // Get conversation message type GenieGetConversationMessageRequest struct { // The ID associated with the target conversation. @@ -188,6 +368,12 @@ type GenieGetConversationMessageRequest struct { SpaceId types.String `tfsdk:"-"` } +func (newState *GenieGetConversationMessageRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieGetConversationMessageRequest) { +} + +func (newState *GenieGetConversationMessageRequest) SyncEffectiveFieldsDuringRead(existingState GenieGetConversationMessageRequest) { +} + // Get conversation message SQL query result type GenieGetMessageQueryResultRequest struct { // Conversation ID @@ -198,12 +384,24 @@ type GenieGetMessageQueryResultRequest struct { SpaceId types.String `tfsdk:"-"` } +func (newState *GenieGetMessageQueryResultRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieGetMessageQueryResultRequest) { +} + +func (newState *GenieGetMessageQueryResultRequest) SyncEffectiveFieldsDuringRead(existingState GenieGetMessageQueryResultRequest) { +} + type GenieGetMessageQueryResultResponse struct { // SQL Statement Execution response. See [Get status, manifest, and result // first chunk](:method:statementexecution/getstatement) for more details. StatementResponse sql.StatementResponse `tfsdk:"statement_response" tf:"optional,object"` } +func (newState *GenieGetMessageQueryResultResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieGetMessageQueryResultResponse) { +} + +func (newState *GenieGetMessageQueryResultResponse) SyncEffectiveFieldsDuringRead(existingState GenieGetMessageQueryResultResponse) { +} + type GenieMessage struct { // AI produced response to the message Attachments []GenieAttachment `tfsdk:"attachments" tf:"optional"` @@ -244,6 +442,12 @@ type GenieMessage struct { UserId types.Int64 `tfsdk:"user_id" tf:"optional"` } +func (newState *GenieMessage) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieMessage) { +} + +func (newState *GenieMessage) SyncEffectiveFieldsDuringRead(existingState GenieMessage) { +} + type GenieStartConversationMessageRequest struct { // The text of the message that starts the conversation. 
Content types.String `tfsdk:"content" tf:""` @@ -252,6 +456,12 @@ type GenieStartConversationMessageRequest struct { SpaceId types.String `tfsdk:"-"` } +func (newState *GenieStartConversationMessageRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieStartConversationMessageRequest) { +} + +func (newState *GenieStartConversationMessageRequest) SyncEffectiveFieldsDuringRead(existingState GenieStartConversationMessageRequest) { +} + type GenieStartConversationResponse struct { Conversation []GenieConversation `tfsdk:"conversation" tf:"optional,object"` // Conversation ID @@ -262,34 +472,94 @@ type GenieStartConversationResponse struct { MessageId types.String `tfsdk:"message_id" tf:""` } +func (newState *GenieStartConversationResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieStartConversationResponse) { +} + +func (newState *GenieStartConversationResponse) SyncEffectiveFieldsDuringRead(existingState GenieStartConversationResponse) { +} + // Get dashboard type GetDashboardRequest struct { // UUID identifying the dashboard. DashboardId types.String `tfsdk:"-"` } +func (newState *GetDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDashboardRequest) { +} + +func (newState *GetDashboardRequest) SyncEffectiveFieldsDuringRead(existingState GetDashboardRequest) { +} + // Get published dashboard type GetPublishedDashboardRequest struct { // UUID identifying the dashboard to be published. DashboardId types.String `tfsdk:"-"` } +func (newState *GetPublishedDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPublishedDashboardRequest) { +} + +func (newState *GetPublishedDashboardRequest) SyncEffectiveFieldsDuringRead(existingState GetPublishedDashboardRequest) { +} + // Get dashboard schedule type GetScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // UUID identifying the schedule. - ScheduleId types.String `tfsdk:"-"` + ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` +} + +func (newState *GetScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetScheduleRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *GetScheduleRequest) SyncEffectiveFieldsDuringRead(existingState GetScheduleRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } } // Get schedule subscription type GetSubscriptionRequest struct { // UUID identifying the dashboard which the subscription belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // UUID identifying the schedule which the subscription belongs. - ScheduleId types.String `tfsdk:"-"` + ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` // UUID identifying the subscription. 
- SubscriptionId types.String `tfsdk:"-"` + SubscriptionId types.String `tfsdk:"-"` + EffectiveSubscriptionId types.String `tfsdk:"-"` +} + +func (newState *GetSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetSubscriptionRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId + newState.EffectiveSubscriptionId = newState.SubscriptionId + newState.SubscriptionId = plan.SubscriptionId +} + +func (newState *GetSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState GetSubscriptionRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } + if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { + newState.SubscriptionId = existingState.SubscriptionId + } } // List dashboards @@ -298,7 +568,8 @@ type ListDashboardsRequest struct { PageSize types.Int64 `tfsdk:"-"` // A page token, received from a previous `ListDashboards` call. This token // can be used to retrieve the subsequent page. - PageToken types.String `tfsdk:"-"` + PageToken types.String `tfsdk:"-"` + EffectivePageToken types.String `tfsdk:"-"` // The flag to include dashboards located in the trash. If unspecified, only // active dashboards will be returned. ShowTrashed types.Bool `tfsdk:"-"` @@ -306,61 +577,156 @@ type ListDashboardsRequest struct { View types.String `tfsdk:"-"` } +func (newState *ListDashboardsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListDashboardsRequest) { + newState.EffectivePageToken = newState.PageToken + newState.PageToken = plan.PageToken +} + +func (newState *ListDashboardsRequest) SyncEffectiveFieldsDuringRead(existingState ListDashboardsRequest) { + if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { + newState.PageToken = existingState.PageToken + } +} + type ListDashboardsResponse struct { Dashboards []Dashboard `tfsdk:"dashboards" tf:"optional"` // A token, which can be sent as `page_token` to retrieve the next page. If // this field is omitted, there are no subsequent dashboards. - NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` + NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` + EffectiveNextPageToken types.String `tfsdk:"effective_next_page_token" tf:"computed,optional"` +} + +func (newState *ListDashboardsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListDashboardsResponse) { + newState.EffectiveNextPageToken = newState.NextPageToken + newState.NextPageToken = plan.NextPageToken +} + +func (newState *ListDashboardsResponse) SyncEffectiveFieldsDuringRead(existingState ListDashboardsResponse) { + if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { + newState.NextPageToken = existingState.NextPageToken + } } // List dashboard schedules type ListSchedulesRequest struct { // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // The number of schedules to return per page. PageSize types.Int64 `tfsdk:"-"` // A page token, received from a previous `ListSchedules` call. 
Use this to // retrieve the subsequent page. - PageToken types.String `tfsdk:"-"` + PageToken types.String `tfsdk:"-"` + EffectivePageToken types.String `tfsdk:"-"` +} + +func (newState *ListSchedulesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSchedulesRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectivePageToken = newState.PageToken + newState.PageToken = plan.PageToken +} + +func (newState *ListSchedulesRequest) SyncEffectiveFieldsDuringRead(existingState ListSchedulesRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { + newState.PageToken = existingState.PageToken + } } type ListSchedulesResponse struct { // A token that can be used as a `page_token` in subsequent requests to // retrieve the next page of results. If this field is omitted, there are no // subsequent schedules. - NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` + NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` + EffectiveNextPageToken types.String `tfsdk:"effective_next_page_token" tf:"computed,optional"` Schedules []Schedule `tfsdk:"schedules" tf:"optional"` } +func (newState *ListSchedulesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSchedulesResponse) { + newState.EffectiveNextPageToken = newState.NextPageToken + newState.NextPageToken = plan.NextPageToken +} + +func (newState *ListSchedulesResponse) SyncEffectiveFieldsDuringRead(existingState ListSchedulesResponse) { + if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { + newState.NextPageToken = existingState.NextPageToken + } +} + // List schedule subscriptions type ListSubscriptionsRequest struct { // UUID identifying the dashboard to which the subscription belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // The number of subscriptions to return per page. PageSize types.Int64 `tfsdk:"-"` // A page token, received from a previous `ListSubscriptions` call. Use this // to retrieve the subsequent page. - PageToken types.String `tfsdk:"-"` + PageToken types.String `tfsdk:"-"` + EffectivePageToken types.String `tfsdk:"-"` // UUID identifying the schedule to which the subscription belongs. 
- ScheduleId types.String `tfsdk:"-"` + ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` +} + +func (newState *ListSubscriptionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSubscriptionsRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectivePageToken = newState.PageToken + newState.PageToken = plan.PageToken + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *ListSubscriptionsRequest) SyncEffectiveFieldsDuringRead(existingState ListSubscriptionsRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { + newState.PageToken = existingState.PageToken + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } } type ListSubscriptionsResponse struct { // A token that can be used as a `page_token` in subsequent requests to // retrieve the next page of results. If this field is omitted, there are no // subsequent subscriptions. - NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` + NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` + EffectiveNextPageToken types.String `tfsdk:"effective_next_page_token" tf:"computed,optional"` Subscriptions []Subscription `tfsdk:"subscriptions" tf:"optional"` } +func (newState *ListSubscriptionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSubscriptionsResponse) { + newState.EffectiveNextPageToken = newState.NextPageToken + newState.NextPageToken = plan.NextPageToken +} + +func (newState *ListSubscriptionsResponse) SyncEffectiveFieldsDuringRead(existingState ListSubscriptionsResponse) { + if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { + newState.NextPageToken = existingState.NextPageToken + } +} + type MessageError struct { Error types.String `tfsdk:"error" tf:"optional"` Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *MessageError) SyncEffectiveFieldsDuringCreateOrUpdate(plan MessageError) { +} + +func (newState *MessageError) SyncEffectiveFieldsDuringRead(existingState MessageError) { +} + type MigrateDashboardRequest struct { // Display name for the new Lakeview dashboard. DisplayName types.String `tfsdk:"display_name" tf:"optional"` @@ -371,6 +737,12 @@ type MigrateDashboardRequest struct { SourceDashboardId types.String `tfsdk:"source_dashboard_id" tf:""` } +func (newState *MigrateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan MigrateDashboardRequest) { +} + +func (newState *MigrateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState MigrateDashboardRequest) { +} + type PublishRequest struct { // UUID identifying the dashboard to be published. DashboardId types.String `tfsdk:"-"` @@ -383,17 +755,41 @@ type PublishRequest struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *PublishRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PublishRequest) { +} + +func (newState *PublishRequest) SyncEffectiveFieldsDuringRead(existingState PublishRequest) { +} + type PublishedDashboard struct { // The display name of the published dashboard. 
- DisplayName types.String `tfsdk:"display_name" tf:"optional"` + DisplayName types.String `tfsdk:"display_name" tf:"optional"` + EffectiveDisplayName types.String `tfsdk:"effective_display_name" tf:"computed,optional"` // Indicates whether credentials are embedded in the published dashboard. EmbedCredentials types.Bool `tfsdk:"embed_credentials" tf:"optional"` // The timestamp of when the published dashboard was last revised. - RevisionCreateTime types.String `tfsdk:"revision_create_time" tf:"optional"` + RevisionCreateTime types.String `tfsdk:"revision_create_time" tf:"optional"` + EffectiveRevisionCreateTime types.String `tfsdk:"effective_revision_create_time" tf:"computed,optional"` // The warehouse ID used to run the published dashboard. WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *PublishedDashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan PublishedDashboard) { + newState.EffectiveDisplayName = newState.DisplayName + newState.DisplayName = plan.DisplayName + newState.EffectiveRevisionCreateTime = newState.RevisionCreateTime + newState.RevisionCreateTime = plan.RevisionCreateTime +} + +func (newState *PublishedDashboard) SyncEffectiveFieldsDuringRead(existingState PublishedDashboard) { + if existingState.EffectiveDisplayName.ValueString() == newState.DisplayName.ValueString() { + newState.DisplayName = existingState.DisplayName + } + if existingState.EffectiveRevisionCreateTime.ValueString() == newState.RevisionCreateTime.ValueString() { + newState.RevisionCreateTime = existingState.RevisionCreateTime + } +} + type QueryAttachment struct { // Description of the query Description types.String `tfsdk:"description" tf:"optional"` @@ -413,7 +809,15 @@ type QueryAttachment struct { Title types.String `tfsdk:"title" tf:"optional"` } +func (newState *QueryAttachment) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryAttachment) { +} + +func (newState *QueryAttachment) SyncEffectiveFieldsDuringRead(existingState QueryAttachment) { +} + type Result struct { + // If result is truncated + IsTruncated types.Bool `tfsdk:"is_truncated" tf:"optional"` // Row count of the result RowCount types.Int64 `tfsdk:"row_count" tf:"optional"` // Statement Execution API statement id. Use [Get status, manifest, and @@ -422,26 +826,68 @@ type Result struct { StatementId types.String `tfsdk:"statement_id" tf:"optional"` } +func (newState *Result) SyncEffectiveFieldsDuringCreateOrUpdate(plan Result) { +} + +func (newState *Result) SyncEffectiveFieldsDuringRead(existingState Result) { +} + type Schedule struct { // A timestamp indicating when the schedule was created. - CreateTime types.String `tfsdk:"create_time" tf:"optional"` + CreateTime types.String `tfsdk:"create_time" tf:"optional"` + EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` // The cron expression describing the frequency of the periodic refresh for // this schedule. CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` + DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` + EffectiveDashboardId types.String `tfsdk:"effective_dashboard_id" tf:"computed,optional"` // The display name for schedule. DisplayName types.String `tfsdk:"display_name" tf:"optional"` // The etag for the schedule. 
Must be left empty on create, must be provided // on updates to ensure that the schedule has not been modified since the // last read, and can be optionally provided on delete. - Etag types.String `tfsdk:"etag" tf:"optional"` + Etag types.String `tfsdk:"etag" tf:"optional"` + EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` // The status indicates whether this schedule is paused or not. PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` // UUID identifying the schedule. - ScheduleId types.String `tfsdk:"schedule_id" tf:"optional"` + ScheduleId types.String `tfsdk:"schedule_id" tf:"optional"` + EffectiveScheduleId types.String `tfsdk:"effective_schedule_id" tf:"computed,optional"` // A timestamp indicating when the schedule was last updated. - UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + EffectiveUpdateTime types.String `tfsdk:"effective_update_time" tf:"computed,optional"` +} + +func (newState *Schedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan Schedule) { + newState.EffectiveCreateTime = newState.CreateTime + newState.CreateTime = plan.CreateTime + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveEtag = newState.Etag + newState.Etag = plan.Etag + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId + newState.EffectiveUpdateTime = newState.UpdateTime + newState.UpdateTime = plan.UpdateTime +} + +func (newState *Schedule) SyncEffectiveFieldsDuringRead(existingState Schedule) { + if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { + newState.CreateTime = existingState.CreateTime + } + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { + newState.Etag = existingState.Etag + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } + if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { + newState.UpdateTime = existingState.UpdateTime + } } type Subscriber struct { @@ -453,38 +899,116 @@ type Subscriber struct { UserSubscriber []SubscriptionSubscriberUser `tfsdk:"user_subscriber" tf:"optional,object"` } +func (newState *Subscriber) SyncEffectiveFieldsDuringCreateOrUpdate(plan Subscriber) { +} + +func (newState *Subscriber) SyncEffectiveFieldsDuringRead(existingState Subscriber) { +} + type Subscription struct { // A timestamp indicating when the subscription was created. - CreateTime types.String `tfsdk:"create_time" tf:"optional"` + CreateTime types.String `tfsdk:"create_time" tf:"optional"` + EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` // UserId of the user who adds subscribers (users or notification // destinations) to the dashboard's schedule. - CreatedByUserId types.Int64 `tfsdk:"created_by_user_id" tf:"optional"` + CreatedByUserId types.Int64 `tfsdk:"created_by_user_id" tf:"optional"` + EffectiveCreatedByUserId types.Int64 `tfsdk:"effective_created_by_user_id" tf:"computed,optional"` // UUID identifying the dashboard to which the subscription belongs. 
- DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` + DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` + EffectiveDashboardId types.String `tfsdk:"effective_dashboard_id" tf:"computed,optional"` // The etag for the subscription. Must be left empty on create, can be // optionally provided on delete to ensure that the subscription has not // been deleted since the last read. - Etag types.String `tfsdk:"etag" tf:"optional"` + Etag types.String `tfsdk:"etag" tf:"optional"` + EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` // UUID identifying the schedule to which the subscription belongs. - ScheduleId types.String `tfsdk:"schedule_id" tf:"optional"` + ScheduleId types.String `tfsdk:"schedule_id" tf:"optional"` + EffectiveScheduleId types.String `tfsdk:"effective_schedule_id" tf:"computed,optional"` // Subscriber details for users and destinations to be added as subscribers // to the schedule. Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"` // UUID identifying the subscription. - SubscriptionId types.String `tfsdk:"subscription_id" tf:"optional"` + SubscriptionId types.String `tfsdk:"subscription_id" tf:"optional"` + EffectiveSubscriptionId types.String `tfsdk:"effective_subscription_id" tf:"computed,optional"` // A timestamp indicating when the subscription was last updated. - UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + UpdateTime types.String `tfsdk:"update_time" tf:"optional"` + EffectiveUpdateTime types.String `tfsdk:"effective_update_time" tf:"computed,optional"` +} + +func (newState *Subscription) SyncEffectiveFieldsDuringCreateOrUpdate(plan Subscription) { + newState.EffectiveCreateTime = newState.CreateTime + newState.CreateTime = plan.CreateTime + newState.EffectiveCreatedByUserId = newState.CreatedByUserId + newState.CreatedByUserId = plan.CreatedByUserId + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveEtag = newState.Etag + newState.Etag = plan.Etag + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId + newState.EffectiveSubscriptionId = newState.SubscriptionId + newState.SubscriptionId = plan.SubscriptionId + newState.EffectiveUpdateTime = newState.UpdateTime + newState.UpdateTime = plan.UpdateTime +} + +func (newState *Subscription) SyncEffectiveFieldsDuringRead(existingState Subscription) { + if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { + newState.CreateTime = existingState.CreateTime + } + if existingState.EffectiveCreatedByUserId.ValueInt64() == newState.CreatedByUserId.ValueInt64() { + newState.CreatedByUserId = existingState.CreatedByUserId + } + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { + newState.Etag = existingState.Etag + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } + if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { + newState.SubscriptionId = existingState.SubscriptionId + } + if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { + newState.UpdateTime = existingState.UpdateTime + } } type SubscriptionSubscriberDestination struct { // The canonical identifier of the destination 
to receive email // notification. - DestinationId types.String `tfsdk:"destination_id" tf:""` + DestinationId types.String `tfsdk:"destination_id" tf:""` + EffectiveDestinationId types.String `tfsdk:"effective_destination_id" tf:"computed,optional"` +} + +func (newState *SubscriptionSubscriberDestination) SyncEffectiveFieldsDuringCreateOrUpdate(plan SubscriptionSubscriberDestination) { + newState.EffectiveDestinationId = newState.DestinationId + newState.DestinationId = plan.DestinationId +} + +func (newState *SubscriptionSubscriberDestination) SyncEffectiveFieldsDuringRead(existingState SubscriptionSubscriberDestination) { + if existingState.EffectiveDestinationId.ValueString() == newState.DestinationId.ValueString() { + newState.DestinationId = existingState.DestinationId + } } type SubscriptionSubscriberUser struct { // UserId of the subscriber. - UserId types.Int64 `tfsdk:"user_id" tf:""` + UserId types.Int64 `tfsdk:"user_id" tf:""` + EffectiveUserId types.Int64 `tfsdk:"effective_user_id" tf:"computed,optional"` +} + +func (newState *SubscriptionSubscriberUser) SyncEffectiveFieldsDuringCreateOrUpdate(plan SubscriptionSubscriberUser) { + newState.EffectiveUserId = newState.UserId + newState.UserId = plan.UserId +} + +func (newState *SubscriptionSubscriberUser) SyncEffectiveFieldsDuringRead(existingState SubscriptionSubscriberUser) { + if existingState.EffectiveUserId.ValueInt64() == newState.UserId.ValueInt64() { + newState.UserId = existingState.UserId + } } type TextAttachment struct { @@ -494,24 +1018,54 @@ type TextAttachment struct { Id types.String `tfsdk:"id" tf:"optional"` } +func (newState *TextAttachment) SyncEffectiveFieldsDuringCreateOrUpdate(plan TextAttachment) { +} + +func (newState *TextAttachment) SyncEffectiveFieldsDuringRead(existingState TextAttachment) { +} + // Trash dashboard type TrashDashboardRequest struct { // UUID identifying the dashboard. DashboardId types.String `tfsdk:"-"` } +func (newState *TrashDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan TrashDashboardRequest) { +} + +func (newState *TrashDashboardRequest) SyncEffectiveFieldsDuringRead(existingState TrashDashboardRequest) { +} + type TrashDashboardResponse struct { } +func (newState *TrashDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan TrashDashboardResponse) { +} + +func (newState *TrashDashboardResponse) SyncEffectiveFieldsDuringRead(existingState TrashDashboardResponse) { +} + // Unpublish dashboard type UnpublishDashboardRequest struct { // UUID identifying the dashboard to be published. DashboardId types.String `tfsdk:"-"` } +func (newState *UnpublishDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UnpublishDashboardRequest) { +} + +func (newState *UnpublishDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UnpublishDashboardRequest) { +} + type UnpublishDashboardResponse struct { } +func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UnpublishDashboardResponse) { +} + +func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringRead(existingState UnpublishDashboardResponse) { +} + type UpdateDashboardRequest struct { // UUID identifying the dashboard. DashboardId types.String `tfsdk:"-"` @@ -520,7 +1074,8 @@ type UpdateDashboardRequest struct { // The etag for the dashboard. Can be optionally provided on updates to // ensure that the dashboard has not been modified since the last read. This // field is excluded in List Dashboards responses. 
- Etag types.String `tfsdk:"etag" tf:"optional"` + Etag types.String `tfsdk:"etag" tf:"optional"` + EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` // The contents of the dashboard in serialized string form. This field is // excluded in List Dashboards responses. Use the [get dashboard API] to // retrieve an example response, which includes the `serialized_dashboard` @@ -533,20 +1088,55 @@ type UpdateDashboardRequest struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDashboardRequest) { + newState.EffectiveEtag = newState.Etag + newState.Etag = plan.Etag +} + +func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDashboardRequest) { + if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { + newState.Etag = existingState.Etag + } +} + type UpdateScheduleRequest struct { // The cron expression describing the frequency of the periodic refresh for // this schedule. CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` // The display name for schedule. DisplayName types.String `tfsdk:"display_name" tf:"optional"` // The etag for the schedule. Must be left empty on create, must be provided // on updates to ensure that the schedule has not been modified since the // last read, and can be optionally provided on delete. - Etag types.String `tfsdk:"etag" tf:"optional"` + Etag types.String `tfsdk:"etag" tf:"optional"` + EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` // The status indicates whether this schedule is paused or not. PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` // UUID identifying the schedule. 
- ScheduleId types.String `tfsdk:"-"` + ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` +} + +func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateScheduleRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveEtag = newState.Etag + newState.Etag = plan.Etag + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState UpdateScheduleRequest) { + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { + newState.Etag = existingState.Etag + } + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } } diff --git a/internal/service/files_tf/model.go b/internal/service/files_tf/model.go index e3e67e9961..d2296d76ee 100755 --- a/internal/service/files_tf/model.go +++ b/internal/service/files_tf/model.go @@ -24,17 +24,41 @@ type AddBlock struct { Handle types.Int64 `tfsdk:"handle" tf:""` } +func (newState *AddBlock) SyncEffectiveFieldsDuringCreateOrUpdate(plan AddBlock) { +} + +func (newState *AddBlock) SyncEffectiveFieldsDuringRead(existingState AddBlock) { +} + type AddBlockResponse struct { } +func (newState *AddBlockResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AddBlockResponse) { +} + +func (newState *AddBlockResponse) SyncEffectiveFieldsDuringRead(existingState AddBlockResponse) { +} + type Close struct { // The handle on an open stream. Handle types.Int64 `tfsdk:"handle" tf:""` } +func (newState *Close) SyncEffectiveFieldsDuringCreateOrUpdate(plan Close) { +} + +func (newState *Close) SyncEffectiveFieldsDuringRead(existingState Close) { +} + type CloseResponse struct { } +func (newState *CloseResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CloseResponse) { +} + +func (newState *CloseResponse) SyncEffectiveFieldsDuringRead(existingState CloseResponse) { +} + type Create struct { // The flag that specifies whether to overwrite existing file/files. Overwrite types.Bool `tfsdk:"overwrite" tf:"optional"` @@ -42,21 +66,45 @@ type Create struct { Path types.String `tfsdk:"path" tf:""` } +func (newState *Create) SyncEffectiveFieldsDuringCreateOrUpdate(plan Create) { +} + +func (newState *Create) SyncEffectiveFieldsDuringRead(existingState Create) { +} + // Create a directory type CreateDirectoryRequest struct { // The absolute path of a directory. DirectoryPath types.String `tfsdk:"-"` } +func (newState *CreateDirectoryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateDirectoryRequest) { +} + +func (newState *CreateDirectoryRequest) SyncEffectiveFieldsDuringRead(existingState CreateDirectoryRequest) { +} + type CreateDirectoryResponse struct { } +func (newState *CreateDirectoryResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateDirectoryResponse) { +} + +func (newState *CreateDirectoryResponse) SyncEffectiveFieldsDuringRead(existingState CreateDirectoryResponse) { +} + type CreateResponse struct { // Handle which should subsequently be passed into the AddBlock and Close // calls when writing to a file through a stream. 
Handle types.Int64 `tfsdk:"handle" tf:"optional"` } +func (newState *CreateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateResponse) { +} + +func (newState *CreateResponse) SyncEffectiveFieldsDuringRead(existingState CreateResponse) { +} + type Delete struct { // The path of the file or directory to delete. The path should be the // absolute DBFS path. @@ -66,24 +114,54 @@ type Delete struct { Recursive types.Bool `tfsdk:"recursive" tf:"optional"` } +func (newState *Delete) SyncEffectiveFieldsDuringCreateOrUpdate(plan Delete) { +} + +func (newState *Delete) SyncEffectiveFieldsDuringRead(existingState Delete) { +} + // Delete a directory type DeleteDirectoryRequest struct { // The absolute path of a directory. DirectoryPath types.String `tfsdk:"-"` } +func (newState *DeleteDirectoryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDirectoryRequest) { +} + +func (newState *DeleteDirectoryRequest) SyncEffectiveFieldsDuringRead(existingState DeleteDirectoryRequest) { +} + type DeleteDirectoryResponse struct { } +func (newState *DeleteDirectoryResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDirectoryResponse) { +} + +func (newState *DeleteDirectoryResponse) SyncEffectiveFieldsDuringRead(existingState DeleteDirectoryResponse) { +} + // Delete a file type DeleteFileRequest struct { // The absolute path of the file. FilePath types.String `tfsdk:"-"` } +func (newState *DeleteFileRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteFileRequest) { +} + +func (newState *DeleteFileRequest) SyncEffectiveFieldsDuringRead(existingState DeleteFileRequest) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + type DirectoryEntry struct { // The length of the file in bytes. This field is omitted for directories. FileSize types.Int64 `tfsdk:"file_size" tf:"optional"` @@ -98,12 +176,24 @@ type DirectoryEntry struct { Path types.String `tfsdk:"path" tf:"optional"` } +func (newState *DirectoryEntry) SyncEffectiveFieldsDuringCreateOrUpdate(plan DirectoryEntry) { +} + +func (newState *DirectoryEntry) SyncEffectiveFieldsDuringRead(existingState DirectoryEntry) { +} + // Download a file type DownloadRequest struct { // The absolute path of the file. FilePath types.String `tfsdk:"-"` } +func (newState *DownloadRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DownloadRequest) { +} + +func (newState *DownloadRequest) SyncEffectiveFieldsDuringRead(existingState DownloadRequest) { +} + type DownloadResponse struct { ContentLength types.Int64 `tfsdk:"-"` @@ -114,6 +204,12 @@ type DownloadResponse struct { LastModified types.String `tfsdk:"-"` } +func (newState *DownloadResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DownloadResponse) { +} + +func (newState *DownloadResponse) SyncEffectiveFieldsDuringRead(existingState DownloadResponse) { +} + type FileInfo struct { // The length of the file in bytes. This field is omitted for directories. FileSize types.Int64 `tfsdk:"file_size" tf:"optional"` @@ -125,21 +221,45 @@ type FileInfo struct { Path types.String `tfsdk:"path" tf:"optional"` } +func (newState *FileInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan FileInfo) { +} + +func (newState *FileInfo) SyncEffectiveFieldsDuringRead(existingState FileInfo) { +} + // Get directory metadata type GetDirectoryMetadataRequest struct { // The absolute path of a directory. 
DirectoryPath types.String `tfsdk:"-"` } +func (newState *GetDirectoryMetadataRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDirectoryMetadataRequest) { +} + +func (newState *GetDirectoryMetadataRequest) SyncEffectiveFieldsDuringRead(existingState GetDirectoryMetadataRequest) { +} + type GetDirectoryMetadataResponse struct { } +func (newState *GetDirectoryMetadataResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDirectoryMetadataResponse) { +} + +func (newState *GetDirectoryMetadataResponse) SyncEffectiveFieldsDuringRead(existingState GetDirectoryMetadataResponse) { +} + // Get file metadata type GetMetadataRequest struct { // The absolute path of the file. FilePath types.String `tfsdk:"-"` } +func (newState *GetMetadataRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetMetadataRequest) { +} + +func (newState *GetMetadataRequest) SyncEffectiveFieldsDuringRead(existingState GetMetadataRequest) { +} + type GetMetadataResponse struct { ContentLength types.Int64 `tfsdk:"-"` @@ -148,6 +268,12 @@ type GetMetadataResponse struct { LastModified types.String `tfsdk:"-"` } +func (newState *GetMetadataResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetMetadataResponse) { +} + +func (newState *GetMetadataResponse) SyncEffectiveFieldsDuringRead(existingState GetMetadataResponse) { +} + // Get the information of a file or directory type GetStatusRequest struct { // The path of the file or directory. The path should be the absolute DBFS @@ -155,6 +281,12 @@ type GetStatusRequest struct { Path types.String `tfsdk:"-"` } +func (newState *GetStatusRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetStatusRequest) { +} + +func (newState *GetStatusRequest) SyncEffectiveFieldsDuringRead(existingState GetStatusRequest) { +} + // List directory contents or file details type ListDbfsRequest struct { // The path of the file or directory. The path should be the absolute DBFS @@ -162,6 +294,12 @@ type ListDbfsRequest struct { Path types.String `tfsdk:"-"` } +func (newState *ListDbfsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListDbfsRequest) { +} + +func (newState *ListDbfsRequest) SyncEffectiveFieldsDuringRead(existingState ListDbfsRequest) { +} + // List directory contents type ListDirectoryContentsRequest struct { // The absolute path of a directory. @@ -188,6 +326,12 @@ type ListDirectoryContentsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListDirectoryContentsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListDirectoryContentsRequest) { +} + +func (newState *ListDirectoryContentsRequest) SyncEffectiveFieldsDuringRead(existingState ListDirectoryContentsRequest) { +} + type ListDirectoryResponse struct { // Array of DirectoryEntry. Contents []DirectoryEntry `tfsdk:"contents" tf:"optional"` @@ -195,20 +339,44 @@ type ListDirectoryResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListDirectoryResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListDirectoryResponse) { +} + +func (newState *ListDirectoryResponse) SyncEffectiveFieldsDuringRead(existingState ListDirectoryResponse) { +} + type ListStatusResponse struct { // A list of FileInfo's that describe contents of directory or file. See // example above. 
Files []FileInfo `tfsdk:"files" tf:"optional"` } +func (newState *ListStatusResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListStatusResponse) { +} + +func (newState *ListStatusResponse) SyncEffectiveFieldsDuringRead(existingState ListStatusResponse) { +} + type MkDirs struct { // The path of the new directory. The path should be the absolute DBFS path. Path types.String `tfsdk:"path" tf:""` } +func (newState *MkDirs) SyncEffectiveFieldsDuringCreateOrUpdate(plan MkDirs) { +} + +func (newState *MkDirs) SyncEffectiveFieldsDuringRead(existingState MkDirs) { +} + type MkDirsResponse struct { } +func (newState *MkDirsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan MkDirsResponse) { +} + +func (newState *MkDirsResponse) SyncEffectiveFieldsDuringRead(existingState MkDirsResponse) { +} + type Move struct { // The destination path of the file or directory. The path should be the // absolute DBFS path. @@ -218,9 +386,21 @@ type Move struct { SourcePath types.String `tfsdk:"source_path" tf:""` } +func (newState *Move) SyncEffectiveFieldsDuringCreateOrUpdate(plan Move) { +} + +func (newState *Move) SyncEffectiveFieldsDuringRead(existingState Move) { +} + type MoveResponse struct { } +func (newState *MoveResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan MoveResponse) { +} + +func (newState *MoveResponse) SyncEffectiveFieldsDuringRead(existingState MoveResponse) { +} + type Put struct { // This parameter might be absent, and instead a posted file will be used. Contents types.String `tfsdk:"contents" tf:"optional"` @@ -230,9 +410,21 @@ type Put struct { Path types.String `tfsdk:"path" tf:""` } +func (newState *Put) SyncEffectiveFieldsDuringCreateOrUpdate(plan Put) { +} + +func (newState *Put) SyncEffectiveFieldsDuringRead(existingState Put) { +} + type PutResponse struct { } +func (newState *PutResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutResponse) { +} + +func (newState *PutResponse) SyncEffectiveFieldsDuringRead(existingState PutResponse) { +} + // Get the contents of a file type ReadDbfsRequest struct { // The number of bytes to read starting from the offset. This has a limit of @@ -244,6 +436,12 @@ type ReadDbfsRequest struct { Path types.String `tfsdk:"-"` } +func (newState *ReadDbfsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReadDbfsRequest) { +} + +func (newState *ReadDbfsRequest) SyncEffectiveFieldsDuringRead(existingState ReadDbfsRequest) { +} + type ReadResponse struct { // The number of bytes read (could be less than ``length`` if we hit end of // file). 
This refers to number of bytes read in unencoded version (response @@ -253,6 +451,12 @@ type ReadResponse struct { Data types.String `tfsdk:"data" tf:"optional"` } +func (newState *ReadResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReadResponse) { +} + +func (newState *ReadResponse) SyncEffectiveFieldsDuringRead(existingState ReadResponse) { +} + // Upload a file type UploadRequest struct { Contents io.ReadCloser `tfsdk:"-"` @@ -262,5 +466,17 @@ type UploadRequest struct { Overwrite types.Bool `tfsdk:"-"` } +func (newState *UploadRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UploadRequest) { +} + +func (newState *UploadRequest) SyncEffectiveFieldsDuringRead(existingState UploadRequest) { +} + type UploadResponse struct { } + +func (newState *UploadResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UploadResponse) { +} + +func (newState *UploadResponse) SyncEffectiveFieldsDuringRead(existingState UploadResponse) { +} diff --git a/internal/service/iam_tf/model.go b/internal/service/iam_tf/model.go index 7eee548409..f4541fec02 100755 --- a/internal/service/iam_tf/model.go +++ b/internal/service/iam_tf/model.go @@ -25,6 +25,12 @@ type AccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *AccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccessControlRequest) { +} + +func (newState *AccessControlRequest) SyncEffectiveFieldsDuringRead(existingState AccessControlRequest) { +} + type AccessControlResponse struct { // All permissions. AllPermissions []Permission `tfsdk:"all_permissions" tf:"optional"` @@ -38,6 +44,12 @@ type AccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *AccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccessControlResponse) { +} + +func (newState *AccessControlResponse) SyncEffectiveFieldsDuringRead(existingState AccessControlResponse) { +} + type ComplexValue struct { Display types.String `tfsdk:"display" tf:"optional"` @@ -50,45 +62,93 @@ type ComplexValue struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *ComplexValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan ComplexValue) { +} + +func (newState *ComplexValue) SyncEffectiveFieldsDuringRead(existingState ComplexValue) { +} + // Delete a group type DeleteAccountGroupRequest struct { // Unique ID for a group in the Databricks account. Id types.String `tfsdk:"-"` } +func (newState *DeleteAccountGroupRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAccountGroupRequest) { +} + +func (newState *DeleteAccountGroupRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAccountGroupRequest) { +} + // Delete a service principal type DeleteAccountServicePrincipalRequest struct { // Unique ID for a service principal in the Databricks account. Id types.String `tfsdk:"-"` } +func (newState *DeleteAccountServicePrincipalRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAccountServicePrincipalRequest) { +} + +func (newState *DeleteAccountServicePrincipalRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAccountServicePrincipalRequest) { +} + // Delete a user type DeleteAccountUserRequest struct { // Unique ID for a user in the Databricks account. 
Id types.String `tfsdk:"-"` } +func (newState *DeleteAccountUserRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAccountUserRequest) { +} + +func (newState *DeleteAccountUserRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAccountUserRequest) { +} + // Delete a group type DeleteGroupRequest struct { // Unique ID for a group in the Databricks workspace. Id types.String `tfsdk:"-"` } +func (newState *DeleteGroupRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteGroupRequest) { +} + +func (newState *DeleteGroupRequest) SyncEffectiveFieldsDuringRead(existingState DeleteGroupRequest) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + // Delete a service principal type DeleteServicePrincipalRequest struct { // Unique ID for a service principal in the Databricks workspace. Id types.String `tfsdk:"-"` } +func (newState *DeleteServicePrincipalRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteServicePrincipalRequest) { +} + +func (newState *DeleteServicePrincipalRequest) SyncEffectiveFieldsDuringRead(existingState DeleteServicePrincipalRequest) { +} + // Delete a user type DeleteUserRequest struct { // Unique ID for a user in the Databricks workspace. Id types.String `tfsdk:"-"` } +func (newState *DeleteUserRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteUserRequest) { +} + +func (newState *DeleteUserRequest) SyncEffectiveFieldsDuringRead(existingState DeleteUserRequest) { +} + // Delete permissions assignment type DeleteWorkspaceAssignmentRequest struct { // The ID of the user, service principal, or group. @@ -97,21 +157,45 @@ type DeleteWorkspaceAssignmentRequest struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *DeleteWorkspaceAssignmentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteWorkspaceAssignmentRequest) { +} + +func (newState *DeleteWorkspaceAssignmentRequest) SyncEffectiveFieldsDuringRead(existingState DeleteWorkspaceAssignmentRequest) { +} + type DeleteWorkspacePermissionAssignmentResponse struct { } +func (newState *DeleteWorkspacePermissionAssignmentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteWorkspacePermissionAssignmentResponse) { +} + +func (newState *DeleteWorkspacePermissionAssignmentResponse) SyncEffectiveFieldsDuringRead(existingState DeleteWorkspacePermissionAssignmentResponse) { +} + // Get group details type GetAccountGroupRequest struct { // Unique ID for a group in the Databricks account. Id types.String `tfsdk:"-"` } +func (newState *GetAccountGroupRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAccountGroupRequest) { +} + +func (newState *GetAccountGroupRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountGroupRequest) { +} + // Get service principal details type GetAccountServicePrincipalRequest struct { // Unique ID for a service principal in the Databricks account. Id types.String `tfsdk:"-"` } +func (newState *GetAccountServicePrincipalRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAccountServicePrincipalRequest) { +} + +func (newState *GetAccountServicePrincipalRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountServicePrincipalRequest) { +} + // Get user details type GetAccountUserRequest struct { // Comma-separated list of attributes to return in response. 
@@ -139,27 +223,57 @@ type GetAccountUserRequest struct { StartIndex types.Int64 `tfsdk:"-"` } +func (newState *GetAccountUserRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAccountUserRequest) { +} + +func (newState *GetAccountUserRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountUserRequest) { +} + // Get assignable roles for a resource type GetAssignableRolesForResourceRequest struct { // The resource name for which assignable roles will be listed. Resource types.String `tfsdk:"-"` } +func (newState *GetAssignableRolesForResourceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAssignableRolesForResourceRequest) { +} + +func (newState *GetAssignableRolesForResourceRequest) SyncEffectiveFieldsDuringRead(existingState GetAssignableRolesForResourceRequest) { +} + type GetAssignableRolesForResourceResponse struct { Roles []Role `tfsdk:"roles" tf:"optional"` } +func (newState *GetAssignableRolesForResourceResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAssignableRolesForResourceResponse) { +} + +func (newState *GetAssignableRolesForResourceResponse) SyncEffectiveFieldsDuringRead(existingState GetAssignableRolesForResourceResponse) { +} + // Get group details type GetGroupRequest struct { // Unique ID for a group in the Databricks workspace. Id types.String `tfsdk:"-"` } +func (newState *GetGroupRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetGroupRequest) { +} + +func (newState *GetGroupRequest) SyncEffectiveFieldsDuringRead(existingState GetGroupRequest) { +} + type GetPasswordPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []PasswordPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetPasswordPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPasswordPermissionLevelsResponse) { +} + +func (newState *GetPasswordPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetPasswordPermissionLevelsResponse) { +} + // Get object permission levels type GetPermissionLevelsRequest struct { // @@ -168,11 +282,23 @@ type GetPermissionLevelsRequest struct { RequestObjectType types.String `tfsdk:"-"` } +func (newState *GetPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPermissionLevelsRequest) { +} + +func (newState *GetPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetPermissionLevelsRequest) { +} + type GetPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []PermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPermissionLevelsResponse) { +} + +func (newState *GetPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetPermissionLevelsResponse) { +} + // Get object permissions type GetPermissionRequest struct { // The id of the request object. @@ -185,6 +311,12 @@ type GetPermissionRequest struct { RequestObjectType types.String `tfsdk:"-"` } +func (newState *GetPermissionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPermissionRequest) { +} + +func (newState *GetPermissionRequest) SyncEffectiveFieldsDuringRead(existingState GetPermissionRequest) { +} + // Get a rule set type GetRuleSetRequest struct { // Etag used for versioning. 
The response is at least as fresh as the eTag @@ -200,12 +332,24 @@ type GetRuleSetRequest struct { Name types.String `tfsdk:"-"` } +func (newState *GetRuleSetRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRuleSetRequest) { +} + +func (newState *GetRuleSetRequest) SyncEffectiveFieldsDuringRead(existingState GetRuleSetRequest) { +} + // Get service principal details type GetServicePrincipalRequest struct { // Unique ID for a service principal in the Databricks workspace. Id types.String `tfsdk:"-"` } +func (newState *GetServicePrincipalRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetServicePrincipalRequest) { +} + +func (newState *GetServicePrincipalRequest) SyncEffectiveFieldsDuringRead(existingState GetServicePrincipalRequest) { +} + // Get user details type GetUserRequest struct { // Comma-separated list of attributes to return in response. @@ -233,12 +377,24 @@ type GetUserRequest struct { StartIndex types.Int64 `tfsdk:"-"` } +func (newState *GetUserRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetUserRequest) { +} + +func (newState *GetUserRequest) SyncEffectiveFieldsDuringRead(existingState GetUserRequest) { +} + // List workspace permissions type GetWorkspaceAssignmentRequest struct { // The workspace ID. WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *GetWorkspaceAssignmentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWorkspaceAssignmentRequest) { +} + +func (newState *GetWorkspaceAssignmentRequest) SyncEffectiveFieldsDuringRead(existingState GetWorkspaceAssignmentRequest) { +} + type GrantRule struct { // Principals this grant rule applies to. Principals []types.String `tfsdk:"principals" tf:"optional"` @@ -246,6 +402,12 @@ type GrantRule struct { Role types.String `tfsdk:"role" tf:""` } +func (newState *GrantRule) SyncEffectiveFieldsDuringCreateOrUpdate(plan GrantRule) { +} + +func (newState *GrantRule) SyncEffectiveFieldsDuringRead(existingState GrantRule) { +} + type Group struct { // String that represents a human-readable group name DisplayName types.String `tfsdk:"displayName" tf:"optional"` @@ -270,6 +432,12 @@ type Group struct { Schemas []types.String `tfsdk:"schemas" tf:"optional"` } +func (newState *Group) SyncEffectiveFieldsDuringCreateOrUpdate(plan Group) { +} + +func (newState *Group) SyncEffectiveFieldsDuringRead(existingState Group) { +} + // List group details type ListAccountGroupsRequest struct { // Comma-separated list of attributes to return in response. @@ -294,6 +462,12 @@ type ListAccountGroupsRequest struct { StartIndex types.Int64 `tfsdk:"-"` } +func (newState *ListAccountGroupsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAccountGroupsRequest) { +} + +func (newState *ListAccountGroupsRequest) SyncEffectiveFieldsDuringRead(existingState ListAccountGroupsRequest) { +} + // List service principals type ListAccountServicePrincipalsRequest struct { // Comma-separated list of attributes to return in response. @@ -318,6 +492,12 @@ type ListAccountServicePrincipalsRequest struct { StartIndex types.Int64 `tfsdk:"-"` } +func (newState *ListAccountServicePrincipalsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAccountServicePrincipalsRequest) { +} + +func (newState *ListAccountServicePrincipalsRequest) SyncEffectiveFieldsDuringRead(existingState ListAccountServicePrincipalsRequest) { +} + // List users type ListAccountUsersRequest struct { // Comma-separated list of attributes to return in response. 
@@ -343,6 +523,12 @@ type ListAccountUsersRequest struct { StartIndex types.Int64 `tfsdk:"-"` } +func (newState *ListAccountUsersRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAccountUsersRequest) { +} + +func (newState *ListAccountUsersRequest) SyncEffectiveFieldsDuringRead(existingState ListAccountUsersRequest) { +} + // List group details type ListGroupsRequest struct { // Comma-separated list of attributes to return in response. @@ -367,6 +553,12 @@ type ListGroupsRequest struct { StartIndex types.Int64 `tfsdk:"-"` } +func (newState *ListGroupsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListGroupsRequest) { +} + +func (newState *ListGroupsRequest) SyncEffectiveFieldsDuringRead(existingState ListGroupsRequest) { +} + type ListGroupsResponse struct { // Total results returned in the response. ItemsPerPage types.Int64 `tfsdk:"itemsPerPage" tf:"optional"` @@ -381,6 +573,12 @@ type ListGroupsResponse struct { TotalResults types.Int64 `tfsdk:"totalResults" tf:"optional"` } +func (newState *ListGroupsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListGroupsResponse) { +} + +func (newState *ListGroupsResponse) SyncEffectiveFieldsDuringRead(existingState ListGroupsResponse) { +} + type ListServicePrincipalResponse struct { // Total results returned in the response. ItemsPerPage types.Int64 `tfsdk:"itemsPerPage" tf:"optional"` @@ -395,6 +593,12 @@ type ListServicePrincipalResponse struct { TotalResults types.Int64 `tfsdk:"totalResults" tf:"optional"` } +func (newState *ListServicePrincipalResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListServicePrincipalResponse) { +} + +func (newState *ListServicePrincipalResponse) SyncEffectiveFieldsDuringRead(existingState ListServicePrincipalResponse) { +} + // List service principals type ListServicePrincipalsRequest struct { // Comma-separated list of attributes to return in response. @@ -419,6 +623,12 @@ type ListServicePrincipalsRequest struct { StartIndex types.Int64 `tfsdk:"-"` } +func (newState *ListServicePrincipalsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListServicePrincipalsRequest) { +} + +func (newState *ListServicePrincipalsRequest) SyncEffectiveFieldsDuringRead(existingState ListServicePrincipalsRequest) { +} + // List users type ListUsersRequest struct { // Comma-separated list of attributes to return in response. @@ -444,6 +654,12 @@ type ListUsersRequest struct { StartIndex types.Int64 `tfsdk:"-"` } +func (newState *ListUsersRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListUsersRequest) { +} + +func (newState *ListUsersRequest) SyncEffectiveFieldsDuringRead(existingState ListUsersRequest) { +} + type ListUsersResponse struct { // Total results returned in the response. ItemsPerPage types.Int64 `tfsdk:"itemsPerPage" tf:"optional"` @@ -458,12 +674,24 @@ type ListUsersResponse struct { TotalResults types.Int64 `tfsdk:"totalResults" tf:"optional"` } +func (newState *ListUsersResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListUsersResponse) { +} + +func (newState *ListUsersResponse) SyncEffectiveFieldsDuringRead(existingState ListUsersResponse) { +} + // Get permission assignments type ListWorkspaceAssignmentRequest struct { // The workspace ID for the account. 
WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *ListWorkspaceAssignmentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListWorkspaceAssignmentRequest) { +} + +func (newState *ListWorkspaceAssignmentRequest) SyncEffectiveFieldsDuringRead(existingState ListWorkspaceAssignmentRequest) { +} + type MigratePermissionsRequest struct { // The name of the workspace group that permissions will be migrated from. FromWorkspaceGroupName types.String `tfsdk:"from_workspace_group_name" tf:""` @@ -476,11 +704,23 @@ type MigratePermissionsRequest struct { WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:""` } +func (newState *MigratePermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan MigratePermissionsRequest) { +} + +func (newState *MigratePermissionsRequest) SyncEffectiveFieldsDuringRead(existingState MigratePermissionsRequest) { +} + type MigratePermissionsResponse struct { // Number of permissions migrated. PermissionsMigrated types.Int64 `tfsdk:"permissions_migrated" tf:"optional"` } +func (newState *MigratePermissionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan MigratePermissionsResponse) { +} + +func (newState *MigratePermissionsResponse) SyncEffectiveFieldsDuringRead(existingState MigratePermissionsResponse) { +} + type Name struct { // Family name of the Databricks user. FamilyName types.String `tfsdk:"familyName" tf:"optional"` @@ -488,6 +728,12 @@ type Name struct { GivenName types.String `tfsdk:"givenName" tf:"optional"` } +func (newState *Name) SyncEffectiveFieldsDuringCreateOrUpdate(plan Name) { +} + +func (newState *Name) SyncEffectiveFieldsDuringRead(existingState Name) { +} + type ObjectPermissions struct { AccessControlList []AccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -496,6 +742,12 @@ type ObjectPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *ObjectPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan ObjectPermissions) { +} + +func (newState *ObjectPermissions) SyncEffectiveFieldsDuringRead(existingState ObjectPermissions) { +} + type PartialUpdate struct { // Unique ID for a user in the Databricks workspace. Id types.String `tfsdk:"-"` @@ -506,6 +758,12 @@ type PartialUpdate struct { Schemas []types.String `tfsdk:"schemas" tf:"optional"` } +func (newState *PartialUpdate) SyncEffectiveFieldsDuringCreateOrUpdate(plan PartialUpdate) { +} + +func (newState *PartialUpdate) SyncEffectiveFieldsDuringRead(existingState PartialUpdate) { +} + type PasswordAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -517,6 +775,12 @@ type PasswordAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *PasswordAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PasswordAccessControlRequest) { +} + +func (newState *PasswordAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState PasswordAccessControlRequest) { +} + type PasswordAccessControlResponse struct { // All permissions. 
AllPermissions []PasswordPermission `tfsdk:"all_permissions" tf:"optional"` @@ -530,6 +794,12 @@ type PasswordAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *PasswordAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PasswordAccessControlResponse) { +} + +func (newState *PasswordAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState PasswordAccessControlResponse) { +} + type PasswordPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -538,6 +808,12 @@ type PasswordPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *PasswordPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan PasswordPermission) { +} + +func (newState *PasswordPermission) SyncEffectiveFieldsDuringRead(existingState PasswordPermission) { +} + type PasswordPermissions struct { AccessControlList []PasswordAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -546,16 +822,34 @@ type PasswordPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *PasswordPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan PasswordPermissions) { +} + +func (newState *PasswordPermissions) SyncEffectiveFieldsDuringRead(existingState PasswordPermissions) { +} + type PasswordPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *PasswordPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan PasswordPermissionsDescription) { +} + +func (newState *PasswordPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState PasswordPermissionsDescription) { +} + type PasswordPermissionsRequest struct { AccessControlList []PasswordAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` } +func (newState *PasswordPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PasswordPermissionsRequest) { +} + +func (newState *PasswordPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState PasswordPermissionsRequest) { +} + type Patch struct { // Type of patch operation. Op types.String `tfsdk:"op" tf:"optional"` @@ -565,9 +859,21 @@ type Patch struct { Value any `tfsdk:"value" tf:"optional"` } +func (newState *Patch) SyncEffectiveFieldsDuringCreateOrUpdate(plan Patch) { +} + +func (newState *Patch) SyncEffectiveFieldsDuringRead(existingState Patch) { +} + type PatchResponse struct { } +func (newState *PatchResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PatchResponse) { +} + +func (newState *PatchResponse) SyncEffectiveFieldsDuringRead(existingState PatchResponse) { +} + type Permission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -576,6 +882,12 @@ type Permission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *Permission) SyncEffectiveFieldsDuringCreateOrUpdate(plan Permission) { +} + +func (newState *Permission) SyncEffectiveFieldsDuringRead(existingState Permission) { +} + // The output format for existing workspace PermissionAssignment records, which // contains some info for user consumption. 
type PermissionAssignment struct { @@ -587,11 +899,23 @@ type PermissionAssignment struct { Principal []PrincipalOutput `tfsdk:"principal" tf:"optional,object"` } +func (newState *PermissionAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermissionAssignment) { +} + +func (newState *PermissionAssignment) SyncEffectiveFieldsDuringRead(existingState PermissionAssignment) { +} + type PermissionAssignments struct { // Array of permissions assignments defined for a workspace. PermissionAssignments []PermissionAssignment `tfsdk:"permission_assignments" tf:"optional"` } +func (newState *PermissionAssignments) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermissionAssignments) { +} + +func (newState *PermissionAssignments) SyncEffectiveFieldsDuringRead(existingState PermissionAssignments) { +} + type PermissionOutput struct { // The results of a permissions query. Description types.String `tfsdk:"description" tf:"optional"` @@ -599,12 +923,24 @@ type PermissionOutput struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *PermissionOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermissionOutput) { +} + +func (newState *PermissionOutput) SyncEffectiveFieldsDuringRead(existingState PermissionOutput) { +} + type PermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *PermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermissionsDescription) { +} + +func (newState *PermissionsDescription) SyncEffectiveFieldsDuringRead(existingState PermissionsDescription) { +} + type PermissionsRequest struct { AccessControlList []AccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The id of the request object. @@ -617,6 +953,12 @@ type PermissionsRequest struct { RequestObjectType types.String `tfsdk:"-"` } +func (newState *PermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PermissionsRequest) { +} + +func (newState *PermissionsRequest) SyncEffectiveFieldsDuringRead(existingState PermissionsRequest) { +} + // Information about the principal assigned to the workspace. type PrincipalOutput struct { // The display name of the principal. @@ -632,17 +974,35 @@ type PrincipalOutput struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *PrincipalOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan PrincipalOutput) { +} + +func (newState *PrincipalOutput) SyncEffectiveFieldsDuringRead(existingState PrincipalOutput) { +} + type ResourceMeta struct { // Identifier for group type. Can be local workspace group // (`WorkspaceGroup`) or account group (`Group`). ResourceType types.String `tfsdk:"resourceType" tf:"optional"` } +func (newState *ResourceMeta) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResourceMeta) { +} + +func (newState *ResourceMeta) SyncEffectiveFieldsDuringRead(existingState ResourceMeta) { +} + type Role struct { // Role to assign to a principal or a list of principals on a resource. Name types.String `tfsdk:"name" tf:""` } +func (newState *Role) SyncEffectiveFieldsDuringCreateOrUpdate(plan Role) { +} + +func (newState *Role) SyncEffectiveFieldsDuringRead(existingState Role) { +} + type RuleSetResponse struct { // Identifies the version of the rule set returned. 
Etag types.String `tfsdk:"etag" tf:"optional"` @@ -652,6 +1012,12 @@ type RuleSetResponse struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *RuleSetResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RuleSetResponse) { +} + +func (newState *RuleSetResponse) SyncEffectiveFieldsDuringRead(existingState RuleSetResponse) { +} + type RuleSetUpdateRequest struct { // The expected etag of the rule set to update. The update will fail if the // value does not match the value that is stored in account access control @@ -663,6 +1029,12 @@ type RuleSetUpdateRequest struct { Name types.String `tfsdk:"name" tf:""` } +func (newState *RuleSetUpdateRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RuleSetUpdateRequest) { +} + +func (newState *RuleSetUpdateRequest) SyncEffectiveFieldsDuringRead(existingState RuleSetUpdateRequest) { +} + type ServicePrincipal struct { // If this user is active Active types.Bool `tfsdk:"active" tf:"optional"` @@ -687,9 +1059,21 @@ type ServicePrincipal struct { Schemas []types.String `tfsdk:"schemas" tf:"optional"` } +func (newState *ServicePrincipal) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServicePrincipal) { +} + +func (newState *ServicePrincipal) SyncEffectiveFieldsDuringRead(existingState ServicePrincipal) { +} + type UpdateResponse struct { } +func (newState *UpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateResponse) { +} + +func (newState *UpdateResponse) SyncEffectiveFieldsDuringRead(existingState UpdateResponse) { +} + type UpdateRuleSetRequest struct { // Name of the rule set. Name types.String `tfsdk:"name" tf:""` @@ -697,6 +1081,12 @@ type UpdateRuleSetRequest struct { RuleSet []RuleSetUpdateRequest `tfsdk:"rule_set" tf:"object"` } +func (newState *UpdateRuleSetRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRuleSetRequest) { +} + +func (newState *UpdateRuleSetRequest) SyncEffectiveFieldsDuringRead(existingState UpdateRuleSetRequest) { +} + type UpdateWorkspaceAssignments struct { // Array of permissions assignments to update on the workspace. Valid values // are "USER" and "ADMIN" (case-sensitive). If both "USER" and "ADMIN" are @@ -711,6 +1101,12 @@ type UpdateWorkspaceAssignments struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *UpdateWorkspaceAssignments) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateWorkspaceAssignments) { +} + +func (newState *UpdateWorkspaceAssignments) SyncEffectiveFieldsDuringRead(existingState UpdateWorkspaceAssignments) { +} + type User struct { // If this user is active Active types.Bool `tfsdk:"active" tf:"optional"` @@ -745,7 +1141,19 @@ type User struct { UserName types.String `tfsdk:"userName" tf:"optional"` } +func (newState *User) SyncEffectiveFieldsDuringCreateOrUpdate(plan User) { +} + +func (newState *User) SyncEffectiveFieldsDuringRead(existingState User) { +} + type WorkspacePermissions struct { // Array of permissions defined for a workspace. Permissions []PermissionOutput `tfsdk:"permissions" tf:"optional"` } + +func (newState *WorkspacePermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspacePermissions) { +} + +func (newState *WorkspacePermissions) SyncEffectiveFieldsDuringRead(existingState WorkspacePermissions) { +} diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index d2544ac0d0..35f110fbe2 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -22,6 +22,14 @@ type BaseJob struct { // The creator user name. 
This field won’t be included in the response if // the user has already been deleted. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` + // The id of the budget policy used by this job for cost attribution + // purposes. This may be set through (in order of precedence): 1. Budget + // admins through the account or workspace console 2. Jobs UI in the job + // details page and Jobs API using `budget_policy_id` 3. Inferred default + // based on accessible budget policies of the run_as identity on job + // creation or modification. + EffectiveBudgetPolicyId types.String `tfsdk:"effective_budget_policy_id" tf:"optional"` + EffectiveEffectiveBudgetPolicyId types.String `tfsdk:"effective_effective_budget_policy_id" tf:"computed,optional"` // The canonical identifier for this job. JobId types.Int64 `tfsdk:"job_id" tf:"optional"` // Settings for this job and all of its runs. These settings can be updated @@ -29,6 +37,17 @@ type BaseJob struct { Settings []JobSettings `tfsdk:"settings" tf:"optional,object"` } +func (newState *BaseJob) SyncEffectiveFieldsDuringCreateOrUpdate(plan BaseJob) { + newState.EffectiveEffectiveBudgetPolicyId = newState.EffectiveBudgetPolicyId + newState.EffectiveBudgetPolicyId = plan.EffectiveBudgetPolicyId +} + +func (newState *BaseJob) SyncEffectiveFieldsDuringRead(existingState BaseJob) { + if existingState.EffectiveEffectiveBudgetPolicyId.ValueString() == newState.EffectiveBudgetPolicyId.ValueString() { + newState.EffectiveBudgetPolicyId = existingState.EffectiveBudgetPolicyId + } +} + type BaseRun struct { // The sequence number of this run attempt for a triggered job run. The // initial attempt of a run has an attempt_number of 0. If the initial run @@ -159,6 +178,12 @@ type BaseRun struct { TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional,object"` } +func (newState *BaseRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan BaseRun) { +} + +func (newState *BaseRun) SyncEffectiveFieldsDuringRead(existingState BaseRun) { +} + type CancelAllRuns struct { // Optional boolean parameter to cancel all queued runs. If no job_id is // provided, all queued runs in the workspace are canceled. @@ -167,17 +192,41 @@ type CancelAllRuns struct { JobId types.Int64 `tfsdk:"job_id" tf:"optional"` } +func (newState *CancelAllRuns) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelAllRuns) { +} + +func (newState *CancelAllRuns) SyncEffectiveFieldsDuringRead(existingState CancelAllRuns) { +} + type CancelAllRunsResponse struct { } +func (newState *CancelAllRunsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelAllRunsResponse) { +} + +func (newState *CancelAllRunsResponse) SyncEffectiveFieldsDuringRead(existingState CancelAllRunsResponse) { +} + type CancelRun struct { // This field is required. RunId types.Int64 `tfsdk:"run_id" tf:""` } +func (newState *CancelRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelRun) { +} + +func (newState *CancelRun) SyncEffectiveFieldsDuringRead(existingState CancelRun) { +} + type CancelRunResponse struct { } +func (newState *CancelRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelRunResponse) { +} + +func (newState *CancelRunResponse) SyncEffectiveFieldsDuringRead(existingState CancelRunResponse) { +} + type ClusterInstance struct { // The canonical identifier for the cluster used by a run. This field is // always available for runs on existing clusters. 
For runs on new clusters, @@ -199,6 +248,12 @@ type ClusterInstance struct { SparkContextId types.String `tfsdk:"spark_context_id" tf:"optional"` } +func (newState *ClusterInstance) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterInstance) { +} + +func (newState *ClusterInstance) SyncEffectiveFieldsDuringRead(existingState ClusterInstance) { +} + type ClusterSpec struct { // If existing_cluster_id, the ID of an existing cluster that is used for // all runs. When running jobs or tasks on an existing cluster, you may need @@ -216,6 +271,12 @@ type ClusterSpec struct { NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` } +func (newState *ClusterSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterSpec) { +} + +func (newState *ClusterSpec) SyncEffectiveFieldsDuringRead(existingState ClusterSpec) { +} + type ConditionTask struct { // The left operand of the condition task. Can be either a string value or a // job state or parameter reference. @@ -236,15 +297,32 @@ type ConditionTask struct { Right types.String `tfsdk:"right" tf:""` } +func (newState *ConditionTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan ConditionTask) { +} + +func (newState *ConditionTask) SyncEffectiveFieldsDuringRead(existingState ConditionTask) { +} + type Continuous struct { // Indicate whether the continuous execution of the job is paused or not. // Defaults to UNPAUSED. PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` } +func (newState *Continuous) SyncEffectiveFieldsDuringCreateOrUpdate(plan Continuous) { +} + +func (newState *Continuous) SyncEffectiveFieldsDuringRead(existingState Continuous) { +} + type CreateJob struct { // List of permissions to set on the job. AccessControlList []JobAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` + // The id of the user specified budget policy to use for this job. If not + // specified, a default budget policy may be applied when creating or + // modifying the job. See `effective_budget_policy_id` for the budget policy + // used by this workload. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // An optional continuous property for this job. The continuous property // will ensure that there is always one run executing. Only one of // `schedule` and `continuous` can be used. @@ -343,12 +421,24 @@ type CreateJob struct { WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } +func (newState *CreateJob) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateJob) { +} + +func (newState *CreateJob) SyncEffectiveFieldsDuringRead(existingState CreateJob) { +} + // Job was created successfully type CreateResponse struct { // The canonical identifier for the newly created job. JobId types.Int64 `tfsdk:"job_id" tf:"optional"` } +func (newState *CreateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateResponse) { +} + +func (newState *CreateResponse) SyncEffectiveFieldsDuringRead(existingState CreateResponse) { +} + type CronSchedule struct { // Indicate whether this schedule is paused or not. 
PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` @@ -364,6 +454,12 @@ type CronSchedule struct { TimezoneId types.String `tfsdk:"timezone_id" tf:""` } +func (newState *CronSchedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan CronSchedule) { +} + +func (newState *CronSchedule) SyncEffectiveFieldsDuringRead(existingState CronSchedule) { +} + type DbtOutput struct { // An optional map of headers to send when retrieving the artifact from the // `artifacts_link`. @@ -374,6 +470,12 @@ type DbtOutput struct { ArtifactsLink types.String `tfsdk:"artifacts_link" tf:"optional"` } +func (newState *DbtOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan DbtOutput) { +} + +func (newState *DbtOutput) SyncEffectiveFieldsDuringRead(existingState DbtOutput) { +} + type DbtTask struct { // Optional name of the catalog to use. The value is the top level in the // 3-level namespace of Unity Catalog (catalog / schema / relation). The @@ -411,22 +513,52 @@ type DbtTask struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *DbtTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan DbtTask) { +} + +func (newState *DbtTask) SyncEffectiveFieldsDuringRead(existingState DbtTask) { +} + type DeleteJob struct { // The canonical identifier of the job to delete. This field is required. JobId types.Int64 `tfsdk:"job_id" tf:""` } +func (newState *DeleteJob) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteJob) { +} + +func (newState *DeleteJob) SyncEffectiveFieldsDuringRead(existingState DeleteJob) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + type DeleteRun struct { // ID of the run to delete. RunId types.Int64 `tfsdk:"run_id" tf:""` } +func (newState *DeleteRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRun) { +} + +func (newState *DeleteRun) SyncEffectiveFieldsDuringRead(existingState DeleteRun) { +} + type DeleteRunResponse struct { } +func (newState *DeleteRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRunResponse) { +} + +func (newState *DeleteRunResponse) SyncEffectiveFieldsDuringRead(existingState DeleteRunResponse) { +} + // Represents a change to the job cluster's settings that would be required for // the job clusters to become compliant with their policies. type EnforcePolicyComplianceForJobResponseJobClusterSettingsChange struct { @@ -445,6 +577,12 @@ type EnforcePolicyComplianceForJobResponseJobClusterSettingsChange struct { PreviousValue types.String `tfsdk:"previous_value" tf:"optional"` } +func (newState *EnforcePolicyComplianceForJobResponseJobClusterSettingsChange) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnforcePolicyComplianceForJobResponseJobClusterSettingsChange) { +} + +func (newState *EnforcePolicyComplianceForJobResponseJobClusterSettingsChange) SyncEffectiveFieldsDuringRead(existingState EnforcePolicyComplianceForJobResponseJobClusterSettingsChange) { +} + type EnforcePolicyComplianceRequest struct { // The ID of the job you want to enforce policy compliance on. 
JobId types.Int64 `tfsdk:"job_id" tf:""` @@ -453,6 +591,12 @@ type EnforcePolicyComplianceRequest struct { ValidateOnly types.Bool `tfsdk:"validate_only" tf:"optional"` } +func (newState *EnforcePolicyComplianceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnforcePolicyComplianceRequest) { +} + +func (newState *EnforcePolicyComplianceRequest) SyncEffectiveFieldsDuringRead(existingState EnforcePolicyComplianceRequest) { +} + type EnforcePolicyComplianceResponse struct { // Whether any changes have been made to the job cluster settings for the // job to become compliant with its policies. @@ -470,6 +614,12 @@ type EnforcePolicyComplianceResponse struct { Settings []JobSettings `tfsdk:"settings" tf:"optional,object"` } +func (newState *EnforcePolicyComplianceResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnforcePolicyComplianceResponse) { +} + +func (newState *EnforcePolicyComplianceResponse) SyncEffectiveFieldsDuringRead(existingState EnforcePolicyComplianceResponse) { +} + // Run was exported successfully. type ExportRunOutput struct { // The exported content in HTML format (one for every view item). To extract @@ -480,6 +630,12 @@ type ExportRunOutput struct { Views []ViewItem `tfsdk:"views" tf:"optional"` } +func (newState *ExportRunOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExportRunOutput) { +} + +func (newState *ExportRunOutput) SyncEffectiveFieldsDuringRead(existingState ExportRunOutput) { +} + // Export and retrieve a job run type ExportRunRequest struct { // The canonical identifier for the run. This field is required. @@ -488,6 +644,12 @@ type ExportRunRequest struct { ViewsToExport types.String `tfsdk:"-"` } +func (newState *ExportRunRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExportRunRequest) { +} + +func (newState *ExportRunRequest) SyncEffectiveFieldsDuringRead(existingState ExportRunRequest) { +} + type FileArrivalTriggerConfiguration struct { // If set, the trigger starts a run only after the specified amount of time // passed since the last time the trigger fired. The minimum allowed value @@ -503,6 +665,12 @@ type FileArrivalTriggerConfiguration struct { WaitAfterLastChangeSeconds types.Int64 `tfsdk:"wait_after_last_change_seconds" tf:"optional"` } +func (newState *FileArrivalTriggerConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan FileArrivalTriggerConfiguration) { +} + +func (newState *FileArrivalTriggerConfiguration) SyncEffectiveFieldsDuringRead(existingState FileArrivalTriggerConfiguration) { +} + type ForEachStats struct { // Sample of 3 most common error messages occurred during the iteration. ErrorMessageStats []ForEachTaskErrorMessageStats `tfsdk:"error_message_stats" tf:"optional"` @@ -510,6 +678,12 @@ type ForEachStats struct { TaskRunStats []ForEachTaskTaskRunStats `tfsdk:"task_run_stats" tf:"optional,object"` } +func (newState *ForEachStats) SyncEffectiveFieldsDuringCreateOrUpdate(plan ForEachStats) { +} + +func (newState *ForEachStats) SyncEffectiveFieldsDuringRead(existingState ForEachStats) { +} + type ForEachTask struct { // An optional maximum allowed number of concurrent runs of the task. 
Set // this value if you want to be able to execute multiple runs of the task @@ -522,6 +696,12 @@ type ForEachTask struct { Task []Task `tfsdk:"task" tf:"object"` } +func (newState *ForEachTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan ForEachTask) { +} + +func (newState *ForEachTask) SyncEffectiveFieldsDuringRead(existingState ForEachTask) { +} + type ForEachTaskErrorMessageStats struct { // Describes the count of such error message encountered during the // iterations. @@ -532,6 +712,12 @@ type ForEachTaskErrorMessageStats struct { TerminationCategory types.String `tfsdk:"termination_category" tf:"optional"` } +func (newState *ForEachTaskErrorMessageStats) SyncEffectiveFieldsDuringCreateOrUpdate(plan ForEachTaskErrorMessageStats) { +} + +func (newState *ForEachTaskErrorMessageStats) SyncEffectiveFieldsDuringRead(existingState ForEachTaskErrorMessageStats) { +} + type ForEachTaskTaskRunStats struct { // Describes the iteration runs having an active lifecycle state or an // active run sub state. @@ -548,23 +734,47 @@ type ForEachTaskTaskRunStats struct { TotalIterations types.Int64 `tfsdk:"total_iterations" tf:"optional"` } +func (newState *ForEachTaskTaskRunStats) SyncEffectiveFieldsDuringCreateOrUpdate(plan ForEachTaskTaskRunStats) { +} + +func (newState *ForEachTaskTaskRunStats) SyncEffectiveFieldsDuringRead(existingState ForEachTaskTaskRunStats) { +} + // Get job permission levels type GetJobPermissionLevelsRequest struct { // The job for which to get or manage permissions. JobId types.String `tfsdk:"-"` } +func (newState *GetJobPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetJobPermissionLevelsRequest) { +} + +func (newState *GetJobPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetJobPermissionLevelsRequest) { +} + type GetJobPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []JobPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetJobPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetJobPermissionLevelsResponse) { +} + +func (newState *GetJobPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetJobPermissionLevelsResponse) { +} + // Get job permissions type GetJobPermissionsRequest struct { // The job for which to get or manage permissions. JobId types.String `tfsdk:"-"` } +func (newState *GetJobPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetJobPermissionsRequest) { +} + +func (newState *GetJobPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetJobPermissionsRequest) { +} + // Get a single job type GetJobRequest struct { // The canonical identifier of the job to retrieve information about. This @@ -572,12 +782,24 @@ type GetJobRequest struct { JobId types.Int64 `tfsdk:"-"` } +func (newState *GetJobRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetJobRequest) { +} + +func (newState *GetJobRequest) SyncEffectiveFieldsDuringRead(existingState GetJobRequest) { +} + // Get job policy compliance type GetPolicyComplianceRequest struct { // The ID of the job whose compliance status you are requesting. JobId types.Int64 `tfsdk:"-"` } +func (newState *GetPolicyComplianceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPolicyComplianceRequest) { +} + +func (newState *GetPolicyComplianceRequest) SyncEffectiveFieldsDuringRead(existingState GetPolicyComplianceRequest) { +} + type GetPolicyComplianceResponse struct { // Whether the job is compliant with its policies or not. 
Jobs could be out // of compliance if a policy they are using was updated after the job was @@ -592,12 +814,24 @@ type GetPolicyComplianceResponse struct { Violations map[string]types.String `tfsdk:"violations" tf:"optional"` } +func (newState *GetPolicyComplianceResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPolicyComplianceResponse) { +} + +func (newState *GetPolicyComplianceResponse) SyncEffectiveFieldsDuringRead(existingState GetPolicyComplianceResponse) { +} + // Get the output for a single run type GetRunOutputRequest struct { // The canonical identifier for the run. RunId types.Int64 `tfsdk:"-"` } +func (newState *GetRunOutputRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRunOutputRequest) { +} + +func (newState *GetRunOutputRequest) SyncEffectiveFieldsDuringRead(existingState GetRunOutputRequest) { +} + // Get a single job run type GetRunRequest struct { // Whether to include the repair history in the response. @@ -613,6 +847,12 @@ type GetRunRequest struct { RunId types.Int64 `tfsdk:"-"` } +func (newState *GetRunRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRunRequest) { +} + +func (newState *GetRunRequest) SyncEffectiveFieldsDuringRead(existingState GetRunRequest) { +} + // Read-only state of the remote repository at the time the job was run. This // field is only included on job runs. type GitSnapshot struct { @@ -622,6 +862,12 @@ type GitSnapshot struct { UsedCommit types.String `tfsdk:"used_commit" tf:"optional"` } +func (newState *GitSnapshot) SyncEffectiveFieldsDuringCreateOrUpdate(plan GitSnapshot) { +} + +func (newState *GitSnapshot) SyncEffectiveFieldsDuringRead(existingState GitSnapshot) { +} + // An optional specification for a remote Git repository containing the source // code used by tasks. Version-controlled source code is supported by notebook, // dbt, Python script, and SQL File tasks. @@ -655,6 +901,12 @@ type GitSource struct { JobSource []JobSource `tfsdk:"job_source" tf:"optional,object"` } +func (newState *GitSource) SyncEffectiveFieldsDuringCreateOrUpdate(plan GitSource) { +} + +func (newState *GitSource) SyncEffectiveFieldsDuringRead(existingState GitSource) { +} + // Job was retrieved successfully. type Job struct { // The time at which this job was created in epoch milliseconds @@ -663,6 +915,14 @@ type Job struct { // The creator user name. This field won’t be included in the response if // the user has already been deleted. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` + // The id of the budget policy used by this job for cost attribution + // purposes. This may be set through (in order of precedence): 1. Budget + // admins through the account or workspace console 2. Jobs UI in the job + // details page and Jobs API using `budget_policy_id` 3. Inferred default + // based on accessible budget policies of the run_as identity on job + // creation or modification. + EffectiveBudgetPolicyId types.String `tfsdk:"effective_budget_policy_id" tf:"optional"` + EffectiveEffectiveBudgetPolicyId types.String `tfsdk:"effective_effective_budget_policy_id" tf:"computed,optional"` // The canonical identifier for this job. 
JobId types.Int64 `tfsdk:"job_id" tf:"optional"` // The email of an active workspace user or the application ID of a service @@ -678,6 +938,17 @@ type Job struct { Settings []JobSettings `tfsdk:"settings" tf:"optional,object"` } +func (newState *Job) SyncEffectiveFieldsDuringCreateOrUpdate(plan Job) { + newState.EffectiveEffectiveBudgetPolicyId = newState.EffectiveBudgetPolicyId + newState.EffectiveBudgetPolicyId = plan.EffectiveBudgetPolicyId +} + +func (newState *Job) SyncEffectiveFieldsDuringRead(existingState Job) { + if existingState.EffectiveEffectiveBudgetPolicyId.ValueString() == newState.EffectiveBudgetPolicyId.ValueString() { + newState.EffectiveBudgetPolicyId = existingState.EffectiveBudgetPolicyId + } +} + type JobAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -689,6 +960,12 @@ type JobAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *JobAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobAccessControlRequest) { +} + +func (newState *JobAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState JobAccessControlRequest) { +} + type JobAccessControlResponse struct { // All permissions. AllPermissions []JobPermission `tfsdk:"all_permissions" tf:"optional"` @@ -702,6 +979,12 @@ type JobAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *JobAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobAccessControlResponse) { +} + +func (newState *JobAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState JobAccessControlResponse) { +} + type JobCluster struct { // A unique name for the job cluster. This field is required and must be // unique within the job. `JobTaskSettings` may refer to this field to @@ -711,6 +994,12 @@ type JobCluster struct { NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"object"` } +func (newState *JobCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobCluster) { +} + +func (newState *JobCluster) SyncEffectiveFieldsDuringRead(existingState JobCluster) { +} + type JobCompliance struct { // Whether this job is in compliance with the latest version of its policy. IsCompliant types.Bool `tfsdk:"is_compliant" tf:"optional"` @@ -724,6 +1013,12 @@ type JobCompliance struct { Violations map[string]types.String `tfsdk:"violations" tf:"optional"` } +func (newState *JobCompliance) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobCompliance) { +} + +func (newState *JobCompliance) SyncEffectiveFieldsDuringRead(existingState JobCompliance) { +} + type JobDeployment struct { // The kind of deployment that manages the job. // @@ -733,6 +1028,12 @@ type JobDeployment struct { MetadataFilePath types.String `tfsdk:"metadata_file_path" tf:"optional"` } +func (newState *JobDeployment) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobDeployment) { +} + +func (newState *JobDeployment) SyncEffectiveFieldsDuringRead(existingState JobDeployment) { +} + type JobEmailNotifications struct { // If true, do not send email to recipients specified in `on_failure` if the // run is skipped. This field is `deprecated`. 
Please use the @@ -769,6 +1070,12 @@ type JobEmailNotifications struct { OnSuccess []types.String `tfsdk:"on_success" tf:"optional"` } +func (newState *JobEmailNotifications) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobEmailNotifications) { +} + +func (newState *JobEmailNotifications) SyncEffectiveFieldsDuringRead(existingState JobEmailNotifications) { +} + type JobEnvironment struct { // The key of an environment. It has to be unique within a job. EnvironmentKey types.String `tfsdk:"environment_key" tf:""` @@ -778,6 +1085,12 @@ type JobEnvironment struct { Spec compute.Environment `tfsdk:"spec" tf:"optional,object"` } +func (newState *JobEnvironment) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobEnvironment) { +} + +func (newState *JobEnvironment) SyncEffectiveFieldsDuringRead(existingState JobEnvironment) { +} + type JobNotificationSettings struct { // If true, do not send notifications to recipients specified in // `on_failure` if the run is canceled. @@ -787,6 +1100,12 @@ type JobNotificationSettings struct { NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs" tf:"optional"` } +func (newState *JobNotificationSettings) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobNotificationSettings) { +} + +func (newState *JobNotificationSettings) SyncEffectiveFieldsDuringRead(existingState JobNotificationSettings) { +} + type JobParameter struct { // The optional default value of the parameter Default types.String `tfsdk:"default" tf:"optional"` @@ -796,6 +1115,12 @@ type JobParameter struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *JobParameter) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobParameter) { +} + +func (newState *JobParameter) SyncEffectiveFieldsDuringRead(existingState JobParameter) { +} + type JobParameterDefinition struct { // Default value of the parameter. 
Default types.String `tfsdk:"default" tf:""` @@ -804,6 +1129,12 @@ type JobParameterDefinition struct { Name types.String `tfsdk:"name" tf:""` } +func (newState *JobParameterDefinition) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobParameterDefinition) { +} + +func (newState *JobParameterDefinition) SyncEffectiveFieldsDuringRead(existingState JobParameterDefinition) { +} + type JobPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -812,6 +1143,12 @@ type JobPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *JobPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobPermission) { +} + +func (newState *JobPermission) SyncEffectiveFieldsDuringRead(existingState JobPermission) { +} + type JobPermissions struct { AccessControlList []JobAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -820,18 +1157,36 @@ type JobPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *JobPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobPermissions) { +} + +func (newState *JobPermissions) SyncEffectiveFieldsDuringRead(existingState JobPermissions) { +} + type JobPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *JobPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobPermissionsDescription) { +} + +func (newState *JobPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState JobPermissionsDescription) { +} + type JobPermissionsRequest struct { AccessControlList []JobAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The job for which to get or manage permissions. JobId types.String `tfsdk:"-"` } +func (newState *JobPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobPermissionsRequest) { +} + +func (newState *JobPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState JobPermissionsRequest) { +} + // Write-only setting. Specifies the user, service principal or group that the // job/pipeline runs as. If not specified, the job/pipeline runs as the user who // created the job/pipeline. @@ -847,7 +1202,18 @@ type JobRunAs struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *JobRunAs) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobRunAs) { +} + +func (newState *JobRunAs) SyncEffectiveFieldsDuringRead(existingState JobRunAs) { +} + type JobSettings struct { + // The id of the user specified budget policy to use for this job. If not + // specified, a default budget policy may be applied when creating or + // modifying the job. See `effective_budget_policy_id` for the budget policy + // used by this workload. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // An optional continuous property for this job. The continuous property // will ensure that there is always one run executing. Only one of // `schedule` and `continuous` can be used. @@ -946,6 +1312,12 @@ type JobSettings struct { WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } +func (newState *JobSettings) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobSettings) { +} + +func (newState *JobSettings) SyncEffectiveFieldsDuringRead(existingState JobSettings) { +} + // The source of the job specification in the remote repository when the job is // source controlled. 
type JobSource struct { @@ -965,6 +1337,12 @@ type JobSource struct { JobConfigPath types.String `tfsdk:"job_config_path" tf:""` } +func (newState *JobSource) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobSource) { +} + +func (newState *JobSource) SyncEffectiveFieldsDuringRead(existingState JobSource) { +} + type JobsHealthRule struct { // Specifies the health metric that is being evaluated for a particular // health rule. @@ -987,11 +1365,23 @@ type JobsHealthRule struct { Value types.Int64 `tfsdk:"value" tf:""` } +func (newState *JobsHealthRule) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobsHealthRule) { +} + +func (newState *JobsHealthRule) SyncEffectiveFieldsDuringRead(existingState JobsHealthRule) { +} + // An optional set of health rules that can be defined for this job. type JobsHealthRules struct { Rules []JobsHealthRule `tfsdk:"rules" tf:"optional"` } +func (newState *JobsHealthRules) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobsHealthRules) { +} + +func (newState *JobsHealthRules) SyncEffectiveFieldsDuringRead(existingState JobsHealthRules) { +} + type ListJobComplianceForPolicyResponse struct { // A list of jobs and their policy compliance statuses. Jobs []JobCompliance `tfsdk:"jobs" tf:"optional"` @@ -1005,6 +1395,12 @@ type ListJobComplianceForPolicyResponse struct { PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` } +func (newState *ListJobComplianceForPolicyResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListJobComplianceForPolicyResponse) { +} + +func (newState *ListJobComplianceForPolicyResponse) SyncEffectiveFieldsDuringRead(existingState ListJobComplianceForPolicyResponse) { +} + // List job policy compliance type ListJobComplianceRequest struct { // Use this field to specify the maximum number of results to be returned by @@ -1018,6 +1414,12 @@ type ListJobComplianceRequest struct { PolicyId types.String `tfsdk:"-"` } +func (newState *ListJobComplianceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListJobComplianceRequest) { +} + +func (newState *ListJobComplianceRequest) SyncEffectiveFieldsDuringRead(existingState ListJobComplianceRequest) { +} + // List jobs type ListJobsRequest struct { // Whether to include task and cluster details in the response. @@ -1036,6 +1438,12 @@ type ListJobsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListJobsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListJobsRequest) { +} + +func (newState *ListJobsRequest) SyncEffectiveFieldsDuringRead(existingState ListJobsRequest) { +} + // List of jobs was retrieved successfully. type ListJobsResponse struct { // If true, additional jobs matching the provided filter are available for @@ -1051,6 +1459,12 @@ type ListJobsResponse struct { PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` } +func (newState *ListJobsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListJobsResponse) { +} + +func (newState *ListJobsResponse) SyncEffectiveFieldsDuringRead(existingState ListJobsResponse) { +} + // List job runs type ListRunsRequest struct { // If active_only is `true`, only active runs are included in the results; @@ -1091,6 +1505,12 @@ type ListRunsRequest struct { StartTimeTo types.Int64 `tfsdk:"-"` } +func (newState *ListRunsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListRunsRequest) { +} + +func (newState *ListRunsRequest) SyncEffectiveFieldsDuringRead(existingState ListRunsRequest) { +} + // List of runs was retrieved successfully. 
type ListRunsResponse struct { // If true, additional runs matching the provided filter are available for @@ -1106,6 +1526,12 @@ type ListRunsResponse struct { Runs []BaseRun `tfsdk:"runs" tf:"optional"` } +func (newState *ListRunsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListRunsResponse) { +} + +func (newState *ListRunsResponse) SyncEffectiveFieldsDuringRead(existingState ListRunsResponse) { +} + type NotebookOutput struct { // The value passed to // [dbutils.notebook.exit()](/notebooks/notebook-workflows.html#notebook-workflows-exit). @@ -1118,6 +1544,12 @@ type NotebookOutput struct { Truncated types.Bool `tfsdk:"truncated" tf:"optional"` } +func (newState *NotebookOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan NotebookOutput) { +} + +func (newState *NotebookOutput) SyncEffectiveFieldsDuringRead(existingState NotebookOutput) { +} + type NotebookTask struct { // Base parameters to be used for each run of this job. If the run is // initiated by a call to :method:jobs/run Now with parameters specified, @@ -1159,6 +1591,12 @@ type NotebookTask struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *NotebookTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan NotebookTask) { +} + +func (newState *NotebookTask) SyncEffectiveFieldsDuringRead(existingState NotebookTask) { +} + type PeriodicTriggerConfiguration struct { // The interval at which the trigger should run. Interval types.Int64 `tfsdk:"interval" tf:""` @@ -1166,11 +1604,23 @@ type PeriodicTriggerConfiguration struct { Unit types.String `tfsdk:"unit" tf:""` } +func (newState *PeriodicTriggerConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan PeriodicTriggerConfiguration) { +} + +func (newState *PeriodicTriggerConfiguration) SyncEffectiveFieldsDuringRead(existingState PeriodicTriggerConfiguration) { +} + type PipelineParams struct { // If true, triggers a full refresh on the delta live table. FullRefresh types.Bool `tfsdk:"full_refresh" tf:"optional"` } +func (newState *PipelineParams) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineParams) { +} + +func (newState *PipelineParams) SyncEffectiveFieldsDuringRead(existingState PipelineParams) { +} + type PipelineTask struct { // If true, triggers a full refresh on the delta live table. FullRefresh types.Bool `tfsdk:"full_refresh" tf:"optional"` @@ -1178,6 +1628,12 @@ type PipelineTask struct { PipelineId types.String `tfsdk:"pipeline_id" tf:""` } +func (newState *PipelineTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineTask) { +} + +func (newState *PipelineTask) SyncEffectiveFieldsDuringRead(existingState PipelineTask) { +} + type PythonWheelTask struct { // Named entry point to use, if it does not exist in the metadata of the // package it executes the function from the package directly using @@ -1194,6 +1650,12 @@ type PythonWheelTask struct { Parameters []types.String `tfsdk:"parameters" tf:"optional"` } +func (newState *PythonWheelTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan PythonWheelTask) { +} + +func (newState *PythonWheelTask) SyncEffectiveFieldsDuringRead(existingState PythonWheelTask) { +} + type QueueDetails struct { // The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run // was queued due to reaching the workspace limit of active task runs. 
* @@ -1207,11 +1669,23 @@ type QueueDetails struct { Message types.String `tfsdk:"message" tf:"optional"` } +func (newState *QueueDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueueDetails) { +} + +func (newState *QueueDetails) SyncEffectiveFieldsDuringRead(existingState QueueDetails) { +} + type QueueSettings struct { // If true, enable queueing for the job. This is a required field. Enabled types.Bool `tfsdk:"enabled" tf:""` } +func (newState *QueueSettings) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueueSettings) { +} + +func (newState *QueueSettings) SyncEffectiveFieldsDuringRead(existingState QueueSettings) { +} + type RepairHistoryItem struct { // The end time of the (repaired) run. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` @@ -1232,6 +1706,12 @@ type RepairHistoryItem struct { Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *RepairHistoryItem) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepairHistoryItem) { +} + +func (newState *RepairHistoryItem) SyncEffectiveFieldsDuringRead(existingState RepairHistoryItem) { +} + type RepairRun struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt @@ -1333,6 +1813,12 @@ type RepairRun struct { SqlParams map[string]types.String `tfsdk:"sql_params" tf:"optional"` } +func (newState *RepairRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepairRun) { +} + +func (newState *RepairRun) SyncEffectiveFieldsDuringRead(existingState RepairRun) { +} + // Run repair was initiated. type RepairRunResponse struct { // The ID of the repair. Must be provided in subsequent repairs using the @@ -1340,6 +1826,12 @@ type RepairRunResponse struct { RepairId types.Int64 `tfsdk:"repair_id" tf:"optional"` } +func (newState *RepairRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepairRunResponse) { +} + +func (newState *RepairRunResponse) SyncEffectiveFieldsDuringRead(existingState RepairRunResponse) { +} + type ResetJob struct { // The canonical identifier of the job to reset. This field is required. 
JobId types.Int64 `tfsdk:"job_id" tf:""` @@ -1351,43 +1843,97 @@ type ResetJob struct { NewSettings []JobSettings `tfsdk:"new_settings" tf:"object"` } +func (newState *ResetJob) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResetJob) { +} + +func (newState *ResetJob) SyncEffectiveFieldsDuringRead(existingState ResetJob) { +} + type ResetResponse struct { } +func (newState *ResetResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResetResponse) { +} + +func (newState *ResetResponse) SyncEffectiveFieldsDuringRead(existingState ResetResponse) { +} + type ResolvedConditionTaskValues struct { Left types.String `tfsdk:"left" tf:"optional"` Right types.String `tfsdk:"right" tf:"optional"` } +func (newState *ResolvedConditionTaskValues) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResolvedConditionTaskValues) { +} + +func (newState *ResolvedConditionTaskValues) SyncEffectiveFieldsDuringRead(existingState ResolvedConditionTaskValues) { +} + type ResolvedDbtTaskValues struct { Commands []types.String `tfsdk:"commands" tf:"optional"` } +func (newState *ResolvedDbtTaskValues) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResolvedDbtTaskValues) { +} + +func (newState *ResolvedDbtTaskValues) SyncEffectiveFieldsDuringRead(existingState ResolvedDbtTaskValues) { +} + type ResolvedNotebookTaskValues struct { BaseParameters map[string]types.String `tfsdk:"base_parameters" tf:"optional"` } +func (newState *ResolvedNotebookTaskValues) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResolvedNotebookTaskValues) { +} + +func (newState *ResolvedNotebookTaskValues) SyncEffectiveFieldsDuringRead(existingState ResolvedNotebookTaskValues) { +} + type ResolvedParamPairValues struct { Parameters map[string]types.String `tfsdk:"parameters" tf:"optional"` } +func (newState *ResolvedParamPairValues) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResolvedParamPairValues) { +} + +func (newState *ResolvedParamPairValues) SyncEffectiveFieldsDuringRead(existingState ResolvedParamPairValues) { +} + type ResolvedPythonWheelTaskValues struct { NamedParameters map[string]types.String `tfsdk:"named_parameters" tf:"optional"` Parameters []types.String `tfsdk:"parameters" tf:"optional"` } +func (newState *ResolvedPythonWheelTaskValues) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResolvedPythonWheelTaskValues) { +} + +func (newState *ResolvedPythonWheelTaskValues) SyncEffectiveFieldsDuringRead(existingState ResolvedPythonWheelTaskValues) { +} + type ResolvedRunJobTaskValues struct { JobParameters map[string]types.String `tfsdk:"job_parameters" tf:"optional"` Parameters map[string]types.String `tfsdk:"parameters" tf:"optional"` } +func (newState *ResolvedRunJobTaskValues) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResolvedRunJobTaskValues) { +} + +func (newState *ResolvedRunJobTaskValues) SyncEffectiveFieldsDuringRead(existingState ResolvedRunJobTaskValues) { +} + type ResolvedStringParamsValues struct { Parameters []types.String `tfsdk:"parameters" tf:"optional"` } +func (newState *ResolvedStringParamsValues) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResolvedStringParamsValues) { +} + +func (newState *ResolvedStringParamsValues) SyncEffectiveFieldsDuringRead(existingState ResolvedStringParamsValues) { +} + type ResolvedValues struct { ConditionTask []ResolvedConditionTaskValues `tfsdk:"condition_task" tf:"optional,object"` @@ -1410,6 +1956,12 @@ type ResolvedValues struct { SqlTask []ResolvedParamPairValues `tfsdk:"sql_task" tf:"optional,object"` } +func (newState *ResolvedValues) SyncEffectiveFieldsDuringCreateOrUpdate(plan 
ResolvedValues) { +} + +func (newState *ResolvedValues) SyncEffectiveFieldsDuringRead(existingState ResolvedValues) { +} + // Run was retrieved successfully type Run struct { // The sequence number of this run attempt for a triggered job run. The @@ -1548,6 +2100,12 @@ type Run struct { TriggerInfo []TriggerInfo `tfsdk:"trigger_info" tf:"optional,object"` } +func (newState *Run) SyncEffectiveFieldsDuringCreateOrUpdate(plan Run) { +} + +func (newState *Run) SyncEffectiveFieldsDuringRead(existingState Run) { +} + type RunConditionTask struct { // The left operand of the condition task. Can be either a string value or a // job state or parameter reference. @@ -1571,6 +2129,12 @@ type RunConditionTask struct { Right types.String `tfsdk:"right" tf:""` } +func (newState *RunConditionTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunConditionTask) { +} + +func (newState *RunConditionTask) SyncEffectiveFieldsDuringRead(existingState RunConditionTask) { +} + type RunForEachTask struct { // An optional maximum allowed number of concurrent runs of the task. Set // this value if you want to be able to execute multiple runs of the task @@ -1586,11 +2150,23 @@ type RunForEachTask struct { Task []Task `tfsdk:"task" tf:"object"` } +func (newState *RunForEachTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunForEachTask) { +} + +func (newState *RunForEachTask) SyncEffectiveFieldsDuringRead(existingState RunForEachTask) { +} + type RunJobOutput struct { // The run id of the triggered job run RunId types.Int64 `tfsdk:"run_id" tf:"optional"` } +func (newState *RunJobOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunJobOutput) { +} + +func (newState *RunJobOutput) SyncEffectiveFieldsDuringRead(existingState RunJobOutput) { +} + type RunJobTask struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt @@ -1678,6 +2254,12 @@ type RunJobTask struct { SqlParams map[string]types.String `tfsdk:"sql_params" tf:"optional"` } +func (newState *RunJobTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunJobTask) { +} + +func (newState *RunJobTask) SyncEffectiveFieldsDuringRead(existingState RunJobTask) { +} + type RunNow struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt @@ -1783,6 +2365,12 @@ type RunNow struct { SqlParams map[string]types.String `tfsdk:"sql_params" tf:"optional"` } +func (newState *RunNow) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunNow) { +} + +func (newState *RunNow) SyncEffectiveFieldsDuringRead(existingState RunNow) { +} + // Run was started successfully. type RunNowResponse struct { // A unique identifier for this job run. This is set to the same value as @@ -1792,6 +2380,12 @@ type RunNowResponse struct { RunId types.Int64 `tfsdk:"run_id" tf:"optional"` } +func (newState *RunNowResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunNowResponse) { +} + +func (newState *RunNowResponse) SyncEffectiveFieldsDuringRead(existingState RunNowResponse) { +} + // Run output was retrieved successfully. type RunOutput struct { // The output of a dbt task, if available. 
@@ -1832,6 +2426,12 @@ type RunOutput struct { SqlOutput []SqlOutput `tfsdk:"sql_output" tf:"optional,object"` } +func (newState *RunOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunOutput) { +} + +func (newState *RunOutput) SyncEffectiveFieldsDuringRead(existingState RunOutput) { +} + type RunParameters struct { // An array of commands to execute for jobs with the dbt task, for example // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt @@ -1915,6 +2515,12 @@ type RunParameters struct { SqlParams map[string]types.String `tfsdk:"sql_params" tf:"optional"` } +func (newState *RunParameters) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunParameters) { +} + +func (newState *RunParameters) SyncEffectiveFieldsDuringRead(existingState RunParameters) { +} + // The current state of the run. type RunState struct { // A value indicating the run's current lifecycle state. This field is @@ -1933,6 +2539,12 @@ type RunState struct { UserCancelledOrTimedout types.Bool `tfsdk:"user_cancelled_or_timedout" tf:"optional"` } +func (newState *RunState) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunState) { +} + +func (newState *RunState) SyncEffectiveFieldsDuringRead(existingState RunState) { +} + // The current status of the run type RunStatus struct { // If the run was queued, details about the reason for queuing the run. @@ -1944,6 +2556,12 @@ type RunStatus struct { TerminationDetails []TerminationDetails `tfsdk:"termination_details" tf:"optional,object"` } +func (newState *RunStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunStatus) { +} + +func (newState *RunStatus) SyncEffectiveFieldsDuringRead(existingState RunStatus) { +} + // Used when outputting a child run, in GetRun or ListRuns. type RunTask struct { // The sequence number of this run attempt for a triggered job run. The @@ -2107,6 +2725,12 @@ type RunTask struct { WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } +func (newState *RunTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunTask) { +} + +func (newState *RunTask) SyncEffectiveFieldsDuringRead(existingState RunTask) { +} + type SparkJarTask struct { // Deprecated since 04/2016. Provide a `jar` through the `libraries` field // instead. For an example, see :method:jobs/create. @@ -2126,6 +2750,12 @@ type SparkJarTask struct { Parameters []types.String `tfsdk:"parameters" tf:"optional"` } +func (newState *SparkJarTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan SparkJarTask) { +} + +func (newState *SparkJarTask) SyncEffectiveFieldsDuringRead(existingState SparkJarTask) { +} + type SparkPythonTask struct { // Command line parameters passed to the Python file. // @@ -2152,6 +2782,12 @@ type SparkPythonTask struct { Source types.String `tfsdk:"source" tf:"optional"` } +func (newState *SparkPythonTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan SparkPythonTask) { +} + +func (newState *SparkPythonTask) SyncEffectiveFieldsDuringRead(existingState SparkPythonTask) { +} + type SparkSubmitTask struct { // Command-line parameters passed to spark submit. // @@ -2162,6 +2798,12 @@ type SparkSubmitTask struct { Parameters []types.String `tfsdk:"parameters" tf:"optional"` } +func (newState *SparkSubmitTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan SparkSubmitTask) { +} + +func (newState *SparkSubmitTask) SyncEffectiveFieldsDuringRead(existingState SparkSubmitTask) { +} + type SqlAlertOutput struct { // The state of the SQL alert. 
// @@ -2180,6 +2822,12 @@ type SqlAlertOutput struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *SqlAlertOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlAlertOutput) { +} + +func (newState *SqlAlertOutput) SyncEffectiveFieldsDuringRead(existingState SqlAlertOutput) { +} + type SqlDashboardOutput struct { // The canonical identifier of the SQL warehouse. WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` @@ -2187,6 +2835,12 @@ type SqlDashboardOutput struct { Widgets []SqlDashboardWidgetOutput `tfsdk:"widgets" tf:"optional"` } +func (newState *SqlDashboardOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlDashboardOutput) { +} + +func (newState *SqlDashboardOutput) SyncEffectiveFieldsDuringRead(existingState SqlDashboardOutput) { +} + type SqlDashboardWidgetOutput struct { // Time (in epoch milliseconds) when execution of the SQL widget ends. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` @@ -2204,6 +2858,12 @@ type SqlDashboardWidgetOutput struct { WidgetTitle types.String `tfsdk:"widget_title" tf:"optional"` } +func (newState *SqlDashboardWidgetOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlDashboardWidgetOutput) { +} + +func (newState *SqlDashboardWidgetOutput) SyncEffectiveFieldsDuringRead(existingState SqlDashboardWidgetOutput) { +} + type SqlOutput struct { // The output of a SQL alert task, if available. AlertOutput []SqlAlertOutput `tfsdk:"alert_output" tf:"optional,object"` @@ -2213,11 +2873,23 @@ type SqlOutput struct { QueryOutput []SqlQueryOutput `tfsdk:"query_output" tf:"optional,object"` } +func (newState *SqlOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlOutput) { +} + +func (newState *SqlOutput) SyncEffectiveFieldsDuringRead(existingState SqlOutput) { +} + type SqlOutputError struct { // The error message when execution fails. Message types.String `tfsdk:"message" tf:"optional"` } +func (newState *SqlOutputError) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlOutputError) { +} + +func (newState *SqlOutputError) SyncEffectiveFieldsDuringRead(existingState SqlOutputError) { +} + type SqlQueryOutput struct { EndpointId types.String `tfsdk:"endpoint_id" tf:"optional"` // The link to find the output results. @@ -2231,11 +2903,23 @@ type SqlQueryOutput struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *SqlQueryOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlQueryOutput) { +} + +func (newState *SqlQueryOutput) SyncEffectiveFieldsDuringRead(existingState SqlQueryOutput) { +} + type SqlStatementOutput struct { // A key that can be used to look up query details. LookupKey types.String `tfsdk:"lookup_key" tf:"optional"` } +func (newState *SqlStatementOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlStatementOutput) { +} + +func (newState *SqlStatementOutput) SyncEffectiveFieldsDuringRead(existingState SqlStatementOutput) { +} + type SqlTask struct { // If alert, indicates that this job must refresh a SQL alert. Alert []SqlTaskAlert `tfsdk:"alert" tf:"optional,object"` @@ -2256,6 +2940,12 @@ type SqlTask struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:""` } +func (newState *SqlTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlTask) { +} + +func (newState *SqlTask) SyncEffectiveFieldsDuringRead(existingState SqlTask) { +} + type SqlTaskAlert struct { // The canonical identifier of the SQL alert. 
AlertId types.String `tfsdk:"alert_id" tf:""` @@ -2265,6 +2955,12 @@ type SqlTaskAlert struct { Subscriptions []SqlTaskSubscription `tfsdk:"subscriptions" tf:"optional"` } +func (newState *SqlTaskAlert) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlTaskAlert) { +} + +func (newState *SqlTaskAlert) SyncEffectiveFieldsDuringRead(existingState SqlTaskAlert) { +} + type SqlTaskDashboard struct { // Subject of the email sent to subscribers of this task. CustomSubject types.String `tfsdk:"custom_subject" tf:"optional"` @@ -2277,6 +2973,12 @@ type SqlTaskDashboard struct { Subscriptions []SqlTaskSubscription `tfsdk:"subscriptions" tf:"optional"` } +func (newState *SqlTaskDashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlTaskDashboard) { +} + +func (newState *SqlTaskDashboard) SyncEffectiveFieldsDuringRead(existingState SqlTaskDashboard) { +} + type SqlTaskFile struct { // Path of the SQL file. Must be relative if the source is a remote Git // repository and absolute for workspace paths. @@ -2292,11 +2994,23 @@ type SqlTaskFile struct { Source types.String `tfsdk:"source" tf:"optional"` } +func (newState *SqlTaskFile) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlTaskFile) { +} + +func (newState *SqlTaskFile) SyncEffectiveFieldsDuringRead(existingState SqlTaskFile) { +} + type SqlTaskQuery struct { // The canonical identifier of the SQL query. QueryId types.String `tfsdk:"query_id" tf:""` } +func (newState *SqlTaskQuery) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlTaskQuery) { +} + +func (newState *SqlTaskQuery) SyncEffectiveFieldsDuringRead(existingState SqlTaskQuery) { +} + type SqlTaskSubscription struct { // The canonical identifier of the destination to receive email // notification. This parameter is mutually exclusive with user_name. You @@ -2309,9 +3023,18 @@ type SqlTaskSubscription struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *SqlTaskSubscription) SyncEffectiveFieldsDuringCreateOrUpdate(plan SqlTaskSubscription) { +} + +func (newState *SqlTaskSubscription) SyncEffectiveFieldsDuringRead(existingState SqlTaskSubscription) { +} + type SubmitRun struct { // List of permissions to set on the job. AccessControlList []JobAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` + // The user specified id of the budget policy to use for this one-time run. + // If not specified, the run will not be attributed to any budget policy. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // An optional set of email addresses notified when the run begins or // completes. EmailNotifications []JobEmailNotifications `tfsdk:"email_notifications" tf:"optional,object"` @@ -2368,12 +3091,24 @@ type SubmitRun struct { WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } +func (newState *SubmitRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan SubmitRun) { +} + +func (newState *SubmitRun) SyncEffectiveFieldsDuringRead(existingState SubmitRun) { +} + // Run was created and started successfully. type SubmitRunResponse struct { // The canonical identifier for the newly submitted run. RunId types.Int64 `tfsdk:"run_id" tf:"optional"` } +func (newState *SubmitRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SubmitRunResponse) { +} + +func (newState *SubmitRunResponse) SyncEffectiveFieldsDuringRead(existingState SubmitRunResponse) { +} + type SubmitTask struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks.
Does not require a cluster to @@ -2469,6 +3204,12 @@ type SubmitTask struct { WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } +func (newState *SubmitTask) SyncEffectiveFieldsDuringCreateOrUpdate(plan SubmitTask) { +} + +func (newState *SubmitTask) SyncEffectiveFieldsDuringRead(existingState SubmitTask) { +} + type TableUpdateTriggerConfiguration struct { // The table(s) condition based on which to trigger a job run. Condition types.String `tfsdk:"condition" tf:"optional"` @@ -2486,6 +3227,12 @@ type TableUpdateTriggerConfiguration struct { WaitAfterLastChangeSeconds types.Int64 `tfsdk:"wait_after_last_change_seconds" tf:"optional"` } +func (newState *TableUpdateTriggerConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableUpdateTriggerConfiguration) { +} + +func (newState *TableUpdateTriggerConfiguration) SyncEffectiveFieldsDuringRead(existingState TableUpdateTriggerConfiguration) { +} + type Task struct { // If condition_task, specifies a condition with an outcome that can be used // to control the execution of other tasks. Does not require a cluster to @@ -2604,6 +3351,12 @@ type Task struct { WebhookNotifications []WebhookNotifications `tfsdk:"webhook_notifications" tf:"optional,object"` } +func (newState *Task) SyncEffectiveFieldsDuringCreateOrUpdate(plan Task) { +} + +func (newState *Task) SyncEffectiveFieldsDuringRead(existingState Task) { +} + type TaskDependency struct { // Can only be specified on condition task dependencies. The outcome of the // dependent task that must be met for this task to run. @@ -2612,6 +3365,12 @@ type TaskDependency struct { TaskKey types.String `tfsdk:"task_key" tf:""` } +func (newState *TaskDependency) SyncEffectiveFieldsDuringCreateOrUpdate(plan TaskDependency) { +} + +func (newState *TaskDependency) SyncEffectiveFieldsDuringRead(existingState TaskDependency) { +} + type TaskEmailNotifications struct { // If true, do not send email to recipients specified in `on_failure` if the // run is skipped. This field is `deprecated`. Please use the @@ -2648,6 +3407,12 @@ type TaskEmailNotifications struct { OnSuccess []types.String `tfsdk:"on_success" tf:"optional"` } +func (newState *TaskEmailNotifications) SyncEffectiveFieldsDuringCreateOrUpdate(plan TaskEmailNotifications) { +} + +func (newState *TaskEmailNotifications) SyncEffectiveFieldsDuringRead(existingState TaskEmailNotifications) { +} + type TaskNotificationSettings struct { // If true, do not send notifications to recipients specified in `on_start` // for the retried runs and do not send notifications to recipients @@ -2661,6 +3426,12 @@ type TaskNotificationSettings struct { NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs" tf:"optional"` } +func (newState *TaskNotificationSettings) SyncEffectiveFieldsDuringCreateOrUpdate(plan TaskNotificationSettings) { +} + +func (newState *TaskNotificationSettings) SyncEffectiveFieldsDuringRead(existingState TaskNotificationSettings) { +} + type TerminationDetails struct { // The code indicates why the run was terminated. Additional codes might be // introduced in future releases. 
* `SUCCESS`: The run was completed @@ -2722,12 +3493,24 @@ type TerminationDetails struct { Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *TerminationDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan TerminationDetails) { +} + +func (newState *TerminationDetails) SyncEffectiveFieldsDuringRead(existingState TerminationDetails) { +} + // Additional details about what triggered the run type TriggerInfo struct { // The run id of the Run Job task run RunId types.Int64 `tfsdk:"run_id" tf:"optional"` } +func (newState *TriggerInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan TriggerInfo) { +} + +func (newState *TriggerInfo) SyncEffectiveFieldsDuringRead(existingState TriggerInfo) { +} + type TriggerSettings struct { // File arrival trigger settings. FileArrival []FileArrivalTriggerConfiguration `tfsdk:"file_arrival" tf:"optional,object"` @@ -2741,6 +3524,12 @@ type TriggerSettings struct { TableUpdate []TableUpdateTriggerConfiguration `tfsdk:"table_update" tf:"optional,object"` } +func (newState *TriggerSettings) SyncEffectiveFieldsDuringCreateOrUpdate(plan TriggerSettings) { +} + +func (newState *TriggerSettings) SyncEffectiveFieldsDuringRead(existingState TriggerSettings) { +} + type UpdateJob struct { // Remove top-level fields in the job settings. Removing nested fields is // not supported, except for tasks and job clusters (`tasks/task_1`). This @@ -2762,9 +3551,21 @@ type UpdateJob struct { NewSettings []JobSettings `tfsdk:"new_settings" tf:"optional,object"` } +func (newState *UpdateJob) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateJob) { +} + +func (newState *UpdateJob) SyncEffectiveFieldsDuringRead(existingState UpdateJob) { +} + type UpdateResponse struct { } +func (newState *UpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateResponse) { +} + +func (newState *UpdateResponse) SyncEffectiveFieldsDuringRead(existingState UpdateResponse) { +} + type ViewItem struct { // Content of the view. Content types.String `tfsdk:"content" tf:"optional"` @@ -2776,10 +3577,22 @@ type ViewItem struct { Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *ViewItem) SyncEffectiveFieldsDuringCreateOrUpdate(plan ViewItem) { +} + +func (newState *ViewItem) SyncEffectiveFieldsDuringRead(existingState ViewItem) { +} + type Webhook struct { Id types.String `tfsdk:"id" tf:""` } +func (newState *Webhook) SyncEffectiveFieldsDuringCreateOrUpdate(plan Webhook) { +} + +func (newState *Webhook) SyncEffectiveFieldsDuringRead(existingState Webhook) { +} + type WebhookNotifications struct { // An optional list of system notification IDs to call when the duration of // a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` @@ -2806,3 +3619,9 @@ type WebhookNotifications struct { // the `on_success` property. 
OnSuccess []Webhook `tfsdk:"on_success" tf:"optional"` } + +func (newState *WebhookNotifications) SyncEffectiveFieldsDuringCreateOrUpdate(plan WebhookNotifications) { +} + +func (newState *WebhookNotifications) SyncEffectiveFieldsDuringRead(existingState WebhookNotifications) { +} diff --git a/internal/service/marketplace_tf/model.go b/internal/service/marketplace_tf/model.go index 40648fd8c2..125335b593 100755 --- a/internal/service/marketplace_tf/model.go +++ b/internal/service/marketplace_tf/model.go @@ -20,32 +20,74 @@ type AddExchangeForListingRequest struct { ListingId types.String `tfsdk:"listing_id" tf:""` } +func (newState *AddExchangeForListingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan AddExchangeForListingRequest) { +} + +func (newState *AddExchangeForListingRequest) SyncEffectiveFieldsDuringRead(existingState AddExchangeForListingRequest) { +} + type AddExchangeForListingResponse struct { ExchangeForListing []ExchangeListing `tfsdk:"exchange_for_listing" tf:"optional,object"` } +func (newState *AddExchangeForListingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan AddExchangeForListingResponse) { +} + +func (newState *AddExchangeForListingResponse) SyncEffectiveFieldsDuringRead(existingState AddExchangeForListingResponse) { +} + // Get one batch of listings. One may specify up to 50 IDs per request. type BatchGetListingsRequest struct { Ids []types.String `tfsdk:"-"` } +func (newState *BatchGetListingsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan BatchGetListingsRequest) { +} + +func (newState *BatchGetListingsRequest) SyncEffectiveFieldsDuringRead(existingState BatchGetListingsRequest) { +} + type BatchGetListingsResponse struct { Listings []Listing `tfsdk:"listings" tf:"optional"` } +func (newState *BatchGetListingsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan BatchGetListingsResponse) { +} + +func (newState *BatchGetListingsResponse) SyncEffectiveFieldsDuringRead(existingState BatchGetListingsResponse) { +} + // Get one batch of providers. One may specify up to 50 IDs per request. 
type BatchGetProvidersRequest struct { Ids []types.String `tfsdk:"-"` } +func (newState *BatchGetProvidersRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan BatchGetProvidersRequest) { +} + +func (newState *BatchGetProvidersRequest) SyncEffectiveFieldsDuringRead(existingState BatchGetProvidersRequest) { +} + type BatchGetProvidersResponse struct { Providers []ProviderInfo `tfsdk:"providers" tf:"optional"` } +func (newState *BatchGetProvidersResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan BatchGetProvidersResponse) { +} + +func (newState *BatchGetProvidersResponse) SyncEffectiveFieldsDuringRead(existingState BatchGetProvidersResponse) { +} + type ConsumerTerms struct { Version types.String `tfsdk:"version" tf:""` } +func (newState *ConsumerTerms) SyncEffectiveFieldsDuringCreateOrUpdate(plan ConsumerTerms) { +} + +func (newState *ConsumerTerms) SyncEffectiveFieldsDuringRead(existingState ConsumerTerms) { +} + // contact info for the consumer requesting data or performing a listing // installation type ContactInfo struct { @@ -58,22 +100,52 @@ type ContactInfo struct { LastName types.String `tfsdk:"last_name" tf:"optional"` } +func (newState *ContactInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ContactInfo) { +} + +func (newState *ContactInfo) SyncEffectiveFieldsDuringRead(existingState ContactInfo) { +} + type CreateExchangeFilterRequest struct { Filter []ExchangeFilter `tfsdk:"filter" tf:"object"` } +func (newState *CreateExchangeFilterRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateExchangeFilterRequest) { +} + +func (newState *CreateExchangeFilterRequest) SyncEffectiveFieldsDuringRead(existingState CreateExchangeFilterRequest) { +} + type CreateExchangeFilterResponse struct { FilterId types.String `tfsdk:"filter_id" tf:"optional"` } +func (newState *CreateExchangeFilterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateExchangeFilterResponse) { +} + +func (newState *CreateExchangeFilterResponse) SyncEffectiveFieldsDuringRead(existingState CreateExchangeFilterResponse) { +} + type CreateExchangeRequest struct { Exchange []Exchange `tfsdk:"exchange" tf:"object"` } +func (newState *CreateExchangeRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateExchangeRequest) { +} + +func (newState *CreateExchangeRequest) SyncEffectiveFieldsDuringRead(existingState CreateExchangeRequest) { +} + type CreateExchangeResponse struct { ExchangeId types.String `tfsdk:"exchange_id" tf:"optional"` } +func (newState *CreateExchangeResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateExchangeResponse) { +} + +func (newState *CreateExchangeResponse) SyncEffectiveFieldsDuringRead(existingState CreateExchangeResponse) { +} + type CreateFileRequest struct { DisplayName types.String `tfsdk:"display_name" tf:"optional"` @@ -84,12 +156,24 @@ type CreateFileRequest struct { MimeType types.String `tfsdk:"mime_type" tf:""` } +func (newState *CreateFileRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateFileRequest) { +} + +func (newState *CreateFileRequest) SyncEffectiveFieldsDuringRead(existingState CreateFileRequest) { +} + type CreateFileResponse struct { FileInfo []FileInfo `tfsdk:"file_info" tf:"optional,object"` // Pre-signed POST URL to blob storage UploadUrl types.String `tfsdk:"upload_url" tf:"optional"` } +func (newState *CreateFileResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateFileResponse) { +} + +func (newState *CreateFileResponse) SyncEffectiveFieldsDuringRead(existingState CreateFileResponse) { +} + type CreateInstallationRequest 
struct { AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"optional,object"` @@ -104,14 +188,32 @@ type CreateInstallationRequest struct { ShareName types.String `tfsdk:"share_name" tf:"optional"` } +func (newState *CreateInstallationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateInstallationRequest) { +} + +func (newState *CreateInstallationRequest) SyncEffectiveFieldsDuringRead(existingState CreateInstallationRequest) { +} + type CreateListingRequest struct { Listing []Listing `tfsdk:"listing" tf:"object"` } +func (newState *CreateListingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateListingRequest) { +} + +func (newState *CreateListingRequest) SyncEffectiveFieldsDuringRead(existingState CreateListingRequest) { +} + type CreateListingResponse struct { ListingId types.String `tfsdk:"listing_id" tf:"optional"` } +func (newState *CreateListingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateListingResponse) { +} + +func (newState *CreateListingResponse) SyncEffectiveFieldsDuringRead(existingState CreateListingResponse) { +} + // Data request messages also creates a lead (maybe) type CreatePersonalizationRequest struct { AcceptedConsumerTerms []ConsumerTerms `tfsdk:"accepted_consumer_terms" tf:"object"` @@ -133,48 +235,114 @@ type CreatePersonalizationRequest struct { RecipientType types.String `tfsdk:"recipient_type" tf:"optional"` } +func (newState *CreatePersonalizationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePersonalizationRequest) { +} + +func (newState *CreatePersonalizationRequest) SyncEffectiveFieldsDuringRead(existingState CreatePersonalizationRequest) { +} + type CreatePersonalizationRequestResponse struct { Id types.String `tfsdk:"id" tf:"optional"` } +func (newState *CreatePersonalizationRequestResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePersonalizationRequestResponse) { +} + +func (newState *CreatePersonalizationRequestResponse) SyncEffectiveFieldsDuringRead(existingState CreatePersonalizationRequestResponse) { +} + type CreateProviderRequest struct { Provider []ProviderInfo `tfsdk:"provider" tf:"object"` } +func (newState *CreateProviderRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateProviderRequest) { +} + +func (newState *CreateProviderRequest) SyncEffectiveFieldsDuringRead(existingState CreateProviderRequest) { +} + type CreateProviderResponse struct { Id types.String `tfsdk:"id" tf:"optional"` } +func (newState *CreateProviderResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateProviderResponse) { +} + +func (newState *CreateProviderResponse) SyncEffectiveFieldsDuringRead(existingState CreateProviderResponse) { +} + type DataRefreshInfo struct { Interval types.Int64 `tfsdk:"interval" tf:""` Unit types.String `tfsdk:"unit" tf:""` } +func (newState *DataRefreshInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan DataRefreshInfo) { +} + +func (newState *DataRefreshInfo) SyncEffectiveFieldsDuringRead(existingState DataRefreshInfo) { +} + // Delete an exchange filter type DeleteExchangeFilterRequest struct { Id types.String `tfsdk:"-"` } +func (newState *DeleteExchangeFilterRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteExchangeFilterRequest) { +} + +func (newState *DeleteExchangeFilterRequest) SyncEffectiveFieldsDuringRead(existingState DeleteExchangeFilterRequest) { +} + type DeleteExchangeFilterResponse struct { } +func (newState *DeleteExchangeFilterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteExchangeFilterResponse) { +} + +func 
(newState *DeleteExchangeFilterResponse) SyncEffectiveFieldsDuringRead(existingState DeleteExchangeFilterResponse) { +} + // Delete an exchange type DeleteExchangeRequest struct { Id types.String `tfsdk:"-"` } +func (newState *DeleteExchangeRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteExchangeRequest) { +} + +func (newState *DeleteExchangeRequest) SyncEffectiveFieldsDuringRead(existingState DeleteExchangeRequest) { +} + type DeleteExchangeResponse struct { } +func (newState *DeleteExchangeResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteExchangeResponse) { +} + +func (newState *DeleteExchangeResponse) SyncEffectiveFieldsDuringRead(existingState DeleteExchangeResponse) { +} + // Delete a file type DeleteFileRequest struct { FileId types.String `tfsdk:"-"` } +func (newState *DeleteFileRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteFileRequest) { +} + +func (newState *DeleteFileRequest) SyncEffectiveFieldsDuringRead(existingState DeleteFileRequest) { +} + type DeleteFileResponse struct { } +func (newState *DeleteFileResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteFileResponse) { +} + +func (newState *DeleteFileResponse) SyncEffectiveFieldsDuringRead(existingState DeleteFileResponse) { +} + // Uninstall from a listing type DeleteInstallationRequest struct { InstallationId types.String `tfsdk:"-"` @@ -182,25 +350,61 @@ type DeleteInstallationRequest struct { ListingId types.String `tfsdk:"-"` } +func (newState *DeleteInstallationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteInstallationRequest) { +} + +func (newState *DeleteInstallationRequest) SyncEffectiveFieldsDuringRead(existingState DeleteInstallationRequest) { +} + type DeleteInstallationResponse struct { } +func (newState *DeleteInstallationResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteInstallationResponse) { +} + +func (newState *DeleteInstallationResponse) SyncEffectiveFieldsDuringRead(existingState DeleteInstallationResponse) { +} + // Delete a listing type DeleteListingRequest struct { Id types.String `tfsdk:"-"` } +func (newState *DeleteListingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteListingRequest) { +} + +func (newState *DeleteListingRequest) SyncEffectiveFieldsDuringRead(existingState DeleteListingRequest) { +} + type DeleteListingResponse struct { } +func (newState *DeleteListingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteListingResponse) { +} + +func (newState *DeleteListingResponse) SyncEffectiveFieldsDuringRead(existingState DeleteListingResponse) { +} + // Delete provider type DeleteProviderRequest struct { Id types.String `tfsdk:"-"` } +func (newState *DeleteProviderRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteProviderRequest) { +} + +func (newState *DeleteProviderRequest) SyncEffectiveFieldsDuringRead(existingState DeleteProviderRequest) { +} + type DeleteProviderResponse struct { } +func (newState *DeleteProviderResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteProviderResponse) { +} + +func (newState *DeleteProviderResponse) SyncEffectiveFieldsDuringRead(existingState DeleteProviderResponse) { +} + type Exchange struct { Comment types.String `tfsdk:"comment" tf:"optional"` @@ -221,6 +425,12 @@ type Exchange struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *Exchange) SyncEffectiveFieldsDuringCreateOrUpdate(plan Exchange) { +} + +func (newState *Exchange) SyncEffectiveFieldsDuringRead(existingState Exchange) { +} + type ExchangeFilter struct { 
CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` @@ -241,6 +451,12 @@ type ExchangeFilter struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *ExchangeFilter) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExchangeFilter) { +} + +func (newState *ExchangeFilter) SyncEffectiveFieldsDuringRead(existingState ExchangeFilter) { +} + type ExchangeListing struct { CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` @@ -257,6 +473,12 @@ type ExchangeListing struct { ListingName types.String `tfsdk:"listing_name" tf:"optional"` } +func (newState *ExchangeListing) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExchangeListing) { +} + +func (newState *ExchangeListing) SyncEffectiveFieldsDuringRead(existingState ExchangeListing) { +} + type FileInfo struct { CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` // Name displayed to users for applicable files, e.g. embedded notebooks @@ -280,35 +502,77 @@ type FileInfo struct { UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` } +func (newState *FileInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan FileInfo) { +} + +func (newState *FileInfo) SyncEffectiveFieldsDuringRead(existingState FileInfo) { +} + type FileParent struct { FileParentType types.String `tfsdk:"file_parent_type" tf:"optional"` // TODO make the following fields required ParentId types.String `tfsdk:"parent_id" tf:"optional"` } +func (newState *FileParent) SyncEffectiveFieldsDuringCreateOrUpdate(plan FileParent) { +} + +func (newState *FileParent) SyncEffectiveFieldsDuringRead(existingState FileParent) { +} + // Get an exchange type GetExchangeRequest struct { Id types.String `tfsdk:"-"` } +func (newState *GetExchangeRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetExchangeRequest) { +} + +func (newState *GetExchangeRequest) SyncEffectiveFieldsDuringRead(existingState GetExchangeRequest) { +} + type GetExchangeResponse struct { Exchange []Exchange `tfsdk:"exchange" tf:"optional,object"` } +func (newState *GetExchangeResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetExchangeResponse) { +} + +func (newState *GetExchangeResponse) SyncEffectiveFieldsDuringRead(existingState GetExchangeResponse) { +} + // Get a file type GetFileRequest struct { FileId types.String `tfsdk:"-"` } +func (newState *GetFileRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetFileRequest) { +} + +func (newState *GetFileRequest) SyncEffectiveFieldsDuringRead(existingState GetFileRequest) { +} + type GetFileResponse struct { FileInfo []FileInfo `tfsdk:"file_info" tf:"optional,object"` } +func (newState *GetFileResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetFileResponse) { +} + +func (newState *GetFileResponse) SyncEffectiveFieldsDuringRead(existingState GetFileResponse) { +} + type GetLatestVersionProviderAnalyticsDashboardResponse struct { // version here is latest logical version of the dashboard template Version types.Int64 `tfsdk:"version" tf:"optional"` } +func (newState *GetLatestVersionProviderAnalyticsDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetLatestVersionProviderAnalyticsDashboardResponse) { +} + +func (newState *GetLatestVersionProviderAnalyticsDashboardResponse) SyncEffectiveFieldsDuringRead(existingState GetLatestVersionProviderAnalyticsDashboardResponse) { +} + // Get listing content metadata type GetListingContentMetadataRequest struct { ListingId types.String `tfsdk:"-"` @@ -318,21 +582,45 @@ type GetListingContentMetadataRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState 
*GetListingContentMetadataRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetListingContentMetadataRequest) { +} + +func (newState *GetListingContentMetadataRequest) SyncEffectiveFieldsDuringRead(existingState GetListingContentMetadataRequest) { +} + type GetListingContentMetadataResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` SharedDataObjects []SharedDataObject `tfsdk:"shared_data_objects" tf:"optional"` } +func (newState *GetListingContentMetadataResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetListingContentMetadataResponse) { +} + +func (newState *GetListingContentMetadataResponse) SyncEffectiveFieldsDuringRead(existingState GetListingContentMetadataResponse) { +} + // Get listing type GetListingRequest struct { Id types.String `tfsdk:"-"` } +func (newState *GetListingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetListingRequest) { +} + +func (newState *GetListingRequest) SyncEffectiveFieldsDuringRead(existingState GetListingRequest) { +} + type GetListingResponse struct { Listing []Listing `tfsdk:"listing" tf:"optional,object"` } +func (newState *GetListingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetListingResponse) { +} + +func (newState *GetListingResponse) SyncEffectiveFieldsDuringRead(existingState GetListingResponse) { +} + // List listings type GetListingsRequest struct { PageSize types.Int64 `tfsdk:"-"` @@ -340,34 +628,76 @@ type GetListingsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *GetListingsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetListingsRequest) { +} + +func (newState *GetListingsRequest) SyncEffectiveFieldsDuringRead(existingState GetListingsRequest) { +} + type GetListingsResponse struct { Listings []Listing `tfsdk:"listings" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *GetListingsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetListingsResponse) { +} + +func (newState *GetListingsResponse) SyncEffectiveFieldsDuringRead(existingState GetListingsResponse) { +} + // Get the personalization request for a listing type GetPersonalizationRequestRequest struct { ListingId types.String `tfsdk:"-"` } +func (newState *GetPersonalizationRequestRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPersonalizationRequestRequest) { +} + +func (newState *GetPersonalizationRequestRequest) SyncEffectiveFieldsDuringRead(existingState GetPersonalizationRequestRequest) { +} + type GetPersonalizationRequestResponse struct { PersonalizationRequests []PersonalizationRequest `tfsdk:"personalization_requests" tf:"optional"` } +func (newState *GetPersonalizationRequestResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPersonalizationRequestResponse) { +} + +func (newState *GetPersonalizationRequestResponse) SyncEffectiveFieldsDuringRead(existingState GetPersonalizationRequestResponse) { +} + // Get a provider type GetProviderRequest struct { Id types.String `tfsdk:"-"` } +func (newState *GetProviderRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetProviderRequest) { +} + +func (newState *GetProviderRequest) SyncEffectiveFieldsDuringRead(existingState GetProviderRequest) { +} + type GetProviderResponse struct { Provider []ProviderInfo `tfsdk:"provider" tf:"optional,object"` } +func (newState *GetProviderResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetProviderResponse) { +} + +func (newState *GetProviderResponse) SyncEffectiveFieldsDuringRead(existingState GetProviderResponse) { +} + type 
Installation struct { Installation []InstallationDetail `tfsdk:"installation" tf:"optional,object"` } +func (newState *Installation) SyncEffectiveFieldsDuringCreateOrUpdate(plan Installation) { +} + +func (newState *Installation) SyncEffectiveFieldsDuringRead(existingState Installation) { +} + type InstallationDetail struct { CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` @@ -396,6 +726,12 @@ type InstallationDetail struct { Tokens []TokenInfo `tfsdk:"tokens" tf:"optional"` } +func (newState *InstallationDetail) SyncEffectiveFieldsDuringCreateOrUpdate(plan InstallationDetail) { +} + +func (newState *InstallationDetail) SyncEffectiveFieldsDuringRead(existingState InstallationDetail) { +} + // List all installations type ListAllInstallationsRequest struct { PageSize types.Int64 `tfsdk:"-"` @@ -403,12 +739,24 @@ type ListAllInstallationsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListAllInstallationsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAllInstallationsRequest) { +} + +func (newState *ListAllInstallationsRequest) SyncEffectiveFieldsDuringRead(existingState ListAllInstallationsRequest) { +} + type ListAllInstallationsResponse struct { Installations []InstallationDetail `tfsdk:"installations" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListAllInstallationsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAllInstallationsResponse) { +} + +func (newState *ListAllInstallationsResponse) SyncEffectiveFieldsDuringRead(existingState ListAllInstallationsResponse) { +} + // List all personalization requests type ListAllPersonalizationRequestsRequest struct { PageSize types.Int64 `tfsdk:"-"` @@ -416,12 +764,24 @@ type ListAllPersonalizationRequestsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListAllPersonalizationRequestsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAllPersonalizationRequestsRequest) { +} + +func (newState *ListAllPersonalizationRequestsRequest) SyncEffectiveFieldsDuringRead(existingState ListAllPersonalizationRequestsRequest) { +} + type ListAllPersonalizationRequestsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` PersonalizationRequests []PersonalizationRequest `tfsdk:"personalization_requests" tf:"optional"` } +func (newState *ListAllPersonalizationRequestsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAllPersonalizationRequestsResponse) { +} + +func (newState *ListAllPersonalizationRequestsResponse) SyncEffectiveFieldsDuringRead(existingState ListAllPersonalizationRequestsResponse) { +} + // List exchange filters type ListExchangeFiltersRequest struct { ExchangeId types.String `tfsdk:"-"` @@ -431,12 +791,24 @@ type ListExchangeFiltersRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListExchangeFiltersRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExchangeFiltersRequest) { +} + +func (newState *ListExchangeFiltersRequest) SyncEffectiveFieldsDuringRead(existingState ListExchangeFiltersRequest) { +} + type ListExchangeFiltersResponse struct { Filters []ExchangeFilter `tfsdk:"filters" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListExchangeFiltersResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExchangeFiltersResponse) { +} + +func (newState *ListExchangeFiltersResponse) SyncEffectiveFieldsDuringRead(existingState ListExchangeFiltersResponse) { +} + // List exchanges for listing 
type ListExchangesForListingRequest struct { ListingId types.String `tfsdk:"-"` @@ -446,12 +818,24 @@ type ListExchangesForListingRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListExchangesForListingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExchangesForListingRequest) { +} + +func (newState *ListExchangesForListingRequest) SyncEffectiveFieldsDuringRead(existingState ListExchangesForListingRequest) { +} + type ListExchangesForListingResponse struct { ExchangeListing []ExchangeListing `tfsdk:"exchange_listing" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListExchangesForListingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExchangesForListingResponse) { +} + +func (newState *ListExchangesForListingResponse) SyncEffectiveFieldsDuringRead(existingState ListExchangesForListingResponse) { +} + // List exchanges type ListExchangesRequest struct { PageSize types.Int64 `tfsdk:"-"` @@ -459,12 +843,24 @@ type ListExchangesRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListExchangesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExchangesRequest) { +} + +func (newState *ListExchangesRequest) SyncEffectiveFieldsDuringRead(existingState ListExchangesRequest) { +} + type ListExchangesResponse struct { Exchanges []Exchange `tfsdk:"exchanges" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListExchangesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExchangesResponse) { +} + +func (newState *ListExchangesResponse) SyncEffectiveFieldsDuringRead(existingState ListExchangesResponse) { +} + // List files type ListFilesRequest struct { FileParent []FileParent `tfsdk:"-"` @@ -474,12 +870,24 @@ type ListFilesRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListFilesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListFilesRequest) { +} + +func (newState *ListFilesRequest) SyncEffectiveFieldsDuringRead(existingState ListFilesRequest) { +} + type ListFilesResponse struct { FileInfos []FileInfo `tfsdk:"file_infos" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListFilesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListFilesResponse) { +} + +func (newState *ListFilesResponse) SyncEffectiveFieldsDuringRead(existingState ListFilesResponse) { +} + // List all listing fulfillments type ListFulfillmentsRequest struct { ListingId types.String `tfsdk:"-"` @@ -489,12 +897,24 @@ type ListFulfillmentsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListFulfillmentsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListFulfillmentsRequest) { +} + +func (newState *ListFulfillmentsRequest) SyncEffectiveFieldsDuringRead(existingState ListFulfillmentsRequest) { +} + type ListFulfillmentsResponse struct { Fulfillments []ListingFulfillment `tfsdk:"fulfillments" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListFulfillmentsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListFulfillmentsResponse) { +} + +func (newState *ListFulfillmentsResponse) SyncEffectiveFieldsDuringRead(existingState ListFulfillmentsResponse) { +} + // List installations for a listing type ListInstallationsRequest struct { ListingId types.String `tfsdk:"-"` @@ -504,12 +924,24 @@ type ListInstallationsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState 
*ListInstallationsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListInstallationsRequest) { +} + +func (newState *ListInstallationsRequest) SyncEffectiveFieldsDuringRead(existingState ListInstallationsRequest) { +} + type ListInstallationsResponse struct { Installations []InstallationDetail `tfsdk:"installations" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListInstallationsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListInstallationsResponse) { +} + +func (newState *ListInstallationsResponse) SyncEffectiveFieldsDuringRead(existingState ListInstallationsResponse) { +} + // List listings for exchange type ListListingsForExchangeRequest struct { ExchangeId types.String `tfsdk:"-"` @@ -519,12 +951,24 @@ type ListListingsForExchangeRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListListingsForExchangeRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListListingsForExchangeRequest) { +} + +func (newState *ListListingsForExchangeRequest) SyncEffectiveFieldsDuringRead(existingState ListListingsForExchangeRequest) { +} + type ListListingsForExchangeResponse struct { ExchangeListings []ExchangeListing `tfsdk:"exchange_listings" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListListingsForExchangeResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListListingsForExchangeResponse) { +} + +func (newState *ListListingsForExchangeResponse) SyncEffectiveFieldsDuringRead(existingState ListListingsForExchangeResponse) { +} + // List listings type ListListingsRequest struct { // Matches any of the following asset types @@ -547,12 +991,24 @@ type ListListingsRequest struct { Tags []ListingTag `tfsdk:"-"` } +func (newState *ListListingsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListListingsRequest) { +} + +func (newState *ListListingsRequest) SyncEffectiveFieldsDuringRead(existingState ListListingsRequest) { +} + type ListListingsResponse struct { Listings []Listing `tfsdk:"listings" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListListingsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListListingsResponse) { +} + +func (newState *ListListingsResponse) SyncEffectiveFieldsDuringRead(existingState ListListingsResponse) { +} + type ListProviderAnalyticsDashboardResponse struct { // dashboard_id will be used to open Lakeview dashboard. 
DashboardId types.String `tfsdk:"dashboard_id" tf:""` @@ -562,6 +1018,12 @@ type ListProviderAnalyticsDashboardResponse struct { Version types.Int64 `tfsdk:"version" tf:"optional"` } +func (newState *ListProviderAnalyticsDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListProviderAnalyticsDashboardResponse) { +} + +func (newState *ListProviderAnalyticsDashboardResponse) SyncEffectiveFieldsDuringRead(existingState ListProviderAnalyticsDashboardResponse) { +} + // List providers type ListProvidersRequest struct { IsFeatured types.Bool `tfsdk:"-"` @@ -571,12 +1033,24 @@ type ListProvidersRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListProvidersRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListProvidersRequest) { +} + +func (newState *ListProvidersRequest) SyncEffectiveFieldsDuringRead(existingState ListProvidersRequest) { +} + type ListProvidersResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` Providers []ProviderInfo `tfsdk:"providers" tf:"optional"` } +func (newState *ListProvidersResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListProvidersResponse) { +} + +func (newState *ListProvidersResponse) SyncEffectiveFieldsDuringRead(existingState ListProvidersResponse) { +} + type Listing struct { Detail []ListingDetail `tfsdk:"detail" tf:"optional,object"` @@ -585,6 +1059,12 @@ type Listing struct { Summary []ListingSummary `tfsdk:"summary" tf:"object"` } +func (newState *Listing) SyncEffectiveFieldsDuringCreateOrUpdate(plan Listing) { +} + +func (newState *Listing) SyncEffectiveFieldsDuringRead(existingState Listing) { +} + type ListingDetail struct { // Type of assets included in the listing. eg. GIT_REPO, DATA_TABLE, MODEL, // NOTEBOOK @@ -636,6 +1116,12 @@ type ListingDetail struct { UpdateFrequency []DataRefreshInfo `tfsdk:"update_frequency" tf:"optional,object"` } +func (newState *ListingDetail) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListingDetail) { +} + +func (newState *ListingDetail) SyncEffectiveFieldsDuringRead(existingState ListingDetail) { +} + type ListingFulfillment struct { FulfillmentType types.String `tfsdk:"fulfillment_type" tf:"optional"` @@ -648,10 +1134,22 @@ type ListingFulfillment struct { ShareInfo []ShareInfo `tfsdk:"share_info" tf:"optional,object"` } +func (newState *ListingFulfillment) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListingFulfillment) { +} + +func (newState *ListingFulfillment) SyncEffectiveFieldsDuringRead(existingState ListingFulfillment) { +} + type ListingSetting struct { Visibility types.String `tfsdk:"visibility" tf:"optional"` } +func (newState *ListingSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListingSetting) { +} + +func (newState *ListingSetting) SyncEffectiveFieldsDuringRead(existingState ListingSetting) { +} + // Next Number: 26 type ListingSummary struct { Categories []types.String `tfsdk:"categories" tf:"optional"` @@ -694,6 +1192,12 @@ type ListingSummary struct { UpdatedById types.Int64 `tfsdk:"updated_by_id" tf:"optional"` } +func (newState *ListingSummary) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListingSummary) { +} + +func (newState *ListingSummary) SyncEffectiveFieldsDuringRead(existingState ListingSummary) { +} + type ListingTag struct { // Tag name (enum) TagName types.String `tfsdk:"tag_name" tf:"optional"` @@ -702,6 +1206,12 @@ type ListingTag struct { TagValues []types.String `tfsdk:"tag_values" tf:"optional"` } +func (newState *ListingTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListingTag) { +} + +func 
(newState *ListingTag) SyncEffectiveFieldsDuringRead(existingState ListingTag) { +} + type PersonalizationRequest struct { Comment types.String `tfsdk:"comment" tf:"optional"` @@ -737,10 +1247,22 @@ type PersonalizationRequest struct { UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` } +func (newState *PersonalizationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PersonalizationRequest) { +} + +func (newState *PersonalizationRequest) SyncEffectiveFieldsDuringRead(existingState PersonalizationRequest) { +} + type ProviderAnalyticsDashboard struct { Id types.String `tfsdk:"id" tf:""` } +func (newState *ProviderAnalyticsDashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan ProviderAnalyticsDashboard) { +} + +func (newState *ProviderAnalyticsDashboard) SyncEffectiveFieldsDuringRead(existingState ProviderAnalyticsDashboard) { +} + type ProviderInfo struct { BusinessContactEmail types.String `tfsdk:"business_contact_email" tf:""` @@ -771,25 +1293,55 @@ type ProviderInfo struct { TermOfServiceLink types.String `tfsdk:"term_of_service_link" tf:""` } +func (newState *ProviderInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ProviderInfo) { +} + +func (newState *ProviderInfo) SyncEffectiveFieldsDuringRead(existingState ProviderInfo) { +} + type RegionInfo struct { Cloud types.String `tfsdk:"cloud" tf:"optional"` Region types.String `tfsdk:"region" tf:"optional"` } +func (newState *RegionInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegionInfo) { +} + +func (newState *RegionInfo) SyncEffectiveFieldsDuringRead(existingState RegionInfo) { +} + // Remove an exchange for listing type RemoveExchangeForListingRequest struct { Id types.String `tfsdk:"-"` } +func (newState *RemoveExchangeForListingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RemoveExchangeForListingRequest) { +} + +func (newState *RemoveExchangeForListingRequest) SyncEffectiveFieldsDuringRead(existingState RemoveExchangeForListingRequest) { +} + type RemoveExchangeForListingResponse struct { } +func (newState *RemoveExchangeForListingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RemoveExchangeForListingResponse) { +} + +func (newState *RemoveExchangeForListingResponse) SyncEffectiveFieldsDuringRead(existingState RemoveExchangeForListingResponse) { +} + type RepoInfo struct { // the git repo url e.g. 
https://github.com/databrickslabs/dolly.git GitRepoUrl types.String `tfsdk:"git_repo_url" tf:""` } +func (newState *RepoInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoInfo) { +} + +func (newState *RepoInfo) SyncEffectiveFieldsDuringRead(existingState RepoInfo) { +} + type RepoInstallation struct { // the user-specified repo name for their installed git repo listing RepoName types.String `tfsdk:"repo_name" tf:""` @@ -799,6 +1351,12 @@ type RepoInstallation struct { RepoPath types.String `tfsdk:"repo_path" tf:""` } +func (newState *RepoInstallation) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoInstallation) { +} + +func (newState *RepoInstallation) SyncEffectiveFieldsDuringRead(existingState RepoInstallation) { +} + // Search listings type SearchListingsRequest struct { // Matches any of the following asset types @@ -819,18 +1377,36 @@ type SearchListingsRequest struct { Query types.String `tfsdk:"-"` } +func (newState *SearchListingsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchListingsRequest) { +} + +func (newState *SearchListingsRequest) SyncEffectiveFieldsDuringRead(existingState SearchListingsRequest) { +} + type SearchListingsResponse struct { Listings []Listing `tfsdk:"listings" tf:"optional"` NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *SearchListingsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchListingsResponse) { +} + +func (newState *SearchListingsResponse) SyncEffectiveFieldsDuringRead(existingState SearchListingsResponse) { +} + type ShareInfo struct { Name types.String `tfsdk:"name" tf:""` Type types.String `tfsdk:"type" tf:""` } +func (newState *ShareInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ShareInfo) { +} + +func (newState *ShareInfo) SyncEffectiveFieldsDuringRead(existingState ShareInfo) { +} + type SharedDataObject struct { // The type of the data object. Could be one of: TABLE, SCHEMA, // NOTEBOOK_FILE, MODEL, VOLUME @@ -839,6 +1415,12 @@ type SharedDataObject struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *SharedDataObject) SyncEffectiveFieldsDuringCreateOrUpdate(plan SharedDataObject) { +} + +func (newState *SharedDataObject) SyncEffectiveFieldsDuringRead(existingState SharedDataObject) { +} + type TokenDetail struct { BearerToken types.String `tfsdk:"bearerToken" tf:"optional"` @@ -851,6 +1433,12 @@ type TokenDetail struct { ShareCredentialsVersion types.Int64 `tfsdk:"shareCredentialsVersion" tf:"optional"` } +func (newState *TokenDetail) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenDetail) { +} + +func (newState *TokenDetail) SyncEffectiveFieldsDuringRead(existingState TokenDetail) { +} + type TokenInfo struct { // Full activation url to retrieve the access token. It will be empty if the // token is already retrieved. 
@@ -869,26 +1457,56 @@ type TokenInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *TokenInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenInfo) { +} + +func (newState *TokenInfo) SyncEffectiveFieldsDuringRead(existingState TokenInfo) { +} + type UpdateExchangeFilterRequest struct { Filter []ExchangeFilter `tfsdk:"filter" tf:"object"` Id types.String `tfsdk:"-"` } +func (newState *UpdateExchangeFilterRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateExchangeFilterRequest) { +} + +func (newState *UpdateExchangeFilterRequest) SyncEffectiveFieldsDuringRead(existingState UpdateExchangeFilterRequest) { +} + type UpdateExchangeFilterResponse struct { Filter []ExchangeFilter `tfsdk:"filter" tf:"optional,object"` } +func (newState *UpdateExchangeFilterResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateExchangeFilterResponse) { +} + +func (newState *UpdateExchangeFilterResponse) SyncEffectiveFieldsDuringRead(existingState UpdateExchangeFilterResponse) { +} + type UpdateExchangeRequest struct { Exchange []Exchange `tfsdk:"exchange" tf:"object"` Id types.String `tfsdk:"-"` } +func (newState *UpdateExchangeRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateExchangeRequest) { +} + +func (newState *UpdateExchangeRequest) SyncEffectiveFieldsDuringRead(existingState UpdateExchangeRequest) { +} + type UpdateExchangeResponse struct { Exchange []Exchange `tfsdk:"exchange" tf:"optional,object"` } +func (newState *UpdateExchangeResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateExchangeResponse) { +} + +func (newState *UpdateExchangeResponse) SyncEffectiveFieldsDuringRead(existingState UpdateExchangeResponse) { +} + type UpdateInstallationRequest struct { Installation []InstallationDetail `tfsdk:"installation" tf:"object"` @@ -899,20 +1517,44 @@ type UpdateInstallationRequest struct { RotateToken types.Bool `tfsdk:"rotate_token" tf:"optional"` } +func (newState *UpdateInstallationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateInstallationRequest) { +} + +func (newState *UpdateInstallationRequest) SyncEffectiveFieldsDuringRead(existingState UpdateInstallationRequest) { +} + type UpdateInstallationResponse struct { Installation []InstallationDetail `tfsdk:"installation" tf:"optional,object"` } +func (newState *UpdateInstallationResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateInstallationResponse) { +} + +func (newState *UpdateInstallationResponse) SyncEffectiveFieldsDuringRead(existingState UpdateInstallationResponse) { +} + type UpdateListingRequest struct { Id types.String `tfsdk:"-"` Listing []Listing `tfsdk:"listing" tf:"object"` } +func (newState *UpdateListingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateListingRequest) { +} + +func (newState *UpdateListingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateListingRequest) { +} + type UpdateListingResponse struct { Listing []Listing `tfsdk:"listing" tf:"optional,object"` } +func (newState *UpdateListingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateListingResponse) { +} + +func (newState *UpdateListingResponse) SyncEffectiveFieldsDuringRead(existingState UpdateListingResponse) { +} + type UpdatePersonalizationRequestRequest struct { ListingId types.String `tfsdk:"-"` @@ -925,10 +1567,22 @@ type UpdatePersonalizationRequestRequest struct { Status types.String `tfsdk:"status" tf:""` } +func (newState *UpdatePersonalizationRequestRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan 
UpdatePersonalizationRequestRequest) { +} + +func (newState *UpdatePersonalizationRequestRequest) SyncEffectiveFieldsDuringRead(existingState UpdatePersonalizationRequestRequest) { +} + type UpdatePersonalizationRequestResponse struct { Request []PersonalizationRequest `tfsdk:"request" tf:"optional,object"` } +func (newState *UpdatePersonalizationRequestResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdatePersonalizationRequestResponse) { +} + +func (newState *UpdatePersonalizationRequestResponse) SyncEffectiveFieldsDuringRead(existingState UpdatePersonalizationRequestResponse) { +} + type UpdateProviderAnalyticsDashboardRequest struct { // id is immutable property and can't be updated. Id types.String `tfsdk:"-"` @@ -938,6 +1592,12 @@ type UpdateProviderAnalyticsDashboardRequest struct { Version types.Int64 `tfsdk:"version" tf:"optional"` } +func (newState *UpdateProviderAnalyticsDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateProviderAnalyticsDashboardRequest) { +} + +func (newState *UpdateProviderAnalyticsDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateProviderAnalyticsDashboardRequest) { +} + type UpdateProviderAnalyticsDashboardResponse struct { // this is newly created Lakeview dashboard for the user DashboardId types.String `tfsdk:"dashboard_id" tf:""` @@ -947,12 +1607,30 @@ type UpdateProviderAnalyticsDashboardResponse struct { Version types.Int64 `tfsdk:"version" tf:"optional"` } +func (newState *UpdateProviderAnalyticsDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateProviderAnalyticsDashboardResponse) { +} + +func (newState *UpdateProviderAnalyticsDashboardResponse) SyncEffectiveFieldsDuringRead(existingState UpdateProviderAnalyticsDashboardResponse) { +} + type UpdateProviderRequest struct { Id types.String `tfsdk:"-"` Provider []ProviderInfo `tfsdk:"provider" tf:"object"` } +func (newState *UpdateProviderRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateProviderRequest) { +} + +func (newState *UpdateProviderRequest) SyncEffectiveFieldsDuringRead(existingState UpdateProviderRequest) { +} + type UpdateProviderResponse struct { Provider []ProviderInfo `tfsdk:"provider" tf:"optional,object"` } + +func (newState *UpdateProviderResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateProviderResponse) { +} + +func (newState *UpdateProviderResponse) SyncEffectiveFieldsDuringRead(existingState UpdateProviderResponse) { +} diff --git a/internal/service/ml_tf/model.go b/internal/service/ml_tf/model.go index e3e52c78a5..c978e6a2c4 100755 --- a/internal/service/ml_tf/model.go +++ b/internal/service/ml_tf/model.go @@ -70,6 +70,12 @@ type Activity struct { UserId types.String `tfsdk:"user_id" tf:"optional"` } +func (newState *Activity) SyncEffectiveFieldsDuringCreateOrUpdate(plan Activity) { +} + +func (newState *Activity) SyncEffectiveFieldsDuringRead(existingState Activity) { +} + type ApproveTransitionRequest struct { // Specifies whether to archive all current model versions in the target // stage. @@ -92,11 +98,23 @@ type ApproveTransitionRequest struct { Version types.String `tfsdk:"version" tf:""` } +func (newState *ApproveTransitionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ApproveTransitionRequest) { +} + +func (newState *ApproveTransitionRequest) SyncEffectiveFieldsDuringRead(existingState ApproveTransitionRequest) { +} + type ApproveTransitionRequestResponse struct { // Activity recorded for the action. 
Activity []Activity `tfsdk:"activity" tf:"optional,object"` } +func (newState *ApproveTransitionRequestResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ApproveTransitionRequestResponse) { +} + +func (newState *ApproveTransitionRequestResponse) SyncEffectiveFieldsDuringRead(existingState ApproveTransitionRequestResponse) { +} + // Comment details. type CommentObject struct { // Array of actions on the activity allowed for the current viewer. @@ -113,6 +131,12 @@ type CommentObject struct { UserId types.String `tfsdk:"user_id" tf:"optional"` } +func (newState *CommentObject) SyncEffectiveFieldsDuringCreateOrUpdate(plan CommentObject) { +} + +func (newState *CommentObject) SyncEffectiveFieldsDuringRead(existingState CommentObject) { +} + type CreateComment struct { // User-provided comment on the action. Comment types.String `tfsdk:"comment" tf:""` @@ -122,11 +146,23 @@ type CreateComment struct { Version types.String `tfsdk:"version" tf:""` } +func (newState *CreateComment) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateComment) { +} + +func (newState *CreateComment) SyncEffectiveFieldsDuringRead(existingState CreateComment) { +} + type CreateCommentResponse struct { // Comment details. Comment []CommentObject `tfsdk:"comment" tf:"optional,object"` } +func (newState *CreateCommentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCommentResponse) { +} + +func (newState *CreateCommentResponse) SyncEffectiveFieldsDuringRead(existingState CreateCommentResponse) { +} + type CreateExperiment struct { // Location where all artifacts for the experiment are stored. If not // provided, the remote server will select an appropriate default. @@ -141,11 +177,23 @@ type CreateExperiment struct { Tags []ExperimentTag `tfsdk:"tags" tf:"optional"` } +func (newState *CreateExperiment) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateExperiment) { +} + +func (newState *CreateExperiment) SyncEffectiveFieldsDuringRead(existingState CreateExperiment) { +} + type CreateExperimentResponse struct { // Unique identifier for the experiment. ExperimentId types.String `tfsdk:"experiment_id" tf:"optional"` } +func (newState *CreateExperimentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateExperimentResponse) { +} + +func (newState *CreateExperimentResponse) SyncEffectiveFieldsDuringRead(existingState CreateExperimentResponse) { +} + type CreateModelRequest struct { // Optional description for registered model. Description types.String `tfsdk:"description" tf:"optional"` @@ -155,10 +203,22 @@ type CreateModelRequest struct { Tags []ModelTag `tfsdk:"tags" tf:"optional"` } +func (newState *CreateModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateModelRequest) { +} + +func (newState *CreateModelRequest) SyncEffectiveFieldsDuringRead(existingState CreateModelRequest) { +} + type CreateModelResponse struct { RegisteredModel []Model `tfsdk:"registered_model" tf:"optional,object"` } +func (newState *CreateModelResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateModelResponse) { +} + +func (newState *CreateModelResponse) SyncEffectiveFieldsDuringRead(existingState CreateModelResponse) { +} + type CreateModelVersionRequest struct { // Optional description for model version. 
Description types.String `tfsdk:"description" tf:"optional"` @@ -176,11 +236,23 @@ type CreateModelVersionRequest struct { Tags []ModelVersionTag `tfsdk:"tags" tf:"optional"` } +func (newState *CreateModelVersionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateModelVersionRequest) { +} + +func (newState *CreateModelVersionRequest) SyncEffectiveFieldsDuringRead(existingState CreateModelVersionRequest) { +} + type CreateModelVersionResponse struct { // Return new version number generated for this model in registry. ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional,object"` } +func (newState *CreateModelVersionResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateModelVersionResponse) { +} + +func (newState *CreateModelVersionResponse) SyncEffectiveFieldsDuringRead(existingState CreateModelVersionResponse) { +} + type CreateRegistryWebhook struct { // User-specified description for the webhook. Description types.String `tfsdk:"description" tf:"optional"` @@ -235,6 +307,12 @@ type CreateRegistryWebhook struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *CreateRegistryWebhook) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateRegistryWebhook) { +} + +func (newState *CreateRegistryWebhook) SyncEffectiveFieldsDuringRead(existingState CreateRegistryWebhook) { +} + type CreateRun struct { // ID of the associated experiment. ExperimentId types.String `tfsdk:"experiment_id" tf:"optional"` @@ -248,11 +326,23 @@ type CreateRun struct { UserId types.String `tfsdk:"user_id" tf:"optional"` } +func (newState *CreateRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateRun) { +} + +func (newState *CreateRun) SyncEffectiveFieldsDuringRead(existingState CreateRun) { +} + type CreateRunResponse struct { // The newly created run. Run []Run `tfsdk:"run" tf:"optional,object"` } +func (newState *CreateRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateRunResponse) { +} + +func (newState *CreateRunResponse) SyncEffectiveFieldsDuringRead(existingState CreateRunResponse) { +} + type CreateTransitionRequest struct { // User-provided comment on the action. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -272,15 +362,33 @@ type CreateTransitionRequest struct { Version types.String `tfsdk:"version" tf:""` } +func (newState *CreateTransitionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateTransitionRequest) { +} + +func (newState *CreateTransitionRequest) SyncEffectiveFieldsDuringRead(existingState CreateTransitionRequest) { +} + type CreateTransitionRequestResponse struct { // Transition request details. Request []TransitionRequest `tfsdk:"request" tf:"optional,object"` } +func (newState *CreateTransitionRequestResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateTransitionRequestResponse) { +} + +func (newState *CreateTransitionRequestResponse) SyncEffectiveFieldsDuringRead(existingState CreateTransitionRequestResponse) { +} + type CreateWebhookResponse struct { Webhook []RegistryWebhook `tfsdk:"webhook" tf:"optional,object"` } +func (newState *CreateWebhookResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateWebhookResponse) { +} + +func (newState *CreateWebhookResponse) SyncEffectiveFieldsDuringRead(existingState CreateWebhookResponse) { +} + type Dataset struct { // Dataset digest, e.g. an md5 hash of the dataset that uniquely identifies // it within datasets of the same name. 
@@ -304,6 +412,12 @@ type Dataset struct { SourceType types.String `tfsdk:"source_type" tf:"optional"` } +func (newState *Dataset) SyncEffectiveFieldsDuringCreateOrUpdate(plan Dataset) { +} + +func (newState *Dataset) SyncEffectiveFieldsDuringRead(existingState Dataset) { +} + type DatasetInput struct { // The dataset being used as a Run input. Dataset []Dataset `tfsdk:"dataset" tf:"optional,object"` @@ -312,31 +426,73 @@ type DatasetInput struct { Tags []InputTag `tfsdk:"tags" tf:"optional"` } +func (newState *DatasetInput) SyncEffectiveFieldsDuringCreateOrUpdate(plan DatasetInput) { +} + +func (newState *DatasetInput) SyncEffectiveFieldsDuringRead(existingState DatasetInput) { +} + // Delete a comment type DeleteCommentRequest struct { Id types.String `tfsdk:"-"` } +func (newState *DeleteCommentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCommentRequest) { +} + +func (newState *DeleteCommentRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCommentRequest) { +} + type DeleteCommentResponse struct { } +func (newState *DeleteCommentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCommentResponse) { +} + +func (newState *DeleteCommentResponse) SyncEffectiveFieldsDuringRead(existingState DeleteCommentResponse) { +} + type DeleteExperiment struct { // ID of the associated experiment. ExperimentId types.String `tfsdk:"experiment_id" tf:""` } +func (newState *DeleteExperiment) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteExperiment) { +} + +func (newState *DeleteExperiment) SyncEffectiveFieldsDuringRead(existingState DeleteExperiment) { +} + type DeleteExperimentResponse struct { } +func (newState *DeleteExperimentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteExperimentResponse) { +} + +func (newState *DeleteExperimentResponse) SyncEffectiveFieldsDuringRead(existingState DeleteExperimentResponse) { +} + // Delete a model type DeleteModelRequest struct { // Registered model unique name identifier. Name types.String `tfsdk:"-"` } +func (newState *DeleteModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelRequest) { +} + +func (newState *DeleteModelRequest) SyncEffectiveFieldsDuringRead(existingState DeleteModelRequest) { +} + type DeleteModelResponse struct { } +func (newState *DeleteModelResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelResponse) { +} + +func (newState *DeleteModelResponse) SyncEffectiveFieldsDuringRead(existingState DeleteModelResponse) { +} + // Delete a model tag type DeleteModelTagRequest struct { // Name of the tag. The name must be an exact match; wild-card deletion is @@ -346,9 +502,21 @@ type DeleteModelTagRequest struct { Name types.String `tfsdk:"-"` } +func (newState *DeleteModelTagRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelTagRequest) { +} + +func (newState *DeleteModelTagRequest) SyncEffectiveFieldsDuringRead(existingState DeleteModelTagRequest) { +} + type DeleteModelTagResponse struct { } +func (newState *DeleteModelTagResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelTagResponse) { +} + +func (newState *DeleteModelTagResponse) SyncEffectiveFieldsDuringRead(existingState DeleteModelTagResponse) { +} + // Delete a model version. 
type DeleteModelVersionRequest struct { // Name of the registered model @@ -357,9 +525,21 @@ type DeleteModelVersionRequest struct { Version types.String `tfsdk:"-"` } +func (newState *DeleteModelVersionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelVersionRequest) { +} + +func (newState *DeleteModelVersionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteModelVersionRequest) { +} + type DeleteModelVersionResponse struct { } +func (newState *DeleteModelVersionResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelVersionResponse) { +} + +func (newState *DeleteModelVersionResponse) SyncEffectiveFieldsDuringRead(existingState DeleteModelVersionResponse) { +} + // Delete a model version tag type DeleteModelVersionTagRequest struct { // Name of the tag. The name must be an exact match; wild-card deletion is @@ -371,17 +551,41 @@ type DeleteModelVersionTagRequest struct { Version types.String `tfsdk:"-"` } +func (newState *DeleteModelVersionTagRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelVersionTagRequest) { +} + +func (newState *DeleteModelVersionTagRequest) SyncEffectiveFieldsDuringRead(existingState DeleteModelVersionTagRequest) { +} + type DeleteModelVersionTagResponse struct { } +func (newState *DeleteModelVersionTagResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteModelVersionTagResponse) { +} + +func (newState *DeleteModelVersionTagResponse) SyncEffectiveFieldsDuringRead(existingState DeleteModelVersionTagResponse) { +} + type DeleteRun struct { // ID of the run to delete. RunId types.String `tfsdk:"run_id" tf:""` } +func (newState *DeleteRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRun) { +} + +func (newState *DeleteRun) SyncEffectiveFieldsDuringRead(existingState DeleteRun) { +} + type DeleteRunResponse struct { } +func (newState *DeleteRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRunResponse) { +} + +func (newState *DeleteRunResponse) SyncEffectiveFieldsDuringRead(existingState DeleteRunResponse) { +} + type DeleteRuns struct { // The ID of the experiment containing the runs to delete. ExperimentId types.String `tfsdk:"experiment_id" tf:""` @@ -394,11 +598,23 @@ type DeleteRuns struct { MaxTimestampMillis types.Int64 `tfsdk:"max_timestamp_millis" tf:""` } +func (newState *DeleteRuns) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRuns) { +} + +func (newState *DeleteRuns) SyncEffectiveFieldsDuringRead(existingState DeleteRuns) { +} + type DeleteRunsResponse struct { // The number of runs deleted. RunsDeleted types.Int64 `tfsdk:"runs_deleted" tf:"optional"` } +func (newState *DeleteRunsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRunsResponse) { +} + +func (newState *DeleteRunsResponse) SyncEffectiveFieldsDuringRead(existingState DeleteRunsResponse) { +} + type DeleteTag struct { // Name of the tag. Maximum size is 255 bytes. Must be provided. 
Key types.String `tfsdk:"key" tf:""` @@ -406,9 +622,21 @@ type DeleteTag struct { RunId types.String `tfsdk:"run_id" tf:""` } +func (newState *DeleteTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteTag) { +} + +func (newState *DeleteTag) SyncEffectiveFieldsDuringRead(existingState DeleteTag) { +} + type DeleteTagResponse struct { } +func (newState *DeleteTagResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteTagResponse) { +} + +func (newState *DeleteTagResponse) SyncEffectiveFieldsDuringRead(existingState DeleteTagResponse) { +} + // Delete a transition request type DeleteTransitionRequestRequest struct { // User-provided comment on the action. @@ -433,18 +661,42 @@ type DeleteTransitionRequestRequest struct { Version types.String `tfsdk:"-"` } +func (newState *DeleteTransitionRequestRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteTransitionRequestRequest) { +} + +func (newState *DeleteTransitionRequestRequest) SyncEffectiveFieldsDuringRead(existingState DeleteTransitionRequestRequest) { +} + type DeleteTransitionRequestResponse struct { } +func (newState *DeleteTransitionRequestResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteTransitionRequestResponse) { +} + +func (newState *DeleteTransitionRequestResponse) SyncEffectiveFieldsDuringRead(existingState DeleteTransitionRequestResponse) { +} + // Delete a webhook type DeleteWebhookRequest struct { // Webhook ID required to delete a registry webhook. Id types.String `tfsdk:"-"` } +func (newState *DeleteWebhookRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteWebhookRequest) { +} + +func (newState *DeleteWebhookRequest) SyncEffectiveFieldsDuringRead(existingState DeleteWebhookRequest) { +} + type DeleteWebhookResponse struct { } +func (newState *DeleteWebhookResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteWebhookResponse) { +} + +func (newState *DeleteWebhookResponse) SyncEffectiveFieldsDuringRead(existingState DeleteWebhookResponse) { +} + type Experiment struct { // Location where artifacts for the experiment are stored. ArtifactLocation types.String `tfsdk:"artifact_location" tf:"optional"` @@ -463,6 +715,12 @@ type Experiment struct { Tags []ExperimentTag `tfsdk:"tags" tf:"optional"` } +func (newState *Experiment) SyncEffectiveFieldsDuringCreateOrUpdate(plan Experiment) { +} + +func (newState *Experiment) SyncEffectiveFieldsDuringRead(existingState Experiment) { +} + type ExperimentAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -474,6 +732,12 @@ type ExperimentAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *ExperimentAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExperimentAccessControlRequest) { +} + +func (newState *ExperimentAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState ExperimentAccessControlRequest) { +} + type ExperimentAccessControlResponse struct { // All permissions. 
AllPermissions []ExperimentPermission `tfsdk:"all_permissions" tf:"optional"` @@ -487,6 +751,12 @@ type ExperimentAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *ExperimentAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExperimentAccessControlResponse) { +} + +func (newState *ExperimentAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState ExperimentAccessControlResponse) { +} + type ExperimentPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -495,6 +765,12 @@ type ExperimentPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *ExperimentPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExperimentPermission) { +} + +func (newState *ExperimentPermission) SyncEffectiveFieldsDuringRead(existingState ExperimentPermission) { +} + type ExperimentPermissions struct { AccessControlList []ExperimentAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -503,18 +779,36 @@ type ExperimentPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *ExperimentPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExperimentPermissions) { +} + +func (newState *ExperimentPermissions) SyncEffectiveFieldsDuringRead(existingState ExperimentPermissions) { +} + type ExperimentPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *ExperimentPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExperimentPermissionsDescription) { +} + +func (newState *ExperimentPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState ExperimentPermissionsDescription) { +} + type ExperimentPermissionsRequest struct { AccessControlList []ExperimentAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The experiment for which to get or manage permissions. ExperimentId types.String `tfsdk:"-"` } +func (newState *ExperimentPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExperimentPermissionsRequest) { +} + +func (newState *ExperimentPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState ExperimentPermissionsRequest) { +} + type ExperimentTag struct { // The tag key. Key types.String `tfsdk:"key" tf:"optional"` @@ -522,6 +816,12 @@ type ExperimentTag struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *ExperimentTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExperimentTag) { +} + +func (newState *ExperimentTag) SyncEffectiveFieldsDuringRead(existingState ExperimentTag) { +} + type FileInfo struct { // Size in bytes. Unset for directories. FileSize types.Int64 `tfsdk:"file_size" tf:"optional"` @@ -531,40 +831,82 @@ type FileInfo struct { Path types.String `tfsdk:"path" tf:"optional"` } +func (newState *FileInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan FileInfo) { +} + +func (newState *FileInfo) SyncEffectiveFieldsDuringRead(existingState FileInfo) { +} + // Get metadata type GetByNameRequest struct { // Name of the associated experiment. 
ExperimentName types.String `tfsdk:"-"` } +func (newState *GetByNameRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetByNameRequest) { +} + +func (newState *GetByNameRequest) SyncEffectiveFieldsDuringRead(existingState GetByNameRequest) { +} + // Get experiment permission levels type GetExperimentPermissionLevelsRequest struct { // The experiment for which to get or manage permissions. ExperimentId types.String `tfsdk:"-"` } +func (newState *GetExperimentPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetExperimentPermissionLevelsRequest) { +} + +func (newState *GetExperimentPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetExperimentPermissionLevelsRequest) { +} + type GetExperimentPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []ExperimentPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetExperimentPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetExperimentPermissionLevelsResponse) { +} + +func (newState *GetExperimentPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetExperimentPermissionLevelsResponse) { +} + // Get experiment permissions type GetExperimentPermissionsRequest struct { // The experiment for which to get or manage permissions. ExperimentId types.String `tfsdk:"-"` } +func (newState *GetExperimentPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetExperimentPermissionsRequest) { +} + +func (newState *GetExperimentPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetExperimentPermissionsRequest) { +} + // Get an experiment type GetExperimentRequest struct { // ID of the associated experiment. ExperimentId types.String `tfsdk:"-"` } +func (newState *GetExperimentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetExperimentRequest) { +} + +func (newState *GetExperimentRequest) SyncEffectiveFieldsDuringRead(existingState GetExperimentRequest) { +} + type GetExperimentResponse struct { // Experiment details. Experiment []Experiment `tfsdk:"experiment" tf:"optional,object"` } +func (newState *GetExperimentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetExperimentResponse) { +} + +func (newState *GetExperimentResponse) SyncEffectiveFieldsDuringRead(existingState GetExperimentResponse) { +} + // Get history of a given metric within a run type GetHistoryRequest struct { // Maximum number of Metric records to return per paginated request. Default @@ -582,6 +924,12 @@ type GetHistoryRequest struct { RunUuid types.String `tfsdk:"-"` } +func (newState *GetHistoryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetHistoryRequest) { +} + +func (newState *GetHistoryRequest) SyncEffectiveFieldsDuringRead(existingState GetHistoryRequest) { +} + type GetLatestVersionsRequest struct { // Registered model unique name identifier. Name types.String `tfsdk:"name" tf:""` @@ -589,6 +937,12 @@ type GetLatestVersionsRequest struct { Stages []types.String `tfsdk:"stages" tf:"optional"` } +func (newState *GetLatestVersionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetLatestVersionsRequest) { +} + +func (newState *GetLatestVersionsRequest) SyncEffectiveFieldsDuringRead(existingState GetLatestVersionsRequest) { +} + type GetLatestVersionsResponse struct { // Latest version models for each requests stage. Only return models with // current `READY` status. 
If no `stages` provided, returns the latest @@ -596,6 +950,12 @@ type GetLatestVersionsResponse struct { ModelVersions []ModelVersion `tfsdk:"model_versions" tf:"optional"` } +func (newState *GetLatestVersionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetLatestVersionsResponse) { +} + +func (newState *GetLatestVersionsResponse) SyncEffectiveFieldsDuringRead(existingState GetLatestVersionsResponse) { +} + type GetMetricHistoryResponse struct { // All logged values for this metric. Metrics []Metric `tfsdk:"metrics" tf:"optional"` @@ -604,16 +964,34 @@ type GetMetricHistoryResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *GetMetricHistoryResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetMetricHistoryResponse) { +} + +func (newState *GetMetricHistoryResponse) SyncEffectiveFieldsDuringRead(existingState GetMetricHistoryResponse) { +} + // Get model type GetModelRequest struct { // Registered model unique name identifier. Name types.String `tfsdk:"-"` } +func (newState *GetModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetModelRequest) { +} + +func (newState *GetModelRequest) SyncEffectiveFieldsDuringRead(existingState GetModelRequest) { +} + type GetModelResponse struct { RegisteredModelDatabricks []ModelDatabricks `tfsdk:"registered_model_databricks" tf:"optional,object"` } +func (newState *GetModelResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetModelResponse) { +} + +func (newState *GetModelResponse) SyncEffectiveFieldsDuringRead(existingState GetModelResponse) { +} + // Get a model version URI type GetModelVersionDownloadUriRequest struct { // Name of the registered model @@ -622,11 +1000,23 @@ type GetModelVersionDownloadUriRequest struct { Version types.String `tfsdk:"-"` } +func (newState *GetModelVersionDownloadUriRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetModelVersionDownloadUriRequest) { +} + +func (newState *GetModelVersionDownloadUriRequest) SyncEffectiveFieldsDuringRead(existingState GetModelVersionDownloadUriRequest) { +} + type GetModelVersionDownloadUriResponse struct { // URI corresponding to where artifacts for this model version are stored. ArtifactUri types.String `tfsdk:"artifact_uri" tf:"optional"` } +func (newState *GetModelVersionDownloadUriResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetModelVersionDownloadUriResponse) { +} + +func (newState *GetModelVersionDownloadUriResponse) SyncEffectiveFieldsDuringRead(existingState GetModelVersionDownloadUriResponse) { +} + // Get a model version type GetModelVersionRequest struct { // Name of the registered model @@ -635,27 +1025,57 @@ type GetModelVersionRequest struct { Version types.String `tfsdk:"-"` } +func (newState *GetModelVersionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetModelVersionRequest) { +} + +func (newState *GetModelVersionRequest) SyncEffectiveFieldsDuringRead(existingState GetModelVersionRequest) { +} + type GetModelVersionResponse struct { ModelVersion []ModelVersion `tfsdk:"model_version" tf:"optional,object"` } +func (newState *GetModelVersionResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetModelVersionResponse) { +} + +func (newState *GetModelVersionResponse) SyncEffectiveFieldsDuringRead(existingState GetModelVersionResponse) { +} + // Get registered model permission levels type GetRegisteredModelPermissionLevelsRequest struct { // The registered model for which to get or manage permissions. 
RegisteredModelId types.String `tfsdk:"-"` } +func (newState *GetRegisteredModelPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRegisteredModelPermissionLevelsRequest) { +} + +func (newState *GetRegisteredModelPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetRegisteredModelPermissionLevelsRequest) { +} + type GetRegisteredModelPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []RegisteredModelPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetRegisteredModelPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRegisteredModelPermissionLevelsResponse) { +} + +func (newState *GetRegisteredModelPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetRegisteredModelPermissionLevelsResponse) { +} + // Get registered model permissions type GetRegisteredModelPermissionsRequest struct { // The registered model for which to get or manage permissions. RegisteredModelId types.String `tfsdk:"-"` } +func (newState *GetRegisteredModelPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRegisteredModelPermissionsRequest) { +} + +func (newState *GetRegisteredModelPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetRegisteredModelPermissionsRequest) { +} + // Get a run type GetRunRequest struct { // ID of the run to fetch. Must be provided. @@ -665,12 +1085,24 @@ type GetRunRequest struct { RunUuid types.String `tfsdk:"-"` } +func (newState *GetRunRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRunRequest) { +} + +func (newState *GetRunRequest) SyncEffectiveFieldsDuringRead(existingState GetRunRequest) { +} + type GetRunResponse struct { // Run metadata (name, start time, etc) and data (metrics, params, and // tags). Run []Run `tfsdk:"run" tf:"optional,object"` } +func (newState *GetRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRunResponse) { +} + +func (newState *GetRunResponse) SyncEffectiveFieldsDuringRead(existingState GetRunResponse) { +} + type HttpUrlSpec struct { // Value of the authorization header that should be sent in the request sent // by the wehbook. It should be of the form `" "`. @@ -693,6 +1125,12 @@ type HttpUrlSpec struct { Url types.String `tfsdk:"url" tf:""` } +func (newState *HttpUrlSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan HttpUrlSpec) { +} + +func (newState *HttpUrlSpec) SyncEffectiveFieldsDuringRead(existingState HttpUrlSpec) { +} + type HttpUrlSpecWithoutSecret struct { // Enable/disable SSL certificate validation. Default is true. For // self-signed certificates, this field must be false AND the destination @@ -706,6 +1144,12 @@ type HttpUrlSpecWithoutSecret struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *HttpUrlSpecWithoutSecret) SyncEffectiveFieldsDuringCreateOrUpdate(plan HttpUrlSpecWithoutSecret) { +} + +func (newState *HttpUrlSpecWithoutSecret) SyncEffectiveFieldsDuringRead(existingState HttpUrlSpecWithoutSecret) { +} + type InputTag struct { // The tag key. Key types.String `tfsdk:"key" tf:"optional"` @@ -713,6 +1157,12 @@ type InputTag struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *InputTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan InputTag) { +} + +func (newState *InputTag) SyncEffectiveFieldsDuringRead(existingState InputTag) { +} + type JobSpec struct { // The personal access token used to authorize webhook's job runs. 
AccessToken types.String `tfsdk:"access_token" tf:""` @@ -724,6 +1174,12 @@ type JobSpec struct { WorkspaceUrl types.String `tfsdk:"workspace_url" tf:"optional"` } +func (newState *JobSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobSpec) { +} + +func (newState *JobSpec) SyncEffectiveFieldsDuringRead(existingState JobSpec) { +} + type JobSpecWithoutSecret struct { // ID of the job that the webhook runs. JobId types.String `tfsdk:"job_id" tf:"optional"` @@ -733,6 +1189,12 @@ type JobSpecWithoutSecret struct { WorkspaceUrl types.String `tfsdk:"workspace_url" tf:"optional"` } +func (newState *JobSpecWithoutSecret) SyncEffectiveFieldsDuringCreateOrUpdate(plan JobSpecWithoutSecret) { +} + +func (newState *JobSpecWithoutSecret) SyncEffectiveFieldsDuringRead(existingState JobSpecWithoutSecret) { +} + // Get all artifacts type ListArtifactsRequest struct { // Token indicating the page of artifact results to fetch. `page_token` is @@ -752,6 +1214,12 @@ type ListArtifactsRequest struct { RunUuid types.String `tfsdk:"-"` } +func (newState *ListArtifactsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListArtifactsRequest) { +} + +func (newState *ListArtifactsRequest) SyncEffectiveFieldsDuringRead(existingState ListArtifactsRequest) { +} + type ListArtifactsResponse struct { // File location and metadata for artifacts. Files []FileInfo `tfsdk:"files" tf:"optional"` @@ -761,6 +1229,12 @@ type ListArtifactsResponse struct { RootUri types.String `tfsdk:"root_uri" tf:"optional"` } +func (newState *ListArtifactsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListArtifactsResponse) { +} + +func (newState *ListArtifactsResponse) SyncEffectiveFieldsDuringRead(existingState ListArtifactsResponse) { +} + // List experiments type ListExperimentsRequest struct { // Maximum number of experiments desired. If `max_results` is unspecified, @@ -776,6 +1250,12 @@ type ListExperimentsRequest struct { ViewType types.String `tfsdk:"-"` } +func (newState *ListExperimentsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExperimentsRequest) { +} + +func (newState *ListExperimentsRequest) SyncEffectiveFieldsDuringRead(existingState ListExperimentsRequest) { +} + type ListExperimentsResponse struct { // Paginated Experiments beginning with the first item on the requested // page. @@ -785,6 +1265,12 @@ type ListExperimentsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListExperimentsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListExperimentsResponse) { +} + +func (newState *ListExperimentsResponse) SyncEffectiveFieldsDuringRead(existingState ListExperimentsResponse) { +} + // List models type ListModelsRequest struct { // Maximum number of registered models desired. Max threshold is 1000. @@ -793,6 +1279,12 @@ type ListModelsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListModelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListModelsRequest) { +} + +func (newState *ListModelsRequest) SyncEffectiveFieldsDuringRead(existingState ListModelsRequest) { +} + type ListModelsResponse struct { // Pagination token to request next page of models for the same query. 
NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` @@ -800,6 +1292,12 @@ type ListModelsResponse struct { RegisteredModels []Model `tfsdk:"registered_models" tf:"optional"` } +func (newState *ListModelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListModelsResponse) { +} + +func (newState *ListModelsResponse) SyncEffectiveFieldsDuringRead(existingState ListModelsResponse) { +} + type ListRegistryWebhooks struct { // Token that can be used to retrieve the next page of artifact results NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` @@ -807,6 +1305,12 @@ type ListRegistryWebhooks struct { Webhooks []RegistryWebhook `tfsdk:"webhooks" tf:"optional"` } +func (newState *ListRegistryWebhooks) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListRegistryWebhooks) { +} + +func (newState *ListRegistryWebhooks) SyncEffectiveFieldsDuringRead(existingState ListRegistryWebhooks) { +} + // List transition requests type ListTransitionRequestsRequest struct { // Name of the model. @@ -815,11 +1319,23 @@ type ListTransitionRequestsRequest struct { Version types.String `tfsdk:"-"` } +func (newState *ListTransitionRequestsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListTransitionRequestsRequest) { +} + +func (newState *ListTransitionRequestsRequest) SyncEffectiveFieldsDuringRead(existingState ListTransitionRequestsRequest) { +} + type ListTransitionRequestsResponse struct { // Array of open transition requests. Requests []Activity `tfsdk:"requests" tf:"optional"` } +func (newState *ListTransitionRequestsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListTransitionRequestsResponse) { +} + +func (newState *ListTransitionRequestsResponse) SyncEffectiveFieldsDuringRead(existingState ListTransitionRequestsResponse) { +} + // List registry webhooks type ListWebhooksRequest struct { // If `events` is specified, any webhook with one or more of the specified @@ -833,6 +1349,12 @@ type ListWebhooksRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListWebhooksRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListWebhooksRequest) { +} + +func (newState *ListWebhooksRequest) SyncEffectiveFieldsDuringRead(existingState ListWebhooksRequest) { +} + type LogBatch struct { // Metrics to log. A single request can contain up to 1000 metrics, and up // to 1000 metrics, params, and tags in total. 
@@ -847,9 +1369,21 @@ type LogBatch struct { Tags []RunTag `tfsdk:"tags" tf:"optional"` } +func (newState *LogBatch) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogBatch) { +} + +func (newState *LogBatch) SyncEffectiveFieldsDuringRead(existingState LogBatch) { +} + type LogBatchResponse struct { } +func (newState *LogBatchResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogBatchResponse) { +} + +func (newState *LogBatchResponse) SyncEffectiveFieldsDuringRead(existingState LogBatchResponse) { +} + type LogInputs struct { // Dataset inputs Datasets []DatasetInput `tfsdk:"datasets" tf:"optional"` @@ -857,9 +1391,21 @@ type LogInputs struct { RunId types.String `tfsdk:"run_id" tf:"optional"` } +func (newState *LogInputs) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogInputs) { +} + +func (newState *LogInputs) SyncEffectiveFieldsDuringRead(existingState LogInputs) { +} + type LogInputsResponse struct { } +func (newState *LogInputsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogInputsResponse) { +} + +func (newState *LogInputsResponse) SyncEffectiveFieldsDuringRead(existingState LogInputsResponse) { +} + type LogMetric struct { // Name of the metric. Key types.String `tfsdk:"key" tf:""` @@ -876,9 +1422,21 @@ type LogMetric struct { Value types.Float64 `tfsdk:"value" tf:""` } +func (newState *LogMetric) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogMetric) { +} + +func (newState *LogMetric) SyncEffectiveFieldsDuringRead(existingState LogMetric) { +} + type LogMetricResponse struct { } +func (newState *LogMetricResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogMetricResponse) { +} + +func (newState *LogMetricResponse) SyncEffectiveFieldsDuringRead(existingState LogMetricResponse) { +} + type LogModel struct { // MLmodel file in json format. ModelJson types.String `tfsdk:"model_json" tf:"optional"` @@ -886,9 +1444,21 @@ type LogModel struct { RunId types.String `tfsdk:"run_id" tf:"optional"` } +func (newState *LogModel) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogModel) { +} + +func (newState *LogModel) SyncEffectiveFieldsDuringRead(existingState LogModel) { +} + type LogModelResponse struct { } +func (newState *LogModelResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogModelResponse) { +} + +func (newState *LogModelResponse) SyncEffectiveFieldsDuringRead(existingState LogModelResponse) { +} + type LogParam struct { // Name of the param. Maximum size is 255 bytes. Key types.String `tfsdk:"key" tf:""` @@ -901,9 +1471,21 @@ type LogParam struct { Value types.String `tfsdk:"value" tf:""` } +func (newState *LogParam) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogParam) { +} + +func (newState *LogParam) SyncEffectiveFieldsDuringRead(existingState LogParam) { +} + type LogParamResponse struct { } +func (newState *LogParamResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogParamResponse) { +} + +func (newState *LogParamResponse) SyncEffectiveFieldsDuringRead(existingState LogParamResponse) { +} + type Metric struct { // Key identifying this metric. Key types.String `tfsdk:"key" tf:"optional"` @@ -915,6 +1497,12 @@ type Metric struct { Value types.Float64 `tfsdk:"value" tf:"optional"` } +func (newState *Metric) SyncEffectiveFieldsDuringCreateOrUpdate(plan Metric) { +} + +func (newState *Metric) SyncEffectiveFieldsDuringRead(existingState Metric) { +} + type Model struct { // Timestamp recorded when this `registered_model` was created. 
CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` @@ -934,6 +1522,12 @@ type Model struct { UserId types.String `tfsdk:"user_id" tf:"optional"` } +func (newState *Model) SyncEffectiveFieldsDuringCreateOrUpdate(plan Model) { +} + +func (newState *Model) SyncEffectiveFieldsDuringRead(existingState Model) { +} + type ModelDatabricks struct { // Creation time of the object, as a Unix timestamp in milliseconds. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` @@ -956,6 +1550,12 @@ type ModelDatabricks struct { UserId types.String `tfsdk:"user_id" tf:"optional"` } +func (newState *ModelDatabricks) SyncEffectiveFieldsDuringCreateOrUpdate(plan ModelDatabricks) { +} + +func (newState *ModelDatabricks) SyncEffectiveFieldsDuringRead(existingState ModelDatabricks) { +} + type ModelTag struct { // The tag key. Key types.String `tfsdk:"key" tf:"optional"` @@ -963,6 +1563,12 @@ type ModelTag struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *ModelTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan ModelTag) { +} + +func (newState *ModelTag) SyncEffectiveFieldsDuringRead(existingState ModelTag) { +} + type ModelVersion struct { // Timestamp recorded when this `model_version` was created. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` @@ -995,6 +1601,12 @@ type ModelVersion struct { Version types.String `tfsdk:"version" tf:"optional"` } +func (newState *ModelVersion) SyncEffectiveFieldsDuringCreateOrUpdate(plan ModelVersion) { +} + +func (newState *ModelVersion) SyncEffectiveFieldsDuringRead(existingState ModelVersion) { +} + type ModelVersionDatabricks struct { // Creation time of the object, as a Unix timestamp in milliseconds. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` @@ -1046,6 +1658,12 @@ type ModelVersionDatabricks struct { Version types.String `tfsdk:"version" tf:"optional"` } +func (newState *ModelVersionDatabricks) SyncEffectiveFieldsDuringCreateOrUpdate(plan ModelVersionDatabricks) { +} + +func (newState *ModelVersionDatabricks) SyncEffectiveFieldsDuringRead(existingState ModelVersionDatabricks) { +} + type ModelVersionTag struct { // The tag key. Key types.String `tfsdk:"key" tf:"optional"` @@ -1053,6 +1671,12 @@ type ModelVersionTag struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *ModelVersionTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan ModelVersionTag) { +} + +func (newState *ModelVersionTag) SyncEffectiveFieldsDuringRead(existingState ModelVersionTag) { +} + type Param struct { // Key identifying this param. Key types.String `tfsdk:"key" tf:"optional"` @@ -1060,6 +1684,12 @@ type Param struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *Param) SyncEffectiveFieldsDuringCreateOrUpdate(plan Param) { +} + +func (newState *Param) SyncEffectiveFieldsDuringRead(existingState Param) { +} + type RegisteredModelAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -1071,6 +1701,12 @@ type RegisteredModelAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *RegisteredModelAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegisteredModelAccessControlRequest) { +} + +func (newState *RegisteredModelAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState RegisteredModelAccessControlRequest) { +} + type RegisteredModelAccessControlResponse struct { // All permissions. 
AllPermissions []RegisteredModelPermission `tfsdk:"all_permissions" tf:"optional"` @@ -1084,6 +1720,12 @@ type RegisteredModelAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *RegisteredModelAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegisteredModelAccessControlResponse) { +} + +func (newState *RegisteredModelAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState RegisteredModelAccessControlResponse) { +} + type RegisteredModelPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -1092,6 +1734,12 @@ type RegisteredModelPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *RegisteredModelPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegisteredModelPermission) { +} + +func (newState *RegisteredModelPermission) SyncEffectiveFieldsDuringRead(existingState RegisteredModelPermission) { +} + type RegisteredModelPermissions struct { AccessControlList []RegisteredModelAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -1100,18 +1748,36 @@ type RegisteredModelPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *RegisteredModelPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegisteredModelPermissions) { +} + +func (newState *RegisteredModelPermissions) SyncEffectiveFieldsDuringRead(existingState RegisteredModelPermissions) { +} + type RegisteredModelPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *RegisteredModelPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegisteredModelPermissionsDescription) { +} + +func (newState *RegisteredModelPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState RegisteredModelPermissionsDescription) { +} + type RegisteredModelPermissionsRequest struct { AccessControlList []RegisteredModelAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The registered model for which to get or manage permissions. RegisteredModelId types.String `tfsdk:"-"` } +func (newState *RegisteredModelPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegisteredModelPermissionsRequest) { +} + +func (newState *RegisteredModelPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState RegisteredModelPermissionsRequest) { +} + type RegistryWebhook struct { // Creation time of the object, as a Unix timestamp in milliseconds. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` @@ -1172,6 +1838,12 @@ type RegistryWebhook struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *RegistryWebhook) SyncEffectiveFieldsDuringCreateOrUpdate(plan RegistryWebhook) { +} + +func (newState *RegistryWebhook) SyncEffectiveFieldsDuringRead(existingState RegistryWebhook) { +} + type RejectTransitionRequest struct { // User-provided comment on the action. 
Comment types.String `tfsdk:"comment" tf:"optional"` @@ -1191,11 +1863,23 @@ type RejectTransitionRequest struct { Version types.String `tfsdk:"version" tf:""` } +func (newState *RejectTransitionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RejectTransitionRequest) { +} + +func (newState *RejectTransitionRequest) SyncEffectiveFieldsDuringRead(existingState RejectTransitionRequest) { +} + type RejectTransitionRequestResponse struct { // Activity recorded for the action. Activity []Activity `tfsdk:"activity" tf:"optional,object"` } +func (newState *RejectTransitionRequestResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RejectTransitionRequestResponse) { +} + +func (newState *RejectTransitionRequestResponse) SyncEffectiveFieldsDuringRead(existingState RejectTransitionRequestResponse) { +} + type RenameModelRequest struct { // Registered model unique name identifier. Name types.String `tfsdk:"name" tf:""` @@ -1203,26 +1887,62 @@ type RenameModelRequest struct { NewName types.String `tfsdk:"new_name" tf:"optional"` } +func (newState *RenameModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RenameModelRequest) { +} + +func (newState *RenameModelRequest) SyncEffectiveFieldsDuringRead(existingState RenameModelRequest) { +} + type RenameModelResponse struct { RegisteredModel []Model `tfsdk:"registered_model" tf:"optional,object"` } +func (newState *RenameModelResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RenameModelResponse) { +} + +func (newState *RenameModelResponse) SyncEffectiveFieldsDuringRead(existingState RenameModelResponse) { +} + type RestoreExperiment struct { // ID of the associated experiment. ExperimentId types.String `tfsdk:"experiment_id" tf:""` } +func (newState *RestoreExperiment) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreExperiment) { +} + +func (newState *RestoreExperiment) SyncEffectiveFieldsDuringRead(existingState RestoreExperiment) { +} + type RestoreExperimentResponse struct { } +func (newState *RestoreExperimentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreExperimentResponse) { +} + +func (newState *RestoreExperimentResponse) SyncEffectiveFieldsDuringRead(existingState RestoreExperimentResponse) { +} + type RestoreRun struct { // ID of the run to restore. RunId types.String `tfsdk:"run_id" tf:""` } +func (newState *RestoreRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreRun) { +} + +func (newState *RestoreRun) SyncEffectiveFieldsDuringRead(existingState RestoreRun) { +} + type RestoreRunResponse struct { } +func (newState *RestoreRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreRunResponse) { +} + +func (newState *RestoreRunResponse) SyncEffectiveFieldsDuringRead(existingState RestoreRunResponse) { +} + type RestoreRuns struct { // The ID of the experiment containing the runs to restore. ExperimentId types.String `tfsdk:"experiment_id" tf:""` @@ -1235,11 +1955,23 @@ type RestoreRuns struct { MinTimestampMillis types.Int64 `tfsdk:"min_timestamp_millis" tf:""` } +func (newState *RestoreRuns) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreRuns) { +} + +func (newState *RestoreRuns) SyncEffectiveFieldsDuringRead(existingState RestoreRuns) { +} + type RestoreRunsResponse struct { // The number of runs restored. 
RunsRestored types.Int64 `tfsdk:"runs_restored" tf:"optional"` } +func (newState *RestoreRunsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreRunsResponse) { +} + +func (newState *RestoreRunsResponse) SyncEffectiveFieldsDuringRead(existingState RestoreRunsResponse) { +} + type Run struct { // Run data. Data []RunData `tfsdk:"data" tf:"optional,object"` @@ -1249,6 +1981,12 @@ type Run struct { Inputs []RunInputs `tfsdk:"inputs" tf:"optional,object"` } +func (newState *Run) SyncEffectiveFieldsDuringCreateOrUpdate(plan Run) { +} + +func (newState *Run) SyncEffectiveFieldsDuringRead(existingState Run) { +} + type RunData struct { // Run metrics. Metrics []Metric `tfsdk:"metrics" tf:"optional"` @@ -1258,6 +1996,12 @@ type RunData struct { Tags []RunTag `tfsdk:"tags" tf:"optional"` } +func (newState *RunData) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunData) { +} + +func (newState *RunData) SyncEffectiveFieldsDuringRead(existingState RunData) { +} + type RunInfo struct { // URI of the directory where artifacts should be uploaded. This can be a // local path (starting with "/"), or a distributed file system (DFS) path, @@ -1285,11 +2029,23 @@ type RunInfo struct { UserId types.String `tfsdk:"user_id" tf:"optional"` } +func (newState *RunInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunInfo) { +} + +func (newState *RunInfo) SyncEffectiveFieldsDuringRead(existingState RunInfo) { +} + type RunInputs struct { // Run metrics. DatasetInputs []DatasetInput `tfsdk:"dataset_inputs" tf:"optional"` } +func (newState *RunInputs) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunInputs) { +} + +func (newState *RunInputs) SyncEffectiveFieldsDuringRead(existingState RunInputs) { +} + type RunTag struct { // The tag key. Key types.String `tfsdk:"key" tf:"optional"` @@ -1297,6 +2053,12 @@ type RunTag struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *RunTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan RunTag) { +} + +func (newState *RunTag) SyncEffectiveFieldsDuringRead(existingState RunTag) { +} + type SearchExperiments struct { // String representing a SQL filter condition (e.g. "name ILIKE // 'my-experiment%'") @@ -1315,6 +2077,12 @@ type SearchExperiments struct { ViewType types.String `tfsdk:"view_type" tf:"optional"` } +func (newState *SearchExperiments) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchExperiments) { +} + +func (newState *SearchExperiments) SyncEffectiveFieldsDuringRead(existingState SearchExperiments) { +} + type SearchExperimentsResponse struct { // Experiments that match the search criteria Experiments []Experiment `tfsdk:"experiments" tf:"optional"` @@ -1323,6 +2091,12 @@ type SearchExperimentsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *SearchExperimentsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchExperimentsResponse) { +} + +func (newState *SearchExperimentsResponse) SyncEffectiveFieldsDuringRead(existingState SearchExperimentsResponse) { +} + // Searches model versions type SearchModelVersionsRequest struct { // String filter condition, like "name='my-model-name'". 
Must be a single @@ -1339,6 +2113,12 @@ type SearchModelVersionsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *SearchModelVersionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchModelVersionsRequest) { +} + +func (newState *SearchModelVersionsRequest) SyncEffectiveFieldsDuringRead(existingState SearchModelVersionsRequest) { +} + type SearchModelVersionsResponse struct { // Models that match the search criteria ModelVersions []ModelVersion `tfsdk:"model_versions" tf:"optional"` @@ -1347,6 +2127,12 @@ type SearchModelVersionsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *SearchModelVersionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchModelVersionsResponse) { +} + +func (newState *SearchModelVersionsResponse) SyncEffectiveFieldsDuringRead(existingState SearchModelVersionsResponse) { +} + // Search models type SearchModelsRequest struct { // String filter condition, like "name LIKE 'my-model-name'". Interpreted in @@ -1363,6 +2149,12 @@ type SearchModelsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *SearchModelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchModelsRequest) { +} + +func (newState *SearchModelsRequest) SyncEffectiveFieldsDuringRead(existingState SearchModelsRequest) { +} + type SearchModelsResponse struct { // Pagination token to request the next page of models. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` @@ -1370,6 +2162,12 @@ type SearchModelsResponse struct { RegisteredModels []Model `tfsdk:"registered_models" tf:"optional"` } +func (newState *SearchModelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchModelsResponse) { +} + +func (newState *SearchModelsResponse) SyncEffectiveFieldsDuringRead(existingState SearchModelsResponse) { +} + type SearchRuns struct { // List of experiment IDs to search over. ExperimentIds []types.String `tfsdk:"experiment_ids" tf:"optional"` @@ -1402,6 +2200,12 @@ type SearchRuns struct { RunViewType types.String `tfsdk:"run_view_type" tf:"optional"` } +func (newState *SearchRuns) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchRuns) { +} + +func (newState *SearchRuns) SyncEffectiveFieldsDuringRead(existingState SearchRuns) { +} + type SearchRunsResponse struct { // Token for the next page of runs. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` @@ -1409,6 +2213,12 @@ type SearchRunsResponse struct { Runs []Run `tfsdk:"runs" tf:"optional"` } +func (newState *SearchRunsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SearchRunsResponse) { +} + +func (newState *SearchRunsResponse) SyncEffectiveFieldsDuringRead(existingState SearchRunsResponse) { +} + type SetExperimentTag struct { // ID of the experiment under which to log the tag. Must be provided. ExperimentId types.String `tfsdk:"experiment_id" tf:""` @@ -1421,9 +2231,21 @@ type SetExperimentTag struct { Value types.String `tfsdk:"value" tf:""` } +func (newState *SetExperimentTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetExperimentTag) { +} + +func (newState *SetExperimentTag) SyncEffectiveFieldsDuringRead(existingState SetExperimentTag) { +} + type SetExperimentTagResponse struct { } +func (newState *SetExperimentTagResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetExperimentTagResponse) { +} + +func (newState *SetExperimentTagResponse) SyncEffectiveFieldsDuringRead(existingState SetExperimentTagResponse) { +} + type SetModelTagRequest struct { // Name of the tag. 
Maximum size depends on storage backend. If a tag with // this name already exists, its preexisting value will be replaced by the @@ -1438,9 +2260,21 @@ type SetModelTagRequest struct { Value types.String `tfsdk:"value" tf:""` } +func (newState *SetModelTagRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetModelTagRequest) { +} + +func (newState *SetModelTagRequest) SyncEffectiveFieldsDuringRead(existingState SetModelTagRequest) { +} + type SetModelTagResponse struct { } +func (newState *SetModelTagResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetModelTagResponse) { +} + +func (newState *SetModelTagResponse) SyncEffectiveFieldsDuringRead(existingState SetModelTagResponse) { +} + type SetModelVersionTagRequest struct { // Name of the tag. Maximum size depends on storage backend. If a tag with // this name already exists, its preexisting value will be replaced by the @@ -1457,9 +2291,21 @@ type SetModelVersionTagRequest struct { Version types.String `tfsdk:"version" tf:""` } +func (newState *SetModelVersionTagRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetModelVersionTagRequest) { +} + +func (newState *SetModelVersionTagRequest) SyncEffectiveFieldsDuringRead(existingState SetModelVersionTagRequest) { +} + type SetModelVersionTagResponse struct { } +func (newState *SetModelVersionTagResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetModelVersionTagResponse) { +} + +func (newState *SetModelVersionTagResponse) SyncEffectiveFieldsDuringRead(existingState SetModelVersionTagResponse) { +} + type SetTag struct { // Name of the tag. Maximum size depends on storage backend. All storage // backends are guaranteed to support key values up to 250 bytes in size. @@ -1475,9 +2321,21 @@ type SetTag struct { Value types.String `tfsdk:"value" tf:""` } +func (newState *SetTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetTag) { +} + +func (newState *SetTag) SyncEffectiveFieldsDuringRead(existingState SetTag) { +} + type SetTagResponse struct { } +func (newState *SetTagResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetTagResponse) { +} + +func (newState *SetTagResponse) SyncEffectiveFieldsDuringRead(existingState SetTagResponse) { +} + // Test webhook response object. type TestRegistryWebhook struct { // Body of the response from the webhook URL @@ -1486,6 +2344,12 @@ type TestRegistryWebhook struct { StatusCode types.Int64 `tfsdk:"status_code" tf:"optional"` } +func (newState *TestRegistryWebhook) SyncEffectiveFieldsDuringCreateOrUpdate(plan TestRegistryWebhook) { +} + +func (newState *TestRegistryWebhook) SyncEffectiveFieldsDuringRead(existingState TestRegistryWebhook) { +} + type TestRegistryWebhookRequest struct { // If `event` is specified, the test trigger uses the specified event. If // `event` is not specified, the test trigger uses a randomly chosen event @@ -1495,11 +2359,23 @@ type TestRegistryWebhookRequest struct { Id types.String `tfsdk:"id" tf:""` } +func (newState *TestRegistryWebhookRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan TestRegistryWebhookRequest) { +} + +func (newState *TestRegistryWebhookRequest) SyncEffectiveFieldsDuringRead(existingState TestRegistryWebhookRequest) { +} + type TestRegistryWebhookResponse struct { // Test webhook response object. 
Webhook []TestRegistryWebhook `tfsdk:"webhook" tf:"optional,object"` } +func (newState *TestRegistryWebhookResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan TestRegistryWebhookResponse) { +} + +func (newState *TestRegistryWebhookResponse) SyncEffectiveFieldsDuringRead(existingState TestRegistryWebhookResponse) { +} + type TransitionModelVersionStageDatabricks struct { // Specifies whether to archive all current model versions in the target // stage. @@ -1522,6 +2398,12 @@ type TransitionModelVersionStageDatabricks struct { Version types.String `tfsdk:"version" tf:""` } +func (newState *TransitionModelVersionStageDatabricks) SyncEffectiveFieldsDuringCreateOrUpdate(plan TransitionModelVersionStageDatabricks) { +} + +func (newState *TransitionModelVersionStageDatabricks) SyncEffectiveFieldsDuringRead(existingState TransitionModelVersionStageDatabricks) { +} + // Transition request details. type TransitionRequest struct { // Array of actions on the activity allowed for the current viewer. @@ -1545,10 +2427,22 @@ type TransitionRequest struct { UserId types.String `tfsdk:"user_id" tf:"optional"` } +func (newState *TransitionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan TransitionRequest) { +} + +func (newState *TransitionRequest) SyncEffectiveFieldsDuringRead(existingState TransitionRequest) { +} + type TransitionStageResponse struct { ModelVersion []ModelVersionDatabricks `tfsdk:"model_version" tf:"optional,object"` } +func (newState *TransitionStageResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan TransitionStageResponse) { +} + +func (newState *TransitionStageResponse) SyncEffectiveFieldsDuringRead(existingState TransitionStageResponse) { +} + type UpdateComment struct { // User-provided comment on the action. Comment types.String `tfsdk:"comment" tf:""` @@ -1556,11 +2450,23 @@ type UpdateComment struct { Id types.String `tfsdk:"id" tf:""` } +func (newState *UpdateComment) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateComment) { +} + +func (newState *UpdateComment) SyncEffectiveFieldsDuringRead(existingState UpdateComment) { +} + type UpdateCommentResponse struct { // Comment details. Comment []CommentObject `tfsdk:"comment" tf:"optional,object"` } +func (newState *UpdateCommentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCommentResponse) { +} + +func (newState *UpdateCommentResponse) SyncEffectiveFieldsDuringRead(existingState UpdateCommentResponse) { +} + type UpdateExperiment struct { // ID of the associated experiment. ExperimentId types.String `tfsdk:"experiment_id" tf:""` @@ -1569,9 +2475,21 @@ type UpdateExperiment struct { NewName types.String `tfsdk:"new_name" tf:"optional"` } +func (newState *UpdateExperiment) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateExperiment) { +} + +func (newState *UpdateExperiment) SyncEffectiveFieldsDuringRead(existingState UpdateExperiment) { +} + type UpdateExperimentResponse struct { } +func (newState *UpdateExperimentResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateExperimentResponse) { +} + +func (newState *UpdateExperimentResponse) SyncEffectiveFieldsDuringRead(existingState UpdateExperimentResponse) { +} + type UpdateModelRequest struct { // If provided, updates the description for this `registered_model`. 
Description types.String `tfsdk:"description" tf:"optional"` @@ -1579,9 +2497,21 @@ type UpdateModelRequest struct { Name types.String `tfsdk:"name" tf:""` } +func (newState *UpdateModelRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateModelRequest) { +} + +func (newState *UpdateModelRequest) SyncEffectiveFieldsDuringRead(existingState UpdateModelRequest) { +} + type UpdateModelResponse struct { } +func (newState *UpdateModelResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateModelResponse) { +} + +func (newState *UpdateModelResponse) SyncEffectiveFieldsDuringRead(existingState UpdateModelResponse) { +} + type UpdateModelVersionRequest struct { // If provided, updates the description for this `registered_model`. Description types.String `tfsdk:"description" tf:"optional"` @@ -1591,9 +2521,21 @@ type UpdateModelVersionRequest struct { Version types.String `tfsdk:"version" tf:""` } +func (newState *UpdateModelVersionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateModelVersionRequest) { +} + +func (newState *UpdateModelVersionRequest) SyncEffectiveFieldsDuringRead(existingState UpdateModelVersionRequest) { +} + type UpdateModelVersionResponse struct { } +func (newState *UpdateModelVersionResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateModelVersionResponse) { +} + +func (newState *UpdateModelVersionResponse) SyncEffectiveFieldsDuringRead(existingState UpdateModelVersionResponse) { +} + type UpdateRegistryWebhook struct { // User-specified description for the webhook. Description types.String `tfsdk:"description" tf:"optional"` @@ -1648,6 +2590,12 @@ type UpdateRegistryWebhook struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *UpdateRegistryWebhook) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRegistryWebhook) { +} + +func (newState *UpdateRegistryWebhook) SyncEffectiveFieldsDuringRead(existingState UpdateRegistryWebhook) { +} + type UpdateRun struct { // Unix timestamp in milliseconds of when the run ended. EndTime types.Int64 `tfsdk:"end_time" tf:"optional"` @@ -1660,10 +2608,28 @@ type UpdateRun struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *UpdateRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRun) { +} + +func (newState *UpdateRun) SyncEffectiveFieldsDuringRead(existingState UpdateRun) { +} + type UpdateRunResponse struct { // Updated metadata of the run. 
RunInfo []RunInfo `tfsdk:"run_info" tf:"optional,object"` } +func (newState *UpdateRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRunResponse) { +} + +func (newState *UpdateRunResponse) SyncEffectiveFieldsDuringRead(existingState UpdateRunResponse) { +} + type UpdateWebhookResponse struct { } + +func (newState *UpdateWebhookResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateWebhookResponse) { +} + +func (newState *UpdateWebhookResponse) SyncEffectiveFieldsDuringRead(existingState UpdateWebhookResponse) { +} diff --git a/internal/service/oauth2_tf/model.go b/internal/service/oauth2_tf/model.go index f7959bfcf5..e738e7f094 100755 --- a/internal/service/oauth2_tf/model.go +++ b/internal/service/oauth2_tf/model.go @@ -29,6 +29,12 @@ type CreateCustomAppIntegration struct { TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } +func (newState *CreateCustomAppIntegration) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCustomAppIntegration) { +} + +func (newState *CreateCustomAppIntegration) SyncEffectiveFieldsDuringRead(existingState CreateCustomAppIntegration) { +} + type CreateCustomAppIntegrationOutput struct { // OAuth client-id generated by the Databricks ClientId types.String `tfsdk:"client_id" tf:"optional"` @@ -39,6 +45,12 @@ type CreateCustomAppIntegrationOutput struct { IntegrationId types.String `tfsdk:"integration_id" tf:"optional"` } +func (newState *CreateCustomAppIntegrationOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCustomAppIntegrationOutput) { +} + +func (newState *CreateCustomAppIntegrationOutput) SyncEffectiveFieldsDuringRead(existingState CreateCustomAppIntegrationOutput) { +} + type CreatePublishedAppIntegration struct { // App id of the OAuth published app integration. For example power-bi, // tableau-deskop @@ -47,17 +59,35 @@ type CreatePublishedAppIntegration struct { TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } +func (newState *CreatePublishedAppIntegration) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePublishedAppIntegration) { +} + +func (newState *CreatePublishedAppIntegration) SyncEffectiveFieldsDuringRead(existingState CreatePublishedAppIntegration) { +} + type CreatePublishedAppIntegrationOutput struct { // Unique integration id for the published OAuth app IntegrationId types.String `tfsdk:"integration_id" tf:"optional"` } +func (newState *CreatePublishedAppIntegrationOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePublishedAppIntegrationOutput) { +} + +func (newState *CreatePublishedAppIntegrationOutput) SyncEffectiveFieldsDuringRead(existingState CreatePublishedAppIntegrationOutput) { +} + // Create service principal secret type CreateServicePrincipalSecretRequest struct { // The service principal ID. 
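Every `SyncEffectiveFieldsDuringCreateOrUpdate` and `SyncEffectiveFieldsDuringRead` body generated in these hunks is empty, because none of the ml or oauth2 models carry a server-controlled "effective" counterpart to a configured field. As a rough sketch of what a non-empty pair could look like when a model does have such a pairing, consider the hypothetical `ExampleModel` below; the struct, its fields, and the reconciliation logic are illustrative assumptions, not code from this patch, and only the two method names come from the generated hunks:

```go
// Hypothetical example only: none of the structs in this patch define effective
// fields, so their generated Sync methods are empty.
package example

import "github.com/hashicorp/terraform-plugin-framework/types"

type ExampleModel struct {
	// Value requested in the Terraform configuration.
	Timeout types.Int64 `tfsdk:"timeout" tf:"optional"`
	// Value the backend actually applied (illustrative field, not from this patch).
	EffectiveTimeout types.Int64 `tfsdk:"effective_timeout" tf:"optional"`
}

// After a create or update, mirror the planned value into the effective field
// so the saved state does not drift from what the user configured.
func (newState *ExampleModel) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExampleModel) {
	if !plan.Timeout.IsNull() && !plan.Timeout.IsUnknown() {
		newState.EffectiveTimeout = plan.Timeout
	}
}

// On read, keep the user-facing field stable when the backend still reports the
// effective value already recorded in state, avoiding a spurious plan diff.
func (newState *ExampleModel) SyncEffectiveFieldsDuringRead(existingState ExampleModel) {
	if existingState.EffectiveTimeout.Equal(newState.EffectiveTimeout) {
		newState.Timeout = existingState.Timeout
	}
}
```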
ServicePrincipalId types.Int64 `tfsdk:"-"` } +func (newState *CreateServicePrincipalSecretRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateServicePrincipalSecretRequest) { +} + +func (newState *CreateServicePrincipalSecretRequest) SyncEffectiveFieldsDuringRead(existingState CreateServicePrincipalSecretRequest) { +} + type CreateServicePrincipalSecretResponse struct { // UTC time when the secret was created CreateTime types.String `tfsdk:"create_time" tf:"optional"` @@ -73,6 +103,12 @@ type CreateServicePrincipalSecretResponse struct { UpdateTime types.String `tfsdk:"update_time" tf:"optional"` } +func (newState *CreateServicePrincipalSecretResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateServicePrincipalSecretResponse) { +} + +func (newState *CreateServicePrincipalSecretResponse) SyncEffectiveFieldsDuringRead(existingState CreateServicePrincipalSecretResponse) { +} + type DataPlaneInfo struct { // Authorization details as a string. AuthorizationDetails types.String `tfsdk:"authorization_details" tf:"optional"` @@ -80,25 +116,61 @@ type DataPlaneInfo struct { EndpointUrl types.String `tfsdk:"endpoint_url" tf:"optional"` } +func (newState *DataPlaneInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan DataPlaneInfo) { +} + +func (newState *DataPlaneInfo) SyncEffectiveFieldsDuringRead(existingState DataPlaneInfo) { +} + type DeleteCustomAppIntegrationOutput struct { } +func (newState *DeleteCustomAppIntegrationOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCustomAppIntegrationOutput) { +} + +func (newState *DeleteCustomAppIntegrationOutput) SyncEffectiveFieldsDuringRead(existingState DeleteCustomAppIntegrationOutput) { +} + // Delete Custom OAuth App Integration type DeleteCustomAppIntegrationRequest struct { IntegrationId types.String `tfsdk:"-"` } +func (newState *DeleteCustomAppIntegrationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCustomAppIntegrationRequest) { +} + +func (newState *DeleteCustomAppIntegrationRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCustomAppIntegrationRequest) { +} + type DeletePublishedAppIntegrationOutput struct { } +func (newState *DeletePublishedAppIntegrationOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePublishedAppIntegrationOutput) { +} + +func (newState *DeletePublishedAppIntegrationOutput) SyncEffectiveFieldsDuringRead(existingState DeletePublishedAppIntegrationOutput) { +} + // Delete Published OAuth App Integration type DeletePublishedAppIntegrationRequest struct { IntegrationId types.String `tfsdk:"-"` } +func (newState *DeletePublishedAppIntegrationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePublishedAppIntegrationRequest) { +} + +func (newState *DeletePublishedAppIntegrationRequest) SyncEffectiveFieldsDuringRead(existingState DeletePublishedAppIntegrationRequest) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + // Delete service principal secret type DeleteServicePrincipalSecretRequest struct { // The secret ID. 
@@ -107,6 +179,12 @@ type DeleteServicePrincipalSecretRequest struct { ServicePrincipalId types.Int64 `tfsdk:"-"` } +func (newState *DeleteServicePrincipalSecretRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteServicePrincipalSecretRequest) { +} + +func (newState *DeleteServicePrincipalSecretRequest) SyncEffectiveFieldsDuringRead(existingState DeleteServicePrincipalSecretRequest) { +} + type GetCustomAppIntegrationOutput struct { // The client id of the custom OAuth app ClientId types.String `tfsdk:"client_id" tf:"optional"` @@ -131,11 +209,23 @@ type GetCustomAppIntegrationOutput struct { TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } +func (newState *GetCustomAppIntegrationOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCustomAppIntegrationOutput) { +} + +func (newState *GetCustomAppIntegrationOutput) SyncEffectiveFieldsDuringRead(existingState GetCustomAppIntegrationOutput) { +} + // Get OAuth Custom App Integration type GetCustomAppIntegrationRequest struct { IntegrationId types.String `tfsdk:"-"` } +func (newState *GetCustomAppIntegrationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCustomAppIntegrationRequest) { +} + +func (newState *GetCustomAppIntegrationRequest) SyncEffectiveFieldsDuringRead(existingState GetCustomAppIntegrationRequest) { +} + type GetCustomAppIntegrationsOutput struct { // List of Custom OAuth App Integrations defined for the account. Apps []GetCustomAppIntegrationOutput `tfsdk:"apps" tf:"optional"` @@ -143,6 +233,12 @@ type GetCustomAppIntegrationsOutput struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *GetCustomAppIntegrationsOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCustomAppIntegrationsOutput) { +} + +func (newState *GetCustomAppIntegrationsOutput) SyncEffectiveFieldsDuringRead(existingState GetCustomAppIntegrationsOutput) { +} + type GetPublishedAppIntegrationOutput struct { // App-id of the published app integration AppId types.String `tfsdk:"app_id" tf:"optional"` @@ -158,11 +254,23 @@ type GetPublishedAppIntegrationOutput struct { TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } +func (newState *GetPublishedAppIntegrationOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPublishedAppIntegrationOutput) { +} + +func (newState *GetPublishedAppIntegrationOutput) SyncEffectiveFieldsDuringRead(existingState GetPublishedAppIntegrationOutput) { +} + // Get OAuth Published App Integration type GetPublishedAppIntegrationRequest struct { IntegrationId types.String `tfsdk:"-"` } +func (newState *GetPublishedAppIntegrationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPublishedAppIntegrationRequest) { +} + +func (newState *GetPublishedAppIntegrationRequest) SyncEffectiveFieldsDuringRead(existingState GetPublishedAppIntegrationRequest) { +} + type GetPublishedAppIntegrationsOutput struct { // List of Published OAuth App Integrations defined for the account. 
Apps []GetPublishedAppIntegrationOutput `tfsdk:"apps" tf:"optional"` @@ -170,6 +278,12 @@ type GetPublishedAppIntegrationsOutput struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *GetPublishedAppIntegrationsOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPublishedAppIntegrationsOutput) { +} + +func (newState *GetPublishedAppIntegrationsOutput) SyncEffectiveFieldsDuringRead(existingState GetPublishedAppIntegrationsOutput) { +} + type GetPublishedAppsOutput struct { // List of Published OAuth Apps. Apps []PublishedAppOutput `tfsdk:"apps" tf:"optional"` @@ -178,6 +292,12 @@ type GetPublishedAppsOutput struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *GetPublishedAppsOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPublishedAppsOutput) { +} + +func (newState *GetPublishedAppsOutput) SyncEffectiveFieldsDuringRead(existingState GetPublishedAppsOutput) { +} + // Get custom oauth app integrations type ListCustomAppIntegrationsRequest struct { IncludeCreatorUsername types.Bool `tfsdk:"-"` @@ -187,6 +307,12 @@ type ListCustomAppIntegrationsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListCustomAppIntegrationsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCustomAppIntegrationsRequest) { +} + +func (newState *ListCustomAppIntegrationsRequest) SyncEffectiveFieldsDuringRead(existingState ListCustomAppIntegrationsRequest) { +} + // Get all the published OAuth apps type ListOAuthPublishedAppsRequest struct { // The max number of OAuth published apps to return in one page. @@ -195,6 +321,12 @@ type ListOAuthPublishedAppsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListOAuthPublishedAppsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListOAuthPublishedAppsRequest) { +} + +func (newState *ListOAuthPublishedAppsRequest) SyncEffectiveFieldsDuringRead(existingState ListOAuthPublishedAppsRequest) { +} + // Get published oauth app integrations type ListPublishedAppIntegrationsRequest struct { PageSize types.Int64 `tfsdk:"-"` @@ -202,17 +334,35 @@ type ListPublishedAppIntegrationsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListPublishedAppIntegrationsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPublishedAppIntegrationsRequest) { +} + +func (newState *ListPublishedAppIntegrationsRequest) SyncEffectiveFieldsDuringRead(existingState ListPublishedAppIntegrationsRequest) { +} + // List service principal secrets type ListServicePrincipalSecretsRequest struct { // The service principal ID. ServicePrincipalId types.Int64 `tfsdk:"-"` } +func (newState *ListServicePrincipalSecretsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListServicePrincipalSecretsRequest) { +} + +func (newState *ListServicePrincipalSecretsRequest) SyncEffectiveFieldsDuringRead(existingState ListServicePrincipalSecretsRequest) { +} + type ListServicePrincipalSecretsResponse struct { // List of the secrets Secrets []SecretInfo `tfsdk:"secrets" tf:"optional"` } +func (newState *ListServicePrincipalSecretsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListServicePrincipalSecretsResponse) { +} + +func (newState *ListServicePrincipalSecretsResponse) SyncEffectiveFieldsDuringRead(existingState ListServicePrincipalSecretsResponse) { +} + type PublishedAppOutput struct { // Unique ID of the published OAuth app. 
AppId types.String `tfsdk:"app_id" tf:"optional"` @@ -232,6 +382,12 @@ type PublishedAppOutput struct { Scopes []types.String `tfsdk:"scopes" tf:"optional"` } +func (newState *PublishedAppOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan PublishedAppOutput) { +} + +func (newState *PublishedAppOutput) SyncEffectiveFieldsDuringRead(existingState PublishedAppOutput) { +} + type SecretInfo struct { // UTC time when the secret was created CreateTime types.String `tfsdk:"create_time" tf:"optional"` @@ -245,6 +401,12 @@ type SecretInfo struct { UpdateTime types.String `tfsdk:"update_time" tf:"optional"` } +func (newState *SecretInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan SecretInfo) { +} + +func (newState *SecretInfo) SyncEffectiveFieldsDuringRead(existingState SecretInfo) { +} + type TokenAccessPolicy struct { // access token time to live in minutes AccessTokenTtlInMinutes types.Int64 `tfsdk:"access_token_ttl_in_minutes" tf:"optional"` @@ -252,6 +414,12 @@ type TokenAccessPolicy struct { RefreshTokenTtlInMinutes types.Int64 `tfsdk:"refresh_token_ttl_in_minutes" tf:"optional"` } +func (newState *TokenAccessPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenAccessPolicy) { +} + +func (newState *TokenAccessPolicy) SyncEffectiveFieldsDuringRead(existingState TokenAccessPolicy) { +} + type UpdateCustomAppIntegration struct { IntegrationId types.String `tfsdk:"-"` // List of OAuth redirect urls to be updated in the custom OAuth app @@ -261,14 +429,38 @@ type UpdateCustomAppIntegration struct { TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } +func (newState *UpdateCustomAppIntegration) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCustomAppIntegration) { +} + +func (newState *UpdateCustomAppIntegration) SyncEffectiveFieldsDuringRead(existingState UpdateCustomAppIntegration) { +} + type UpdateCustomAppIntegrationOutput struct { } +func (newState *UpdateCustomAppIntegrationOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCustomAppIntegrationOutput) { +} + +func (newState *UpdateCustomAppIntegrationOutput) SyncEffectiveFieldsDuringRead(existingState UpdateCustomAppIntegrationOutput) { +} + type UpdatePublishedAppIntegration struct { IntegrationId types.String `tfsdk:"-"` // Token access policy to be updated in the published OAuth app integration TokenAccessPolicy []TokenAccessPolicy `tfsdk:"token_access_policy" tf:"optional,object"` } +func (newState *UpdatePublishedAppIntegration) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdatePublishedAppIntegration) { +} + +func (newState *UpdatePublishedAppIntegration) SyncEffectiveFieldsDuringRead(existingState UpdatePublishedAppIntegration) { +} + type UpdatePublishedAppIntegrationOutput struct { } + +func (newState *UpdatePublishedAppIntegrationOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdatePublishedAppIntegrationOutput) { +} + +func (newState *UpdatePublishedAppIntegrationOutput) SyncEffectiveFieldsDuringRead(existingState UpdatePublishedAppIntegrationOutput) { +} diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index c4ad05458b..8adcfa0bfa 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -76,6 +76,12 @@ type CreatePipeline struct { Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"` } +func (newState *CreatePipeline) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePipeline) { +} + +func (newState *CreatePipeline) SyncEffectiveFieldsDuringRead(existingState 
CreatePipeline) { +} + type CreatePipelineResponse struct { // Only returned when dry_run is true. EffectiveSettings []PipelineSpec `tfsdk:"effective_settings" tf:"optional,object"` @@ -84,12 +90,24 @@ type CreatePipelineResponse struct { PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` } +func (newState *CreatePipelineResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePipelineResponse) { +} + +func (newState *CreatePipelineResponse) SyncEffectiveFieldsDuringRead(existingState CreatePipelineResponse) { +} + type CronTrigger struct { QuartzCronSchedule types.String `tfsdk:"quartz_cron_schedule" tf:"optional"` TimezoneId types.String `tfsdk:"timezone_id" tf:"optional"` } +func (newState *CronTrigger) SyncEffectiveFieldsDuringCreateOrUpdate(plan CronTrigger) { +} + +func (newState *CronTrigger) SyncEffectiveFieldsDuringRead(existingState CronTrigger) { +} + type DataPlaneId struct { // The instance name of the data plane emitting an event. Instance types.String `tfsdk:"instance" tf:"optional"` @@ -97,14 +115,32 @@ type DataPlaneId struct { SeqNo types.Int64 `tfsdk:"seq_no" tf:"optional"` } +func (newState *DataPlaneId) SyncEffectiveFieldsDuringCreateOrUpdate(plan DataPlaneId) { +} + +func (newState *DataPlaneId) SyncEffectiveFieldsDuringRead(existingState DataPlaneId) { +} + // Delete a pipeline type DeletePipelineRequest struct { PipelineId types.String `tfsdk:"-"` } +func (newState *DeletePipelineRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePipelineRequest) { +} + +func (newState *DeletePipelineRequest) SyncEffectiveFieldsDuringRead(existingState DeletePipelineRequest) { +} + type DeletePipelineResponse struct { } +func (newState *DeletePipelineResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePipelineResponse) { +} + +func (newState *DeletePipelineResponse) SyncEffectiveFieldsDuringRead(existingState DeletePipelineResponse) { +} + type EditPipeline struct { // If false, deployment will fail if name has changed and conflicts the name // of another pipeline. @@ -170,9 +206,21 @@ type EditPipeline struct { Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"` } +func (newState *EditPipeline) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditPipeline) { +} + +func (newState *EditPipeline) SyncEffectiveFieldsDuringRead(existingState EditPipeline) { +} + type EditPipelineResponse struct { } +func (newState *EditPipelineResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditPipelineResponse) { +} + +func (newState *EditPipelineResponse) SyncEffectiveFieldsDuringRead(existingState EditPipelineResponse) { +} + type ErrorDetail struct { // The exception thrown for this error, with its chain of cause. Exceptions []SerializedException `tfsdk:"exceptions" tf:"optional"` @@ -180,11 +228,23 @@ type ErrorDetail struct { Fatal types.Bool `tfsdk:"fatal" tf:"optional"` } +func (newState *ErrorDetail) SyncEffectiveFieldsDuringCreateOrUpdate(plan ErrorDetail) { +} + +func (newState *ErrorDetail) SyncEffectiveFieldsDuringRead(existingState ErrorDetail) { +} + type FileLibrary struct { // The absolute path of the file. Path types.String `tfsdk:"path" tf:"optional"` } +func (newState *FileLibrary) SyncEffectiveFieldsDuringCreateOrUpdate(plan FileLibrary) { +} + +func (newState *FileLibrary) SyncEffectiveFieldsDuringRead(existingState FileLibrary) { +} + type Filters struct { // Paths to exclude. 
Exclude []types.String `tfsdk:"exclude" tf:"optional"` @@ -192,28 +252,58 @@ type Filters struct { Include []types.String `tfsdk:"include" tf:"optional"` } +func (newState *Filters) SyncEffectiveFieldsDuringCreateOrUpdate(plan Filters) { +} + +func (newState *Filters) SyncEffectiveFieldsDuringRead(existingState Filters) { +} + // Get pipeline permission levels type GetPipelinePermissionLevelsRequest struct { // The pipeline for which to get or manage permissions. PipelineId types.String `tfsdk:"-"` } +func (newState *GetPipelinePermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPipelinePermissionLevelsRequest) { +} + +func (newState *GetPipelinePermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetPipelinePermissionLevelsRequest) { +} + type GetPipelinePermissionLevelsResponse struct { // Specific permission levels PermissionLevels []PipelinePermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetPipelinePermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPipelinePermissionLevelsResponse) { +} + +func (newState *GetPipelinePermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetPipelinePermissionLevelsResponse) { +} + // Get pipeline permissions type GetPipelinePermissionsRequest struct { // The pipeline for which to get or manage permissions. PipelineId types.String `tfsdk:"-"` } +func (newState *GetPipelinePermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPipelinePermissionsRequest) { +} + +func (newState *GetPipelinePermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetPipelinePermissionsRequest) { +} + // Get a pipeline type GetPipelineRequest struct { PipelineId types.String `tfsdk:"-"` } +func (newState *GetPipelineRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPipelineRequest) { +} + +func (newState *GetPipelineRequest) SyncEffectiveFieldsDuringRead(existingState GetPipelineRequest) { +} + type GetPipelineResponse struct { // An optional message detailing the cause of the pipeline state. Cause types.String `tfsdk:"cause" tf:"optional"` @@ -243,6 +333,12 @@ type GetPipelineResponse struct { State types.String `tfsdk:"state" tf:"optional"` } +func (newState *GetPipelineResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPipelineResponse) { +} + +func (newState *GetPipelineResponse) SyncEffectiveFieldsDuringRead(existingState GetPipelineResponse) { +} + // Get a pipeline update type GetUpdateRequest struct { // The ID of the pipeline. @@ -251,18 +347,38 @@ type GetUpdateRequest struct { UpdateId types.String `tfsdk:"-"` } +func (newState *GetUpdateRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetUpdateRequest) { +} + +func (newState *GetUpdateRequest) SyncEffectiveFieldsDuringRead(existingState GetUpdateRequest) { +} + type GetUpdateResponse struct { // The current update info. Update []UpdateInfo `tfsdk:"update" tf:"optional,object"` } +func (newState *GetUpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetUpdateResponse) { +} + +func (newState *GetUpdateResponse) SyncEffectiveFieldsDuringRead(existingState GetUpdateResponse) { +} + type IngestionConfig struct { + // Select tables from a specific source report. + Report []ReportSpec `tfsdk:"report" tf:"optional,object"` // Select tables from a specific source schema. Schema []SchemaSpec `tfsdk:"schema" tf:"optional,object"` // Select tables from a specific source table. 
Table []TableSpec `tfsdk:"table" tf:"optional,object"` } +func (newState *IngestionConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan IngestionConfig) { +} + +func (newState *IngestionConfig) SyncEffectiveFieldsDuringRead(existingState IngestionConfig) { +} + type IngestionGatewayPipelineDefinition struct { // Immutable. The Unity Catalog connection this gateway pipeline uses to // communicate with the source. @@ -280,6 +396,12 @@ type IngestionGatewayPipelineDefinition struct { GatewayStorageSchema types.String `tfsdk:"gateway_storage_schema" tf:"optional"` } +func (newState *IngestionGatewayPipelineDefinition) SyncEffectiveFieldsDuringCreateOrUpdate(plan IngestionGatewayPipelineDefinition) { +} + +func (newState *IngestionGatewayPipelineDefinition) SyncEffectiveFieldsDuringRead(existingState IngestionGatewayPipelineDefinition) { +} + type IngestionPipelineDefinition struct { // Immutable. The Unity Catalog connection this ingestion pipeline uses to // communicate with the source. Specify either ingestion_gateway_id or @@ -297,6 +419,12 @@ type IngestionPipelineDefinition struct { TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"` } +func (newState *IngestionPipelineDefinition) SyncEffectiveFieldsDuringCreateOrUpdate(plan IngestionPipelineDefinition) { +} + +func (newState *IngestionPipelineDefinition) SyncEffectiveFieldsDuringRead(existingState IngestionPipelineDefinition) { +} + // List pipeline events type ListPipelineEventsRequest struct { // Criteria to select a subset of results, expressed using a SQL-like @@ -324,6 +452,12 @@ type ListPipelineEventsRequest struct { PipelineId types.String `tfsdk:"-"` } +func (newState *ListPipelineEventsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPipelineEventsRequest) { +} + +func (newState *ListPipelineEventsRequest) SyncEffectiveFieldsDuringRead(existingState ListPipelineEventsRequest) { +} + type ListPipelineEventsResponse struct { // The list of events matching the request criteria. Events []PipelineEvent `tfsdk:"events" tf:"optional"` @@ -333,6 +467,12 @@ type ListPipelineEventsResponse struct { PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` } +func (newState *ListPipelineEventsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPipelineEventsResponse) { +} + +func (newState *ListPipelineEventsResponse) SyncEffectiveFieldsDuringRead(existingState ListPipelineEventsResponse) { +} + // List pipelines type ListPipelinesRequest struct { // Select a subset of results based on the specified criteria. The supported @@ -358,6 +498,12 @@ type ListPipelinesRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListPipelinesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPipelinesRequest) { +} + +func (newState *ListPipelinesRequest) SyncEffectiveFieldsDuringRead(existingState ListPipelinesRequest) { +} + type ListPipelinesResponse struct { // If present, a token to fetch the next page of events. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` @@ -365,6 +511,12 @@ type ListPipelinesResponse struct { Statuses []PipelineStateInfo `tfsdk:"statuses" tf:"optional"` } +func (newState *ListPipelinesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPipelinesResponse) { +} + +func (newState *ListPipelinesResponse) SyncEffectiveFieldsDuringRead(existingState ListPipelinesResponse) { +} + // List pipeline updates type ListUpdatesRequest struct { // Max number of entries to return in a single page. 
@@ -377,6 +529,12 @@ type ListUpdatesRequest struct { UntilUpdateId types.String `tfsdk:"-"` } +func (newState *ListUpdatesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListUpdatesRequest) { +} + +func (newState *ListUpdatesRequest) SyncEffectiveFieldsDuringRead(existingState ListUpdatesRequest) { +} + type ListUpdatesResponse struct { // If present, then there are more results, and this a token to be used in a // subsequent request to fetch the next page. @@ -388,14 +546,32 @@ type ListUpdatesResponse struct { Updates []UpdateInfo `tfsdk:"updates" tf:"optional"` } +func (newState *ListUpdatesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListUpdatesResponse) { +} + +func (newState *ListUpdatesResponse) SyncEffectiveFieldsDuringRead(existingState ListUpdatesResponse) { +} + type ManualTrigger struct { } +func (newState *ManualTrigger) SyncEffectiveFieldsDuringCreateOrUpdate(plan ManualTrigger) { +} + +func (newState *ManualTrigger) SyncEffectiveFieldsDuringRead(existingState ManualTrigger) { +} + type NotebookLibrary struct { // The absolute path of the notebook. Path types.String `tfsdk:"path" tf:"optional"` } +func (newState *NotebookLibrary) SyncEffectiveFieldsDuringCreateOrUpdate(plan NotebookLibrary) { +} + +func (newState *NotebookLibrary) SyncEffectiveFieldsDuringRead(existingState NotebookLibrary) { +} + type Notifications struct { // A list of alerts that trigger the sending of notifications to the // configured destinations. The supported alerts are: @@ -409,6 +585,12 @@ type Notifications struct { EmailRecipients []types.String `tfsdk:"email_recipients" tf:"optional"` } +func (newState *Notifications) SyncEffectiveFieldsDuringCreateOrUpdate(plan Notifications) { +} + +func (newState *Notifications) SyncEffectiveFieldsDuringRead(existingState Notifications) { +} + type Origin struct { // The id of a batch. Unique within a flow. BatchId types.Int64 `tfsdk:"batch_id" tf:"optional"` @@ -447,6 +629,12 @@ type Origin struct { UpdateId types.String `tfsdk:"update_id" tf:"optional"` } +func (newState *Origin) SyncEffectiveFieldsDuringCreateOrUpdate(plan Origin) { +} + +func (newState *Origin) SyncEffectiveFieldsDuringRead(existingState Origin) { +} + type PipelineAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -458,6 +646,12 @@ type PipelineAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *PipelineAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineAccessControlRequest) { +} + +func (newState *PipelineAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState PipelineAccessControlRequest) { +} + type PipelineAccessControlResponse struct { // All permissions. AllPermissions []PipelinePermission `tfsdk:"all_permissions" tf:"optional"` @@ -471,6 +665,12 @@ type PipelineAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *PipelineAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineAccessControlResponse) { +} + +func (newState *PipelineAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState PipelineAccessControlResponse) { +} + type PipelineCluster struct { // Note: This field won't be persisted. Only API users will check this // field. 
@@ -568,6 +768,12 @@ type PipelineCluster struct { SshPublicKeys []types.String `tfsdk:"ssh_public_keys" tf:"optional"` } +func (newState *PipelineCluster) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineCluster) { +} + +func (newState *PipelineCluster) SyncEffectiveFieldsDuringRead(existingState PipelineCluster) { +} + type PipelineClusterAutoscale struct { // The maximum number of workers to which the cluster can scale up when // overloaded. `max_workers` must be strictly greater than `min_workers`. @@ -584,6 +790,12 @@ type PipelineClusterAutoscale struct { Mode types.String `tfsdk:"mode" tf:"optional"` } +func (newState *PipelineClusterAutoscale) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineClusterAutoscale) { +} + +func (newState *PipelineClusterAutoscale) SyncEffectiveFieldsDuringRead(existingState PipelineClusterAutoscale) { +} + type PipelineDeployment struct { // The deployment method that manages the pipeline. Kind types.String `tfsdk:"kind" tf:"optional"` @@ -591,6 +803,12 @@ type PipelineDeployment struct { MetadataFilePath types.String `tfsdk:"metadata_file_path" tf:"optional"` } +func (newState *PipelineDeployment) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineDeployment) { +} + +func (newState *PipelineDeployment) SyncEffectiveFieldsDuringRead(existingState PipelineDeployment) { +} + type PipelineEvent struct { // Information about an error captured by the event. Error []ErrorDetail `tfsdk:"error" tf:"optional,object"` @@ -612,6 +830,12 @@ type PipelineEvent struct { Timestamp types.String `tfsdk:"timestamp" tf:"optional"` } +func (newState *PipelineEvent) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineEvent) { +} + +func (newState *PipelineEvent) SyncEffectiveFieldsDuringRead(existingState PipelineEvent) { +} + type PipelineLibrary struct { // The path to a file that defines a pipeline and is stored in the // Databricks Repos. 
@@ -627,6 +851,12 @@ type PipelineLibrary struct { Whl types.String `tfsdk:"whl" tf:"optional"` } +func (newState *PipelineLibrary) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineLibrary) { +} + +func (newState *PipelineLibrary) SyncEffectiveFieldsDuringRead(existingState PipelineLibrary) { +} + type PipelinePermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -635,6 +865,12 @@ type PipelinePermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *PipelinePermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelinePermission) { +} + +func (newState *PipelinePermission) SyncEffectiveFieldsDuringRead(existingState PipelinePermission) { +} + type PipelinePermissions struct { AccessControlList []PipelineAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -643,18 +879,36 @@ type PipelinePermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *PipelinePermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelinePermissions) { +} + +func (newState *PipelinePermissions) SyncEffectiveFieldsDuringRead(existingState PipelinePermissions) { +} + type PipelinePermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *PipelinePermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelinePermissionsDescription) { +} + +func (newState *PipelinePermissionsDescription) SyncEffectiveFieldsDuringRead(existingState PipelinePermissionsDescription) { +} + type PipelinePermissionsRequest struct { AccessControlList []PipelineAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The pipeline for which to get or manage permissions. PipelineId types.String `tfsdk:"-"` } +func (newState *PipelinePermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelinePermissionsRequest) { +} + +func (newState *PipelinePermissionsRequest) SyncEffectiveFieldsDuringRead(existingState PipelinePermissionsRequest) { +} + type PipelineSpec struct { // Budget policy of this pipeline. BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` @@ -711,6 +965,12 @@ type PipelineSpec struct { Trigger []PipelineTrigger `tfsdk:"trigger" tf:"optional,object"` } +func (newState *PipelineSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineSpec) { +} + +func (newState *PipelineSpec) SyncEffectiveFieldsDuringRead(existingState PipelineSpec) { +} + type PipelineStateInfo struct { // The unique identifier of the cluster running the pipeline. ClusterId types.String `tfsdk:"cluster_id" tf:"optional"` @@ -732,12 +992,46 @@ type PipelineStateInfo struct { State types.String `tfsdk:"state" tf:"optional"` } +func (newState *PipelineStateInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineStateInfo) { +} + +func (newState *PipelineStateInfo) SyncEffectiveFieldsDuringRead(existingState PipelineStateInfo) { +} + type PipelineTrigger struct { Cron []CronTrigger `tfsdk:"cron" tf:"optional,object"` Manual []ManualTrigger `tfsdk:"manual" tf:"optional,object"` } +func (newState *PipelineTrigger) SyncEffectiveFieldsDuringCreateOrUpdate(plan PipelineTrigger) { +} + +func (newState *PipelineTrigger) SyncEffectiveFieldsDuringRead(existingState PipelineTrigger) { +} + +type ReportSpec struct { + // Required. Destination catalog to store table. 
+ DestinationCatalog types.String `tfsdk:"destination_catalog" tf:"optional"` + // Required. Destination schema to store table. + DestinationSchema types.String `tfsdk:"destination_schema" tf:"optional"` + // Required. Destination table name. The pipeline fails if a table with that + // name already exists. + DestinationTable types.String `tfsdk:"destination_table" tf:"optional"` + // Required. Report URL in the source system. + SourceUrl types.String `tfsdk:"source_url" tf:"optional"` + // Configuration settings to control the ingestion of tables. These settings + // override the table_configuration defined in the + // IngestionPipelineDefinition object. + TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"` +} + +func (newState *ReportSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReportSpec) { +} + +func (newState *ReportSpec) SyncEffectiveFieldsDuringRead(existingState ReportSpec) { +} + type SchemaSpec struct { // Required. Destination catalog to store tables. DestinationCatalog types.String `tfsdk:"destination_catalog" tf:"optional"` @@ -756,6 +1050,12 @@ type SchemaSpec struct { TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"` } +func (newState *SchemaSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan SchemaSpec) { +} + +func (newState *SchemaSpec) SyncEffectiveFieldsDuringRead(existingState SchemaSpec) { +} + type Sequencing struct { // A sequence number, unique and increasing within the control plane. ControlPlaneSeqNo types.Int64 `tfsdk:"control_plane_seq_no" tf:"optional"` @@ -763,6 +1063,12 @@ type Sequencing struct { DataPlaneId []DataPlaneId `tfsdk:"data_plane_id" tf:"optional,object"` } +func (newState *Sequencing) SyncEffectiveFieldsDuringCreateOrUpdate(plan Sequencing) { +} + +func (newState *Sequencing) SyncEffectiveFieldsDuringRead(existingState Sequencing) { +} + type SerializedException struct { // Runtime class of the exception ClassName types.String `tfsdk:"class_name" tf:"optional"` @@ -772,6 +1078,12 @@ type SerializedException struct { Stack []StackFrame `tfsdk:"stack" tf:"optional"` } +func (newState *SerializedException) SyncEffectiveFieldsDuringCreateOrUpdate(plan SerializedException) { +} + +func (newState *SerializedException) SyncEffectiveFieldsDuringRead(existingState SerializedException) { +} + type StackFrame struct { // Class from which the method call originated DeclaringClass types.String `tfsdk:"declaring_class" tf:"optional"` @@ -783,6 +1095,12 @@ type StackFrame struct { MethodName types.String `tfsdk:"method_name" tf:"optional"` } +func (newState *StackFrame) SyncEffectiveFieldsDuringCreateOrUpdate(plan StackFrame) { +} + +func (newState *StackFrame) SyncEffectiveFieldsDuringRead(existingState StackFrame) { +} + type StartUpdate struct { Cause types.String `tfsdk:"cause" tf:"optional"` // If true, this update will reset all tables before running. 
@@ -804,24 +1122,48 @@ type StartUpdate struct { ValidateOnly types.Bool `tfsdk:"validate_only" tf:"optional"` } +func (newState *StartUpdate) SyncEffectiveFieldsDuringCreateOrUpdate(plan StartUpdate) { +} + +func (newState *StartUpdate) SyncEffectiveFieldsDuringRead(existingState StartUpdate) { +} + type StartUpdateResponse struct { UpdateId types.String `tfsdk:"update_id" tf:"optional"` } +func (newState *StartUpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan StartUpdateResponse) { +} + +func (newState *StartUpdateResponse) SyncEffectiveFieldsDuringRead(existingState StartUpdateResponse) { +} + type StopPipelineResponse struct { } +func (newState *StopPipelineResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan StopPipelineResponse) { +} + +func (newState *StopPipelineResponse) SyncEffectiveFieldsDuringRead(existingState StopPipelineResponse) { +} + // Stop a pipeline type StopRequest struct { PipelineId types.String `tfsdk:"-"` } +func (newState *StopRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan StopRequest) { +} + +func (newState *StopRequest) SyncEffectiveFieldsDuringRead(existingState StopRequest) { +} + type TableSpec struct { // Required. Destination catalog to store table. DestinationCatalog types.String `tfsdk:"destination_catalog" tf:"optional"` // Required. Destination schema to store table. DestinationSchema types.String `tfsdk:"destination_schema" tf:"optional"` - // Optional. Destination table name. The pipeline fails If a table with that + // Optional. Destination table name. The pipeline fails if a table with that // name already exists. If not set, the source table name is used. DestinationTable types.String `tfsdk:"destination_table" tf:"optional"` // Source catalog name. Might be optional depending on the type of source. @@ -837,6 +1179,12 @@ type TableSpec struct { TableConfiguration []TableSpecificConfig `tfsdk:"table_configuration" tf:"optional,object"` } +func (newState *TableSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableSpec) { +} + +func (newState *TableSpec) SyncEffectiveFieldsDuringRead(existingState TableSpec) { +} + type TableSpecificConfig struct { // The primary key of the table used to apply changes. PrimaryKeys []types.String `tfsdk:"primary_keys" tf:"optional"` @@ -845,6 +1193,16 @@ type TableSpecificConfig struct { SalesforceIncludeFormulaFields types.Bool `tfsdk:"salesforce_include_formula_fields" tf:"optional"` // The SCD type to use to ingest the table. ScdType types.String `tfsdk:"scd_type" tf:"optional"` + // The column names specifying the logical order of events in the source + // data. Delta Live Tables uses this sequencing to handle change events that + // arrive out of order. 
+ SequenceBy []types.String `tfsdk:"sequence_by" tf:"optional"` +} + +func (newState *TableSpecificConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan TableSpecificConfig) { +} + +func (newState *TableSpecificConfig) SyncEffectiveFieldsDuringRead(existingState TableSpecificConfig) { } type UpdateInfo struct { @@ -880,6 +1238,12 @@ type UpdateInfo struct { ValidateOnly types.Bool `tfsdk:"validate_only" tf:"optional"` } +func (newState *UpdateInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateInfo) { +} + +func (newState *UpdateInfo) SyncEffectiveFieldsDuringRead(existingState UpdateInfo) { +} + type UpdateStateInfo struct { CreationTime types.String `tfsdk:"creation_time" tf:"optional"` @@ -887,3 +1251,9 @@ type UpdateStateInfo struct { UpdateId types.String `tfsdk:"update_id" tf:"optional"` } + +func (newState *UpdateStateInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateStateInfo) { +} + +func (newState *UpdateStateInfo) SyncEffectiveFieldsDuringRead(existingState UpdateStateInfo) { +} diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index 502b806409..be8e5f0c2f 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -18,6 +18,12 @@ type AwsCredentials struct { StsRole []StsRole `tfsdk:"sts_role" tf:"optional,object"` } +func (newState *AwsCredentials) SyncEffectiveFieldsDuringCreateOrUpdate(plan AwsCredentials) { +} + +func (newState *AwsCredentials) SyncEffectiveFieldsDuringRead(existingState AwsCredentials) { +} + type AwsKeyInfo struct { // The AWS KMS key alias. KeyAlias types.String `tfsdk:"key_alias" tf:"optional"` @@ -32,6 +38,12 @@ type AwsKeyInfo struct { ReuseKeyForClusterVolumes types.Bool `tfsdk:"reuse_key_for_cluster_volumes" tf:"optional"` } +func (newState *AwsKeyInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan AwsKeyInfo) { +} + +func (newState *AwsKeyInfo) SyncEffectiveFieldsDuringRead(existingState AwsKeyInfo) { +} + type AzureWorkspaceInfo struct { // Azure Resource Group name ResourceGroup types.String `tfsdk:"resource_group" tf:"optional"` @@ -39,12 +51,24 @@ type AzureWorkspaceInfo struct { SubscriptionId types.String `tfsdk:"subscription_id" tf:"optional"` } +func (newState *AzureWorkspaceInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureWorkspaceInfo) { +} + +func (newState *AzureWorkspaceInfo) SyncEffectiveFieldsDuringRead(existingState AzureWorkspaceInfo) { +} + // The general workspace configurations that are specific to cloud providers. type CloudResourceContainer struct { // The general workspace configurations that are specific to Google Cloud. Gcp []CustomerFacingGcpCloudResourceContainer `tfsdk:"gcp" tf:"optional,object"` } +func (newState *CloudResourceContainer) SyncEffectiveFieldsDuringCreateOrUpdate(plan CloudResourceContainer) { +} + +func (newState *CloudResourceContainer) SyncEffectiveFieldsDuringRead(existingState CloudResourceContainer) { +} + type CreateAwsKeyInfo struct { // The AWS KMS key alias. 
KeyAlias types.String `tfsdk:"key_alias" tf:"optional"` @@ -58,21 +82,45 @@ type CreateAwsKeyInfo struct { ReuseKeyForClusterVolumes types.Bool `tfsdk:"reuse_key_for_cluster_volumes" tf:"optional"` } +func (newState *CreateAwsKeyInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAwsKeyInfo) { +} + +func (newState *CreateAwsKeyInfo) SyncEffectiveFieldsDuringRead(existingState CreateAwsKeyInfo) { +} + type CreateCredentialAwsCredentials struct { StsRole []CreateCredentialStsRole `tfsdk:"sts_role" tf:"optional,object"` } +func (newState *CreateCredentialAwsCredentials) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCredentialAwsCredentials) { +} + +func (newState *CreateCredentialAwsCredentials) SyncEffectiveFieldsDuringRead(existingState CreateCredentialAwsCredentials) { +} + type CreateCredentialRequest struct { AwsCredentials []CreateCredentialAwsCredentials `tfsdk:"aws_credentials" tf:"object"` // The human-readable name of the credential configuration object. CredentialsName types.String `tfsdk:"credentials_name" tf:""` } +func (newState *CreateCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCredentialRequest) { +} + +func (newState *CreateCredentialRequest) SyncEffectiveFieldsDuringRead(existingState CreateCredentialRequest) { +} + type CreateCredentialStsRole struct { // The Amazon Resource Name (ARN) of the cross account role. RoleArn types.String `tfsdk:"role_arn" tf:"optional"` } +func (newState *CreateCredentialStsRole) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCredentialStsRole) { +} + +func (newState *CreateCredentialStsRole) SyncEffectiveFieldsDuringRead(existingState CreateCredentialStsRole) { +} + type CreateCustomerManagedKeyRequest struct { AwsKeyInfo []CreateAwsKeyInfo `tfsdk:"aws_key_info" tf:"optional,object"` @@ -81,11 +129,23 @@ type CreateCustomerManagedKeyRequest struct { UseCases []types.String `tfsdk:"use_cases" tf:""` } +func (newState *CreateCustomerManagedKeyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCustomerManagedKeyRequest) { +} + +func (newState *CreateCustomerManagedKeyRequest) SyncEffectiveFieldsDuringRead(existingState CreateCustomerManagedKeyRequest) { +} + type CreateGcpKeyInfo struct { // The GCP KMS key's resource name KmsKeyId types.String `tfsdk:"kms_key_id" tf:""` } +func (newState *CreateGcpKeyInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateGcpKeyInfo) { +} + +func (newState *CreateGcpKeyInfo) SyncEffectiveFieldsDuringRead(existingState CreateGcpKeyInfo) { +} + type CreateNetworkRequest struct { // The Google Cloud specific information for this network (for example, the // VPC ID, subnet ID, and secondary IP ranges). @@ -108,6 +168,12 @@ type CreateNetworkRequest struct { VpcId types.String `tfsdk:"vpc_id" tf:"optional"` } +func (newState *CreateNetworkRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateNetworkRequest) { +} + +func (newState *CreateNetworkRequest) SyncEffectiveFieldsDuringRead(existingState CreateNetworkRequest) { +} + type CreateStorageConfigurationRequest struct { // Root S3 bucket information. 
RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"object"` @@ -115,6 +181,12 @@ type CreateStorageConfigurationRequest struct { StorageConfigurationName types.String `tfsdk:"storage_configuration_name" tf:""` } +func (newState *CreateStorageConfigurationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateStorageConfigurationRequest) { +} + +func (newState *CreateStorageConfigurationRequest) SyncEffectiveFieldsDuringRead(existingState CreateStorageConfigurationRequest) { +} + type CreateVpcEndpointRequest struct { // The ID of the VPC endpoint object in AWS. AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id" tf:"optional"` @@ -127,6 +199,12 @@ type CreateVpcEndpointRequest struct { VpcEndpointName types.String `tfsdk:"vpc_endpoint_name" tf:""` } +func (newState *CreateVpcEndpointRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateVpcEndpointRequest) { +} + +func (newState *CreateVpcEndpointRequest) SyncEffectiveFieldsDuringRead(existingState CreateVpcEndpointRequest) { +} + type CreateWorkspaceRequest struct { // The AWS region of the workspace's data plane. AwsRegion types.String `tfsdk:"aws_region" tf:"optional"` @@ -238,19 +316,37 @@ type CreateWorkspaceRequest struct { WorkspaceName types.String `tfsdk:"workspace_name" tf:""` } +func (newState *CreateWorkspaceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateWorkspaceRequest) { +} + +func (newState *CreateWorkspaceRequest) SyncEffectiveFieldsDuringRead(existingState CreateWorkspaceRequest) { +} + type Credential struct { // The Databricks account ID that hosts the credential. AccountId types.String `tfsdk:"account_id" tf:"optional"` AwsCredentials []AwsCredentials `tfsdk:"aws_credentials" tf:"optional,object"` // Time in epoch milliseconds when the credential was created. - CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` // Databricks credential configuration ID. CredentialsId types.String `tfsdk:"credentials_id" tf:"optional"` // The human-readable name of the credential configuration object. CredentialsName types.String `tfsdk:"credentials_name" tf:"optional"` } +func (newState *Credential) SyncEffectiveFieldsDuringCreateOrUpdate(plan Credential) { + newState.EffectiveCreationTime = newState.CreationTime + newState.CreationTime = plan.CreationTime +} + +func (newState *Credential) SyncEffectiveFieldsDuringRead(existingState Credential) { + if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { + newState.CreationTime = existingState.CreationTime + } +} + // The general workspace configurations that are specific to Google Cloud. type CustomerFacingGcpCloudResourceContainer struct { // The Google Cloud project ID, which the workspace uses to instantiate @@ -258,13 +354,20 @@ type CustomerFacingGcpCloudResourceContainer struct { ProjectId types.String `tfsdk:"project_id" tf:"optional"` } +func (newState *CustomerFacingGcpCloudResourceContainer) SyncEffectiveFieldsDuringCreateOrUpdate(plan CustomerFacingGcpCloudResourceContainer) { +} + +func (newState *CustomerFacingGcpCloudResourceContainer) SyncEffectiveFieldsDuringRead(existingState CustomerFacingGcpCloudResourceContainer) { +} + type CustomerManagedKey struct { // The Databricks account ID that holds the customer-managed key. 
AccountId types.String `tfsdk:"account_id" tf:"optional"` AwsKeyInfo []AwsKeyInfo `tfsdk:"aws_key_info" tf:"optional,object"` // Time in epoch milliseconds when the customer key was created. - CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` // ID of the encryption key configuration object. CustomerManagedKeyId types.String `tfsdk:"customer_managed_key_id" tf:"optional"` @@ -273,56 +376,121 @@ type CustomerManagedKey struct { UseCases []types.String `tfsdk:"use_cases" tf:"optional"` } +func (newState *CustomerManagedKey) SyncEffectiveFieldsDuringCreateOrUpdate(plan CustomerManagedKey) { + newState.EffectiveCreationTime = newState.CreationTime + newState.CreationTime = plan.CreationTime +} + +func (newState *CustomerManagedKey) SyncEffectiveFieldsDuringRead(existingState CustomerManagedKey) { + if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { + newState.CreationTime = existingState.CreationTime + } +} + // Delete credential configuration type DeleteCredentialRequest struct { // Databricks Account API credential configuration ID CredentialsId types.String `tfsdk:"-"` } +func (newState *DeleteCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCredentialRequest) { +} + +func (newState *DeleteCredentialRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCredentialRequest) { +} + // Delete encryption key configuration type DeleteEncryptionKeyRequest struct { // Databricks encryption key configuration ID. CustomerManagedKeyId types.String `tfsdk:"-"` } +func (newState *DeleteEncryptionKeyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteEncryptionKeyRequest) { +} + +func (newState *DeleteEncryptionKeyRequest) SyncEffectiveFieldsDuringRead(existingState DeleteEncryptionKeyRequest) { +} + // Delete a network configuration type DeleteNetworkRequest struct { // Databricks Account API network configuration ID. NetworkId types.String `tfsdk:"-"` } +func (newState *DeleteNetworkRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteNetworkRequest) { +} + +func (newState *DeleteNetworkRequest) SyncEffectiveFieldsDuringRead(existingState DeleteNetworkRequest) { +} + // Delete a private access settings object type DeletePrivateAccesRequest struct { // Databricks Account API private access settings ID. PrivateAccessSettingsId types.String `tfsdk:"-"` } +func (newState *DeletePrivateAccesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePrivateAccesRequest) { +} + +func (newState *DeletePrivateAccesRequest) SyncEffectiveFieldsDuringRead(existingState DeletePrivateAccesRequest) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + // Delete storage configuration type DeleteStorageRequest struct { // Databricks Account API storage configuration ID. StorageConfigurationId types.String `tfsdk:"-"` } +func (newState *DeleteStorageRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteStorageRequest) { +} + +func (newState *DeleteStorageRequest) SyncEffectiveFieldsDuringRead(existingState DeleteStorageRequest) { +} + // Delete VPC endpoint configuration type DeleteVpcEndpointRequest struct { // Databricks VPC endpoint ID. 
VpcEndpointId types.String `tfsdk:"-"` } +func (newState *DeleteVpcEndpointRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteVpcEndpointRequest) { +} + +func (newState *DeleteVpcEndpointRequest) SyncEffectiveFieldsDuringRead(existingState DeleteVpcEndpointRequest) { +} + // Delete a workspace type DeleteWorkspaceRequest struct { // Workspace ID. WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *DeleteWorkspaceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteWorkspaceRequest) { +} + +func (newState *DeleteWorkspaceRequest) SyncEffectiveFieldsDuringRead(existingState DeleteWorkspaceRequest) { +} + type GcpKeyInfo struct { // The GCP KMS key's resource name KmsKeyId types.String `tfsdk:"kms_key_id" tf:""` } +func (newState *GcpKeyInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpKeyInfo) { +} + +func (newState *GcpKeyInfo) SyncEffectiveFieldsDuringRead(existingState GcpKeyInfo) { +} + // The network settings for the workspace. The configurations are only for // Databricks-managed VPCs. It is ignored if you specify a customer-managed VPC // in the `network_id` field.", All the IP range configurations must be mutually @@ -358,6 +526,12 @@ type GcpManagedNetworkConfig struct { SubnetCidr types.String `tfsdk:"subnet_cidr" tf:"optional"` } +func (newState *GcpManagedNetworkConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpManagedNetworkConfig) { +} + +func (newState *GcpManagedNetworkConfig) SyncEffectiveFieldsDuringRead(existingState GcpManagedNetworkConfig) { +} + // The Google Cloud specific information for this network (for example, the VPC // ID, subnet ID, and secondary IP ranges). type GcpNetworkInfo struct { @@ -381,6 +555,12 @@ type GcpNetworkInfo struct { VpcId types.String `tfsdk:"vpc_id" tf:""` } +func (newState *GcpNetworkInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpNetworkInfo) { +} + +func (newState *GcpNetworkInfo) SyncEffectiveFieldsDuringRead(existingState GcpNetworkInfo) { +} + // The Google Cloud specific information for this Private Service Connect // endpoint. type GcpVpcEndpointInfo struct { @@ -397,48 +577,96 @@ type GcpVpcEndpointInfo struct { ServiceAttachmentId types.String `tfsdk:"service_attachment_id" tf:"optional"` } +func (newState *GcpVpcEndpointInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpVpcEndpointInfo) { +} + +func (newState *GcpVpcEndpointInfo) SyncEffectiveFieldsDuringRead(existingState GcpVpcEndpointInfo) { +} + // Get credential configuration type GetCredentialRequest struct { // Databricks Account API credential configuration ID CredentialsId types.String `tfsdk:"-"` } +func (newState *GetCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCredentialRequest) { +} + +func (newState *GetCredentialRequest) SyncEffectiveFieldsDuringRead(existingState GetCredentialRequest) { +} + // Get encryption key configuration type GetEncryptionKeyRequest struct { // Databricks encryption key configuration ID. CustomerManagedKeyId types.String `tfsdk:"-"` } +func (newState *GetEncryptionKeyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetEncryptionKeyRequest) { +} + +func (newState *GetEncryptionKeyRequest) SyncEffectiveFieldsDuringRead(existingState GetEncryptionKeyRequest) { +} + // Get a network configuration type GetNetworkRequest struct { // Databricks Account API network configuration ID. 
NetworkId types.String `tfsdk:"-"` } +func (newState *GetNetworkRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetNetworkRequest) { +} + +func (newState *GetNetworkRequest) SyncEffectiveFieldsDuringRead(existingState GetNetworkRequest) { +} + // Get a private access settings object type GetPrivateAccesRequest struct { // Databricks Account API private access settings ID. PrivateAccessSettingsId types.String `tfsdk:"-"` } +func (newState *GetPrivateAccesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPrivateAccesRequest) { +} + +func (newState *GetPrivateAccesRequest) SyncEffectiveFieldsDuringRead(existingState GetPrivateAccesRequest) { +} + // Get storage configuration type GetStorageRequest struct { // Databricks Account API storage configuration ID. StorageConfigurationId types.String `tfsdk:"-"` } +func (newState *GetStorageRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetStorageRequest) { +} + +func (newState *GetStorageRequest) SyncEffectiveFieldsDuringRead(existingState GetStorageRequest) { +} + // Get a VPC endpoint configuration type GetVpcEndpointRequest struct { // Databricks VPC endpoint ID. VpcEndpointId types.String `tfsdk:"-"` } +func (newState *GetVpcEndpointRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetVpcEndpointRequest) { +} + +func (newState *GetVpcEndpointRequest) SyncEffectiveFieldsDuringRead(existingState GetVpcEndpointRequest) { +} + // Get a workspace type GetWorkspaceRequest struct { // Workspace ID. WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *GetWorkspaceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWorkspaceRequest) { +} + +func (newState *GetWorkspaceRequest) SyncEffectiveFieldsDuringRead(existingState GetWorkspaceRequest) { +} + // The configurations for the GKE cluster of a Databricks workspace. type GkeConfig struct { // Specifies the network connectivity types for the GKE nodes and the GKE @@ -457,13 +685,21 @@ type GkeConfig struct { MasterIpRange types.String `tfsdk:"master_ip_range" tf:"optional"` } +func (newState *GkeConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan GkeConfig) { +} + +func (newState *GkeConfig) SyncEffectiveFieldsDuringRead(existingState GkeConfig) { +} + type Network struct { // The Databricks account ID associated with this network configuration. AccountId types.String `tfsdk:"account_id" tf:"optional"` // Time in epoch milliseconds when the network was created. - CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` // Array of error messages about the network configuration. - ErrorMessages []NetworkHealth `tfsdk:"error_messages" tf:"optional"` + ErrorMessages []NetworkHealth `tfsdk:"error_messages" tf:"optional"` + EffectiveErrorMessages []NetworkHealth `tfsdk:"effective_error_messages" tf:"computed,optional"` // The Google Cloud specific information for this network (for example, the // VPC ID, subnet ID, and secondary IP ranges). GcpNetworkInfo []GcpNetworkInfo `tfsdk:"gcp_network_info" tf:"optional,object"` @@ -486,13 +722,31 @@ type Network struct { // The status of this network configuration object in terms of its use in a // workspace: * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: // Broken. * `WARNED`: Warned. 
- VpcStatus types.String `tfsdk:"vpc_status" tf:"optional"` + VpcStatus types.String `tfsdk:"vpc_status" tf:"optional"` + EffectiveVpcStatus types.String `tfsdk:"effective_vpc_status" tf:"computed,optional"` // Array of warning messages about the network configuration. - WarningMessages []NetworkWarning `tfsdk:"warning_messages" tf:"optional"` + WarningMessages []NetworkWarning `tfsdk:"warning_messages" tf:"optional"` + EffectiveWarningMessages []NetworkWarning `tfsdk:"effective_warning_messages" tf:"computed,optional"` // Workspace ID associated with this network configuration. WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:"optional"` } +func (newState *Network) SyncEffectiveFieldsDuringCreateOrUpdate(plan Network) { + newState.EffectiveCreationTime = newState.CreationTime + newState.CreationTime = plan.CreationTime + newState.EffectiveVpcStatus = newState.VpcStatus + newState.VpcStatus = plan.VpcStatus +} + +func (newState *Network) SyncEffectiveFieldsDuringRead(existingState Network) { + if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { + newState.CreationTime = existingState.CreationTime + } + if existingState.EffectiveVpcStatus.ValueString() == newState.VpcStatus.ValueString() { + newState.VpcStatus = existingState.VpcStatus + } +} + type NetworkHealth struct { // Details of the error. ErrorMessage types.String `tfsdk:"error_message" tf:"optional"` @@ -501,6 +755,12 @@ type NetworkHealth struct { ErrorType types.String `tfsdk:"error_type" tf:"optional"` } +func (newState *NetworkHealth) SyncEffectiveFieldsDuringCreateOrUpdate(plan NetworkHealth) { +} + +func (newState *NetworkHealth) SyncEffectiveFieldsDuringRead(existingState NetworkHealth) { +} + // If specified, contains the VPC endpoints used to allow cluster communication // from this VPC over [AWS PrivateLink]. // @@ -514,6 +774,12 @@ type NetworkVpcEndpoints struct { RestApi []types.String `tfsdk:"rest_api" tf:""` } +func (newState *NetworkVpcEndpoints) SyncEffectiveFieldsDuringCreateOrUpdate(plan NetworkVpcEndpoints) { +} + +func (newState *NetworkVpcEndpoints) SyncEffectiveFieldsDuringRead(existingState NetworkVpcEndpoints) { +} + type NetworkWarning struct { // Details of the warning. WarningMessage types.String `tfsdk:"warning_message" tf:"optional"` @@ -522,6 +788,12 @@ type NetworkWarning struct { WarningType types.String `tfsdk:"warning_type" tf:"optional"` } +func (newState *NetworkWarning) SyncEffectiveFieldsDuringCreateOrUpdate(plan NetworkWarning) { +} + +func (newState *NetworkWarning) SyncEffectiveFieldsDuringRead(existingState NetworkWarning) { +} + type PrivateAccessSettings struct { // The Databricks account ID that hosts the credential. AccountId types.String `tfsdk:"account_id" tf:"optional"` @@ -549,20 +821,40 @@ type PrivateAccessSettings struct { Region types.String `tfsdk:"region" tf:"optional"` } +func (newState *PrivateAccessSettings) SyncEffectiveFieldsDuringCreateOrUpdate(plan PrivateAccessSettings) { +} + +func (newState *PrivateAccessSettings) SyncEffectiveFieldsDuringRead(existingState PrivateAccessSettings) { +} + type ReplaceResponse struct { } +func (newState *ReplaceResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReplaceResponse) { +} + +func (newState *ReplaceResponse) SyncEffectiveFieldsDuringRead(existingState ReplaceResponse) { +} + // Root S3 bucket information. type RootBucketInfo struct { // The name of the S3 bucket. 
BucketName types.String `tfsdk:"bucket_name" tf:"optional"` } +func (newState *RootBucketInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan RootBucketInfo) { +} + +func (newState *RootBucketInfo) SyncEffectiveFieldsDuringRead(existingState RootBucketInfo) { +} + type StorageConfiguration struct { // The Databricks account ID that hosts the credential. - AccountId types.String `tfsdk:"account_id" tf:"optional"` + AccountId types.String `tfsdk:"account_id" tf:"optional"` + EffectiveAccountId types.String `tfsdk:"effective_account_id" tf:"computed,optional"` // Time in epoch milliseconds when the storage configuration was created. - CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` // Root S3 bucket information. RootBucketInfo []RootBucketInfo `tfsdk:"root_bucket_info" tf:"optional,object"` // Databricks storage configuration ID. @@ -571,6 +863,22 @@ type StorageConfiguration struct { StorageConfigurationName types.String `tfsdk:"storage_configuration_name" tf:"optional"` } +func (newState *StorageConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan StorageConfiguration) { + newState.EffectiveAccountId = newState.AccountId + newState.AccountId = plan.AccountId + newState.EffectiveCreationTime = newState.CreationTime + newState.CreationTime = plan.CreationTime +} + +func (newState *StorageConfiguration) SyncEffectiveFieldsDuringRead(existingState StorageConfiguration) { + if existingState.EffectiveAccountId.ValueString() == newState.AccountId.ValueString() { + newState.AccountId = existingState.AccountId + } + if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { + newState.CreationTime = existingState.CreationTime + } +} + type StsRole struct { // The external ID that needs to be trusted by the cross-account role. This // is always your Databricks account ID. @@ -579,9 +887,21 @@ type StsRole struct { RoleArn types.String `tfsdk:"role_arn" tf:"optional"` } +func (newState *StsRole) SyncEffectiveFieldsDuringCreateOrUpdate(plan StsRole) { +} + +func (newState *StsRole) SyncEffectiveFieldsDuringRead(existingState StsRole) { +} + type UpdateResponse struct { } +func (newState *UpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateResponse) { +} + +func (newState *UpdateResponse) SyncEffectiveFieldsDuringRead(existingState UpdateResponse) { +} + type UpdateWorkspaceRequest struct { // The AWS region of the workspace's data plane (for example, `us-west-2`). // This parameter is available only for updating failed workspaces. @@ -614,6 +934,12 @@ type UpdateWorkspaceRequest struct { WorkspaceId types.Int64 `tfsdk:"-"` } +func (newState *UpdateWorkspaceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateWorkspaceRequest) { +} + +func (newState *UpdateWorkspaceRequest) SyncEffectiveFieldsDuringRead(existingState UpdateWorkspaceRequest) { +} + type UpsertPrivateAccessSettingsRequest struct { // An array of Databricks VPC endpoint IDs. 
This is the Databricks ID that // is returned when registering the VPC endpoint configuration in your @@ -652,6 +978,12 @@ type UpsertPrivateAccessSettingsRequest struct { Region types.String `tfsdk:"region" tf:""` } +func (newState *UpsertPrivateAccessSettingsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpsertPrivateAccessSettingsRequest) { +} + +func (newState *UpsertPrivateAccessSettingsRequest) SyncEffectiveFieldsDuringRead(existingState UpsertPrivateAccessSettingsRequest) { +} + type VpcEndpoint struct { // The Databricks account ID that hosts the VPC endpoint configuration. AccountId types.String `tfsdk:"account_id" tf:"optional"` @@ -690,6 +1022,12 @@ type VpcEndpoint struct { VpcEndpointName types.String `tfsdk:"vpc_endpoint_name" tf:"optional"` } +func (newState *VpcEndpoint) SyncEffectiveFieldsDuringCreateOrUpdate(plan VpcEndpoint) { +} + +func (newState *VpcEndpoint) SyncEffectiveFieldsDuringRead(existingState VpcEndpoint) { +} + type Workspace struct { // Databricks account ID. AccountId types.String `tfsdk:"account_id" tf:"optional"` @@ -703,7 +1041,8 @@ type Workspace struct { // providers. CloudResourceContainer []CloudResourceContainer `tfsdk:"cloud_resource_container" tf:"optional,object"` // Time in epoch milliseconds when the workspace was created. - CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` // ID of the workspace's credential configuration object. CredentialsId types.String `tfsdk:"credentials_id" tf:"optional"` // The custom tags key-value pairing that is attached to this workspace. The @@ -779,7 +1118,30 @@ type Workspace struct { // The status of the workspace. For workspace creation, usually it is set to // `PROVISIONING` initially. Continue to check the status until the status // is `RUNNING`. - WorkspaceStatus types.String `tfsdk:"workspace_status" tf:"optional"` + WorkspaceStatus types.String `tfsdk:"workspace_status" tf:"optional"` + EffectiveWorkspaceStatus types.String `tfsdk:"effective_workspace_status" tf:"computed,optional"` // Message describing the current workspace status. 
- WorkspaceStatusMessage types.String `tfsdk:"workspace_status_message" tf:"optional"` + WorkspaceStatusMessage types.String `tfsdk:"workspace_status_message" tf:"optional"` + EffectiveWorkspaceStatusMessage types.String `tfsdk:"effective_workspace_status_message" tf:"computed,optional"` +} + +func (newState *Workspace) SyncEffectiveFieldsDuringCreateOrUpdate(plan Workspace) { + newState.EffectiveCreationTime = newState.CreationTime + newState.CreationTime = plan.CreationTime + newState.EffectiveWorkspaceStatus = newState.WorkspaceStatus + newState.WorkspaceStatus = plan.WorkspaceStatus + newState.EffectiveWorkspaceStatusMessage = newState.WorkspaceStatusMessage + newState.WorkspaceStatusMessage = plan.WorkspaceStatusMessage +} + +func (newState *Workspace) SyncEffectiveFieldsDuringRead(existingState Workspace) { + if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { + newState.CreationTime = existingState.CreationTime + } + if existingState.EffectiveWorkspaceStatus.ValueString() == newState.WorkspaceStatus.ValueString() { + newState.WorkspaceStatus = existingState.WorkspaceStatus + } + if existingState.EffectiveWorkspaceStatusMessage.ValueString() == newState.WorkspaceStatusMessage.ValueString() { + newState.WorkspaceStatusMessage = existingState.WorkspaceStatusMessage + } } diff --git a/internal/service/serving_tf/model.go b/internal/service/serving_tf/model.go index 7e6bdee0a1..940de56ad4 100755 --- a/internal/service/serving_tf/model.go +++ b/internal/service/serving_tf/model.go @@ -30,6 +30,12 @@ type Ai21LabsConfig struct { Ai21labsApiKeyPlaintext types.String `tfsdk:"ai21labs_api_key_plaintext" tf:"optional"` } +func (newState *Ai21LabsConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan Ai21LabsConfig) { +} + +func (newState *Ai21LabsConfig) SyncEffectiveFieldsDuringRead(existingState Ai21LabsConfig) { +} + type AiGatewayConfig struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. @@ -46,6 +52,12 @@ type AiGatewayConfig struct { UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"` } +func (newState *AiGatewayConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan AiGatewayConfig) { +} + +func (newState *AiGatewayConfig) SyncEffectiveFieldsDuringRead(existingState AiGatewayConfig) { +} + type AiGatewayGuardrailParameters struct { // List of invalid keywords. AI guardrail uses keyword or string matching to // decide if the keyword exists in the request or response content. @@ -59,6 +71,12 @@ type AiGatewayGuardrailParameters struct { ValidTopics []types.String `tfsdk:"valid_topics" tf:"optional"` } +func (newState *AiGatewayGuardrailParameters) SyncEffectiveFieldsDuringCreateOrUpdate(plan AiGatewayGuardrailParameters) { +} + +func (newState *AiGatewayGuardrailParameters) SyncEffectiveFieldsDuringRead(existingState AiGatewayGuardrailParameters) { +} + type AiGatewayGuardrailPiiBehavior struct { // Behavior for PII filter. Currently only 'BLOCK' is supported. 
If 'BLOCK' // is set for the input guardrail and the request contains PII, the request @@ -69,6 +87,12 @@ type AiGatewayGuardrailPiiBehavior struct { Behavior types.String `tfsdk:"behavior" tf:""` } +func (newState *AiGatewayGuardrailPiiBehavior) SyncEffectiveFieldsDuringCreateOrUpdate(plan AiGatewayGuardrailPiiBehavior) { +} + +func (newState *AiGatewayGuardrailPiiBehavior) SyncEffectiveFieldsDuringRead(existingState AiGatewayGuardrailPiiBehavior) { +} + type AiGatewayGuardrails struct { // Configuration for input guardrail filters. Input []AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional,object"` @@ -76,6 +100,12 @@ type AiGatewayGuardrails struct { Output []AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional,object"` } +func (newState *AiGatewayGuardrails) SyncEffectiveFieldsDuringCreateOrUpdate(plan AiGatewayGuardrails) { +} + +func (newState *AiGatewayGuardrails) SyncEffectiveFieldsDuringRead(existingState AiGatewayGuardrails) { +} + type AiGatewayInferenceTableConfig struct { // The name of the catalog in Unity Catalog. Required when enabling // inference tables. NOTE: On update, you have to disable inference table @@ -92,6 +122,12 @@ type AiGatewayInferenceTableConfig struct { TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"` } +func (newState *AiGatewayInferenceTableConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan AiGatewayInferenceTableConfig) { +} + +func (newState *AiGatewayInferenceTableConfig) SyncEffectiveFieldsDuringRead(existingState AiGatewayInferenceTableConfig) { +} + type AiGatewayRateLimit struct { // Used to specify how many calls are allowed for a key within the // renewal_period. @@ -104,11 +140,23 @@ type AiGatewayRateLimit struct { RenewalPeriod types.String `tfsdk:"renewal_period" tf:""` } +func (newState *AiGatewayRateLimit) SyncEffectiveFieldsDuringCreateOrUpdate(plan AiGatewayRateLimit) { +} + +func (newState *AiGatewayRateLimit) SyncEffectiveFieldsDuringRead(existingState AiGatewayRateLimit) { +} + type AiGatewayUsageTrackingConfig struct { // Whether to enable usage tracking. Enabled types.Bool `tfsdk:"enabled" tf:"optional"` } +func (newState *AiGatewayUsageTrackingConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan AiGatewayUsageTrackingConfig) { +} + +func (newState *AiGatewayUsageTrackingConfig) SyncEffectiveFieldsDuringRead(existingState AiGatewayUsageTrackingConfig) { +} + type AmazonBedrockConfig struct { // The Databricks secret key reference for an AWS access key ID with // permissions to interact with Bedrock services. If you prefer to paste @@ -143,6 +191,12 @@ type AmazonBedrockConfig struct { BedrockProvider types.String `tfsdk:"bedrock_provider" tf:""` } +func (newState *AmazonBedrockConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan AmazonBedrockConfig) { +} + +func (newState *AmazonBedrockConfig) SyncEffectiveFieldsDuringRead(existingState AmazonBedrockConfig) { +} + type AnthropicConfig struct { // The Databricks secret key reference for an Anthropic API key. If you // prefer to paste your API key directly, see `anthropic_api_key_plaintext`. @@ -156,6 +210,12 @@ type AnthropicConfig struct { AnthropicApiKeyPlaintext types.String `tfsdk:"anthropic_api_key_plaintext" tf:"optional"` } +func (newState *AnthropicConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan AnthropicConfig) { +} + +func (newState *AnthropicConfig) SyncEffectiveFieldsDuringRead(existingState AnthropicConfig) { +} + type AutoCaptureConfigInput struct { // The name of the catalog in Unity Catalog. 
NOTE: On update, you cannot // change the catalog name if the inference table is already enabled. @@ -170,6 +230,12 @@ type AutoCaptureConfigInput struct { TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"` } +func (newState *AutoCaptureConfigInput) SyncEffectiveFieldsDuringCreateOrUpdate(plan AutoCaptureConfigInput) { +} + +func (newState *AutoCaptureConfigInput) SyncEffectiveFieldsDuringRead(existingState AutoCaptureConfigInput) { +} + type AutoCaptureConfigOutput struct { // The name of the catalog in Unity Catalog. CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` @@ -183,10 +249,22 @@ type AutoCaptureConfigOutput struct { TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"` } +func (newState *AutoCaptureConfigOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan AutoCaptureConfigOutput) { +} + +func (newState *AutoCaptureConfigOutput) SyncEffectiveFieldsDuringRead(existingState AutoCaptureConfigOutput) { +} + type AutoCaptureState struct { PayloadTable []PayloadTable `tfsdk:"payload_table" tf:"optional,object"` } +func (newState *AutoCaptureState) SyncEffectiveFieldsDuringCreateOrUpdate(plan AutoCaptureState) { +} + +func (newState *AutoCaptureState) SyncEffectiveFieldsDuringRead(existingState AutoCaptureState) { +} + // Get build logs for a served model type BuildLogsRequest struct { // The name of the serving endpoint that the served model belongs to. This @@ -197,11 +275,23 @@ type BuildLogsRequest struct { ServedModelName types.String `tfsdk:"-"` } +func (newState *BuildLogsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan BuildLogsRequest) { +} + +func (newState *BuildLogsRequest) SyncEffectiveFieldsDuringRead(existingState BuildLogsRequest) { +} + type BuildLogsResponse struct { // The logs associated with building the served entity's environment. Logs types.String `tfsdk:"logs" tf:""` } +func (newState *BuildLogsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan BuildLogsResponse) { +} + +func (newState *BuildLogsResponse) SyncEffectiveFieldsDuringRead(existingState BuildLogsResponse) { +} + type ChatMessage struct { // The content of the message. Content types.String `tfsdk:"content" tf:"optional"` @@ -209,6 +299,12 @@ type ChatMessage struct { Role types.String `tfsdk:"role" tf:"optional"` } +func (newState *ChatMessage) SyncEffectiveFieldsDuringCreateOrUpdate(plan ChatMessage) { +} + +func (newState *ChatMessage) SyncEffectiveFieldsDuringRead(existingState ChatMessage) { +} + type CohereConfig struct { // This is an optional field to provide a customized base URL for the Cohere // API. If left unspecified, the standard Cohere base URL is used. @@ -225,6 +321,12 @@ type CohereConfig struct { CohereApiKeyPlaintext types.String `tfsdk:"cohere_api_key_plaintext" tf:"optional"` } +func (newState *CohereConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan CohereConfig) { +} + +func (newState *CohereConfig) SyncEffectiveFieldsDuringRead(existingState CohereConfig) { +} + type CreateServingEndpoint struct { // The AI Gateway configuration for the serving endpoint. NOTE: only // external model endpoints are supported as of now. 
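Note: the non-trivial Sync implementations for the provisioning models earlier in this patch (Network, StorageConfiguration, Workspace) all follow the same effective-field shape; the serving and settings models below only get no-op hooks. A minimal, hypothetical sketch of that pattern (ExampleThing is not part of this change; only the terraform-plugin-framework types package is assumed):

    package example_tf // hypothetical package, for illustration only

    import "github.com/hashicorp/terraform-plugin-framework/types"

    type ExampleThing struct {
    	// User-settable field whose value the server may normalize or assign.
    	CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"`
    	// Computed companion recording the value the server actually uses.
    	EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"`
    }

    // After create/update: stash the server-returned value in the computed
    // effective_* field, then put the planned value back so the practitioner's
    // configuration is preserved in state.
    func (newState *ExampleThing) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExampleThing) {
    	newState.EffectiveCreationTime = newState.CreationTime
    	newState.CreationTime = plan.CreationTime
    }

    // During read: if the server still reports the value recorded as effective,
    // keep the previously stored value so Terraform does not show spurious drift.
    func (newState *ExampleThing) SyncEffectiveFieldsDuringRead(existingState ExampleThing) {
    	if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() {
    		newState.CreationTime = existingState.CreationTime
    	}
    }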
@@ -245,6 +347,12 @@ type CreateServingEndpoint struct { Tags []EndpointTag `tfsdk:"tags" tf:"optional"` } +func (newState *CreateServingEndpoint) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateServingEndpoint) { +} + +func (newState *CreateServingEndpoint) SyncEffectiveFieldsDuringRead(existingState CreateServingEndpoint) { +} + type DatabricksModelServingConfig struct { // The Databricks secret key reference for a Databricks API token that // corresponds to a user or service principal with Can Query access to the @@ -265,6 +373,12 @@ type DatabricksModelServingConfig struct { DatabricksWorkspaceUrl types.String `tfsdk:"databricks_workspace_url" tf:""` } +func (newState *DatabricksModelServingConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan DatabricksModelServingConfig) { +} + +func (newState *DatabricksModelServingConfig) SyncEffectiveFieldsDuringRead(existingState DatabricksModelServingConfig) { +} + type DataframeSplitInput struct { Columns []any `tfsdk:"columns" tf:"optional"` @@ -273,15 +387,33 @@ type DataframeSplitInput struct { Index []types.Int64 `tfsdk:"index" tf:"optional"` } +func (newState *DataframeSplitInput) SyncEffectiveFieldsDuringCreateOrUpdate(plan DataframeSplitInput) { +} + +func (newState *DataframeSplitInput) SyncEffectiveFieldsDuringRead(existingState DataframeSplitInput) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + // Delete a serving endpoint type DeleteServingEndpointRequest struct { // The name of the serving endpoint. This field is required. Name types.String `tfsdk:"-"` } +func (newState *DeleteServingEndpointRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteServingEndpointRequest) { +} + +func (newState *DeleteServingEndpointRequest) SyncEffectiveFieldsDuringRead(existingState DeleteServingEndpointRequest) { +} + type EmbeddingsV1ResponseEmbeddingElement struct { Embedding []types.Float64 `tfsdk:"embedding" tf:"optional"` // The index of the embedding in the response. @@ -290,6 +422,12 @@ type EmbeddingsV1ResponseEmbeddingElement struct { Object types.String `tfsdk:"object" tf:"optional"` } +func (newState *EmbeddingsV1ResponseEmbeddingElement) SyncEffectiveFieldsDuringCreateOrUpdate(plan EmbeddingsV1ResponseEmbeddingElement) { +} + +func (newState *EmbeddingsV1ResponseEmbeddingElement) SyncEffectiveFieldsDuringRead(existingState EmbeddingsV1ResponseEmbeddingElement) { +} + type EndpointCoreConfigInput struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. @@ -307,6 +445,12 @@ type EndpointCoreConfigInput struct { TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"` } +func (newState *EndpointCoreConfigInput) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointCoreConfigInput) { +} + +func (newState *EndpointCoreConfigInput) SyncEffectiveFieldsDuringRead(existingState EndpointCoreConfigInput) { +} + type EndpointCoreConfigOutput struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. 
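A hedged usage sketch of the read-time reconciliation, reusing StorageConfiguration from the provisioning models earlier in this patch; the file placement, values, and scenario are hypothetical, but the method body exercised is the generated one shown above:

    package provisioning_tf // assumed package name, placed in a test file next to the generated models

    import (
    	"fmt"

    	"github.com/hashicorp/terraform-plugin-framework/types"
    )

    func Example_syncEffectiveFieldsDuringRead() {
    	existing := StorageConfiguration{
    		AccountId:          types.StringValue("abc"),          // value as configured by the user
    		EffectiveAccountId: types.StringValue("abc-resolved"), // value the API reported at create time
    	}
    	fresh := StorageConfiguration{
    		AccountId: types.StringValue("abc-resolved"), // API still reports the same value on read
    	}
    	// Because the freshly read value matches the recorded effective value,
    	// the user-configured value is kept and no diff is surfaced.
    	fresh.SyncEffectiveFieldsDuringRead(existing)
    	fmt.Println(fresh.AccountId.ValueString())
    	// Output: abc
    }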
@@ -322,6 +466,12 @@ type EndpointCoreConfigOutput struct { TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"` } +func (newState *EndpointCoreConfigOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointCoreConfigOutput) { +} + +func (newState *EndpointCoreConfigOutput) SyncEffectiveFieldsDuringRead(existingState EndpointCoreConfigOutput) { +} + type EndpointCoreConfigSummary struct { // The list of served entities under the serving endpoint config. ServedEntities []ServedEntitySpec `tfsdk:"served_entities" tf:"optional"` @@ -330,6 +480,12 @@ type EndpointCoreConfigSummary struct { ServedModels []ServedModelSpec `tfsdk:"served_models" tf:"optional"` } +func (newState *EndpointCoreConfigSummary) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointCoreConfigSummary) { +} + +func (newState *EndpointCoreConfigSummary) SyncEffectiveFieldsDuringRead(existingState EndpointCoreConfigSummary) { +} + type EndpointPendingConfig struct { // Configuration for Inference Tables which automatically logs requests and // responses to Unity Catalog. @@ -349,6 +505,12 @@ type EndpointPendingConfig struct { TrafficConfig []TrafficConfig `tfsdk:"traffic_config" tf:"optional,object"` } +func (newState *EndpointPendingConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointPendingConfig) { +} + +func (newState *EndpointPendingConfig) SyncEffectiveFieldsDuringRead(existingState EndpointPendingConfig) { +} + type EndpointState struct { // The state of an endpoint's config update. This informs the user if the // pending_config is in progress, if the update failed, or if there is no @@ -363,6 +525,12 @@ type EndpointState struct { Ready types.String `tfsdk:"ready" tf:"optional"` } +func (newState *EndpointState) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointState) { +} + +func (newState *EndpointState) SyncEffectiveFieldsDuringRead(existingState EndpointState) { +} + type EndpointTag struct { // Key field for a serving endpoint tag. Key types.String `tfsdk:"key" tf:""` @@ -370,6 +538,12 @@ type EndpointTag struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *EndpointTag) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointTag) { +} + +func (newState *EndpointTag) SyncEffectiveFieldsDuringRead(existingState EndpointTag) { +} + // Get metrics of a serving endpoint type ExportMetricsRequest struct { // The name of the serving endpoint to retrieve metrics for. This field is @@ -377,10 +551,22 @@ type ExportMetricsRequest struct { Name types.String `tfsdk:"-"` } +func (newState *ExportMetricsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExportMetricsRequest) { +} + +func (newState *ExportMetricsRequest) SyncEffectiveFieldsDuringRead(existingState ExportMetricsRequest) { +} + type ExportMetricsResponse struct { Contents io.ReadCloser `tfsdk:"-"` } +func (newState *ExportMetricsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExportMetricsResponse) { +} + +func (newState *ExportMetricsResponse) SyncEffectiveFieldsDuringRead(existingState ExportMetricsResponse) { +} + type ExternalModel struct { // AI21Labs Config. Only required if the provider is 'ai21labs'. 
Ai21labsConfig []Ai21LabsConfig `tfsdk:"ai21labs_config" tf:"optional,object"` @@ -411,6 +597,12 @@ type ExternalModel struct { Task types.String `tfsdk:"task" tf:""` } +func (newState *ExternalModel) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExternalModel) { +} + +func (newState *ExternalModel) SyncEffectiveFieldsDuringRead(existingState ExternalModel) { +} + type ExternalModelUsageElement struct { // The number of tokens in the chat/completions response. CompletionTokens types.Int64 `tfsdk:"completion_tokens" tf:"optional"` @@ -420,6 +612,12 @@ type ExternalModelUsageElement struct { TotalTokens types.Int64 `tfsdk:"total_tokens" tf:"optional"` } +func (newState *ExternalModelUsageElement) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExternalModelUsageElement) { +} + +func (newState *ExternalModelUsageElement) SyncEffectiveFieldsDuringRead(existingState ExternalModelUsageElement) { +} + type FoundationModel struct { // The description of the foundation model. Description types.String `tfsdk:"description" tf:"optional"` @@ -431,6 +629,12 @@ type FoundationModel struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *FoundationModel) SyncEffectiveFieldsDuringCreateOrUpdate(plan FoundationModel) { +} + +func (newState *FoundationModel) SyncEffectiveFieldsDuringRead(existingState FoundationModel) { +} + // Get the schema for a serving endpoint type GetOpenApiRequest struct { // The name of the serving endpoint that the served model belongs to. This @@ -438,34 +642,70 @@ type GetOpenApiRequest struct { Name types.String `tfsdk:"-"` } +func (newState *GetOpenApiRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetOpenApiRequest) { +} + +func (newState *GetOpenApiRequest) SyncEffectiveFieldsDuringRead(existingState GetOpenApiRequest) { +} + // The response is an OpenAPI spec in JSON format that typically includes fields // like openapi, info, servers and paths, etc. type GetOpenApiResponse struct { } +func (newState *GetOpenApiResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetOpenApiResponse) { +} + +func (newState *GetOpenApiResponse) SyncEffectiveFieldsDuringRead(existingState GetOpenApiResponse) { +} + // Get serving endpoint permission levels type GetServingEndpointPermissionLevelsRequest struct { // The serving endpoint for which to get or manage permissions. ServingEndpointId types.String `tfsdk:"-"` } +func (newState *GetServingEndpointPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetServingEndpointPermissionLevelsRequest) { +} + +func (newState *GetServingEndpointPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetServingEndpointPermissionLevelsRequest) { +} + type GetServingEndpointPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []ServingEndpointPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetServingEndpointPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetServingEndpointPermissionLevelsResponse) { +} + +func (newState *GetServingEndpointPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetServingEndpointPermissionLevelsResponse) { +} + // Get serving endpoint permissions type GetServingEndpointPermissionsRequest struct { // The serving endpoint for which to get or manage permissions. 
ServingEndpointId types.String `tfsdk:"-"` } +func (newState *GetServingEndpointPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetServingEndpointPermissionsRequest) { +} + +func (newState *GetServingEndpointPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetServingEndpointPermissionsRequest) { +} + // Get a single serving endpoint type GetServingEndpointRequest struct { // The name of the serving endpoint. This field is required. Name types.String `tfsdk:"-"` } +func (newState *GetServingEndpointRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetServingEndpointRequest) { +} + +func (newState *GetServingEndpointRequest) SyncEffectiveFieldsDuringRead(existingState GetServingEndpointRequest) { +} + type GoogleCloudVertexAiConfig struct { // The Databricks secret key reference for a private key for the service // account which has access to the Google Cloud Vertex AI Service. See [Best @@ -496,11 +736,23 @@ type GoogleCloudVertexAiConfig struct { Region types.String `tfsdk:"region" tf:"optional"` } +func (newState *GoogleCloudVertexAiConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan GoogleCloudVertexAiConfig) { +} + +func (newState *GoogleCloudVertexAiConfig) SyncEffectiveFieldsDuringRead(existingState GoogleCloudVertexAiConfig) { +} + type ListEndpointsResponse struct { // The list of endpoints. Endpoints []ServingEndpoint `tfsdk:"endpoints" tf:"optional"` } +func (newState *ListEndpointsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListEndpointsResponse) { +} + +func (newState *ListEndpointsResponse) SyncEffectiveFieldsDuringRead(existingState ListEndpointsResponse) { +} + // Get the latest logs for a served model type LogsRequest struct { // The name of the serving endpoint that the served model belongs to. This @@ -511,11 +763,23 @@ type LogsRequest struct { ServedModelName types.String `tfsdk:"-"` } +func (newState *LogsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan LogsRequest) { +} + +func (newState *LogsRequest) SyncEffectiveFieldsDuringRead(existingState LogsRequest) { +} + type ModelDataPlaneInfo struct { // Information required to query DataPlane API 'query' endpoint. QueryInfo oauth2.DataPlaneInfo `tfsdk:"query_info" tf:"optional,object"` } +func (newState *ModelDataPlaneInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ModelDataPlaneInfo) { +} + +func (newState *ModelDataPlaneInfo) SyncEffectiveFieldsDuringRead(existingState ModelDataPlaneInfo) { +} + type OpenAiConfig struct { // This field is only required for Azure AD OpenAI and is the Microsoft // Entra Client ID. @@ -570,6 +834,12 @@ type OpenAiConfig struct { OpenaiOrganization types.String `tfsdk:"openai_organization" tf:"optional"` } +func (newState *OpenAiConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan OpenAiConfig) { +} + +func (newState *OpenAiConfig) SyncEffectiveFieldsDuringRead(existingState OpenAiConfig) { +} + type PaLmConfig struct { // The Databricks secret key reference for a PaLM API key. If you prefer to // paste your API key directly, see `palm_api_key_plaintext`. 
You must @@ -583,6 +853,12 @@ type PaLmConfig struct { PalmApiKeyPlaintext types.String `tfsdk:"palm_api_key_plaintext" tf:"optional"` } +func (newState *PaLmConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan PaLmConfig) { +} + +func (newState *PaLmConfig) SyncEffectiveFieldsDuringRead(existingState PaLmConfig) { +} + type PatchServingEndpointTags struct { // List of endpoint tags to add AddTags []EndpointTag `tfsdk:"add_tags" tf:"optional"` @@ -593,6 +869,12 @@ type PatchServingEndpointTags struct { Name types.String `tfsdk:"-"` } +func (newState *PatchServingEndpointTags) SyncEffectiveFieldsDuringCreateOrUpdate(plan PatchServingEndpointTags) { +} + +func (newState *PatchServingEndpointTags) SyncEffectiveFieldsDuringRead(existingState PatchServingEndpointTags) { +} + type PayloadTable struct { // The name of the payload table. Name types.String `tfsdk:"name" tf:"optional"` @@ -602,6 +884,12 @@ type PayloadTable struct { StatusMessage types.String `tfsdk:"status_message" tf:"optional"` } +func (newState *PayloadTable) SyncEffectiveFieldsDuringCreateOrUpdate(plan PayloadTable) { +} + +func (newState *PayloadTable) SyncEffectiveFieldsDuringRead(existingState PayloadTable) { +} + // Update AI Gateway of a serving endpoint type PutAiGatewayRequest struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data @@ -622,6 +910,12 @@ type PutAiGatewayRequest struct { UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"` } +func (newState *PutAiGatewayRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutAiGatewayRequest) { +} + +func (newState *PutAiGatewayRequest) SyncEffectiveFieldsDuringRead(existingState PutAiGatewayRequest) { +} + type PutAiGatewayResponse struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. @@ -638,6 +932,12 @@ type PutAiGatewayResponse struct { UsageTrackingConfig []AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional,object"` } +func (newState *PutAiGatewayResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutAiGatewayResponse) { +} + +func (newState *PutAiGatewayResponse) SyncEffectiveFieldsDuringRead(existingState PutAiGatewayResponse) { +} + // Update rate limits of a serving endpoint type PutRequest struct { // The name of the serving endpoint whose rate limits are being updated. @@ -647,11 +947,23 @@ type PutRequest struct { RateLimits []RateLimit `tfsdk:"rate_limits" tf:"optional"` } +func (newState *PutRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutRequest) { +} + +func (newState *PutRequest) SyncEffectiveFieldsDuringRead(existingState PutRequest) { +} + type PutResponse struct { // The list of endpoint rate limits. RateLimits []RateLimit `tfsdk:"rate_limits" tf:"optional"` } +func (newState *PutResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutResponse) { +} + +func (newState *PutResponse) SyncEffectiveFieldsDuringRead(existingState PutResponse) { +} + type QueryEndpointInput struct { // Pandas Dataframe input in the records orientation. 
DataframeRecords []any `tfsdk:"dataframe_records" tf:"optional"` @@ -704,6 +1016,12 @@ type QueryEndpointInput struct { Temperature types.Float64 `tfsdk:"temperature" tf:"optional"` } +func (newState *QueryEndpointInput) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryEndpointInput) { +} + +func (newState *QueryEndpointInput) SyncEffectiveFieldsDuringRead(existingState QueryEndpointInput) { +} + type QueryEndpointResponse struct { // The list of choices returned by the __chat or completions // external/foundation model__ serving endpoint. @@ -735,6 +1053,12 @@ type QueryEndpointResponse struct { Usage []ExternalModelUsageElement `tfsdk:"usage" tf:"optional,object"` } +func (newState *QueryEndpointResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryEndpointResponse) { +} + +func (newState *QueryEndpointResponse) SyncEffectiveFieldsDuringRead(existingState QueryEndpointResponse) { +} + type RateLimit struct { // Used to specify how many calls are allowed for a key within the // renewal_period. @@ -748,6 +1072,12 @@ type RateLimit struct { RenewalPeriod types.String `tfsdk:"renewal_period" tf:""` } +func (newState *RateLimit) SyncEffectiveFieldsDuringCreateOrUpdate(plan RateLimit) { +} + +func (newState *RateLimit) SyncEffectiveFieldsDuringRead(existingState RateLimit) { +} + type Route struct { // The name of the served model this route configures traffic for. ServedModelName types.String `tfsdk:"served_model_name" tf:""` @@ -756,6 +1086,12 @@ type Route struct { TrafficPercentage types.Int64 `tfsdk:"traffic_percentage" tf:""` } +func (newState *Route) SyncEffectiveFieldsDuringCreateOrUpdate(plan Route) { +} + +func (newState *Route) SyncEffectiveFieldsDuringRead(existingState Route) { +} + type ServedEntityInput struct { // The name of the entity to be served. The entity may be a model in the // Databricks Model Registry, a model in the Unity Catalog (UC), or a @@ -817,6 +1153,12 @@ type ServedEntityInput struct { WorkloadType types.String `tfsdk:"workload_type" tf:"optional"` } +func (newState *ServedEntityInput) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServedEntityInput) { +} + +func (newState *ServedEntityInput) SyncEffectiveFieldsDuringRead(existingState ServedEntityInput) { +} + type ServedEntityOutput struct { // The creation timestamp of the served entity in Unix time. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` @@ -880,6 +1222,12 @@ type ServedEntityOutput struct { WorkloadType types.String `tfsdk:"workload_type" tf:"optional"` } +func (newState *ServedEntityOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServedEntityOutput) { +} + +func (newState *ServedEntityOutput) SyncEffectiveFieldsDuringRead(existingState ServedEntityOutput) { +} + type ServedEntitySpec struct { // The name of the entity served. The entity may be a model in the // Databricks Model Registry, a model in the Unity Catalog (UC), or a @@ -902,6 +1250,12 @@ type ServedEntitySpec struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *ServedEntitySpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServedEntitySpec) { +} + +func (newState *ServedEntitySpec) SyncEffectiveFieldsDuringRead(existingState ServedEntitySpec) { +} + type ServedModelInput struct { // An object containing a set of optional, user-specified environment // variable key-value pairs used for serving this model. 
Note: this is an @@ -950,6 +1304,12 @@ type ServedModelInput struct { WorkloadType types.String `tfsdk:"workload_type" tf:"optional"` } +func (newState *ServedModelInput) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServedModelInput) { +} + +func (newState *ServedModelInput) SyncEffectiveFieldsDuringRead(existingState ServedModelInput) { +} + type ServedModelOutput struct { // The creation timestamp of the served model in Unix time. CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` @@ -996,6 +1356,12 @@ type ServedModelOutput struct { WorkloadType types.String `tfsdk:"workload_type" tf:"optional"` } +func (newState *ServedModelOutput) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServedModelOutput) { +} + +func (newState *ServedModelOutput) SyncEffectiveFieldsDuringRead(existingState ServedModelOutput) { +} + type ServedModelSpec struct { // The name of the model in Databricks Model Registry or the full name of // the model in Unity Catalog. @@ -1007,6 +1373,12 @@ type ServedModelSpec struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *ServedModelSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServedModelSpec) { +} + +func (newState *ServedModelSpec) SyncEffectiveFieldsDuringRead(existingState ServedModelSpec) { +} + type ServedModelState struct { // The state of the served entity deployment. DEPLOYMENT_CREATING indicates // that the served entity is not ready yet because the deployment is still @@ -1025,12 +1397,24 @@ type ServedModelState struct { DeploymentStateMessage types.String `tfsdk:"deployment_state_message" tf:"optional"` } +func (newState *ServedModelState) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServedModelState) { +} + +func (newState *ServedModelState) SyncEffectiveFieldsDuringRead(existingState ServedModelState) { +} + type ServerLogsResponse struct { // The most recent log lines of the model server processing invocation // requests. Logs types.String `tfsdk:"logs" tf:""` } +func (newState *ServerLogsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServerLogsResponse) { +} + +func (newState *ServerLogsResponse) SyncEffectiveFieldsDuringRead(existingState ServerLogsResponse) { +} + type ServingEndpoint struct { // The AI Gateway configuration for the serving endpoint. NOTE: Only // external model endpoints are currently supported. @@ -1056,6 +1440,12 @@ type ServingEndpoint struct { Task types.String `tfsdk:"task" tf:"optional"` } +func (newState *ServingEndpoint) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServingEndpoint) { +} + +func (newState *ServingEndpoint) SyncEffectiveFieldsDuringRead(existingState ServingEndpoint) { +} + type ServingEndpointAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -1067,6 +1457,12 @@ type ServingEndpointAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *ServingEndpointAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServingEndpointAccessControlRequest) { +} + +func (newState *ServingEndpointAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState ServingEndpointAccessControlRequest) { +} + type ServingEndpointAccessControlResponse struct { // All permissions. 
AllPermissions []ServingEndpointPermission `tfsdk:"all_permissions" tf:"optional"` @@ -1080,6 +1476,12 @@ type ServingEndpointAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *ServingEndpointAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServingEndpointAccessControlResponse) { +} + +func (newState *ServingEndpointAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState ServingEndpointAccessControlResponse) { +} + type ServingEndpointDetailed struct { // The AI Gateway configuration for the serving endpoint. NOTE: Only // external model endpoints are currently supported. @@ -1116,6 +1518,12 @@ type ServingEndpointDetailed struct { Task types.String `tfsdk:"task" tf:"optional"` } +func (newState *ServingEndpointDetailed) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServingEndpointDetailed) { +} + +func (newState *ServingEndpointDetailed) SyncEffectiveFieldsDuringRead(existingState ServingEndpointDetailed) { +} + type ServingEndpointPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -1124,6 +1532,12 @@ type ServingEndpointPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *ServingEndpointPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServingEndpointPermission) { +} + +func (newState *ServingEndpointPermission) SyncEffectiveFieldsDuringRead(existingState ServingEndpointPermission) { +} + type ServingEndpointPermissions struct { AccessControlList []ServingEndpointAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -1132,23 +1546,47 @@ type ServingEndpointPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *ServingEndpointPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServingEndpointPermissions) { +} + +func (newState *ServingEndpointPermissions) SyncEffectiveFieldsDuringRead(existingState ServingEndpointPermissions) { +} + type ServingEndpointPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *ServingEndpointPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServingEndpointPermissionsDescription) { +} + +func (newState *ServingEndpointPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState ServingEndpointPermissionsDescription) { +} + type ServingEndpointPermissionsRequest struct { AccessControlList []ServingEndpointAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The serving endpoint for which to get or manage permissions. ServingEndpointId types.String `tfsdk:"-"` } +func (newState *ServingEndpointPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServingEndpointPermissionsRequest) { +} + +func (newState *ServingEndpointPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState ServingEndpointPermissionsRequest) { +} + type TrafficConfig struct { // The list of routes that define traffic to each served entity. Routes []Route `tfsdk:"routes" tf:"optional"` } +func (newState *TrafficConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan TrafficConfig) { +} + +func (newState *TrafficConfig) SyncEffectiveFieldsDuringRead(existingState TrafficConfig) { +} + type V1ResponseChoiceElement struct { // The finish reason returned by the endpoint. 
FinishReason types.String `tfsdk:"finishReason" tf:"optional"` @@ -1161,3 +1599,9 @@ type V1ResponseChoiceElement struct { // The text response from the __completions__ endpoint. Text types.String `tfsdk:"text" tf:"optional"` } + +func (newState *V1ResponseChoiceElement) SyncEffectiveFieldsDuringCreateOrUpdate(plan V1ResponseChoiceElement) { +} + +func (newState *V1ResponseChoiceElement) SyncEffectiveFieldsDuringRead(existingState V1ResponseChoiceElement) { +} diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index a3fad58cb7..3ca9895b89 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -32,10 +32,22 @@ type AutomaticClusterUpdateSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *AutomaticClusterUpdateSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan AutomaticClusterUpdateSetting) { +} + +func (newState *AutomaticClusterUpdateSetting) SyncEffectiveFieldsDuringRead(existingState AutomaticClusterUpdateSetting) { +} + type BooleanMessage struct { Value types.Bool `tfsdk:"value" tf:"optional"` } +func (newState *BooleanMessage) SyncEffectiveFieldsDuringCreateOrUpdate(plan BooleanMessage) { +} + +func (newState *BooleanMessage) SyncEffectiveFieldsDuringRead(existingState BooleanMessage) { +} + type ClusterAutoRestartMessage struct { CanToggle types.Bool `tfsdk:"can_toggle" tf:"optional"` @@ -53,6 +65,12 @@ type ClusterAutoRestartMessage struct { RestartEvenIfNoUpdatesAvailable types.Bool `tfsdk:"restart_even_if_no_updates_available" tf:"optional"` } +func (newState *ClusterAutoRestartMessage) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterAutoRestartMessage) { +} + +func (newState *ClusterAutoRestartMessage) SyncEffectiveFieldsDuringRead(existingState ClusterAutoRestartMessage) { +} + // Contains an information about the enablement status judging (e.g. whether the // enterprise tier is enabled) This is only additional information that MUST NOT // be used to decide whether the setting is enabled or not. 
This is intended to @@ -69,10 +87,22 @@ type ClusterAutoRestartMessageEnablementDetails struct { UnavailableForNonEnterpriseTier types.Bool `tfsdk:"unavailable_for_non_enterprise_tier" tf:"optional"` } +func (newState *ClusterAutoRestartMessageEnablementDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterAutoRestartMessageEnablementDetails) { +} + +func (newState *ClusterAutoRestartMessageEnablementDetails) SyncEffectiveFieldsDuringRead(existingState ClusterAutoRestartMessageEnablementDetails) { +} + type ClusterAutoRestartMessageMaintenanceWindow struct { WeekDayBasedSchedule []ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule `tfsdk:"week_day_based_schedule" tf:"optional,object"` } +func (newState *ClusterAutoRestartMessageMaintenanceWindow) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterAutoRestartMessageMaintenanceWindow) { +} + +func (newState *ClusterAutoRestartMessageMaintenanceWindow) SyncEffectiveFieldsDuringRead(existingState ClusterAutoRestartMessageMaintenanceWindow) { +} + type ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule struct { DayOfWeek types.String `tfsdk:"day_of_week" tf:"optional"` @@ -81,12 +111,24 @@ type ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule struct { WindowStartTime []ClusterAutoRestartMessageMaintenanceWindowWindowStartTime `tfsdk:"window_start_time" tf:"optional,object"` } +func (newState *ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule) { +} + +func (newState *ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule) SyncEffectiveFieldsDuringRead(existingState ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule) { +} + type ClusterAutoRestartMessageMaintenanceWindowWindowStartTime struct { Hours types.Int64 `tfsdk:"hours" tf:"optional"` Minutes types.Int64 `tfsdk:"minutes" tf:"optional"` } +func (newState *ClusterAutoRestartMessageMaintenanceWindowWindowStartTime) SyncEffectiveFieldsDuringCreateOrUpdate(plan ClusterAutoRestartMessageMaintenanceWindowWindowStartTime) { +} + +func (newState *ClusterAutoRestartMessageMaintenanceWindowWindowStartTime) SyncEffectiveFieldsDuringRead(existingState ClusterAutoRestartMessageMaintenanceWindowWindowStartTime) { +} + // SHIELD feature: CSP type ComplianceSecurityProfile struct { // Set by customers when they request Compliance Security Profile (CSP) @@ -95,6 +137,12 @@ type ComplianceSecurityProfile struct { IsEnabled types.Bool `tfsdk:"is_enabled" tf:"optional"` } +func (newState *ComplianceSecurityProfile) SyncEffectiveFieldsDuringCreateOrUpdate(plan ComplianceSecurityProfile) { +} + +func (newState *ComplianceSecurityProfile) SyncEffectiveFieldsDuringRead(existingState ComplianceSecurityProfile) { +} + type ComplianceSecurityProfileSetting struct { // SHIELD feature: CSP ComplianceSecurityProfileWorkspace []ComplianceSecurityProfile `tfsdk:"compliance_security_profile_workspace" tf:"object"` @@ -114,6 +162,12 @@ type ComplianceSecurityProfileSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *ComplianceSecurityProfileSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan ComplianceSecurityProfileSetting) { +} + +func (newState *ComplianceSecurityProfileSetting) SyncEffectiveFieldsDuringRead(existingState ComplianceSecurityProfileSetting) { +} + type Config struct { Email []EmailConfig `tfsdk:"email" tf:"optional,object"` @@ -126,6 +180,12 @@ type Config struct { Slack 
[]SlackConfig `tfsdk:"slack" tf:"optional,object"` } +func (newState *Config) SyncEffectiveFieldsDuringCreateOrUpdate(plan Config) { +} + +func (newState *Config) SyncEffectiveFieldsDuringRead(existingState Config) { +} + // Details required to configure a block list or allow list. type CreateIpAccessList struct { IpAddresses []types.String `tfsdk:"ip_addresses" tf:"optional"` @@ -140,12 +200,24 @@ type CreateIpAccessList struct { ListType types.String `tfsdk:"list_type" tf:""` } +func (newState *CreateIpAccessList) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateIpAccessList) { +} + +func (newState *CreateIpAccessList) SyncEffectiveFieldsDuringRead(existingState CreateIpAccessList) { +} + // An IP access list was successfully created. type CreateIpAccessListResponse struct { // Definition of an IP Access list IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } +func (newState *CreateIpAccessListResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateIpAccessListResponse) { +} + +func (newState *CreateIpAccessListResponse) SyncEffectiveFieldsDuringRead(existingState CreateIpAccessListResponse) { +} + type CreateNetworkConnectivityConfigRequest struct { // The name of the network connectivity configuration. The name can contain // alphanumeric characters, hyphens, and underscores. The length must be @@ -158,6 +230,12 @@ type CreateNetworkConnectivityConfigRequest struct { Region types.String `tfsdk:"region" tf:""` } +func (newState *CreateNetworkConnectivityConfigRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateNetworkConnectivityConfigRequest) { +} + +func (newState *CreateNetworkConnectivityConfigRequest) SyncEffectiveFieldsDuringRead(existingState CreateNetworkConnectivityConfigRequest) { +} + type CreateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. @@ -166,6 +244,12 @@ type CreateNotificationDestinationRequest struct { DisplayName types.String `tfsdk:"display_name" tf:"optional"` } +func (newState *CreateNotificationDestinationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateNotificationDestinationRequest) { +} + +func (newState *CreateNotificationDestinationRequest) SyncEffectiveFieldsDuringRead(existingState CreateNotificationDestinationRequest) { +} + // Configuration details for creating on-behalf tokens. type CreateOboTokenRequest struct { // Application ID of the service principal. @@ -176,6 +260,12 @@ type CreateOboTokenRequest struct { LifetimeSeconds types.Int64 `tfsdk:"lifetime_seconds" tf:"optional"` } +func (newState *CreateOboTokenRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateOboTokenRequest) { +} + +func (newState *CreateOboTokenRequest) SyncEffectiveFieldsDuringRead(existingState CreateOboTokenRequest) { +} + // An on-behalf token was successfully created for the service principal. type CreateOboTokenResponse struct { TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional,object"` @@ -183,6 +273,12 @@ type CreateOboTokenResponse struct { TokenValue types.String `tfsdk:"token_value" tf:"optional"` } +func (newState *CreateOboTokenResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateOboTokenResponse) { +} + +func (newState *CreateOboTokenResponse) SyncEffectiveFieldsDuringRead(existingState CreateOboTokenResponse) { +} + type CreatePrivateEndpointRuleRequest struct { // The sub-resource type (group ID) of the target resource. 
Note that to // connect to workspace root storage (root DBFS), you need two endpoints, @@ -194,6 +290,12 @@ type CreatePrivateEndpointRuleRequest struct { ResourceId types.String `tfsdk:"resource_id" tf:""` } +func (newState *CreatePrivateEndpointRuleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreatePrivateEndpointRuleRequest) { +} + +func (newState *CreatePrivateEndpointRuleRequest) SyncEffectiveFieldsDuringRead(existingState CreatePrivateEndpointRuleRequest) { +} + type CreateTokenRequest struct { // Optional description to attach to the token. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -203,6 +305,12 @@ type CreateTokenRequest struct { LifetimeSeconds types.Int64 `tfsdk:"lifetime_seconds" tf:"optional"` } +func (newState *CreateTokenRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateTokenRequest) { +} + +func (newState *CreateTokenRequest) SyncEffectiveFieldsDuringRead(existingState CreateTokenRequest) { +} + type CreateTokenResponse struct { // The information for the new token. TokenInfo []PublicTokenInfo `tfsdk:"token_info" tf:"optional,object"` @@ -210,6 +318,12 @@ type CreateTokenResponse struct { TokenValue types.String `tfsdk:"token_value" tf:"optional"` } +func (newState *CreateTokenResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateTokenResponse) { +} + +func (newState *CreateTokenResponse) SyncEffectiveFieldsDuringRead(existingState CreateTokenResponse) { +} + // Account level policy for CSP type CspEnablementAccount struct { // Set by customers when they request Compliance Security Profile (CSP) @@ -219,6 +333,12 @@ type CspEnablementAccount struct { IsEnforced types.Bool `tfsdk:"is_enforced" tf:"optional"` } +func (newState *CspEnablementAccount) SyncEffectiveFieldsDuringCreateOrUpdate(plan CspEnablementAccount) { +} + +func (newState *CspEnablementAccount) SyncEffectiveFieldsDuringRead(existingState CspEnablementAccount) { +} + type CspEnablementAccountSetting struct { // Account level policy for CSP CspEnablementAccount []CspEnablementAccount `tfsdk:"csp_enablement_account" tf:"object"` @@ -238,6 +358,12 @@ type CspEnablementAccountSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *CspEnablementAccountSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan CspEnablementAccountSetting) { +} + +func (newState *CspEnablementAccountSetting) SyncEffectiveFieldsDuringRead(existingState CspEnablementAccountSetting) { +} + // This represents the setting configuration for the default namespace in the // Databricks workspace. 
Setting the default catalog for the workspace // determines the catalog that is used when queries do not reference a fully @@ -266,12 +392,24 @@ type DefaultNamespaceSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *DefaultNamespaceSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan DefaultNamespaceSetting) { +} + +func (newState *DefaultNamespaceSetting) SyncEffectiveFieldsDuringRead(existingState DefaultNamespaceSetting) { +} + // Delete access list type DeleteAccountIpAccessListRequest struct { // The ID for the corresponding IP access list IpAccessListId types.String `tfsdk:"-"` } +func (newState *DeleteAccountIpAccessListRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAccountIpAccessListRequest) { +} + +func (newState *DeleteAccountIpAccessListRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAccountIpAccessListRequest) { +} + // Delete the default namespace setting type DeleteDefaultNamespaceSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -284,6 +422,12 @@ type DeleteDefaultNamespaceSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *DeleteDefaultNamespaceSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDefaultNamespaceSettingRequest) { +} + +func (newState *DeleteDefaultNamespaceSettingRequest) SyncEffectiveFieldsDuringRead(existingState DeleteDefaultNamespaceSettingRequest) { +} + // The etag is returned. type DeleteDefaultNamespaceSettingResponse struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -296,6 +440,12 @@ type DeleteDefaultNamespaceSettingResponse struct { Etag types.String `tfsdk:"etag" tf:""` } +func (newState *DeleteDefaultNamespaceSettingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDefaultNamespaceSettingResponse) { +} + +func (newState *DeleteDefaultNamespaceSettingResponse) SyncEffectiveFieldsDuringRead(existingState DeleteDefaultNamespaceSettingResponse) { +} + // Delete Legacy Access Disablement Status type DeleteDisableLegacyAccessRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -308,6 +458,12 @@ type DeleteDisableLegacyAccessRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *DeleteDisableLegacyAccessRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDisableLegacyAccessRequest) { +} + +func (newState *DeleteDisableLegacyAccessRequest) SyncEffectiveFieldsDuringRead(existingState DeleteDisableLegacyAccessRequest) { +} + // The etag is returned. type DeleteDisableLegacyAccessResponse struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -320,6 +476,48 @@ type DeleteDisableLegacyAccessResponse struct { Etag types.String `tfsdk:"etag" tf:""` } +func (newState *DeleteDisableLegacyAccessResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDisableLegacyAccessResponse) { +} + +func (newState *DeleteDisableLegacyAccessResponse) SyncEffectiveFieldsDuringRead(existingState DeleteDisableLegacyAccessResponse) { +} + +// Delete the disable legacy DBFS setting +type DeleteDisableLegacyDbfsRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +func (newState *DeleteDisableLegacyDbfsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDisableLegacyDbfsRequest) { +} + +func (newState *DeleteDisableLegacyDbfsRequest) SyncEffectiveFieldsDuringRead(existingState DeleteDisableLegacyDbfsRequest) { +} + +// The etag is returned. +type DeleteDisableLegacyDbfsResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + +func (newState *DeleteDisableLegacyDbfsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDisableLegacyDbfsResponse) { +} + +func (newState *DeleteDisableLegacyDbfsResponse) SyncEffectiveFieldsDuringRead(existingState DeleteDisableLegacyDbfsResponse) { +} + // Delete the disable legacy features setting type DeleteDisableLegacyFeaturesRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -332,6 +530,12 @@ type DeleteDisableLegacyFeaturesRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *DeleteDisableLegacyFeaturesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDisableLegacyFeaturesRequest) { +} + +func (newState *DeleteDisableLegacyFeaturesRequest) SyncEffectiveFieldsDuringRead(existingState DeleteDisableLegacyFeaturesRequest) { +} + // The etag is returned. type DeleteDisableLegacyFeaturesResponse struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -344,26 +548,56 @@ type DeleteDisableLegacyFeaturesResponse struct { Etag types.String `tfsdk:"etag" tf:""` } +func (newState *DeleteDisableLegacyFeaturesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDisableLegacyFeaturesResponse) { +} + +func (newState *DeleteDisableLegacyFeaturesResponse) SyncEffectiveFieldsDuringRead(existingState DeleteDisableLegacyFeaturesResponse) { +} + // Delete access list type DeleteIpAccessListRequest struct { // The ID for the corresponding IP access list IpAccessListId types.String `tfsdk:"-"` } +func (newState *DeleteIpAccessListRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteIpAccessListRequest) { +} + +func (newState *DeleteIpAccessListRequest) SyncEffectiveFieldsDuringRead(existingState DeleteIpAccessListRequest) { +} + // Delete a network connectivity configuration type DeleteNetworkConnectivityConfigurationRequest struct { // Your Network Connectvity Configuration ID. 
NetworkConnectivityConfigId types.String `tfsdk:"-"` } +func (newState *DeleteNetworkConnectivityConfigurationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteNetworkConnectivityConfigurationRequest) { +} + +func (newState *DeleteNetworkConnectivityConfigurationRequest) SyncEffectiveFieldsDuringRead(existingState DeleteNetworkConnectivityConfigurationRequest) { +} + type DeleteNetworkConnectivityConfigurationResponse struct { } +func (newState *DeleteNetworkConnectivityConfigurationResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteNetworkConnectivityConfigurationResponse) { +} + +func (newState *DeleteNetworkConnectivityConfigurationResponse) SyncEffectiveFieldsDuringRead(existingState DeleteNetworkConnectivityConfigurationResponse) { +} + // Delete a notification destination type DeleteNotificationDestinationRequest struct { Id types.String `tfsdk:"-"` } +func (newState *DeleteNotificationDestinationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteNotificationDestinationRequest) { +} + +func (newState *DeleteNotificationDestinationRequest) SyncEffectiveFieldsDuringRead(existingState DeleteNotificationDestinationRequest) { +} + // Delete Personal Compute setting type DeletePersonalComputeSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -376,6 +610,12 @@ type DeletePersonalComputeSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *DeletePersonalComputeSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePersonalComputeSettingRequest) { +} + +func (newState *DeletePersonalComputeSettingRequest) SyncEffectiveFieldsDuringRead(existingState DeletePersonalComputeSettingRequest) { +} + // The etag is returned. type DeletePersonalComputeSettingResponse struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -388,6 +628,12 @@ type DeletePersonalComputeSettingResponse struct { Etag types.String `tfsdk:"etag" tf:""` } +func (newState *DeletePersonalComputeSettingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePersonalComputeSettingResponse) { +} + +func (newState *DeletePersonalComputeSettingResponse) SyncEffectiveFieldsDuringRead(existingState DeletePersonalComputeSettingResponse) { +} + // Delete a private endpoint rule type DeletePrivateEndpointRuleRequest struct { // Your Network Connectvity Configuration ID. @@ -396,9 +642,21 @@ type DeletePrivateEndpointRuleRequest struct { PrivateEndpointRuleId types.String `tfsdk:"-"` } +func (newState *DeletePrivateEndpointRuleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeletePrivateEndpointRuleRequest) { +} + +func (newState *DeletePrivateEndpointRuleRequest) SyncEffectiveFieldsDuringRead(existingState DeletePrivateEndpointRuleRequest) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + // Delete the restrict workspace admins setting type DeleteRestrictWorkspaceAdminsSettingRequest struct { // etag used for versioning. 
The response is at least as fresh as the eTag @@ -411,6 +669,12 @@ type DeleteRestrictWorkspaceAdminsSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *DeleteRestrictWorkspaceAdminsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRestrictWorkspaceAdminsSettingRequest) { +} + +func (newState *DeleteRestrictWorkspaceAdminsSettingRequest) SyncEffectiveFieldsDuringRead(existingState DeleteRestrictWorkspaceAdminsSettingRequest) { +} + // The etag is returned. type DeleteRestrictWorkspaceAdminsSettingResponse struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -423,12 +687,24 @@ type DeleteRestrictWorkspaceAdminsSettingResponse struct { Etag types.String `tfsdk:"etag" tf:""` } +func (newState *DeleteRestrictWorkspaceAdminsSettingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRestrictWorkspaceAdminsSettingResponse) { +} + +func (newState *DeleteRestrictWorkspaceAdminsSettingResponse) SyncEffectiveFieldsDuringRead(existingState DeleteRestrictWorkspaceAdminsSettingResponse) { +} + // Delete a token type DeleteTokenManagementRequest struct { // The ID of the token to get. TokenId types.String `tfsdk:"-"` } +func (newState *DeleteTokenManagementRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteTokenManagementRequest) { +} + +func (newState *DeleteTokenManagementRequest) SyncEffectiveFieldsDuringRead(existingState DeleteTokenManagementRequest) { +} + type DisableLegacyAccess struct { DisableLegacyAccess []BooleanMessage `tfsdk:"disable_legacy_access" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag @@ -447,6 +723,36 @@ type DisableLegacyAccess struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *DisableLegacyAccess) SyncEffectiveFieldsDuringCreateOrUpdate(plan DisableLegacyAccess) { +} + +func (newState *DisableLegacyAccess) SyncEffectiveFieldsDuringRead(existingState DisableLegacyAccess) { +} + +type DisableLegacyDbfs struct { + DisableLegacyDbfs []BooleanMessage `tfsdk:"disable_legacy_dbfs" tf:"object"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +func (newState *DisableLegacyDbfs) SyncEffectiveFieldsDuringCreateOrUpdate(plan DisableLegacyDbfs) { +} + +func (newState *DisableLegacyDbfs) SyncEffectiveFieldsDuringRead(existingState DisableLegacyDbfs) { +} + type DisableLegacyFeatures struct { DisableLegacyFeatures []BooleanMessage `tfsdk:"disable_legacy_features" tf:"object"` // etag used for versioning. 
The response is at least as fresh as the eTag @@ -465,19 +771,43 @@ type DisableLegacyFeatures struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *DisableLegacyFeatures) SyncEffectiveFieldsDuringCreateOrUpdate(plan DisableLegacyFeatures) { +} + +func (newState *DisableLegacyFeatures) SyncEffectiveFieldsDuringRead(existingState DisableLegacyFeatures) { +} + type EmailConfig struct { // Email addresses to notify. Addresses []types.String `tfsdk:"addresses" tf:"optional"` } +func (newState *EmailConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan EmailConfig) { +} + +func (newState *EmailConfig) SyncEffectiveFieldsDuringRead(existingState EmailConfig) { +} + type Empty struct { } +func (newState *Empty) SyncEffectiveFieldsDuringCreateOrUpdate(plan Empty) { +} + +func (newState *Empty) SyncEffectiveFieldsDuringRead(existingState Empty) { +} + // SHIELD feature: ESM type EnhancedSecurityMonitoring struct { IsEnabled types.Bool `tfsdk:"is_enabled" tf:"optional"` } +func (newState *EnhancedSecurityMonitoring) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnhancedSecurityMonitoring) { +} + +func (newState *EnhancedSecurityMonitoring) SyncEffectiveFieldsDuringRead(existingState EnhancedSecurityMonitoring) { +} + type EnhancedSecurityMonitoringSetting struct { // SHIELD feature: ESM EnhancedSecurityMonitoringWorkspace []EnhancedSecurityMonitoring `tfsdk:"enhanced_security_monitoring_workspace" tf:"object"` @@ -497,11 +827,23 @@ type EnhancedSecurityMonitoringSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *EnhancedSecurityMonitoringSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnhancedSecurityMonitoringSetting) { +} + +func (newState *EnhancedSecurityMonitoringSetting) SyncEffectiveFieldsDuringRead(existingState EnhancedSecurityMonitoringSetting) { +} + // Account level policy for ESM type EsmEnablementAccount struct { IsEnforced types.Bool `tfsdk:"is_enforced" tf:"optional"` } +func (newState *EsmEnablementAccount) SyncEffectiveFieldsDuringCreateOrUpdate(plan EsmEnablementAccount) { +} + +func (newState *EsmEnablementAccount) SyncEffectiveFieldsDuringRead(existingState EsmEnablementAccount) { +} + type EsmEnablementAccountSetting struct { // Account level policy for ESM EsmEnablementAccount []EsmEnablementAccount `tfsdk:"esm_enablement_account" tf:"object"` @@ -521,6 +863,12 @@ type EsmEnablementAccountSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *EsmEnablementAccountSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan EsmEnablementAccountSetting) { +} + +func (newState *EsmEnablementAccountSetting) SyncEffectiveFieldsDuringRead(existingState EsmEnablementAccountSetting) { +} + // The exchange token is the result of the token exchange with the IdP type ExchangeToken struct { // The requested token. 
@@ -536,6 +884,12 @@ type ExchangeToken struct { TokenType types.String `tfsdk:"tokenType" tf:"optional"` } +func (newState *ExchangeToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExchangeToken) { +} + +func (newState *ExchangeToken) SyncEffectiveFieldsDuringRead(existingState ExchangeToken) { +} + // Exchange a token with the IdP type ExchangeTokenRequest struct { // The partition of Credentials store @@ -546,17 +900,35 @@ type ExchangeTokenRequest struct { TokenType []types.String `tfsdk:"tokenType" tf:""` } +func (newState *ExchangeTokenRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExchangeTokenRequest) { +} + +func (newState *ExchangeTokenRequest) SyncEffectiveFieldsDuringRead(existingState ExchangeTokenRequest) { +} + // Exhanged tokens were successfully returned. type ExchangeTokenResponse struct { Values []ExchangeToken `tfsdk:"values" tf:"optional"` } +func (newState *ExchangeTokenResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExchangeTokenResponse) { +} + +func (newState *ExchangeTokenResponse) SyncEffectiveFieldsDuringRead(existingState ExchangeTokenResponse) { +} + // An IP access list was successfully returned. type FetchIpAccessListResponse struct { // Definition of an IP Access list IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } +func (newState *FetchIpAccessListResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan FetchIpAccessListResponse) { +} + +func (newState *FetchIpAccessListResponse) SyncEffectiveFieldsDuringRead(existingState FetchIpAccessListResponse) { +} + type GenericWebhookConfig struct { // [Input-Only][Optional] Password for webhook. Password types.String `tfsdk:"password" tf:"optional"` @@ -572,12 +944,24 @@ type GenericWebhookConfig struct { UsernameSet types.Bool `tfsdk:"username_set" tf:"optional"` } +func (newState *GenericWebhookConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenericWebhookConfig) { +} + +func (newState *GenericWebhookConfig) SyncEffectiveFieldsDuringRead(existingState GenericWebhookConfig) { +} + // Get IP access list type GetAccountIpAccessListRequest struct { // The ID for the corresponding IP access list IpAccessListId types.String `tfsdk:"-"` } +func (newState *GetAccountIpAccessListRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAccountIpAccessListRequest) { +} + +func (newState *GetAccountIpAccessListRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountIpAccessListRequest) { +} + // Get the automatic cluster update setting type GetAutomaticClusterUpdateSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -590,6 +974,12 @@ type GetAutomaticClusterUpdateSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetAutomaticClusterUpdateSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAutomaticClusterUpdateSettingRequest) { +} + +func (newState *GetAutomaticClusterUpdateSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetAutomaticClusterUpdateSettingRequest) { +} + // Get the compliance security profile setting type GetComplianceSecurityProfileSettingRequest struct { // etag used for versioning. 
The response is at least as fresh as the eTag @@ -602,6 +992,12 @@ type GetComplianceSecurityProfileSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetComplianceSecurityProfileSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetComplianceSecurityProfileSettingRequest) { +} + +func (newState *GetComplianceSecurityProfileSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetComplianceSecurityProfileSettingRequest) { +} + // Get the compliance security profile setting for new workspaces type GetCspEnablementAccountSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -614,6 +1010,12 @@ type GetCspEnablementAccountSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetCspEnablementAccountSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCspEnablementAccountSettingRequest) { +} + +func (newState *GetCspEnablementAccountSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetCspEnablementAccountSettingRequest) { +} + // Get the default namespace setting type GetDefaultNamespaceSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -626,6 +1028,12 @@ type GetDefaultNamespaceSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetDefaultNamespaceSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDefaultNamespaceSettingRequest) { +} + +func (newState *GetDefaultNamespaceSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetDefaultNamespaceSettingRequest) { +} + // Retrieve Legacy Access Disablement Status type GetDisableLegacyAccessRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -638,6 +1046,30 @@ type GetDisableLegacyAccessRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetDisableLegacyAccessRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDisableLegacyAccessRequest) { +} + +func (newState *GetDisableLegacyAccessRequest) SyncEffectiveFieldsDuringRead(existingState GetDisableLegacyAccessRequest) { +} + +// Get the disable legacy DBFS setting +type GetDisableLegacyDbfsRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +func (newState *GetDisableLegacyDbfsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDisableLegacyDbfsRequest) { +} + +func (newState *GetDisableLegacyDbfsRequest) SyncEffectiveFieldsDuringRead(existingState GetDisableLegacyDbfsRequest) { +} + // Get the disable legacy features setting type GetDisableLegacyFeaturesRequest struct { // etag used for versioning. 
The response is at least as fresh as the eTag @@ -650,6 +1082,12 @@ type GetDisableLegacyFeaturesRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetDisableLegacyFeaturesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDisableLegacyFeaturesRequest) { +} + +func (newState *GetDisableLegacyFeaturesRequest) SyncEffectiveFieldsDuringRead(existingState GetDisableLegacyFeaturesRequest) { +} + // Get the enhanced security monitoring setting type GetEnhancedSecurityMonitoringSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -662,6 +1100,12 @@ type GetEnhancedSecurityMonitoringSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetEnhancedSecurityMonitoringSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetEnhancedSecurityMonitoringSettingRequest) { +} + +func (newState *GetEnhancedSecurityMonitoringSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetEnhancedSecurityMonitoringSettingRequest) { +} + // Get the enhanced security monitoring setting for new workspaces type GetEsmEnablementAccountSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -674,33 +1118,69 @@ type GetEsmEnablementAccountSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetEsmEnablementAccountSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetEsmEnablementAccountSettingRequest) { +} + +func (newState *GetEsmEnablementAccountSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetEsmEnablementAccountSettingRequest) { +} + // Get access list type GetIpAccessListRequest struct { // The ID for the corresponding IP access list IpAccessListId types.String `tfsdk:"-"` } +func (newState *GetIpAccessListRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetIpAccessListRequest) { +} + +func (newState *GetIpAccessListRequest) SyncEffectiveFieldsDuringRead(existingState GetIpAccessListRequest) { +} + type GetIpAccessListResponse struct { // Definition of an IP Access list IpAccessList []IpAccessListInfo `tfsdk:"ip_access_list" tf:"optional,object"` } +func (newState *GetIpAccessListResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetIpAccessListResponse) { +} + +func (newState *GetIpAccessListResponse) SyncEffectiveFieldsDuringRead(existingState GetIpAccessListResponse) { +} + // IP access lists were successfully returned. type GetIpAccessListsResponse struct { IpAccessLists []IpAccessListInfo `tfsdk:"ip_access_lists" tf:"optional"` } +func (newState *GetIpAccessListsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetIpAccessListsResponse) { +} + +func (newState *GetIpAccessListsResponse) SyncEffectiveFieldsDuringRead(existingState GetIpAccessListsResponse) { +} + // Get a network connectivity configuration type GetNetworkConnectivityConfigurationRequest struct { // Your Network Connectvity Configuration ID. 
NetworkConnectivityConfigId types.String `tfsdk:"-"` } +func (newState *GetNetworkConnectivityConfigurationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetNetworkConnectivityConfigurationRequest) { +} + +func (newState *GetNetworkConnectivityConfigurationRequest) SyncEffectiveFieldsDuringRead(existingState GetNetworkConnectivityConfigurationRequest) { +} + // Get a notification destination type GetNotificationDestinationRequest struct { Id types.String `tfsdk:"-"` } +func (newState *GetNotificationDestinationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetNotificationDestinationRequest) { +} + +func (newState *GetNotificationDestinationRequest) SyncEffectiveFieldsDuringRead(existingState GetNotificationDestinationRequest) { +} + // Get Personal Compute setting type GetPersonalComputeSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -713,6 +1193,12 @@ type GetPersonalComputeSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetPersonalComputeSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPersonalComputeSettingRequest) { +} + +func (newState *GetPersonalComputeSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetPersonalComputeSettingRequest) { +} + // Get a private endpoint rule type GetPrivateEndpointRuleRequest struct { // Your Network Connectvity Configuration ID. @@ -721,6 +1207,12 @@ type GetPrivateEndpointRuleRequest struct { PrivateEndpointRuleId types.String `tfsdk:"-"` } +func (newState *GetPrivateEndpointRuleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetPrivateEndpointRuleRequest) { +} + +func (newState *GetPrivateEndpointRuleRequest) SyncEffectiveFieldsDuringRead(existingState GetPrivateEndpointRuleRequest) { +} + // Get the restrict workspace admins setting type GetRestrictWorkspaceAdminsSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -733,27 +1225,57 @@ type GetRestrictWorkspaceAdminsSettingRequest struct { Etag types.String `tfsdk:"-"` } +func (newState *GetRestrictWorkspaceAdminsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRestrictWorkspaceAdminsSettingRequest) { +} + +func (newState *GetRestrictWorkspaceAdminsSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetRestrictWorkspaceAdminsSettingRequest) { +} + // Check configuration status type GetStatusRequest struct { Keys types.String `tfsdk:"-"` } +func (newState *GetStatusRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetStatusRequest) { +} + +func (newState *GetStatusRequest) SyncEffectiveFieldsDuringRead(existingState GetStatusRequest) { +} + // Get token info type GetTokenManagementRequest struct { // The ID of the token to get. TokenId types.String `tfsdk:"-"` } +func (newState *GetTokenManagementRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetTokenManagementRequest) { +} + +func (newState *GetTokenManagementRequest) SyncEffectiveFieldsDuringRead(existingState GetTokenManagementRequest) { +} + type GetTokenPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []TokenPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetTokenPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetTokenPermissionLevelsResponse) { +} + +func (newState *GetTokenPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetTokenPermissionLevelsResponse) { +} + // Token with specified Token ID was successfully returned. 
type GetTokenResponse struct { TokenInfo []TokenInfo `tfsdk:"token_info" tf:"optional,object"` } +func (newState *GetTokenResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetTokenResponse) { +} + +func (newState *GetTokenResponse) SyncEffectiveFieldsDuringRead(existingState GetTokenResponse) { +} + // Definition of an IP Access list type IpAccessListInfo struct { // Total number of IP or CIDR values. @@ -783,11 +1305,23 @@ type IpAccessListInfo struct { UpdatedBy types.Int64 `tfsdk:"updated_by" tf:"optional"` } +func (newState *IpAccessListInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan IpAccessListInfo) { +} + +func (newState *IpAccessListInfo) SyncEffectiveFieldsDuringRead(existingState IpAccessListInfo) { +} + // IP access lists were successfully returned. type ListIpAccessListResponse struct { IpAccessLists []IpAccessListInfo `tfsdk:"ip_access_lists" tf:"optional"` } +func (newState *ListIpAccessListResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListIpAccessListResponse) { +} + +func (newState *ListIpAccessListResponse) SyncEffectiveFieldsDuringRead(existingState ListIpAccessListResponse) { +} + type ListNccAzurePrivateEndpointRulesResponse struct { Items []NccAzurePrivateEndpointRule `tfsdk:"items" tf:"optional"` // A token that can be used to get the next page of results. If null, there @@ -795,12 +1329,24 @@ type ListNccAzurePrivateEndpointRulesResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListNccAzurePrivateEndpointRulesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListNccAzurePrivateEndpointRulesResponse) { +} + +func (newState *ListNccAzurePrivateEndpointRulesResponse) SyncEffectiveFieldsDuringRead(existingState ListNccAzurePrivateEndpointRulesResponse) { +} + // List network connectivity configurations type ListNetworkConnectivityConfigurationsRequest struct { // Pagination token to go to next page based on previous query. PageToken types.String `tfsdk:"-"` } +func (newState *ListNetworkConnectivityConfigurationsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListNetworkConnectivityConfigurationsRequest) { +} + +func (newState *ListNetworkConnectivityConfigurationsRequest) SyncEffectiveFieldsDuringRead(existingState ListNetworkConnectivityConfigurationsRequest) { +} + type ListNetworkConnectivityConfigurationsResponse struct { Items []NetworkConnectivityConfiguration `tfsdk:"items" tf:"optional"` // A token that can be used to get the next page of results. 
If null, there @@ -808,6 +1354,12 @@ type ListNetworkConnectivityConfigurationsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListNetworkConnectivityConfigurationsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListNetworkConnectivityConfigurationsResponse) { +} + +func (newState *ListNetworkConnectivityConfigurationsResponse) SyncEffectiveFieldsDuringRead(existingState ListNetworkConnectivityConfigurationsResponse) { +} + // List notification destinations type ListNotificationDestinationsRequest struct { PageSize types.Int64 `tfsdk:"-"` @@ -815,6 +1367,12 @@ type ListNotificationDestinationsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListNotificationDestinationsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListNotificationDestinationsRequest) { +} + +func (newState *ListNotificationDestinationsRequest) SyncEffectiveFieldsDuringRead(existingState ListNotificationDestinationsRequest) { +} + type ListNotificationDestinationsResponse struct { // Page token for next of results. NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` @@ -822,6 +1380,12 @@ type ListNotificationDestinationsResponse struct { Results []ListNotificationDestinationsResult `tfsdk:"results" tf:"optional"` } +func (newState *ListNotificationDestinationsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListNotificationDestinationsResponse) { +} + +func (newState *ListNotificationDestinationsResponse) SyncEffectiveFieldsDuringRead(existingState ListNotificationDestinationsResponse) { +} + type ListNotificationDestinationsResult struct { // [Output-only] The type of the notification destination. The type can not // be changed once set. @@ -832,6 +1396,12 @@ type ListNotificationDestinationsResult struct { Id types.String `tfsdk:"id" tf:"optional"` } +func (newState *ListNotificationDestinationsResult) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListNotificationDestinationsResult) { +} + +func (newState *ListNotificationDestinationsResult) SyncEffectiveFieldsDuringRead(existingState ListNotificationDestinationsResult) { +} + // List private endpoint rules type ListPrivateEndpointRulesRequest struct { // Your Network Connectvity Configuration ID. @@ -840,11 +1410,23 @@ type ListPrivateEndpointRulesRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListPrivateEndpointRulesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPrivateEndpointRulesRequest) { +} + +func (newState *ListPrivateEndpointRulesRequest) SyncEffectiveFieldsDuringRead(existingState ListPrivateEndpointRulesRequest) { +} + type ListPublicTokensResponse struct { // The information for each token. TokenInfos []PublicTokenInfo `tfsdk:"token_infos" tf:"optional"` } +func (newState *ListPublicTokensResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListPublicTokensResponse) { +} + +func (newState *ListPublicTokensResponse) SyncEffectiveFieldsDuringRead(existingState ListPublicTokensResponse) { +} + // List all tokens type ListTokenManagementRequest struct { // User ID of the user that created the token. @@ -853,12 +1435,24 @@ type ListTokenManagementRequest struct { CreatedByUsername types.String `tfsdk:"-"` } +func (newState *ListTokenManagementRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListTokenManagementRequest) { +} + +func (newState *ListTokenManagementRequest) SyncEffectiveFieldsDuringRead(existingState ListTokenManagementRequest) { +} + // Tokens were successfully returned. 
type ListTokensResponse struct { // Token metadata of each user-created token in the workspace TokenInfos []TokenInfo `tfsdk:"token_infos" tf:"optional"` } +func (newState *ListTokensResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListTokensResponse) { +} + +func (newState *ListTokensResponse) SyncEffectiveFieldsDuringRead(existingState ListTokensResponse) { +} + type MicrosoftTeamsConfig struct { // [Input-Only] URL for Microsoft Teams. Url types.String `tfsdk:"url" tf:"optional"` @@ -866,6 +1460,12 @@ type MicrosoftTeamsConfig struct { UrlSet types.Bool `tfsdk:"url_set" tf:"optional"` } +func (newState *MicrosoftTeamsConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan MicrosoftTeamsConfig) { +} + +func (newState *MicrosoftTeamsConfig) SyncEffectiveFieldsDuringRead(existingState MicrosoftTeamsConfig) { +} + // The stable AWS IP CIDR blocks. You can use these to configure the firewall of // your resources to allow traffic from your Databricks workspace. type NccAwsStableIpRule struct { @@ -874,6 +1474,12 @@ type NccAwsStableIpRule struct { CidrBlocks []types.String `tfsdk:"cidr_blocks" tf:"optional"` } +func (newState *NccAwsStableIpRule) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccAwsStableIpRule) { +} + +func (newState *NccAwsStableIpRule) SyncEffectiveFieldsDuringRead(existingState NccAwsStableIpRule) { +} + type NccAzurePrivateEndpointRule struct { // The current status of this private endpoint. The private endpoint rules // are effective only if the connection state is `ESTABLISHED`. Remember @@ -888,15 +1494,20 @@ type NccAzurePrivateEndpointRule struct { // DISCONNECTED: Connection was removed by the private link resource owner, // the private endpoint becomes informative and should be deleted for // clean-up. - ConnectionState types.String `tfsdk:"connection_state" tf:"optional"` + ConnectionState types.String `tfsdk:"connection_state" tf:"optional"` + EffectiveConnectionState types.String `tfsdk:"effective_connection_state" tf:"computed,optional"` // Time in epoch milliseconds when this object was created. - CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` // Whether this private endpoint is deactivated. - Deactivated types.Bool `tfsdk:"deactivated" tf:"optional"` + Deactivated types.Bool `tfsdk:"deactivated" tf:"optional"` + EffectiveDeactivated types.Bool `tfsdk:"effective_deactivated" tf:"computed,optional"` // Time in epoch milliseconds when this object was deactivated. - DeactivatedAt types.Int64 `tfsdk:"deactivated_at" tf:"optional"` + DeactivatedAt types.Int64 `tfsdk:"deactivated_at" tf:"optional"` + EffectiveDeactivatedAt types.Int64 `tfsdk:"effective_deactivated_at" tf:"computed,optional"` // The name of the Azure private endpoint resource. - EndpointName types.String `tfsdk:"endpoint_name" tf:"optional"` + EndpointName types.String `tfsdk:"endpoint_name" tf:"optional"` + EffectiveEndpointName types.String `tfsdk:"effective_endpoint_name" tf:"computed,optional"` // The sub-resource type (group ID) of the target resource. Note that to // connect to workspace root storage (root DBFS), you need two endpoints, // one for `blob` and one for `dfs`. @@ -907,9 +1518,52 @@ type NccAzurePrivateEndpointRule struct { // The Azure resource ID of the target resource. ResourceId types.String `tfsdk:"resource_id" tf:"optional"` // The ID of a private endpoint rule. 
- RuleId types.String `tfsdk:"rule_id" tf:"optional"` + RuleId types.String `tfsdk:"rule_id" tf:"optional"` + EffectiveRuleId types.String `tfsdk:"effective_rule_id" tf:"computed,optional"` // Time in epoch milliseconds when this object was updated. - UpdatedTime types.Int64 `tfsdk:"updated_time" tf:"optional"` + UpdatedTime types.Int64 `tfsdk:"updated_time" tf:"optional"` + EffectiveUpdatedTime types.Int64 `tfsdk:"effective_updated_time" tf:"computed,optional"` +} + +func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccAzurePrivateEndpointRule) { + newState.EffectiveConnectionState = newState.ConnectionState + newState.ConnectionState = plan.ConnectionState + newState.EffectiveCreationTime = newState.CreationTime + newState.CreationTime = plan.CreationTime + newState.EffectiveDeactivated = newState.Deactivated + newState.Deactivated = plan.Deactivated + newState.EffectiveDeactivatedAt = newState.DeactivatedAt + newState.DeactivatedAt = plan.DeactivatedAt + newState.EffectiveEndpointName = newState.EndpointName + newState.EndpointName = plan.EndpointName + newState.EffectiveRuleId = newState.RuleId + newState.RuleId = plan.RuleId + newState.EffectiveUpdatedTime = newState.UpdatedTime + newState.UpdatedTime = plan.UpdatedTime +} + +func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringRead(existingState NccAzurePrivateEndpointRule) { + if existingState.EffectiveConnectionState.ValueString() == newState.ConnectionState.ValueString() { + newState.ConnectionState = existingState.ConnectionState + } + if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { + newState.CreationTime = existingState.CreationTime + } + if existingState.EffectiveDeactivated.ValueBool() == newState.Deactivated.ValueBool() { + newState.Deactivated = existingState.Deactivated + } + if existingState.EffectiveDeactivatedAt.ValueInt64() == newState.DeactivatedAt.ValueInt64() { + newState.DeactivatedAt = existingState.DeactivatedAt + } + if existingState.EffectiveEndpointName.ValueString() == newState.EndpointName.ValueString() { + newState.EndpointName = existingState.EndpointName + } + if existingState.EffectiveRuleId.ValueString() == newState.RuleId.ValueString() { + newState.RuleId = existingState.RuleId + } + if existingState.EffectiveUpdatedTime.ValueInt64() == newState.UpdatedTime.ValueInt64() { + newState.UpdatedTime = existingState.UpdatedTime + } } // The stable Azure service endpoints. You can configure the firewall of your @@ -925,18 +1579,31 @@ type NccAzureServiceEndpointRule struct { TargetServices []types.String `tfsdk:"target_services" tf:"optional"` } +func (newState *NccAzureServiceEndpointRule) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccAzureServiceEndpointRule) { +} + +func (newState *NccAzureServiceEndpointRule) SyncEffectiveFieldsDuringRead(existingState NccAzureServiceEndpointRule) { +} + // The network connectivity rules that apply to network traffic from your // serverless compute resources. type NccEgressConfig struct { // The network connectivity rules that are applied by default without // resource specific configurations. You can find the stable network // information of your serverless compute resources here. 
- DefaultRules []NccEgressDefaultRules `tfsdk:"default_rules" tf:"optional,object"` + DefaultRules []NccEgressDefaultRules `tfsdk:"default_rules" tf:"optional,object"` + EffectiveDefaultRules []NccEgressDefaultRules `tfsdk:"effective_default_rules" tf:"computed,optional"` // The network connectivity rules that configured for each destinations. // These rules override default rules. TargetRules []NccEgressTargetRules `tfsdk:"target_rules" tf:"optional,object"` } +func (newState *NccEgressConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccEgressConfig) { +} + +func (newState *NccEgressConfig) SyncEffectiveFieldsDuringRead(existingState NccEgressConfig) { +} + // The network connectivity rules that are applied by default without resource // specific configurations. You can find the stable network information of your // serverless compute resources here. @@ -951,17 +1618,30 @@ type NccEgressDefaultRules struct { AzureServiceEndpointRule []NccAzureServiceEndpointRule `tfsdk:"azure_service_endpoint_rule" tf:"optional,object"` } +func (newState *NccEgressDefaultRules) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccEgressDefaultRules) { +} + +func (newState *NccEgressDefaultRules) SyncEffectiveFieldsDuringRead(existingState NccEgressDefaultRules) { +} + // The network connectivity rules that configured for each destinations. These // rules override default rules. type NccEgressTargetRules struct { AzurePrivateEndpointRules []NccAzurePrivateEndpointRule `tfsdk:"azure_private_endpoint_rules" tf:"optional"` } +func (newState *NccEgressTargetRules) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccEgressTargetRules) { +} + +func (newState *NccEgressTargetRules) SyncEffectiveFieldsDuringRead(existingState NccEgressTargetRules) { +} + type NetworkConnectivityConfiguration struct { // The Databricks account ID that hosts the credential. AccountId types.String `tfsdk:"account_id" tf:"optional"` // Time in epoch milliseconds when this object was created. - CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` + EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` // The network connectivity rules that apply to network traffic from your // serverless compute resources. EgressConfig []NccEgressConfig `tfsdk:"egress_config" tf:"optional,object"` @@ -971,13 +1651,36 @@ type NetworkConnectivityConfiguration struct { // `^[0-9a-zA-Z-_]{3,30}$`. Name types.String `tfsdk:"name" tf:"optional"` // Databricks network connectivity configuration ID. - NetworkConnectivityConfigId types.String `tfsdk:"network_connectivity_config_id" tf:"optional"` + NetworkConnectivityConfigId types.String `tfsdk:"network_connectivity_config_id" tf:"optional"` + EffectiveNetworkConnectivityConfigId types.String `tfsdk:"effective_network_connectivity_config_id" tf:"computed,optional"` // The region for the network connectivity configuration. Only workspaces in // the same region can be attached to the network connectivity // configuration. Region types.String `tfsdk:"region" tf:"optional"` // Time in epoch milliseconds when this object was updated. 
- UpdatedTime types.Int64 `tfsdk:"updated_time" tf:"optional"` + UpdatedTime types.Int64 `tfsdk:"updated_time" tf:"optional"` + EffectiveUpdatedTime types.Int64 `tfsdk:"effective_updated_time" tf:"computed,optional"` +} + +func (newState *NetworkConnectivityConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(plan NetworkConnectivityConfiguration) { + newState.EffectiveCreationTime = newState.CreationTime + newState.CreationTime = plan.CreationTime + newState.EffectiveNetworkConnectivityConfigId = newState.NetworkConnectivityConfigId + newState.NetworkConnectivityConfigId = plan.NetworkConnectivityConfigId + newState.EffectiveUpdatedTime = newState.UpdatedTime + newState.UpdatedTime = plan.UpdatedTime +} + +func (newState *NetworkConnectivityConfiguration) SyncEffectiveFieldsDuringRead(existingState NetworkConnectivityConfiguration) { + if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { + newState.CreationTime = existingState.CreationTime + } + if existingState.EffectiveNetworkConnectivityConfigId.ValueString() == newState.NetworkConnectivityConfigId.ValueString() { + newState.NetworkConnectivityConfigId = existingState.NetworkConnectivityConfigId + } + if existingState.EffectiveUpdatedTime.ValueInt64() == newState.UpdatedTime.ValueInt64() { + newState.UpdatedTime = existingState.UpdatedTime + } } type NotificationDestination struct { @@ -994,6 +1697,12 @@ type NotificationDestination struct { Id types.String `tfsdk:"id" tf:"optional"` } +func (newState *NotificationDestination) SyncEffectiveFieldsDuringCreateOrUpdate(plan NotificationDestination) { +} + +func (newState *NotificationDestination) SyncEffectiveFieldsDuringRead(existingState NotificationDestination) { +} + type PagerdutyConfig struct { // [Input-Only] Integration key for PagerDuty. IntegrationKey types.String `tfsdk:"integration_key" tf:"optional"` @@ -1001,12 +1710,24 @@ type PagerdutyConfig struct { IntegrationKeySet types.Bool `tfsdk:"integration_key_set" tf:"optional"` } +func (newState *PagerdutyConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan PagerdutyConfig) { +} + +func (newState *PagerdutyConfig) SyncEffectiveFieldsDuringRead(existingState PagerdutyConfig) { +} + // Partition by workspace or account type PartitionId struct { // The ID of the workspace. WorkspaceId types.Int64 `tfsdk:"workspaceId" tf:"optional"` } +func (newState *PartitionId) SyncEffectiveFieldsDuringCreateOrUpdate(plan PartitionId) { +} + +func (newState *PartitionId) SyncEffectiveFieldsDuringRead(existingState PartitionId) { +} + type PersonalComputeMessage struct { // ON: Grants all users in all workspaces access to the Personal Compute // default policy, allowing all users to create single-machine compute @@ -1018,6 +1739,12 @@ type PersonalComputeMessage struct { Value types.String `tfsdk:"value" tf:""` } +func (newState *PersonalComputeMessage) SyncEffectiveFieldsDuringCreateOrUpdate(plan PersonalComputeMessage) { +} + +func (newState *PersonalComputeMessage) SyncEffectiveFieldsDuringRead(existingState PersonalComputeMessage) { +} + type PersonalComputeSetting struct { // etag used for versioning. The response is at least as fresh as the eTag // provided. 
This is used for optimistic concurrency control as a way to @@ -1037,6 +1764,12 @@ type PersonalComputeSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *PersonalComputeSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan PersonalComputeSetting) { +} + +func (newState *PersonalComputeSetting) SyncEffectiveFieldsDuringRead(existingState PersonalComputeSetting) { +} + type PublicTokenInfo struct { // Comment the token was created with, if applicable. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -1049,6 +1782,12 @@ type PublicTokenInfo struct { TokenId types.String `tfsdk:"token_id" tf:"optional"` } +func (newState *PublicTokenInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan PublicTokenInfo) { +} + +func (newState *PublicTokenInfo) SyncEffectiveFieldsDuringRead(existingState PublicTokenInfo) { +} + // Details required to replace an IP access list. type ReplaceIpAccessList struct { // Specifies whether this IP access list is enabled. @@ -1068,13 +1807,31 @@ type ReplaceIpAccessList struct { ListType types.String `tfsdk:"list_type" tf:""` } +func (newState *ReplaceIpAccessList) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReplaceIpAccessList) { +} + +func (newState *ReplaceIpAccessList) SyncEffectiveFieldsDuringRead(existingState ReplaceIpAccessList) { +} + type ReplaceResponse struct { } +func (newState *ReplaceResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReplaceResponse) { +} + +func (newState *ReplaceResponse) SyncEffectiveFieldsDuringRead(existingState ReplaceResponse) { +} + type RestrictWorkspaceAdminsMessage struct { Status types.String `tfsdk:"status" tf:""` } +func (newState *RestrictWorkspaceAdminsMessage) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestrictWorkspaceAdminsMessage) { +} + +func (newState *RestrictWorkspaceAdminsMessage) SyncEffectiveFieldsDuringRead(existingState RestrictWorkspaceAdminsMessage) { +} + type RestrictWorkspaceAdminsSetting struct { // etag used for versioning. The response is at least as fresh as the eTag // provided. This is used for optimistic concurrency control as a way to @@ -1094,17 +1851,41 @@ type RestrictWorkspaceAdminsSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +func (newState *RestrictWorkspaceAdminsSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestrictWorkspaceAdminsSetting) { +} + +func (newState *RestrictWorkspaceAdminsSetting) SyncEffectiveFieldsDuringRead(existingState RestrictWorkspaceAdminsSetting) { +} + type RevokeTokenRequest struct { // The ID of the token to be revoked. TokenId types.String `tfsdk:"token_id" tf:""` } +func (newState *RevokeTokenRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RevokeTokenRequest) { +} + +func (newState *RevokeTokenRequest) SyncEffectiveFieldsDuringRead(existingState RevokeTokenRequest) { +} + type RevokeTokenResponse struct { } +func (newState *RevokeTokenResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RevokeTokenResponse) { +} + +func (newState *RevokeTokenResponse) SyncEffectiveFieldsDuringRead(existingState RevokeTokenResponse) { +} + type SetStatusResponse struct { } +func (newState *SetStatusResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetStatusResponse) { +} + +func (newState *SetStatusResponse) SyncEffectiveFieldsDuringRead(existingState SetStatusResponse) { +} + type SlackConfig struct { // [Input-Only] URL for Slack destination. 
Url types.String `tfsdk:"url" tf:"optional"` @@ -1112,11 +1893,23 @@ type SlackConfig struct { UrlSet types.Bool `tfsdk:"url_set" tf:"optional"` } +func (newState *SlackConfig) SyncEffectiveFieldsDuringCreateOrUpdate(plan SlackConfig) { +} + +func (newState *SlackConfig) SyncEffectiveFieldsDuringRead(existingState SlackConfig) { +} + type StringMessage struct { // Represents a generic string value. Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *StringMessage) SyncEffectiveFieldsDuringCreateOrUpdate(plan StringMessage) { +} + +func (newState *StringMessage) SyncEffectiveFieldsDuringRead(existingState StringMessage) { +} + type TokenAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -1128,6 +1921,12 @@ type TokenAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *TokenAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenAccessControlRequest) { +} + +func (newState *TokenAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState TokenAccessControlRequest) { +} + type TokenAccessControlResponse struct { // All permissions. AllPermissions []TokenPermission `tfsdk:"all_permissions" tf:"optional"` @@ -1141,6 +1940,12 @@ type TokenAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *TokenAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenAccessControlResponse) { +} + +func (newState *TokenAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState TokenAccessControlResponse) { +} + type TokenInfo struct { // Comment that describes the purpose of the token, specified by the token // creator. @@ -1161,6 +1966,12 @@ type TokenInfo struct { WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:"optional"` } +func (newState *TokenInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenInfo) { +} + +func (newState *TokenInfo) SyncEffectiveFieldsDuringRead(existingState TokenInfo) { +} + type TokenPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -1169,6 +1980,12 @@ type TokenPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *TokenPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenPermission) { +} + +func (newState *TokenPermission) SyncEffectiveFieldsDuringRead(existingState TokenPermission) { +} + type TokenPermissions struct { AccessControlList []TokenAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -1177,16 +1994,34 @@ type TokenPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *TokenPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenPermissions) { +} + +func (newState *TokenPermissions) SyncEffectiveFieldsDuringRead(existingState TokenPermissions) { +} + type TokenPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *TokenPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenPermissionsDescription) { +} + +func (newState *TokenPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState TokenPermissionsDescription) { +} + type TokenPermissionsRequest struct { AccessControlList []TokenAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` } +func (newState *TokenPermissionsRequest) 
SyncEffectiveFieldsDuringCreateOrUpdate(plan TokenPermissionsRequest) { +} + +func (newState *TokenPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState TokenPermissionsRequest) { +} + // Details required to update a setting. type UpdateAutomaticClusterUpdateSettingRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1201,6 +2036,12 @@ type UpdateAutomaticClusterUpdateSettingRequest struct { Setting []AutomaticClusterUpdateSetting `tfsdk:"setting" tf:"object"` } +func (newState *UpdateAutomaticClusterUpdateSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAutomaticClusterUpdateSettingRequest) { +} + +func (newState *UpdateAutomaticClusterUpdateSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAutomaticClusterUpdateSettingRequest) { +} + // Details required to update a setting. type UpdateComplianceSecurityProfileSettingRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1215,6 +2056,12 @@ type UpdateComplianceSecurityProfileSettingRequest struct { Setting []ComplianceSecurityProfileSetting `tfsdk:"setting" tf:"object"` } +func (newState *UpdateComplianceSecurityProfileSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateComplianceSecurityProfileSettingRequest) { +} + +func (newState *UpdateComplianceSecurityProfileSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateComplianceSecurityProfileSettingRequest) { +} + // Details required to update a setting. type UpdateCspEnablementAccountSettingRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1229,6 +2076,12 @@ type UpdateCspEnablementAccountSettingRequest struct { Setting []CspEnablementAccountSetting `tfsdk:"setting" tf:"object"` } +func (newState *UpdateCspEnablementAccountSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCspEnablementAccountSettingRequest) { +} + +func (newState *UpdateCspEnablementAccountSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateCspEnablementAccountSettingRequest) { +} + // Details required to update a setting. type UpdateDefaultNamespaceSettingRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1251,6 +2104,12 @@ type UpdateDefaultNamespaceSettingRequest struct { Setting []DefaultNamespaceSetting `tfsdk:"setting" tf:"object"` } +func (newState *UpdateDefaultNamespaceSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDefaultNamespaceSettingRequest) { +} + +func (newState *UpdateDefaultNamespaceSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDefaultNamespaceSettingRequest) { +} + // Details required to update a setting. type UpdateDisableLegacyAccessRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1265,6 +2124,32 @@ type UpdateDisableLegacyAccessRequest struct { Setting []DisableLegacyAccess `tfsdk:"setting" tf:"object"` } +func (newState *UpdateDisableLegacyAccessRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDisableLegacyAccessRequest) { +} + +func (newState *UpdateDisableLegacyAccessRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDisableLegacyAccessRequest) { +} + +// Details required to update a setting. +type UpdateDisableLegacyDbfsRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. 
Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting []DisableLegacyDbfs `tfsdk:"setting" tf:"object"` +} + +func (newState *UpdateDisableLegacyDbfsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDisableLegacyDbfsRequest) { +} + +func (newState *UpdateDisableLegacyDbfsRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDisableLegacyDbfsRequest) { +} + // Details required to update a setting. type UpdateDisableLegacyFeaturesRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1279,6 +2164,12 @@ type UpdateDisableLegacyFeaturesRequest struct { Setting []DisableLegacyFeatures `tfsdk:"setting" tf:"object"` } +func (newState *UpdateDisableLegacyFeaturesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDisableLegacyFeaturesRequest) { +} + +func (newState *UpdateDisableLegacyFeaturesRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDisableLegacyFeaturesRequest) { +} + // Details required to update a setting. type UpdateEnhancedSecurityMonitoringSettingRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1293,6 +2184,12 @@ type UpdateEnhancedSecurityMonitoringSettingRequest struct { Setting []EnhancedSecurityMonitoringSetting `tfsdk:"setting" tf:"object"` } +func (newState *UpdateEnhancedSecurityMonitoringSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateEnhancedSecurityMonitoringSettingRequest) { +} + +func (newState *UpdateEnhancedSecurityMonitoringSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateEnhancedSecurityMonitoringSettingRequest) { +} + // Details required to update a setting. type UpdateEsmEnablementAccountSettingRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1307,6 +2204,12 @@ type UpdateEsmEnablementAccountSettingRequest struct { Setting []EsmEnablementAccountSetting `tfsdk:"setting" tf:"object"` } +func (newState *UpdateEsmEnablementAccountSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateEsmEnablementAccountSettingRequest) { +} + +func (newState *UpdateEsmEnablementAccountSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateEsmEnablementAccountSettingRequest) { +} + // Details required to update an IP access list. type UpdateIpAccessList struct { // Specifies whether this IP access list is enabled. @@ -1326,6 +2229,12 @@ type UpdateIpAccessList struct { ListType types.String `tfsdk:"list_type" tf:"optional"` } +func (newState *UpdateIpAccessList) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateIpAccessList) { +} + +func (newState *UpdateIpAccessList) SyncEffectiveFieldsDuringRead(existingState UpdateIpAccessList) { +} + type UpdateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. @@ -1336,6 +2245,12 @@ type UpdateNotificationDestinationRequest struct { Id types.String `tfsdk:"-"` } +func (newState *UpdateNotificationDestinationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateNotificationDestinationRequest) { +} + +func (newState *UpdateNotificationDestinationRequest) SyncEffectiveFieldsDuringRead(existingState UpdateNotificationDestinationRequest) { +} + // Details required to update a setting. 
type UpdatePersonalComputeSettingRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1350,9 +2265,21 @@ type UpdatePersonalComputeSettingRequest struct { Setting []PersonalComputeSetting `tfsdk:"setting" tf:"object"` } +func (newState *UpdatePersonalComputeSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdatePersonalComputeSettingRequest) { +} + +func (newState *UpdatePersonalComputeSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdatePersonalComputeSettingRequest) { +} + type UpdateResponse struct { } +func (newState *UpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateResponse) { +} + +func (newState *UpdateResponse) SyncEffectiveFieldsDuringRead(existingState UpdateResponse) { +} + // Details required to update a setting. type UpdateRestrictWorkspaceAdminsSettingRequest struct { // This should always be set to true for Settings API. Added for AIP @@ -1366,3 +2293,9 @@ type UpdateRestrictWorkspaceAdminsSettingRequest struct { Setting []RestrictWorkspaceAdminsSetting `tfsdk:"setting" tf:"object"` } + +func (newState *UpdateRestrictWorkspaceAdminsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRestrictWorkspaceAdminsSettingRequest) { +} + +func (newState *UpdateRestrictWorkspaceAdminsSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateRestrictWorkspaceAdminsSettingRequest) { +} diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index d83c38ff9b..1cb5022027 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -29,6 +29,12 @@ type CentralCleanRoomInfo struct { StationRegion types.String `tfsdk:"station_region" tf:"optional"` } +func (newState *CentralCleanRoomInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CentralCleanRoomInfo) { +} + +func (newState *CentralCleanRoomInfo) SyncEffectiveFieldsDuringRead(existingState CentralCleanRoomInfo) { +} + type CleanRoomAssetInfo struct { // Time at which this asset was added, in epoch milliseconds. AddedAt types.Int64 `tfsdk:"added_at" tf:"optional"` @@ -42,6 +48,12 @@ type CleanRoomAssetInfo struct { UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` } +func (newState *CleanRoomAssetInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetInfo) { +} + +func (newState *CleanRoomAssetInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetInfo) { +} + type CleanRoomCatalog struct { // Name of the catalog in the clean room station. Empty for notebooks. CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` @@ -51,6 +63,12 @@ type CleanRoomCatalog struct { Tables []SharedDataObject `tfsdk:"tables" tf:"optional"` } +func (newState *CleanRoomCatalog) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCatalog) { +} + +func (newState *CleanRoomCatalog) SyncEffectiveFieldsDuringRead(existingState CleanRoomCatalog) { +} + type CleanRoomCatalogUpdate struct { // The name of the catalog to update assets. CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` @@ -58,6 +76,12 @@ type CleanRoomCatalogUpdate struct { Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional,object"` } +func (newState *CleanRoomCatalogUpdate) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCatalogUpdate) { +} + +func (newState *CleanRoomCatalogUpdate) SyncEffectiveFieldsDuringRead(existingState CleanRoomCatalogUpdate) { +} + type CleanRoomCollaboratorInfo struct { // The global Unity Catalog metastore id of the collaborator. 
Also known as // the sharing identifier. The identifier is of format @@ -69,6 +93,12 @@ type CleanRoomCollaboratorInfo struct { OrganizationName types.String `tfsdk:"organization_name" tf:"optional"` } +func (newState *CleanRoomCollaboratorInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCollaboratorInfo) { +} + +func (newState *CleanRoomCollaboratorInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomCollaboratorInfo) { +} + type CleanRoomInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -90,6 +120,12 @@ type CleanRoomInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *CleanRoomInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomInfo) { +} + +func (newState *CleanRoomInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomInfo) { +} + type CleanRoomNotebookInfo struct { // The base64 representation of the notebook content in HTML. NotebookContent types.String `tfsdk:"notebook_content" tf:"optional"` @@ -97,6 +133,12 @@ type CleanRoomNotebookInfo struct { NotebookName types.String `tfsdk:"notebook_name" tf:"optional"` } +func (newState *CleanRoomNotebookInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomNotebookInfo) { +} + +func (newState *CleanRoomNotebookInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomNotebookInfo) { +} + type CleanRoomTableInfo struct { // Name of parent catalog. CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` @@ -111,6 +153,12 @@ type CleanRoomTableInfo struct { SchemaName types.String `tfsdk:"schema_name" tf:"optional"` } +func (newState *CleanRoomTableInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomTableInfo) { +} + +func (newState *CleanRoomTableInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomTableInfo) { +} + type ColumnInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -138,6 +186,12 @@ type ColumnInfo struct { TypeText types.String `tfsdk:"type_text" tf:"optional"` } +func (newState *ColumnInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnInfo) { +} + +func (newState *ColumnInfo) SyncEffectiveFieldsDuringRead(existingState ColumnInfo) { +} + type ColumnMask struct { // The full name of the column mask SQL UDF. FunctionName types.String `tfsdk:"function_name" tf:"optional"` @@ -148,6 +202,12 @@ type ColumnMask struct { UsingColumnNames []types.String `tfsdk:"using_column_names" tf:"optional"` } +func (newState *ColumnMask) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnMask) { +} + +func (newState *ColumnMask) SyncEffectiveFieldsDuringRead(existingState ColumnMask) { +} + type CreateCleanRoom struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -157,6 +217,12 @@ type CreateCleanRoom struct { RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"object"` } +func (newState *CreateCleanRoom) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCleanRoom) { +} + +func (newState *CreateCleanRoom) SyncEffectiveFieldsDuringRead(existingState CreateCleanRoom) { +} + type CreateProvider struct { // The delta sharing authentication type. 
AuthenticationType types.String `tfsdk:"authentication_type" tf:""` @@ -169,6 +235,12 @@ type CreateProvider struct { RecipientProfileStr types.String `tfsdk:"recipient_profile_str" tf:"optional"` } +func (newState *CreateProvider) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateProvider) { +} + +func (newState *CreateProvider) SyncEffectiveFieldsDuringRead(existingState CreateProvider) { +} + type CreateRecipient struct { // The delta sharing authentication type. AuthenticationType types.String `tfsdk:"authentication_type" tf:""` @@ -194,6 +266,12 @@ type CreateRecipient struct { SharingCode types.String `tfsdk:"sharing_code" tf:"optional"` } +func (newState *CreateRecipient) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateRecipient) { +} + +func (newState *CreateRecipient) SyncEffectiveFieldsDuringRead(existingState CreateRecipient) { +} + type CreateShare struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -203,42 +281,90 @@ type CreateShare struct { StorageRoot types.String `tfsdk:"storage_root" tf:"optional"` } +func (newState *CreateShare) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateShare) { +} + +func (newState *CreateShare) SyncEffectiveFieldsDuringRead(existingState CreateShare) { +} + // Delete a clean room type DeleteCleanRoomRequest struct { // The name of the clean room. Name types.String `tfsdk:"-"` } +func (newState *DeleteCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCleanRoomRequest) { +} + +func (newState *DeleteCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCleanRoomRequest) { +} + // Delete a provider type DeleteProviderRequest struct { // Name of the provider. Name types.String `tfsdk:"-"` } +func (newState *DeleteProviderRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteProviderRequest) { +} + +func (newState *DeleteProviderRequest) SyncEffectiveFieldsDuringRead(existingState DeleteProviderRequest) { +} + // Delete a share recipient type DeleteRecipientRequest struct { // Name of the recipient. Name types.String `tfsdk:"-"` } +func (newState *DeleteRecipientRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRecipientRequest) { +} + +func (newState *DeleteRecipientRequest) SyncEffectiveFieldsDuringRead(existingState DeleteRecipientRequest) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + // Delete a share type DeleteShareRequest struct { // The name of the share. Name types.String `tfsdk:"-"` } +func (newState *DeleteShareRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteShareRequest) { +} + +func (newState *DeleteShareRequest) SyncEffectiveFieldsDuringRead(existingState DeleteShareRequest) { +} + // Get a share activation URL type GetActivationUrlInfoRequest struct { // The one time activation url. It also accepts activation token. 
ActivationUrl types.String `tfsdk:"-"` } +func (newState *GetActivationUrlInfoRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetActivationUrlInfoRequest) { +} + +func (newState *GetActivationUrlInfoRequest) SyncEffectiveFieldsDuringRead(existingState GetActivationUrlInfoRequest) { +} + type GetActivationUrlInfoResponse struct { } +func (newState *GetActivationUrlInfoResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetActivationUrlInfoResponse) { +} + +func (newState *GetActivationUrlInfoResponse) SyncEffectiveFieldsDuringRead(existingState GetActivationUrlInfoResponse) { +} + // Get a clean room type GetCleanRoomRequest struct { // Whether to include remote details (central) on the clean room. @@ -247,18 +373,36 @@ type GetCleanRoomRequest struct { Name types.String `tfsdk:"-"` } +func (newState *GetCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCleanRoomRequest) { +} + +func (newState *GetCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState GetCleanRoomRequest) { +} + // Get a provider type GetProviderRequest struct { // Name of the provider. Name types.String `tfsdk:"-"` } +func (newState *GetProviderRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetProviderRequest) { +} + +func (newState *GetProviderRequest) SyncEffectiveFieldsDuringRead(existingState GetProviderRequest) { +} + // Get a share recipient type GetRecipientRequest struct { // Name of the recipient. Name types.String `tfsdk:"-"` } +func (newState *GetRecipientRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRecipientRequest) { +} + +func (newState *GetRecipientRequest) SyncEffectiveFieldsDuringRead(existingState GetRecipientRequest) { +} + type GetRecipientSharePermissionsResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -268,6 +412,12 @@ type GetRecipientSharePermissionsResponse struct { PermissionsOut []ShareToPrivilegeAssignment `tfsdk:"permissions_out" tf:"optional"` } +func (newState *GetRecipientSharePermissionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRecipientSharePermissionsResponse) { +} + +func (newState *GetRecipientSharePermissionsResponse) SyncEffectiveFieldsDuringRead(existingState GetRecipientSharePermissionsResponse) { +} + // Get a share type GetShareRequest struct { // Query for data to include in the share. @@ -276,11 +426,23 @@ type GetShareRequest struct { Name types.String `tfsdk:"-"` } +func (newState *GetShareRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetShareRequest) { +} + +func (newState *GetShareRequest) SyncEffectiveFieldsDuringRead(existingState GetShareRequest) { +} + type IpAccessList struct { // Allowed IP Addresses in CIDR notation. Limit of 100. AllowedIpAddresses []types.String `tfsdk:"allowed_ip_addresses" tf:"optional"` } +func (newState *IpAccessList) SyncEffectiveFieldsDuringCreateOrUpdate(plan IpAccessList) { +} + +func (newState *IpAccessList) SyncEffectiveFieldsDuringRead(existingState IpAccessList) { +} + // List clean rooms type ListCleanRoomsRequest struct { // Maximum number of clean rooms to return. 
If not set, all the clean rooms @@ -294,6 +456,12 @@ type ListCleanRoomsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListCleanRoomsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomsRequest) { +} + +func (newState *ListCleanRoomsRequest) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomsRequest) { +} + type ListCleanRoomsResponse struct { // An array of clean rooms. Remote details (central) are not included. CleanRooms []CleanRoomInfo `tfsdk:"clean_rooms" tf:"optional"` @@ -303,6 +471,12 @@ type ListCleanRoomsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListCleanRoomsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomsResponse) { +} + +func (newState *ListCleanRoomsResponse) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomsResponse) { +} + type ListProviderSharesResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -312,6 +486,12 @@ type ListProviderSharesResponse struct { Shares []ProviderShare `tfsdk:"shares" tf:"optional"` } +func (newState *ListProviderSharesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListProviderSharesResponse) { +} + +func (newState *ListProviderSharesResponse) SyncEffectiveFieldsDuringRead(existingState ListProviderSharesResponse) { +} + // List providers type ListProvidersRequest struct { // If not provided, all providers will be returned. If no providers exist @@ -331,6 +511,12 @@ type ListProvidersRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListProvidersRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListProvidersRequest) { +} + +func (newState *ListProvidersRequest) SyncEffectiveFieldsDuringRead(existingState ListProvidersRequest) { +} + type ListProvidersResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -340,6 +526,12 @@ type ListProvidersResponse struct { Providers []ProviderInfo `tfsdk:"providers" tf:"optional"` } +func (newState *ListProvidersResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListProvidersResponse) { +} + +func (newState *ListProvidersResponse) SyncEffectiveFieldsDuringRead(existingState ListProvidersResponse) { +} + // List share recipients type ListRecipientsRequest struct { // If not provided, all recipients will be returned. If no recipients exist @@ -359,6 +551,12 @@ type ListRecipientsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListRecipientsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListRecipientsRequest) { +} + +func (newState *ListRecipientsRequest) SyncEffectiveFieldsDuringRead(existingState ListRecipientsRequest) { +} + type ListRecipientsResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -368,6 +566,12 @@ type ListRecipientsResponse struct { Recipients []RecipientInfo `tfsdk:"recipients" tf:"optional"` } +func (newState *ListRecipientsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListRecipientsResponse) { +} + +func (newState *ListRecipientsResponse) SyncEffectiveFieldsDuringRead(existingState ListRecipientsResponse) { +} + // List shares by Provider type ListSharesRequest struct { // Maximum number of shares to return. 
- when set to 0, the page length is @@ -386,6 +590,12 @@ type ListSharesRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListSharesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSharesRequest) { +} + +func (newState *ListSharesRequest) SyncEffectiveFieldsDuringRead(existingState ListSharesRequest) { +} + type ListSharesResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -395,9 +605,21 @@ type ListSharesResponse struct { Shares []ShareInfo `tfsdk:"shares" tf:"optional"` } +func (newState *ListSharesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSharesResponse) { +} + +func (newState *ListSharesResponse) SyncEffectiveFieldsDuringRead(existingState ListSharesResponse) { +} + type Partition struct { // An array of partition values. - Values []PartitionValue `tfsdk:"values" tf:"optional"` + Values []PartitionValue `tfsdk:"value" tf:"optional"` +} + +func (newState *Partition) SyncEffectiveFieldsDuringCreateOrUpdate(plan Partition) { +} + +func (newState *Partition) SyncEffectiveFieldsDuringRead(existingState Partition) { } type PartitionValue struct { @@ -415,6 +637,12 @@ type PartitionValue struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *PartitionValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan PartitionValue) { +} + +func (newState *PartitionValue) SyncEffectiveFieldsDuringRead(existingState PartitionValue) { +} + type PrivilegeAssignment struct { // The principal (user email address or group name). Principal types.String `tfsdk:"principal" tf:"optional"` @@ -422,6 +650,12 @@ type PrivilegeAssignment struct { Privileges []types.String `tfsdk:"privileges" tf:"optional"` } +func (newState *PrivilegeAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan PrivilegeAssignment) { +} + +func (newState *PrivilegeAssignment) SyncEffectiveFieldsDuringRead(existingState PrivilegeAssignment) { +} + type ProviderInfo struct { // The delta sharing authentication type. AuthenticationType types.String `tfsdk:"authentication_type" tf:"optional"` @@ -460,11 +694,23 @@ type ProviderInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *ProviderInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ProviderInfo) { +} + +func (newState *ProviderInfo) SyncEffectiveFieldsDuringRead(existingState ProviderInfo) { +} + type ProviderShare struct { // The name of the Provider Share. Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *ProviderShare) SyncEffectiveFieldsDuringCreateOrUpdate(plan ProviderShare) { +} + +func (newState *ProviderShare) SyncEffectiveFieldsDuringRead(existingState ProviderShare) { +} + type RecipientInfo struct { // A boolean status field showing whether the Recipient's activation URL has // been exercised or not. @@ -513,6 +759,12 @@ type RecipientInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *RecipientInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan RecipientInfo) { +} + +func (newState *RecipientInfo) SyncEffectiveFieldsDuringRead(existingState RecipientInfo) { +} + type RecipientProfile struct { // The token used to authorize the recipient. 
BearerToken types.String `tfsdk:"bearer_token" tf:"optional"` @@ -522,6 +774,12 @@ type RecipientProfile struct { ShareCredentialsVersion types.Int64 `tfsdk:"share_credentials_version" tf:"optional"` } +func (newState *RecipientProfile) SyncEffectiveFieldsDuringCreateOrUpdate(plan RecipientProfile) { +} + +func (newState *RecipientProfile) SyncEffectiveFieldsDuringRead(existingState RecipientProfile) { +} + type RecipientTokenInfo struct { // Full activation URL to retrieve the access token. It will be empty if the // token is already retrieved. @@ -540,12 +798,24 @@ type RecipientTokenInfo struct { UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` } +func (newState *RecipientTokenInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan RecipientTokenInfo) { +} + +func (newState *RecipientTokenInfo) SyncEffectiveFieldsDuringRead(existingState RecipientTokenInfo) { +} + // Get an access token type RetrieveTokenRequest struct { // The one time activation url. It also accepts activation token. ActivationUrl types.String `tfsdk:"-"` } +func (newState *RetrieveTokenRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RetrieveTokenRequest) { +} + +func (newState *RetrieveTokenRequest) SyncEffectiveFieldsDuringRead(existingState RetrieveTokenRequest) { +} + type RetrieveTokenResponse struct { // The token used to authorize the recipient. BearerToken types.String `tfsdk:"bearerToken" tf:"optional"` @@ -557,6 +827,12 @@ type RetrieveTokenResponse struct { ShareCredentialsVersion types.Int64 `tfsdk:"shareCredentialsVersion" tf:"optional"` } +func (newState *RetrieveTokenResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RetrieveTokenResponse) { +} + +func (newState *RetrieveTokenResponse) SyncEffectiveFieldsDuringRead(existingState RetrieveTokenResponse) { +} + type RotateRecipientToken struct { // The expiration time of the bearer token in ISO 8601 format. This will set // the expiration_time of existing token only to a smaller timestamp, it @@ -567,6 +843,12 @@ type RotateRecipientToken struct { Name types.String `tfsdk:"-"` } +func (newState *RotateRecipientToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan RotateRecipientToken) { +} + +func (newState *RotateRecipientToken) SyncEffectiveFieldsDuringRead(existingState RotateRecipientToken) { +} + // An object with __properties__ containing map of key-value properties attached // to the securable. type SecurablePropertiesKvPairs struct { @@ -574,27 +856,45 @@ type SecurablePropertiesKvPairs struct { Properties map[string]types.String `tfsdk:"properties" tf:""` } +func (newState *SecurablePropertiesKvPairs) SyncEffectiveFieldsDuringCreateOrUpdate(plan SecurablePropertiesKvPairs) { +} + +func (newState *SecurablePropertiesKvPairs) SyncEffectiveFieldsDuringRead(existingState SecurablePropertiesKvPairs) { +} + type ShareInfo struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` // Time at which this share was created, in epoch milliseconds. - CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` + CreatedAt types.Int64 `tfsdk:"created_at" tf:"computed,optional"` // Username of share creator. - CreatedBy types.String `tfsdk:"created_by" tf:"optional"` + CreatedBy types.String `tfsdk:"created_by" tf:"computed,optional"` // Name of the share. Name types.String `tfsdk:"name" tf:"optional"` // A list of shared data objects within the share. 
- Objects []SharedDataObject `tfsdk:"objects" tf:"optional"` + Objects []SharedDataObject `tfsdk:"object" tf:"optional"` // Username of current owner of share. - Owner types.String `tfsdk:"owner" tf:"optional"` + Owner types.String `tfsdk:"owner" tf:"optional"` + EffectiveOwner types.String `tfsdk:"effective_owner" tf:"computed,optional"` // Storage Location URL (full path) for the share. StorageLocation types.String `tfsdk:"storage_location" tf:"optional"` // Storage root URL for the share. StorageRoot types.String `tfsdk:"storage_root" tf:"optional"` // Time at which this share was updated, in epoch milliseconds. - UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` + UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"computed,optional"` // Username of share updater. - UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` + UpdatedBy types.String `tfsdk:"updated_by" tf:"computed,optional"` +} + +func (newState *ShareInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ShareInfo) { + newState.EffectiveOwner = newState.Owner + newState.Owner = plan.Owner +} + +func (newState *ShareInfo) SyncEffectiveFieldsDuringRead(existingState ShareInfo) { + if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() { + newState.Owner = existingState.Owner + } } // Get recipient share permissions @@ -615,6 +915,12 @@ type SharePermissionsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *SharePermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SharePermissionsRequest) { +} + +func (newState *SharePermissionsRequest) SyncEffectiveFieldsDuringRead(existingState SharePermissionsRequest) { +} + type ShareToPrivilegeAssignment struct { // The privileges assigned to the principal. PrivilegeAssignments []PrivilegeAssignment `tfsdk:"privilege_assignments" tf:"optional"` @@ -622,14 +928,21 @@ type ShareToPrivilegeAssignment struct { ShareName types.String `tfsdk:"share_name" tf:"optional"` } +func (newState *ShareToPrivilegeAssignment) SyncEffectiveFieldsDuringCreateOrUpdate(plan ShareToPrivilegeAssignment) { +} + +func (newState *ShareToPrivilegeAssignment) SyncEffectiveFieldsDuringRead(existingState ShareToPrivilegeAssignment) { +} + type SharedDataObject struct { // The time when this data object is added to the share, in epoch // milliseconds. - AddedAt types.Int64 `tfsdk:"added_at" tf:"optional"` + AddedAt types.Int64 `tfsdk:"added_at" tf:"computed,optional"` // Username of the sharer. - AddedBy types.String `tfsdk:"added_by" tf:"optional"` + AddedBy types.String `tfsdk:"added_by" tf:"computed,optional"` // Whether to enable cdf or indicate if cdf is enabled on the shared object. - CdfEnabled types.Bool `tfsdk:"cdf_enabled" tf:"optional"` + CdfEnabled types.Bool `tfsdk:"cdf_enabled" tf:"optional"` + EffectiveCdfEnabled types.Bool `tfsdk:"effective_cdf_enabled" tf:"computed,optional"` // A user-provided comment when adding the data object to the share. // [Update:OPT] Comment types.String `tfsdk:"comment" tf:"optional"` @@ -641,19 +954,21 @@ type SharedDataObject struct { DataObjectType types.String `tfsdk:"data_object_type" tf:"optional"` // Whether to enable or disable sharing of data history. If not specified, // the default is **DISABLED**. 
- HistoryDataSharingStatus types.String `tfsdk:"history_data_sharing_status" tf:"optional"` + HistoryDataSharingStatus types.String `tfsdk:"history_data_sharing_status" tf:"optional"` + EffectiveHistoryDataSharingStatus types.String `tfsdk:"effective_history_data_sharing_status" tf:"computed,optional"` // A fully qualified name that uniquely identifies a data object. // // For example, a table's fully qualified name is in the format of // `<catalog>.<schema>.<table>`. Name types.String `tfsdk:"name" tf:""` // Array of partitions for the shared data. - Partitions []Partition `tfsdk:"partitions" tf:"optional"` + Partitions []Partition `tfsdk:"partition" tf:"optional"` // A user-provided new name for the data object within the share. If this // new name is not provided, the object's original name will be used as the // `shared_as` name. The `shared_as` name must be unique within a share. For // tables, the new name must follow the format of `.
`. - SharedAs types.String `tfsdk:"shared_as" tf:"optional"` + SharedAs types.String `tfsdk:"shared_as" tf:"optional"` + EffectiveSharedAs types.String `tfsdk:"effective_shared_as" tf:"computed,optional"` // The start version associated with the object. This allows data providers // to control the lowest object version that is accessible by clients. If // specified, clients can query snapshots or changes for versions >= @@ -661,9 +976,10 @@ type SharedDataObject struct { // version of the object at the time it was added to the share. // // NOTE: The start_version should be <= the `current` version of the object. - StartVersion types.Int64 `tfsdk:"start_version" tf:"optional"` + StartVersion types.Int64 `tfsdk:"start_version" tf:"optional"` + EffectiveStartVersion types.Int64 `tfsdk:"effective_start_version" tf:"computed,optional"` // One of: **ACTIVE**, **PERMISSION_DENIED**. - Status types.String `tfsdk:"status" tf:"optional"` + Status types.String `tfsdk:"status" tf:"computed,optional"` // A user-provided new name for the data object within the share. If this // new name is not provided, the object's original name will be used as the // `string_shared_as` name. The `string_shared_as` name must be unique @@ -672,6 +988,32 @@ type SharedDataObject struct { StringSharedAs types.String `tfsdk:"string_shared_as" tf:"optional"` } +func (newState *SharedDataObject) SyncEffectiveFieldsDuringCreateOrUpdate(plan SharedDataObject) { + newState.EffectiveCdfEnabled = newState.CdfEnabled + newState.CdfEnabled = plan.CdfEnabled + newState.EffectiveHistoryDataSharingStatus = newState.HistoryDataSharingStatus + newState.HistoryDataSharingStatus = plan.HistoryDataSharingStatus + newState.EffectiveSharedAs = newState.SharedAs + newState.SharedAs = plan.SharedAs + newState.EffectiveStartVersion = newState.StartVersion + newState.StartVersion = plan.StartVersion +} + +func (newState *SharedDataObject) SyncEffectiveFieldsDuringRead(existingState SharedDataObject) { + if existingState.EffectiveCdfEnabled.ValueBool() == newState.CdfEnabled.ValueBool() { + newState.CdfEnabled = existingState.CdfEnabled + } + if existingState.EffectiveHistoryDataSharingStatus.ValueString() == newState.HistoryDataSharingStatus.ValueString() { + newState.HistoryDataSharingStatus = existingState.HistoryDataSharingStatus + } + if existingState.EffectiveSharedAs.ValueString() == newState.SharedAs.ValueString() { + newState.SharedAs = existingState.SharedAs + } + if existingState.EffectiveStartVersion.ValueInt64() == newState.StartVersion.ValueInt64() { + newState.StartVersion = existingState.StartVersion + } +} + type SharedDataObjectUpdate struct { // One of: **ADD**, **REMOVE**, **UPDATE**. Action types.String `tfsdk:"action" tf:"optional"` @@ -679,6 +1021,12 @@ type SharedDataObjectUpdate struct { DataObject []SharedDataObject `tfsdk:"data_object" tf:"optional,object"` } +func (newState *SharedDataObjectUpdate) SyncEffectiveFieldsDuringCreateOrUpdate(plan SharedDataObjectUpdate) { +} + +func (newState *SharedDataObjectUpdate) SyncEffectiveFieldsDuringRead(existingState SharedDataObjectUpdate) { +} + type UpdateCleanRoom struct { // Array of shared data object updates. 
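// --- Illustrative aside (not part of the patch above) ---
// The SyncEffectiveFieldsDuring* pairs added for ShareInfo, SharedDataObject and
// UpdateShare appear to implement an "effective field" pattern: the plain field
// keeps the user-configured value while a computed Effective* twin records what
// the backend actually returned, so a server-side normalization does not show up
// as a perpetual plan diff. Below is a minimal, self-contained sketch of that
// behaviour using a hypothetical exampleShare type; it mirrors, but is not, the
// generated code.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

type exampleShare struct {
	Owner          types.String
	EffectiveOwner types.String
}

// After create/update: record the server's value, then restore the planned one.
func (newState *exampleShare) syncDuringCreateOrUpdate(plan exampleShare) {
	newState.EffectiveOwner = newState.Owner
	newState.Owner = plan.Owner
}

// During read: keep the previously configured value while the server-side value
// still matches what was recorded, suppressing a spurious diff.
func (newState *exampleShare) syncDuringRead(existingState exampleShare) {
	if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() {
		newState.Owner = existingState.Owner
	}
}

func main() {
	plan := exampleShare{Owner: types.StringValue("data-eng")}
	created := exampleShare{Owner: types.StringValue("DATA-ENG")} // server canonicalized the name
	created.syncDuringCreateOrUpdate(plan)
	fmt.Println(created.Owner.ValueString(), created.EffectiveOwner.ValueString()) // data-eng DATA-ENG

	read := exampleShare{Owner: types.StringValue("DATA-ENG")} // a later refresh returns the same canonical value
	read.syncDuringRead(created)
	fmt.Println(read.Owner.ValueString()) // data-eng
}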
CatalogUpdates []CleanRoomCatalogUpdate `tfsdk:"catalog_updates" tf:"optional"` @@ -690,9 +1038,21 @@ type UpdateCleanRoom struct { Owner types.String `tfsdk:"owner" tf:"optional"` } +func (newState *UpdateCleanRoom) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCleanRoom) { +} + +func (newState *UpdateCleanRoom) SyncEffectiveFieldsDuringRead(existingState UpdateCleanRoom) { +} + type UpdatePermissionsResponse struct { } +func (newState *UpdatePermissionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdatePermissionsResponse) { +} + +func (newState *UpdatePermissionsResponse) SyncEffectiveFieldsDuringRead(existingState UpdatePermissionsResponse) { +} + type UpdateProvider struct { // Description about the provider. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -707,6 +1067,12 @@ type UpdateProvider struct { RecipientProfileStr types.String `tfsdk:"recipient_profile_str" tf:"optional"` } +func (newState *UpdateProvider) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateProvider) { +} + +func (newState *UpdateProvider) SyncEffectiveFieldsDuringRead(existingState UpdateProvider) { +} + type UpdateRecipient struct { // Description about the recipient. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -727,9 +1093,21 @@ type UpdateRecipient struct { PropertiesKvpairs []SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs" tf:"optional,object"` } +func (newState *UpdateRecipient) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRecipient) { +} + +func (newState *UpdateRecipient) SyncEffectiveFieldsDuringRead(existingState UpdateRecipient) { +} + type UpdateResponse struct { } +func (newState *UpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateResponse) { +} + +func (newState *UpdateResponse) SyncEffectiveFieldsDuringRead(existingState UpdateResponse) { +} + type UpdateShare struct { // User-provided free-form text description. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -738,13 +1116,25 @@ type UpdateShare struct { // New name for the share. NewName types.String `tfsdk:"new_name" tf:"optional"` // Username of current owner of share. - Owner types.String `tfsdk:"owner" tf:"optional"` + Owner types.String `tfsdk:"owner" tf:"optional"` + EffectiveOwner types.String `tfsdk:"effective_owner" tf:"computed,optional"` // Storage root URL for the share. StorageRoot types.String `tfsdk:"storage_root" tf:"optional"` // Array of shared data object updates. Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional"` } +func (newState *UpdateShare) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateShare) { + newState.EffectiveOwner = newState.Owner + newState.Owner = plan.Owner +} + +func (newState *UpdateShare) SyncEffectiveFieldsDuringRead(existingState UpdateShare) { + if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() { + newState.Owner = existingState.Owner + } +} + type UpdateSharePermissions struct { // Array of permission changes. Changes catalog.PermissionsChange `tfsdk:"changes" tf:"optional"` @@ -763,3 +1153,9 @@ type UpdateSharePermissions struct { // Opaque pagination token to go to next page based on previous query. 
PageToken types.String `tfsdk:"-"` } + +func (newState *UpdateSharePermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateSharePermissions) { +} + +func (newState *UpdateSharePermissions) SyncEffectiveFieldsDuringRead(existingState UpdateSharePermissions) { +} diff --git a/internal/service/sql_tf/model.go b/internal/service/sql_tf/model.go index 18cf637b87..b961f17901 100755 --- a/internal/service/sql_tf/model.go +++ b/internal/service/sql_tf/model.go @@ -23,6 +23,12 @@ type AccessControl struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *AccessControl) SyncEffectiveFieldsDuringCreateOrUpdate(plan AccessControl) { +} + +func (newState *AccessControl) SyncEffectiveFieldsDuringRead(existingState AccessControl) { +} + type Alert struct { // Trigger conditions of the alert. Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` @@ -45,6 +51,8 @@ type Alert struct { Id types.String `tfsdk:"id" tf:"optional"` // The workspace state of the alert. Used for tracking trashed status. LifecycleState types.String `tfsdk:"lifecycle_state" tf:"optional"` + // Whether to notify alert subscribers when alert returns back to normal. + NotifyOnOk types.Bool `tfsdk:"notify_on_ok" tf:"optional"` // The owner's username. This field is set to "Unavailable" if the user has // been deleted. OwnerUserName types.String `tfsdk:"owner_user_name" tf:"optional"` @@ -67,6 +75,12 @@ type Alert struct { UpdateTime types.String `tfsdk:"update_time" tf:"optional"` } +func (newState *Alert) SyncEffectiveFieldsDuringCreateOrUpdate(plan Alert) { +} + +func (newState *Alert) SyncEffectiveFieldsDuringRead(existingState Alert) { +} + type AlertCondition struct { // Alert state if result is empty. EmptyResultState types.String `tfsdk:"empty_result_state" tf:"optional"` @@ -79,18 +93,42 @@ type AlertCondition struct { Threshold []AlertConditionThreshold `tfsdk:"threshold" tf:"optional,object"` } +func (newState *AlertCondition) SyncEffectiveFieldsDuringCreateOrUpdate(plan AlertCondition) { +} + +func (newState *AlertCondition) SyncEffectiveFieldsDuringRead(existingState AlertCondition) { +} + type AlertConditionOperand struct { Column []AlertOperandColumn `tfsdk:"column" tf:"optional,object"` } +func (newState *AlertConditionOperand) SyncEffectiveFieldsDuringCreateOrUpdate(plan AlertConditionOperand) { +} + +func (newState *AlertConditionOperand) SyncEffectiveFieldsDuringRead(existingState AlertConditionOperand) { +} + type AlertConditionThreshold struct { Value []AlertOperandValue `tfsdk:"value" tf:"optional,object"` } +func (newState *AlertConditionThreshold) SyncEffectiveFieldsDuringCreateOrUpdate(plan AlertConditionThreshold) { +} + +func (newState *AlertConditionThreshold) SyncEffectiveFieldsDuringRead(existingState AlertConditionThreshold) { +} + type AlertOperandColumn struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *AlertOperandColumn) SyncEffectiveFieldsDuringCreateOrUpdate(plan AlertOperandColumn) { +} + +func (newState *AlertOperandColumn) SyncEffectiveFieldsDuringRead(existingState AlertOperandColumn) { +} + type AlertOperandValue struct { BoolValue types.Bool `tfsdk:"bool_value" tf:"optional"` @@ -99,6 +137,12 @@ type AlertOperandValue struct { StringValue types.String `tfsdk:"string_value" tf:"optional"` } +func (newState *AlertOperandValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan AlertOperandValue) { +} + +func (newState *AlertOperandValue) SyncEffectiveFieldsDuringRead(existingState AlertOperandValue) { +} + // Alert configuration 
options. type AlertOptions struct { // Name of column in the query result to compare in alert evaluation. @@ -127,6 +171,12 @@ type AlertOptions struct { Value any `tfsdk:"value" tf:""` } +func (newState *AlertOptions) SyncEffectiveFieldsDuringCreateOrUpdate(plan AlertOptions) { +} + +func (newState *AlertOptions) SyncEffectiveFieldsDuringRead(existingState AlertOptions) { +} + type AlertQuery struct { // The timestamp when this query was created. CreatedAt types.String `tfsdk:"created_at" tf:"optional"` @@ -169,6 +219,12 @@ type AlertQuery struct { UserId types.Int64 `tfsdk:"user_id" tf:"optional"` } +func (newState *AlertQuery) SyncEffectiveFieldsDuringCreateOrUpdate(plan AlertQuery) { +} + +func (newState *AlertQuery) SyncEffectiveFieldsDuringRead(existingState AlertQuery) { +} + // Describes metadata for a particular chunk, within a result set; this // structure is used both within a manifest, and when fetching individual chunk // data or links. @@ -184,6 +240,12 @@ type BaseChunkInfo struct { RowOffset types.Int64 `tfsdk:"row_offset" tf:"optional"` } +func (newState *BaseChunkInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan BaseChunkInfo) { +} + +func (newState *BaseChunkInfo) SyncEffectiveFieldsDuringRead(existingState BaseChunkInfo) { +} + // Cancel statement execution type CancelExecutionRequest struct { // The statement ID is returned upon successfully submitting a SQL @@ -191,9 +253,21 @@ type CancelExecutionRequest struct { StatementId types.String `tfsdk:"-"` } +func (newState *CancelExecutionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelExecutionRequest) { +} + +func (newState *CancelExecutionRequest) SyncEffectiveFieldsDuringRead(existingState CancelExecutionRequest) { +} + type CancelExecutionResponse struct { } +func (newState *CancelExecutionResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CancelExecutionResponse) { +} + +func (newState *CancelExecutionResponse) SyncEffectiveFieldsDuringRead(existingState CancelExecutionResponse) { +} + // Configures the channel name and DBSQL version of the warehouse. // CHANNEL_NAME_CUSTOM should be chosen only when `dbsql_version` is specified. type Channel struct { @@ -202,6 +276,12 @@ type Channel struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *Channel) SyncEffectiveFieldsDuringCreateOrUpdate(plan Channel) { +} + +func (newState *Channel) SyncEffectiveFieldsDuringRead(existingState Channel) { +} + // Details about a Channel. type ChannelInfo struct { // DB SQL Version the Channel is mapped to. @@ -210,6 +290,12 @@ type ChannelInfo struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *ChannelInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ChannelInfo) { +} + +func (newState *ChannelInfo) SyncEffectiveFieldsDuringRead(existingState ChannelInfo) { +} + type ColumnInfo struct { // The name of the column. Name types.String `tfsdk:"name" tf:"optional"` @@ -230,6 +316,12 @@ type ColumnInfo struct { TypeText types.String `tfsdk:"type_text" tf:"optional"` } +func (newState *ColumnInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnInfo) { +} + +func (newState *ColumnInfo) SyncEffectiveFieldsDuringRead(existingState ColumnInfo) { +} + type CreateAlert struct { // Name of the alert. 
Name types.String `tfsdk:"name" tf:""` @@ -245,10 +337,22 @@ type CreateAlert struct { Rearm types.Int64 `tfsdk:"rearm" tf:"optional"` } +func (newState *CreateAlert) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAlert) { +} + +func (newState *CreateAlert) SyncEffectiveFieldsDuringRead(existingState CreateAlert) { +} + type CreateAlertRequest struct { Alert []CreateAlertRequestAlert `tfsdk:"alert" tf:"optional,object"` } +func (newState *CreateAlertRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAlertRequest) { +} + +func (newState *CreateAlertRequest) SyncEffectiveFieldsDuringRead(existingState CreateAlertRequest) { +} + type CreateAlertRequestAlert struct { // Trigger conditions of the alert. Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` @@ -265,6 +369,8 @@ type CreateAlertRequestAlert struct { CustomSubject types.String `tfsdk:"custom_subject" tf:"optional"` // The display name of the alert. DisplayName types.String `tfsdk:"display_name" tf:"optional"` + // Whether to notify alert subscribers when alert returns back to normal. + NotifyOnOk types.Bool `tfsdk:"notify_on_ok" tf:"optional"` // The workspace path of the folder containing the alert. ParentPath types.String `tfsdk:"parent_path" tf:"optional"` // UUID of the query attached to the alert. @@ -275,10 +381,22 @@ type CreateAlertRequestAlert struct { SecondsToRetrigger types.Int64 `tfsdk:"seconds_to_retrigger" tf:"optional"` } +func (newState *CreateAlertRequestAlert) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAlertRequestAlert) { +} + +func (newState *CreateAlertRequestAlert) SyncEffectiveFieldsDuringRead(existingState CreateAlertRequestAlert) { +} + type CreateQueryRequest struct { Query []CreateQueryRequestQuery `tfsdk:"query" tf:"optional,object"` } +func (newState *CreateQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateQueryRequest) { +} + +func (newState *CreateQueryRequest) SyncEffectiveFieldsDuringRead(existingState CreateQueryRequest) { +} + type CreateQueryRequestQuery struct { // Whether to apply a 1000 row limit to the query result. ApplyAutoLimit types.Bool `tfsdk:"apply_auto_limit" tf:"optional"` @@ -306,6 +424,12 @@ type CreateQueryRequestQuery struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *CreateQueryRequestQuery) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateQueryRequestQuery) { +} + +func (newState *CreateQueryRequestQuery) SyncEffectiveFieldsDuringRead(existingState CreateQueryRequestQuery) { +} + // Add visualization to a query type CreateQueryVisualizationsLegacyRequest struct { // A short description of this visualization. 
This is not displayed in the @@ -324,10 +448,22 @@ type CreateQueryVisualizationsLegacyRequest struct { Type types.String `tfsdk:"type" tf:""` } +func (newState *CreateQueryVisualizationsLegacyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateQueryVisualizationsLegacyRequest) { +} + +func (newState *CreateQueryVisualizationsLegacyRequest) SyncEffectiveFieldsDuringRead(existingState CreateQueryVisualizationsLegacyRequest) { +} + type CreateVisualizationRequest struct { Visualization []CreateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional,object"` } +func (newState *CreateVisualizationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateVisualizationRequest) { +} + +func (newState *CreateVisualizationRequest) SyncEffectiveFieldsDuringRead(existingState CreateVisualizationRequest) { +} + type CreateVisualizationRequestVisualization struct { // The display name of the visualization. DisplayName types.String `tfsdk:"display_name" tf:"optional"` @@ -345,6 +481,12 @@ type CreateVisualizationRequestVisualization struct { Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *CreateVisualizationRequestVisualization) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateVisualizationRequestVisualization) { +} + +func (newState *CreateVisualizationRequestVisualization) SyncEffectiveFieldsDuringRead(existingState CreateVisualizationRequestVisualization) { +} + type CreateWarehouseRequest struct { // The amount of time in minutes that a SQL warehouse must be idle (i.e., no // RUNNING queries) before it is automatically stopped. @@ -409,11 +551,23 @@ type CreateWarehouseRequest struct { WarehouseType types.String `tfsdk:"warehouse_type" tf:"optional"` } +func (newState *CreateWarehouseRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateWarehouseRequest) { +} + +func (newState *CreateWarehouseRequest) SyncEffectiveFieldsDuringRead(existingState CreateWarehouseRequest) { +} + type CreateWarehouseResponse struct { // Id for the SQL warehouse. This value is unique across all SQL warehouses. Id types.String `tfsdk:"id" tf:"optional"` } +func (newState *CreateWarehouseResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateWarehouseResponse) { +} + +func (newState *CreateWarehouseResponse) SyncEffectiveFieldsDuringRead(existingState CreateWarehouseResponse) { +} + type CreateWidget struct { // Dashboard ID returned by :method:dashboards/create. DashboardId types.String `tfsdk:"dashboard_id" tf:""` @@ -431,6 +585,12 @@ type CreateWidget struct { Width types.Int64 `tfsdk:"width" tf:""` } +func (newState *CreateWidget) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateWidget) { +} + +func (newState *CreateWidget) SyncEffectiveFieldsDuringRead(existingState CreateWidget) { +} + // A JSON representing a dashboard containing widgets of visualizations and text // boxes. 
type Dashboard struct { @@ -480,6 +640,12 @@ type Dashboard struct { Widgets []Widget `tfsdk:"widgets" tf:"optional"` } +func (newState *Dashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan Dashboard) { +} + +func (newState *Dashboard) SyncEffectiveFieldsDuringRead(existingState Dashboard) { +} + type DashboardEditContent struct { DashboardId types.String `tfsdk:"-"` // The title of this dashboard that appears in list views and at the top of @@ -493,6 +659,12 @@ type DashboardEditContent struct { Tags []types.String `tfsdk:"tags" tf:"optional"` } +func (newState *DashboardEditContent) SyncEffectiveFieldsDuringCreateOrUpdate(plan DashboardEditContent) { +} + +func (newState *DashboardEditContent) SyncEffectiveFieldsDuringRead(existingState DashboardEditContent) { +} + type DashboardOptions struct { // The timestamp when this dashboard was moved to trash. Only present when // the `is_archived` property is `true`. Trashed items are deleted after @@ -500,6 +672,12 @@ type DashboardOptions struct { MovedToTrashAt types.String `tfsdk:"moved_to_trash_at" tf:"optional"` } +func (newState *DashboardOptions) SyncEffectiveFieldsDuringCreateOrUpdate(plan DashboardOptions) { +} + +func (newState *DashboardOptions) SyncEffectiveFieldsDuringRead(existingState DashboardOptions) { +} + type DashboardPostContent struct { // Indicates whether the dashboard filters are enabled DashboardFiltersEnabled types.Bool `tfsdk:"dashboard_filters_enabled" tf:"optional"` @@ -519,6 +697,12 @@ type DashboardPostContent struct { Tags []types.String `tfsdk:"tags" tf:"optional"` } +func (newState *DashboardPostContent) SyncEffectiveFieldsDuringCreateOrUpdate(plan DashboardPostContent) { +} + +func (newState *DashboardPostContent) SyncEffectiveFieldsDuringRead(existingState DashboardPostContent) { +} + // A JSON object representing a DBSQL data source / SQL warehouse. type DataSource struct { // Data source ID maps to the ID of the data source used by the resource and @@ -547,12 +731,24 @@ type DataSource struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *DataSource) SyncEffectiveFieldsDuringCreateOrUpdate(plan DataSource) { +} + +func (newState *DataSource) SyncEffectiveFieldsDuringRead(existingState DataSource) { +} + type DateRange struct { End types.String `tfsdk:"end" tf:""` Start types.String `tfsdk:"start" tf:""` } +func (newState *DateRange) SyncEffectiveFieldsDuringCreateOrUpdate(plan DateRange) { +} + +func (newState *DateRange) SyncEffectiveFieldsDuringRead(existingState DateRange) { +} + type DateRangeValue struct { // Manually specified date-time range value. DateRangeValue []DateRange `tfsdk:"date_range_value" tf:"optional,object"` @@ -565,6 +761,12 @@ type DateRangeValue struct { StartDayOfWeek types.Int64 `tfsdk:"start_day_of_week" tf:"optional"` } +func (newState *DateRangeValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan DateRangeValue) { +} + +func (newState *DateRangeValue) SyncEffectiveFieldsDuringRead(existingState DateRangeValue) { +} + type DateValue struct { // Manually specified date-time value. 
DateValue types.String `tfsdk:"date_value" tf:"optional"` @@ -575,50 +777,110 @@ type DateValue struct { Precision types.String `tfsdk:"precision" tf:"optional"` } +func (newState *DateValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan DateValue) { +} + +func (newState *DateValue) SyncEffectiveFieldsDuringRead(existingState DateValue) { +} + // Delete an alert type DeleteAlertsLegacyRequest struct { AlertId types.String `tfsdk:"-"` } +func (newState *DeleteAlertsLegacyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAlertsLegacyRequest) { +} + +func (newState *DeleteAlertsLegacyRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAlertsLegacyRequest) { +} + // Remove a dashboard type DeleteDashboardRequest struct { DashboardId types.String `tfsdk:"-"` } +func (newState *DeleteDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDashboardRequest) { +} + +func (newState *DeleteDashboardRequest) SyncEffectiveFieldsDuringRead(existingState DeleteDashboardRequest) { +} + // Remove widget type DeleteDashboardWidgetRequest struct { // Widget ID returned by :method:dashboardwidgets/create Id types.String `tfsdk:"-"` } +func (newState *DeleteDashboardWidgetRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDashboardWidgetRequest) { +} + +func (newState *DeleteDashboardWidgetRequest) SyncEffectiveFieldsDuringRead(existingState DeleteDashboardWidgetRequest) { +} + // Delete a query type DeleteQueriesLegacyRequest struct { QueryId types.String `tfsdk:"-"` } +func (newState *DeleteQueriesLegacyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteQueriesLegacyRequest) { +} + +func (newState *DeleteQueriesLegacyRequest) SyncEffectiveFieldsDuringRead(existingState DeleteQueriesLegacyRequest) { +} + // Remove visualization type DeleteQueryVisualizationsLegacyRequest struct { // Widget ID returned by :method:queryvizualisations/create Id types.String `tfsdk:"-"` } +func (newState *DeleteQueryVisualizationsLegacyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteQueryVisualizationsLegacyRequest) { +} + +func (newState *DeleteQueryVisualizationsLegacyRequest) SyncEffectiveFieldsDuringRead(existingState DeleteQueryVisualizationsLegacyRequest) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + // Remove a visualization type DeleteVisualizationRequest struct { Id types.String `tfsdk:"-"` } +func (newState *DeleteVisualizationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteVisualizationRequest) { +} + +func (newState *DeleteVisualizationRequest) SyncEffectiveFieldsDuringRead(existingState DeleteVisualizationRequest) { +} + // Delete a warehouse type DeleteWarehouseRequest struct { // Required. Id of the SQL warehouse. Id types.String `tfsdk:"-"` } +func (newState *DeleteWarehouseRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteWarehouseRequest) { +} + +func (newState *DeleteWarehouseRequest) SyncEffectiveFieldsDuringRead(existingState DeleteWarehouseRequest) { +} + type DeleteWarehouseResponse struct { } +func (newState *DeleteWarehouseResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteWarehouseResponse) { +} + +func (newState *DeleteWarehouseResponse) SyncEffectiveFieldsDuringRead(existingState DeleteWarehouseResponse) { +} + type EditAlert struct { AlertId types.String `tfsdk:"-"` // Name of the alert. 
@@ -633,6 +895,12 @@ type EditAlert struct { Rearm types.Int64 `tfsdk:"rearm" tf:"optional"` } +func (newState *EditAlert) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditAlert) { +} + +func (newState *EditAlert) SyncEffectiveFieldsDuringRead(existingState EditAlert) { +} + type EditWarehouseRequest struct { // The amount of time in minutes that a SQL warehouse must be idle (i.e., no // RUNNING queries) before it is automatically stopped. @@ -697,20 +965,44 @@ type EditWarehouseRequest struct { WarehouseType types.String `tfsdk:"warehouse_type" tf:"optional"` } +func (newState *EditWarehouseRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditWarehouseRequest) { +} + +func (newState *EditWarehouseRequest) SyncEffectiveFieldsDuringRead(existingState EditWarehouseRequest) { +} + type EditWarehouseResponse struct { } +func (newState *EditWarehouseResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan EditWarehouseResponse) { +} + +func (newState *EditWarehouseResponse) SyncEffectiveFieldsDuringRead(existingState EditWarehouseResponse) { +} + // Represents an empty message, similar to google.protobuf.Empty, which is not // available in the firm right now. type Empty struct { } +func (newState *Empty) SyncEffectiveFieldsDuringCreateOrUpdate(plan Empty) { +} + +func (newState *Empty) SyncEffectiveFieldsDuringRead(existingState Empty) { +} + type EndpointConfPair struct { Key types.String `tfsdk:"key" tf:"optional"` Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *EndpointConfPair) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointConfPair) { +} + +func (newState *EndpointConfPair) SyncEffectiveFieldsDuringRead(existingState EndpointConfPair) { +} + type EndpointHealth struct { // Details about errors that are causing current degraded/failed status. Details types.String `tfsdk:"details" tf:"optional"` @@ -726,6 +1018,12 @@ type EndpointHealth struct { Summary types.String `tfsdk:"summary" tf:"optional"` } +func (newState *EndpointHealth) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointHealth) { +} + +func (newState *EndpointHealth) SyncEffectiveFieldsDuringRead(existingState EndpointHealth) { +} + type EndpointInfo struct { // The amount of time in minutes that a SQL warehouse must be idle (i.e., no // RUNNING queries) before it is automatically stopped. @@ -803,16 +1101,34 @@ type EndpointInfo struct { WarehouseType types.String `tfsdk:"warehouse_type" tf:"optional"` } +func (newState *EndpointInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointInfo) { +} + +func (newState *EndpointInfo) SyncEffectiveFieldsDuringRead(existingState EndpointInfo) { +} + type EndpointTagPair struct { Key types.String `tfsdk:"key" tf:"optional"` Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *EndpointTagPair) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointTagPair) { +} + +func (newState *EndpointTagPair) SyncEffectiveFieldsDuringRead(existingState EndpointTagPair) { +} + type EndpointTags struct { CustomTags []EndpointTagPair `tfsdk:"custom_tags" tf:"optional"` } +func (newState *EndpointTags) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointTags) { +} + +func (newState *EndpointTags) SyncEffectiveFieldsDuringRead(existingState EndpointTags) { +} + type EnumValue struct { // List of valid query parameter values, newline delimited. 
EnumOptions types.String `tfsdk:"enum_options" tf:"optional"` @@ -822,6 +1138,12 @@ type EnumValue struct { Values []types.String `tfsdk:"values" tf:"optional"` } +func (newState *EnumValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan EnumValue) { +} + +func (newState *EnumValue) SyncEffectiveFieldsDuringRead(existingState EnumValue) { +} + type ExecuteStatementRequest struct { // Applies the given byte limit to the statement's result size. Byte counts // are based on internal data representations and might not match the final @@ -948,6 +1270,12 @@ type ExecuteStatementRequest struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:""` } +func (newState *ExecuteStatementRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExecuteStatementRequest) { +} + +func (newState *ExecuteStatementRequest) SyncEffectiveFieldsDuringRead(existingState ExecuteStatementRequest) { +} + type ExternalLink struct { // The number of bytes in the result chunk. This field is not available when // using `INLINE` disposition. @@ -981,21 +1309,45 @@ type ExternalLink struct { RowOffset types.Int64 `tfsdk:"row_offset" tf:"optional"` } +func (newState *ExternalLink) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExternalLink) { +} + +func (newState *ExternalLink) SyncEffectiveFieldsDuringRead(existingState ExternalLink) { +} + // Get an alert type GetAlertRequest struct { Id types.String `tfsdk:"-"` } +func (newState *GetAlertRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAlertRequest) { +} + +func (newState *GetAlertRequest) SyncEffectiveFieldsDuringRead(existingState GetAlertRequest) { +} + // Get an alert type GetAlertsLegacyRequest struct { AlertId types.String `tfsdk:"-"` } +func (newState *GetAlertsLegacyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAlertsLegacyRequest) { +} + +func (newState *GetAlertsLegacyRequest) SyncEffectiveFieldsDuringRead(existingState GetAlertsLegacyRequest) { +} + // Retrieve a definition type GetDashboardRequest struct { DashboardId types.String `tfsdk:"-"` } +func (newState *GetDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDashboardRequest) { +} + +func (newState *GetDashboardRequest) SyncEffectiveFieldsDuringRead(existingState GetDashboardRequest) { +} + // Get object ACL type GetDbsqlPermissionRequest struct { // Object ID. An ACL is returned for the object with this UUID. @@ -1004,16 +1356,34 @@ type GetDbsqlPermissionRequest struct { ObjectType types.String `tfsdk:"-"` } +func (newState *GetDbsqlPermissionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetDbsqlPermissionRequest) { +} + +func (newState *GetDbsqlPermissionRequest) SyncEffectiveFieldsDuringRead(existingState GetDbsqlPermissionRequest) { +} + // Get a query definition. type GetQueriesLegacyRequest struct { QueryId types.String `tfsdk:"-"` } +func (newState *GetQueriesLegacyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetQueriesLegacyRequest) { +} + +func (newState *GetQueriesLegacyRequest) SyncEffectiveFieldsDuringRead(existingState GetQueriesLegacyRequest) { +} + // Get a query type GetQueryRequest struct { Id types.String `tfsdk:"-"` } +func (newState *GetQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetQueryRequest) { +} + +func (newState *GetQueryRequest) SyncEffectiveFieldsDuringRead(existingState GetQueryRequest) { +} + type GetResponse struct { AccessControlList []AccessControl `tfsdk:"access_control_list" tf:"optional"` // An object's type and UUID, separated by a forward slash (/) character. 
@@ -1022,6 +1392,12 @@ type GetResponse struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *GetResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetResponse) { +} + +func (newState *GetResponse) SyncEffectiveFieldsDuringRead(existingState GetResponse) { +} + // Get status, manifest, and result first chunk type GetStatementRequest struct { // The statement ID is returned upon successfully submitting a SQL @@ -1029,6 +1405,12 @@ type GetStatementRequest struct { StatementId types.String `tfsdk:"-"` } +func (newState *GetStatementRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetStatementRequest) { +} + +func (newState *GetStatementRequest) SyncEffectiveFieldsDuringRead(existingState GetStatementRequest) { +} + // Get result chunk by index type GetStatementResultChunkNRequest struct { ChunkIndex types.Int64 `tfsdk:"-"` @@ -1037,29 +1419,59 @@ type GetStatementResultChunkNRequest struct { StatementId types.String `tfsdk:"-"` } +func (newState *GetStatementResultChunkNRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetStatementResultChunkNRequest) { +} + +func (newState *GetStatementResultChunkNRequest) SyncEffectiveFieldsDuringRead(existingState GetStatementResultChunkNRequest) { +} + // Get SQL warehouse permission levels type GetWarehousePermissionLevelsRequest struct { // The SQL warehouse for which to get or manage permissions. WarehouseId types.String `tfsdk:"-"` } +func (newState *GetWarehousePermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWarehousePermissionLevelsRequest) { +} + +func (newState *GetWarehousePermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetWarehousePermissionLevelsRequest) { +} + type GetWarehousePermissionLevelsResponse struct { // Specific permission levels PermissionLevels []WarehousePermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetWarehousePermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWarehousePermissionLevelsResponse) { +} + +func (newState *GetWarehousePermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetWarehousePermissionLevelsResponse) { +} + // Get SQL warehouse permissions type GetWarehousePermissionsRequest struct { // The SQL warehouse for which to get or manage permissions. WarehouseId types.String `tfsdk:"-"` } +func (newState *GetWarehousePermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWarehousePermissionsRequest) { +} + +func (newState *GetWarehousePermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetWarehousePermissionsRequest) { +} + // Get warehouse info type GetWarehouseRequest struct { // Required. Id of the SQL warehouse. Id types.String `tfsdk:"-"` } +func (newState *GetWarehouseRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWarehouseRequest) { +} + +func (newState *GetWarehouseRequest) SyncEffectiveFieldsDuringRead(existingState GetWarehouseRequest) { +} + type GetWarehouseResponse struct { // The amount of time in minutes that a SQL warehouse must be idle (i.e., no // RUNNING queries) before it is automatically stopped. 
@@ -1137,6 +1549,12 @@ type GetWarehouseResponse struct { WarehouseType types.String `tfsdk:"warehouse_type" tf:"optional"` } +func (newState *GetWarehouseResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWarehouseResponse) { +} + +func (newState *GetWarehouseResponse) SyncEffectiveFieldsDuringRead(existingState GetWarehouseResponse) { +} + type GetWorkspaceWarehouseConfigResponse struct { // Optional: Channel selection details Channel []Channel `tfsdk:"channel" tf:"optional,object"` @@ -1165,6 +1583,12 @@ type GetWorkspaceWarehouseConfigResponse struct { SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional,object"` } +func (newState *GetWorkspaceWarehouseConfigResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWorkspaceWarehouseConfigResponse) { +} + +func (newState *GetWorkspaceWarehouseConfigResponse) SyncEffectiveFieldsDuringRead(existingState GetWorkspaceWarehouseConfigResponse) { +} + type LegacyAlert struct { // Timestamp when the alert was created. CreatedAt types.String `tfsdk:"created_at" tf:"optional"` @@ -1194,6 +1618,12 @@ type LegacyAlert struct { User []User `tfsdk:"user" tf:"optional,object"` } +func (newState *LegacyAlert) SyncEffectiveFieldsDuringCreateOrUpdate(plan LegacyAlert) { +} + +func (newState *LegacyAlert) SyncEffectiveFieldsDuringRead(existingState LegacyAlert) { +} + type LegacyQuery struct { // Describes whether the authenticated user is allowed to edit the // definition of this query. @@ -1265,6 +1695,12 @@ type LegacyQuery struct { Visualizations []LegacyVisualization `tfsdk:"visualizations" tf:"optional"` } +func (newState *LegacyQuery) SyncEffectiveFieldsDuringCreateOrUpdate(plan LegacyQuery) { +} + +func (newState *LegacyQuery) SyncEffectiveFieldsDuringRead(existingState LegacyQuery) { +} + // The visualization description API changes frequently and is unsupported. You // can duplicate a visualization by copying description objects received _from // the API_ and then using them to create a new one with a POST request to the @@ -1292,6 +1728,12 @@ type LegacyVisualization struct { UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` } +func (newState *LegacyVisualization) SyncEffectiveFieldsDuringCreateOrUpdate(plan LegacyVisualization) { +} + +func (newState *LegacyVisualization) SyncEffectiveFieldsDuringRead(existingState LegacyVisualization) { +} + // List alerts type ListAlertsRequest struct { PageSize types.Int64 `tfsdk:"-"` @@ -1299,12 +1741,24 @@ type ListAlertsRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListAlertsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAlertsRequest) { +} + +func (newState *ListAlertsRequest) SyncEffectiveFieldsDuringRead(existingState ListAlertsRequest) { +} + type ListAlertsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` Results []ListAlertsResponseAlert `tfsdk:"results" tf:"optional"` } +func (newState *ListAlertsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAlertsResponse) { +} + +func (newState *ListAlertsResponse) SyncEffectiveFieldsDuringRead(existingState ListAlertsResponse) { +} + type ListAlertsResponseAlert struct { // Trigger conditions of the alert. Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` @@ -1327,6 +1781,8 @@ type ListAlertsResponseAlert struct { Id types.String `tfsdk:"id" tf:"optional"` // The workspace state of the alert. Used for tracking trashed status. 
LifecycleState types.String `tfsdk:"lifecycle_state" tf:"optional"` + // Whether to notify alert subscribers when alert returns back to normal. + NotifyOnOk types.Bool `tfsdk:"notify_on_ok" tf:"optional"` // The owner's username. This field is set to "Unavailable" if the user has // been deleted. OwnerUserName types.String `tfsdk:"owner_user_name" tf:"optional"` @@ -1347,6 +1803,12 @@ type ListAlertsResponseAlert struct { UpdateTime types.String `tfsdk:"update_time" tf:"optional"` } +func (newState *ListAlertsResponseAlert) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAlertsResponseAlert) { +} + +func (newState *ListAlertsResponseAlert) SyncEffectiveFieldsDuringRead(existingState ListAlertsResponseAlert) { +} + // Get dashboard objects type ListDashboardsRequest struct { // Name of dashboard attribute to order by. @@ -1359,6 +1821,12 @@ type ListDashboardsRequest struct { Q types.String `tfsdk:"-"` } +func (newState *ListDashboardsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListDashboardsRequest) { +} + +func (newState *ListDashboardsRequest) SyncEffectiveFieldsDuringRead(existingState ListDashboardsRequest) { +} + // Get a list of queries type ListQueriesLegacyRequest struct { // Name of query attribute to order by. Default sort order is ascending. @@ -1384,6 +1852,12 @@ type ListQueriesLegacyRequest struct { Q types.String `tfsdk:"-"` } +func (newState *ListQueriesLegacyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListQueriesLegacyRequest) { +} + +func (newState *ListQueriesLegacyRequest) SyncEffectiveFieldsDuringRead(existingState ListQueriesLegacyRequest) { +} + // List queries type ListQueriesRequest struct { PageSize types.Int64 `tfsdk:"-"` @@ -1391,6 +1865,12 @@ type ListQueriesRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListQueriesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListQueriesRequest) { +} + +func (newState *ListQueriesRequest) SyncEffectiveFieldsDuringRead(existingState ListQueriesRequest) { +} + type ListQueriesResponse struct { // Whether there is another page of results. HasNextPage types.Bool `tfsdk:"has_next_page" tf:"optional"` @@ -1400,6 +1880,12 @@ type ListQueriesResponse struct { Res []QueryInfo `tfsdk:"res" tf:"optional"` } +func (newState *ListQueriesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListQueriesResponse) { +} + +func (newState *ListQueriesResponse) SyncEffectiveFieldsDuringRead(existingState ListQueriesResponse) { +} + // List Queries type ListQueryHistoryRequest struct { // A filter to limit query history results. This field is optional. @@ -1417,12 +1903,24 @@ type ListQueryHistoryRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListQueryHistoryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListQueryHistoryRequest) { +} + +func (newState *ListQueryHistoryRequest) SyncEffectiveFieldsDuringRead(existingState ListQueryHistoryRequest) { +} + type ListQueryObjectsResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` Results []ListQueryObjectsResponseQuery `tfsdk:"results" tf:"optional"` } +func (newState *ListQueryObjectsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListQueryObjectsResponse) { +} + +func (newState *ListQueryObjectsResponse) SyncEffectiveFieldsDuringRead(existingState ListQueryObjectsResponse) { +} + type ListQueryObjectsResponseQuery struct { // Whether to apply a 1000 row limit to the query result. 
ApplyAutoLimit types.Bool `tfsdk:"apply_auto_limit" tf:"optional"` @@ -1460,6 +1958,12 @@ type ListQueryObjectsResponseQuery struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *ListQueryObjectsResponseQuery) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListQueryObjectsResponseQuery) { +} + +func (newState *ListQueryObjectsResponseQuery) SyncEffectiveFieldsDuringRead(existingState ListQueryObjectsResponseQuery) { +} + type ListResponse struct { // The total number of dashboards. Count types.Int64 `tfsdk:"count" tf:"optional"` @@ -1471,6 +1975,12 @@ type ListResponse struct { Results []Dashboard `tfsdk:"results" tf:"optional"` } +func (newState *ListResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListResponse) { +} + +func (newState *ListResponse) SyncEffectiveFieldsDuringRead(existingState ListResponse) { +} + // List visualizations on a query type ListVisualizationsForQueryRequest struct { Id types.String `tfsdk:"-"` @@ -1480,12 +1990,24 @@ type ListVisualizationsForQueryRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListVisualizationsForQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListVisualizationsForQueryRequest) { +} + +func (newState *ListVisualizationsForQueryRequest) SyncEffectiveFieldsDuringRead(existingState ListVisualizationsForQueryRequest) { +} + type ListVisualizationsForQueryResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` Results []Visualization `tfsdk:"results" tf:"optional"` } +func (newState *ListVisualizationsForQueryResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListVisualizationsForQueryResponse) { +} + +func (newState *ListVisualizationsForQueryResponse) SyncEffectiveFieldsDuringRead(existingState ListVisualizationsForQueryResponse) { +} + // List warehouses type ListWarehousesRequest struct { // Service Principal which will be used to fetch the list of warehouses. If @@ -1493,11 +2015,23 @@ type ListWarehousesRequest struct { RunAsUserId types.Int64 `tfsdk:"-"` } +func (newState *ListWarehousesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListWarehousesRequest) { +} + +func (newState *ListWarehousesRequest) SyncEffectiveFieldsDuringRead(existingState ListWarehousesRequest) { +} + type ListWarehousesResponse struct { // A list of warehouses and their configurations. Warehouses []EndpointInfo `tfsdk:"warehouses" tf:"optional"` } +func (newState *ListWarehousesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListWarehousesResponse) { +} + +func (newState *ListWarehousesResponse) SyncEffectiveFieldsDuringRead(existingState ListWarehousesResponse) { +} + type MultiValuesOptions struct { // Character that prefixes each selected parameter value. 
Prefix types.String `tfsdk:"prefix" tf:"optional"` @@ -1508,10 +2042,22 @@ type MultiValuesOptions struct { Suffix types.String `tfsdk:"suffix" tf:"optional"` } +func (newState *MultiValuesOptions) SyncEffectiveFieldsDuringCreateOrUpdate(plan MultiValuesOptions) { +} + +func (newState *MultiValuesOptions) SyncEffectiveFieldsDuringRead(existingState MultiValuesOptions) { +} + type NumericValue struct { Value types.Float64 `tfsdk:"value" tf:"optional"` } +func (newState *NumericValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan NumericValue) { +} + +func (newState *NumericValue) SyncEffectiveFieldsDuringRead(existingState NumericValue) { +} + type OdbcParams struct { Hostname types.String `tfsdk:"hostname" tf:"optional"` @@ -1522,6 +2068,12 @@ type OdbcParams struct { Protocol types.String `tfsdk:"protocol" tf:"optional"` } +func (newState *OdbcParams) SyncEffectiveFieldsDuringCreateOrUpdate(plan OdbcParams) { +} + +func (newState *OdbcParams) SyncEffectiveFieldsDuringRead(existingState OdbcParams) { +} + type Parameter struct { // List of valid parameter values, newline delimited. Only applies for // dropdown list parameters. @@ -1543,6 +2095,12 @@ type Parameter struct { Value any `tfsdk:"value" tf:"optional"` } +func (newState *Parameter) SyncEffectiveFieldsDuringCreateOrUpdate(plan Parameter) { +} + +func (newState *Parameter) SyncEffectiveFieldsDuringRead(existingState Parameter) { +} + type Query struct { // Whether to apply a 1000 row limit to the query result. ApplyAutoLimit types.Bool `tfsdk:"apply_auto_limit" tf:"optional"` @@ -1582,6 +2140,12 @@ type Query struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *Query) SyncEffectiveFieldsDuringCreateOrUpdate(plan Query) { +} + +func (newState *Query) SyncEffectiveFieldsDuringRead(existingState Query) { +} + type QueryBackedValue struct { // If specified, allows multiple values to be selected for this parameter. MultiValuesOptions []MultiValuesOptions `tfsdk:"multi_values_options" tf:"optional,object"` @@ -1591,6 +2155,12 @@ type QueryBackedValue struct { Values []types.String `tfsdk:"values" tf:"optional"` } +func (newState *QueryBackedValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryBackedValue) { +} + +func (newState *QueryBackedValue) SyncEffectiveFieldsDuringRead(existingState QueryBackedValue) { +} + type QueryEditContent struct { // Data source ID maps to the ID of the data source used by the resource and // is distinct from the warehouse ID. [Learn more] @@ -1619,6 +2189,12 @@ type QueryEditContent struct { Tags []types.String `tfsdk:"tags" tf:"optional"` } +func (newState *QueryEditContent) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryEditContent) { +} + +func (newState *QueryEditContent) SyncEffectiveFieldsDuringRead(existingState QueryEditContent) { +} + type QueryFilter struct { // A range filter for query submitted time. The time range must be <= 30 // days. 
@@ -1633,6 +2209,12 @@ type QueryFilter struct { WarehouseIds []types.String `tfsdk:"warehouse_ids" tf:"optional"` } +func (newState *QueryFilter) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryFilter) { +} + +func (newState *QueryFilter) SyncEffectiveFieldsDuringRead(existingState QueryFilter) { +} + type QueryInfo struct { // SQL Warehouse channel information at the time of query execution ChannelUsed []ChannelInfo `tfsdk:"channel_used" tf:"optional,object"` @@ -1685,6 +2267,12 @@ type QueryInfo struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *QueryInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryInfo) { +} + +func (newState *QueryInfo) SyncEffectiveFieldsDuringRead(existingState QueryInfo) { +} + type QueryList struct { // The total number of queries. Count types.Int64 `tfsdk:"count" tf:"optional"` @@ -1696,6 +2284,12 @@ type QueryList struct { Results []LegacyQuery `tfsdk:"results" tf:"optional"` } +func (newState *QueryList) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryList) { +} + +func (newState *QueryList) SyncEffectiveFieldsDuringRead(existingState QueryList) { +} + // A query metric that encapsulates a set of measurements for a single query. // Metrics come from the driver and are stored in the history service database. type QueryMetrics struct { @@ -1757,6 +2351,12 @@ type QueryMetrics struct { WriteRemoteBytes types.Int64 `tfsdk:"write_remote_bytes" tf:"optional"` } +func (newState *QueryMetrics) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryMetrics) { +} + +func (newState *QueryMetrics) SyncEffectiveFieldsDuringRead(existingState QueryMetrics) { +} + type QueryOptions struct { // The name of the catalog to execute this query in. Catalog types.String `tfsdk:"catalog" tf:"optional"` @@ -1770,6 +2370,12 @@ type QueryOptions struct { Schema types.String `tfsdk:"schema" tf:"optional"` } +func (newState *QueryOptions) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryOptions) { +} + +func (newState *QueryOptions) SyncEffectiveFieldsDuringRead(existingState QueryOptions) { +} + type QueryParameter struct { // Date-range query parameter value. Can only specify one of // `dynamic_date_range_value` or `date_range_value`. @@ -1792,6 +2398,12 @@ type QueryParameter struct { Title types.String `tfsdk:"title" tf:"optional"` } +func (newState *QueryParameter) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryParameter) { +} + +func (newState *QueryParameter) SyncEffectiveFieldsDuringRead(existingState QueryParameter) { +} + type QueryPostContent struct { // Data source ID maps to the ID of the data source used by the resource and // is distinct from the warehouse ID. 
[Learn more] @@ -1820,6 +2432,12 @@ type QueryPostContent struct { Tags []types.String `tfsdk:"tags" tf:"optional"` } +func (newState *QueryPostContent) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryPostContent) { +} + +func (newState *QueryPostContent) SyncEffectiveFieldsDuringRead(existingState QueryPostContent) { +} + type RepeatedEndpointConfPairs struct { // Deprecated: Use configuration_pairs ConfigPair []EndpointConfPair `tfsdk:"config_pair" tf:"optional"` @@ -1827,19 +2445,43 @@ type RepeatedEndpointConfPairs struct { ConfigurationPairs []EndpointConfPair `tfsdk:"configuration_pairs" tf:"optional"` } +func (newState *RepeatedEndpointConfPairs) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepeatedEndpointConfPairs) { +} + +func (newState *RepeatedEndpointConfPairs) SyncEffectiveFieldsDuringRead(existingState RepeatedEndpointConfPairs) { +} + // Restore a dashboard type RestoreDashboardRequest struct { DashboardId types.String `tfsdk:"-"` } +func (newState *RestoreDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreDashboardRequest) { +} + +func (newState *RestoreDashboardRequest) SyncEffectiveFieldsDuringRead(existingState RestoreDashboardRequest) { +} + // Restore a query type RestoreQueriesLegacyRequest struct { QueryId types.String `tfsdk:"-"` } +func (newState *RestoreQueriesLegacyRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreQueriesLegacyRequest) { +} + +func (newState *RestoreQueriesLegacyRequest) SyncEffectiveFieldsDuringRead(existingState RestoreQueriesLegacyRequest) { +} + type RestoreResponse struct { } +func (newState *RestoreResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestoreResponse) { +} + +func (newState *RestoreResponse) SyncEffectiveFieldsDuringRead(existingState RestoreResponse) { +} + type ResultData struct { // The number of bytes in the result chunk. This field is not available when // using `INLINE` disposition. @@ -1867,6 +2509,12 @@ type ResultData struct { RowOffset types.Int64 `tfsdk:"row_offset" tf:"optional"` } +func (newState *ResultData) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResultData) { +} + +func (newState *ResultData) SyncEffectiveFieldsDuringRead(existingState ResultData) { +} + // The result manifest provides schema and metadata for the result set. type ResultManifest struct { // Array of result set chunk metadata. @@ -1887,6 +2535,12 @@ type ResultManifest struct { Truncated types.Bool `tfsdk:"truncated" tf:"optional"` } +func (newState *ResultManifest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResultManifest) { +} + +func (newState *ResultManifest) SyncEffectiveFieldsDuringRead(existingState ResultManifest) { +} + // The schema is an ordered list of column descriptions. type ResultSchema struct { ColumnCount types.Int64 `tfsdk:"column_count" tf:"optional"` @@ -1894,12 +2548,24 @@ type ResultSchema struct { Columns []ColumnInfo `tfsdk:"columns" tf:"optional"` } +func (newState *ResultSchema) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResultSchema) { +} + +func (newState *ResultSchema) SyncEffectiveFieldsDuringRead(existingState ResultSchema) { +} + type ServiceError struct { ErrorCode types.String `tfsdk:"error_code" tf:"optional"` // A brief summary of the error condition. 
Message types.String `tfsdk:"message" tf:"optional"` } +func (newState *ServiceError) SyncEffectiveFieldsDuringCreateOrUpdate(plan ServiceError) { +} + +func (newState *ServiceError) SyncEffectiveFieldsDuringRead(existingState ServiceError) { +} + // Set object ACL type SetRequest struct { AccessControlList []AccessControl `tfsdk:"access_control_list" tf:"optional"` @@ -1910,6 +2576,12 @@ type SetRequest struct { ObjectType types.String `tfsdk:"-"` } +func (newState *SetRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetRequest) { +} + +func (newState *SetRequest) SyncEffectiveFieldsDuringRead(existingState SetRequest) { +} + type SetResponse struct { AccessControlList []AccessControl `tfsdk:"access_control_list" tf:"optional"` // An object's type and UUID, separated by a forward slash (/) character. @@ -1918,6 +2590,12 @@ type SetResponse struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *SetResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetResponse) { +} + +func (newState *SetResponse) SyncEffectiveFieldsDuringRead(existingState SetResponse) { +} + type SetWorkspaceWarehouseConfigRequest struct { // Optional: Channel selection details Channel []Channel `tfsdk:"channel" tf:"optional,object"` @@ -1946,18 +2624,42 @@ type SetWorkspaceWarehouseConfigRequest struct { SqlConfigurationParameters []RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters" tf:"optional,object"` } +func (newState *SetWorkspaceWarehouseConfigRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetWorkspaceWarehouseConfigRequest) { +} + +func (newState *SetWorkspaceWarehouseConfigRequest) SyncEffectiveFieldsDuringRead(existingState SetWorkspaceWarehouseConfigRequest) { +} + type SetWorkspaceWarehouseConfigResponse struct { } +func (newState *SetWorkspaceWarehouseConfigResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SetWorkspaceWarehouseConfigResponse) { +} + +func (newState *SetWorkspaceWarehouseConfigResponse) SyncEffectiveFieldsDuringRead(existingState SetWorkspaceWarehouseConfigResponse) { +} + // Start a warehouse type StartRequest struct { // Required. Id of the SQL warehouse. Id types.String `tfsdk:"-"` } +func (newState *StartRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan StartRequest) { +} + +func (newState *StartRequest) SyncEffectiveFieldsDuringRead(existingState StartRequest) { +} + type StartWarehouseResponse struct { } +func (newState *StartWarehouseResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan StartWarehouseResponse) { +} + +func (newState *StartWarehouseResponse) SyncEffectiveFieldsDuringRead(existingState StartWarehouseResponse) { +} + type StatementParameterListItem struct { // The name of a parameter marker to be substituted in the statement. Name types.String `tfsdk:"name" tf:""` @@ -1974,6 +2676,12 @@ type StatementParameterListItem struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *StatementParameterListItem) SyncEffectiveFieldsDuringCreateOrUpdate(plan StatementParameterListItem) { +} + +func (newState *StatementParameterListItem) SyncEffectiveFieldsDuringRead(existingState StatementParameterListItem) { +} + type StatementResponse struct { // The result manifest provides schema and metadata for the result set. 
Manifest []ResultManifest `tfsdk:"manifest" tf:"optional,object"` @@ -1987,6 +2695,12 @@ type StatementResponse struct { Status []StatementStatus `tfsdk:"status" tf:"optional,object"` } +func (newState *StatementResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan StatementResponse) { +} + +func (newState *StatementResponse) SyncEffectiveFieldsDuringRead(existingState StatementResponse) { +} + // The status response includes execution state and if relevant, error // information. type StatementStatus struct { @@ -2001,19 +2715,43 @@ type StatementStatus struct { State types.String `tfsdk:"state" tf:"optional"` } +func (newState *StatementStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan StatementStatus) { +} + +func (newState *StatementStatus) SyncEffectiveFieldsDuringRead(existingState StatementStatus) { +} + // Stop a warehouse type StopRequest struct { // Required. Id of the SQL warehouse. Id types.String `tfsdk:"-"` } +func (newState *StopRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan StopRequest) { +} + +func (newState *StopRequest) SyncEffectiveFieldsDuringRead(existingState StopRequest) { +} + type StopWarehouseResponse struct { } +func (newState *StopWarehouseResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan StopWarehouseResponse) { +} + +func (newState *StopWarehouseResponse) SyncEffectiveFieldsDuringRead(existingState StopWarehouseResponse) { +} + type Success struct { Message types.String `tfsdk:"message" tf:"optional"` } +func (newState *Success) SyncEffectiveFieldsDuringCreateOrUpdate(plan Success) { +} + +func (newState *Success) SyncEffectiveFieldsDuringRead(existingState Success) { +} + type TerminationReason struct { // status code indicating why the cluster was terminated Code types.String `tfsdk:"code" tf:"optional"` @@ -2024,10 +2762,22 @@ type TerminationReason struct { Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *TerminationReason) SyncEffectiveFieldsDuringCreateOrUpdate(plan TerminationReason) { +} + +func (newState *TerminationReason) SyncEffectiveFieldsDuringRead(existingState TerminationReason) { +} + type TextValue struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *TextValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan TextValue) { +} + +func (newState *TextValue) SyncEffectiveFieldsDuringRead(existingState TextValue) { +} + type TimeRange struct { // The end time in milliseconds. EndTimeMs types.Int64 `tfsdk:"end_time_ms" tf:"optional"` @@ -2035,11 +2785,23 @@ type TimeRange struct { StartTimeMs types.Int64 `tfsdk:"start_time_ms" tf:"optional"` } +func (newState *TimeRange) SyncEffectiveFieldsDuringCreateOrUpdate(plan TimeRange) { +} + +func (newState *TimeRange) SyncEffectiveFieldsDuringRead(existingState TimeRange) { +} + type TransferOwnershipObjectId struct { // Email address for the new owner, who must exist in the workspace. NewOwner types.String `tfsdk:"new_owner" tf:"optional"` } +func (newState *TransferOwnershipObjectId) SyncEffectiveFieldsDuringCreateOrUpdate(plan TransferOwnershipObjectId) { +} + +func (newState *TransferOwnershipObjectId) SyncEffectiveFieldsDuringRead(existingState TransferOwnershipObjectId) { +} + // Transfer object ownership type TransferOwnershipRequest struct { // Email address for the new owner, who must exist in the workspace. 
@@ -2050,16 +2812,34 @@ type TransferOwnershipRequest struct { ObjectType types.String `tfsdk:"-"` } +func (newState *TransferOwnershipRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan TransferOwnershipRequest) { +} + +func (newState *TransferOwnershipRequest) SyncEffectiveFieldsDuringRead(existingState TransferOwnershipRequest) { +} + // Delete an alert type TrashAlertRequest struct { Id types.String `tfsdk:"-"` } +func (newState *TrashAlertRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan TrashAlertRequest) { +} + +func (newState *TrashAlertRequest) SyncEffectiveFieldsDuringRead(existingState TrashAlertRequest) { +} + // Delete a query type TrashQueryRequest struct { Id types.String `tfsdk:"-"` } +func (newState *TrashQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan TrashQueryRequest) { +} + +func (newState *TrashQueryRequest) SyncEffectiveFieldsDuringRead(existingState TrashQueryRequest) { +} + type UpdateAlertRequest struct { Alert []UpdateAlertRequestAlert `tfsdk:"alert" tf:"optional,object"` @@ -2071,6 +2851,12 @@ type UpdateAlertRequest struct { UpdateMask types.String `tfsdk:"update_mask" tf:""` } +func (newState *UpdateAlertRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAlertRequest) { +} + +func (newState *UpdateAlertRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAlertRequest) { +} + type UpdateAlertRequestAlert struct { // Trigger conditions of the alert. Condition []AlertCondition `tfsdk:"condition" tf:"optional,object"` @@ -2087,6 +2873,8 @@ type UpdateAlertRequestAlert struct { CustomSubject types.String `tfsdk:"custom_subject" tf:"optional"` // The display name of the alert. DisplayName types.String `tfsdk:"display_name" tf:"optional"` + // Whether to notify alert subscribers when alert returns back to normal. + NotifyOnOk types.Bool `tfsdk:"notify_on_ok" tf:"optional"` // The owner's username. This field is set to "Unavailable" if the user has // been deleted. OwnerUserName types.String `tfsdk:"owner_user_name" tf:"optional"` @@ -2098,6 +2886,12 @@ type UpdateAlertRequestAlert struct { SecondsToRetrigger types.Int64 `tfsdk:"seconds_to_retrigger" tf:"optional"` } +func (newState *UpdateAlertRequestAlert) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAlertRequestAlert) { +} + +func (newState *UpdateAlertRequestAlert) SyncEffectiveFieldsDuringRead(existingState UpdateAlertRequestAlert) { +} + type UpdateQueryRequest struct { Id types.String `tfsdk:"-"` @@ -2109,6 +2903,12 @@ type UpdateQueryRequest struct { UpdateMask types.String `tfsdk:"update_mask" tf:""` } +func (newState *UpdateQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateQueryRequest) { +} + +func (newState *UpdateQueryRequest) SyncEffectiveFieldsDuringRead(existingState UpdateQueryRequest) { +} + type UpdateQueryRequestQuery struct { // Whether to apply a 1000 row limit to the query result. 
ApplyAutoLimit types.Bool `tfsdk:"apply_auto_limit" tf:"optional"` @@ -2136,9 +2936,21 @@ type UpdateQueryRequestQuery struct { WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } +func (newState *UpdateQueryRequestQuery) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateQueryRequestQuery) { +} + +func (newState *UpdateQueryRequestQuery) SyncEffectiveFieldsDuringRead(existingState UpdateQueryRequestQuery) { +} + type UpdateResponse struct { } +func (newState *UpdateResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateResponse) { +} + +func (newState *UpdateResponse) SyncEffectiveFieldsDuringRead(existingState UpdateResponse) { +} + type UpdateVisualizationRequest struct { Id types.String `tfsdk:"-"` // Field mask is required to be passed into the PATCH request. Field mask @@ -2150,6 +2962,12 @@ type UpdateVisualizationRequest struct { Visualization []UpdateVisualizationRequestVisualization `tfsdk:"visualization" tf:"optional,object"` } +func (newState *UpdateVisualizationRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateVisualizationRequest) { +} + +func (newState *UpdateVisualizationRequest) SyncEffectiveFieldsDuringRead(existingState UpdateVisualizationRequest) { +} + type UpdateVisualizationRequestVisualization struct { // The display name of the visualization. DisplayName types.String `tfsdk:"display_name" tf:"optional"` @@ -2165,6 +2983,12 @@ type UpdateVisualizationRequestVisualization struct { Type types.String `tfsdk:"type" tf:"optional"` } +func (newState *UpdateVisualizationRequestVisualization) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateVisualizationRequestVisualization) { +} + +func (newState *UpdateVisualizationRequestVisualization) SyncEffectiveFieldsDuringRead(existingState UpdateVisualizationRequestVisualization) { +} + type User struct { Email types.String `tfsdk:"email" tf:"optional"` @@ -2173,6 +2997,12 @@ type User struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *User) SyncEffectiveFieldsDuringCreateOrUpdate(plan User) { +} + +func (newState *User) SyncEffectiveFieldsDuringRead(existingState User) { +} + type Visualization struct { // The timestamp indicating when the visualization was created. CreateTime types.String `tfsdk:"create_time" tf:"optional"` @@ -2196,6 +3026,12 @@ type Visualization struct { UpdateTime types.String `tfsdk:"update_time" tf:"optional"` } +func (newState *Visualization) SyncEffectiveFieldsDuringCreateOrUpdate(plan Visualization) { +} + +func (newState *Visualization) SyncEffectiveFieldsDuringRead(existingState Visualization) { +} + type WarehouseAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -2207,6 +3043,12 @@ type WarehouseAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *WarehouseAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan WarehouseAccessControlRequest) { +} + +func (newState *WarehouseAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState WarehouseAccessControlRequest) { +} + type WarehouseAccessControlResponse struct { // All permissions. 
AllPermissions []WarehousePermission `tfsdk:"all_permissions" tf:"optional"` @@ -2220,6 +3062,12 @@ type WarehouseAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *WarehouseAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan WarehouseAccessControlResponse) { +} + +func (newState *WarehouseAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState WarehouseAccessControlResponse) { +} + type WarehousePermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -2228,6 +3076,12 @@ type WarehousePermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *WarehousePermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan WarehousePermission) { +} + +func (newState *WarehousePermission) SyncEffectiveFieldsDuringRead(existingState WarehousePermission) { +} + type WarehousePermissions struct { AccessControlList []WarehouseAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -2236,18 +3090,36 @@ type WarehousePermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *WarehousePermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan WarehousePermissions) { +} + +func (newState *WarehousePermissions) SyncEffectiveFieldsDuringRead(existingState WarehousePermissions) { +} + type WarehousePermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *WarehousePermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan WarehousePermissionsDescription) { +} + +func (newState *WarehousePermissionsDescription) SyncEffectiveFieldsDuringRead(existingState WarehousePermissionsDescription) { +} + type WarehousePermissionsRequest struct { AccessControlList []WarehouseAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The SQL warehouse for which to get or manage permissions. WarehouseId types.String `tfsdk:"-"` } +func (newState *WarehousePermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan WarehousePermissionsRequest) { +} + +func (newState *WarehousePermissionsRequest) SyncEffectiveFieldsDuringRead(existingState WarehousePermissionsRequest) { +} + type WarehouseTypePair struct { // If set to false the specific warehouse type will not be be allowed as a // value for warehouse_type in CreateWarehouse and EditWarehouse @@ -2256,6 +3128,12 @@ type WarehouseTypePair struct { WarehouseType types.String `tfsdk:"warehouse_type" tf:"optional"` } +func (newState *WarehouseTypePair) SyncEffectiveFieldsDuringCreateOrUpdate(plan WarehouseTypePair) { +} + +func (newState *WarehouseTypePair) SyncEffectiveFieldsDuringRead(existingState WarehouseTypePair) { +} + type Widget struct { // The unique ID for this widget. 
Id types.String `tfsdk:"id" tf:"optional"` @@ -2271,6 +3149,12 @@ type Widget struct { Width types.Int64 `tfsdk:"width" tf:"optional"` } +func (newState *Widget) SyncEffectiveFieldsDuringCreateOrUpdate(plan Widget) { +} + +func (newState *Widget) SyncEffectiveFieldsDuringRead(existingState Widget) { +} + type WidgetOptions struct { // Timestamp when this object was created CreatedAt types.String `tfsdk:"created_at" tf:"optional"` @@ -2291,6 +3175,12 @@ type WidgetOptions struct { UpdatedAt types.String `tfsdk:"updated_at" tf:"optional"` } +func (newState *WidgetOptions) SyncEffectiveFieldsDuringCreateOrUpdate(plan WidgetOptions) { +} + +func (newState *WidgetOptions) SyncEffectiveFieldsDuringRead(existingState WidgetOptions) { +} + // Coordinates of this widget on a dashboard. This portion of the API changes // frequently and is unsupported. type WidgetPosition struct { @@ -2305,3 +3195,9 @@ type WidgetPosition struct { // height of the widget measured in dashboard grid cells SizeY types.Int64 `tfsdk:"sizeY" tf:"optional"` } + +func (newState *WidgetPosition) SyncEffectiveFieldsDuringCreateOrUpdate(plan WidgetPosition) { +} + +func (newState *WidgetPosition) SyncEffectiveFieldsDuringRead(existingState WidgetPosition) { +} diff --git a/internal/service/vectorsearch_tf/model.go b/internal/service/vectorsearch_tf/model.go index e0590e7ad9..4c35584942 100755 --- a/internal/service/vectorsearch_tf/model.go +++ b/internal/service/vectorsearch_tf/model.go @@ -19,6 +19,12 @@ type ColumnInfo struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *ColumnInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnInfo) { +} + +func (newState *ColumnInfo) SyncEffectiveFieldsDuringRead(existingState ColumnInfo) { +} + type CreateEndpoint struct { // Type of endpoint. EndpointType types.String `tfsdk:"endpoint_type" tf:""` @@ -26,6 +32,12 @@ type CreateEndpoint struct { Name types.String `tfsdk:"name" tf:""` } +func (newState *CreateEndpoint) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateEndpoint) { +} + +func (newState *CreateEndpoint) SyncEffectiveFieldsDuringRead(existingState CreateEndpoint) { +} + type CreateVectorIndexRequest struct { // Specification for Delta Sync Index. Required if `index_type` is // `DELTA_SYNC`. @@ -49,10 +61,22 @@ type CreateVectorIndexRequest struct { PrimaryKey types.String `tfsdk:"primary_key" tf:""` } +func (newState *CreateVectorIndexRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateVectorIndexRequest) { +} + +func (newState *CreateVectorIndexRequest) SyncEffectiveFieldsDuringRead(existingState CreateVectorIndexRequest) { +} + type CreateVectorIndexResponse struct { VectorIndex []VectorIndex `tfsdk:"vector_index" tf:"optional,object"` } +func (newState *CreateVectorIndexResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateVectorIndexResponse) { +} + +func (newState *CreateVectorIndexResponse) SyncEffectiveFieldsDuringRead(existingState CreateVectorIndexResponse) { +} + // Result of the upsert or delete operation. type DeleteDataResult struct { // List of primary keys for rows that failed to process. @@ -61,6 +85,12 @@ type DeleteDataResult struct { SuccessRowCount types.Int64 `tfsdk:"success_row_count" tf:"optional"` } +func (newState *DeleteDataResult) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDataResult) { +} + +func (newState *DeleteDataResult) SyncEffectiveFieldsDuringRead(existingState DeleteDataResult) { +} + // Request payload for deleting data from a vector index. 
type DeleteDataVectorIndexRequest struct { // Name of the vector index where data is to be deleted. Must be a Direct @@ -70,6 +100,12 @@ type DeleteDataVectorIndexRequest struct { PrimaryKeys []types.String `tfsdk:"primary_keys" tf:""` } +func (newState *DeleteDataVectorIndexRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDataVectorIndexRequest) { +} + +func (newState *DeleteDataVectorIndexRequest) SyncEffectiveFieldsDuringRead(existingState DeleteDataVectorIndexRequest) { +} + // Response to a delete data vector index request. type DeleteDataVectorIndexResponse struct { // Result of the upsert or delete operation. @@ -78,24 +114,54 @@ type DeleteDataVectorIndexResponse struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *DeleteDataVectorIndexResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteDataVectorIndexResponse) { +} + +func (newState *DeleteDataVectorIndexResponse) SyncEffectiveFieldsDuringRead(existingState DeleteDataVectorIndexResponse) { +} + // Delete an endpoint type DeleteEndpointRequest struct { // Name of the endpoint EndpointName types.String `tfsdk:"-"` } +func (newState *DeleteEndpointRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteEndpointRequest) { +} + +func (newState *DeleteEndpointRequest) SyncEffectiveFieldsDuringRead(existingState DeleteEndpointRequest) { +} + type DeleteEndpointResponse struct { } +func (newState *DeleteEndpointResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteEndpointResponse) { +} + +func (newState *DeleteEndpointResponse) SyncEffectiveFieldsDuringRead(existingState DeleteEndpointResponse) { +} + // Delete an index type DeleteIndexRequest struct { // Name of the index IndexName types.String `tfsdk:"-"` } +func (newState *DeleteIndexRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteIndexRequest) { +} + +func (newState *DeleteIndexRequest) SyncEffectiveFieldsDuringRead(existingState DeleteIndexRequest) { +} + type DeleteIndexResponse struct { } +func (newState *DeleteIndexResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteIndexResponse) { +} + +func (newState *DeleteIndexResponse) SyncEffectiveFieldsDuringRead(existingState DeleteIndexResponse) { +} + type DeltaSyncVectorIndexSpecRequest struct { // [Optional] Select the columns to sync with the vector index. If you leave // this field blank, all columns from the source table are synced with the @@ -124,6 +190,12 @@ type DeltaSyncVectorIndexSpecRequest struct { SourceTable types.String `tfsdk:"source_table" tf:"optional"` } +func (newState *DeltaSyncVectorIndexSpecRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeltaSyncVectorIndexSpecRequest) { +} + +func (newState *DeltaSyncVectorIndexSpecRequest) SyncEffectiveFieldsDuringRead(existingState DeltaSyncVectorIndexSpecRequest) { +} + type DeltaSyncVectorIndexSpecResponse struct { // The columns that contain the embedding source. EmbeddingSourceColumns []EmbeddingSourceColumn `tfsdk:"embedding_source_columns" tf:"optional"` @@ -147,6 +219,12 @@ type DeltaSyncVectorIndexSpecResponse struct { SourceTable types.String `tfsdk:"source_table" tf:"optional"` } +func (newState *DeltaSyncVectorIndexSpecResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeltaSyncVectorIndexSpecResponse) { +} + +func (newState *DeltaSyncVectorIndexSpecResponse) SyncEffectiveFieldsDuringRead(existingState DeltaSyncVectorIndexSpecResponse) { +} + type DirectAccessVectorIndexSpec struct { // Contains the optional model endpoint to use during query time. 
EmbeddingSourceColumns []EmbeddingSourceColumn `tfsdk:"embedding_source_columns" tf:"optional"` @@ -161,6 +239,12 @@ type DirectAccessVectorIndexSpec struct { SchemaJson types.String `tfsdk:"schema_json" tf:"optional"` } +func (newState *DirectAccessVectorIndexSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan DirectAccessVectorIndexSpec) { +} + +func (newState *DirectAccessVectorIndexSpec) SyncEffectiveFieldsDuringRead(existingState DirectAccessVectorIndexSpec) { +} + type EmbeddingSourceColumn struct { // Name of the embedding model endpoint EmbeddingModelEndpointName types.String `tfsdk:"embedding_model_endpoint_name" tf:"optional"` @@ -168,6 +252,12 @@ type EmbeddingSourceColumn struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *EmbeddingSourceColumn) SyncEffectiveFieldsDuringCreateOrUpdate(plan EmbeddingSourceColumn) { +} + +func (newState *EmbeddingSourceColumn) SyncEffectiveFieldsDuringRead(existingState EmbeddingSourceColumn) { +} + type EmbeddingVectorColumn struct { // Dimension of the embedding vector EmbeddingDimension types.Int64 `tfsdk:"embedding_dimension" tf:"optional"` @@ -175,6 +265,12 @@ type EmbeddingVectorColumn struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *EmbeddingVectorColumn) SyncEffectiveFieldsDuringCreateOrUpdate(plan EmbeddingVectorColumn) { +} + +func (newState *EmbeddingVectorColumn) SyncEffectiveFieldsDuringRead(existingState EmbeddingVectorColumn) { +} + type EndpointInfo struct { // Timestamp of endpoint creation CreationTimestamp types.Int64 `tfsdk:"creation_timestamp" tf:"optional"` @@ -196,6 +292,12 @@ type EndpointInfo struct { NumIndexes types.Int64 `tfsdk:"num_indexes" tf:"optional"` } +func (newState *EndpointInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointInfo) { +} + +func (newState *EndpointInfo) SyncEffectiveFieldsDuringRead(existingState EndpointInfo) { +} + // Status information of an endpoint type EndpointStatus struct { // Additional status message @@ -204,18 +306,36 @@ type EndpointStatus struct { State types.String `tfsdk:"state" tf:"optional"` } +func (newState *EndpointStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan EndpointStatus) { +} + +func (newState *EndpointStatus) SyncEffectiveFieldsDuringRead(existingState EndpointStatus) { +} + // Get an endpoint type GetEndpointRequest struct { // Name of the endpoint EndpointName types.String `tfsdk:"-"` } +func (newState *GetEndpointRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetEndpointRequest) { +} + +func (newState *GetEndpointRequest) SyncEffectiveFieldsDuringRead(existingState GetEndpointRequest) { +} + // Get an index type GetIndexRequest struct { // Name of the index IndexName types.String `tfsdk:"-"` } +func (newState *GetIndexRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetIndexRequest) { +} + +func (newState *GetIndexRequest) SyncEffectiveFieldsDuringRead(existingState GetIndexRequest) { +} + type ListEndpointResponse struct { // An array of Endpoint objects Endpoints []EndpointInfo `tfsdk:"endpoints" tf:"optional"` @@ -224,12 +344,24 @@ type ListEndpointResponse struct { NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` } +func (newState *ListEndpointResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListEndpointResponse) { +} + +func (newState *ListEndpointResponse) SyncEffectiveFieldsDuringRead(existingState ListEndpointResponse) { +} + // List all endpoints type ListEndpointsRequest struct { // Token for pagination PageToken types.String `tfsdk:"-"` } +func (newState 
*ListEndpointsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListEndpointsRequest) { +} + +func (newState *ListEndpointsRequest) SyncEffectiveFieldsDuringRead(existingState ListEndpointsRequest) { +} + // List indexes type ListIndexesRequest struct { // Name of the endpoint @@ -238,10 +370,22 @@ type ListIndexesRequest struct { PageToken types.String `tfsdk:"-"` } +func (newState *ListIndexesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListIndexesRequest) { +} + +func (newState *ListIndexesRequest) SyncEffectiveFieldsDuringRead(existingState ListIndexesRequest) { +} + type ListValue struct { Values []Value `tfsdk:"values" tf:"optional"` } +func (newState *ListValue) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListValue) { +} + +func (newState *ListValue) SyncEffectiveFieldsDuringRead(existingState ListValue) { +} + type ListVectorIndexesResponse struct { // A token that can be used to get the next page of results. If not present, // there are no more results to show. @@ -250,6 +394,12 @@ type ListVectorIndexesResponse struct { VectorIndexes []MiniVectorIndex `tfsdk:"vector_indexes" tf:"optional"` } +func (newState *ListVectorIndexesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListVectorIndexesResponse) { +} + +func (newState *ListVectorIndexesResponse) SyncEffectiveFieldsDuringRead(existingState ListVectorIndexesResponse) { +} + // Key-value pair. type MapStringValueEntry struct { // Column name. @@ -258,6 +408,12 @@ type MapStringValueEntry struct { Value []Value `tfsdk:"value" tf:"optional,object"` } +func (newState *MapStringValueEntry) SyncEffectiveFieldsDuringCreateOrUpdate(plan MapStringValueEntry) { +} + +func (newState *MapStringValueEntry) SyncEffectiveFieldsDuringRead(existingState MapStringValueEntry) { +} + type MiniVectorIndex struct { // The user who created the index. Creator types.String `tfsdk:"creator" tf:"optional"` @@ -277,6 +433,12 @@ type MiniVectorIndex struct { PrimaryKey types.String `tfsdk:"primary_key" tf:"optional"` } +func (newState *MiniVectorIndex) SyncEffectiveFieldsDuringCreateOrUpdate(plan MiniVectorIndex) { +} + +func (newState *MiniVectorIndex) SyncEffectiveFieldsDuringRead(existingState MiniVectorIndex) { +} + // Request payload for getting next page of results. type QueryVectorIndexNextPageRequest struct { // Name of the endpoint. @@ -288,6 +450,12 @@ type QueryVectorIndexNextPageRequest struct { PageToken types.String `tfsdk:"page_token" tf:"optional"` } +func (newState *QueryVectorIndexNextPageRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryVectorIndexNextPageRequest) { +} + +func (newState *QueryVectorIndexNextPageRequest) SyncEffectiveFieldsDuringRead(existingState QueryVectorIndexNextPageRequest) { +} + type QueryVectorIndexRequest struct { // List of column names to include in the response. Columns []types.String `tfsdk:"columns" tf:""` @@ -313,6 +481,12 @@ type QueryVectorIndexRequest struct { ScoreThreshold types.Float64 `tfsdk:"score_threshold" tf:"optional"` } +func (newState *QueryVectorIndexRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryVectorIndexRequest) { +} + +func (newState *QueryVectorIndexRequest) SyncEffectiveFieldsDuringRead(existingState QueryVectorIndexRequest) { +} + type QueryVectorIndexResponse struct { // Metadata about the result set. 
Manifest []ResultManifest `tfsdk:"manifest" tf:"optional,object"` @@ -324,6 +498,12 @@ type QueryVectorIndexResponse struct { Result []ResultData `tfsdk:"result" tf:"optional,object"` } +func (newState *QueryVectorIndexResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan QueryVectorIndexResponse) { +} + +func (newState *QueryVectorIndexResponse) SyncEffectiveFieldsDuringRead(existingState QueryVectorIndexResponse) { +} + // Data returned in the query result. type ResultData struct { // Data rows returned in the query. @@ -332,6 +512,12 @@ type ResultData struct { RowCount types.Int64 `tfsdk:"row_count" tf:"optional"` } +func (newState *ResultData) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResultData) { +} + +func (newState *ResultData) SyncEffectiveFieldsDuringRead(existingState ResultData) { +} + // Metadata about the result set. type ResultManifest struct { // Number of columns in the result set. @@ -340,6 +526,12 @@ type ResultManifest struct { Columns []ColumnInfo `tfsdk:"columns" tf:"optional"` } +func (newState *ResultManifest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ResultManifest) { +} + +func (newState *ResultManifest) SyncEffectiveFieldsDuringRead(existingState ResultManifest) { +} + // Request payload for scanning data from a vector index. type ScanVectorIndexRequest struct { // Name of the vector index to scan. @@ -350,6 +542,12 @@ type ScanVectorIndexRequest struct { NumResults types.Int64 `tfsdk:"num_results" tf:"optional"` } +func (newState *ScanVectorIndexRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ScanVectorIndexRequest) { +} + +func (newState *ScanVectorIndexRequest) SyncEffectiveFieldsDuringRead(existingState ScanVectorIndexRequest) { +} + // Response to a scan vector index request. type ScanVectorIndexResponse struct { // List of data entries @@ -358,20 +556,44 @@ type ScanVectorIndexResponse struct { LastPrimaryKey types.String `tfsdk:"last_primary_key" tf:"optional"` } +func (newState *ScanVectorIndexResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ScanVectorIndexResponse) { +} + +func (newState *ScanVectorIndexResponse) SyncEffectiveFieldsDuringRead(existingState ScanVectorIndexResponse) { +} + type Struct struct { // Data entry, corresponding to a row in a vector index. Fields []MapStringValueEntry `tfsdk:"fields" tf:"optional"` } +func (newState *Struct) SyncEffectiveFieldsDuringCreateOrUpdate(plan Struct) { +} + +func (newState *Struct) SyncEffectiveFieldsDuringRead(existingState Struct) { +} + // Synchronize an index type SyncIndexRequest struct { // Name of the vector index to synchronize. Must be a Delta Sync Index. IndexName types.String `tfsdk:"-"` } +func (newState *SyncIndexRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan SyncIndexRequest) { +} + +func (newState *SyncIndexRequest) SyncEffectiveFieldsDuringRead(existingState SyncIndexRequest) { +} + type SyncIndexResponse struct { } +func (newState *SyncIndexResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan SyncIndexResponse) { +} + +func (newState *SyncIndexResponse) SyncEffectiveFieldsDuringRead(existingState SyncIndexResponse) { +} + // Result of the upsert or delete operation. type UpsertDataResult struct { // List of primary keys for rows that failed to process. 
@@ -380,6 +602,12 @@ type UpsertDataResult struct { SuccessRowCount types.Int64 `tfsdk:"success_row_count" tf:"optional"` } +func (newState *UpsertDataResult) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpsertDataResult) { +} + +func (newState *UpsertDataResult) SyncEffectiveFieldsDuringRead(existingState UpsertDataResult) { +} + // Request payload for upserting data into a vector index. type UpsertDataVectorIndexRequest struct { // Name of the vector index where data is to be upserted. Must be a Direct @@ -389,6 +617,12 @@ type UpsertDataVectorIndexRequest struct { InputsJson types.String `tfsdk:"inputs_json" tf:""` } +func (newState *UpsertDataVectorIndexRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpsertDataVectorIndexRequest) { +} + +func (newState *UpsertDataVectorIndexRequest) SyncEffectiveFieldsDuringRead(existingState UpsertDataVectorIndexRequest) { +} + // Response to an upsert data vector index request. type UpsertDataVectorIndexResponse struct { // Result of the upsert or delete operation. @@ -397,6 +631,12 @@ type UpsertDataVectorIndexResponse struct { Status types.String `tfsdk:"status" tf:"optional"` } +func (newState *UpsertDataVectorIndexResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpsertDataVectorIndexResponse) { +} + +func (newState *UpsertDataVectorIndexResponse) SyncEffectiveFieldsDuringRead(existingState UpsertDataVectorIndexResponse) { +} + type Value struct { BoolValue types.Bool `tfsdk:"bool_value" tf:"optional"` @@ -411,6 +651,12 @@ type Value struct { StructValue []Struct `tfsdk:"struct_value" tf:"optional,object"` } +func (newState *Value) SyncEffectiveFieldsDuringCreateOrUpdate(plan Value) { +} + +func (newState *Value) SyncEffectiveFieldsDuringRead(existingState Value) { +} + type VectorIndex struct { // The user who created the index. 
Creator types.String `tfsdk:"creator" tf:"optional"` @@ -436,6 +682,12 @@ type VectorIndex struct { Status []VectorIndexStatus `tfsdk:"status" tf:"optional,object"` } +func (newState *VectorIndex) SyncEffectiveFieldsDuringCreateOrUpdate(plan VectorIndex) { +} + +func (newState *VectorIndex) SyncEffectiveFieldsDuringRead(existingState VectorIndex) { +} + type VectorIndexStatus struct { // Index API Url to be used to perform operations on the index IndexUrl types.String `tfsdk:"index_url" tf:"optional"` @@ -446,3 +698,9 @@ type VectorIndexStatus struct { // Whether the index is ready for search Ready types.Bool `tfsdk:"ready" tf:"optional"` } + +func (newState *VectorIndexStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan VectorIndexStatus) { +} + +func (newState *VectorIndexStatus) SyncEffectiveFieldsDuringRead(existingState VectorIndexStatus) { +} diff --git a/internal/service/workspace_tf/model.go b/internal/service/workspace_tf/model.go index 6845913417..058ea6da2d 100755 --- a/internal/service/workspace_tf/model.go +++ b/internal/service/workspace_tf/model.go @@ -21,6 +21,12 @@ type AclItem struct { Principal types.String `tfsdk:"principal" tf:""` } +func (newState *AclItem) SyncEffectiveFieldsDuringCreateOrUpdate(plan AclItem) { +} + +func (newState *AclItem) SyncEffectiveFieldsDuringRead(existingState AclItem) { +} + type AzureKeyVaultSecretScopeMetadata struct { // The DNS of the KeyVault DnsName types.String `tfsdk:"dns_name" tf:""` @@ -29,6 +35,12 @@ type AzureKeyVaultSecretScopeMetadata struct { ResourceId types.String `tfsdk:"resource_id" tf:""` } +func (newState *AzureKeyVaultSecretScopeMetadata) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureKeyVaultSecretScopeMetadata) { +} + +func (newState *AzureKeyVaultSecretScopeMetadata) SyncEffectiveFieldsDuringRead(existingState AzureKeyVaultSecretScopeMetadata) { +} + type CreateCredentialsRequest struct { // Git provider. This field is case-insensitive. The available Git providers // are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, @@ -51,6 +63,12 @@ type CreateCredentialsRequest struct { PersonalAccessToken types.String `tfsdk:"personal_access_token" tf:"optional"` } +func (newState *CreateCredentialsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCredentialsRequest) { +} + +func (newState *CreateCredentialsRequest) SyncEffectiveFieldsDuringRead(existingState CreateCredentialsRequest) { +} + type CreateCredentialsResponse struct { // ID of the credential object in the workspace. CredentialId types.Int64 `tfsdk:"credential_id" tf:""` @@ -61,6 +79,12 @@ type CreateCredentialsResponse struct { GitUsername types.String `tfsdk:"git_username" tf:"optional"` } +func (newState *CreateCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCredentialsResponse) { +} + +func (newState *CreateCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState CreateCredentialsResponse) { +} + type CreateRepoRequest struct { // Desired path for the repo in the workspace. Almost any path in the // workspace can be chosen. If repo is created in `/Repos`, path must be in @@ -78,6 +102,12 @@ type CreateRepoRequest struct { Url types.String `tfsdk:"url" tf:""` } +func (newState *CreateRepoRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateRepoRequest) { +} + +func (newState *CreateRepoRequest) SyncEffectiveFieldsDuringRead(existingState CreateRepoRequest) { +} + type CreateRepoResponse struct { // Branch that the Git folder (repo) is checked out to. 
Branch types.String `tfsdk:"branch" tf:"optional"` @@ -96,6 +126,12 @@ type CreateRepoResponse struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *CreateRepoResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateRepoResponse) { +} + +func (newState *CreateRepoResponse) SyncEffectiveFieldsDuringRead(existingState CreateRepoResponse) { +} + type CreateScope struct { // The metadata for the secret scope if the type is `AZURE_KEYVAULT` BackendAzureKeyvault []AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional,object"` @@ -109,9 +145,21 @@ type CreateScope struct { ScopeBackendType types.String `tfsdk:"scope_backend_type" tf:"optional"` } +func (newState *CreateScope) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateScope) { +} + +func (newState *CreateScope) SyncEffectiveFieldsDuringRead(existingState CreateScope) { +} + type CreateScopeResponse struct { } +func (newState *CreateScopeResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateScopeResponse) { +} + +func (newState *CreateScopeResponse) SyncEffectiveFieldsDuringRead(existingState CreateScopeResponse) { +} + type CredentialInfo struct { // ID of the credential object in the workspace. CredentialId types.Int64 `tfsdk:"credential_id" tf:""` @@ -122,6 +170,12 @@ type CredentialInfo struct { GitUsername types.String `tfsdk:"git_username" tf:"optional"` } +func (newState *CredentialInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CredentialInfo) { +} + +func (newState *CredentialInfo) SyncEffectiveFieldsDuringRead(existingState CredentialInfo) { +} + type Delete struct { // The absolute path of the notebook or directory. Path types.String `tfsdk:"path" tf:""` @@ -132,6 +186,12 @@ type Delete struct { Recursive types.Bool `tfsdk:"recursive" tf:"optional"` } +func (newState *Delete) SyncEffectiveFieldsDuringCreateOrUpdate(plan Delete) { +} + +func (newState *Delete) SyncEffectiveFieldsDuringRead(existingState Delete) { +} + type DeleteAcl struct { // The principal to remove an existing ACL from. Principal types.String `tfsdk:"principal" tf:""` @@ -139,38 +199,92 @@ type DeleteAcl struct { Scope types.String `tfsdk:"scope" tf:""` } +func (newState *DeleteAcl) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAcl) { +} + +func (newState *DeleteAcl) SyncEffectiveFieldsDuringRead(existingState DeleteAcl) { +} + type DeleteAclResponse struct { } +func (newState *DeleteAclResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAclResponse) { +} + +func (newState *DeleteAclResponse) SyncEffectiveFieldsDuringRead(existingState DeleteAclResponse) { +} + // Delete a credential type DeleteCredentialsRequest struct { // The ID for the corresponding credential to access. CredentialId types.Int64 `tfsdk:"-"` } +func (newState *DeleteCredentialsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCredentialsRequest) { +} + +func (newState *DeleteCredentialsRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCredentialsRequest) { +} + type DeleteCredentialsResponse struct { } +func (newState *DeleteCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCredentialsResponse) { +} + +func (newState *DeleteCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState DeleteCredentialsResponse) { +} + // Delete a repo type DeleteRepoRequest struct { // ID of the Git folder (repo) object in the workspace. 
RepoId types.Int64 `tfsdk:"-"` } +func (newState *DeleteRepoRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRepoRequest) { +} + +func (newState *DeleteRepoRequest) SyncEffectiveFieldsDuringRead(existingState DeleteRepoRequest) { +} + type DeleteRepoResponse struct { } +func (newState *DeleteRepoResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteRepoResponse) { +} + +func (newState *DeleteRepoResponse) SyncEffectiveFieldsDuringRead(existingState DeleteRepoResponse) { +} + type DeleteResponse struct { } +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + type DeleteScope struct { // Name of the scope to delete. Scope types.String `tfsdk:"scope" tf:""` } +func (newState *DeleteScope) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteScope) { +} + +func (newState *DeleteScope) SyncEffectiveFieldsDuringRead(existingState DeleteScope) { +} + type DeleteScopeResponse struct { } +func (newState *DeleteScopeResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteScopeResponse) { +} + +func (newState *DeleteScopeResponse) SyncEffectiveFieldsDuringRead(existingState DeleteScopeResponse) { +} + type DeleteSecret struct { // Name of the secret to delete. Key types.String `tfsdk:"key" tf:""` @@ -178,9 +292,21 @@ type DeleteSecret struct { Scope types.String `tfsdk:"scope" tf:""` } +func (newState *DeleteSecret) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteSecret) { +} + +func (newState *DeleteSecret) SyncEffectiveFieldsDuringRead(existingState DeleteSecret) { +} + type DeleteSecretResponse struct { } +func (newState *DeleteSecretResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteSecretResponse) { +} + +func (newState *DeleteSecretResponse) SyncEffectiveFieldsDuringRead(existingState DeleteSecretResponse) { +} + // Export a workspace object type ExportRequest struct { // This specifies the format of the exported file. By default, this is @@ -203,6 +329,12 @@ type ExportRequest struct { Path types.String `tfsdk:"-"` } +func (newState *ExportRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExportRequest) { +} + +func (newState *ExportRequest) SyncEffectiveFieldsDuringRead(existingState ExportRequest) { +} + type ExportResponse struct { // The base64-encoded content. If the limit (10MB) is exceeded, exception // with error code **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown. @@ -211,6 +343,12 @@ type ExportResponse struct { FileType types.String `tfsdk:"file_type" tf:"optional"` } +func (newState *ExportResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExportResponse) { +} + +func (newState *ExportResponse) SyncEffectiveFieldsDuringRead(existingState ExportResponse) { +} + // Get secret ACL details type GetAclRequest struct { // The principal to fetch ACL information for. @@ -219,12 +357,24 @@ type GetAclRequest struct { Scope types.String `tfsdk:"-"` } +func (newState *GetAclRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAclRequest) { +} + +func (newState *GetAclRequest) SyncEffectiveFieldsDuringRead(existingState GetAclRequest) { +} + // Get a credential entry type GetCredentialsRequest struct { // The ID for the corresponding credential to access. 
CredentialId types.Int64 `tfsdk:"-"` } +func (newState *GetCredentialsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCredentialsRequest) { +} + +func (newState *GetCredentialsRequest) SyncEffectiveFieldsDuringRead(existingState GetCredentialsRequest) { +} + type GetCredentialsResponse struct { // ID of the credential object in the workspace. CredentialId types.Int64 `tfsdk:"credential_id" tf:""` @@ -235,29 +385,59 @@ type GetCredentialsResponse struct { GitUsername types.String `tfsdk:"git_username" tf:"optional"` } +func (newState *GetCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCredentialsResponse) { +} + +func (newState *GetCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState GetCredentialsResponse) { +} + // Get repo permission levels type GetRepoPermissionLevelsRequest struct { // The repo for which to get or manage permissions. RepoId types.String `tfsdk:"-"` } +func (newState *GetRepoPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRepoPermissionLevelsRequest) { +} + +func (newState *GetRepoPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetRepoPermissionLevelsRequest) { +} + type GetRepoPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []RepoPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetRepoPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRepoPermissionLevelsResponse) { +} + +func (newState *GetRepoPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetRepoPermissionLevelsResponse) { +} + // Get repo permissions type GetRepoPermissionsRequest struct { // The repo for which to get or manage permissions. RepoId types.String `tfsdk:"-"` } +func (newState *GetRepoPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRepoPermissionsRequest) { +} + +func (newState *GetRepoPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetRepoPermissionsRequest) { +} + // Get a repo type GetRepoRequest struct { // ID of the Git folder (repo) object in the workspace. RepoId types.Int64 `tfsdk:"-"` } +func (newState *GetRepoRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRepoRequest) { +} + +func (newState *GetRepoRequest) SyncEffectiveFieldsDuringRead(existingState GetRepoRequest) { +} + type GetRepoResponse struct { // Branch that the local version of the repo is checked out to. Branch types.String `tfsdk:"branch" tf:"optional"` @@ -275,6 +455,12 @@ type GetRepoResponse struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *GetRepoResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetRepoResponse) { +} + +func (newState *GetRepoResponse) SyncEffectiveFieldsDuringRead(existingState GetRepoResponse) { +} + // Get a secret type GetSecretRequest struct { // The key to fetch secret for. @@ -283,6 +469,12 @@ type GetSecretRequest struct { Scope types.String `tfsdk:"-"` } +func (newState *GetSecretRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetSecretRequest) { +} + +func (newState *GetSecretRequest) SyncEffectiveFieldsDuringRead(existingState GetSecretRequest) { +} + type GetSecretResponse struct { // A unique name to identify the secret. 
Key types.String `tfsdk:"key" tf:"optional"` @@ -290,12 +482,24 @@ type GetSecretResponse struct { Value types.String `tfsdk:"value" tf:"optional"` } +func (newState *GetSecretResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetSecretResponse) { +} + +func (newState *GetSecretResponse) SyncEffectiveFieldsDuringRead(existingState GetSecretResponse) { +} + // Get status type GetStatusRequest struct { // The absolute path of the notebook or directory. Path types.String `tfsdk:"-"` } +func (newState *GetStatusRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetStatusRequest) { +} + +func (newState *GetStatusRequest) SyncEffectiveFieldsDuringRead(existingState GetStatusRequest) { +} + // Get workspace object permission levels type GetWorkspaceObjectPermissionLevelsRequest struct { // The workspace object for which to get or manage permissions. @@ -304,11 +508,23 @@ type GetWorkspaceObjectPermissionLevelsRequest struct { WorkspaceObjectType types.String `tfsdk:"-"` } +func (newState *GetWorkspaceObjectPermissionLevelsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWorkspaceObjectPermissionLevelsRequest) { +} + +func (newState *GetWorkspaceObjectPermissionLevelsRequest) SyncEffectiveFieldsDuringRead(existingState GetWorkspaceObjectPermissionLevelsRequest) { +} + type GetWorkspaceObjectPermissionLevelsResponse struct { // Specific permission levels PermissionLevels []WorkspaceObjectPermissionsDescription `tfsdk:"permission_levels" tf:"optional"` } +func (newState *GetWorkspaceObjectPermissionLevelsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWorkspaceObjectPermissionLevelsResponse) { +} + +func (newState *GetWorkspaceObjectPermissionLevelsResponse) SyncEffectiveFieldsDuringRead(existingState GetWorkspaceObjectPermissionLevelsResponse) { +} + // Get workspace object permissions type GetWorkspaceObjectPermissionsRequest struct { // The workspace object for which to get or manage permissions. @@ -317,6 +533,12 @@ type GetWorkspaceObjectPermissionsRequest struct { WorkspaceObjectType types.String `tfsdk:"-"` } +func (newState *GetWorkspaceObjectPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetWorkspaceObjectPermissionsRequest) { +} + +func (newState *GetWorkspaceObjectPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState GetWorkspaceObjectPermissionsRequest) { +} + type Import struct { // The base64-encoded content. This has a limit of 10 MB. // @@ -350,25 +572,55 @@ type Import struct { Path types.String `tfsdk:"path" tf:""` } +func (newState *Import) SyncEffectiveFieldsDuringCreateOrUpdate(plan Import) { +} + +func (newState *Import) SyncEffectiveFieldsDuringRead(existingState Import) { +} + type ImportResponse struct { } +func (newState *ImportResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ImportResponse) { +} + +func (newState *ImportResponse) SyncEffectiveFieldsDuringRead(existingState ImportResponse) { +} + // Lists ACLs type ListAclsRequest struct { // The name of the scope to fetch ACL information from. Scope types.String `tfsdk:"-"` } +func (newState *ListAclsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAclsRequest) { +} + +func (newState *ListAclsRequest) SyncEffectiveFieldsDuringRead(existingState ListAclsRequest) { +} + type ListAclsResponse struct { // The associated ACLs rule applied to principals in the given scope. 
Items []AclItem `tfsdk:"items" tf:"optional"` } +func (newState *ListAclsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListAclsResponse) { +} + +func (newState *ListAclsResponse) SyncEffectiveFieldsDuringRead(existingState ListAclsResponse) { +} + type ListCredentialsResponse struct { // List of credentials. Credentials []CredentialInfo `tfsdk:"credentials" tf:"optional"` } +func (newState *ListCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCredentialsResponse) { +} + +func (newState *ListCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState ListCredentialsResponse) { +} + // Get repos type ListReposRequest struct { // Token used to get the next page of results. If not specified, returns the @@ -381,6 +633,12 @@ type ListReposRequest struct { PathPrefix types.String `tfsdk:"-"` } +func (newState *ListReposRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListReposRequest) { +} + +func (newState *ListReposRequest) SyncEffectiveFieldsDuringRead(existingState ListReposRequest) { +} + type ListReposResponse struct { // Token that can be specified as a query parameter to the `GET /repos` // endpoint to retrieve the next page of results. @@ -389,27 +647,57 @@ type ListReposResponse struct { Repos []RepoInfo `tfsdk:"repos" tf:"optional"` } +func (newState *ListReposResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListReposResponse) { +} + +func (newState *ListReposResponse) SyncEffectiveFieldsDuringRead(existingState ListReposResponse) { +} + type ListResponse struct { // List of objects. Objects []ObjectInfo `tfsdk:"objects" tf:"optional"` } +func (newState *ListResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListResponse) { +} + +func (newState *ListResponse) SyncEffectiveFieldsDuringRead(existingState ListResponse) { +} + type ListScopesResponse struct { // The available secret scopes. Scopes []SecretScope `tfsdk:"scopes" tf:"optional"` } +func (newState *ListScopesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListScopesResponse) { +} + +func (newState *ListScopesResponse) SyncEffectiveFieldsDuringRead(existingState ListScopesResponse) { +} + // List secret keys type ListSecretsRequest struct { // The name of the scope to list secrets within. Scope types.String `tfsdk:"-"` } +func (newState *ListSecretsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSecretsRequest) { +} + +func (newState *ListSecretsRequest) SyncEffectiveFieldsDuringRead(existingState ListSecretsRequest) { +} + type ListSecretsResponse struct { // Metadata information of all secrets contained within the given scope. Secrets []SecretMetadata `tfsdk:"secrets" tf:"optional"` } +func (newState *ListSecretsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListSecretsResponse) { +} + +func (newState *ListSecretsResponse) SyncEffectiveFieldsDuringRead(existingState ListSecretsResponse) { +} + // List contents type ListWorkspaceRequest struct { // UTC timestamp in milliseconds @@ -418,6 +706,12 @@ type ListWorkspaceRequest struct { Path types.String `tfsdk:"-"` } +func (newState *ListWorkspaceRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListWorkspaceRequest) { +} + +func (newState *ListWorkspaceRequest) SyncEffectiveFieldsDuringRead(existingState ListWorkspaceRequest) { +} + type Mkdirs struct { // The absolute path of the directory. If the parent directories do not // exist, it will also create them. 
If the directory already exists, this @@ -425,9 +719,21 @@ type Mkdirs struct { Path types.String `tfsdk:"path" tf:""` } +func (newState *Mkdirs) SyncEffectiveFieldsDuringCreateOrUpdate(plan Mkdirs) { +} + +func (newState *Mkdirs) SyncEffectiveFieldsDuringRead(existingState Mkdirs) { +} + type MkdirsResponse struct { } +func (newState *MkdirsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan MkdirsResponse) { +} + +func (newState *MkdirsResponse) SyncEffectiveFieldsDuringRead(existingState MkdirsResponse) { +} + type ObjectInfo struct { // Only applicable to files. The creation UTC timestamp. CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` @@ -453,6 +759,12 @@ type ObjectInfo struct { Size types.Int64 `tfsdk:"size" tf:"optional"` } +func (newState *ObjectInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ObjectInfo) { +} + +func (newState *ObjectInfo) SyncEffectiveFieldsDuringRead(existingState ObjectInfo) { +} + type PutAcl struct { // The permission level applied to the principal. Permission types.String `tfsdk:"permission" tf:""` @@ -462,9 +774,21 @@ type PutAcl struct { Scope types.String `tfsdk:"scope" tf:""` } +func (newState *PutAcl) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutAcl) { +} + +func (newState *PutAcl) SyncEffectiveFieldsDuringRead(existingState PutAcl) { +} + type PutAclResponse struct { } +func (newState *PutAclResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutAclResponse) { +} + +func (newState *PutAclResponse) SyncEffectiveFieldsDuringRead(existingState PutAclResponse) { +} + type PutSecret struct { // If specified, value will be stored as bytes. BytesValue types.String `tfsdk:"bytes_value" tf:"optional"` @@ -476,9 +800,21 @@ type PutSecret struct { StringValue types.String `tfsdk:"string_value" tf:"optional"` } +func (newState *PutSecret) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutSecret) { +} + +func (newState *PutSecret) SyncEffectiveFieldsDuringRead(existingState PutSecret) { +} + type PutSecretResponse struct { } +func (newState *PutSecretResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan PutSecretResponse) { +} + +func (newState *PutSecretResponse) SyncEffectiveFieldsDuringRead(existingState PutSecretResponse) { +} + type RepoAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -490,6 +826,12 @@ type RepoAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *RepoAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoAccessControlRequest) { +} + +func (newState *RepoAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState RepoAccessControlRequest) { +} + type RepoAccessControlResponse struct { // All permissions. AllPermissions []RepoPermission `tfsdk:"all_permissions" tf:"optional"` @@ -503,6 +845,12 @@ type RepoAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *RepoAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoAccessControlResponse) { +} + +func (newState *RepoAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState RepoAccessControlResponse) { +} + // Git folder (repo) information. type RepoInfo struct { // Name of the current git branch of the git folder (repo). 
@@ -521,6 +869,12 @@ type RepoInfo struct { Url types.String `tfsdk:"url" tf:"optional"` } +func (newState *RepoInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoInfo) { +} + +func (newState *RepoInfo) SyncEffectiveFieldsDuringRead(existingState RepoInfo) { +} + type RepoPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -529,6 +883,12 @@ type RepoPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *RepoPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoPermission) { +} + +func (newState *RepoPermission) SyncEffectiveFieldsDuringRead(existingState RepoPermission) { +} + type RepoPermissions struct { AccessControlList []RepoAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -537,18 +897,36 @@ type RepoPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *RepoPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoPermissions) { +} + +func (newState *RepoPermissions) SyncEffectiveFieldsDuringRead(existingState RepoPermissions) { +} + type RepoPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *RepoPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoPermissionsDescription) { +} + +func (newState *RepoPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState RepoPermissionsDescription) { +} + type RepoPermissionsRequest struct { AccessControlList []RepoAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The repo for which to get or manage permissions. RepoId types.String `tfsdk:"-"` } +func (newState *RepoPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan RepoPermissionsRequest) { +} + +func (newState *RepoPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState RepoPermissionsRequest) { +} + type SecretMetadata struct { // A unique name to identify the secret. Key types.String `tfsdk:"key" tf:"optional"` @@ -556,6 +934,12 @@ type SecretMetadata struct { LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp" tf:"optional"` } +func (newState *SecretMetadata) SyncEffectiveFieldsDuringCreateOrUpdate(plan SecretMetadata) { +} + +func (newState *SecretMetadata) SyncEffectiveFieldsDuringRead(existingState SecretMetadata) { +} + type SecretScope struct { // The type of secret scope backend. BackendType types.String `tfsdk:"backend_type" tf:"optional"` @@ -565,6 +949,12 @@ type SecretScope struct { Name types.String `tfsdk:"name" tf:"optional"` } +func (newState *SecretScope) SyncEffectiveFieldsDuringCreateOrUpdate(plan SecretScope) { +} + +func (newState *SecretScope) SyncEffectiveFieldsDuringRead(existingState SecretScope) { +} + // Sparse checkout configuration, it contains options like cone patterns. type SparseCheckout struct { // List of sparse checkout cone patterns, see [cone mode handling] for @@ -574,6 +964,12 @@ type SparseCheckout struct { Patterns []types.String `tfsdk:"patterns" tf:"optional"` } +func (newState *SparseCheckout) SyncEffectiveFieldsDuringCreateOrUpdate(plan SparseCheckout) { +} + +func (newState *SparseCheckout) SyncEffectiveFieldsDuringRead(existingState SparseCheckout) { +} + // Sparse checkout configuration, it contains options like cone patterns. 
type SparseCheckoutUpdate struct { // List of sparse checkout cone patterns, see [cone mode handling] for @@ -583,6 +979,12 @@ type SparseCheckoutUpdate struct { Patterns []types.String `tfsdk:"patterns" tf:"optional"` } +func (newState *SparseCheckoutUpdate) SyncEffectiveFieldsDuringCreateOrUpdate(plan SparseCheckoutUpdate) { +} + +func (newState *SparseCheckoutUpdate) SyncEffectiveFieldsDuringRead(existingState SparseCheckoutUpdate) { +} + type UpdateCredentialsRequest struct { // The ID for the corresponding credential to access. CredentialId types.Int64 `tfsdk:"-"` @@ -607,9 +1009,21 @@ type UpdateCredentialsRequest struct { PersonalAccessToken types.String `tfsdk:"personal_access_token" tf:"optional"` } +func (newState *UpdateCredentialsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCredentialsRequest) { +} + +func (newState *UpdateCredentialsRequest) SyncEffectiveFieldsDuringRead(existingState UpdateCredentialsRequest) { +} + type UpdateCredentialsResponse struct { } +func (newState *UpdateCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCredentialsResponse) { +} + +func (newState *UpdateCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState UpdateCredentialsResponse) { +} + type UpdateRepoRequest struct { // Branch that the local version of the repo is checked out to. Branch types.String `tfsdk:"branch" tf:"optional"` @@ -625,9 +1039,21 @@ type UpdateRepoRequest struct { Tag types.String `tfsdk:"tag" tf:"optional"` } +func (newState *UpdateRepoRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRepoRequest) { +} + +func (newState *UpdateRepoRequest) SyncEffectiveFieldsDuringRead(existingState UpdateRepoRequest) { +} + type UpdateRepoResponse struct { } +func (newState *UpdateRepoResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateRepoResponse) { +} + +func (newState *UpdateRepoResponse) SyncEffectiveFieldsDuringRead(existingState UpdateRepoResponse) { +} + type WorkspaceObjectAccessControlRequest struct { // name of the group GroupName types.String `tfsdk:"group_name" tf:"optional"` @@ -639,6 +1065,12 @@ type WorkspaceObjectAccessControlRequest struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *WorkspaceObjectAccessControlRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceObjectAccessControlRequest) { +} + +func (newState *WorkspaceObjectAccessControlRequest) SyncEffectiveFieldsDuringRead(existingState WorkspaceObjectAccessControlRequest) { +} + type WorkspaceObjectAccessControlResponse struct { // All permissions. 
AllPermissions []WorkspaceObjectPermission `tfsdk:"all_permissions" tf:"optional"` @@ -652,6 +1084,12 @@ type WorkspaceObjectAccessControlResponse struct { UserName types.String `tfsdk:"user_name" tf:"optional"` } +func (newState *WorkspaceObjectAccessControlResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceObjectAccessControlResponse) { +} + +func (newState *WorkspaceObjectAccessControlResponse) SyncEffectiveFieldsDuringRead(existingState WorkspaceObjectAccessControlResponse) { +} + type WorkspaceObjectPermission struct { Inherited types.Bool `tfsdk:"inherited" tf:"optional"` @@ -660,6 +1098,12 @@ type WorkspaceObjectPermission struct { PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *WorkspaceObjectPermission) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceObjectPermission) { +} + +func (newState *WorkspaceObjectPermission) SyncEffectiveFieldsDuringRead(existingState WorkspaceObjectPermission) { +} + type WorkspaceObjectPermissions struct { AccessControlList []WorkspaceObjectAccessControlResponse `tfsdk:"access_control_list" tf:"optional"` @@ -668,12 +1112,24 @@ type WorkspaceObjectPermissions struct { ObjectType types.String `tfsdk:"object_type" tf:"optional"` } +func (newState *WorkspaceObjectPermissions) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceObjectPermissions) { +} + +func (newState *WorkspaceObjectPermissions) SyncEffectiveFieldsDuringRead(existingState WorkspaceObjectPermissions) { +} + type WorkspaceObjectPermissionsDescription struct { Description types.String `tfsdk:"description" tf:"optional"` // Permission level PermissionLevel types.String `tfsdk:"permission_level" tf:"optional"` } +func (newState *WorkspaceObjectPermissionsDescription) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceObjectPermissionsDescription) { +} + +func (newState *WorkspaceObjectPermissionsDescription) SyncEffectiveFieldsDuringRead(existingState WorkspaceObjectPermissionsDescription) { +} + type WorkspaceObjectPermissionsRequest struct { AccessControlList []WorkspaceObjectAccessControlRequest `tfsdk:"access_control_list" tf:"optional"` // The workspace object for which to get or manage permissions. @@ -681,3 +1137,9 @@ type WorkspaceObjectPermissionsRequest struct { // The workspace object type for which to get or manage permissions. 
WorkspaceObjectType types.String `tfsdk:"-"` } + +func (newState *WorkspaceObjectPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan WorkspaceObjectPermissionsRequest) { +} + +func (newState *WorkspaceObjectPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState WorkspaceObjectPermissionsRequest) { +} From 908981c6d7f24026f5d01fee181b28b60aa76ad7 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 16 Oct 2024 08:51:08 -0400 Subject: [PATCH 49/99] [Doc] Update documentation for `databricks_model_serving` (#4115) ## Changes Changes include: - Add config for Google Cloud Vertex AI - Add more authentication parameters for Open AI config - Add mention of `_plaintext` attributes - Add documentation for AI Gateway - Grammar fixes ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/model_serving.md | 66 +++++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 16 deletions(-) diff --git a/docs/resources/model_serving.md b/docs/resources/model_serving.md index 0bb116bfa9..b9db2a34ff 100644 --- a/docs/resources/model_serving.md +++ b/docs/resources/model_serving.md @@ -45,48 +45,65 @@ resource "databricks_model_serving" "this" { The following arguments are supported: -* `name` - (Required) The name of the model serving endpoint. This field is required and must be unique across a workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. NOTE: Changing this name will delete the existing endpoint and create a new endpoint with the update name. +* `name` - (Required) The name of the model serving endpoint. This field is required and must be unique across a workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. NOTE: Changing this name will delete the existing endpoint and create a new endpoint with the updated name. * `config` - (Required) The model serving endpoint configuration. * `served_entities` - A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities. * `served_models` - (Deprecated, use `served_entities` instead) Each block represents a served model for the endpoint to serve. A model serving endpoint can have up to 10 served models. * `traffic_config` - A single block represents the traffic split configuration amongst the served models. * `auto_capture_config` - Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog. * `tags` - Tags to be attached to the serving endpoint and automatically propagated to billing logs. -* `rate_limits` - A list of rate limits to be applied to the serving endpoint. NOTE: only external and foundation model endpoints are supported as of now. -* `route_optimized` - (Optional) A boolean enabling route optimization for the endpoint. NOTE: only available for custom models. +* `rate_limits` - A list of rate limit blocks to be applied to the serving endpoint. *Note: only external and foundation model endpoints are supported as of now.* +* `ai_gateway` - (Optional) A block with AI Gateway configuration for the serving endpoint. *Note: only external model endpoints are supported as of now.* +* `route_optimized` - (Optional) A boolean enabling route optimization for the endpoint. *Note: only available for custom models.* ### served_entities Configuration Block * `name` - The name of a served entity. 
It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores. If not specified for an external model, this field defaults to `external_model.name`, with '.' and ':' replaced with '-', and if not specified for other entities, it defaults to -. -* `external_model` - The external model to be served. NOTE: Only one of `external_model` and (`entity_name`, `entity_version`, `workload_size`, `workload_type`, and `scale_to_zero_enabled`) can be specified with the latter set being used for custom model serving for a Databricks registered model. When an `external_model` is present, the served entities list can only have one `served_entity` object. For an existing endpoint with `external_model`, it can not be updated to an endpoint without `external_model`. If the endpoint is created without `external_model`, users cannot update it to add `external_model` later. - * `provider` - (Required) The name of the provider for the external model. Currently, the supported providers are `ai21labs`, `anthropic`, `amazon-bedrock`, `cohere`, `databricks-model-serving`, `openai`, and `palm`. +* `external_model` - The external model to be served. NOTE: Only one of `external_model` and (`entity_name`, `entity_version`, `workload_size`, `workload_type`, and `scale_to_zero_enabled`) can be specified with the latter set being used for custom model serving for a Databricks registered model. When an `external_model` is present, the served entities list can only have one `served_entity` object. An existing endpoint with `external_model` can not be updated to an endpoint without `external_model`. If the endpoint is created without `external_model`, users cannot update it to add `external_model` later. + * `provider` - (Required) The name of the provider for the external model. Currently, the supported providers are `ai21labs`, `anthropic`, `amazon-bedrock`, `cohere`, `databricks-model-serving`, `google-cloud-vertex-ai`, `openai`, and `palm`. * `name` - The name of the external model. * `task` - The task type of the external model. - * `config` - The config for the external model, which must match the provider. + * `config` - The config for the external model, which must match the provider. *Note that API keys could be provided either as a reference to the Databricks Secret (parameters without `_plaintext` suffix) or in plain text (parameters with `_plaintext` suffix)!* * `ai21labs_config` - AI21Labs Config * `ai21labs_api_key` - The Databricks secret key reference for an AI21Labs API key. + * `ai21labs_api_key_plaintext` - An AI21 Labs API key provided as a plaintext string. * `anthropic_config` - Anthropic Config * `anthropic_api_key` - The Databricks secret key reference for an Anthropic API key. - The Databricks secret key reference for an Anthropic API key. + * `anthropic_api_key_plaintext` - The Anthropic API key provided as a plaintext string. * `amazon_bedrock_config` - Amazon Bedrock Config * `aws_region` - The AWS region to use. Bedrock has to be enabled there. * `aws_access_key_id` - The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services. + * `aws_access_key_id_plaintext` - An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. * `aws_secret_access_key` - The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services. 
+ * `aws_secret_access_key_plaintext` - An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. * `bedrock_provider` - The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: `Anthropic`, `Cohere`, `AI21Labs`, `Amazon`. * `cohere_config` - Cohere Config * `cohere_api_key` - The Databricks secret key reference for a Cohere API key. + * `cohere_api_key_plaintext` - The Cohere API key provided as a plaintext string. * `databricks_model_serving_config` - Databricks Model Serving Config * `databricks_api_token` - The Databricks secret key reference for a Databricks API token that corresponds to a user or service principal with Can Query access to the model serving endpoint pointed to by this external model. + * `databricks_api_token_plaintext` - The Databricks API token that corresponds to a user or service principal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string. * `databricks_workspace_url` - The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model. + * `google_cloud_vertex_ai_config` - Google Cloud Vertex AI Config. + * `private_key` - The Databricks secret key reference for a private key for the service account that has access to the Google Cloud Vertex AI Service. + * `private_key_plaintext` - The private key for the service account that has access to the Google Cloud Vertex AI Service is provided as a plaintext secret. + * `project_id` - This is the Google Cloud project id that the service account is associated with. + * `region` - This is the region for the Google Cloud Vertex AI Service. * `openai_config` - OpenAI Config * `openai_api_key` - The Databricks secret key reference for an OpenAI or Azure OpenAI API key. - * `openai_api_type` - This is an optional field to specify the type of OpenAI API to use. For Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security access validation protocol. For access token validation, use azure. For authentication using Azure Active Directory (Azure AD) use, azuread. - * `openai_api_base` - This is the base URL for the OpenAI API (default: "https://api.openai.com/v1"). For Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service provided by Azure. - * `openai_api_version` - This is an optional field to specify the OpenAI API version. For Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to utilize, specified by a date. + * `openai_api_key_plaintext` - The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. + * `openai_api_type` - This is an optional field to specify the type of OpenAI API to use. For Azure OpenAI, this field is required, and this parameter represents the preferred security access validation protocol. For access token validation, use `azure`. For authentication using Azure Active Directory (Azure AD) use, `azuread`. + * `microsoft_entra_client_id` - This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID. + * `microsoft_entra_client_secret` - The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication. + * `microsoft_entra_client_secret_plaintext` - The client secret used for Microsoft Entra ID authentication provided as a plaintext string. 
+ * `microsoft_entra_tenant_id` - This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID. + * `openai_api_base` - This is the base URL for the OpenAI API (default: "https://api.openai.com/v1"). For Azure OpenAI, this field is required and is the base URL for the Azure OpenAI API service provided by Azure. + * `openai_api_version` - This is an optional field to specify the OpenAI API version. For Azure OpenAI, this field is required and is the version of the Azure OpenAI service to utilize, specified by a date. * `openai_organization` - This is an optional field to specify the organization in OpenAI or Azure OpenAI. * `openai_deployment_name` - This field is only required for Azure OpenAI and is the name of the deployment resource for the Azure OpenAI service. * `palm_config` - PaLM Config * `palm_api_key` - The Databricks secret key reference for a PaLM API key. + * `palm_api_key_plaintext` - The PaLM API key provided as a plaintext string. * `entity_name` - The name of the entity to be served. The entity may be a model in the Databricks Model Registry, a model in the Unity Catalog (UC), or a function of type `FEATURE_SPEC` in the UC. If it is a UC object, the full name of the object should be given in the form of `catalog_name.schema_name.model_name`. * `entity_version` - The version of the model in Databricks Model Registry to be served or empty if the entity is a `FEATURE_SPEC`. * `min_provisioned_throughput`- The minimum tokens per second that the endpoint can scale down to. @@ -94,7 +111,7 @@ The following arguments are supported: * `workload_size` - The workload size of the served entity. The workload size corresponds to a range of provisioned concurrency that the compute autoscales between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are `Small` (4 - 4 provisioned concurrency), `Medium` (8 - 16 provisioned concurrency), and `Large` (16 - 64 provisioned concurrency). If `scale-to-zero` is enabled, the lower bound of the provisioned concurrency for each workload size is 0. * `workload_type` - The workload type of the served entity. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is `CPU`. For deep learning workloads, GPU acceleration is available by selecting workload types like `GPU_SMALL` and others. See the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types). * `scale_to_zero_enabled` - Whether the compute resources for the served entity should scale down to zero. -* `environment_vars` - An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and subject to change. Example entity environment variables that refer to Databricks secrets: ```{"OPENAI_API_KEY": "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": "{{secrets/my_scope2/my_key2}}"}``` +* `environment_vars` - An object containing a set of optional, user-specified environment variable key-value pairs used for serving this entity. Note: this is an experimental feature and is subject to change. Example entity environment variables that refer to Databricks secrets: ```{"OPENAI_API_KEY": "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": "{{secrets/my_scope2/my_key2}}"}``` * `instance_profile_arn` - ARN of the instance profile that the served entity uses to access AWS resources. 
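+
+As a hedged illustration of the `served_entities` / `external_model` attributes documented above (not part of this change itself), a minimal block routed to the newly documented Google Cloud Vertex AI provider could look like the sketch below; the secret scope/key, project, region, and task values are placeholders:
+
+```hcl
+resource "databricks_model_serving" "vertex_example" {
+  name = "vertex-external-endpoint"
+
+  config {
+    served_entities {
+      name = "gemini-pro"
+      external_model {
+        name     = "gemini-pro"
+        provider = "google-cloud-vertex-ai"
+        task     = "llm/v1/chat"
+        google_cloud_vertex_ai_config {
+          # Prefer the secret reference over the `_plaintext` variant.
+          private_key = "{{secrets/my_scope/vertex_private_key}}"
+          project_id  = "my-gcp-project"
+          region      = "us-central1"
+        }
+      }
+    }
+  }
+}
+```
+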
### served_models Configuration Block (deprecated) @@ -106,8 +123,8 @@ The following arguments are supported: * `model_version` - (Required) The version of the model in Databricks Model Registry to be served. * `workload_size` - (Required) The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between. A single unit of provisioned concurrency can process one request at a time. Valid workload sizes are `Small` (4 - 4 provisioned concurrency), `Medium` (8 - 16 provisioned concurrency), and `Large` (16 - 64 provisioned concurrency). * `scale_to_zero_enabled` - Whether the compute resources for the served model should scale down to zero. If `scale-to-zero` is enabled, the lower bound of the provisioned concurrency for each workload size will be 0. The default value is `true`. -* `workload_type` - The workload type of the served model. The workload type selects which type of compute to use in the endpoint. For deep learning workloads, GPU acceleration is available by selecting workload types like `GPU_SMALL` and others. See documentation for all options. The default value is `CPU`. -* `environment_vars` - (Optional) a map of environment variable name/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. +* `workload_type` - The workload type of the served model. The workload type selects which type of compute to use in the endpoint. For deep learning workloads, GPU acceleration is available by selecting workload types like `GPU_SMALL` and others. See the documentation for all options. The default value is `CPU`. +* `environment_vars` - (Optional) a map of environment variable names/values that will be used for serving this model. Environment variables may refer to Databricks secrets using the standard syntax: `{{secrets/secret_scope/secret_key}}`. * `instance_profile_arn` - (Optional) ARN of the instance profile that the served model will use to access AWS resources. ### traffic_config Configuration Block @@ -121,7 +138,7 @@ The following arguments are supported: * `catalog_name` - The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set. * `schema_name` - The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set. * `table_name_prefix` - The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set. -* `enabled` - If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again. +* `enabled` - If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable it again. ### tags Configuration Block @@ -131,12 +148,27 @@ The following arguments are supported: ### rate_limits Configuration Block * `calls` - (Required) Used to specify how many calls are allowed for a key within the renewal_period. -* `key` - Key field for a serving endpoint rate limit. Currently, only `user` and `endpoint` are supported, with `endpoint` being the default if not specified. +* `key` - (Optional) Key field for a serving endpoint rate limit. Currently, only `user` and `endpoint` are supported, with `endpoint` being the default if not specified. * `renewal_period` - (Required) Renewal period field for a serving endpoint rate limit. 
Currently, only `minute` is supported.
+### ai_gateway Configuration Block
+
+* `guardrails` - (Optional) Block with configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses. Consists of the following attributes:
+  * `input` - A block with configuration for input guardrail filters:
+    * `invalid_keywords` - List of invalid keywords. AI guardrail uses keyword or string matching to decide if the keyword exists in the request or response content.
+    * `valid_topics` - The list of allowed topics. Given a chat request, this guardrail flags the request if its topic is not in the allowed topics.
+    * `safety` - The boolean flag that indicates whether the safety filter is enabled.
+    * `pii` - Block with configuration for the guardrail PII filter:
+      * `behavior` - A string that describes the behavior for the PII filter. Currently, only the `BLOCK` value is supported.
+  * `output` - A block with configuration for output guardrail filters. Has the same structure as the `input` block.
+* `rate_limits` - (Optional) Block describing rate limits for AI gateway. For details see the description of the `rate_limits` block above.
+* `usage_tracking_config` - (Optional) Block describing the configuration of usage tracking. Consists of the following attributes:
+  * `enabled` - Boolean flag specifying if usage tracking is enabled.
+* `inference_table_config` - (Optional) Block with configuration for payload logging using inference tables. For details see the description of the `auto_capture_config` block above.
+
 ## Attribute Reference
 
-In addition to all arguments above, the following attributes are exported:
+In addition to all the arguments above, the following attributes are exported:
 
 * `id` - Equal to the `name` argument and used to identify the serving endpoint.
 * `serving_endpoint_id` - Unique identifier of the serving endpoint primarily used to set permissions and refer to this instance for other operations.
@@ -174,3 +206,5 @@ The following resources are often used in the same context:
 * [databricks_notebook](notebook.md) to manage [Databricks Notebooks](https://docs.databricks.com/notebooks/index.html).
 * [databricks_notebook](../data-sources/notebook.md) data to export a notebook from Databricks Workspace.
 * [databricks_repo](repo.md) to manage [Databricks Repos](https://docs.databricks.com/repos.html).
+
+

From 2bbf2511e4fd74fe7a560fb509ff87285ada22a8 Mon Sep 17 00:00:00 2001
From: Omer Lachish <289488+rauchy@users.noreply.github.com>
Date: Wed, 16 Oct 2024 14:51:40 +0200
Subject: [PATCH 50/99] [Fix] Mark unity_catalog_provisioning_state as ReadOnly (#4116)

## Changes

In a recent update to the GoSDK, a new field called `unity_catalog_provisioning_state` was added. This field changes values based on server decisions, so it's supposed to be marked as computed. This pull request sorts out that issue.
## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK Co-authored-by: Omer Lachish --- catalog/resource_online_table.go | 1 + docs/resources/online_table.md | 1 + 2 files changed, 2 insertions(+) diff --git a/catalog/resource_online_table.go b/catalog/resource_online_table.go index 7c317a9742..ca24d5f76f 100644 --- a/catalog/resource_online_table.go +++ b/catalog/resource_online_table.go @@ -59,6 +59,7 @@ func ResourceOnlineTable() common.Resource { common.CustomizeSchemaPath(m, "spec", "source_table_full_name").SetCustomSuppressDiff(common.EqualFoldDiffSuppress) common.CustomizeSchemaPath(m, "name").SetRequired().SetForceNew() common.CustomizeSchemaPath(m, "status").SetReadOnly() + common.CustomizeSchemaPath(m, "unity_catalog_provisioning_state").SetReadOnly() common.CustomizeSchemaPath(m, "table_serving_url").SetReadOnly() common.CustomizeSchemaPath(m, "spec", "pipeline_id").SetReadOnly() diff --git a/docs/resources/online_table.md b/docs/resources/online_table.md index df026991aa..9119c191e4 100644 --- a/docs/resources/online_table.md +++ b/docs/resources/online_table.md @@ -49,6 +49,7 @@ In addition to all arguments above, the following attributes are exported: * `detailed_state` - The state of the online table. * `message` - A text description of the current state of the online table. * `table_serving_url` - Data serving REST API URL for this table. +* `unity_catalog_provisioning_state` - The provisioning state of the online table entity in Unity Catalog. This is distinct from the state of the data synchronization pipeline (i.e. the table may be in "ACTIVE" but the pipeline may be in "PROVISIONING" as it runs asynchronously). ## Import From c3ae6f3b20d70e542f8977ab96fe64ff4c1d5d34 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 16 Oct 2024 08:59:16 -0400 Subject: [PATCH 51/99] [Doc] Document `budget_policy_id` in `databricks_pipeline` and `databricks_job` (#4110) ## Changes `databricks_pipeline` already has this change and for `databricks_job` we need to merge Go SDK 0.49.0 ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/job.md | 1 + docs/resources/pipeline.md | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/resources/job.md b/docs/resources/job.md index dc8eebc587..efc6bd8ca7 100644 --- a/docs/resources/job.md +++ b/docs/resources/job.md @@ -107,6 +107,7 @@ The resource supports the following arguments: * `notification_settings` - (Optional) An optional block controlling the notification settings on the job level [documented below](#notification_settings-configuration-block). * `health` - (Optional) An optional block that specifies the health conditions for the job [documented below](#health-configuration-block). * `tags` - (Optional) An optional map of the tags associated with the job. See [tags Configuration Map](#tags-configuration-map) +* `budget_policy_id` - (Optional) The ID of the user-specified budget policy to use for this job. If not specified, a default budget policy may be applied when creating or modifying the job. 
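+
+As a hedged sketch of how the new attribute can be set (the policy ID and task below are placeholders, not values taken from this change), see the example following this list:
+
+```hcl
+resource "databricks_job" "this" {
+  name             = "Job with budget policy"
+  budget_policy_id = "11111111-2222-3333-4444-555555555555" # hypothetical budget policy ID
+
+  task {
+    task_key = "main"
+    notebook_task {
+      notebook_path = "/Shared/example"
+    }
+    # compute settings omitted for brevity
+  }
+}
+```
+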
### task Configuration Block diff --git a/docs/resources/pipeline.md b/docs/resources/pipeline.md index 6e610aa435..76a60d75db 100644 --- a/docs/resources/pipeline.md +++ b/docs/resources/pipeline.md @@ -83,6 +83,7 @@ The following arguments are supported: * `target` - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI. * `edition` - optional name of the [product edition](https://docs.databricks.com/data-engineering/delta-live-tables/delta-live-tables-concepts.html#editions). Supported values are: `CORE`, `PRO`, `ADVANCED` (default). Not required when `serverless` is set to `true`. * `channel` - optional name of the release channel for Spark version used by DLT pipeline. Supported values are: `CURRENT` (default) and `PREVIEW`. +* `budget_policy_id` - optional string specifying ID of the budget policy for this DLT pipeline. * `allow_duplicate_names` - Optional boolean flag. If false, deployment will fail if name conflicts with that of another pipeline. default is `false`. * `deployment` - Deployment type of this pipeline. Supports following attributes: * `kind` - The deployment method that manages the pipeline. From e24c78008e2f2c2994f45bc461c2bfae3f0d8dfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Diego=20G=C3=B3mez=20Moreno?= <69928678+dgomez04@users.noreply.github.com> Date: Thu, 17 Oct 2024 02:58:32 -0600 Subject: [PATCH 52/99] [Feature] Add data source `databricks_notification_destinations` (#4087) ## Changes Added `databricks_notification_destinations` data source as requested in #3950 to retrieve notification destinations that are created out-of-band or in other terraform templates. ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Alex Ott --- .../data-sources/notification_destinations.md | 62 ++++++++++ internal/providers/pluginfw/pluginfw.go | 2 + .../data_notification_destinations.go | 116 ++++++++++++++++++ ...data_notification_destinations_acc_test.go | 63 ++++++++++ .../data_notification_destinations_test.go | 15 +++ 5 files changed, 258 insertions(+) create mode 100644 docs/data-sources/notification_destinations.md create mode 100755 internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations.go create mode 100644 internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations_acc_test.go create mode 100755 internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations_test.go diff --git a/docs/data-sources/notification_destinations.md b/docs/data-sources/notification_destinations.md new file mode 100644 index 0000000000..777e41f5cb --- /dev/null +++ b/docs/data-sources/notification_destinations.md @@ -0,0 +1,62 @@ +--- +subcategory: "Workspace" +--- +# databricks_notification_destinations Data Source + +This data source allows you to retrieve information about [Notification Destinations](https://docs.databricks.com/api/workspace/notificationdestinations). Notification Destinations are used to send notifications for query alerts and jobs to external systems such as email, Slack, Microsoft Teams, PagerDuty, or generic webhooks. 
+
+## Example Usage
+
+
+```hcl
+resource "databricks_notification_destination" "email" {
+  display_name = "Email Destination"
+  config {
+    email {
+      addresses = ["abc@gmail.com"]
+    }
+  }
+}
+
+resource "databricks_notification_destination" "slack" {
+  display_name = "Slack Destination"
+  config {
+    slack {
+      url = "https://hooks.slack.com/services/..."
+    }
+  }
+}
+
+# Lists all notification destinations
+data "databricks_notification_destinations" "this" {}
+
+# List destinations of specific type and name
+data "databricks_notification_destinations" "filtered_notification" {
+  display_name_contains = "Destination"
+  type                  = "EMAIL"
+}
+```
+
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `display_name_contains` - (Optional) A **case-insensitive** substring to filter Notification Destinations by their display name.
+* `type` - (Optional) The type of the Notification Destination to filter by. Valid values are:
+  * `EMAIL` - Filters Notification Destinations of type Email.
+  * `MICROSOFT_TEAMS` - Filters Notification Destinations of type Microsoft Teams.
+  * `PAGERDUTY` - Filters Notification Destinations of type PagerDuty.
+  * `SLACK` - Filters Notification Destinations of type Slack.
+  * `WEBHOOK` - Filters Notification Destinations of type Webhook.
+
+## Attribute Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `notification_destinations` - A list of Notification Destinations matching the specified criteria. Each element contains the following attributes:
+  * `id` - The unique ID of the Notification Destination.
+  * `display_name` - The display name of the Notification Destination.
+  * `destination_type` - The type of the notification destination. Possible values are `EMAIL`, `MICROSOFT_TEAMS`, `PAGERDUTY`, `SLACK`, or `WEBHOOK`.
+
+If no matches are found, an empty list will be returned.
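+
+As a hedged usage sketch (the resource names below are placeholders), the returned IDs can be referenced wherever a notification destination ID is accepted, for example in a job's webhook notifications:
+
+```hcl
+data "databricks_notification_destinations" "slack" {
+  type = "SLACK"
+}
+
+resource "databricks_job" "alerting_job" {
+  name = "Job with Slack alerts"
+
+  webhook_notifications {
+    on_failure {
+      id = data.databricks_notification_destinations.slack.notification_destinations[0].id
+    }
+  }
+}
+```
+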
diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index cc51975133..db811d5ae2 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -18,6 +18,7 @@ import ( providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/registered_model" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" @@ -54,6 +55,7 @@ func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []f cluster.DataSourceCluster, volume.DataSourceVolumes, registered_model.DataSourceRegisteredModel, + notificationdestinations.DataSourceNotificationDestinations, } } diff --git a/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations.go b/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations.go new file mode 100755 index 0000000000..441877fdd8 --- /dev/null +++ b/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations.go @@ -0,0 +1,116 @@ +package notificationdestinations + +import ( + "context" + "fmt" + "slices" + "strings" + + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/settings_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func DataSourceNotificationDestinations() datasource.DataSource { + return &NotificationDestinationsDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &NotificationDestinationsDataSource{} + +type NotificationDestinationsDataSource struct { + Client *common.DatabricksClient +} + +type NotificationDestinationsInfo struct { + DisplayNameContains types.String `tfsdk:"display_name_contains" tf:"optional"` + Type types.String `tfsdk:"type" tf:"optional"` + NotificationDestinations []settings_tf.ListNotificationDestinationsResult `tfsdk:"notification_destinations" tf:"computed"` +} + +func (d *NotificationDestinationsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "databricks_notification_destinations" +} + +func (d *NotificationDestinationsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(NotificationDestinationsInfo{}, nil) + resp.Schema = schema.Schema{ + 
Attributes: attrs, + Blocks: blocks, + } +} + +func (d *NotificationDestinationsDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func validateType(notificationType string) diag.Diagnostics { + validTypes := []string{ + string(settings.DestinationTypeEmail), + string(settings.DestinationTypeMicrosoftTeams), + string(settings.DestinationTypePagerduty), + string(settings.DestinationTypeSlack), + string(settings.DestinationTypeWebhook), + } + + if !slices.Contains(validTypes, notificationType) { + return diag.Diagnostics{diag.NewErrorDiagnostic(fmt.Sprintf("Invalid type '%s'; valid types are %s.", notificationType, strings.Join(validTypes, ", ")), "")} + } + return nil +} + +func AppendDiagAndCheckErrors(resp *datasource.ReadResponse, diags diag.Diagnostics) bool { + resp.Diagnostics.Append(diags...) + return resp.Diagnostics.HasError() +} + +func (d *NotificationDestinationsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + if AppendDiagAndCheckErrors(resp, diags) { + return + } + + var notificationInfo NotificationDestinationsInfo + if AppendDiagAndCheckErrors(resp, req.Config.Get(ctx, ¬ificationInfo)) { + return + } + + notificationType := notificationInfo.Type.ValueString() + notificationDisplayName := strings.ToLower(notificationInfo.DisplayNameContains.ValueString()) + + if notificationType != "" && AppendDiagAndCheckErrors(resp, validateType(notificationType)) { + return + } + + notificationsGoSdk, err := w.NotificationDestinations.ListAll(ctx, settings.ListNotificationDestinationsRequest{}) + if err != nil { + resp.Diagnostics.AddError("Failed to fetch Notification Destinations", err.Error()) + return + } + + var notificationsTfSdk []settings_tf.ListNotificationDestinationsResult + for _, notification := range notificationsGoSdk { + if (notificationType != "" && notification.DestinationType.String() != notificationType) || + (notificationDisplayName != "" && !strings.Contains(strings.ToLower(notification.DisplayName), notificationDisplayName)) { + continue + } + + var notificationDestination settings_tf.ListNotificationDestinationsResult + if AppendDiagAndCheckErrors(resp, converters.GoSdkToTfSdkStruct(ctx, notification, ¬ificationDestination)) { + return + } + notificationsTfSdk = append(notificationsTfSdk, notificationDestination) + } + + notificationInfo.NotificationDestinations = notificationsTfSdk + resp.Diagnostics.Append(resp.State.Set(ctx, notificationInfo)...) 
+ +} diff --git a/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations_acc_test.go b/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations_acc_test.go new file mode 100644 index 0000000000..cf72b0da2d --- /dev/null +++ b/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations_acc_test.go @@ -0,0 +1,63 @@ +package notificationdestinations_test + +import ( + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func CheckDataSourceNotificationsPopulated(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + ds, ok := s.Modules[0].Resources["data.databricks_notification_destinations.this"] + require.True(t, ok, "data.databricks_notification_destinations.this has to be there") + + notificationCount := ds.Primary.Attributes["notification_destinations.#"] + require.Equal(t, "2", notificationCount, "expected two notifications") + + notificationIds := []string{ + ds.Primary.Attributes["notification_destinations.0.id"], + ds.Primary.Attributes["notification_destinations.1.id"], + } + + expectedNotificationIds := []string{ + s.Modules[0].Resources["databricks_notification_destination.email_notification"].Primary.ID, + s.Modules[0].Resources["databricks_notification_destination.slack_notification"].Primary.ID, + } + + assert.ElementsMatch(t, expectedNotificationIds, notificationIds, "expected notification_destination ids to match") + + return nil + } +} + +func TestAccNotificationsCreation(t *testing.T) { + acceptance.WorkspaceLevel(t, acceptance.Step{ + Template: ` + resource "databricks_notification_destination" "email_notification" { + display_name = "email notification destination" + config { + email { + addresses = ["abc@gmail.com"] + } + } + } + + resource "databricks_notification_destination" "slack_notification" { + display_name = "slack notification destination" + config { + slack { + url = "https://hooks.slack.com/services/..." 
+ } + } + } + + data "databricks_notification_destinations" "this" { + depends_on = [databricks_notification_destination.email_notification, databricks_notification_destination.slack_notification] + } + `, + Check: CheckDataSourceNotificationsPopulated(t), + }) +} diff --git a/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations_test.go b/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations_test.go new file mode 100755 index 0000000000..45f9625bd5 --- /dev/null +++ b/internal/providers/pluginfw/resources/notificationdestinations/data_notification_destinations_test.go @@ -0,0 +1,15 @@ +package notificationdestinations + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/stretchr/testify/assert" +) + +func TestValidateType_InvalidType(t *testing.T) { + actualDiagnostics := validateType("INVALID") + expectedDiagnostics := diag.Diagnostics{diag.NewErrorDiagnostic("Invalid type 'INVALID'; valid types are EMAIL, MICROSOFT_TEAMS, PAGERDUTY, SLACK, WEBHOOK.", "")} + assert.True(t, actualDiagnostics.HasError()) + assert.Equal(t, expectedDiagnostics, actualDiagnostics) +} From 1eb0c51fce11b2583d1db352d8b040b12a2ea489 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 17 Oct 2024 13:41:03 +0200 Subject: [PATCH 53/99] [Fix] Tolerate invalid keys in `databricks_workspace_conf` (#4102) ## Changes Occasionally, support for configuration keys is removed from the GetStatus/SetStatus APIs used to manage workspace configuration. When this happens, users who depended on those keys are in a bad state. The provider queries for the values for each configuration by key in the Terraform state, but those keys no longer exist. To work around this, this PR makes `databricks_workspace_conf` resilient to keys being removed. On the read path, the provider queries for the status of all keys in state. If any key is no longer valid, it is removed from the request and the request is retried. Setting the value for invalid configuration keys is not supported. When removing an unsupported configuration key, the provider resets the key to an original state (`false` or `""`). Failures to reset invalid keys are ignored, both on the update path (removing an unsupported key from the conf) and on the delete path (removing the `databricks_workspace_conf` resource altogether). ## Tests Integration tests verify that the new SafeGetStatus and SafeSetStatus methods work with invalid keys as expected. On the read side, they are ignored, and on the write side, they are ignored if marked for removal. 
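
For illustration, the scenario this change tolerates looks roughly like the configuration sketched below; the `enableSomeRemovedFeature` key is hypothetical and stands in for any key that the GetStatus/SetStatus APIs no longer support. With this change, such a key is skipped on read, and failures to reset it are ignored when it is removed from the conf or when the resource is deleted.

```hcl
resource "databricks_workspace_conf" "this" {
  custom_config = {
    "enableIpAccessLists" = "true"
    # hypothetical key that the backend no longer supports
    "enableSomeRemovedFeature" = "true"
  }
}
```
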
- [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- internal/acceptance/workspace_conf_test.go | 49 +++++++++++- workspace/resource_workspace_conf.go | 88 ++++++++++++++++++++-- 2 files changed, 130 insertions(+), 7 deletions(-) diff --git a/internal/acceptance/workspace_conf_test.go b/internal/acceptance/workspace_conf_test.go index f0468ea6b2..80f12bfe89 100644 --- a/internal/acceptance/workspace_conf_test.go +++ b/internal/acceptance/workspace_conf_test.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/terraform-provider-databricks/workspace" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -58,7 +59,7 @@ func TestAccWorkspaceConfFullLifecycle(t *testing.T) { } }`, // Assert on server side error returned - ExpectError: regexp.MustCompile(`cannot update workspace conf: Invalid keys`), + ExpectError: regexp.MustCompile(`cannot update workspace conf: failed to set workspace conf because some new keys are invalid: enableIpAccessLissss`), }, Step{ // Set enableIpAccessLists to true with strange case and maxTokenLifetimeDays to verify // failed deletion case @@ -79,3 +80,49 @@ func TestAccWorkspaceConfFullLifecycle(t *testing.T) { }, }) } + +func TestAccWorkspaceConf_GetValidKey(t *testing.T) { + loadWorkspaceEnv(t) + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + conf, err := workspace.SafeGetStatus(ctx, w, []string{"enableIpAccessLists"}) + assert.NoError(t, err) + assert.Contains(t, conf, "enableIpAccessLists") +} + +func TestAccWorkspaceConf_GetInvalidKey(t *testing.T) { + loadWorkspaceEnv(t) + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + conf, err := workspace.SafeGetStatus(ctx, w, []string{"invalidKey", "enableIpAccessLists"}) + assert.NoError(t, err) + assert.Contains(t, conf, "enableIpAccessLists") +} + +func TestAccWorkspaceConf_GetOnlyInvalidKeys(t *testing.T) { + loadWorkspaceEnv(t) + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + _, err := workspace.SafeGetStatus(ctx, w, []string{"invalidKey"}) + assert.ErrorContains(t, err, "failed to get workspace conf because all keys are invalid: invalidKey") +} + +func TestAccWorkspaceConf_SetInvalidKey(t *testing.T) { + loadWorkspaceEnv(t) + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + err := workspace.SafeSetStatus(ctx, w, map[string]struct{}{}, map[string]string{ + "invalidKey": "invalidValue", + }) + assert.ErrorContains(t, err, "failed to set workspace conf because some new keys are invalid: invalidKey") +} + +func TestAccWorkspaceConf_DeleteInvalidKey(t *testing.T) { + loadWorkspaceEnv(t) + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + err := workspace.SafeSetStatus(ctx, w, map[string]struct{}{"invalidKey": {}}, map[string]string{ + "invalidKey": "", + }) + assert.NoError(t, err) +} diff --git a/workspace/resource_workspace_conf.go b/workspace/resource_workspace_conf.go index 2c95fc3881..c24b077875 100644 --- a/workspace/resource_workspace_conf.go +++ b/workspace/resource_workspace_conf.go @@ -5,13 +5,16 @@ package workspace import ( "context" + "encoding/json" "errors" "fmt" "log" + "slices" "sort" "strconv" 
"strings" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/internal/docs" @@ -30,6 +33,7 @@ func applyWorkspaceConf(ctx context.Context, d *schema.ResourceData, c *common.D return fmt.Errorf("internal type casting error") } log.Printf("[DEBUG] Old workspace config: %v, new: %v", old, new) + removed := map[string]struct{}{} patch := settings.WorkspaceConf{} // Add new configuration keys @@ -44,6 +48,7 @@ func applyWorkspaceConf(ctx context.Context, d *schema.ResourceData, c *common.D continue } log.Printf("[DEBUG] Erasing configuration of %s", k) + removed[k] = struct{}{} switch r := v.(type) { default: patch[k] = "" @@ -63,7 +68,7 @@ func applyWorkspaceConf(ctx context.Context, d *schema.ResourceData, c *common.D if err != nil { return err } - err = w.WorkspaceConf.SetStatus(ctx, patch) + err = SafeSetStatus(ctx, w, removed, patch) if err != nil { return err } @@ -121,7 +126,7 @@ func deleteWorkspaceConf(ctx context.Context, d *schema.ResourceData, c *common. case bool: patch[k] = "false" } - err = w.WorkspaceConf.SetStatus(ctx, patch) + err = SafeSetStatus(ctx, w, map[string]struct{}{k: {}}, patch) // Tolerate errors like the following on deletion: // cannot delete workspace conf: Some values are not allowed: {"enableGp3":"","enableIpAccessLists":""} // The API for workspace conf is quite limited and doesn't support a generic "reset to original state" @@ -136,6 +141,79 @@ func deleteWorkspaceConf(ctx context.Context, d *schema.ResourceData, c *common. return nil } +func parseInvalidKeysFromError(err error) ([]string, error) { + var apiErr *apierr.APIError + // The workspace conf API returns an error with a message like "Invalid keys: [key1, key2, ...]" + // when some keys are invalid. We parse this message to get the list of invalid keys. + if errors.As(err, &apiErr) && strings.HasPrefix(apiErr.Message, "Invalid keys: ") { + invalidKeysStr := strings.TrimPrefix(apiErr.Message, "Invalid keys: ") + var invalidKeys []string + err = json.Unmarshal([]byte(invalidKeysStr), &invalidKeys) + if err != nil { + return nil, fmt.Errorf("failed to parse missing keys: %w", err) + } + return invalidKeys, nil + } + return nil, nil +} + +// SafeGetStatus is a wrapper around the GetStatus API that tolerates invalid keys. +// If any of the provided keys are not valid, the GetStatus API is called again with only the valid keys. +// If all keys are invalid, an error is returned. +func SafeGetStatus(ctx context.Context, w *databricks.WorkspaceClient, keys []string) (map[string]string, error) { + conf, err := w.WorkspaceConf.GetStatus(ctx, settings.GetStatusRequest{ + Keys: strings.Join(keys, ","), + }) + invalidKeys, parseErr := parseInvalidKeysFromError(err) + if parseErr != nil { + return nil, parseErr + } else if invalidKeys != nil { + tflog.Warn(ctx, fmt.Sprintf("the following keys are not supported by the api: %s. 
Remove these keys from the configuration to avoid this warning.", strings.Join(invalidKeys, ", "))) + // Request again but remove invalid keys + validKeys := make([]string, 0, len(keys)) + for _, k := range keys { + if !slices.Contains(invalidKeys, k) { + validKeys = append(validKeys, k) + } + } + if len(validKeys) == 0 { + return nil, fmt.Errorf("failed to get workspace conf because all keys are invalid: %s", strings.Join(keys, ", ")) + } + conf, err = w.WorkspaceConf.GetStatus(context.Background(), settings.GetStatusRequest{ + Keys: strings.Join(validKeys, ","), + }) + } + if err != nil { + return nil, err + } + return *conf, nil +} + +// SafeSetStatus is a wrapper around the SetStatus API that tolerates invalid keys. +// If any of the provided keys are not valid, the removed map is checked to see if those keys are being removed. +// If all keys are being removed, the error is ignored. Otherwise, an error is returned. +func SafeSetStatus(ctx context.Context, w *databricks.WorkspaceClient, removed map[string]struct{}, newConf map[string]string) error { + err := w.WorkspaceConf.SetStatus(ctx, settings.WorkspaceConf(newConf)) + invalidKeys, parseErr := parseInvalidKeysFromError(err) + if parseErr != nil { + return parseErr + } else if invalidKeys != nil { + // Tolerate the request if all invalid keys are present in the old map, indicating that they are being removed. + newInvalidKeys := make([]string, 0, len(invalidKeys)) + for _, k := range invalidKeys { + if _, ok := removed[k]; !ok { + newInvalidKeys = append(newInvalidKeys, k) + } + } + if len(newInvalidKeys) > 0 { + return fmt.Errorf("failed to set workspace conf because some new keys are invalid: %s", strings.Join(newInvalidKeys, ", ")) + } + tflog.Info(ctx, fmt.Sprintf("ignored the following invalid keys because they are being removed: %s", strings.Join(invalidKeys, ", "))) + return nil + } + return err +} + // ResourceWorkspaceConf maintains workspace configuration for specified keys func ResourceWorkspaceConf() common.Resource { return common.Resource{ @@ -155,13 +233,11 @@ func ResourceWorkspaceConf() common.Resource { if len(keys) == 0 { return nil } - remote, err := w.WorkspaceConf.GetStatus(ctx, settings.GetStatusRequest{ - Keys: strings.Join(keys, ","), - }) + remote, err := SafeGetStatus(ctx, w, keys) if err != nil { return err } - for k, v := range *remote { + for k, v := range remote { config[k] = v } log.Printf("[DEBUG] Setting new config to state: %v", config) From b656822b4e9e2c9626cad87e9089c147d865a840 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Fri, 18 Oct 2024 09:32:17 +0200 Subject: [PATCH 54/99] [Release] Release v1.54.0 (#4117) ### New Features and Improvements * Add `databricks_registered_model` data source ([#4033](https://github.com/databricks/terraform-provider-databricks/pull/4033)). * Add data source `databricks_notification_destinations` ([#4087](https://github.com/databricks/terraform-provider-databricks/pull/4087)). ### Bug Fixes * Fix databricks_cluster_pluginframework data source ([#4097](https://github.com/databricks/terraform-provider-databricks/pull/4097)). * Mark unity_catalog_provisioning_state as ReadOnly ([#4116](https://github.com/databricks/terraform-provider-databricks/pull/4116)). * Tolerate invalid keys in `databricks_workspace_conf` ([#4102](https://github.com/databricks/terraform-provider-databricks/pull/4102)). * force send `read_only` in `databricks_external_location` when it's changed ([#4067](https://github.com/databricks/terraform-provider-databricks/pull/4067)). 
* force send `read_only` in `databricks_storage_credential` when it's changed ([#4083](https://github.com/databricks/terraform-provider-databricks/pull/4083)). ### Documentation * Document `budget_policy_id` in `databricks_pipeline` and `databricks_job` ([#4110](https://github.com/databricks/terraform-provider-databricks/pull/4110)). * Reformat code examples in documentation ([#4081](https://github.com/databricks/terraform-provider-databricks/pull/4081)). * Update documentation for `databricks_model_serving` ([#4115](https://github.com/databricks/terraform-provider-databricks/pull/4115)). * Updates to resource examples ([#4093](https://github.com/databricks/terraform-provider-databricks/pull/4093)). ### Internal Changes * Add maxItem=1 validator for object types in plugin framework schema ([#4094](https://github.com/databricks/terraform-provider-databricks/pull/4094)). * Fix acceptance test for `databricks_registered_model` data source ([#4105](https://github.com/databricks/terraform-provider-databricks/pull/4105)). * Generate Effective Fields ([#4057](https://github.com/databricks/terraform-provider-databricks/pull/4057)). * Generate Effective Fields ([#4112](https://github.com/databricks/terraform-provider-databricks/pull/4112)). * Set SDK used in the useragent in context ([#4092](https://github.com/databricks/terraform-provider-databricks/pull/4092)). * Support adding context in resources and data sources ([#4085](https://github.com/databricks/terraform-provider-databricks/pull/4085)). * Update plugin framework schema to use ListNestedBlocks ([#4040](https://github.com/databricks/terraform-provider-databricks/pull/4040)). --- CHANGELOG.md | 36 ++++++++++++++++++++++++++++++++++++ common/version.go | 2 +- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 769b48fa35..3f7b4ef3b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,41 @@ # Version changelog +## [Release] Release v1.54.0 + +### New Features and Improvements + + * Add `databricks_registered_model` data source ([#4033](https://github.com/databricks/terraform-provider-databricks/pull/4033)). + * Add data source `databricks_notification_destinations` ([#4087](https://github.com/databricks/terraform-provider-databricks/pull/4087)). + + +### Bug Fixes + + * Fix databricks_cluster_pluginframework data source ([#4097](https://github.com/databricks/terraform-provider-databricks/pull/4097)). + * Mark unity_catalog_provisioning_state as ReadOnly ([#4116](https://github.com/databricks/terraform-provider-databricks/pull/4116)). + * Tolerate invalid keys in `databricks_workspace_conf` ([#4102](https://github.com/databricks/terraform-provider-databricks/pull/4102)). + * force send `read_only` in `databricks_external_location` when it's changed ([#4067](https://github.com/databricks/terraform-provider-databricks/pull/4067)). + * force send `read_only` in `databricks_storage_credential` when it's changed ([#4083](https://github.com/databricks/terraform-provider-databricks/pull/4083)). + + +### Documentation + + * Document `budget_policy_id` in `databricks_pipeline` and `databricks_job` ([#4110](https://github.com/databricks/terraform-provider-databricks/pull/4110)). + * Reformat code examples in documentation ([#4081](https://github.com/databricks/terraform-provider-databricks/pull/4081)). + * Update documentation for `databricks_model_serving` ([#4115](https://github.com/databricks/terraform-provider-databricks/pull/4115)). 
+ * Updates to resource examples ([#4093](https://github.com/databricks/terraform-provider-databricks/pull/4093)). + + +### Internal Changes + + * Add maxItem=1 validator for object types in plugin framework schema ([#4094](https://github.com/databricks/terraform-provider-databricks/pull/4094)). + * Fix acceptance test for `databricks_registered_model` data source ([#4105](https://github.com/databricks/terraform-provider-databricks/pull/4105)). + * Generate Effective Fields ([#4057](https://github.com/databricks/terraform-provider-databricks/pull/4057)). + * Generate Effective Fields ([#4112](https://github.com/databricks/terraform-provider-databricks/pull/4112)). + * Set SDK used in the useragent in context ([#4092](https://github.com/databricks/terraform-provider-databricks/pull/4092)). + * Support adding context in resources and data sources ([#4085](https://github.com/databricks/terraform-provider-databricks/pull/4085)). + * Update plugin framework schema to use ListNestedBlocks ([#4040](https://github.com/databricks/terraform-provider-databricks/pull/4040)). + + ## [Release] Release v1.53.0 ### New Features and Improvements diff --git a/common/version.go b/common/version.go index 592603882a..adf11608fb 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.53.0" + version = "1.54.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From 2dfee52f8468a90e91205b6c7a98430a72518d42 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Fri, 18 Oct 2024 03:51:12 -0400 Subject: [PATCH 55/99] [Exporter] Exclude some system schemas from export (#4121) ## Changes ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- exporter/importables.go | 3 +++ exporter/importables_test.go | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/exporter/importables.go b/exporter/importables.go index 5ea235c335..f6814b1872 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -2476,6 +2476,9 @@ var resourcesMap map[string]importable = map[string]importable{ return err } for _, v := range systemSchemas { + if v.Schema == "information_schema" || v.Schema == "__internal_logging" { + continue + } if v.State == catalog.SystemSchemaInfoStateEnableCompleted || v.State == catalog.SystemSchemaInfoStateEnableInitialized { id := fmt.Sprintf("%s|%s", currentMetastore, v.Schema) data := ic.Resources["databricks_system_schema"].Data( diff --git a/exporter/importables_test.go b/exporter/importables_test.go index 6bea1a8cf0..a28c2d30de 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -1423,6 +1423,10 @@ func TestListSystemSchemasSuccess(t *testing.T) { Schema: "access", State: catalog.SystemSchemaInfoStateEnableCompleted, }, + { + Schema: "information_schema", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, { Schema: "marketplace", State: catalog.SystemSchemaInfoStateAvailable, From 6112713b4c3d2040cd2f3b7652231bd11904d321 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Fri, 18 Oct 2024 10:04:06 +0200 Subject: [PATCH 56/99] [Fix] Change repo used in test (#4122) ## Changes Change repo used in test ## Tests - [X] `make test` run locally - [ ] relevant change in `docs/` folder - [X] covered with integration tests in `internal/acceptance` - [X] relevant acceptance tests are 
passing - [X] using Go SDK --- internal/acceptance/permissions_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index bcd67fa8c9..66b196e5b3 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -508,8 +508,8 @@ func TestAccPermissions_Repo_Id(t *testing.T) { loadDebugEnvIfRunsFromIDE(t, "workspace") template := ` resource "databricks_repo" "this" { - url = "https://github.com/databrickslabs/tempo.git" - path = "/Repos/terraform-tests/tempo-{var.STICKY_RANDOM}" + url = "https://github.com/databricks/databricks-sdk-go.git" + path = "/Repos/terraform-tests/databricks-sdk-go-{var.STICKY_RANDOM}" } ` WorkspaceLevel(t, Step{ @@ -542,8 +542,8 @@ func TestAccPermissions_Repo_Path(t *testing.T) { loadDebugEnvIfRunsFromIDE(t, "workspace") template := ` resource "databricks_repo" "this" { - url = "https://github.com/databrickslabs/tempo.git" - path = "/Repos/terraform-tests/tempo-{var.STICKY_RANDOM}" + url = "https://github.com/databricks/databricks-sdk-go.git" + path = "/Repos/terraform-tests/databricks-sdk-go-{var.STICKY_RANDOM}" } ` WorkspaceLevel(t, Step{ From b9fb47c28f326332c77c4f671260daea494f0019 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Fri, 18 Oct 2024 04:42:41 -0400 Subject: [PATCH 57/99] [Feature] Add `databricks_alert` resource to replace `databricks_sql_alert` (#4051) ## Changes The new resource uses the [new Alerts API](https://docs.databricks.com/api/workspace/alerts/create) instead of the legacy one that will be deprecated. Since the new resource has a slightly different set of parameters, it was decided to create a new resource and deprecate the old one. This resource uses old TF SDK to be compatible with TF exporter (until #4050 is implemented). TODOs: - Need to discuss how to handle permissions - `sql_alert` permissions look like working, but not sure if we should continue to use that API - Support in the exporter will be in a separate PR ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Miles Yucht --- common/resource.go | 9 +- common/resource_test.go | 9 + docs/resources/alert.md | 196 +++++++++++++++++++ docs/resources/sql_alert.md | 12 ++ internal/acceptance/alert_test.go | 60 ++++++ internal/acceptance/permissions_test.go | 39 ++++ internal/acceptance/sql_alert_test.go | 2 +- internal/providers/sdkv2/sdkv2.go | 1 + sql/resource_alert.go | 125 ++++++++++++ sql/resource_alert_test.go | 245 ++++++++++++++++++++++++ sql/resource_sql_alerts.go | 3 +- 11 files changed, 698 insertions(+), 3 deletions(-) create mode 100644 docs/resources/alert.md create mode 100644 internal/acceptance/alert_test.go create mode 100644 sql/resource_alert.go create mode 100644 sql/resource_alert_test.go diff --git a/common/resource.go b/common/resource.go index 9e639eb962..4e357305db 100644 --- a/common/resource.go +++ b/common/resource.go @@ -443,13 +443,20 @@ func genericDatabricksData[T, P, C any]( // WorkspacePathPrefixDiffSuppress suppresses diffs for workspace paths where both sides // may or may not include the `/Workspace` prefix. 
// -// This is the case for dashboards where at create time, the user may include the `/Workspace` +// This is the case for dashboards, alerts and queries where at create time, the user may include the `/Workspace` // prefix for the `parent_path` field, but the read response will not include the prefix. func WorkspacePathPrefixDiffSuppress(k, old, new string, d *schema.ResourceData) bool { const prefix = "/Workspace" return strings.TrimPrefix(old, prefix) == strings.TrimPrefix(new, prefix) } +// WorkspaceOrEmptyPathPrefixDiffSuppress is similar WorkspacePathPrefixDiffSuppress but also suppresses diffs +// when the new value is empty (not specified by user). +func WorkspaceOrEmptyPathPrefixDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + const prefix = "/Workspace" + return (old != "" && new == "") || strings.TrimPrefix(old, prefix) == strings.TrimPrefix(new, prefix) +} + func EqualFoldDiffSuppress(k, old, new string, d *schema.ResourceData) bool { if strings.EqualFold(old, new) { log.Printf("[INFO] Suppressing diff on %s", k) diff --git a/common/resource_test.go b/common/resource_test.go index e93885a02c..f01f373ff5 100644 --- a/common/resource_test.go +++ b/common/resource_test.go @@ -187,6 +187,15 @@ func TestWorkspacePathPrefixDiffSuppress(t *testing.T) { assert.False(t, WorkspacePathPrefixDiffSuppress("k", "/Workspace/1", "/Workspace/2", nil)) } +func TestWorkspaceOrEmptyPathPrefixDiffSuppress(t *testing.T) { + assert.True(t, WorkspaceOrEmptyPathPrefixDiffSuppress("k", "/Workspace/foo/bar", "/Workspace/foo/bar", nil)) + assert.True(t, WorkspaceOrEmptyPathPrefixDiffSuppress("k", "/Workspace/foo/bar", "/foo/bar", nil)) + assert.True(t, WorkspaceOrEmptyPathPrefixDiffSuppress("k", "/foo/bar", "/Workspace/foo/bar", nil)) + assert.True(t, WorkspaceOrEmptyPathPrefixDiffSuppress("k", "/foo/bar", "/foo/bar", nil)) + assert.True(t, WorkspaceOrEmptyPathPrefixDiffSuppress("k", "/foo/bar", "", nil)) + assert.False(t, WorkspaceOrEmptyPathPrefixDiffSuppress("k", "/Workspace/1", "/Workspace/2", nil)) +} + func TestEqualFoldDiffSuppress(t *testing.T) { assert.True(t, EqualFoldDiffSuppress("k", "A", "a", nil)) assert.False(t, EqualFoldDiffSuppress("k", "A", "A2", nil)) diff --git a/docs/resources/alert.md b/docs/resources/alert.md new file mode 100644 index 0000000000..f15bdaf116 --- /dev/null +++ b/docs/resources/alert.md @@ -0,0 +1,196 @@ +--- +subcategory: "Databricks SQL" +--- +# databricks_alert Resource + +This resource allows you to manage [Databricks SQL Alerts](https://docs.databricks.com/en/sql/user/alerts/index.html). It supersedes [databricks_sql_alert](sql_alert.md) resource - see migration guide below for more details. + +## Example Usage + +```hcl +resource "databricks_directory" "shared_dir" { + path = "/Shared/Queries" +} + +# This will be replaced with new databricks_query resource +resource "databricks_sql_query" "this" { + data_source_id = databricks_sql_endpoint.example.data_source_id + name = "My Query Name" + query = "SELECT 42 as value" + parent = "folders/${databricks_directory.shared_dir.object_id}" +} + +resource "databricks_alert" "alert" { + query_id = databricks_sql_query.this.id + display_name = "TF new alert" + parent_path = databricks_directory.shared_dir.path + condition { + op = "GREATER_THAN" + operand { + column { + name = "value" + } + } + threshold { + value { + double_value = 42 + } + } + } +} +``` + +## Argument Reference + +The following arguments are available: + +* `query_id` - (Required, String) ID of the query evaluated by the alert. 
+
+* `display_name` - (Required, String) Name of the alert.
+* `condition` - (Required) Trigger conditions of the alert. Block consists of the following attributes:
+  * `op` - (Required, String Enum) Operator used for comparison in alert evaluation. (Enum: `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `EQUAL`, `NOT_EQUAL`, `IS_NULL`)
+  * `operand` - (Required, Block) Name of the column from the query result to use for comparison in alert evaluation:
+    * `column` - (Required, Block) Block describing the column from the query result to use for comparison in alert evaluation:
+      * `name` - (Required, String) Name of the column.
+  * `threshold` - (Optional for `IS_NULL` operation, Block) Threshold value used for comparison in alert evaluation:
+    * `value` - (Required, Block) Actual value used in comparison (one of the attributes is required):
+      * `string_value` - string value to compare against string results.
+      * `double_value` - double value to compare against integer and double results.
+      * `bool_value` - boolean value (`true` or `false`) to compare against boolean results.
+  * `empty_result_state` - (Optional, String Enum) Alert state if the result is empty (`UNKNOWN`, `OK`, `TRIGGERED`)
+* `custom_subject` - (Optional, String) Custom subject of the alert notification, if it exists. This includes the email subject, Slack notification header, etc. See [Alerts API reference](https://docs.databricks.com/en/sql/user/alerts/index.html) for custom templating instructions.
+* `custom_body` - (Optional, String) Custom body of the alert notification, if it exists. See [Alerts API reference](https://docs.databricks.com/en/sql/user/alerts/index.html) for custom templating instructions.
+* `parent_path` - (Optional, String) The path to a workspace folder containing the alert. The default is the user's home folder. If changed, the alert will be recreated.
+* `seconds_to_retrigger` - (Optional, Integer) Number of seconds an alert must wait after being triggered to rearm itself. After rearming, it can be triggered again. If 0 or not specified, the alert will not be triggered again.
+* `owner_user_name` - (Optional, String) Alert owner's username.
+* `notify_on_ok` - (Optional, Boolean) Whether to notify alert subscribers when the alert returns to normal.
+
+## Attribute Reference
+
+In addition to all the arguments above, the following attributes are exported:
+
+* `id` - unique ID of the Alert.
+* `lifecycle_state` - The workspace state of the alert. Used for tracking trashed status. (Possible values are `ACTIVE` or `TRASHED`).
+* `state` - Current state of the alert's trigger status (`UNKNOWN`, `OK`, `TRIGGERED`). This field is set to `UNKNOWN` if the alert has not yet been evaluated or ran into an error during the last evaluation.
+* `create_time` - The timestamp string indicating when the alert was created.
+* `update_time` - The timestamp string indicating when the alert was updated.
+* `trigger_time` - The timestamp string when the alert was last triggered, if the alert has been triggered before.
+
+## Migrating from `databricks_sql_alert` resource
+
+Under the hood, the new resource uses the same data as `databricks_sql_alert`, but it is exposed via a different API. This means that we can migrate existing alerts without recreating them. This operation is done in a few steps:
+
+* Record the ID of the existing `databricks_sql_alert`, for example, by executing the `terraform state show databricks_sql_alert.alert` command.
+* Create the code for the new implementation by performing the following changes:
+  * the `name` attribute is now named `display_name`
+  * the `parent` attribute (if it exists) is renamed to `parent_path` and should be converted from `folders/object_id` to the actual path.
+  * the `options` block is converted into the `condition` block with the following changes:
+    * the value of the `op` attribute should be converted from a mathematical operator into a string name, for example, `>` becomes `GREATER_THAN`, `==` becomes `EQUAL`, etc.
+    * the `column` attribute becomes the `operand` block
+    * the `value` attribute becomes the `threshold` block. **Please note that the old implementation always used strings, so you may have changes after import if you use `double_value` or `bool_value` inside the block.**
+  * the `rearm` attribute is renamed to `seconds_to_retrigger`.
+
+For example, if we have the original `databricks_sql_alert` defined as:
+
+```hcl
+resource "databricks_sql_alert" "alert" {
+  query_id = databricks_sql_query.this.id
+  name     = "My Alert"
+  parent   = "folders/${databricks_directory.shared_dir.object_id}"
+  options {
+    column = "value"
+    op     = ">"
+    value  = "42"
+    muted  = false
+  }
+}
+```
+
+we'll have a new resource defined as:
+
+```hcl
+resource "databricks_alert" "alert" {
+  query_id     = databricks_sql_query.this.id
+  display_name = "My Alert"
+  parent_path  = databricks_directory.shared_dir.path
+  condition {
+    op = "GREATER_THAN"
+    operand {
+      column {
+        name = "value"
+      }
+    }
+    threshold {
+      value {
+        double_value = 42
+      }
+    }
+  }
+}
+```
+
+### For Terraform version >= 1.7.0
+
+Terraform 1.7 introduced the [removed](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) block in addition to the [import](https://developer.hashicorp.com/terraform/language/import) block introduced in Terraform 1.5. Together they make import and removal of resources easier, avoiding manual execution of the `terraform import` and `terraform state rm` commands.
+
+So with Terraform 1.7+, the migration looks like the following:
+
+* Remove the old alert definition and replace it with the new one.
+* Adjust references, such as `databricks_permissions`.
+* Add `import` and `removed` blocks like this:
+
+```hcl
+import {
+  to = databricks_alert.alert
+  id = ""
+}
+
+removed {
+  from = databricks_sql_alert.alert
+
+  lifecycle {
+    destroy = false
+  }
+}
+```
+
+* Run the `terraform plan` command to check for possible changes, such as value type changes.
+* Run the `terraform apply` command to apply the changes.
+* Remove the `import` and `removed` blocks from the code.
+
+### For Terraform version < 1.7.0
+
+* Remove the old alert definition and replace it with the new one.
+* Remove the old resource from the state with the `terraform state rm databricks_sql_alert.alert` command.
+* Import the new resource with the `terraform import databricks_alert.alert ` command.
+* Adjust references, such as `databricks_permissions`.
+* Run the `terraform plan` command to check for possible changes, such as value type changes.
+
+## Access Control
+
+[databricks_permissions](permissions.md#sql-alert-usage) can control which groups or individual users can *Manage*, *Edit*, *Run* or *View* individual alerts.
+ +```hcl +resource "databricks_permissions" "alert_usage" { + sql_alert_id = databricks_alert.alert.id + access_control { + group_name = "users" + permission_level = "CAN_RUN" + } +} +``` + +## Import + +This resource can be imported using alert ID: + +```bash +terraform import databricks_alert.this +``` + +## Related Resources + +The following resources are often used in the same context: + +* [databricks_sql_query](sql_query.md) to manage Databricks SQL [Queries](https://docs.databricks.com/sql/user/queries/index.html). +* [databricks_sql_endpoint](sql_endpoint.md) to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). +* [databricks_directory](directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). diff --git a/docs/resources/sql_alert.md b/docs/resources/sql_alert.md index 689a52a5d5..12063c2a70 100644 --- a/docs/resources/sql_alert.md +++ b/docs/resources/sql_alert.md @@ -58,6 +58,18 @@ In addition to all arguments above, the following attributes are exported: * `id` - unique ID of the SQL Alert. +## Access Control + +[databricks_permissions](permissions.md#sql-alert-usage) can control which groups or individual users can *Manage*, *Edit*, *Run* or *View* individual alerts. + +## Import + +This resource can be imported using alert ID: + +```bash +terraform import databricks_sql_alert.this +``` + ## Related Resources The following resources are often used in the same context: diff --git a/internal/acceptance/alert_test.go b/internal/acceptance/alert_test.go new file mode 100644 index 0000000000..22ed542468 --- /dev/null +++ b/internal/acceptance/alert_test.go @@ -0,0 +1,60 @@ +package acceptance + +import ( + "testing" +) + +func TestAccAlert(t *testing.T) { + WorkspaceLevel(t, Step{ + Template: ` + resource "databricks_sql_query" "this" { + data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" + name = "tf-{var.RANDOM}" + query = "SELECT 1 AS p1, 2 as p2" + } + + resource "databricks_alert" "alert" { + query_id = databricks_sql_query.this.id + display_name = "tf-alert-{var.RANDOM}" + condition { + op = "EQUAL" + operand { + column { + name = "p2" + } + } + threshold { + value { + double_value = 2 + } + } + } + } +`, + }, Step{ + Template: ` + resource "databricks_sql_query" "this" { + data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" + name = "tf-{var.RANDOM}" + query = "SELECT 1 AS p1, 2 as p2" + } + + resource "databricks_alert" "alert" { + query_id = databricks_sql_query.this.id + display_name = "tf-alert-{var.RANDOM}" + condition { + op = "GREATER_THAN" + operand { + column { + name = "p2" + } + } + threshold { + value { + double_value = 3 + } + } + } + }`, + }) +} diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 66b196e5b3..7c5da72512 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -837,3 +837,42 @@ func TestAccPermissions_ServingEndpoint(t *testing.T) { ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for serving-endpoint, allowed levels: CAN_MANAGE"), }) } + +func TestAccPermissions_Alert(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + alertTemplate := ` + resource "databricks_sql_query" "this" { + name = "{var.STICKY_RANDOM}-query" + query = "SELECT 1 AS p1, 2 as p2" + data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" + } + + resource "databricks_alert" "this" { + query_id = 
databricks_sql_query.this.id + display_name = "{var.STICKY_RANDOM}-alert" + condition { + op = "GREATER_THAN" + operand { + column { + name = "value" + } + } + threshold { + value { + double_value = 42 + } + } + } + } +` + WorkspaceLevel(t, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_alert.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_alert.this.id", + currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_alert.this.id", + currentPrincipalPermission(t, "CAN_VIEW"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for alert, allowed levels: CAN_MANAGE"), + }) +} diff --git a/internal/acceptance/sql_alert_test.go b/internal/acceptance/sql_alert_test.go index 612df0653e..9db6bb72da 100644 --- a/internal/acceptance/sql_alert_test.go +++ b/internal/acceptance/sql_alert_test.go @@ -4,7 +4,7 @@ import ( "testing" ) -func TestAccAlert(t *testing.T) { +func TestAccSqlAlert(t *testing.T) { WorkspaceLevel(t, Step{ Template: ` resource "databricks_sql_query" "this" { diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index 65ab1f4973..7c90851314 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -128,6 +128,7 @@ func DatabricksProvider() *schema.Provider { }, ResourcesMap: map[string]*schema.Resource{ // must be in alphabetical order "databricks_access_control_rule_set": permissions.ResourceAccessControlRuleSet().ToResource(), + "databricks_alert": sql.ResourceAlert().ToResource(), "databricks_artifact_allowlist": catalog.ResourceArtifactAllowlist().ToResource(), "databricks_aws_s3_mount": storage.ResourceAWSS3Mount().ToResource(), "databricks_azure_adls_gen1_mount": storage.ResourceAzureAdlsGen1Mount().ToResource(), diff --git a/sql/resource_alert.go b/sql/resource_alert.go new file mode 100644 index 0000000000..03281d5006 --- /dev/null +++ b/sql/resource_alert.go @@ -0,0 +1,125 @@ +package sql + +import ( + "context" + "log" + "strings" + + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceAlert() common.Resource { + s := common.StructToSchema(sql.Alert{}, func(m map[string]*schema.Schema) map[string]*schema.Schema { + common.CustomizeSchemaPath(m, "display_name").SetRequired() + common.CustomizeSchemaPath(m, "query_id").SetRequired() + common.CustomizeSchemaPath(m, "condition").SetRequired() + // TODO: can we automatically generate it from SDK? Or should we avoid validation at all? 
+ common.CustomizeSchemaPath(m, "condition", "op").SetRequired().SetValidateFunc(validation.StringInSlice([]string{ + "GREATER_THAN", "GREATER_THAN_OR_EQUAL", "LESS_THAN", "LESS_THAN_OR_EQUAL", "EQUAL", "NOT_EQUAL", "IS_NULL"}, true)) + common.CustomizeSchemaPath(m, "condition", "op").SetRequired() + common.CustomizeSchemaPath(m, "parent_path").SetCustomSuppressDiff(common.WorkspaceOrEmptyPathPrefixDiffSuppress).SetForceNew() + common.CustomizeSchemaPath(m, "condition", "operand").SetRequired() + common.CustomizeSchemaPath(m, "condition", "operand", "column").SetRequired() + common.CustomizeSchemaPath(m, "condition", "operand", "column", "name").SetRequired() + common.CustomizeSchemaPath(m, "condition", "empty_result_state").SetValidateFunc( + validation.StringInSlice([]string{"UNKNOWN", "OK", "TRIGGERED"}, true)) + // We may not need it for some conditions + // common.CustomizeSchemaPath(m, "condition", "threshold").SetRequired() + common.CustomizeSchemaPath(m, "condition", "threshold", "value").SetRequired() + alof := []string{ + "condition.0.threshold.0.value.0.string_value", + "condition.0.threshold.0.value.0.double_value", + "condition.0.threshold.0.value.0.bool_value", + } + for _, f := range alof { + common.CustomizeSchemaPath(m, "condition", "threshold", "value", + strings.TrimPrefix(f, "condition.0.threshold.0.value.0.")).SetExactlyOneOf(alof) + } + common.CustomizeSchemaPath(m, "owner_user_name").SetSuppressDiff() + common.CustomizeSchemaPath(m, "id").SetReadOnly() + common.CustomizeSchemaPath(m, "create_time").SetReadOnly() + common.CustomizeSchemaPath(m, "lifecycle_state").SetReadOnly() + common.CustomizeSchemaPath(m, "state").SetReadOnly() + common.CustomizeSchemaPath(m, "trigger_time").SetReadOnly() + common.CustomizeSchemaPath(m, "update_time").SetReadOnly() + return m + }) + + return common.Resource{ + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var a sql.CreateAlertRequestAlert + common.DataToStructPointer(d, s, &a) + apiAlert, err := w.Alerts.Create(ctx, sql.CreateAlertRequest{ + Alert: &a, + }) + if err != nil { + return err + } + d.SetId(apiAlert.Id) + owner := d.Get("owner_user_name").(string) + if owner != "" { + _, err = w.Alerts.Update(ctx, sql.UpdateAlertRequest{ + Alert: &sql.UpdateAlertRequestAlert{ + OwnerUserName: owner, + }, + Id: apiAlert.Id, + UpdateMask: "owner_user_name", + }) + } + return err + }, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + apiAlert, err := w.Alerts.GetById(ctx, d.Id()) + if err != nil { + log.Printf("[WARN] error getting alert by ID: %v", err) + return err + } + parentPath := d.Get("parent_path").(string) + if parentPath != "" && strings.HasPrefix(apiAlert.ParentPath, "/Workspace") && !strings.HasPrefix(parentPath, "/Workspace") { + apiAlert.ParentPath = strings.TrimPrefix(parentPath, "/Workspace") + } + return common.StructToData(apiAlert, s, d) + }, + Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var a sql.UpdateAlertRequestAlert + updateMask := "display_name,query_id,seconds_to_retrigger,condition,custom_body,custom_subject" + if d.HasChange("owner_user_name") { + updateMask += ",owner_user_name" + } + if d.HasChange("notify_on_ok") { + updateMask += ",notify_on_ok" + a.ForceSendFields 
= append(a.ForceSendFields, "NotifyOnOk") + } + common.DataToStructPointer(d, s, &a) + _, err = w.Alerts.Update(ctx, sql.UpdateAlertRequest{ + Alert: &a, + Id: d.Id(), + UpdateMask: updateMask, + }) + return err + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + return w.Alerts.DeleteById(ctx, d.Id()) + }, + Schema: s, + } +} diff --git a/sql/resource_alert_test.go b/sql/resource_alert_test.go new file mode 100644 index 0000000000..f0559434b3 --- /dev/null +++ b/sql/resource_alert_test.go @@ -0,0 +1,245 @@ +package sql + +import ( + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/mock" +) + +var ( + alertResponse = sql.Alert{ + Id: "7890", + QueryId: "123456", + DisplayName: "TF new alert", + OwnerUserName: "user@domain.com", + Condition: &sql.AlertCondition{ + Op: "GREATER_THAN", + Operand: &sql.AlertConditionOperand{ + Column: &sql.AlertOperandColumn{ + Name: "value", + }, + }, + Threshold: &sql.AlertConditionThreshold{ + Value: &sql.AlertOperandValue{ + DoubleValue: 42, + }, + }, + }, + ParentPath: "/Workspace/Shared/Alerts", + } + createHcl = `query_id = "123456" + display_name = "TF new alert" + parent_path = "/Shared/Alerts" + owner_user_name = "user@domain.com" + condition { + op = "GREATER_THAN" + operand { + column { + name = "value" + } + } + threshold { + value { + double_value = 42 + } + } + }` + createAlertRequest = sql.CreateAlertRequest{ + Alert: &sql.CreateAlertRequestAlert{ + QueryId: "123456", + DisplayName: "TF new alert", + ParentPath: "/Shared/Alerts", + Condition: &sql.AlertCondition{ + Op: "GREATER_THAN", + Operand: &sql.AlertConditionOperand{ + Column: &sql.AlertOperandColumn{ + Name: "value", + }, + }, + Threshold: &sql.AlertConditionThreshold{ + Value: &sql.AlertOperandValue{ + DoubleValue: 42, + }, + }, + }, + }} +) + +func TestAlertCreate(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockAlertsAPI().EXPECT() + e.Create(mock.Anything, createAlertRequest).Return(&alertResponse, nil) + e.Update(mock.Anything, sql.UpdateAlertRequest{ + Id: "7890", + UpdateMask: "owner_user_name", + Alert: &sql.UpdateAlertRequestAlert{ + OwnerUserName: "user@domain.com", + }, + }).Return(&alertResponse, nil) + e.GetById(mock.Anything, "7890").Return(&alertResponse, nil) + }, + Resource: ResourceAlert(), + Create: true, + HCL: createHcl, + }.ApplyAndExpectData(t, map[string]any{ + "id": "7890", + "query_id": "123456", + "display_name": "TF new alert", + "owner_user_name": "user@domain.com", + }) +} + +func TestAlertCreate_BackendError(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockAlertsAPI().EXPECT() + e.Create(mock.Anything, createAlertRequest).Return(nil, &apierr.APIError{ + StatusCode: http.StatusBadRequest, + Message: "bad payload", + }) + }, + Resource: ResourceAlert(), + Create: true, + HCL: createHcl, + }.ExpectError(t, "bad payload") +} + +func TestAlertCreate_ErrorMultipleValues(t *testing.T) { + qa.ResourceFixture{ + Resource: ResourceAlert(), + Create: true, + HCL: `query_id = "123456" + display_name = "TF new alert" + parent_path = "/Shared/Alerts" + owner_user_name = "user@domain.com" 
+ condition { + op = "GREATER_THAN" + operand { + column { + name = "value" + } + } + threshold { + value { + double_value = 42 + } + } + threshold { + value { + bool_value = 42 + } + } +}`, + }.ExpectError(t, "invalid config supplied. [condition.#.threshold] Too many list items") +} + +func TestAlertRead_Import(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockAlertsAPI().EXPECT().GetById(mock.Anything, "7890").Return(&alertResponse, nil) + }, + Resource: ResourceAlert(), + Read: true, + ID: "7890", + New: true, + }.ApplyAndExpectData(t, map[string]any{ + "id": "7890", + "query_id": "123456", + "display_name": "TF new alert", + "owner_user_name": "user@domain.com", + }) +} + +func TestAlertRead_Error(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockAlertsAPI().EXPECT().GetById(mock.Anything, "7890").Return(nil, &apierr.APIError{ + StatusCode: http.StatusBadRequest, + Message: "bad payload", + }) + }, + Resource: ResourceAlert(), + Read: true, + ID: "7890", + New: true, + }.ExpectError(t, "bad payload") +} + +func TestAlertDelete(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockAlertsAPI().EXPECT().DeleteById(mock.Anything, "7890").Return(nil) + }, + Resource: ResourceAlert(), + Delete: true, + ID: "7890", + New: true, + }.ApplyNoError(t) +} + +func TestAlertUpdate(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockAlertsAPI().EXPECT() + e.Update(mock.Anything, sql.UpdateAlertRequest{ + Id: "7890", + UpdateMask: "display_name,query_id,seconds_to_retrigger,condition,custom_body,custom_subject,owner_user_name,notify_on_ok", + Alert: &sql.UpdateAlertRequestAlert{ + QueryId: "123456", + DisplayName: "TF new alert", + OwnerUserName: "user@domain.com", + Condition: &sql.AlertCondition{ + Op: "GREATER_THAN", + Operand: &sql.AlertConditionOperand{ + Column: &sql.AlertOperandColumn{ + Name: "value", + }, + }, + Threshold: &sql.AlertConditionThreshold{ + Value: &sql.AlertOperandValue{ + DoubleValue: 42, + }, + }, + }, + ForceSendFields: []string{"NotifyOnOk"}, + }}).Return(&alertResponse, nil) + e.GetById(mock.Anything, "7890").Return(&alertResponse, nil) + }, + Resource: ResourceAlert(), + Update: true, + ID: "7890", + New: true, + InstanceState: map[string]string{ + "id": "7890", + "query_id": "123456", + "notify_on_ok": "true", + }, + HCL: `query_id = "123456" + display_name = "TF new alert" + owner_user_name = "user@domain.com" + condition { + op = "GREATER_THAN" + operand { + column { + name = "value" + } + } + threshold { + value { + double_value = 42 + } + } + }`, + }.ApplyAndExpectData(t, map[string]any{ + "id": "7890", + "query_id": "123456", + "display_name": "TF new alert", + "owner_user_name": "user@domain.com", + }) +} diff --git a/sql/resource_sql_alerts.go b/sql/resource_sql_alerts.go index 156834bd44..7a54a59ecb 100644 --- a/sql/resource_sql_alerts.go +++ b/sql/resource_sql_alerts.go @@ -178,6 +178,7 @@ func ResourceSqlAlert() common.Resource { } return w.AlertsLegacy.DeleteByAlertId(ctx, data.Id()) }, - Schema: s, + Schema: s, + DeprecationMessage: "This resource is deprecated and will be removed in the future. 
Please use the `databricks_alert` resource instead.", } } From 29bb830b5fd09a1c1e00018f0168200afc298289 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Fri, 18 Oct 2024 05:54:50 -0400 Subject: [PATCH 58/99] [Exporter] **Breaking change**: Move `databricks_workspace_file` to a separate service (#4118) ## Changes Move `databricks_workspace_file` to a separate service `wsfiles`, so we can list and export them separately from notebooks. If you used `notebooks` in `-listing` or `-services` options, then you need to append `wsfiles` to these options. ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/guides/experimental-exporter.md | 3 ++- exporter/command.go | 5 +++-- exporter/context.go | 20 ++++++++++++++++++-- exporter/exporter_test.go | 2 +- exporter/importables.go | 27 +-------------------------- exporter/importables_test.go | 16 ++++++++++------ exporter/util_test.go | 2 -- exporter/util_workspace.go | 25 ++++++++++++++++++------- 8 files changed, 53 insertions(+), 47 deletions(-) diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index 713c6579f7..e709f4765d 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -120,7 +120,7 @@ Services are just logical groups of resources used for filtering and organizatio * `mlflow-webhooks` - **listing** [databricks_mlflow_webhook](../resources/mlflow_webhook.md). * `model-serving` - **listing** [databricks_model_serving](../resources/model_serving.md). * `mounts` - **listing** works only in combination with `-mounts` command-line option. -* `notebooks` - **listing** [databricks_notebook](../resources/notebook.md) and [databricks_workspace_file](../resources/workspace_file.md). +* `notebooks` - **listing** [databricks_notebook](../resources/notebook.md). * `policies` - **listing** [databricks_cluster_policy](../resources/cluster_policy). * `pools` - **listing** [instance pools](../resources/instance_pool.md). * `repos` - **listing** [databricks_repo](../resources/repo.md) @@ -148,6 +148,7 @@ Services are just logical groups of resources used for filtering and organizatio * `users` - [databricks_user](../resources/user.md) and [databricks_service_principal](../resources/service_principal.md) are written to their own file, simply because of their amount. If you use SCIM provisioning, migrating workspaces is the only use case for importing `users` service. * `vector-search` - **listing** exports [databricks_vector_search_endpoint](../resources/vector_search_endpoint.md) and [databricks_vector_search_index](../resources/vector_search_index.md) * `workspace` - **listing** [databricks_workspace_conf](../resources/workspace_conf.md) and [databricks_global_init_script](../resources/global_init_script.md) +* `wsfiles` - **listing** [databricks_workspace_file](../resources/workspace_file.md). ## Secrets diff --git a/exporter/command.go b/exporter/command.go index bc0a74efd2..5e40b9a039 100644 --- a/exporter/command.go +++ b/exporter/command.go @@ -41,8 +41,9 @@ func (ic *importContext) allServicesAndListing() (string, string) { listing[ir.Service] = struct{}{} } } - // We need this to specify default listings of UC objects... - for _, ir := range []string{"uc-schemas", "uc-models", "uc-tables", "uc-volumes"} { + // We need this to specify default listings of UC & Workspace objects... 
+ for _, ir := range []string{"uc-schemas", "uc-models", "uc-tables", "uc-volumes", + "notebooks", "directories", "wsfiles"} { listing[ir] = struct{}{} } return strings.Join(maps.Keys(services), ","), strings.Join(maps.Keys(listing), ",") diff --git a/exporter/context.go b/exporter/context.go index 9fbfcb20d6..1ab4fbbb80 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -374,14 +374,30 @@ func (ic *importContext) Run() error { ic.startImportChannels() // Start listing of objects + listWorkspaceObjectsAlreadyRunning := false for rnLoop, irLoop := range ic.Importables { resourceName := rnLoop ir := irLoop + // TODO: extend this to other services? Like, Git Folders + if !ic.accountLevel && (ir.Service == "notebooks" || ir.Service == "wsfiles" || (ir.Service == "directories" && !ic.incremental)) { + if _, exists := ic.listing[ir.Service]; exists && !listWorkspaceObjectsAlreadyRunning { + ic.waitGroup.Add(1) + log.Printf("[DEBUG] Starting listing of workspace objects") + go func() { + if err := listWorkspaceObjects(ic); err != nil { + log.Printf("[ERROR] listing of workspace objects failed %s", err) + } + log.Print("[DEBUG] Finished listing of workspace objects") + ic.waitGroup.Done() + }() + listWorkspaceObjectsAlreadyRunning = true + } + continue + } if ir.List == nil { continue } - _, exists := ic.listing[ir.Service] - if !exists { + if _, exists := ic.listing[ir.Service]; !exists { log.Printf("[DEBUG] %s (%s service) is not part of listing", resourceName, ir.Service) continue } diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 43c6c10916..3f605930c0 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -2180,7 +2180,7 @@ func TestImportingDLTPipelines(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir ic.enableListing("dlt") - ic.enableServices("dlt,access,notebooks,users,repos,secrets") + ic.enableServices("dlt,access,notebooks,users,repos,secrets,wsfiles") err := ic.Run() assert.NoError(t, err) diff --git a/exporter/importables.go b/exporter/importables.go index f6814b1872..418ddfcb6a 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -1576,7 +1576,6 @@ var resourcesMap map[string]importable = map[string]importable{ WorkspaceLevel: true, Service: "notebooks", Name: workspaceObjectResouceName, - List: listNotebooksAndWorkspaceFiles, Import: func(ic *importContext, r *resource) error { ic.emitUserOrServicePrincipalForPath(r.ID, "/Users") notebooksAPI := workspace.NewNotebooksAPI(ic.Context, ic.Client) @@ -1623,10 +1622,8 @@ var resourcesMap map[string]importable = map[string]importable{ }, "databricks_workspace_file": { WorkspaceLevel: true, - Service: "notebooks", + Service: "wsfiles", Name: workspaceObjectResouceName, - // We don't need list function for workspace files because it will be handled by the notebooks listing - // List: createListWorkspaceObjectsFunc(workspace.File, "databricks_workspace_file", "workspace_file"), Import: func(ic *importContext, r *resource) error { ic.emitUserOrServicePrincipalForPath(r.ID, "/Users") notebooksAPI := workspace.NewNotebooksAPI(ic.Context, ic.Client) @@ -2175,28 +2172,6 @@ var resourcesMap map[string]importable = map[string]importable{ } return fmt.Errorf("can't find directory with object_id: %s", r.Value) }, - // TODO: think if we really need this, we need directories only for permissions, - // and only when they are different from parents & notebooks - List: func(ic *importContext) error { - if ic.incremental { - return nil - } - directoryList := 
ic.getAllDirectories() - for offset, directory := range directoryList { - if strings.HasPrefix(directory.Path, "/Repos") { - continue - } - if res := ignoreIdeFolderRegex.FindStringSubmatch(directory.Path); res != nil { - continue - } - ic.maybeEmitWorkspaceObject("databricks_directory", directory.Path, &directory) - - if offset%50 == 0 { - log.Printf("[INFO] Scanned %d of %d directories", offset+1, len(directoryList)) - } - } - return nil - }, Import: func(ic *importContext, r *resource) error { ic.emitUserOrServicePrincipalForPath(r.ID, "/Users") // Existing permissions API doesn't allow to set permissions for diff --git a/exporter/importables_test.go b/exporter/importables_test.go index a28c2d30de..8e82f7c4fc 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -1084,7 +1084,8 @@ func TestNotebookGeneration(t *testing.T) { }, }, "notebooks", false, func(ic *importContext) { ic.notebooksFormat = "SOURCE" - err := resourcesMap["databricks_notebook"].List(ic) + ic.enableListing("notebooks") + err := listWorkspaceObjects(ic) assert.NoError(t, err) ic.waitGroup.Wait() ic.closeImportChannels() @@ -1127,7 +1128,8 @@ func TestNotebookGenerationJupyter(t *testing.T) { }, }, "notebooks", false, func(ic *importContext) { ic.notebooksFormat = "JUPYTER" - err := resourcesMap["databricks_notebook"].List(ic) + ic.enableListing("notebooks") + err := listWorkspaceObjects(ic) assert.NoError(t, err) ic.waitGroup.Wait() ic.closeImportChannels() @@ -1184,7 +1186,8 @@ func TestNotebookGenerationBadCharacters(t *testing.T) { }, "notebooks,directories", true, func(ic *importContext) { ic.notebooksFormat = "SOURCE" ic.enableServices("notebooks") - err := resourcesMap["databricks_notebook"].List(ic) + ic.enableListing("notebooks") + err := listWorkspaceObjects(ic) assert.NoError(t, err) ic.waitGroup.Wait() ic.closeImportChannels() @@ -1231,7 +1234,8 @@ func TestDirectoryGeneration(t *testing.T) { }, }, }, "directories", false, func(ic *importContext) { - err := resourcesMap["databricks_directory"].List(ic) + ic.enableListing("directories") + err := listWorkspaceObjects(ic) assert.NoError(t, err) ic.waitGroup.Wait() @@ -1521,7 +1525,7 @@ func TestEmitSqlParent(t *testing.T) { func TestEmitFilesFromSlice(t *testing.T) { ic := importContextForTest() - ic.enableServices("storage,notebooks") + ic.enableServices("storage,notebooks,wsfiles") ic.emitFilesFromSlice([]string{ "dbfs:/FileStore/test.txt", "/Workspace/Shared/test.txt", @@ -1534,7 +1538,7 @@ func TestEmitFilesFromSlice(t *testing.T) { func TestEmitFilesFromMap(t *testing.T) { ic := importContextForTest() - ic.enableServices("storage,notebooks") + ic.enableServices("storage,notebooks,wsfiles") ic.emitFilesFromMap(map[string]string{ "k1": "dbfs:/FileStore/test.txt", "k2": "/Workspace/Shared/test.txt", diff --git a/exporter/util_test.go b/exporter/util_test.go index 3c451b9568..588c831db7 100644 --- a/exporter/util_test.go +++ b/exporter/util_test.go @@ -434,8 +434,6 @@ func TestDirectoryIncrementalMode(t *testing.T) { ic := importContextForTest() ic.incremental = true - // test direct listing - assert.Nil(t, resourcesMap["databricks_directory"].List(ic)) // test emit during workspace listing assert.True(t, ic.shouldSkipWorkspaceObject(workspace.ObjectStatus{ObjectType: workspace.Directory}, 111111)) } diff --git a/exporter/util_workspace.go b/exporter/util_workspace.go index 388c2b57e1..470e590ef0 100644 --- a/exporter/util_workspace.go +++ b/exporter/util_workspace.go @@ -193,7 +193,7 @@ func (ic *importContext) 
shouldSkipWorkspaceObject(object workspace.ObjectStatus } if !(object.ObjectType == workspace.Notebook || object.ObjectType == workspace.File) || strings.HasPrefix(object.Path, "/Repos") { - // log.Printf("[DEBUG] Skipping unsupported entry %v", object) + log.Printf("[DEBUG] Skipping unsupported entry %v", object) return true } if res := ignoreIdeFolderRegex.FindStringSubmatch(object.Path); res != nil { @@ -236,7 +236,7 @@ func emitWorkpaceObject(ic *importContext, object workspace.ObjectStatus) { } } -func listNotebooksAndWorkspaceFiles(ic *importContext) error { +func listWorkspaceObjects(ic *importContext) error { objectsChannel := make(chan workspace.ObjectStatus, defaultChannelSize) numRoutines := 2 // TODO: make configurable? together with the channel size? var processedObjects atomic.Uint64 @@ -257,10 +257,13 @@ func listNotebooksAndWorkspaceFiles(ic *importContext) error { } // There are two use cases - this function will handle listing, or it will receive listing updatedSinceMs := ic.getUpdatedSinceMs() + isNotebooksListingEnabled := ic.isServiceInListing("notebooks") + isDirectoryListingEnabled := ic.isServiceInListing("directories") + isWsFilesListingEnabled := ic.isServiceInListing("wsfiles") allObjects := ic.getAllWorkspaceObjects(func(objects []workspace.ObjectStatus) { for _, object := range objects { if object.ObjectType == workspace.Directory { - if !ic.incremental && object.Path != "/" && ic.isServiceInListing("directories") { + if !ic.incremental && object.Path != "/" && isDirectoryListingEnabled { objectsChannel <- object } } else { @@ -269,8 +272,14 @@ func listNotebooksAndWorkspaceFiles(ic *importContext) error { } object := object switch object.ObjectType { - case workspace.Notebook, workspace.File: - objectsChannel <- object + case workspace.Notebook: + if isNotebooksListingEnabled { + objectsChannel <- object + } + case workspace.File: + if isWsFilesListingEnabled { + objectsChannel <- object + } default: log.Printf("[WARN] unknown type %s for path %s", object.ObjectType, object.Path) } @@ -285,9 +294,11 @@ func listNotebooksAndWorkspaceFiles(ic *importContext) error { if ic.shouldSkipWorkspaceObject(object, updatedSinceMs) { continue } - if object.ObjectType == workspace.Directory && !ic.incremental && ic.isServiceInListing("directories") && object.Path != "/" { + if !ic.incremental && isDirectoryListingEnabled && object.ObjectType == workspace.Directory && object.Path != "/" { + emitWorkpaceObject(ic, object) + } else if isNotebooksListingEnabled && object.ObjectType == workspace.Notebook { emitWorkpaceObject(ic, object) - } else if (object.ObjectType == workspace.Notebook || object.ObjectType == workspace.File) && ic.isServiceInListing("notebooks") { + } else if isWsFilesListingEnabled && object.ObjectType == workspace.File { emitWorkpaceObject(ic, object) } } From 39d745e56d3ee77851d819439254fe764d6ee6cc Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Sat, 19 Oct 2024 02:00:45 -0400 Subject: [PATCH 59/99] [Doc] Clarify that `graviton` option of `databricks_node_type` could be used on Azure (#4125) ## Changes When used on Azure the `databricks_node_type` will search for VMs built on Azure Cobalt CPUs. 
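For illustration only (not part of this change), a minimal sketch of the documented option; the `local_disk` filter is an assumed companion attribute from the same data source and only the `graviton` behaviour is what this PR clarifies:

```hcl
# Sketch: limit the node type search to ARM-based instances
# (AWS Graviton, or Azure Cobalt when running on Azure).
data "databricks_node_type" "arm" {
  local_disk = true
  graviton   = true
}
```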
## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/data-sources/node_type.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/data-sources/node_type.md b/docs/data-sources/node_type.md index 43bb36029b..e537a05563 100644 --- a/docs/data-sources/node_type.md +++ b/docs/data-sources/node_type.md @@ -56,7 +56,7 @@ Data source allows you to pick groups by the following attributes * `GPU Accelerated` (AWS, Azure) * `photon_worker_capable` - (Optional) Pick only nodes that can run Photon workers. Defaults to _false_. * `photon_driver_capable` - (Optional) Pick only nodes that can run Photon driver. Defaults to _false_. -* `graviton` - (boolean, optional) if we should limit the search only to nodes with AWS Graviton CPUs. Default to _false_. +* `graviton` - (boolean, optional) if we should limit the search only to nodes with AWS Graviton or Azure Cobalt CPUs. Default to _false_. * `fleet` - (boolean, optional) if we should limit the search only to [AWS fleet instance types](https://docs.databricks.com/compute/aws-fleet-instances.html). Default to _false_. * `is_io_cache_enabled` - (Optional) . Pick only nodes that have IO Cache. Defaults to _false_. * `support_port_forwarding` - (Optional) Pick only nodes that support port forwarding. Defaults to _false_. From 4bebb0da6c078eabdae5b3693b35f7f141bd1da6 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Sat, 19 Oct 2024 02:19:30 -0400 Subject: [PATCH 60/99] [Exporter] Use `List` + iteration instead of call to `ListAll` (#4123) ## Changes This change significantly improve performance of export for resources with big number of objects because we're starting to export objects as soon as we get first page with list of objects. I.e., for Lakeview dashboards, the export time for ~10k dashboards went from 47 minutes down to 22 minutes. 
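To illustrate the shape of the change, a simplified excerpt of the Lakeview dashboards hunk below (not a complete function; counters and logging trimmed): each page of the paginated listing is consumed as it arrives and objects are emitted immediately, instead of waiting for `ListAll` to buffer the whole collection first.

```go
// After: iterate the paginated listing and emit each object as soon as
// its page is fetched, rather than calling ListAll and blocking until
// every page has been retrieved.
it := ic.workspaceClient.Lakeview.List(ic.Context, dashboards.ListDashboardsRequest{PageSize: 100})
for it.HasNext(ic.Context) {
	d, err := it.Next(ic.Context)
	if err != nil {
		return err
	}
	ic.Emit(&resource{
		Resource: "databricks_dashboard",
		ID:       d.DashboardId,
	})
}
```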
Resolves #4119 ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [x] using Go SDK --- exporter/context.go | 1 + exporter/importables.go | 218 ++++++++++++++++++++++------------------ 2 files changed, 123 insertions(+), 96 deletions(-) diff --git a/exporter/context.go b/exporter/context.go index 1ab4fbbb80..70ab462029 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -200,6 +200,7 @@ var goroutinesNumber = map[string]int{ "databricks_dbfs_file": 3, "databricks_user": 1, "databricks_service_principal": 1, + "databricks_dashboard": 4, "databricks_sql_dashboard": 3, "databricks_sql_widget": 4, "databricks_sql_visualization": 4, diff --git a/exporter/importables.go b/exporter/importables.go index 418ddfcb6a..5426845f39 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -222,11 +222,14 @@ var resourcesMap map[string]importable = map[string]importable{ return raw.(string) }, List: func(ic *importContext) error { - pools, err := ic.workspaceClient.InstancePools.ListAll(ic.Context) - if err != nil { - return err - } - for i, pool := range pools { + it := ic.workspaceClient.InstancePools.List(ic.Context) + i := 0 + for it.HasNext(ic.Context) { + pool, err := it.Next(ic.Context) + if err != nil { + return err + } + i++ if !ic.MatchesName(pool.InstancePoolName) { continue } @@ -234,7 +237,7 @@ var resourcesMap map[string]importable = map[string]importable{ Resource: "databricks_instance_pool", ID: pool.InstancePoolId, }) - log.Printf("[INFO] Imported %d of %d instance pools", i+1, len(pools)) + log.Printf("[INFO] Imported %d instance pools", i) } return nil }, @@ -756,14 +759,16 @@ var resourcesMap map[string]importable = map[string]importable{ if err != nil { return err } - policiesList, err := w.ClusterPolicies.ListAll(ic.Context, compute.ListClusterPoliciesRequest{}) - if err != nil { - return err - } - builtInClusterPolicies := ic.getBuiltinPolicyFamilies() - for offset, policy := range policiesList { - log.Printf("[TRACE] Scanning %d: %v", offset+1, policy) + it := w.ClusterPolicies.List(ic.Context, compute.ListClusterPoliciesRequest{}) + i := 0 + for it.HasNext(ic.Context) { + policy, err := it.Next(ic.Context) + if err != nil { + return err + } + i++ + log.Printf("[TRACE] Scanning %d: %v", i, policy) family, isBuiltin := builtInClusterPolicies[policy.PolicyFamilyId] if policy.PolicyFamilyId != "" && isBuiltin && family.Name == policy.Name && policy.PolicyFamilyDefinitionOverrides == "" { @@ -778,8 +783,8 @@ var resourcesMap map[string]importable = map[string]importable{ Resource: "databricks_cluster_policy", ID: policy.PolicyId, }) - if offset%10 == 0 { - log.Printf("[INFO] Scanned %d of %d cluster policies", offset+1, len(policiesList)) + if i%10 == 0 { + log.Printf("[INFO] Scanned %d cluster policies", i) } } return nil @@ -1741,11 +1746,13 @@ var resourcesMap map[string]importable = map[string]importable{ return name }, List: func(ic *importContext) error { - endpointsList, err := ic.workspaceClient.Warehouses.ListAll(ic.Context, sql.ListWarehousesRequest{}) - if err != nil { - return err - } - for i, q := range endpointsList { + it := ic.workspaceClient.Warehouses.List(ic.Context, sql.ListWarehousesRequest{}) + i := 0 + for it.HasNext(ic.Context) { + q, err := it.Next(ic.Context) + if err != nil { + return err + } if !ic.MatchesName(q.Name) { continue } @@ -1753,7 +1760,8 @@ var resourcesMap map[string]importable = 
map[string]importable{ Resource: "databricks_sql_endpoint", ID: q.Id, }) - log.Printf("[INFO] Imported %d of %d SQL endpoints", i+1, len(endpointsList)) + i++ + log.Printf("[INFO] Imported %d SQL endpoints", i) } return nil }, @@ -2202,11 +2210,13 @@ var resourcesMap map[string]importable = map[string]importable{ return strings.ToLower(d.Id()) + "_" + nameMd5[:8] }, List: func(ic *importContext) error { - endpointsList, err := ic.workspaceClient.ServingEndpoints.ListAll(ic.Context) - if err != nil { - return err - } - for offset, endpoint := range endpointsList { + it := ic.workspaceClient.ServingEndpoints.List(ic.Context) + i := 0 + for it.HasNext(ic.Context) { + endpoint, err := it.Next(ic.Context) + if err != nil { + return err + } if endpoint.Config != nil && endpoint.Config.ServedEntities != nil && len(endpoint.Config.ServedEntities) > 0 { if endpoint.Config.ServedEntities[0].FoundationModel != nil { log.Printf("[INFO] skipping endpoint %s that is foundation model", endpoint.Name) @@ -2217,8 +2227,9 @@ var resourcesMap map[string]importable = map[string]importable{ Resource: "databricks_model_serving", ID: endpoint.Name, }, endpoint.LastUpdatedTimestamp, fmt.Sprintf("serving endpoint '%s'", endpoint.Name)) - if offset%50 == 0 { - log.Printf("[INFO] Scanned %d of %d Serving Endpoints", offset+1, len(endpointsList)) + i++ + if i%50 == 0 { + log.Printf("[INFO] Scanned %d Serving Endpoints", i) } } return nil @@ -2522,12 +2533,12 @@ var resourcesMap map[string]importable = map[string]importable{ if ic.currentMetastore == nil { return fmt.Errorf("there is no UC metastore information") } - - catalogs, err := ic.workspaceClient.Catalogs.ListAll(ic.Context, catalog.ListCatalogsRequest{}) - if err != nil { - return err - } - for _, v := range catalogs { + it := ic.workspaceClient.Catalogs.List(ic.Context, catalog.ListCatalogsRequest{}) + for it.HasNext(ic.Context) { + v, err := it.Next(ic.Context) + if err != nil { + return err + } switch v.CatalogType { case "MANAGED_CATALOG", "FOREIGN_CATALOG", "DELTASHARING_CATALOG": { @@ -2564,12 +2575,13 @@ var resourcesMap map[string]importable = map[string]importable{ } else if cat.ShareName == "" { // TODO: We need to be careful here if we add more catalog types... Really we need to have CatalogType in resource if ic.isServiceInListing("uc-schemas") { - schemas, err := ic.workspaceClient.Schemas.ListAll(ic.Context, catalog.ListSchemasRequest{CatalogName: r.ID}) - if err != nil { - return err - } ignoredSchemas := []string{"information_schema"} - for _, schema := range schemas { + it := ic.workspaceClient.Schemas.List(ic.Context, catalog.ListSchemasRequest{CatalogName: r.ID}) + for it.HasNext(ic.Context) { + schema, err := it.Next(ic.Context) + if err != nil { + return err + } if schema.CatalogType != "MANAGED_CATALOG" || slices.Contains(ignoredSchemas, schema.Name) { continue } @@ -2622,15 +2634,16 @@ var resourcesMap map[string]importable = map[string]importable{ // TODO: somehow add depends on catalog's grant... // TODO: emit owner? See comment in catalog resource if ic.isServiceInListing("uc-models") { - models, err := ic.workspaceClient.RegisteredModels.ListAll(ic.Context, + it := ic.workspaceClient.RegisteredModels.List(ic.Context, catalog.ListRegisteredModelsRequest{ CatalogName: catalogName, SchemaName: schemaName, }) - if err != nil { // TODO: should we continue? - return err - } - for _, model := range models { + for it.HasNext(ic.Context) { + model, err := it.Next(ic.Context) + if err != nil { + return err // TODO: should we continue? 
+ } ic.EmitIfUpdatedAfterMillis(&resource{ Resource: "databricks_registered_model", ID: model.FullName, @@ -2640,15 +2653,16 @@ var resourcesMap map[string]importable = map[string]importable{ } if ic.isServiceInListing("uc-volumes") { // list volumes - volumes, err := ic.workspaceClient.Volumes.ListAll(ic.Context, + it := ic.workspaceClient.Volumes.List(ic.Context, catalog.ListVolumesRequest{ CatalogName: catalogName, SchemaName: schemaName, }) - if err != nil { - return err - } - for _, volume := range volumes { + for it.HasNext(ic.Context) { + volume, err := it.Next(ic.Context) + if err != nil { + return err // TODO: should we continue? + } ic.EmitIfUpdatedAfterMillis(&resource{ Resource: "databricks_volume", ID: volume.FullName, @@ -2658,14 +2672,15 @@ var resourcesMap map[string]importable = map[string]importable{ } if ic.isServiceInListing("uc-tables") { // list tables - tables, err := ic.workspaceClient.Tables.ListAll(ic.Context, catalog.ListTablesRequest{ + it := ic.workspaceClient.Tables.List(ic.Context, catalog.ListTablesRequest{ CatalogName: catalogName, SchemaName: schemaName, }) - if err != nil { - return err - } - for _, table := range tables { + for it.HasNext(ic.Context) { + table, err := it.Next(ic.Context) + if err != nil { + return err // TODO: should we continue? + } switch table.TableType { case "MANAGED", "EXTERNAL", "VIEW": ic.EmitIfUpdatedAfterMillis(&resource{ @@ -2848,11 +2863,12 @@ var resourcesMap map[string]importable = map[string]importable{ return nil }, List: func(ic *importContext) error { - objList, err := ic.workspaceClient.StorageCredentials.ListAll(ic.Context, catalog.ListStorageCredentialsRequest{}) - if err != nil { - return err - } - for _, v := range objList { + it := ic.workspaceClient.StorageCredentials.List(ic.Context, catalog.ListStorageCredentialsRequest{}) + for it.HasNext(ic.Context) { + v, err := it.Next(ic.Context) + if err != nil { + return err + } ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ Resource: "databricks_storage_credential", ID: v.Name, @@ -2890,11 +2906,12 @@ var resourcesMap map[string]importable = map[string]importable{ return nil }, List: func(ic *importContext) error { - objList, err := ic.workspaceClient.ExternalLocations.ListAll(ic.Context, catalog.ListExternalLocationsRequest{}) - if err != nil { - return err - } - for _, v := range objList { + it := ic.workspaceClient.ExternalLocations.List(ic.Context, catalog.ListExternalLocationsRequest{}) + for it.HasNext(ic.Context) { + v, err := it.Next(ic.Context) + if err != nil { + return err + } if v.Name != "metastore_default_location" { ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ Resource: "databricks_external_location", @@ -2930,11 +2947,12 @@ var resourcesMap map[string]importable = map[string]importable{ return connectionType + "_" + connectionName }, List: func(ic *importContext) error { - connections, err := ic.workspaceClient.Connections.ListAll(ic.Context, catalog.ListConnectionsRequest{}) - if err != nil { - return err - } - for _, conn := range connections { + it := ic.workspaceClient.Connections.List(ic.Context, catalog.ListConnectionsRequest{}) + for it.HasNext(ic.Context) { + conn, err := it.Next(ic.Context) + if err != nil { + return err + } ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ Resource: "databricks_connection", ID: conn.MetastoreId + "|" + conn.Name, @@ -2955,11 +2973,12 @@ var resourcesMap map[string]importable = map[string]importable{ WorkspaceLevel: true, Service: "uc-shares", List: func(ic *importContext) error { - shares, err 
:= ic.workspaceClient.Shares.ListAll(ic.Context, sharing.ListSharesRequest{}) - if err != nil { - return err - } - for _, share := range shares { + it := ic.workspaceClient.Shares.List(ic.Context, sharing.ListSharesRequest{}) + for it.HasNext(ic.Context) { + share, err := it.Next(ic.Context) + if err != nil { + return err + } ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ Resource: "databricks_share", ID: share.Name, @@ -3011,11 +3030,12 @@ var resourcesMap map[string]importable = map[string]importable{ WorkspaceLevel: true, Service: "uc-shares", List: func(ic *importContext) error { - recipients, err := ic.workspaceClient.Recipients.ListAll(ic.Context, sharing.ListRecipientsRequest{}) - if err != nil { - return err - } - for _, rec := range recipients { + it := ic.workspaceClient.Recipients.List(ic.Context, sharing.ListRecipientsRequest{}) + for it.HasNext(ic.Context) { + rec, err := it.Next(ic.Context) + if err != nil { + return err + } ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ Resource: "databricks_recipient", ID: rec.Name, @@ -3086,11 +3106,12 @@ var resourcesMap map[string]importable = map[string]importable{ return name }, List: func(ic *importContext) error { - metastores, err := ic.accountClient.Metastores.ListAll(ic.Context) - if err != nil { - return err - } - for _, mstore := range metastores { + it := ic.accountClient.Metastores.List(ic.Context) + for it.HasNext(ic.Context) { + mstore, err := it.Next(ic.Context) + if err != nil { + return err + } ic.EmitIfUpdatedAfterMillisAndNameMatches(&resource{ Resource: "databricks_metastore", ID: mstore.MetastoreId, @@ -3285,23 +3306,27 @@ var resourcesMap map[string]importable = map[string]importable{ WorkspaceLevel: true, Service: "dashboards", List: func(ic *importContext) error { - dashboards, err := ic.workspaceClient.Lakeview.ListAll(ic.Context, dashboards.ListDashboardsRequest{PageSize: 100}) - if err != nil { - return err - } - for i, d := range dashboards { + it := ic.workspaceClient.Lakeview.List(ic.Context, dashboards.ListDashboardsRequest{PageSize: 100}) + i := 0 + for it.HasNext(ic.Context) { + d, err := it.Next(ic.Context) + if err != nil { + return err + } + i++ if !ic.MatchesName(d.DisplayName) { continue } - // TODO: add emit for incremental mode. Use already defined functions for emitting? + // TODO: add emit for incremental mode. 
But this information isn't included into the List response ic.Emit(&resource{ Resource: "databricks_dashboard", ID: d.DashboardId, }) if i%100 == 0 { - log.Printf("[INFO] Processed %d dashboard out of %d", i+1, len(dashboards)) + log.Printf("[INFO] Processed %d dashboards", i) } } + log.Printf("[INFO] Listed %d dashboards", i) return nil }, Name: func(ic *importContext, d *schema.ResourceData) string { @@ -3391,11 +3416,12 @@ var resourcesMap map[string]importable = map[string]importable{ if !ic.meAdmin { return fmt.Errorf("notifications can be imported only by admin") } - notifications, err := ic.workspaceClient.NotificationDestinations.ListAll(ic.Context, settings.ListNotificationDestinationsRequest{}) - if err != nil { - return err - } - for _, n := range notifications { + it := ic.workspaceClient.NotificationDestinations.List(ic.Context, settings.ListNotificationDestinationsRequest{}) + for it.HasNext(ic.Context) { + n, err := it.Next(ic.Context) + if err != nil { + return err + } ic.Emit(&resource{ Resource: "databricks_notification_destination", ID: n.Id, From 6088aa8635031b740b92ab7b30531c136193cf30 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Sun, 20 Oct 2024 16:51:16 +0800 Subject: [PATCH 61/99] [Feature] Added resource `databricks_custom_app_integration` (#4124) ## Changes Add resource `databricks_custom_app_integration` for OAuth custom app integration ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- apps/resource_custom_app_integration.go | 80 ++++++++ apps/resource_custom_app_integration_test.go | 175 ++++++++++++++++++ docs/resources/custom_app_integration.md | 58 ++++++ .../acceptance/custom_app_integration_test.go | 34 ++++ internal/providers/sdkv2/sdkv2.go | 2 + 5 files changed, 349 insertions(+) create mode 100644 apps/resource_custom_app_integration.go create mode 100644 apps/resource_custom_app_integration_test.go create mode 100644 docs/resources/custom_app_integration.md create mode 100644 internal/acceptance/custom_app_integration_test.go diff --git a/apps/resource_custom_app_integration.go b/apps/resource_custom_app_integration.go new file mode 100644 index 0000000000..30a604abb1 --- /dev/null +++ b/apps/resource_custom_app_integration.go @@ -0,0 +1,80 @@ +package apps + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +type CustomAppIntegration struct { + oauth2.GetCustomAppIntegrationOutput + // OAuth client-secret generated by the Databricks. If this is a + // confidential OAuth app client-secret will be generated. 
+ ClientSecret string `json:"client_secret,omitempty"` +} + +func ResourceCustomAppIntegration() common.Resource { + s := common.StructToSchema(CustomAppIntegration{}, func(m map[string]*schema.Schema) map[string]*schema.Schema { + for _, p := range []string{"client_id", "create_time", "created_by", "creator_username", "integration_id"} { + common.CustomizeSchemaPath(m, p).SetComputed() + } + for _, p := range []string{"confidential", "name", "scopes"} { + common.CustomizeSchemaPath(m, p).SetForceNew() + } + common.CustomizeSchemaPath(m, "client_secret").SetSensitive().SetComputed() + common.CustomizeSchemaPath(m, "token_access_policy", "access_token_ttl_in_minutes").SetValidateFunc(validation.IntBetween(5, 1440)) + common.CustomizeSchemaPath(m, "token_access_policy", "refresh_token_ttl_in_minutes").SetValidateFunc(validation.IntBetween(5, 129600)) + return m + }) + return common.Resource{ + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + var create oauth2.CreateCustomAppIntegration + common.DataToStructPointer(d, s, &create) + acc, err := c.AccountClient() + if err != nil { + return err + } + integration, err := acc.CustomAppIntegration.Create(ctx, create) + if err != nil { + return err + } + d.Set("integration_id", integration.IntegrationId) + d.Set("client_id", integration.ClientId) + d.Set("client_secret", integration.ClientSecret) + d.SetId(integration.IntegrationId) + return nil + }, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + acc, err := c.AccountClient() + if err != nil { + return err + } + integration, err := acc.CustomAppIntegration.GetByIntegrationId(ctx, d.Id()) + if err != nil { + return err + } + return common.StructToData(integration, s, d) + }, + Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + var update oauth2.UpdateCustomAppIntegration + update.IntegrationId = d.Id() + common.DataToStructPointer(d, s, &update) + acc, err := c.AccountClient() + if err != nil { + return err + } + return acc.CustomAppIntegration.Update(ctx, update) + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + acc, err := c.AccountClient() + if err != nil { + return err + } + return acc.CustomAppIntegration.DeleteByIntegrationId(ctx, d.Id()) + }, + Schema: s, + } +} diff --git a/apps/resource_custom_app_integration_test.go b/apps/resource_custom_app_integration_test.go new file mode 100644 index 0000000000..37034fad10 --- /dev/null +++ b/apps/resource_custom_app_integration_test.go @@ -0,0 +1,175 @@ +package apps + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/stretchr/testify/mock" + + "github.com/databricks/terraform-provider-databricks/qa" +) + +func TestResourceCustomAppIntegrationCreate(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + api := a.GetMockCustomAppIntegrationAPI().EXPECT() + api.Create(mock.Anything, oauth2.CreateCustomAppIntegration{ + Name: "custom_integration_name", + RedirectUrls: []string{ + "https://example.com", + }, + Scopes: []string{ + "all", + }, + TokenAccessPolicy: &oauth2.TokenAccessPolicy{ + AccessTokenTtlInMinutes: 60, + RefreshTokenTtlInMinutes: 30, + }, + }).Return(&oauth2.CreateCustomAppIntegrationOutput{ + ClientId: "client_id", + ClientSecret: "client_secret", + IntegrationId: "integration_id", + }, nil) + 
api.GetByIntegrationId(mock.Anything, "integration_id").Return( + &oauth2.GetCustomAppIntegrationOutput{ + Name: "custom_integration_name", + RedirectUrls: []string{ + "https://example.com", + }, + Scopes: []string{ + "all", + }, + TokenAccessPolicy: &oauth2.TokenAccessPolicy{ + AccessTokenTtlInMinutes: 60, + RefreshTokenTtlInMinutes: 30, + }, + ClientId: "client_id", + IntegrationId: "integration_id", + }, nil, + ) + }, + Create: true, + AccountID: "account_id", + HCL: ` + name = "custom_integration_name" + redirect_urls = ["https://example.com"] + scopes = ["all"] + token_access_policy { + access_token_ttl_in_minutes = 60 + refresh_token_ttl_in_minutes = 30 + }`, + Resource: ResourceCustomAppIntegration(), + }.ApplyAndExpectData(t, map[string]any{ + "name": "custom_integration_name", + "integration_id": "integration_id", + "client_id": "client_id", + "client_secret": "client_secret", + }) +} + +func TestResourceCustomAppIntegrationRead(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + a.GetMockCustomAppIntegrationAPI().EXPECT().GetByIntegrationId(mock.Anything, "integration_id").Return( + &oauth2.GetCustomAppIntegrationOutput{ + Name: "custom_integration_name", + RedirectUrls: []string{ + "https://example.com", + }, + Scopes: []string{ + "all", + }, + TokenAccessPolicy: &oauth2.TokenAccessPolicy{ + AccessTokenTtlInMinutes: 60, + RefreshTokenTtlInMinutes: 30, + }, + ClientId: "client_id", + IntegrationId: "integration_id", + }, nil, + ) + }, + Resource: ResourceCustomAppIntegration(), + Read: true, + New: true, + AccountID: "account_id", + ID: "integration_id", + }.ApplyAndExpectData(t, map[string]any{ + "name": "custom_integration_name", + "integration_id": "integration_id", + "client_id": "client_id", + }) +} + +func TestResourceCustomAppIntegrationUpdate(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + api := a.GetMockCustomAppIntegrationAPI().EXPECT() + api.Update(mock.Anything, oauth2.UpdateCustomAppIntegration{ + IntegrationId: "integration_id", + RedirectUrls: []string{ + "https://example.com", + }, + TokenAccessPolicy: &oauth2.TokenAccessPolicy{ + AccessTokenTtlInMinutes: 30, + RefreshTokenTtlInMinutes: 30, + }, + }).Return(nil) + api.GetByIntegrationId(mock.Anything, "integration_id").Return( + &oauth2.GetCustomAppIntegrationOutput{ + Name: "custom_integration_name", + RedirectUrls: []string{ + "https://example.com", + }, + Scopes: []string{ + "all", + }, + TokenAccessPolicy: &oauth2.TokenAccessPolicy{ + AccessTokenTtlInMinutes: 30, + RefreshTokenTtlInMinutes: 30, + }, + ClientId: "client_id", + IntegrationId: "integration_id", + }, nil, + ) + }, + Resource: ResourceCustomAppIntegration(), + Update: true, + HCL: ` + name = "custom_integration_name" + redirect_urls = ["https://example.com"] + scopes = ["all"] + token_access_policy { + access_token_ttl_in_minutes = 30 + refresh_token_ttl_in_minutes = 30 + }`, + InstanceState: map[string]string{ + "name": "custom_integration_name", + "integration_id": "integration_id", + "client_id": "client_id", + "scopes.#": "1", + "scopes.0": "all", + "redirect_urls.#": "1", + "redirect_urls.0": "https://example.com", + "token_access_policy.access_token_ttl_in_minutes": "30", + "token_access_policy.refresh_token_ttl_in_minutes": "30", + }, + AccountID: "account_id", + ID: "integration_id", + }.ApplyAndExpectData(t, map[string]any{ + "name": "custom_integration_name", + "token_access_policy.0.access_token_ttl_in_minutes": 30, + }) +} + +func 
TestResourceCustomAppIntegrationDelete(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + a.GetMockCustomAppIntegrationAPI().EXPECT().DeleteByIntegrationId(mock.Anything, "integration_id").Return(nil) + }, + Resource: ResourceCustomAppIntegration(), + AccountID: "account_id", + Delete: true, + ID: "integration_id", + }.ApplyAndExpectData(t, nil) +} diff --git a/docs/resources/custom_app_integration.md b/docs/resources/custom_app_integration.md new file mode 100644 index 0000000000..ffd2b79eb9 --- /dev/null +++ b/docs/resources/custom_app_integration.md @@ -0,0 +1,58 @@ +--- +subcategory: "Apps" +--- +# databricks_custom_app_integration Resource + +-> Initialize provider with `alias = "account"`, and `host` pointing to the account URL, like, `host = "https://accounts.cloud.databricks.com"`. Use `provider = databricks.account` for all account-level resources. + +This resource allows you to enable [custom OAuth applications](https://docs.databricks.com/en/integrations/enable-disable-oauth.html#enable-custom-oauth-applications-using-the-databricks-ui). + +## Example Usage + +```hcl +resource "databricks_custom_app_integration" "this" { + name = "custom_integration_name" + redirect_urls = ["https://example.com"] + scopes = ["all-apis"] + token_access_policy { + access_token_ttl_in_minutes = %s + refresh_token_ttl_in_minutes = 30 + } +} +``` + +## Argument Reference + +The following arguments are available: + +* `name` - (Required) Name of the custom OAuth app. Change requires a new resource. +* `confidential` - Indicates whether an OAuth client secret is required to authenticate this client. Default to `false`. Change requires a new resource. +* `redirect_urls` - List of OAuth redirect urls. +* `scopes` - OAuth scopes granted to the application. Supported scopes: `all-apis`, `sql`, `offline_access`, `openid`, `profile`, `email`. + +### token_access_policy Configuration Block (Optional) + +* `access_token_ttl_in_minutes` - access token time to live (TTL) in minutes. +* `refresh_token_ttl_in_minutes` - refresh token TTL in minutes. The TTL of refresh token cannot be lower than TTL of access token. + +## Attribute Reference + +In addition to all arguments above, the following attributes are exported: + +* `integration_id` - Unique integration id for the custom OAuth app. +* `client_id` - OAuth client-id generated by Databricks +* `client_secret` - OAuth client-secret generated by the Databricks if this is a confidential OAuth app. + +## Import + +This resource can be imported by its integration ID. + +```sh +terraform import databricks_custom_app_integration.this '' +``` + +## Related Resources + +The following resources are used in the context: + +* [databricks_mws_workspaces](mws_workspaces.md) to set up Databricks workspaces. 
diff --git a/internal/acceptance/custom_app_integration_test.go b/internal/acceptance/custom_app_integration_test.go new file mode 100644 index 0000000000..280e5c039b --- /dev/null +++ b/internal/acceptance/custom_app_integration_test.go @@ -0,0 +1,34 @@ +package acceptance + +import ( + "fmt" + "testing" +) + +var ( + customAppIntegrationTemplate = `resource "databricks_custom_app_integration" "this" { + name = "custom_integration_name" + redirect_urls = ["https://example.com"] + scopes = ["all-apis"] + token_access_policy { + access_token_ttl_in_minutes = %s + refresh_token_ttl_in_minutes = 30 + } + }` +) + +func TestMwsAccCustomAppIntegrationCreate(t *testing.T) { + loadAccountEnv(t) + AccountLevel(t, Step{ + Template: fmt.Sprintf(customAppIntegrationTemplate, "30"), + }) +} + +func TestMwsAccCustomAppIntegrationUpdate(t *testing.T) { + loadAccountEnv(t) + AccountLevel(t, Step{ + Template: fmt.Sprintf(customAppIntegrationTemplate, "30"), + }, Step{ + Template: fmt.Sprintf(customAppIntegrationTemplate, "15"), + }) +} diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index 7c90851314..8136901ddf 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -23,6 +23,7 @@ import ( "github.com/databricks/databricks-sdk-go/useragent" "github.com/databricks/terraform-provider-databricks/access" + "github.com/databricks/terraform-provider-databricks/apps" "github.com/databricks/terraform-provider-databricks/aws" "github.com/databricks/terraform-provider-databricks/catalog" "github.com/databricks/terraform-provider-databricks/clusters" @@ -137,6 +138,7 @@ func DatabricksProvider() *schema.Provider { "databricks_budget": finops.ResourceBudget().ToResource(), "databricks_catalog": catalog.ResourceCatalog().ToResource(), "databricks_catalog_workspace_binding": catalog.ResourceCatalogWorkspaceBinding().ToResource(), + "databricks_custom_app_integration": apps.ResourceCustomAppIntegration().ToResource(), "databricks_connection": catalog.ResourceConnection().ToResource(), "databricks_cluster": clusters.ResourceCluster().ToResource(), "databricks_cluster_policy": policies.ResourceClusterPolicy().ToResource(), From 6f70c7329337dd15ad5fb3e7133b50865516bf5c Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Mon, 21 Oct 2024 09:18:23 -0400 Subject: [PATCH 62/99] [Doc] Fix argument in example for `databricks_custom_app_integration` (#4132) ## Changes ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/custom_app_integration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/resources/custom_app_integration.md b/docs/resources/custom_app_integration.md index ffd2b79eb9..01b3c99ff4 100644 --- a/docs/resources/custom_app_integration.md +++ b/docs/resources/custom_app_integration.md @@ -15,7 +15,7 @@ resource "databricks_custom_app_integration" "this" { redirect_urls = ["https://example.com"] scopes = ["all-apis"] token_access_policy { - access_token_ttl_in_minutes = %s + access_token_ttl_in_minutes = 15 refresh_token_ttl_in_minutes = 30 } } From fa3c3de29da5807a3d28eac26f7e557a8638d2b3 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Mon, 21 Oct 2024 09:18:38 -0400 Subject: [PATCH 63/99] [Doc] Fix for UC on AWS guide - use `databricks_aws_unity_catalog_assume_role_policy` where necessary (#4109) ## Changes Resolves #3964 ## Tests - [ ] `make test` run locally - [x] 
relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/guides/unity-catalog.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/unity-catalog.md b/docs/guides/unity-catalog.md index 52c187ff7c..61b1bff7d4 100644 --- a/docs/guides/unity-catalog.md +++ b/docs/guides/unity-catalog.md @@ -262,7 +262,7 @@ resource "aws_iam_policy" "external_data_access" { resource "aws_iam_role" "external_data_access" { name = local.uc_iam_role - assume_role_policy = data.aws_iam_policy_document.this.json + assume_role_policy = data.databricks_aws_unity_catalog_assume_role_policy.this.json managed_policy_arns = [aws_iam_policy.external_data_access.arn] tags = merge(var.tags, { Name = "${local.prefix}-unity-catalog external access IAM role" From df26499c7be5b9b57e5bbb831a6fc62b123db67a Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 23 Oct 2024 06:48:42 -0400 Subject: [PATCH 64/99] [Feature] Handle `schema` attribute in `databricks_pipeline` (#4137) ## Changes The new `schema` attribute was added to support direct publishing mode. Besides documentation we were need to add TF schema customization as it conflicts with the `target` attribute. ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/job.md | 1 - docs/resources/pipeline.md | 3 ++- pipelines/resource_pipeline.go | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/resources/job.md b/docs/resources/job.md index efc6bd8ca7..32759fa931 100644 --- a/docs/resources/job.md +++ b/docs/resources/job.md @@ -372,7 +372,6 @@ This block describes the queue settings of the job: * `periodic` - (Optional) configuration block to define a trigger for Periodic Triggers consisting of the following attributes: * `interval` - (Required) Specifies the interval at which the job should run. This value is required. * `unit` - (Required) Options are {"DAYS", "HOURS", "WEEKS"}. - * `file_arrival` - (Optional) configuration block to define a trigger for [File Arrival events](https://learn.microsoft.com/en-us/azure/databricks/workflows/jobs/file-arrival-triggers) consisting of following attributes: * `url` - (Required) URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (`/`). * `min_time_between_triggers_seconds` - (Optional) If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds. diff --git a/docs/resources/pipeline.md b/docs/resources/pipeline.md index 76a60d75db..28ea211616 100644 --- a/docs/resources/pipeline.md +++ b/docs/resources/pipeline.md @@ -80,7 +80,8 @@ The following arguments are supported: * `photon` - A flag indicating whether to use Photon engine. The default value is `false`. * `serverless` - An optional flag indicating if serverless compute should be used for this DLT pipeline. Requires `catalog` to be set, as it could be used only with Unity Catalog. * `catalog` - The name of catalog in Unity Catalog. *Change of this parameter forces recreation of the pipeline.* (Conflicts with `storage`). -* `target` - The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. 
Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI. +* `target` - (Optional, String, Conflicts with `schema`) The name of a database (in either the Hive metastore or in a UC catalog) for persisting pipeline output data. Configuring the target setting allows you to view and query the pipeline output data from the Databricks UI. +* `schema` - (Optional, String, Conflicts with `target`) The default schema (database) where tables are read from or published to. The presence of this attribute implies that the pipeline is in direct publishing mode. * `edition` - optional name of the [product edition](https://docs.databricks.com/data-engineering/delta-live-tables/delta-live-tables-concepts.html#editions). Supported values are: `CORE`, `PRO`, `ADVANCED` (default). Not required when `serverless` is set to `true`. * `channel` - optional name of the release channel for Spark version used by DLT pipeline. Supported values are: `CURRENT` (default) and `PREVIEW`. * `budget_policy_id` - optional string specifying ID of the budget policy for this DLT pipeline. diff --git a/pipelines/resource_pipeline.go b/pipelines/resource_pipeline.go index d187e43336..ac18eef8ff 100644 --- a/pipelines/resource_pipeline.go +++ b/pipelines/resource_pipeline.go @@ -246,6 +246,8 @@ func (Pipeline) CustomizeSchema(s *common.CustomizableSchema) *common.Customizab s.SchemaPath("storage").SetConflictsWith([]string{"catalog"}) s.SchemaPath("catalog").SetConflictsWith([]string{"storage"}) s.SchemaPath("ingestion_definition", "connection_name").SetConflictsWith([]string{"ingestion_definition.0.ingestion_gateway_id"}) + s.SchemaPath("target").SetConflictsWith([]string{"schema"}) + s.SchemaPath("schema").SetConflictsWith([]string{"target"}) // MinItems fields s.SchemaPath("library").SetMinItems(1) From 03659b6c4e5b82e9e4e701473827fba432c86fc4 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 23 Oct 2024 12:46:05 -0400 Subject: [PATCH 65/99] [Feature] Add `databricks_query` resource instead of `databricks_sql_query` (#4103) ## Changes This PR is built on top of #4051 which should be merged first. The new resource uses the new [Queries API](https://docs.databricks.com/api/workspace/queries/create) instead of the legacy one that will be deprecated. Since the new resource has a slightly different set of parameters, it was decided to create a new resource and deprecate the old one. This resource uses old TF SDK to be compatible with TF exporter (until #4050 is implemented). 
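For reviewers, a minimal usage example of the new resource, mirroring the documentation added in this PR (the referenced warehouse and directory resources are placeholders):

```hcl
resource "databricks_query" "this" {
  warehouse_id = databricks_sql_endpoint.example.id
  display_name = "My Query Name"
  query_text   = "SELECT 42 as value"
  parent_path  = databricks_directory.shared_dir.path
}
```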
TODOs: - Need to discuss how to handle permissions - `sql_query` permissions look like working, but not sure if we should continue to use that API - Support in the exporter will be in a separate PR ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- docs/resources/alert.md | 38 +++-- docs/resources/job.md | 4 +- docs/resources/query.md | 195 ++++++++++++++++++++++++ internal/acceptance/alert_test.go | 20 +-- internal/acceptance/permissions_test.go | 32 +++- internal/acceptance/query_test.go | 32 ++++ internal/acceptance/sql_query_test.go | 2 +- internal/providers/sdkv2/sdkv2.go | 1 + sql/resource_alert.go | 2 +- sql/resource_alert_test.go | 5 + sql/resource_query.go | 169 ++++++++++++++++++++ sql/resource_query_test.go | 153 +++++++++++++++++++ sql/resource_sql_query.go | 3 +- sql/resource_sql_query_test.go | 24 +-- 14 files changed, 637 insertions(+), 43 deletions(-) create mode 100644 docs/resources/query.md create mode 100644 internal/acceptance/query_test.go create mode 100644 sql/resource_query.go create mode 100644 sql/resource_query_test.go diff --git a/docs/resources/alert.md b/docs/resources/alert.md index f15bdaf116..478090892b 100644 --- a/docs/resources/alert.md +++ b/docs/resources/alert.md @@ -13,15 +13,15 @@ resource "databricks_directory" "shared_dir" { } # This will be replaced with new databricks_query resource -resource "databricks_sql_query" "this" { - data_source_id = databricks_sql_endpoint.example.data_source_id - name = "My Query Name" - query = "SELECT 42 as value" - parent = "folders/${databricks_directory.shared_dir.object_id}" +resource "databricks_query" "this" { + warehouse_id = databricks_sql_endpoint.example.id + display_name = "My Query Name" + query_text = "SELECT 42 as value" + parent_path = databricks_directory.shared_dir.path } resource "databricks_alert" "alert" { - query_id = databricks_sql_query.this.id + query_id = databricks_query.this.id display_name = "TF new alert" parent_path = databricks_directory.shared_dir.path condition { @@ -77,7 +77,11 @@ In addition to all the arguments above, the following attributes are exported: ## Migrating from `databricks_sql_alert` resource -Under the hood, the new resource uses the same data as the `databricks_sql_alert`, but is exposed via a different API. This means that we can migrate existing alerts without recreating them. This operation is done in few steps: +Under the hood, the new resource uses the same data as the `databricks_sql_alert`, but is exposed via a different API. This means that we can migrate existing alerts without recreating them. + +-> It's also recommended to migrate to the `databricks_query` resource - see [databricks_query](query.md) for more details. + +This operation is done in few steps: * Record the ID of existing `databricks_sql_alert`, for example, by executing the `terraform state show databricks_sql_alert.alert` command. 
* Create the code for the new implementation by performing the following changes: @@ -109,7 +113,7 @@ we'll have a new resource defined as: ```hcl resource "databricks_alert" "alert" { - query_id = databricks_sql_query.this.id + query_id = databricks_query.this.id display_name = "My Alert" parent_path = databricks_directory.shared_dir.path condition { @@ -179,6 +183,20 @@ resource "databricks_permissions" "alert_usage" { } ``` +## Access Control + +[databricks_permissions](permissions.md#sql-alert-usage) can control which groups or individual users can *Manage*, *Edit*, *Run* or *View* individual alerts. + +```hcl +resource "databricks_permissions" "alert_usage" { + sql_alert_id = databricks_alert.alert.id + access_control { + group_name = "users" + permission_level = "CAN_RUN" + } +} +``` + ## Import This resource can be imported using alert ID: @@ -191,6 +209,6 @@ terraform import databricks_alert.this The following resources are often used in the same context: -* [databricks_sql_query](sql_query.md) to manage Databricks SQL [Queries](https://docs.databricks.com/sql/user/queries/index.html). -* [databricks_sql_endpoint](sql_endpoint.md) to manage Databricks SQL [Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). +* [databricks_query](query.md) to manage [Databricks SQL Queries](https://docs.databricks.com/sql/user/queries/index.html). +* [databricks_sql_endpoint](sql_endpoint.md) to manage [Databricks SQL Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). * [databricks_directory](directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). diff --git a/docs/resources/job.md b/docs/resources/job.md index 32759fa931..e239066a44 100644 --- a/docs/resources/job.md +++ b/docs/resources/job.md @@ -224,14 +224,14 @@ One of the `query`, `dashboard` or `alert` needs to be provided. * `warehouse_id` - (Required) ID of the (the [databricks_sql_endpoint](sql_endpoint.md)) that will be used to execute the task. Only Serverless & Pro warehouses are supported right now. * `parameters` - (Optional) (Map) parameters to be used for each run of this task. The SQL alert task does not support custom parameters. -* `query` - (Optional) block consisting of single string field: `query_id` - identifier of the Databricks SQL Query ([databricks_sql_query](sql_query.md)). +* `query` - (Optional) block consisting of single string field: `query_id` - identifier of the Databricks Query ([databricks_query](query.md)). * `dashboard` - (Optional) block consisting of following fields: * `dashboard_id` - (Required) (String) identifier of the Databricks SQL Dashboard [databricks_sql_dashboard](sql_dashboard.md). * `subscriptions` - (Optional) a list of subscription blocks consisting out of one of the required fields: `user_name` for user emails or `destination_id` - for Alert destination's identifier. * `custom_subject` - (Optional) string specifying a custom subject of email sent. * `pause_subscriptions` - (Optional) flag that specifies if subscriptions are paused or not. * `alert` - (Optional) block consisting of following fields: - * `alert_id` - (Required) (String) identifier of the Databricks SQL Alert. + * `alert_id` - (Required) (String) identifier of the Databricks Alert ([databricks_alert](alert.md)). * `subscriptions` - (Optional) a list of subscription blocks consisting out of one of the required fields: `user_name` for user emails or `destination_id` - for Alert destination's identifier. 
* `pause_subscriptions` - (Optional) flag that specifies if subscriptions are paused or not. * `file` - (Optional) block consisting of single string fields: diff --git a/docs/resources/query.md b/docs/resources/query.md new file mode 100644 index 0000000000..cc8bc90edd --- /dev/null +++ b/docs/resources/query.md @@ -0,0 +1,195 @@ +--- +subcategory: "Databricks SQL" +--- +# databricks_query Resource + +This resource allows you to manage [Databricks SQL Queries](https://docs.databricks.com/en/sql/user/queries/index.html). It supersedes [databricks_sql_query](sql_query.md) resource - see migration guide below for more details. + +## Example Usage + +```hcl +resource "databricks_directory" "shared_dir" { + path = "/Shared/Queries" +} + +# This will be replaced with new databricks_query resource +resource "databricks_query" "this" { + warehouse_id = databricks_sql_endpoint.example.id + display_name = "My Query Name" + query_text = "SELECT 42 as value" + parent_path = databricks_directory.shared_dir.path +} +``` + +## Argument Reference + +The following arguments are available: + +* `query_text` - (Required, String) Text of SQL query. +* `display_name` - (Required, String) Name of the query. +* `warehouse_id` - (Required, String) ID of a SQL warehouse which will be used to execute this query. +* `parent_path` - (Optional, String) The path to a workspace folder containing the query. The default is the user's home folder. If changed, the query will be recreated. +* `owner_user_name` - (Optional, String) Query owner's username. +* `apply_auto_limit` - (Optional, Boolean) Whether to apply a 1000 row limit to the query result. +* `catalog` - (Optional, String) Name of the catalog where this query will be executed. +* `schema` - (Optional, String) Name of the schema where this query will be executed. +* `description` - (Optional, String) General description that conveys additional information about this query such as usage notes. +* `run_as_mode` - (Optional, String) Sets the "Run as" role for the object. +* `tags` - (Optional, List of strings) Tags that will be added to the query. +* `parameter` - (Optional, Block) Query parameter definition. Consists of following attributes (one of `*_value` is required): + * `name` - (Required, String) Literal parameter marker that appears between double curly braces in the query text. + * `title` - (Optional, String) Text displayed in the user-facing parameter widget in the UI. + * `text_value` - (Block) Text parameter value. Consists of following attributes: + * `value` - (Required, String) - actual text value. + * `numeric_value` - (Block) Numeric parameter value. Consists of following attributes: + * `value` - (Required, Double) - actual numeric value. + * `date_value` - (Block) Date query parameter value. Consists of following attributes (Can only specify one of `dynamic_date_value` or `date_value`): + * `date_value` - (String) Manually specified date-time value + * `dynamic_date_value` - (String) Dynamic date-time value based on current date-time. Possible values are `NOW`, `YESTERDAY`. + * `precision` - (Optional, String) Date-time precision to format the value into when the query is run. Possible values are `DAY_PRECISION`, `MINUTE_PRECISION`, `SECOND_PRECISION`. Defaults to `DAY_PRECISION` (`YYYY-MM-DD`). + * `date_range_value` - (Block) Date-range query parameter value. Consists of following attributes (Can only specify one of `dynamic_date_range_value` or `date_range_value`): + * `date_range_value` - (Block) Manually specified date-time range value. 
Consists of the following attributes: + * `start` (Required, String) - begin of the date range. + * `end` (Required, String) - end of the date range. + * `dynamic_date_range_value` - (String) Dynamic date-time range value based on current date-time. Possible values are `TODAY`, `YESTERDAY`, `THIS_WEEK`, `THIS_MONTH`, `THIS_YEAR`, `LAST_WEEK`, `LAST_MONTH`, `LAST_YEAR`, `LAST_HOUR`, `LAST_8_HOURS`, `LAST_24_HOURS`, `LAST_7_DAYS`, `LAST_14_DAYS`, `LAST_30_DAYS`, `LAST_60_DAYS`, `LAST_90_DAYS`, `LAST_12_MONTHS`. + * `start_day_of_week` - (Optional, Int) Specify what day that starts the week. + * `precision` - (Optional, String) Date-time precision to format the value into when the query is run. Possible values are `DAY_PRECISION`, `MINUTE_PRECISION`, `SECOND_PRECISION`. Defaults to `DAY_PRECISION` (`YYYY-MM-DD`). + * `enum_value` - (Block) Dropdown parameter value. Consists of following attributes: + * `enum_options` - (String) List of valid query parameter values, newline delimited. + * `values` - (Array of strings) List of selected query parameter values. + * `multi_values_options` - (Optional, Block) If specified, allows multiple values to be selected for this parameter. Consists of following attributes: + * `prefix` - (Optional, String) Character that prefixes each selected parameter value. + * `separator` - (Optional, String) Character that separates each selected parameter value. Defaults to a comma. + * `suffix` - (Optional, String) Character that suffixes each selected parameter value. + * `query_backed_value` - (Block) Query-based dropdown parameter value. Consists of following attributes: + * `query_id` - (Required, String) ID of the query that provides the parameter values. + * `values` - (Array of strings) List of selected query parameter values. + * `multi_values_options` - (Optional, Block) If specified, allows multiple values to be selected for this parameter. Consists of following attributes: + * `prefix` - (Optional, String) Character that prefixes each selected parameter value. + * `separator` - (Optional, String) Character that separates each selected parameter value. Defaults to a comma. + * `suffix` - (Optional, String) Character that suffixes each selected parameter value. + +## Attribute Reference + +In addition to all the arguments above, the following attributes are exported: + +* `id` - unique ID of the created Query. +* `lifecycle_state` - The workspace state of the query. Used for tracking trashed status. (Possible values are `ACTIVE` or `TRASHED`). +* `last_modifier_user_name` - Username of the user who last saved changes to this query. +* `create_time` - The timestamp string indicating when the query was created. +* `update_time` - The timestamp string indicating when the query was updated. + +## Migrating from `databricks_sql_query` resource + +Under the hood, the new resource uses the same data as the `databricks_sql_query`, but exposed via different API. This means that we can migrate existing queries without recreating them. This operation is done in few steps: + +* Record the ID of existing `databricks_sql_query`, for example, by executing the `terraform state show databricks_sql_query.query` command. +* Create the code for the new implementation performing following changes: + * the `name` attribute is now named `display_name` + * the `parent` (if exists) is renamed to `parent_path` attribute, and should be converted from `folders/object_id` to the actual path. + * Blocks that specify values in the `parameter` block were renamed (see above). 
+ +For example, if we have the original `databricks_sql_query` defined as: + +```hcl +resource "databricks_sql_query" "query" { + data_source_id = databricks_sql_endpoint.example.data_source_id + query = "select 42 as value" + name = "My Query" + parent = "folders/${databricks_directory.shared_dir.object_id}" + + parameter { + name = "p1" + title = "Title for p1" + text { + value = "default" + } + } +} +``` + +we'll have a new resource defined as: + +```hcl +resource "databricks_query" "query" { + warehouse_id = databricks_sql_endpoint.example.id + query_text = "select 42 as value" + display_name = "My Query" + parent_path = databricks_directory.shared_dir.path + + parameter { + name = "p1" + title = "Title for p1" + text_value { + value = "default" + } + } +} +``` + +### For Terraform version >= 1.7.0 + +Terraform 1.7 introduced the [removed](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) block in addition to the [import](https://developer.hashicorp.com/terraform/language/import) block introduced in Terraform 1.5. Together they make import and removal of resources easier, avoiding manual execution of `terraform import` and `terraform state rm` commands. + +So with Terraform 1.7+, the migration looks as the following: + +* remove the old query definition and replace it with the new one. +* Adjust references, like, `databricks_permissions`. +* Add `import` and `removed` blocks like this: + +```hcl +import { + to = databricks_query.query + id = "" +} + +removed { + from = databricks_sql_query.query + + lifecycle { + destroy = false + } +} +``` + +* Run the `terraform plan` command to check possible changes, such as value type change, etc. +* Run the `terraform apply` command to apply changes. +* Remove the `import` and `removed` blocks from the code. + +### For Terraform version < 1.7.0 + +* Remove the old query definition and replace it with the new one. +* Remove the old resource from the state with the `terraform state rm databricks_sql_query.query` command. +* Import new resource with the `terraform import databricks_query.query ` command. +* Adjust references, like, `databricks_permissions`. +* Run the `terraform plan` command to check possible changes, such as value type change, etc. + +## Access Control + +[databricks_permissions](permissions.md#sql-query-usage) can control which groups or individual users can *Manage*, *Edit*, *Run* or *View* individual queries. + +```hcl +resource "databricks_permissions" "query_usage" { + sql_query_id = databricks_query.query.id + access_control { + group_name = "users" + permission_level = "CAN_RUN" + } +} +``` + +## Import + +This resource can be imported using query ID: + +```bash +terraform import databricks_query.this +``` + +## Related Resources + +The following resources are often used in the same context: + +* [databricks_alert](alert.md) to manage [Databricks SQL Alerts](https://docs.databricks.com/en/sql/user/alerts/index.html). +* [databricks_sql_endpoint](sql_endpoint.md) to manage [Databricks SQL Endpoints](https://docs.databricks.com/sql/admin/sql-endpoints.html). +* [databricks_directory](directory.md) to manage directories in [Databricks Workpace](https://docs.databricks.com/workspace/workspace-objects.html). 
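+
+The usage and migration examples above only exercise the simple `text_value` parameter type. The following is a minimal sketch of the richer parameter types from the argument reference (the `orders` table, its columns, and the parameter names are placeholders, not part of any real schema): an `enum_value` dropdown combined with a dynamic `date_value`.
+
+```hcl
+resource "databricks_query" "orders_by_status" {
+  warehouse_id = databricks_sql_endpoint.example.id
+  display_name = "Orders by status"
+  # Parameter markers appear between double curly braces in the query text.
+  query_text   = "SELECT count(*) AS cnt FROM orders WHERE status = {{ status }} AND order_date >= {{ date_from }}"
+
+  parameter {
+    name  = "status"
+    title = "Order status"
+    enum_value {
+      # Allowed values are newline-delimited; `values` holds the current selection.
+      enum_options = "open\nshipped\nclosed"
+      values       = ["open"]
+    }
+  }
+
+  parameter {
+    name  = "date_from"
+    title = "Orders created after"
+    date_value {
+      # Resolved relative to the current date-time each time the query runs.
+      dynamic_date_value = "YESTERDAY"
+    }
+  }
+}
+```
+
+Because `dynamic_date_value` is resolved when the query runs, re-running it always picks up the current date without editing the resource.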
diff --git a/internal/acceptance/alert_test.go b/internal/acceptance/alert_test.go index 22ed542468..b7ffe2e4c5 100644 --- a/internal/acceptance/alert_test.go +++ b/internal/acceptance/alert_test.go @@ -7,14 +7,14 @@ import ( func TestAccAlert(t *testing.T) { WorkspaceLevel(t, Step{ Template: ` - resource "databricks_sql_query" "this" { - data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" - name = "tf-{var.RANDOM}" - query = "SELECT 1 AS p1, 2 as p2" + resource "databricks_query" "this" { + warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" + display_name = "tf-{var.RANDOM}" + query_text = "SELECT 1 AS p1, 2 as p2" } resource "databricks_alert" "alert" { - query_id = databricks_sql_query.this.id + query_id = databricks_query.this.id display_name = "tf-alert-{var.RANDOM}" condition { op = "EQUAL" @@ -33,14 +33,14 @@ func TestAccAlert(t *testing.T) { `, }, Step{ Template: ` - resource "databricks_sql_query" "this" { - data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" - name = "tf-{var.RANDOM}" - query = "SELECT 1 AS p1, 2 as p2" + resource "databricks_query" "this" { + warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" + display_name = "tf-{var.RANDOM}" + query_text = "SELECT 1 AS p1, 2 as p2" } resource "databricks_alert" "alert" { - query_id = databricks_sql_query.this.id + query_id = databricks_query.this.id display_name = "tf-alert-{var.RANDOM}" condition { op = "GREATER_THAN" diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 7c5da72512..2033a100ad 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -841,20 +841,20 @@ func TestAccPermissions_ServingEndpoint(t *testing.T) { func TestAccPermissions_Alert(t *testing.T) { loadDebugEnvIfRunsFromIDE(t, "workspace") alertTemplate := ` - resource "databricks_sql_query" "this" { - name = "{var.STICKY_RANDOM}-query" - query = "SELECT 1 AS p1, 2 as p2" - data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" + resource "databricks_query" "this" { + display_name = "{var.STICKY_RANDOM}-query" + query_text = "SELECT 1 AS p1, 2 as p2" + warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" } resource "databricks_alert" "this" { - query_id = databricks_sql_query.this.id + query_id = databricks_query.this.id display_name = "{var.STICKY_RANDOM}-alert" condition { op = "GREATER_THAN" operand { column { - name = "value" + name = "p1" } } threshold { @@ -876,3 +876,23 @@ func TestAccPermissions_Alert(t *testing.T) { ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for alert, allowed levels: CAN_MANAGE"), }) } + +func TestAccPermissions_Query(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + queryTemplate := ` + resource "databricks_query" "this" { + display_name = "{var.STICKY_RANDOM}-query" + query_text = "SELECT 1 AS p1, 2 as p2" + warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" + }` + WorkspaceLevel(t, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_query.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_query.this.id", + currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_query.this.id", + currentPrincipalPermission(t, "CAN_VIEW"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + 
ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for query, allowed levels: CAN_MANAGE"), + }) +} diff --git a/internal/acceptance/query_test.go b/internal/acceptance/query_test.go new file mode 100644 index 0000000000..72230d3373 --- /dev/null +++ b/internal/acceptance/query_test.go @@ -0,0 +1,32 @@ +package acceptance + +import ( + "testing" +) + +func TestAccQuery(t *testing.T) { + WorkspaceLevel(t, Step{ + Template: ` + resource "databricks_query" "this" { + warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" + display_name = "tf-{var.RANDOM}" + query_text = "SELECT 1 AS p1, 2 as p2" + } +`, + }, Step{ + Template: ` + resource "databricks_query" "this" { + warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" + display_name = "tf-{var.RANDOM}" + query_text = "SELECT 1 AS p1, 2 as p2" + parameter { + name = "foo" + text_value { + value = "bar" + } + title = "foo" + } + } +`, + }) +} diff --git a/internal/acceptance/sql_query_test.go b/internal/acceptance/sql_query_test.go index bc49c9ee6f..156374db1c 100644 --- a/internal/acceptance/sql_query_test.go +++ b/internal/acceptance/sql_query_test.go @@ -4,7 +4,7 @@ import ( "testing" ) -func TestAccQuery(t *testing.T) { +func TestAccSqlQuery(t *testing.T) { WorkspaceLevel(t, Step{ Template: ` resource "databricks_sql_query" "q1" { diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index 8136901ddf..d40d663ee2 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -191,6 +191,7 @@ func DatabricksProvider() *schema.Provider { "databricks_pipeline": pipelines.ResourcePipeline().ToResource(), "databricks_provider": sharing.ResourceProvider().ToResource(), "databricks_quality_monitor": catalog.ResourceQualityMonitor().ToResource(), + "databricks_query": sql.ResourceQuery().ToResource(), "databricks_recipient": sharing.ResourceRecipient().ToResource(), "databricks_registered_model": catalog.ResourceRegisteredModel().ToResource(), "databricks_repo": repos.ResourceRepo().ToResource(), diff --git a/sql/resource_alert.go b/sql/resource_alert.go index 03281d5006..16022548a4 100644 --- a/sql/resource_alert.go +++ b/sql/resource_alert.go @@ -19,7 +19,6 @@ func ResourceAlert() common.Resource { // TODO: can we automatically generate it from SDK? Or should we avoid validation at all? 
common.CustomizeSchemaPath(m, "condition", "op").SetRequired().SetValidateFunc(validation.StringInSlice([]string{ "GREATER_THAN", "GREATER_THAN_OR_EQUAL", "LESS_THAN", "LESS_THAN_OR_EQUAL", "EQUAL", "NOT_EQUAL", "IS_NULL"}, true)) - common.CustomizeSchemaPath(m, "condition", "op").SetRequired() common.CustomizeSchemaPath(m, "parent_path").SetCustomSuppressDiff(common.WorkspaceOrEmptyPathPrefixDiffSuppress).SetForceNew() common.CustomizeSchemaPath(m, "condition", "operand").SetRequired() common.CustomizeSchemaPath(m, "condition", "operand", "column").SetRequired() @@ -39,6 +38,7 @@ func ResourceAlert() common.Resource { strings.TrimPrefix(f, "condition.0.threshold.0.value.0.")).SetExactlyOneOf(alof) } common.CustomizeSchemaPath(m, "owner_user_name").SetSuppressDiff() + common.CustomizeSchemaPath(m, "notify_on_ok").SetDefault(true) common.CustomizeSchemaPath(m, "id").SetReadOnly() common.CustomizeSchemaPath(m, "create_time").SetReadOnly() common.CustomizeSchemaPath(m, "lifecycle_state").SetReadOnly() diff --git a/sql/resource_alert_test.go b/sql/resource_alert_test.go index f0559434b3..31ef7ed05d 100644 --- a/sql/resource_alert_test.go +++ b/sql/resource_alert_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/sql" "github.com/databricks/terraform-provider-databricks/qa" @@ -31,6 +32,7 @@ var ( }, }, ParentPath: "/Workspace/Shared/Alerts", + NotifyOnOk: true, } createHcl = `query_id = "123456" display_name = "TF new alert" @@ -54,6 +56,7 @@ var ( QueryId: "123456", DisplayName: "TF new alert", ParentPath: "/Shared/Alerts", + NotifyOnOk: true, Condition: &sql.AlertCondition{ Op: "GREATER_THAN", Operand: &sql.AlertConditionOperand{ @@ -194,6 +197,7 @@ func TestAlertUpdate(t *testing.T) { QueryId: "123456", DisplayName: "TF new alert", OwnerUserName: "user@domain.com", + NotifyOnOk: false, Condition: &sql.AlertCondition{ Op: "GREATER_THAN", Operand: &sql.AlertConditionOperand{ @@ -223,6 +227,7 @@ func TestAlertUpdate(t *testing.T) { HCL: `query_id = "123456" display_name = "TF new alert" owner_user_name = "user@domain.com" + notify_on_ok = false condition { op = "GREATER_THAN" operand { diff --git a/sql/resource_query.go b/sql/resource_query.go new file mode 100644 index 0000000000..80a69a385c --- /dev/null +++ b/sql/resource_query.go @@ -0,0 +1,169 @@ +package sql + +import ( + "context" + "log" + "strings" + + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +// Need a struct for Query because there are aliases we need and it'll be needed in the create method. 
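+// The SDK's "parameters" field is exposed in HCL as repeated "parameter" blocks
+// via queryAliasMap below; queryCreateStruct and queryUpdateStruct reuse the same
+// alias map so that DataToStructPointer resolves the blocks consistently when
+// building create and update requests.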
+type queryStruct struct { + sql.Query +} + +var queryAliasMap = map[string]string{ + "parameters": "parameter", +} + +func (queryStruct) Aliases() map[string]map[string]string { + return map[string]map[string]string{ + "sql.queryStruct": queryAliasMap, + } +} + +func (queryStruct) CustomizeSchema(m *common.CustomizableSchema) *common.CustomizableSchema { + m.SchemaPath("display_name").SetRequired().SetValidateFunc(validation.StringIsNotWhiteSpace) + m.SchemaPath("query_text").SetRequired() + m.SchemaPath("warehouse_id").SetRequired().SetValidateFunc(validation.StringIsNotWhiteSpace) + m.SchemaPath("parent_path").SetCustomSuppressDiff(common.WorkspaceOrEmptyPathPrefixDiffSuppress).SetForceNew() + m.SchemaPath("owner_user_name").SetSuppressDiff() + m.SchemaPath("run_as_mode").SetSuppressDiff() + //m.SchemaPath("").SetSuppressDiff() + //m.SchemaPath("").SetSuppressDiff() + m.SchemaPath("id").SetReadOnly() + m.SchemaPath("create_time").SetReadOnly() + m.SchemaPath("lifecycle_state").SetReadOnly() + m.SchemaPath("last_modifier_user_name").SetReadOnly() + m.SchemaPath("update_time").SetReadOnly() + + // customize parameters + m.SchemaPath("parameter", "name").SetRequired().SetValidateFunc(validation.StringIsNotWhiteSpace) + m.SchemaPath("parameter", "date_range_value", "precision").SetSuppressDiff() + m.SchemaPath("parameter", "date_value", "precision").SetSuppressDiff() + m.SchemaPath("parameter", "query_backed_value", "query_id").SetRequired() + m.SchemaPath("parameter", "text_value", "value").SetRequired() + m.SchemaPath("parameter", "numeric_value", "value").SetRequired() + // TODO: fix setting of AtLeastOneOf + // valuesAlof := []string{ + // "parameter.0.date_range_value", + // "parameter.0.date_value", + // "parameter.0.query_backed_value", + // "parameter.0.text_value", + // "parameter.0.numeric_value", + // "parameter.0.enum_value", + // } + // for _, f := range valuesAlof { + // m.SchemaPath("parameter", strings.TrimPrefix(f, "parameter.0.")).SetAtLeastOneOf(valuesAlof) + // } + return m +} + +type queryCreateStruct struct { + sql.CreateQueryRequestQuery +} + +func (queryCreateStruct) Aliases() map[string]map[string]string { + return map[string]map[string]string{ + "sql.queryCreateStruct": queryAliasMap, + } +} + +func (queryCreateStruct) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema { + return s +} + +type queryUpdateStruct struct { + sql.UpdateQueryRequestQuery +} + +func (queryUpdateStruct) Aliases() map[string]map[string]string { + return map[string]map[string]string{ + "sql.queryUpdateStruct": queryAliasMap, + } +} + +func (queryUpdateStruct) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema { + return s +} + +func ResourceQuery() common.Resource { + s := common.StructToSchema(queryStruct{}, nil) + return common.Resource{ + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var q queryCreateStruct + common.DataToStructPointer(d, s, &q) + apiQuery, err := w.Queries.Create(ctx, sql.CreateQueryRequest{ + Query: &q.CreateQueryRequestQuery, + }) + if err != nil { + return err + } + d.SetId(apiQuery.Id) + owner := d.Get("owner_user_name").(string) + if owner != "" { + _, err = w.Queries.Update(ctx, sql.UpdateQueryRequest{ + Query: &sql.UpdateQueryRequestQuery{ + OwnerUserName: owner, + }, + Id: apiQuery.Id, + UpdateMask: "owner_user_name", + }) + } + return err + }, + Read: func(ctx context.Context, d *schema.ResourceData, c 
*common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + apiQuery, err := w.Queries.GetById(ctx, d.Id()) + if err != nil { + log.Printf("[WARN] error getting query by ID: %v", err) + return err + } + parentPath := d.Get("parent_path").(string) + if parentPath != "" && strings.HasPrefix(apiQuery.ParentPath, "/Workspace") && !strings.HasPrefix(parentPath, "/Workspace") { + apiQuery.ParentPath = strings.TrimPrefix(parentPath, "/Workspace") + } + return common.StructToData(queryStruct{Query: *apiQuery}, s, d) + }, + Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var q queryUpdateStruct + common.DataToStructPointer(d, s, &q) + updateMask := "display_name,query_text,warehouse_id,parameters" + for _, f := range []string{"run_as_mode", "owner_user_name", "description", "tags", + "apply_auto_limit", "catalog", "schema"} { + if d.HasChange(f) { + updateMask += "," + f + } + } + _, err = w.Queries.Update(ctx, sql.UpdateQueryRequest{ + Query: &q.UpdateQueryRequestQuery, + Id: d.Id(), + UpdateMask: updateMask, + }) + return err + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + return w.Queries.DeleteById(ctx, d.Id()) + }, + Schema: s, + } +} diff --git a/sql/resource_query_test.go b/sql/resource_query_test.go new file mode 100644 index 0000000000..ccc3c13608 --- /dev/null +++ b/sql/resource_query_test.go @@ -0,0 +1,153 @@ +package sql + +import ( + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/mock" +) + +var ( + queryResponse = sql.Query{ + Id: "7890", + WarehouseId: "123456", + DisplayName: "TF new query", + OwnerUserName: "user@domain.com", + ParentPath: "/Workspace/Shared/Querys", + QueryText: "select 42 as value", + } + createQueryHcl = `warehouse_id = "123456" + query_text = "select 42 as value" + display_name = "TF new query" + parent_path = "/Shared/Querys" + owner_user_name = "user@domain.com" +` + createQueryRequest = sql.CreateQueryRequest{ + Query: &sql.CreateQueryRequestQuery{ + WarehouseId: "123456", + QueryText: "select 42 as value", + DisplayName: "TF new query", + ParentPath: "/Shared/Querys", + }} +) + +func TestQueryCreate(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockQueriesAPI().EXPECT() + e.Create(mock.Anything, createQueryRequest).Return(&queryResponse, nil) + e.Update(mock.Anything, sql.UpdateQueryRequest{ + Id: "7890", + UpdateMask: "owner_user_name", + Query: &sql.UpdateQueryRequestQuery{ + OwnerUserName: "user@domain.com", + }, + }).Return(&queryResponse, nil) + e.GetById(mock.Anything, "7890").Return(&queryResponse, nil) + }, + Resource: ResourceQuery(), + Create: true, + HCL: createQueryHcl, + }.ApplyAndExpectData(t, map[string]any{ + "id": "7890", + "warehouse_id": "123456", + "display_name": "TF new query", + "owner_user_name": "user@domain.com", + }) +} + +func TestQueryCreate_Error(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockQueriesAPI().EXPECT() + e.Create(mock.Anything, createQueryRequest).Return(nil, 
&apierr.APIError{ + StatusCode: http.StatusBadRequest, + Message: "bad payload", + }) + }, + Resource: ResourceQuery(), + Create: true, + HCL: createQueryHcl, + }.ExpectError(t, "bad payload") +} + +func TestQueryRead_Import(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockQueriesAPI().EXPECT().GetById(mock.Anything, "7890").Return(&queryResponse, nil) + }, + Resource: ResourceQuery(), + Read: true, + ID: "7890", + New: true, + }.ApplyAndExpectData(t, map[string]any{ + "id": "7890", + "warehouse_id": "123456", + "query_text": "select 42 as value", + "display_name": "TF new query", + "owner_user_name": "user@domain.com", + }) +} + +func TestQueryRead_Error(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockQueriesAPI().EXPECT().GetById(mock.Anything, "7890").Return(nil, &apierr.APIError{ + StatusCode: http.StatusBadRequest, + Message: "bad payload", + }) + }, + Resource: ResourceQuery(), + Read: true, + ID: "7890", + New: true, + }.ExpectError(t, "bad payload") +} + +func TestQueryDelete(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + w.GetMockQueriesAPI().EXPECT().DeleteById(mock.Anything, "7890").Return(nil) + }, + Resource: ResourceQuery(), + Delete: true, + ID: "7890", + New: true, + }.ApplyNoError(t) +} + +func TestQueryUpdate(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockQueriesAPI().EXPECT() + e.Update(mock.Anything, sql.UpdateQueryRequest{ + Id: "7890", + UpdateMask: "display_name,query_text,warehouse_id,parameters,owner_user_name", + Query: &sql.UpdateQueryRequestQuery{ + WarehouseId: "123456", + DisplayName: "TF new query", + OwnerUserName: "user@domain.com", + QueryText: "select 42 as value", + }}).Return(&queryResponse, nil) + e.GetById(mock.Anything, "7890").Return(&queryResponse, nil) + }, + Resource: ResourceQuery(), + Update: true, + ID: "7890", + HCL: `warehouse_id = "123456" + query_text = "select 42 as value" + display_name = "TF new query" + owner_user_name = "user@domain.com" +`, + }.ApplyAndExpectData(t, map[string]any{ + "id": "7890", + "warehouse_id": "123456", + "query_text": "select 42 as value", + "display_name": "TF new query", + "owner_user_name": "user@domain.com", + }) +} diff --git a/sql/resource_sql_query.go b/sql/resource_sql_query.go index 9359098df9..07b438aa31 100644 --- a/sql/resource_sql_query.go +++ b/sql/resource_sql_query.go @@ -587,6 +587,7 @@ func ResourceSqlQuery() common.Resource { Delete: func(ctx context.Context, data *schema.ResourceData, c *common.DatabricksClient) error { return NewQueryAPI(ctx, c).Delete(data.Id()) }, - Schema: s, + Schema: s, + DeprecationMessage: "This resource is deprecated and will be removed in the future. 
Please use the `databricks_query` resource instead.", } } diff --git a/sql/resource_sql_query_test.go b/sql/resource_sql_query_test.go index 227373fcbb..6417731003 100644 --- a/sql/resource_sql_query_test.go +++ b/sql/resource_sql_query_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestQueryCreate(t *testing.T) { +func TestSqlQueryCreate(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ { @@ -65,7 +65,7 @@ func TestQueryCreate(t *testing.T) { assert.Equal(t, "viewer", d.Get("run_as_role")) } -func TestQueryCreateWithMultipleSchedules(t *testing.T) { +func TestSqlQueryCreateWithMultipleSchedules(t *testing.T) { qa.ResourceFixture{ Resource: ResourceSqlQuery(), Create: true, @@ -84,10 +84,10 @@ func TestQueryCreateWithMultipleSchedules(t *testing.T) { } } `, - }.ExpectError(t, "invalid config supplied. [schedule.#.continuous] Conflicting configuration arguments. [schedule.#.daily] Conflicting configuration arguments. [schedule] Argument is deprecated") + }.ExpectError(t, "invalid config supplied. [schedule.#.continuous] Conflicting configuration arguments. [schedule.#.daily] Conflicting configuration arguments. [schedule] Argument is deprecated. Deprecated Resource") } -func TestQueryCreateWithContinuousSchedule(t *testing.T) { +func TestSqlQueryCreateWithContinuousSchedule(t *testing.T) { intervalSeconds := 3600 untilDate := "2021-04-21" @@ -149,7 +149,7 @@ func TestQueryCreateWithContinuousSchedule(t *testing.T) { assert.Equal(t, untilDate, d.Get("schedule.0.continuous.0.until_date")) } -func TestQueryCreateWithDailySchedule(t *testing.T) { +func TestSqlQueryCreateWithDailySchedule(t *testing.T) { intervalDays := 2 intervalSeconds := intervalDays * 24 * 60 * 60 timeOfDay := "06:00" @@ -215,7 +215,7 @@ func TestQueryCreateWithDailySchedule(t *testing.T) { assert.Equal(t, untilDate, d.Get("schedule.0.daily.0.until_date")) } -func TestQueryCreateWithWeeklySchedule(t *testing.T) { +func TestSqlQueryCreateWithWeeklySchedule(t *testing.T) { intervalWeeks := 2 intervalSeconds := intervalWeeks * 7 * 24 * 60 * 60 timeOfDay := "06:00" @@ -284,7 +284,7 @@ func TestQueryCreateWithWeeklySchedule(t *testing.T) { assert.Equal(t, untilDate, d.Get("schedule.0.weekly.0.until_date")) } -func TestQueryCreateDeletesDefaultVisualization(t *testing.T) { +func TestSqlQueryCreateDeletesDefaultVisualization(t *testing.T) { _, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ { @@ -338,7 +338,7 @@ func TestQueryCreateDeletesDefaultVisualization(t *testing.T) { assert.NoError(t, err) } -func TestQueryRead(t *testing.T) { +func TestSqlQueryRead(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ { @@ -363,7 +363,7 @@ func TestQueryRead(t *testing.T) { assert.Equal(t, "foo", d.Id()) } -func TestQueryReadWithSchedule(t *testing.T) { +func TestSqlQueryReadWithSchedule(t *testing.T) { // Note: this tests that if a schedule is returned by the API, // it will always show up in the resulting resource data. 
// If it doesn't, we wouldn't be able to erase a schedule @@ -390,7 +390,7 @@ func TestQueryReadWithSchedule(t *testing.T) { assert.Equal(t, 12345, d.Get("schedule.0.continuous.0.interval_seconds")) } -func TestQueryUpdate(t *testing.T) { +func TestSqlQueryUpdate(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ { @@ -436,7 +436,7 @@ func TestQueryUpdate(t *testing.T) { assert.Equal(t, "SELECT 2", d.Get("query")) } -func TestQueryUpdateWithParams(t *testing.T) { +func TestSqlQueryUpdateWithParams(t *testing.T) { body := api.Query{ ID: "foo", DataSourceID: "xyz", @@ -679,7 +679,7 @@ func TestQueryUpdateWithParams(t *testing.T) { assert.Len(t, d.Get("parameter").([]any), 12) } -func TestQueryDelete(t *testing.T) { +func TestSqlQueryDelete(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ { From 962c436912e123bae491ab19bb214851ec1d1945 Mon Sep 17 00:00:00 2001 From: Omer Lachish <289488+rauchy@users.noreply.github.com> Date: Thu, 24 Oct 2024 09:58:37 +0200 Subject: [PATCH 66/99] [Release] Release v1.55.0 (#4148) ### New Features and Improvements * Add `databricks_alert` resource to replace `databricks_sql_alert` ([#4051](https://github.com/databricks/terraform-provider-databricks/pull/4051)). * Add `databricks_query` resource instead of `databricks_sql_query` ([#4103](https://github.com/databricks/terraform-provider-databricks/pull/4103)). * Added resource `databricks_custom_app_integration` ([#4124](https://github.com/databricks/terraform-provider-databricks/pull/4124)). * Handle `schema` attribute in `databricks_pipeline` ([#4137](https://github.com/databricks/terraform-provider-databricks/pull/4137)). ### Bug Fixes * Change repo used in test ([#4122](https://github.com/databricks/terraform-provider-databricks/pull/4122)). ### Documentation * Clarify that `graviton` option of `databricks_node_type` could be used on Azure ([#4125](https://github.com/databricks/terraform-provider-databricks/pull/4125)). * Fix argument in example for `databricks_custom_app_integration` ([#4132](https://github.com/databricks/terraform-provider-databricks/pull/4132)). * Fix for UC on AWS guide - use `databricks_aws_unity_catalog_assume_role_policy` where necessary ([#4109](https://github.com/databricks/terraform-provider-databricks/pull/4109)). ### Exporter * **Breaking change**: Move `databricks_workspace_file` to a separate service ([#4118](https://github.com/databricks/terraform-provider-databricks/pull/4118)). * Exclude some system schemas from export ([#4121](https://github.com/databricks/terraform-provider-databricks/pull/4121)). * Use `List` + iteration instead of call to `ListAll` ([#4123](https://github.com/databricks/terraform-provider-databricks/pull/4123)). Co-authored-by: Omer Lachish --- CHANGELOG.md | 29 +++++++++++++++++++++++++++++ common/version.go | 2 +- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f7b4ef3b8..77a6da9f8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,34 @@ # Version changelog +## [Release] Release v1.55.0 + +### New Features and Improvements + + * Add `databricks_alert` resource to replace `databricks_sql_alert` ([#4051](https://github.com/databricks/terraform-provider-databricks/pull/4051)). + * Add `databricks_query` resource instead of `databricks_sql_query` ([#4103](https://github.com/databricks/terraform-provider-databricks/pull/4103)). 
+ * Added resource `databricks_custom_app_integration` ([#4124](https://github.com/databricks/terraform-provider-databricks/pull/4124)). + * Handle `schema` attribute in `databricks_pipeline` ([#4137](https://github.com/databricks/terraform-provider-databricks/pull/4137)). + + +### Bug Fixes + + * Change repo used in test ([#4122](https://github.com/databricks/terraform-provider-databricks/pull/4122)). + + +### Documentation + + * Clarify that `graviton` option of `databricks_node_type` could be used on Azure ([#4125](https://github.com/databricks/terraform-provider-databricks/pull/4125)). + * Fix argument in example for `databricks_custom_app_integration` ([#4132](https://github.com/databricks/terraform-provider-databricks/pull/4132)). + * Fix for UC on AWS guide - use `databricks_aws_unity_catalog_assume_role_policy` where necessary ([#4109](https://github.com/databricks/terraform-provider-databricks/pull/4109)). + + +### Exporter + + * **Breaking change**: Move `databricks_workspace_file` to a separate service ([#4118](https://github.com/databricks/terraform-provider-databricks/pull/4118)). + * Exclude some system schemas from export ([#4121](https://github.com/databricks/terraform-provider-databricks/pull/4121)). + * Use `List` + iteration instead of call to `ListAll` ([#4123](https://github.com/databricks/terraform-provider-databricks/pull/4123)). + + ## [Release] Release v1.54.0 ### New Features and Improvements diff --git a/common/version.go b/common/version.go index adf11608fb..c3770f13cd 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.54.0" + version = "1.55.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From 83984a621784583742355914e5e21424db226537 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Thu, 24 Oct 2024 10:50:30 +0200 Subject: [PATCH 67/99] [Internal] Automatically trigger integration tests on PR (#4149) ## Changes Automatically trigger integration tests on PR ## Tests Workflow below - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .github/workflows/integration-tests.yml | 61 +++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 .github/workflows/integration-tests.yml diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml new file mode 100644 index 0000000000..b92be6da5f --- /dev/null +++ b/.github/workflows/integration-tests.yml @@ -0,0 +1,61 @@ +name: Integration Tests + +on: + + pull_request: + types: [opened, synchronize] + + merge_group: + + +jobs: + trigger-tests: + if: github.event_name == 'pull_request' + name: Trigger Tests + runs-on: ubuntu-latest + environment: "test-trigger-is" + + steps: + - uses: actions/checkout@v3 + + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }} + private-key: ${{ secrets.DECO_WORKFLOW_TRIGGER_PRIVATE_KEY }} + owner: ${{ secrets.ORG_NAME }} + repositories: ${{secrets.REPO_NAME}} + + - name: Trigger Workflow in Another Repo + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + run: | + gh workflow run terraform-isolated-pr.yml -R ${{ secrets.ORG_NAME }}/${{secrets.REPO_NAME}} \ + --ref main \ + -f pull_request_number=${{ 
github.event.pull_request.number }} \ + -f commit_sha=${{ github.event.pull_request.head.sha }} + + + + # Statuses and checks apply to specific commits (by hash). + # Enforcement of required checks is done both at the PR level and the merge queue level. + # In case of multiple commits in a single PR, the hash of the squashed commit + # will not match the one for the latest (approved) commit in the PR. + # We auto approve the check for the merge queue for two reasons: + # * Queue times out due to duration of tests. + # * Avoid running integration tests twice, since it was already run at the tip of the branch before squashing. + auto-approve: + if: github.event_name == 'merge_group' + runs-on: ubuntu-latest + steps: + - name: Mark Check + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + shell: bash + run: | + gh api -X POST -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/${{ github.repository }}/statuses/${{ github.sha }} \ + -f 'state=success' \ + -f 'context=Integration Tests Check' \ No newline at end of file From 8b005724eceb4b5f1fe25bfc08abe985293ef400 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 24 Oct 2024 06:17:44 -0400 Subject: [PATCH 68/99] [Exporter] Improve exporting of `databricks_pipeline` resources (#4142) ## Changes Changes include: - Use `List` + iterator instead of waiting for full list - improves performance in big workspaces with a lot of DLT pipelines - Better handling of pipelines deployed via DABs - fix error that lead to emitting of notebooks even for DLT pipelines deployed with DABs. - Emit `databricks_schema` for pipelines with direct publishing mode enabled. ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- exporter/exporter_test.go | 8 +++--- exporter/importables.go | 51 +++++++++++++++++++++++------------- exporter/importables_test.go | 2 +- 3 files changed, 38 insertions(+), 23 deletions(-) diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 3f605930c0..2b528f56b8 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -252,7 +252,7 @@ var meAdminFixture = qa.HTTPFixture{ var emptyPipelines = qa.HTTPFixture{ Method: "GET", ReuseRequest: true, - Resource: "/api/2.0/pipelines?max_results=50", + Resource: "/api/2.0/pipelines?max_results=100", Response: pipelines.ListPipelinesResponse{}, } @@ -2021,7 +2021,7 @@ func TestImportingDLTPipelines(t *testing.T) { emptyIpAccessLIst, { Method: "GET", - Resource: "/api/2.0/pipelines?max_results=50", + Resource: "/api/2.0/pipelines?max_results=100", Response: pipelines.ListPipelinesResponse{ Statuses: []pipelines.PipelineStateInfo{ { @@ -2236,7 +2236,7 @@ func TestImportingDLTPipelinesMatchingOnly(t *testing.T) { userReadFixture, { Method: "GET", - Resource: "/api/2.0/pipelines?max_results=50", + Resource: "/api/2.0/pipelines?max_results=100", Response: pipelines.ListPipelinesResponse{ Statuses: []pipelines.PipelineStateInfo{ { @@ -2601,7 +2601,7 @@ func TestIncrementalDLTAndMLflowWebhooks(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/pipelines?max_results=50", + Resource: "/api/2.0/pipelines?max_results=100", Response: pipelines.ListPipelinesResponse{ Statuses: []pipelines.PipelineStateInfo{ { diff --git a/exporter/importables.go b/exporter/importables.go index 5426845f39..54f0fb6da3 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -2002,23 +2002,22 
@@ var resourcesMap map[string]importable = map[string]importable{ return name + "_" + d.Id() }, List: func(ic *importContext) error { - w, err := ic.Client.WorkspaceClient() - if err != nil { - return err - } - pipelinesList, err := w.Pipelines.ListPipelinesAll(ic.Context, pipelines.ListPipelinesRequest{ - MaxResults: 50, + it := ic.workspaceClient.Pipelines.ListPipelines(ic.Context, pipelines.ListPipelinesRequest{ + MaxResults: 100, }) - if err != nil { - return err - } - for i, q := range pipelinesList { + i := 0 + for it.HasNext(ic.Context) { + q, err := it.Next(ic.Context) + if err != nil { + return err + } + i++ if !ic.MatchesName(q.Name) { continue } var modifiedAt int64 if ic.incremental { - pipeline, err := w.Pipelines.Get(ic.Context, pipelines.GetPipelineRequest{ + pipeline, err := ic.workspaceClient.Pipelines.Get(ic.Context, pipelines.GetPipelineRequest{ PipelineId: q.PipelineId, }) if err != nil { @@ -2030,21 +2029,37 @@ var resourcesMap map[string]importable = map[string]importable{ Resource: "databricks_pipeline", ID: q.PipelineId, }, modifiedAt, fmt.Sprintf("DLT Pipeline '%s'", q.Name)) - log.Printf("[INFO] Imported %d of %d DLT Pipelines", i+1, len(pipelinesList)) + if i%100 == 0 { + log.Printf("[INFO] Imported %d DLT Pipelines", i) + } } + log.Printf("[INFO] Listed %d DLT pipelines", i) return nil }, Import: func(ic *importContext, r *resource) error { var pipeline tfpipelines.Pipeline s := ic.Resources["databricks_pipeline"].Schema common.DataToStructPointer(r.Data, s, &pipeline) - if pipeline.Catalog != "" && pipeline.Target != "" { - ic.Emit(&resource{ - Resource: "databricks_schema", - ID: pipeline.Catalog + "." + pipeline.Target, - }) + if pipeline.Deployment != nil && pipeline.Deployment.Kind == "BUNDLE" { + log.Printf("[INFO] Skipping processing of DLT Pipeline with ID %s (%s) as deployed with DABs", + r.ID, pipeline.Name) + return nil + } + if pipeline.Catalog != "" { + var schemaName string + if pipeline.Target != "" { + schemaName = pipeline.Target + } else if pipeline.Schema != "" { + schemaName = pipeline.Schema + } + if schemaName != "" { + ic.Emit(&resource{ + Resource: "databricks_schema", + ID: pipeline.Catalog + "." + pipeline.Target, + }) + } } - if pipeline.Deployment == nil || pipeline.Deployment.Kind == "BUNDLE" { + if pipeline.Deployment == nil || pipeline.Deployment.Kind != "BUNDLE" { for _, lib := range pipeline.Libraries { if lib.Notebook != nil { ic.emitNotebookOrRepo(lib.Notebook.Path) diff --git a/exporter/importables_test.go b/exporter/importables_test.go index 8e82f7c4fc..645fba93e4 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -1369,7 +1369,7 @@ func TestIncrementalListDLT(t *testing.T) { qa.HTTPFixturesApply(t, []qa.HTTPFixture{ { Method: "GET", - Resource: "/api/2.0/pipelines?max_results=50", + Resource: "/api/2.0/pipelines?max_results=100", Response: pipelines.ListPipelinesResponse{ Statuses: []pipelines.PipelineStateInfo{ { From faa6a89e6dc9be41fc67bcc5c768f85c2471d182 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 24 Oct 2024 08:46:41 -0400 Subject: [PATCH 69/99] [Exporter] **Breaking change** Use new query and alert resources instead of legacy resources (#4150) ## Changes This change replaces legacy `databricks_sql_query` and `databricks_sql_alert` with new resources `databricks_query` and `databricks_alert`. Also, services `sql-queries` and `sql-alerts` are renamed to `queries` and `alerts`. 
Other changes include: * Improve performance of Lakeview dashboards scan by using bigger page size * Generalize `isMatchingCatalogAndSchema` implementation for use in multiple resources where attribute names could be different * Generalize handling of `/Workspace` prefix when emitting notebooks, workspace files and directories. ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [x] using Go SDK --- docs/guides/experimental-exporter.md | 6 +- exporter/context.go | 4 +- exporter/exporter_test.go | 83 +++++++---- exporter/importables.go | 184 ++++++++++++++---------- exporter/importables_test.go | 1 + exporter/test-data/get-alert.json | 25 ++++ exporter/test-data/get-alerts.json | 29 ++++ exporter/test-data/get-queries.json | 17 +++ exporter/test-data/get-query.json | 13 ++ exporter/test-data/get-sql-alert.json | 83 ----------- exporter/test-data/get-sql-alerts.json | 84 ----------- exporter/test-data/get-sql-queries.json | 39 ----- exporter/util.go | 62 +++----- exporter/util_workspace.go | 26 ++-- sql/resource_query.go | 12 +- 15 files changed, 295 insertions(+), 373 deletions(-) create mode 100644 exporter/test-data/get-alert.json create mode 100644 exporter/test-data/get-alerts.json create mode 100644 exporter/test-data/get-queries.json create mode 100644 exporter/test-data/get-query.json delete mode 100644 exporter/test-data/get-sql-alert.json delete mode 100644 exporter/test-data/get-sql-alerts.json delete mode 100644 exporter/test-data/get-sql-queries.json diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index e709f4765d..6f41bf6154 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -111,6 +111,7 @@ Services are just logical groups of resources used for filtering and organizatio Please note that for services not marked with **listing**, we'll export resources only if they are referenced from other resources. * `access` - [databricks_permissions](../resources/permissions.md), [databricks_instance_profile](../resources/instance_profile.md), [databricks_ip_access_list](../resources/ip_access_list.md), [databricks_mws_permission_assignment](../resources/mws_permission_assignment.md) and [databricks_access_control_rule_set](../resources/access_control_rule_set.md). +* `alerts` - **listing** [databricks_alert](../resources/alert.md). * `compute` - **listing** [databricks_cluster](../resources/cluster.md). * `dashboards` - **listing** [databricks_dashboard](../resources/dashboard.md). * `directories` - **listing** [databricks_directory](../resources/directory.md). *Please note that directories aren't listed when running in the incremental mode! Only directories with updated notebooks will be emitted.* @@ -123,13 +124,12 @@ Services are just logical groups of resources used for filtering and organizatio * `notebooks` - **listing** [databricks_notebook](../resources/notebook.md). * `policies` - **listing** [databricks_cluster_policy](../resources/cluster_policy). * `pools` - **listing** [instance pools](../resources/instance_pool.md). +* `queries` - **listing** [databricks_query](../resources/query.md). * `repos` - **listing** [databricks_repo](../resources/repo.md) * `secrets` - **listing** [databricks_secret_scope](../resources/secret_scope.md) along with [keys](../resources/secret.md) and [ACLs](../resources/secret_acl.md). 
* `settings` - **listing** [databricks_notification_destination](../resources/notification_destination.md). -* `sql-alerts` - **listing** [databricks_sql_alert](../resources/sql_alert.md). -* `sql-dashboards` - **listing** [databricks_sql_dashboard](../resources/sql_dashboard.md) along with associated [databricks_sql_widget](../resources/sql_widget.md) and [databricks_sql_visualization](../resources/sql_visualization.md). +* `sql-dashboards` - **listing** Legacy [databricks_sql_dashboard](../resources/sql_dashboard.md) along with associated [databricks_sql_widget](../resources/sql_widget.md) and [databricks_sql_visualization](../resources/sql_visualization.md). * `sql-endpoints` - **listing** [databricks_sql_endpoint](../resources/sql_endpoint.md) along with [databricks_sql_global_config](../resources/sql_global_config.md). -* `sql-queries` - **listing** [databricks_sql_query](../resources/sql_query.md). * `storage` - only [databricks_dbfs_file](../resources/dbfs_file.md) and [databricks_file](../resources/file.md) referenced in other resources (libraries, init scripts, ...) will be downloaded locally and properly arranged into terraform state. * `uc-artifact-allowlist` - **listing** exports [databricks_artifact_allowlist](../resources/artifact_allowlist.md) resources for Unity Catalog Allow Lists attached to the current metastore. * `uc-catalogs` - **listing** [databricks_catalog](../resources/catalog.md) and [databricks_workspace_binding](../resources/workspace_binding.md) diff --git a/exporter/context.go b/exporter/context.go index 70ab462029..c7f2b18235 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -204,8 +204,8 @@ var goroutinesNumber = map[string]int{ "databricks_sql_dashboard": 3, "databricks_sql_widget": 4, "databricks_sql_visualization": 4, - "databricks_sql_query": 5, - "databricks_sql_alert": 2, + "databricks_query": 4, + "databricks_alert": 2, "databricks_permissions": 11, } diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 2b528f56b8..9c2f64cf15 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -56,20 +56,6 @@ func getJSONObject(filename string) any { return obj } -func getJSONArray(filename string) any { - data, err := os.ReadFile(filename) - if err != nil { - panic(err) - } - var obj []any - err = json.Unmarshal(data, &obj) - if err != nil { - fmt.Printf("[ERROR] error! 
file=%s err=%v\n", filename, err) - fmt.Printf("[ERROR] data=%s\n", string(data)) - } - return obj -} - func workspaceConfKeysToURL() string { keys := make([]string, 0, len(workspaceConfKeys)) for k := range workspaceConfKeys { @@ -379,14 +365,14 @@ var emptySqlDashboards = qa.HTTPFixture{ var emptySqlQueries = qa.HTTPFixture{ Method: "GET", - Resource: "/api/2.0/preview/sql/queries?page_size=100", + Resource: "/api/2.0/sql/queries?page_size=100", Response: map[string]any{}, ReuseRequest: true, } var emptySqlAlerts = qa.HTTPFixture{ Method: "GET", - Resource: "/api/2.0/preview/sql/alerts", + Resource: "/api/2.0/sql/alerts?page_size=100", Response: []tfsql.AlertEntity{}, ReuseRequest: true, } @@ -447,7 +433,7 @@ var emptyMetastoreList = qa.HTTPFixture{ var emptyLakeviewList = qa.HTTPFixture{ Method: "GET", - Resource: "/api/2.0/lakeview/dashboards?page_size=100", + Resource: "/api/2.0/lakeview/dashboards?page_size=1000", Response: sdk_dashboards.ListDashboardsResponse{}, ReuseRequest: true, } @@ -1015,6 +1001,16 @@ func TestImportingClusters(t *testing.T) { }, }, }, + { + Method: "GET", + Resource: "/api/2.0/preview/scim/v2/Users?attributes=id%2CuserName&count=100&startIndex=1", + ReuseRequest: true, + Response: scim.UserList{ + Resources: []scim.User{ + {ID: "123", DisplayName: "test@test.com", UserName: "test@test.com"}, + }, + }, + }, }, func(ctx context.Context, client *common.DatabricksClient) { os.Setenv("EXPORTER_PARALLELISM_default", "1") @@ -1950,16 +1946,21 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/preview/sql/queries?page_size=100", - Response: getJSONObject("test-data/get-sql-queries.json"), + Resource: "/api/2.0/sql/queries?page_size=100", + Response: getJSONObject("test-data/get-queries.json"), ReuseRequest: true, }, { Method: "GET", - Resource: "/api/2.0/preview/sql/queries/16c4f969-eea0-4aad-8f82-03d79b078dcc", - Response: getJSONObject("test-data/get-sql-query.json"), + Resource: "/api/2.0/sql/queries/16c4f969-eea0-4aad-8f82-03d79b078dcc?", + Response: getJSONObject("test-data/get-query.json"), ReuseRequest: true, }, + { + Method: "GET", + Resource: "/api/2.0/preview/sql/queries/16c4f969-eea0-4aad-8f82-03d79b078dcc", + Response: getJSONObject("test-data/get-sql-query.json"), + }, { Method: "GET", Resource: "/api/2.0/permissions/sql/queries/16c4f969-eea0-4aad-8f82-03d79b078dcc?", @@ -1972,14 +1973,14 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/preview/sql/alerts", - Response: getJSONArray("test-data/get-sql-alerts.json"), + Resource: "/api/2.0/sql/alerts?page_size=100", + Response: getJSONObject("test-data/get-alerts.json"), ReuseRequest: true, }, { Method: "GET", - Resource: "/api/2.0/preview/sql/alerts/3cf91a42-6217-4f3c-a6f0-345d489051b9?", - Response: getJSONObject("test-data/get-sql-alert.json"), + Resource: "/api/2.0/sql/alerts/3cf91a42-6217-4f3c-a6f0-345d489051b9?", + Response: getJSONObject("test-data/get-alert.json"), }, { Method: "GET", @@ -1993,18 +1994,44 @@ func TestImportingSqlObjects(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.enableListing("sql-dashboards,sql-queries,sql-endpoints,sql-alerts") - ic.enableServices("sql-dashboards,sql-queries,sql-alerts,sql-endpoints,access,notebooks") + ic.enableListing("sql-dashboards,queries,sql-endpoints,alerts") + ic.enableServices("sql-dashboards,queries,alerts,sql-endpoints,access") err := ic.Run() assert.NoError(t, err) + // check the generated HCL for SQL Warehouses content, err := 
os.ReadFile(tmpDir + "/sql-endpoints.tf") assert.NoError(t, err) contentStr := string(content) assert.True(t, strings.Contains(contentStr, `enable_serverless_compute = false`)) assert.True(t, strings.Contains(contentStr, `resource "databricks_sql_endpoint" "test" {`)) assert.False(t, strings.Contains(contentStr, `tags {`)) + // check the generated HCL for SQL Dashboards + content, err = os.ReadFile(tmpDir + "/sql-dashboards.tf") + assert.NoError(t, err) + contentStr = string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_sql_dashboard" "test_9cb0c8f5_6262_4a1f_a741_2181de76028f" {`)) + assert.True(t, strings.Contains(contentStr, `dashboard_id = databricks_sql_dashboard.test_9cb0c8f5_6262_4a1f_a741_2181de76028f.id`)) + assert.True(t, strings.Contains(contentStr, `resource "databricks_sql_widget" "rd4dd2082685" {`)) + assert.True(t, strings.Contains(contentStr, `resource "databricks_sql_visualization" "chart_16c4f969_eea0_4aad_8f82_03d79b078dcc_1a062d3a_eefe_11eb_9559_dc7cd9c86087"`)) + // check the generated HCL for Qieries + content, err = os.ReadFile(tmpDir + "/queries.tf") + assert.NoError(t, err) + contentStr = string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_query" "jobs_per_day_per_status_last_30_days_16c4f969_eea0_4aad_8f82_03d79b078dcc"`)) + assert.True(t, strings.Contains(contentStr, `warehouse_id = databricks_sql_endpoint.test.id`)) + assert.True(t, strings.Contains(contentStr, `owner_user_name = "user@domain.com"`)) + assert.True(t, strings.Contains(contentStr, `display_name = "Jobs per day per status last 30 days"`)) + // check the generated HCL for Alerts + content, err = os.ReadFile(tmpDir + "/alerts.tf") + assert.NoError(t, err) + contentStr = string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_alert" "test_alert_3cf91a42_6217_4f3c_a6f0_345d489051b9"`)) + assert.True(t, strings.Contains(contentStr, `query_id = databricks_query.jobs_per_day_per_status_last_30_days_16c4f969_eea0_4aad_8f82_03d79b078dcc.id`)) + assert.True(t, strings.Contains(contentStr, `display_name = "Test Alert"`)) + assert.True(t, strings.Contains(contentStr, `op = "GREATER_THAN"`)) + assert.True(t, strings.Contains(contentStr, `owner_user_name = "test@domain.com"`)) }) } @@ -2795,7 +2822,7 @@ func TestImportingLakeviewDashboards(t *testing.T) { noCurrentMetastoreAttached, { Method: "GET", - Resource: "/api/2.0/lakeview/dashboards?page_size=100", + Resource: "/api/2.0/lakeview/dashboards?page_size=1000", Response: sdk_dashboards.ListDashboardsResponse{ Dashboards: []sdk_dashboards.Dashboard{ { diff --git a/exporter/importables.go b/exporter/importables.go index 54f0fb6da3..04833df814 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -437,9 +437,9 @@ var resourcesMap map[string]importable = map[string]importable{ {Path: "task.spark_submit_task.parameters", Resource: "databricks_workspace_file", Match: "workspace_path"}, {Path: "task.sql_task.file.path", Resource: "databricks_workspace_file", Match: "path"}, {Path: "task.dbt_task.project_directory", Resource: "databricks_directory", Match: "path"}, - {Path: "task.sql_task.alert.alert_id", Resource: "databricks_sql_alert"}, + {Path: "task.sql_task.alert.alert_id", Resource: "databricks_alert"}, {Path: "task.sql_task.dashboard.dashboard_id", Resource: "databricks_sql_dashboard"}, - {Path: "task.sql_task.query.query_id", Resource: "databricks_sql_query"}, + {Path: "task.sql_task.query.query_id", Resource: "databricks_query"}, {Path: 
"task.sql_task.warehouse_id", Resource: "databricks_sql_endpoint"}, {Path: "task.webhook_notifications.on_duration_warning_threshold_exceeded.id", Resource: "databricks_notification_destination"}, {Path: "task.webhook_notifications.on_failure.id", Resource: "databricks_notification_destination"}, @@ -544,7 +544,7 @@ var resourcesMap map[string]importable = map[string]importable{ if task.SqlTask != nil { if task.SqlTask.Query != nil { ic.Emit(&resource{ - Resource: "databricks_sql_query", + Resource: "databricks_query", ID: task.SqlTask.Query.QueryId, }) } @@ -556,7 +556,7 @@ var resourcesMap map[string]importable = map[string]importable{ } if task.SqlTask.Alert != nil { ic.Emit(&resource{ - Resource: "databricks_sql_alert", + Resource: "databricks_alert", ID: task.SqlTask.Alert.AlertId, }) } @@ -1169,8 +1169,8 @@ var resourcesMap map[string]importable = map[string]importable{ {Path: "cluster_id", Resource: "databricks_cluster"}, {Path: "instance_pool_id", Resource: "databricks_instance_pool"}, {Path: "cluster_policy_id", Resource: "databricks_cluster_policy"}, - {Path: "sql_query_id", Resource: "databricks_sql_query"}, - {Path: "sql_alert_id", Resource: "databricks_sql_alert"}, + {Path: "sql_query_id", Resource: "databricks_query"}, + {Path: "sql_alert_id", Resource: "databricks_alert"}, {Path: "sql_dashboard_id", Resource: "databricks_sql_dashboard"}, {Path: "sql_endpoint_id", Resource: "databricks_sql_endpoint"}, {Path: "dashboard_id", Resource: "databricks_dashboard"}, @@ -1674,65 +1674,84 @@ var resourcesMap map[string]importable = map[string]importable{ MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, }, - "databricks_sql_query": { + "databricks_query": { WorkspaceLevel: true, - Service: "sql-queries", + Service: "queries", Name: func(ic *importContext, d *schema.ResourceData) string { - return d.Get("name").(string) + "_" + d.Id() + return d.Get("display_name").(string) + "_" + d.Id() }, List: func(ic *importContext) error { - qs, err := dbsqlListObjects(ic, "/preview/sql/queries") - if err != nil { - return nil - } - for i, q := range qs { - name := q["name"].(string) - if !ic.MatchesName(name) { + it := ic.workspaceClient.Queries.List(ic.Context, sql.ListQueriesRequest{PageSize: 100}) + i := 0 + for it.HasNext(ic.Context) { + q, err := it.Next(ic.Context) + if err != nil { + return err + } + i++ + if !ic.MatchesName(q.DisplayName) { continue } + // TODO: look if we can create data based on the response, without calling Get ic.EmitIfUpdatedAfterIsoString(&resource{ - Resource: "databricks_sql_query", - ID: q["id"].(string), + Resource: "databricks_query", + ID: q.Id, Incremental: ic.incremental, - }, q["updated_at"].(string), fmt.Sprintf("query '%s'", name)) - log.Printf("[INFO] Imported %d of %d SQL queries", i+1, len(qs)) + }, q.UpdateTime, fmt.Sprintf("query '%s'", q.DisplayName)) + if i%50 == 0 { + log.Printf("[INFO] Imported %d Queries", i) + } } - + log.Printf("[INFO] Listed %d Queries", i) return nil }, Import: func(ic *importContext, r *resource) error { - var query tfsql.QueryEntity - s := ic.Resources["databricks_sql_query"].Schema + var query tfsql.QueryStruct + s := ic.Resources["databricks_query"].Schema common.DataToStructPointer(r.Data, s, &query) - sqlEndpointID, err := ic.getSqlEndpoint(query.DataSourceID) - if err == nil { + if query.WarehouseId != "" { ic.Emit(&resource{ Resource: "databricks_sql_endpoint", - ID: sqlEndpointID, + ID: query.WarehouseId, }) - } else { - log.Printf("[WARN] Can't find SQL endpoint for data source 
'%s'", query.DataSourceID) } // emit queries specified as parameters - for _, p := range query.Parameter { - if p.Query != nil { + for _, p := range query.Parameters { + if p.QueryBackedValue != nil { ic.Emit(&resource{ - Resource: "databricks_sql_query", - ID: p.Query.QueryID, + Resource: "databricks_query", + ID: p.QueryBackedValue.QueryId, }) } } - ic.emitSqlParentDirectory(query.Parent) + ic.emitUserOrServicePrincipal(query.OwnerUserName) + ic.emitDirectoryOrRepo(query.ParentPath) + // TODO: r.AddExtraData(ParentDirectoryExtraKey, directoryPath) ? ic.emitPermissionsIfNotIgnored(r, fmt.Sprintf("/sql/queries/%s", r.ID), - "sql_query_"+ic.Importables["databricks_sql_query"].Name(ic, r.Data)) + "query_"+ic.Importables["databricks_query"].Name(ic, r.Data)) + if query.Catalog != "" && query.Schema != "" { + ic.Emit(&resource{ + Resource: "databricks_schema", + ID: fmt.Sprintf("%s.%s", query.Catalog, query.Schema), + }) + } return nil }, - Ignore: generateIgnoreObjectWithEmptyAttributeValue("databricks_sql_query", "name"), + // TODO: exclude owner if it's the current user? + Ignore: generateIgnoreObjectWithEmptyAttributeValue("databricks_query", "display_name"), Depends: []reference{ - {Path: "data_source_id", Resource: "databricks_sql_endpoint", Match: "data_source_id"}, - {Path: "parameter.query.query_id", Resource: "databricks_sql_query", Match: "id"}, - {Path: "parent", Resource: "databricks_directory", Match: "object_id", MatchType: MatchRegexp, - Regexp: sqlParentRegexp}, + {Path: "warehouse_id", Resource: "databricks_sql_endpoint"}, + {Path: "parameter.query_backed_value.query_id", Resource: "databricks_query", Match: "id"}, + {Path: "owner_user_name", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, + {Path: "owner_user_name", Resource: "databricks_service_principal", Match: "application_id"}, + {Path: "catalog", Resource: "databricks_catalog"}, + {Path: "schema", Resource: "databricks_schema", Match: "name", + IsValidApproximation: createIsMatchingCatalogAndSchema("catalog", "schema"), + SkipDirectLookup: true}, + // TODO: add match like for workspace files? + {Path: "parent_path", Resource: "databricks_directory"}, + {Path: "parent_path", Resource: "databricks_directory", Match: "workspace_path"}, + // TODO: add support for Repos? 
}, }, "databricks_sql_endpoint": { @@ -1901,7 +1920,7 @@ var resourcesMap map[string]importable = map[string]importable{ ID: visualizationID, }) ic.Emit(&resource{ - Resource: "databricks_sql_query", + Resource: "databricks_query", ID: query.ID, }) sqlEndpointID, err := ic.getSqlEndpoint(query.DataSourceID) @@ -1933,7 +1952,7 @@ var resourcesMap map[string]importable = map[string]importable{ }, Depends: []reference{ {Path: "visualization_id", Resource: "databricks_sql_visualization", Match: "visualization_id"}, - {Path: "dashboard_id", Resource: "databricks_sql_dashboard", Match: "id"}, + {Path: "dashboard_id", Resource: "databricks_sql_dashboard"}, }, }, "databricks_sql_visualization": { @@ -1944,51 +1963,63 @@ var resourcesMap map[string]importable = map[string]importable{ return name }, Depends: []reference{ - {Path: "query_id", Resource: "databricks_sql_query", Match: "id"}, + {Path: "query_id", Resource: "databricks_query"}, }, }, - "databricks_sql_alert": { + "databricks_alert": { WorkspaceLevel: true, - Service: "sql-alerts", + Service: "alerts", Name: func(ic *importContext, d *schema.ResourceData) string { - return d.Get("name").(string) + "_" + d.Id() + return d.Get("display_name").(string) + "_" + d.Id() }, List: func(ic *importContext) error { - alerts, err := ic.workspaceClient.AlertsLegacy.List(ic.Context) - if err != nil { - return err - } - for i, alert := range alerts { - name := alert.Name - if !ic.MatchesName(name) { + it := ic.workspaceClient.Alerts.List(ic.Context, sql.ListAlertsRequest{PageSize: 100}) + i := 0 + for it.HasNext(ic.Context) { + a, err := it.Next(ic.Context) + if err != nil { + return err + } + i++ + if !ic.MatchesName(a.DisplayName) { continue } + // TODO: look if we can create data based on the response, without calling Get ic.EmitIfUpdatedAfterIsoString(&resource{ - Resource: "databricks_sql_alert", - ID: alert.Id, + Resource: "databricks_alert", + ID: a.Id, Incremental: ic.incremental, - }, alert.UpdatedAt, fmt.Sprintf("alert '%s'", name)) - log.Printf("[INFO] Imported %d of %d SQL alerts", i+1, len(alerts)) + }, a.UpdateTime, fmt.Sprintf("alert '%s'", a.DisplayName)) + if i%50 == 0 { + log.Printf("[INFO] Imported %d Alerts", i) + } } + log.Printf("[INFO] Listed %d Alerts", i) return nil }, Import: func(ic *importContext, r *resource) error { - var alert tfsql.AlertEntity - s := ic.Resources["databricks_sql_alert"].Schema + var alert sql.Alert + s := ic.Resources["databricks_alert"].Schema common.DataToStructPointer(r.Data, s, &alert) if alert.QueryId != "" { - ic.Emit(&resource{Resource: "databricks_sql_query", ID: alert.QueryId}) + ic.Emit(&resource{Resource: "databricks_query", ID: alert.QueryId}) } - ic.emitSqlParentDirectory(alert.Parent) + ic.emitDirectoryOrRepo(alert.ParentPath) + ic.emitUserOrServicePrincipal(alert.OwnerUserName) + // TODO: r.AddExtraData(ParentDirectoryExtraKey, directoryPath) ? ic.emitPermissionsIfNotIgnored(r, fmt.Sprintf("/sql/alerts/%s", r.ID), - "sql_alert_"+ic.Importables["databricks_sql_alert"].Name(ic, r.Data)) + "alert_"+ic.Importables["databricks_alert"].Name(ic, r.Data)) return nil }, - Ignore: generateIgnoreObjectWithEmptyAttributeValue("databricks_sql_alert", "name"), + // TODO: exclude owner if it's the current user? 
+ Ignore: generateIgnoreObjectWithEmptyAttributeValue("databricks_alert", "display_name"), Depends: []reference{ - {Path: "query_id", Resource: "databricks_sql_query", Match: "id"}, - {Path: "parent", Resource: "databricks_directory", Match: "object_id", - MatchType: MatchRegexp, Regexp: sqlParentRegexp}, + {Path: "query_id", Resource: "databricks_query"}, + {Path: "owner_user_name", Resource: "databricks_user", Match: "user_name", MatchType: MatchCaseInsensitive}, + {Path: "owner_user_name", Resource: "databricks_service_principal", Match: "application_id"}, + // TODO: add match like for workspace files? + {Path: "parent_path", Resource: "databricks_directory"}, + {Path: "parent_path", Resource: "databricks_directory", Match: "workspace_path"}, }, }, "databricks_pipeline": { @@ -2145,7 +2176,11 @@ var resourcesMap map[string]importable = map[string]importable{ Depends: []reference{ {Path: "catalog", Resource: "databricks_catalog"}, {Path: "target", Resource: "databricks_schema", Match: "name", - IsValidApproximation: dltIsMatchingCatalogAndSchema, SkipDirectLookup: true}, + IsValidApproximation: createIsMatchingCatalogAndSchema("catalog", "target"), + SkipDirectLookup: true}, + {Path: "schema", Resource: "databricks_schema", Match: "name", + IsValidApproximation: createIsMatchingCatalogAndSchema("catalog", "schema"), + SkipDirectLookup: true}, {Path: "cluster.aws_attributes.instance_profile_arn", Resource: "databricks_instance_profile"}, {Path: "cluster.init_scripts.dbfs.destination", Resource: "databricks_dbfs_file", Match: "dbfs_path"}, {Path: "cluster.init_scripts.volumes.destination", Resource: "databricks_file"}, @@ -2352,7 +2387,8 @@ var resourcesMap map[string]importable = map[string]importable{ {Path: "config.served_entities.entity_name", Resource: "databricks_registered_model"}, {Path: "config.auto_capture_config.catalog_name", Resource: "databricks_catalog"}, {Path: "config.auto_capture_config.schema_name", Resource: "databricks_schema", Match: "name", - IsValidApproximation: isMatchingCatalogAndSchemaInModelServing, SkipDirectLookup: true}, + IsValidApproximation: createIsMatchingCatalogAndSchema("config.0.auto_capture_config.0.catalog_name", "config.0.auto_capture_config.0.schema_name"), + SkipDirectLookup: true}, }, }, "databricks_mlflow_webhook": { @@ -2760,7 +2796,8 @@ var resourcesMap map[string]importable = map[string]importable{ Depends: []reference{ {Path: "catalog_name", Resource: "databricks_catalog"}, {Path: "schema_name", Resource: "databricks_schema", Match: "name", - IsValidApproximation: isMatchingCatalogAndSchema, SkipDirectLookup: true}, + IsValidApproximation: createIsMatchingCatalogAndSchema("catalog_name", "schema_name"), + SkipDirectLookup: true}, {Path: "storage_location", Resource: "databricks_external_location", Match: "url", MatchType: MatchLongestPrefix}, }, @@ -2794,7 +2831,8 @@ var resourcesMap map[string]importable = map[string]importable{ Depends: []reference{ {Path: "catalog_name", Resource: "databricks_catalog"}, {Path: "schema_name", Resource: "databricks_schema", Match: "name", - IsValidApproximation: isMatchingCatalogAndSchema, SkipDirectLookup: true}, + IsValidApproximation: createIsMatchingCatalogAndSchema("catalog_name", "schema_name"), + SkipDirectLookup: true}, {Path: "storage_location", Resource: "databricks_external_location", Match: "url", MatchType: MatchLongestPrefix}, }, @@ -3106,7 +3144,8 @@ var resourcesMap map[string]importable = map[string]importable{ Depends: []reference{ {Path: "catalog_name", Resource: "databricks_catalog"}, 
{Path: "schema_name", Resource: "databricks_schema", Match: "name", - IsValidApproximation: isMatchingCatalogAndSchema, SkipDirectLookup: true}, + IsValidApproximation: createIsMatchingCatalogAndSchema("catalog_name", "schema_name"), + SkipDirectLookup: true}, {Path: "storage_root", Resource: "databricks_external_location", Match: "url", MatchType: MatchLongestPrefix}, }, }, @@ -3321,7 +3360,7 @@ var resourcesMap map[string]importable = map[string]importable{ WorkspaceLevel: true, Service: "dashboards", List: func(ic *importContext) error { - it := ic.workspaceClient.Lakeview.List(ic.Context, dashboards.ListDashboardsRequest{PageSize: 100}) + it := ic.workspaceClient.Lakeview.List(ic.Context, dashboards.ListDashboardsRequest{PageSize: 1000}) i := 0 for it.HasNext(ic.Context) { d, err := it.Next(ic.Context) @@ -3516,7 +3555,8 @@ var resourcesMap map[string]importable = map[string]importable{ Depends: []reference{ {Path: "catalog_name", Resource: "databricks_catalog"}, {Path: "schema_name", Resource: "databricks_schema", Match: "name", - IsValidApproximation: isMatchingCatalogAndSchema, SkipDirectLookup: true}, + IsValidApproximation: createIsMatchingCatalogAndSchema("catalog_name", "schema_name"), + SkipDirectLookup: true}, {Path: "spec.source_table_full_name", Resource: "databricks_sql_table"}, }, }, diff --git a/exporter/importables_test.go b/exporter/importables_test.go index 645fba93e4..34a25b88ce 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -1343,6 +1343,7 @@ func TestDbfsFileGeneration(t *testing.T) { }) } +// TODO: remove it completely after we remove support for legacy dashboards func TestSqlListObjects(t *testing.T) { qa.HTTPFixturesApply(t, []qa.HTTPFixture{ { diff --git a/exporter/test-data/get-alert.json b/exporter/test-data/get-alert.json new file mode 100644 index 0000000000..2d367e2332 --- /dev/null +++ b/exporter/test-data/get-alert.json @@ -0,0 +1,25 @@ +{ + "condition": { + "op":"GREATER_THAN", + "operand": { + "column": { + "name":"threshold" + } + }, + "threshold": { + "value": { + "string_value":"50" + } + } + }, + "create_time":"2023-04-10T08:14:47Z", + "display_name":"Test Alert", + "id":"3cf91a42-6217-4f3c-a6f0-345d489051b9", + "lifecycle_state":"ACTIVE", + "notify_on_ok":true, + "owner_user_name":"test@domain.com", + "query_id":"16c4f969-eea0-4aad-8f82-03d79b078dcc", + "state":"OK", + "trigger_time":"2023-04-10T08:15:56Z", + "update_time":"2023-04-10T08:14:47Z" +} diff --git a/exporter/test-data/get-alerts.json b/exporter/test-data/get-alerts.json new file mode 100644 index 0000000000..cc52cd4834 --- /dev/null +++ b/exporter/test-data/get-alerts.json @@ -0,0 +1,29 @@ +{ + "results": [ + { + "condition": { + "op":"GREATER_THAN", + "operand": { + "column": { + "name":"threshold" + } + }, + "threshold": { + "value": { + "string_value":"50" + } + } + }, + "create_time":"2023-04-10T08:14:47Z", + "display_name":"Test Alert", + "id":"3cf91a42-6217-4f3c-a6f0-345d489051b9", + "lifecycle_state":"ACTIVE", + "notify_on_ok":true, + "owner_user_name":"test@domain.com", + "query_id":"16c4f969-eea0-4aad-8f82-03d79b078dcc", + "state":"OK", + "trigger_time":"2023-04-10T08:15:56Z", + "update_time":"2023-04-10T08:14:47Z" + } + ] +} diff --git a/exporter/test-data/get-queries.json b/exporter/test-data/get-queries.json new file mode 100644 index 0000000000..951a154266 --- /dev/null +++ b/exporter/test-data/get-queries.json @@ -0,0 +1,17 @@ +{ + "results": [ + { + "create_time":"2021-04-03T13:03:51Z", + "description":"", + "display_name":"Jobs per day 
per status last 30 days", + "id":"16c4f969-eea0-4aad-8f82-03d79b078dcc", + "last_modifier_user_name":"user@domain.com", + "lifecycle_state":"ACTIVE", + "owner_user_name":"user@domain.com", + "query_text":"select\n to_date(job_runtime.startTS) as day,\n job_terminal_state,\n count(1) as cnt\nfrom\n overwatch.jobrun\ngroup by\n to_date(job_runtime.startTS),\n job_terminal_state\nhaving day \u003e date_sub(current_date(), 30)\norder by\n day desc", + "run_as_mode":"OWNER", + "update_time":"2021-04-03T13:03:51Z", + "warehouse_id":"f562046bc1272886" + } + ] +} diff --git a/exporter/test-data/get-query.json b/exporter/test-data/get-query.json new file mode 100644 index 0000000000..d172a1074e --- /dev/null +++ b/exporter/test-data/get-query.json @@ -0,0 +1,13 @@ +{ + "create_time":"2021-04-03T13:03:51Z", + "description":"", + "display_name":"Jobs per day per status last 30 days", + "id":"16c4f969-eea0-4aad-8f82-03d79b078dcc", + "last_modifier_user_name":"user@domain.com", + "lifecycle_state":"ACTIVE", + "owner_user_name":"user@domain.com", + "query_text":"select\n to_date(job_runtime.startTS) as day,\n job_terminal_state,\n count(1) as cnt\nfrom\n overwatch.jobrun\ngroup by\n to_date(job_runtime.startTS),\n job_terminal_state\nhaving day \u003e date_sub(current_date(), 30)\norder by\n day desc", + "run_as_mode":"OWNER", + "update_time":"2021-04-03T13:03:51Z", + "warehouse_id":"f562046bc1272886" +} diff --git a/exporter/test-data/get-sql-alert.json b/exporter/test-data/get-sql-alert.json deleted file mode 100644 index 8723224c2c..0000000000 --- a/exporter/test-data/get-sql-alert.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "conditions": { - "alert": { - "column": { - "aggregation": null, - "display": "threshold", - "name": "threshold" - } - }, - "op": "\u003e", - "query_plan": null, - "threshold": { - "value": "50" - } - }, - "created_at": "2023-04-10T08:14:47Z", - "id": "3cf91a42-6217-4f3c-a6f0-345d489051b9", - "last_triggered_at": "2023-04-10T08:15:56Z", - "name": "Test Alert", - "parent": "folders/4451965692354143", - "options": { - "aggregation": null, - "column": "threshold", - "display_column": "threshold", - "folder_node_internal_name": "tree/3467386930489745", - "folder_node_status": "ACTIVE", - "muted": false, - "op": "\u003e", - "parent": "folders/4451965692354143", - "query_plan": null, - "value": "50" - }, - "query": { - "created_at": "2023-04-10T08:13:33Z", - "data_source_id": "78520023-ab69-44a4-84d0-4fda0c69ea91", - "description": null, - "id": "16c4f969-eea0-4aad-8f82-03d79b078dcc", - "is_archived": false, - "is_draft": false, - "is_safe": true, - "name": "Alert Query", - "options": { - "apply_auto_limit": true, - "folder_node_internal_name": "tree/3467386930489744", - "folder_node_status": "ACTIVE", - "parameters": null, - "parent": "folders/4451965692354143", - "visualization_control_order": null - }, - "query": "select 42 as threshold", - "run_as_role": null, - "run_as_service_principal_id": null, - "schedule": null, - "tags": null, - "updated_at": "2023-04-10T08:14:13Z", - "user_id": 661448457191611, - "version": 1 - }, - "rearm": null, - "refresh_schedules": [ - { - "cron": "1 15 8 * * ?", - "data_source_id": "78520023-ab69-44a4-84d0-4fda0c69ea91", - "id": "71cebca8-3684-4b60-95f1-5d9b3786b9f8", - "job_id": "91aeb0a4644e0d357a36f61824a8c71436b61506" - } - ], - "state": "ok", - "subscriptions": [ - { - "user_id": 661448457191611 - } - ], - "updated_at": "2023-04-10T08:17:21Z", - "user": { - "email": "user@domain.com", - "id": 661448457191611, - "is_db_admin": false, - "name": 
"user@domain.com", - "profile_image_url": "https://www.gravatar.com/avatar/1111?s=40\u0026d=identicon" - }, - "user_id": 661448457191611 -} diff --git a/exporter/test-data/get-sql-alerts.json b/exporter/test-data/get-sql-alerts.json deleted file mode 100644 index 088b20bbdf..0000000000 --- a/exporter/test-data/get-sql-alerts.json +++ /dev/null @@ -1,84 +0,0 @@ -[ - { - "conditions": { - "alert": { - "column": { - "aggregation": null, - "display": "threshold", - "name": "threshold" - } - }, - "op": "\u003e", - "query_plan": null, - "threshold": { - "value": "50" - } - }, - "created_at": "2023-04-10T08:14:47Z", - "id": "3cf91a42-6217-4f3c-a6f0-345d489051b9", - "last_triggered_at": "2023-04-10T08:15:56Z", - "name": "Test Alert", - "options": { - "aggregation": null, - "column": "threshold", - "display_column": "threshold", - "folder_node_internal_name": "tree/3467386930489745", - "folder_node_status": "ACTIVE", - "muted": false, - "op": "\u003e", - "parent": "folders/4451965692354143", - "query_plan": null, - "value": "50" - }, - "query": { - "created_at": "2023-04-10T08:13:33Z", - "data_source_id": "78520023-ab69-44a4-84d0-4fda0c69ea91", - "description": null, - "id": "16c4f969-eea0-4aad-8f82-03d79b078dcc", - "is_archived": false, - "is_draft": false, - "is_safe": true, - "name": "Alert Query", - "options": { - "apply_auto_limit": true, - "folder_node_internal_name": "tree/3467386930489744", - "folder_node_status": "ACTIVE", - "parameters": null, - "parent": "folders/4451965692354143", - "visualization_control_order": null - }, - "query": "select 42 as threshold", - "run_as_role": null, - "run_as_service_principal_id": null, - "schedule": null, - "tags": null, - "updated_at": "2023-04-10T08:14:13Z", - "user_id": 661448457191611, - "version": 1 - }, - "rearm": null, - "refresh_schedules": [ - { - "cron": "1 15 8 * * ?", - "data_source_id": "78520023-ab69-44a4-84d0-4fda0c69ea91", - "id": "71cebca8-3684-4b60-95f1-5d9b3786b9f8", - "job_id": "91aeb0a4644e0d357a36f61824a8c71436b61506" - } - ], - "state": "ok", - "subscriptions": [ - { - "user_id": 661448457191611 - } - ], - "updated_at": "2023-04-10T08:17:21Z", - "user": { - "email": "user@domain.com", - "id": 661448457191611, - "is_db_admin": false, - "name": "user@domain.com", - "profile_image_url": "https://www.gravatar.com/avatar/1111?s=40\u0026d=identicon" - }, - "user_id": 661448457191611 - } -] diff --git a/exporter/test-data/get-sql-queries.json b/exporter/test-data/get-sql-queries.json deleted file mode 100644 index 3ebb168036..0000000000 --- a/exporter/test-data/get-sql-queries.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "count": 1, - "page": 1, - "page_size": 25, - "results": [ - { - "created_at": "2021-04-03T13:03:51Z", - "data_source_id": "147164a6-8316-4a9d-beff-f57261801374", - "description": null, - "id": "16c4f969-eea0-4aad-8f82-03d79b078dcc", - "is_archived": false, - "is_draft": false, - "is_favorite": false, - "is_safe": true, - "name": "Jobs per day per status last 30 days", - "options": { - "apply_auto_limit": true, - "parameters": [] - }, - "query": "select\n to_date(job_runtime.startTS) as day,\n job_terminal_state,\n count(1) as cnt\nfrom\n overwatch.jobrun\ngroup by\n to_date(job_runtime.startTS),\n job_terminal_state\nhaving day \u003e date_sub(current_date(), 30)\norder by\n day desc", - "retrieved_at": "2022-01-07T13:53:08Z", - "runtime": 32.9793, - "schedule": null, - "tags": [ - "overwatch" - ], - "updated_at": "2021-09-21T16:04:23Z", - "user": { - "email": "user@domain.com", - "id": 661448457191611, - "is_db_admin": 
false, - "name": "Test", - "profile_image_url": "https://www.gravatar.com/avatar/12242?s=40\u0026d=identicon" - }, - "user_id": 661448457191611, - "version": 1 - } - ] -} diff --git a/exporter/util.go b/exporter/util.go index 6687f4dc8f..e9380a9b56 100644 --- a/exporter/util.go +++ b/exporter/util.go @@ -438,33 +438,27 @@ func appendEndingSlashToDirName(dir string) string { return dir + "/" } -func isMatchingCatalogAndSchema(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { - res_catalog_name := res.Data.Get("catalog_name").(string) - res_schema_name := res.Data.Get("schema_name").(string) - ra_catalog_name, cat_found := ra.Get("catalog_name") - ra_schema_name, schema_found := ra.Get("name") - if !cat_found || !schema_found { - log.Printf("[WARN] Can't find attributes in approximation: %s %s, catalog='%v' (found? %v) schema='%v' (found? %v). Resource: %s, catalog='%s', schema='%s'", - ra.Type, ra.Name, ra_catalog_name, cat_found, ra_schema_name, schema_found, res.Resource, res_catalog_name, res_schema_name) - return true - } - result := ra_catalog_name.(string) == res_catalog_name && ra_schema_name.(string) == res_schema_name - return result -} +func createIsMatchingCatalogAndSchema(catalog_name_attr, schema_name_attr string) func(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { + return func(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { + // catalog and schema names for the source resource + res_catalog_name := res.Data.Get(catalog_name_attr).(string) + res_schema_name := res.Data.Get(schema_name_attr).(string) + // In some cases catalog or schema name could be empty, like, in non-UC DLT pipelines, so we need to skip it + if res_catalog_name == "" || res_schema_name == "" { + return false + } + // catalog and schema names for target resource approximation + ra_catalog_name, cat_found := ra.Get("catalog_name") + ra_schema_name, schema_found := ra.Get("name") + if !cat_found || !schema_found { + log.Printf("[WARN] Can't find attributes in approximation: %s %s, catalog='%v' (found? %v) schema='%v' (found? %v). Resource: %s, catalog='%s', schema='%s'", + ra.Type, ra.Name, ra_catalog_name, cat_found, ra_schema_name, schema_found, res.Resource, res_catalog_name, res_schema_name) + return false + } + result := ra_catalog_name.(string) == res_catalog_name && ra_schema_name.(string) == res_schema_name + return result -func isMatchingCatalogAndSchemaInModelServing(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { - res_catalog_name := res.Data.Get("config.0.auto_capture_config.0.catalog_name").(string) - res_schema_name := res.Data.Get("config.0.auto_capture_config.0.schema_name").(string) - ra_catalog_name, cat_found := ra.Get("catalog_name") - ra_schema_name, schema_found := ra.Get("name") - if !cat_found || !schema_found { - log.Printf("[WARN] Can't find attributes in approximation: %s %s, catalog='%v' (found? %v) schema='%v' (found? %v). 
Resource: %s, catalog='%s', schema='%s'", - ra.Type, ra.Name, ra_catalog_name, cat_found, ra_schema_name, schema_found, res.Resource, res_catalog_name, res_schema_name) - return true } - - result := ra_catalog_name.(string) == res_catalog_name && ra_schema_name.(string) == res_schema_name - return result } func isMatchingShareRecipient(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { @@ -537,24 +531,6 @@ func (ic *importContext) emitPermissionsIfNotIgnored(r *resource, id, name strin } } -func dltIsMatchingCatalogAndSchema(ic *importContext, res *resource, ra *resourceApproximation, origPath string) bool { - res_catalog_name := res.Data.Get("catalog").(string) - if res_catalog_name == "" { - return false - } - res_schema_name := res.Data.Get("target").(string) - ra_catalog_name, cat_found := ra.Get("catalog_name") - ra_schema_name, schema_found := ra.Get("name") - if !cat_found || !schema_found { - log.Printf("[WARN] Can't find attributes in approximation: %s %s, catalog='%v' (found? %v) schema='%v' (found? %v). Resource: %s, catalog='%s', schema='%s'", - ra.Type, ra.Name, ra_catalog_name, cat_found, ra_schema_name, schema_found, res.Resource, res_catalog_name, res_schema_name) - return true - } - - result := ra_catalog_name.(string) == res_catalog_name && ra_schema_name.(string) == res_schema_name - return result -} - func (ic *importContext) emitWorkspaceBindings(securableType, securableName string) { bindings, err := ic.workspaceClient.WorkspaceBindings.GetBindingsAll(ic.Context, catalog.GetBindingsRequest{ SecurableName: securableName, diff --git a/exporter/util_workspace.go b/exporter/util_workspace.go index 470e590ef0..5a5621f806 100644 --- a/exporter/util_workspace.go +++ b/exporter/util_workspace.go @@ -52,25 +52,25 @@ func maybeStringWorkspacePrefix(path string) string { return path } -func (ic *importContext) emitWorkspaceFileOrRepo(path string) { +func (ic *importContext) emitWorkspaceObject(objType, path string) { + path = maybeStringWorkspacePrefix(path) if isRepoPath(path) { - ic.emitRepoByPath(maybeStringWorkspacePrefix(path)) + ic.emitRepoByPath(path) } else { - // TODO: wrap this into ic.shouldEmit... - // TODO: strip /Workspace prefix if it's provided - ic.Emit(&resource{ - Resource: "databricks_workspace_file", - ID: maybeStringWorkspacePrefix(path), - }) + ic.maybeEmitWorkspaceObject(objType, path, nil) } } +func (ic *importContext) emitDirectoryOrRepo(path string) { + ic.emitWorkspaceObject("databricks_directory", path) +} + +func (ic *importContext) emitWorkspaceFileOrRepo(path string) { + ic.emitWorkspaceObject("databricks_workspace_file", path) +} + func (ic *importContext) emitNotebookOrRepo(path string) { - if isRepoPath(path) { - ic.emitRepoByPath(maybeStringWorkspacePrefix(path)) - } else { - ic.maybeEmitWorkspaceObject("databricks_notebook", maybeStringWorkspacePrefix(path), nil) - } + ic.emitWorkspaceObject("databricks_notebook", path) } func (ic *importContext) getAllDirectories() []workspace.ObjectStatus { diff --git a/sql/resource_query.go b/sql/resource_query.go index 80a69a385c..120353c171 100644 --- a/sql/resource_query.go +++ b/sql/resource_query.go @@ -12,7 +12,7 @@ import ( ) // Need a struct for Query because there are aliases we need and it'll be needed in the create method. 
-type queryStruct struct { +type QueryStruct struct { sql.Query } @@ -20,13 +20,13 @@ var queryAliasMap = map[string]string{ "parameters": "parameter", } -func (queryStruct) Aliases() map[string]map[string]string { +func (QueryStruct) Aliases() map[string]map[string]string { return map[string]map[string]string{ - "sql.queryStruct": queryAliasMap, + "sql.QueryStruct": queryAliasMap, } } -func (queryStruct) CustomizeSchema(m *common.CustomizableSchema) *common.CustomizableSchema { +func (QueryStruct) CustomizeSchema(m *common.CustomizableSchema) *common.CustomizableSchema { m.SchemaPath("display_name").SetRequired().SetValidateFunc(validation.StringIsNotWhiteSpace) m.SchemaPath("query_text").SetRequired() m.SchemaPath("warehouse_id").SetRequired().SetValidateFunc(validation.StringIsNotWhiteSpace) @@ -92,7 +92,7 @@ func (queryUpdateStruct) CustomizeSchema(s *common.CustomizableSchema) *common.C } func ResourceQuery() common.Resource { - s := common.StructToSchema(queryStruct{}, nil) + s := common.StructToSchema(QueryStruct{}, nil) return common.Resource{ Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() @@ -134,7 +134,7 @@ func ResourceQuery() common.Resource { if parentPath != "" && strings.HasPrefix(apiQuery.ParentPath, "/Workspace") && !strings.HasPrefix(parentPath, "/Workspace") { apiQuery.ParentPath = strings.TrimPrefix(parentPath, "/Workspace") } - return common.StructToData(queryStruct{Query: *apiQuery}, s, d) + return common.StructToData(QueryStruct{Query: *apiQuery}, s, d) }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() From 8b2a735576d8c3fba1f3ca242622f512a703c8cf Mon Sep 17 00:00:00 2001 From: Wai Lau <59835087+Waiwait@users.noreply.github.com> Date: Thu, 24 Oct 2024 14:19:21 +0100 Subject: [PATCH 70/99] [Fix] Recreate missing system schema (#4068) ## Changes Addresses https://github.com/databricks/terraform-provider-databricks/issues/4066. Makes read function more in line with rest of module to ignore non `enabled` schemas. 
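As an illustrative sketch of the new read behaviour (not the provider's actual code; the state strings below are stand-ins for the Go SDK's `catalog.SystemSchemaInfoState*` constants): a schema whose state is not one of the enabled/legacy states has its ID cleared, so Terraform treats it as missing and plans a re-create instead of keeping a stale resource.

```go
package main

import "fmt"

// effectiveID mimics the read path: keep the ID only for enabled/legacy states,
// otherwise clear it so the resource is treated as missing and recreated.
// State names are illustrative stand-ins for the SDK enum values.
func effectiveID(id, state string) string {
	switch state {
	case "ENABLE_COMPLETED", "ENABLE_INITIALIZED", "UNAVAILABLE":
		return id // still tracked in state
	default:
		return "" // dropped from state -> recreated on next apply
	}
}

func main() {
	fmt.Printf("%q\n", effectiveID("abc|access", "AVAILABLE"))         // "" -> recreate
	fmt.Printf("%q\n", effectiveID("abc|billing", "ENABLE_COMPLETED")) // kept
}
```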
## Tests - [x] `make test` run locally - [ ] ~relevant change in `docs/` folder~ - [ ] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [ ] ~using Go SDK~ --------- Co-authored-by: wai Lau Co-authored-by: Alex Ott --- catalog/resource_system_schema.go | 11 +++++ catalog/resource_system_schema_test.go | 68 ++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) diff --git a/catalog/resource_system_schema.go b/catalog/resource_system_schema.go index 06b1ff24d3..6c033fab89 100644 --- a/catalog/resource_system_schema.go +++ b/catalog/resource_system_schema.go @@ -93,10 +93,21 @@ func ResourceSystemSchema() common.Resource { if err != nil { return err } + // only track enabled/legacy schemas + if schema.State != catalog.SystemSchemaInfoStateEnableCompleted && + schema.State != catalog.SystemSchemaInfoStateEnableInitialized && + schema.State != catalog.SystemSchemaInfoStateUnavailable { + log.Printf("[WARN] %s is not enabled, ignoring it", schemaName) + d.SetId("") + return nil + } + d.Set("full_name", fmt.Sprintf("system.%s", schemaName)) return nil } } + log.Printf("[WARN] %s does not exist, ignoring it", schemaName) + d.SetId("") return nil }, Update: createOrUpdate, diff --git a/catalog/resource_system_schema_test.go b/catalog/resource_system_schema_test.go index e576eec072..8973d8727a 100644 --- a/catalog/resource_system_schema_test.go +++ b/catalog/resource_system_schema_test.go @@ -239,6 +239,74 @@ func TestSystemSchemaRead_Error(t *testing.T) { assert.Equal(t, "abc|access", d.Id(), "Id should not be empty for error reads") } +func TestSystemSchemaRead_NotEnabled(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas?", + Response: catalog.ListSystemSchemasResponse{ + Schemas: []catalog.SystemSchemaInfo{ + { + Schema: "access", + State: catalog.SystemSchemaInfoStateAvailable, + }, + { + Schema: "billing", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, + }, + }, + }, + }, + Resource: ResourceSystemSchema(), + Read: true, + Removed: true, + ID: "abc|access", + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "", d.Id(), "Id should be empty if a schema is not enabled") +} + +func TestSystemSchemaRead_NotExists(t *testing.T) { + d, err := qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastore_summary", + Response: catalog.GetMetastoreSummaryResponse{ + MetastoreId: "abc", + }, + }, + { + Method: http.MethodGet, + Resource: "/api/2.1/unity-catalog/metastores/abc/systemschemas?", + Response: catalog.ListSystemSchemasResponse{ + Schemas: []catalog.SystemSchemaInfo{ + { + Schema: "billing", + State: catalog.SystemSchemaInfoStateEnableCompleted, + }, + }, + }, + }, + }, + Resource: ResourceSystemSchema(), + Read: true, + Removed: true, + ID: "abc|access", + }.Apply(t) + assert.NoError(t, err) + assert.Equal(t, "", d.Id(), "Id should be empty if a schema does not exist") +} + func TestSystemSchemaDelete(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ From e504790393db332f22cefbffdec742adca1ec0f9 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Mon, 28 Oct 2024 11:22:23 +0100 Subject: [PATCH 71/99] [Fix] Tolerate `databricks_permissions` resources 
for SQL warehouses with `/warehouses/...` IDs (#4158) ## Changes #4143 reported a regression to the `databricks_permissions` resource caused by https://github.com/databricks/terraform-provider-databricks/pull/3956. Normally, the ID for this resource when configured for a SQL warehouse is `/sql/warehouses/`. However, it seems like at some point in the past, some users may have had an ID of `/warehouses/`. It's possible that importing this resource worked like this: when calling the permissions REST API, whether using object type `sql/warehouses` or `warehouses`, the API returns permissions for the same resources: ``` 15:13:01 DEBUG GET /api/2.0/permissions/sql/warehouses/ < HTTP/2.0 200 OK < { < "access_control_list": [ < { < "all_permissions": [ < { < "inherited": false, < "permission_level": "IS_OWNER" < } < ], < "display_name": "", < "user_name": "" < }, < { < "all_permissions": [ < { < "inherited": true, < "inherited_from_object": [ < "/sql/warehouses/" < ], < "permission_level": "CAN_MANAGE" < } < ], < "group_name": "admins" < } < ], < "object_id": "/sql/warehouses/", < "object_type": "warehouses" < } pid=53287 sdk=true ... 15:12:56 DEBUG GET /api/2.0/permissions/warehouses/ < HTTP/2.0 200 OK < { < "access_control_list": [ < { < "all_permissions": [ < { < "inherited": false, < "permission_level": "IS_OWNER" < } < ], < "display_name": "", < "user_name": "" < }, < { < "all_permissions": [ < { < "inherited": true, < "inherited_from_object": [ < "/sql/warehouses/" < ], < "permission_level": "CAN_MANAGE" < } < ], < "group_name": "admins" < } < ], < "object_id": "/sql/warehouses/", < "object_type": "warehouses" < } pid=53248 sdk=true ``` This PR modifies the SQL warehouse configuration for `databricks_permissions` to be chosen for instances with an ID of the form `/warehouses/...`. ## Tests The additional integration test ensures that a resource can be imported with the `/warehouses/` format. 
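The prefix check itself is tiny; as a standalone sketch of the rule the new `idMatcher` encodes (function name here is illustrative, not the provider's):

```go
package main

import (
	"fmt"
	"strings"
)

// matchesSQLWarehouseID accepts both the current "/sql/warehouses/..." form and
// the legacy "/warehouses/..." form reported in #4143.
func matchesSQLWarehouseID(id string) bool {
	return strings.HasPrefix(id, "/sql/warehouses/") || strings.HasPrefix(id, "/warehouses/")
}

func main() {
	for _, id := range []string{"/sql/warehouses/abc123", "/warehouses/abc123", "/clusters/abc123"} {
		fmt.Printf("%-24s matches: %v\n", id, matchesSQLWarehouseID(id))
	}
}
```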
--- internal/acceptance/permissions_test.go | 26 +++++++++++++++++++++---- permissions/permission_definitions.go | 5 +++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 2033a100ad..325bc398fe 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -617,7 +617,16 @@ func TestAccPermissions_SqlWarehouses(t *testing.T) { resource "databricks_sql_endpoint" "this" { name = "{var.STICKY_RANDOM}" cluster_size = "2X-Small" + tags { + custom_tags { + key = "Owner" + value = "eng-dev-ecosystem-team_at_databricks.com" + } + } }` + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + var warehouseId string WorkspaceLevel(t, Step{ Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", groupPermissions("CAN_USE")), }, Step{ @@ -638,15 +647,24 @@ func TestAccPermissions_SqlWarehouses(t *testing.T) { }, Step{ Template: sqlWarehouseTemplate, Check: func(s *terraform.State) error { - w := databricks.Must(databricks.NewWorkspaceClient()) - id := s.RootModule().Resources["databricks_sql_endpoint.this"].Primary.ID - warehouse, err := w.Warehouses.GetById(context.Background(), id) + warehouseId = s.RootModule().Resources["databricks_sql_endpoint.this"].Primary.ID + warehouse, err := w.Warehouses.GetById(ctx, warehouseId) assert.NoError(t, err) - permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "warehouses", id) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "warehouses", warehouseId) assert.NoError(t, err) assertContainsPermission(t, permissions, currentPrincipalType(t), warehouse.CreatorName, iam.PermissionLevelIsOwner) return nil }, + }, Step{ + // To test import, a new permission must be added to the warehouse, as it is not possible to import databricks_permissions + // for a warehouse that has the default permissions (i.e. current user has IS_OWNER and admins have CAN_MANAGE). + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", groupPermissions("CAN_USE")), + }, Step{ + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", groupPermissions("CAN_USE")), + // Verify that we can use "/warehouses/" instead of "/sql/warehouses/" + ResourceName: "databricks_permissions.this", + ImportState: true, + ImportStateIdFunc: func(s *terraform.State) (string, error) { return "/warehouses/" + warehouseId, nil }, }) } diff --git a/permissions/permission_definitions.go b/permissions/permission_definitions.go index fbc9158517..48e6d7a56f 100644 --- a/permissions/permission_definitions.go +++ b/permissions/permission_definitions.go @@ -558,6 +558,11 @@ func allResourcePermissions() []resourcePermissions { field: "sql_endpoint_id", objectType: "warehouses", requestObjectType: "sql/warehouses", + // ISSUE-4143: some older warehouse permissions have an ID that starts with "/warehouses" instead of "/sql/warehouses" + // Because no idRetriever is defined, any warehouse permissions resources will be created with the "/sql/warehouses" prefix. 
+ idMatcher: func(id string) bool { + return strings.HasPrefix(id, "/sql/warehouses/") || strings.HasPrefix(id, "/warehouses/") + }, allowedPermissionLevels: map[string]permissionLevelOptions{ "CAN_USE": {isManagementPermission: false}, "CAN_MANAGE": {isManagementPermission: true}, From 948bf08769c1cccc82a85e747ea3cb03c2033a5a Mon Sep 17 00:00:00 2001 From: Kohei Watanabe Date: Mon, 28 Oct 2024 22:46:16 +0900 Subject: [PATCH 72/99] [Doc] Fix `databricks_grant` regarding metastore_id description (#4164) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I found the mistakes in the doc: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/grant so let me fix them 🙇 ## Changes ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/grant.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/resources/grant.md b/docs/resources/grant.md index 5f2f8b4326..9cd9bc0163 100644 --- a/docs/resources/grant.md +++ b/docs/resources/grant.md @@ -30,11 +30,15 @@ See [databricks_grants Metastore grants](grants.md#metastore-grants) for the lis ```hcl resource "databricks_grant" "sandbox_data_engineers" { + metastore = "metastore_id" + principal = "Data Engineers" privileges = ["CREATE_CATALOG", "CREATE_EXTERNAL_LOCATION"] } resource "databricks_grant" "sandbox_data_sharer" { + metastore = "metastore_id" + principal = "Data Sharer" privileges = ["CREATE_RECIPIENT", "CREATE_SHARE"] } @@ -46,7 +50,6 @@ See [databricks_grants Catalog grants](grants.md#catalog-grants) for the list of ```hcl resource "databricks_catalog" "sandbox" { - metastore_id = databricks_metastore.this.id name = "sandbox" comment = "this catalog is managed by terraform" properties = { From 2c13e8df5c75ea782ac58a946dba99efc2059fb9 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Mon, 28 Oct 2024 10:06:19 -0400 Subject: [PATCH 73/99] [Exporter] Improving reliability of `Emit` function (#4163) ## Changes I found in the large-scale testing that sometimes we don't handle emitting of the same resource reliably, and this may lead to generation of duplicate resources (very small amount, but still) - found this in a very specific case when notebooks were listed without directories. This PR fixes this problem: - by tracking if resource is already in importing queue - detecting duplicates during code generation It also may improve performance a bit (2-3%). 
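For illustration, the guard boils down to a mutex-protected check-and-mark on the importing map before any work is queued; a minimal, self-contained sketch of that pattern (simplified names and types, not the exporter's actual ones):

```go
package main

import (
	"fmt"
	"sync"
)

// importTracker is a simplified stand-in for the exporter's importing map:
// the first caller to claim a resource key wins, concurrent duplicates are skipped.
type importTracker struct {
	mu        sync.Mutex
	importing map[string]bool
}

// tryClaim marks the key as "being imported" and reports whether this caller
// was the first to do so.
func (t *importTracker) tryClaim(key string) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if _, ok := t.importing[key]; ok {
		return false // already queued by another goroutine
	}
	t.importing[key] = false // started, not yet fully added
	return true
}

func main() {
	tracker := &importTracker{importing: map[string]bool{}}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if tracker.tryClaim("databricks_directory /Shared/tmp") {
				fmt.Println("claimed, will import")
			} else {
				fmt.Println("duplicate, skipped")
			}
		}()
	}
	wg.Wait()
}
```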
## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- exporter/codegen.go | 25 +++++++++++++++---------- exporter/context.go | 41 ++++++++++++++++++++++++++++------------- 2 files changed, 43 insertions(+), 23 deletions(-) diff --git a/exporter/codegen.go b/exporter/codegen.go index dee6f12fd4..549b836ce4 100644 --- a/exporter/codegen.go +++ b/exporter/codegen.go @@ -904,22 +904,27 @@ func (ic *importContext) handleResourceWrite(generatedFile string, ch dataWriteC return } - // newResources := make(map[string]struct{}, 100) log.Printf("[DEBUG] started processing new writes for %s", generatedFile) for f := range ch { if f != nil { - log.Printf("[DEBUG] started writing resource body for %s", f.BlockName) - _, err = tf.WriteString(f.ResourceBody) - if err == nil { - newResources[f.BlockName] = struct{}{} - if f.ImportCommand != "" { - ic.waitGroup.Add(1) - importChan <- f.ImportCommand + // check if we have the same blockname already written. To avoid duplicates + _, exists := newResources[f.BlockName] + if !exists { + log.Printf("[DEBUG] started writing resource body for %s", f.BlockName) + _, err = tf.WriteString(f.ResourceBody) + if err == nil { + newResources[f.BlockName] = struct{}{} + if f.ImportCommand != "" { + ic.waitGroup.Add(1) + importChan <- f.ImportCommand + } + log.Printf("[DEBUG] finished writing resource body for %s", f.BlockName) + } else { + log.Printf("[ERROR] Error when writing to %s: %v", generatedFile, err) } - log.Printf("[DEBUG] finished writing resource body for %s", f.BlockName) } else { - log.Printf("[ERROR] Error when writing to %s: %v", generatedFile, err) + log.Printf("[WARN] Found duplicate resource: '%s'", f.BlockName) } } else { log.Print("[WARN] got nil as resourceWriteData!") diff --git a/exporter/context.go b/exporter/context.go index c7f2b18235..ffb230a4e8 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -204,7 +204,7 @@ var goroutinesNumber = map[string]int{ "databricks_sql_dashboard": 3, "databricks_sql_widget": 4, "databricks_sql_visualization": 4, - "databricks_query": 4, + "databricks_query": 6, "databricks_alert": 2, "databricks_permissions": 11, } @@ -615,17 +615,20 @@ func (ic *importContext) HasInState(r *resource, onlyAdded bool) bool { return ic.State.Has(r) } -func (ic *importContext) setImportingState(s string, state bool) { - ic.importingMutex.Lock() - defer ic.importingMutex.Unlock() - ic.importing[s] = state -} - func (ic *importContext) Add(r *resource) { if ic.HasInState(r, true) { // resource must exist and already marked as added return } - ic.setImportingState(r.String(), true) // mark resource as added + rString := r.String() + ic.importingMutex.Lock() + _, ok := ic.importing[rString] + if ok { + ic.importingMutex.Unlock() + log.Printf("[DEBUG] %s already being added", rString) + return + } + ic.importing[rString] = true // mark resource as added + ic.importingMutex.Unlock() state := r.Data.State() if state == nil { log.Printf("[ERROR] state is nil for %s", r) @@ -648,7 +651,6 @@ func (ic *importContext) Add(r *resource) { Instances: []instanceApproximation{inst}, Resource: r, }) - // in single-threaded scenario scope is toposorted ic.Scope.Append(r) } @@ -727,14 +729,25 @@ func (ic *importContext) Emit(r *resource) { log.Printf("[DEBUG] %s already imported", r) return } + rString := r.String() if ic.testEmits != nil { log.Printf("[INFO] %s is emitted in test mode", r) 
ic.testEmitsMutex.Lock() - ic.testEmits[r.String()] = true + ic.testEmits[rString] = true ic.testEmitsMutex.Unlock() return } - ic.setImportingState(r.String(), false) // we're starting to add a new resource + // we need to check that we're not importing the same resource twice - this may happen under high concurrency + // for specific resources, for example, directories when they aren't part of the listing + ic.importingMutex.Lock() + res, ok := ic.importing[rString] + if ok { + ic.importingMutex.Unlock() + log.Printf("[DEBUG] %s already being imported: %v", rString, res) + return + } + ic.importing[rString] = false // // we're starting to add a new resource + ic.importingMutex.Unlock() _, ok = ic.Resources[r.Resource] if !ok { log.Printf("[ERROR] %s is not available in provider", r) @@ -745,8 +758,10 @@ func (ic *importContext) Emit(r *resource) { log.Printf("[DEBUG] %s (%s service) is not part of the account level export", r.Resource, ir.Service) return } - // TODO: add similar condition for checking workspace-level objects only. After new ACLs import is merged - + if !ic.accountLevel && !ir.WorkspaceLevel { + log.Printf("[DEBUG] %s (%s service) is not part of the workspace level export", r.Resource, ir.Service) + return + } // from here, it should be done by the goroutine... send resource into the channel ch, exists := ic.channels[r.Resource] if exists { From f382e4fb1dc7cc5701b22cd32c597c011c38cb5e Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Mon, 28 Oct 2024 17:58:33 +0100 Subject: [PATCH 74/99] [Release] Release v1.56.0 (#4167) ### Bug Fixes * Recreate missing system schema ([#4068](https://github.com/databricks/terraform-provider-databricks/pull/4068)). * Tolerate `databricks_permissions` resources for SQL warehouses with `/warehouses/...` IDs ([#4158](https://github.com/databricks/terraform-provider-databricks/pull/4158)). ### Documentation * Fix `databricks_grant` regarding metastore_id description ([#4164](https://github.com/databricks/terraform-provider-databricks/pull/4164)). ### Internal Changes * Automatically trigger integration tests on PR ([#4149](https://github.com/databricks/terraform-provider-databricks/pull/4149)). ### Exporter * **Breaking change** Use new query and alert resources instead of legacy resources ([#4150](https://github.com/databricks/terraform-provider-databricks/pull/4150)). * Improve exporting of `databricks_pipeline` resources ([#4142](https://github.com/databricks/terraform-provider-databricks/pull/4142)). * Improving reliability of `Emit` function ([#4163](https://github.com/databricks/terraform-provider-databricks/pull/4163)). --- CHANGELOG.md | 25 +++++++++++++++++++++++++ common/version.go | 2 +- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77a6da9f8d..7f2de01332 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Version changelog +## [Release] Release v1.56.0 + +### Bug Fixes + + * Recreate missing system schema ([#4068](https://github.com/databricks/terraform-provider-databricks/pull/4068)). + * Tolerate `databricks_permissions` resources for SQL warehouses with `/warehouses/...` IDs ([#4158](https://github.com/databricks/terraform-provider-databricks/pull/4158)). + + +### Documentation + + * Fix `databricks_grant` regarding metastore_id description ([#4164](https://github.com/databricks/terraform-provider-databricks/pull/4164)). 
+ + +### Internal Changes + + * Automatically trigger integration tests on PR ([#4149](https://github.com/databricks/terraform-provider-databricks/pull/4149)). + + +### Exporter + + * **Breaking change** Use new query and alert resources instead of legacy resources ([#4150](https://github.com/databricks/terraform-provider-databricks/pull/4150)). + * Improve exporting of `databricks_pipeline` resources ([#4142](https://github.com/databricks/terraform-provider-databricks/pull/4142)). + * Improving reliability of `Emit` function ([#4163](https://github.com/databricks/terraform-provider-databricks/pull/4163)). + + ## [Release] Release v1.55.0 ### New Features and Improvements diff --git a/common/version.go b/common/version.go index c3770f13cd..44a7242a45 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.55.0" + version = "1.56.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From 0975310040346b75843993e92f76f8abc891b70c Mon Sep 17 00:00:00 2001 From: Omer Lachish <289488+rauchy@users.noreply.github.com> Date: Tue, 29 Oct 2024 10:26:54 +0100 Subject: [PATCH 75/99] [Internal] Migrate Share Resource to Plugin Framework (#4047) ## Changes This PR migrates the share resource to the Plugin framework. The code was largely copied "as is" from the previous implementation of the share resource, with the necessary adaptations made for integration with the Plugin framework. This implementation utilizes the newly generated Effective fields to provide the functionality that was previously achieved through diff suppression. ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Omer Lachish --- .codegen/model.go.tmpl | 1 + .../providers/pluginfw/converters/tf_to_go.go | 2 +- internal/providers/pluginfw/pluginfw.go | 2 + .../resources/sharing/resource_acc_test.go | 204 +++++++++ .../resources/sharing/resource_share.go | 401 ++++++++++++++++++ internal/service/apps_tf/model.go | 13 + internal/service/catalog_tf/model.go | 2 + internal/service/dashboards_tf/model.go | 51 +++ internal/service/jobs_tf/model.go | 2 + internal/service/provisioning_tf/model.go | 9 + internal/service/settings_tf/model.go | 10 + internal/service/sharing_tf/model.go | 6 + 12 files changed, 702 insertions(+), 1 deletion(-) create mode 100644 internal/providers/pluginfw/resources/sharing/resource_acc_test.go create mode 100644 internal/providers/pluginfw/resources/sharing/resource_share.go diff --git a/.codegen/model.go.tmpl b/.codegen/model.go.tmpl index 7d20bea4e7..714401729e 100644 --- a/.codegen/model.go.tmpl +++ b/.codegen/model.go.tmpl @@ -53,6 +53,7 @@ func (newState *{{.PascalName}}) SyncEffectiveFieldsDuringRead(existingState {{. 
{{- if .Entity.IsFloat64}}{{$type = "Float64"}}{{end}} {{- if .Entity.IsInt}}{{$type = "Int64"}}{{end}} {{- if .Entity.Enum}}{{$type = "String"}}{{end}} + newState.Effective{{.PascalName}} = existingState.Effective{{.PascalName}} if existingState.Effective{{.PascalName}}.Value{{$type}}() == newState.{{.PascalName}}.Value{{$type}}() { newState.{{.PascalName}} = existingState.{{.PascalName}} } diff --git a/internal/providers/pluginfw/converters/tf_to_go.go b/internal/providers/pluginfw/converters/tf_to_go.go index 70efd92a36..27eb02d915 100644 --- a/internal/providers/pluginfw/converters/tf_to_go.go +++ b/internal/providers/pluginfw/converters/tf_to_go.go @@ -184,7 +184,7 @@ func tfsdkToGoSdkStructField(srcField reflect.Value, destField reflect.Value, sr // This is the case for enum. // Skip unset value. - if srcField.IsZero() { + if srcField.IsZero() || v.ValueString() == "" { return } diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index db811d5ae2..53b361f998 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -21,6 +21,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/registered_model" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/sharing" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" "github.com/hashicorp/terraform-plugin-framework/datasource" @@ -47,6 +48,7 @@ func (p *DatabricksProviderPluginFramework) Resources(ctx context.Context) []fun return []func() resource.Resource{ qualitymonitor.ResourceQualityMonitor, library.ResourceLibrary, + sharing.ResourceShare, } } diff --git a/internal/providers/pluginfw/resources/sharing/resource_acc_test.go b/internal/providers/pluginfw/resources/sharing/resource_acc_test.go new file mode 100644 index 0000000000..7018e0b402 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/resource_acc_test.go @@ -0,0 +1,204 @@ +package sharing_test + +import ( + "fmt" + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" +) + +const preTestTemplate = ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.STICKY_RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + } + + resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things{var.STICKY_RANDOM}" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } + } + + resource "databricks_table" "mytable" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = "{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } + + resource "databricks_table" "mytable_2" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar_2" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = 
"{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } + + resource "databricks_table" "mytable_3" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar_3" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = "{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } +` + +const preTestTemplateUpdate = ` + resource "databricks_grants" "some" { + catalog = databricks_catalog.sandbox.id + grant { + principal = "account users" + privileges = ["ALL_PRIVILEGES"] + } + grant { + principal = "{env.TEST_METASTORE_ADMIN_GROUP_NAME}" + privileges = ["ALL_PRIVILEGES"] + } + } +` + +func TestUcAccCreateShare(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplate + ` + resource "databricks_share_pluginframework" "myshare" { + name = "{var.STICKY_RANDOM}-terraform-delta-share" + owner = "account users" + object { + name = databricks_table.mytable.id + comment = "c" + data_object_type = "TABLE" + } + object { + name = databricks_table.mytable_2.id + cdf_enabled = false + comment = "c" + data_object_type = "TABLE" + } + } + + resource "databricks_recipient" "db2open" { + name = "{var.STICKY_RANDOM}-terraform-db2open-recipient" + comment = "made by terraform" + authentication_type = "TOKEN" + sharing_code = "{var.STICKY_RANDOM}" + ip_access_list { + // using private ip for acc testing + allowed_ip_addresses = ["10.0.0.0/16"] + } + } + + resource "databricks_grants" "some" { + share = databricks_share_pluginframework.myshare.name + grant { + principal = databricks_recipient.db2open.name + privileges = ["SELECT"] + } + } + `, + }) +} + +func shareTemplateWithOwner(comment string, owner string) string { + return fmt.Sprintf(` + resource "databricks_share_pluginframework" "myshare" { + name = "{var.STICKY_RANDOM}-terraform-delta-share" + owner = "%s" + object { + name = databricks_table.mytable.id + comment = "%s" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + + }`, owner, comment) +} + +func TestUcAccUpdateShare(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("c", "account users"), + }, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("e", "account users"), + }, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("e", "{env.TEST_DATA_ENG_GROUP}"), + }, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("f", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), + }) +} + +func TestUcAccUpdateShareAddObject(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + + `resource "databricks_share_pluginframework" "myshare" { + name = "{var.STICKY_RANDOM}-terraform-delta-share" + owner = "account users" + object { + name = databricks_table.mytable.id + comment = "A" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + object { + name = databricks_table.mytable_3.id + comment = "C" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + + }`, + }, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + + `resource "databricks_share_pluginframework" "myshare" { + name = 
"{var.STICKY_RANDOM}-terraform-delta-share" + owner = "account users" + object { + name = databricks_table.mytable.id + comment = "AA" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + object { + name = databricks_table.mytable_2.id + comment = "BB" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + object { + name = databricks_table.mytable_3.id + comment = "CC" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + }`, + }) +} diff --git a/internal/providers/pluginfw/resources/sharing/resource_share.go b/internal/providers/pluginfw/resources/sharing/resource_share.go new file mode 100644 index 0000000000..b96cd0e976 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/resource_share.go @@ -0,0 +1,401 @@ +package sharing + +import ( + "context" + "reflect" + "sort" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/sharing" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + pluginfwcontext "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/context" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/sharing_tf" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +const resourceName = "share" + +var _ resource.ResourceWithConfigure = &ShareResource{} + +func ResourceShare() resource.Resource { + return &ShareResource{} +} + +type ShareInfoExtended struct { + sharing_tf.ShareInfo +} + +func matchOrder[T any, K comparable](target, reference []T, keyFunc func(T) K) { + // Create a map to store the index positions of each key in the reference slice. + orderMap := make(map[K]int) + for index, item := range reference { + orderMap[keyFunc(item)] = index + } + + // Sort the target slice based on the order defined in the orderMap. + sort.Slice(target, func(i, j int) bool { + return orderMap[keyFunc(target[i])] < orderMap[keyFunc(target[j])] + }) +} + +func suppressCDFEnabledDiff(si *sharing.ShareInfo) { + //suppress diff for CDF Enabled if HistoryDataSharingStatus is enabled , as API does not accept both fields to be set + for i := range si.Objects { + if si.Objects[i].HistoryDataSharingStatus == "ENABLED" { + si.Objects[i].CdfEnabled = false + } + } +} + +func resourceShareMap(si sharing.ShareInfo) map[string]sharing.SharedDataObject { + m := make(map[string]sharing.SharedDataObject, len(si.Objects)) + for _, sdo := range si.Objects { + m[sdo.Name] = sdo + } + return m +} + +func equal(this sharing.SharedDataObject, other sharing.SharedDataObject) bool { + if other.SharedAs == "" { + other.SharedAs = this.SharedAs + } + //don't compare computed fields + other.AddedAt = this.AddedAt + other.AddedBy = this.AddedBy + other.Status = this.Status + other.HistoryDataSharingStatus = this.HistoryDataSharingStatus + other.ForceSendFields = this.ForceSendFields // TODO: is this the right thing to do? 
+ return reflect.DeepEqual(this, other) +} + +func diff(beforeSi sharing.ShareInfo, afterSi sharing.ShareInfo) []sharing.SharedDataObjectUpdate { + beforeMap := resourceShareMap(beforeSi) + afterMap := resourceShareMap(afterSi) + changes := []sharing.SharedDataObjectUpdate{} + // not in after so remove + for _, beforeSdo := range beforeSi.Objects { + if _, ok := afterMap[beforeSdo.Name]; ok { + continue + } + changes = append(changes, sharing.SharedDataObjectUpdate{ + Action: sharing.SharedDataObjectUpdateActionRemove, + DataObject: &beforeSdo, + }) + } + + // not in before so add + // if in before but diff then update + for _, afterSdo := range afterSi.Objects { + if beforeSdo, ok := beforeMap[afterSdo.Name]; ok { + if !equal(beforeSdo, afterSdo) { + // do not send SharedAs + afterSdo.SharedAs = "" + changes = append(changes, sharing.SharedDataObjectUpdate{ + Action: sharing.SharedDataObjectUpdateActionUpdate, + DataObject: &afterSdo, + }) + } + continue + } + changes = append(changes, sharing.SharedDataObjectUpdate{ + Action: sharing.SharedDataObjectUpdateActionAdd, + DataObject: &afterSdo, + }) + } + return changes +} + +func shareChanges(si sharing.ShareInfo, action string) sharing.UpdateShare { + var changes []sharing.SharedDataObjectUpdate + for _, obj := range si.Objects { + changes = append(changes, sharing.SharedDataObjectUpdate{ + Action: sharing.SharedDataObjectUpdateAction(action), + DataObject: &obj, + }, + ) + } + return sharing.UpdateShare{ + Name: si.Name, + Owner: si.Owner, + Updates: changes, + } +} + +type ShareResource struct { + Client *common.DatabricksClient +} + +func (r *ShareResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = pluginfwcommon.GetDatabricksStagingName(resourceName) +} + +func (r *ShareResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + attrs, blocks := tfschema.ResourceStructToSchemaMap(ShareInfoExtended{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { + c.SetRequired("name") + + c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "name") // ForceNew + c.AddPlanModifier(int64planmodifier.UseStateForUnknown(), "created_at") + c.AddPlanModifier(stringplanmodifier.UseStateForUnknown(), "created_by") + + c.SetRequired("object", "data_object_type") + c.SetRequired("object", "partitions", "values", "op") + c.SetRequired("object", "partitions", "values", "name") + return c + }) + resp.Schema = schema.Schema{ + Description: "Terraform schema for Databricks Share", + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *ShareResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if d.Client == nil && req.ProviderData != nil { + d.Client = pluginfwcommon.ConfigureResource(req, resp) + } +} + +func (r *ShareResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) + + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + var plan ShareInfoExtended + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var planGoSDK sharing.ShareInfo + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, plan, &planGoSDK)...) 
+ if resp.Diagnostics.HasError() { + return + } + + var createShare sharing.CreateShare + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, plan, &createShare)...) + if resp.Diagnostics.HasError() { + return + } + shareInfo, err := w.Shares.Create(ctx, createShare) + if err != nil { + resp.Diagnostics.AddError("failed to create share", err.Error()) + return + } + + shareChanges := shareChanges(planGoSDK, string(sharing.SharedDataObjectUpdateActionAdd)) + + updatedShareInfo, err := w.Shares.Update(ctx, shareChanges) + if err != nil { + // delete orphaned share if update fails + if d_err := w.Shares.DeleteByName(ctx, shareInfo.Name); d_err != nil { + resp.Diagnostics.AddError("failed to delete orphaned share", d_err.Error()) + return + } + resp.Diagnostics.AddError("failed to update share", err.Error()) + return + } + + matchOrder(updatedShareInfo.Objects, planGoSDK.Objects, func(obj sharing.SharedDataObject) string { return obj.Name }) + + var newState ShareInfoExtended + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, updatedShareInfo, &newState)...) + if resp.Diagnostics.HasError() { + return + } + + newState.SyncEffectiveFieldsDuringCreateOrUpdate(plan.ShareInfo) + for i := range newState.Objects { + newState.Objects[i].SyncEffectiveFieldsDuringCreateOrUpdate(plan.Objects[i]) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, newState)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *ShareResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) + + var existingState ShareInfoExtended + resp.Diagnostics.Append(req.State.Get(ctx, &existingState)...) + if resp.Diagnostics.HasError() { + return + } + + var stateGoSDK sharing.ShareInfo + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, existingState, &stateGoSDK)...) + if resp.Diagnostics.HasError() { + return + } + + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var getShareRequest sharing.GetShareRequest + getShareRequest.IncludeSharedData = true + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("name"), &getShareRequest.Name)...) + if resp.Diagnostics.HasError() { + return + } + + shareInfo, err := w.Shares.Get(ctx, getShareRequest) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + return + } + resp.Diagnostics.AddError("failed to get share", err.Error()) + return + } + + matchOrder(shareInfo.Objects, stateGoSDK.Objects, func(obj sharing.SharedDataObject) string { return obj.Name }) + suppressCDFEnabledDiff(shareInfo) + + var newState ShareInfoExtended + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, shareInfo, &newState)...) + if resp.Diagnostics.HasError() { + return + } + + newState.SyncEffectiveFieldsDuringRead(existingState.ShareInfo) + for i := range newState.Objects { + newState.Objects[i].SyncEffectiveFieldsDuringRead(existingState.Objects[i]) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, newState)...) +} + +func (r *ShareResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) + + var state ShareInfoExtended + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + client, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + var plan ShareInfoExtended + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var planGoSDK sharing.ShareInfo + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, plan, &planGoSDK)...) + if resp.Diagnostics.HasError() { + return + } + + var getShareRequest sharing.GetShareRequest + getShareRequest.Name = state.Name.ValueString() + getShareRequest.IncludeSharedData = true + + currentShareInfo, err := client.Shares.Get(ctx, getShareRequest) + if err != nil { + resp.Diagnostics.AddError("failed to get current share info", err.Error()) + return + } + + matchOrder(currentShareInfo.Objects, planGoSDK.Objects, func(obj sharing.SharedDataObject) string { return obj.Name }) + suppressCDFEnabledDiff(currentShareInfo) + + changes := diff(*currentShareInfo, planGoSDK) + + // if owner has changed, update the share owner + if !plan.Owner.IsNull() { + updatedShareInfo, err := client.Shares.Update(ctx, sharing.UpdateShare{ + Name: state.Name.ValueString(), + Owner: plan.Owner.ValueString(), + }) + if err == nil { + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, updatedShareInfo, &state)...) + if resp.Diagnostics.HasError() { + return + } + } else { + resp.Diagnostics.AddError("failed to update share owner", err.Error()) + return + } + } + + if len(changes) > 0 { + // if there are any other changes, update the share with the changes + updatedShareInfo, err := client.Shares.Update(ctx, sharing.UpdateShare{ + Name: plan.Name.ValueString(), + Updates: changes, + }) + + if err != nil { + resp.Diagnostics.AddError("failed to update share", err.Error()) + + rollbackShareInfo, rollbackErr := client.Shares.Update(ctx, sharing.UpdateShare{ + Name: currentShareInfo.Name, + Owner: currentShareInfo.Owner, + }) + if rollbackErr != nil { + resp.Diagnostics.AddError("failed to roll back", common.OwnerRollbackError(err, rollbackErr, currentShareInfo.Owner, plan.Owner.ValueString()).Error()) + return + } + + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, rollbackShareInfo, &state)...) + if resp.Diagnostics.HasError() { + return + } + } + + matchOrder(updatedShareInfo.Objects, planGoSDK.Objects, func(obj sharing.SharedDataObject) string { return obj.Name }) + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, updatedShareInfo, &state)...) + if resp.Diagnostics.HasError() { + return + } + } + + state.SyncEffectiveFieldsDuringCreateOrUpdate(plan.ShareInfo) + for i := range state.Objects { + state.Objects[i].SyncEffectiveFieldsDuringCreateOrUpdate(plan.Objects[i]) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, state)...) +} + +func (r *ShareResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) + + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var deleteShareRequest sharing_tf.DeleteShareRequest + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("name"), &deleteShareRequest.Name)...) 
+ if resp.Diagnostics.HasError() { + return + } + err := w.Shares.DeleteByName(ctx, deleteShareRequest.Name.ValueString()) + if err != nil && !apierr.IsMissing(err) { + resp.Diagnostics.AddError("failed to delete share", err.Error()) + return + } +} diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 2c5594a1ec..4f90baf118 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -77,24 +77,31 @@ func (newState *App) SyncEffectiveFieldsDuringCreateOrUpdate(plan App) { } func (newState *App) SyncEffectiveFieldsDuringRead(existingState App) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveCreator = existingState.EffectiveCreator if existingState.EffectiveCreator.ValueString() == newState.Creator.ValueString() { newState.Creator = existingState.Creator } + newState.EffectiveServicePrincipalId = existingState.EffectiveServicePrincipalId if existingState.EffectiveServicePrincipalId.ValueInt64() == newState.ServicePrincipalId.ValueInt64() { newState.ServicePrincipalId = existingState.ServicePrincipalId } + newState.EffectiveServicePrincipalName = existingState.EffectiveServicePrincipalName if existingState.EffectiveServicePrincipalName.ValueString() == newState.ServicePrincipalName.ValueString() { newState.ServicePrincipalName = existingState.ServicePrincipalName } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } + newState.EffectiveUpdater = existingState.EffectiveUpdater if existingState.EffectiveUpdater.ValueString() == newState.Updater.ValueString() { newState.Updater = existingState.Updater } + newState.EffectiveUrl = existingState.EffectiveUrl if existingState.EffectiveUrl.ValueString() == newState.Url.ValueString() { newState.Url = existingState.Url } @@ -174,12 +181,15 @@ func (newState *AppDeployment) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppD } func (newState *AppDeployment) SyncEffectiveFieldsDuringRead(existingState AppDeployment) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveCreator = existingState.EffectiveCreator if existingState.EffectiveCreator.ValueString() == newState.Creator.ValueString() { newState.Creator = existingState.Creator } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } @@ -211,6 +221,7 @@ func (newState *AppDeploymentStatus) SyncEffectiveFieldsDuringCreateOrUpdate(pla } func (newState *AppDeploymentStatus) SyncEffectiveFieldsDuringRead(existingState AppDeploymentStatus) { + newState.EffectiveMessage = existingState.EffectiveMessage if existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { newState.Message = existingState.Message } @@ -361,6 +372,7 @@ func (newState *ApplicationStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan } func (newState *ApplicationStatus) SyncEffectiveFieldsDuringRead(existingState ApplicationStatus) { + newState.EffectiveMessage = existingState.EffectiveMessage if 
existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { newState.Message = existingState.Message } @@ -380,6 +392,7 @@ func (newState *ComputeStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan Comp } func (newState *ComputeStatus) SyncEffectiveFieldsDuringRead(existingState ComputeStatus) { + newState.EffectiveMessage = existingState.EffectiveMessage if existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { newState.Message = existingState.Message } diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index d064881534..55778dfa87 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -3012,6 +3012,7 @@ func (newState *OnlineTable) SyncEffectiveFieldsDuringCreateOrUpdate(plan Online } func (newState *OnlineTable) SyncEffectiveFieldsDuringRead(existingState OnlineTable) { + newState.EffectiveTableServingUrl = existingState.EffectiveTableServingUrl if existingState.EffectiveTableServingUrl.ValueString() == newState.TableServingUrl.ValueString() { newState.TableServingUrl = existingState.TableServingUrl } @@ -3051,6 +3052,7 @@ func (newState *OnlineTableSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan On } func (newState *OnlineTableSpec) SyncEffectiveFieldsDuringRead(existingState OnlineTableSpec) { + newState.EffectivePipelineId = existingState.EffectivePipelineId if existingState.EffectivePipelineId.ValueString() == newState.PipelineId.ValueString() { newState.PipelineId = existingState.PipelineId } diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index 85023fe5e2..b76126b18b 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -41,6 +41,7 @@ func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate( } func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateDashboardRequest) { + newState.EffectiveParentPath = existingState.EffectiveParentPath if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { newState.ParentPath = existingState.ParentPath } @@ -65,6 +66,7 @@ func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState CreateScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } @@ -90,9 +92,11 @@ func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpda } func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState CreateSubscriptionRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } @@ -175,21 +179,27 @@ func (newState *Dashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan Dashboar } func (newState *Dashboard) SyncEffectiveFieldsDuringRead(existingState Dashboard) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if 
existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveParentPath = existingState.EffectiveParentPath if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { newState.ParentPath = existingState.ParentPath } + newState.EffectivePath = existingState.EffectivePath if existingState.EffectivePath.ValueString() == newState.Path.ValueString() { newState.Path = existingState.Path } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } @@ -219,12 +229,15 @@ func (newState *DeleteScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *DeleteScheduleRequest) SyncEffectiveFieldsDuringRead(existingState DeleteScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } @@ -268,15 +281,19 @@ func (newState *DeleteSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpda } func (newState *DeleteSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteSubscriptionRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } + newState.EffectiveSubscriptionId = existingState.EffectiveSubscriptionId if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { newState.SubscriptionId = existingState.SubscriptionId } @@ -520,9 +537,11 @@ func (newState *GetScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan } func (newState *GetScheduleRequest) SyncEffectiveFieldsDuringRead(existingState GetScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = 
existingState.ScheduleId } @@ -551,12 +570,15 @@ func (newState *GetSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate( } func (newState *GetSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState GetSubscriptionRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } + newState.EffectiveSubscriptionId = existingState.EffectiveSubscriptionId if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { newState.SubscriptionId = existingState.SubscriptionId } @@ -583,6 +605,7 @@ func (newState *ListDashboardsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *ListDashboardsRequest) SyncEffectiveFieldsDuringRead(existingState ListDashboardsRequest) { + newState.EffectivePageToken = existingState.EffectivePageToken if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { newState.PageToken = existingState.PageToken } @@ -602,6 +625,7 @@ func (newState *ListDashboardsResponse) SyncEffectiveFieldsDuringCreateOrUpdate( } func (newState *ListDashboardsResponse) SyncEffectiveFieldsDuringRead(existingState ListDashboardsResponse) { + newState.EffectiveNextPageToken = existingState.EffectiveNextPageToken if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { newState.NextPageToken = existingState.NextPageToken } @@ -628,9 +652,11 @@ func (newState *ListSchedulesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(pl } func (newState *ListSchedulesRequest) SyncEffectiveFieldsDuringRead(existingState ListSchedulesRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectivePageToken = existingState.EffectivePageToken if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { newState.PageToken = existingState.PageToken } @@ -652,6 +678,7 @@ func (newState *ListSchedulesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *ListSchedulesResponse) SyncEffectiveFieldsDuringRead(existingState ListSchedulesResponse) { + newState.EffectiveNextPageToken = existingState.EffectiveNextPageToken if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { newState.NextPageToken = existingState.NextPageToken } @@ -683,12 +710,15 @@ func (newState *ListSubscriptionsRequest) SyncEffectiveFieldsDuringCreateOrUpdat } func (newState *ListSubscriptionsRequest) SyncEffectiveFieldsDuringRead(existingState ListSubscriptionsRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectivePageToken = existingState.EffectivePageToken if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { newState.PageToken = existingState.PageToken } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == 
newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } @@ -710,6 +740,7 @@ func (newState *ListSubscriptionsResponse) SyncEffectiveFieldsDuringCreateOrUpda } func (newState *ListSubscriptionsResponse) SyncEffectiveFieldsDuringRead(existingState ListSubscriptionsResponse) { + newState.EffectiveNextPageToken = existingState.EffectiveNextPageToken if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { newState.NextPageToken = existingState.NextPageToken } @@ -782,9 +813,11 @@ func (newState *PublishedDashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan } func (newState *PublishedDashboard) SyncEffectiveFieldsDuringRead(existingState PublishedDashboard) { + newState.EffectiveDisplayName = existingState.EffectiveDisplayName if existingState.EffectiveDisplayName.ValueString() == newState.DisplayName.ValueString() { newState.DisplayName = existingState.DisplayName } + newState.EffectiveRevisionCreateTime = existingState.EffectiveRevisionCreateTime if existingState.EffectiveRevisionCreateTime.ValueString() == newState.RevisionCreateTime.ValueString() { newState.RevisionCreateTime = existingState.RevisionCreateTime } @@ -873,18 +906,23 @@ func (newState *Schedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan Schedule) } func (newState *Schedule) SyncEffectiveFieldsDuringRead(existingState Schedule) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } @@ -953,24 +991,31 @@ func (newState *Subscription) SyncEffectiveFieldsDuringCreateOrUpdate(plan Subsc } func (newState *Subscription) SyncEffectiveFieldsDuringRead(existingState Subscription) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveCreatedByUserId = existingState.EffectiveCreatedByUserId if existingState.EffectiveCreatedByUserId.ValueInt64() == newState.CreatedByUserId.ValueInt64() { newState.CreatedByUserId = existingState.CreatedByUserId } + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == 
newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } + newState.EffectiveSubscriptionId = existingState.EffectiveSubscriptionId if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { newState.SubscriptionId = existingState.SubscriptionId } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } @@ -989,6 +1034,7 @@ func (newState *SubscriptionSubscriberDestination) SyncEffectiveFieldsDuringCrea } func (newState *SubscriptionSubscriberDestination) SyncEffectiveFieldsDuringRead(existingState SubscriptionSubscriberDestination) { + newState.EffectiveDestinationId = existingState.EffectiveDestinationId if existingState.EffectiveDestinationId.ValueString() == newState.DestinationId.ValueString() { newState.DestinationId = existingState.DestinationId } @@ -1006,6 +1052,7 @@ func (newState *SubscriptionSubscriberUser) SyncEffectiveFieldsDuringCreateOrUpd } func (newState *SubscriptionSubscriberUser) SyncEffectiveFieldsDuringRead(existingState SubscriptionSubscriberUser) { + newState.EffectiveUserId = existingState.EffectiveUserId if existingState.EffectiveUserId.ValueInt64() == newState.UserId.ValueInt64() { newState.UserId = existingState.UserId } @@ -1094,6 +1141,7 @@ func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate( } func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDashboardRequest) { + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } @@ -1130,12 +1178,15 @@ func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState UpdateScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 35f110fbe2..295d1e1a99 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -43,6 +43,7 @@ func (newState *BaseJob) SyncEffectiveFieldsDuringCreateOrUpdate(plan BaseJob) { } func (newState *BaseJob) SyncEffectiveFieldsDuringRead(existingState BaseJob) { + newState.EffectiveEffectiveBudgetPolicyId = existingState.EffectiveEffectiveBudgetPolicyId if existingState.EffectiveEffectiveBudgetPolicyId.ValueString() == newState.EffectiveBudgetPolicyId.ValueString() { newState.EffectiveBudgetPolicyId = existingState.EffectiveBudgetPolicyId } @@ -944,6 +945,7 @@ func (newState *Job) SyncEffectiveFieldsDuringCreateOrUpdate(plan Job) { } func (newState *Job) SyncEffectiveFieldsDuringRead(existingState Job) { + newState.EffectiveEffectiveBudgetPolicyId = existingState.EffectiveEffectiveBudgetPolicyId if 
existingState.EffectiveEffectiveBudgetPolicyId.ValueString() == newState.EffectiveBudgetPolicyId.ValueString() { newState.EffectiveBudgetPolicyId = existingState.EffectiveBudgetPolicyId } diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index be8e5f0c2f..17d8bbc18e 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -342,6 +342,7 @@ func (newState *Credential) SyncEffectiveFieldsDuringCreateOrUpdate(plan Credent } func (newState *Credential) SyncEffectiveFieldsDuringRead(existingState Credential) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } @@ -382,6 +383,7 @@ func (newState *CustomerManagedKey) SyncEffectiveFieldsDuringCreateOrUpdate(plan } func (newState *CustomerManagedKey) SyncEffectiveFieldsDuringRead(existingState CustomerManagedKey) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } @@ -739,9 +741,11 @@ func (newState *Network) SyncEffectiveFieldsDuringCreateOrUpdate(plan Network) { } func (newState *Network) SyncEffectiveFieldsDuringRead(existingState Network) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } + newState.EffectiveVpcStatus = existingState.EffectiveVpcStatus if existingState.EffectiveVpcStatus.ValueString() == newState.VpcStatus.ValueString() { newState.VpcStatus = existingState.VpcStatus } @@ -871,9 +875,11 @@ func (newState *StorageConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(pl } func (newState *StorageConfiguration) SyncEffectiveFieldsDuringRead(existingState StorageConfiguration) { + newState.EffectiveAccountId = existingState.EffectiveAccountId if existingState.EffectiveAccountId.ValueString() == newState.AccountId.ValueString() { newState.AccountId = existingState.AccountId } + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } @@ -1135,12 +1141,15 @@ func (newState *Workspace) SyncEffectiveFieldsDuringCreateOrUpdate(plan Workspac } func (newState *Workspace) SyncEffectiveFieldsDuringRead(existingState Workspace) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } + newState.EffectiveWorkspaceStatus = existingState.EffectiveWorkspaceStatus if existingState.EffectiveWorkspaceStatus.ValueString() == newState.WorkspaceStatus.ValueString() { newState.WorkspaceStatus = existingState.WorkspaceStatus } + newState.EffectiveWorkspaceStatusMessage = existingState.EffectiveWorkspaceStatusMessage if existingState.EffectiveWorkspaceStatusMessage.ValueString() == newState.WorkspaceStatusMessage.ValueString() { newState.WorkspaceStatusMessage = existingState.WorkspaceStatusMessage } diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 3ca9895b89..71b73ba253 100755 --- 
a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -1543,24 +1543,31 @@ func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUp } func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringRead(existingState NccAzurePrivateEndpointRule) { + newState.EffectiveConnectionState = existingState.EffectiveConnectionState if existingState.EffectiveConnectionState.ValueString() == newState.ConnectionState.ValueString() { newState.ConnectionState = existingState.ConnectionState } + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } + newState.EffectiveDeactivated = existingState.EffectiveDeactivated if existingState.EffectiveDeactivated.ValueBool() == newState.Deactivated.ValueBool() { newState.Deactivated = existingState.Deactivated } + newState.EffectiveDeactivatedAt = existingState.EffectiveDeactivatedAt if existingState.EffectiveDeactivatedAt.ValueInt64() == newState.DeactivatedAt.ValueInt64() { newState.DeactivatedAt = existingState.DeactivatedAt } + newState.EffectiveEndpointName = existingState.EffectiveEndpointName if existingState.EffectiveEndpointName.ValueString() == newState.EndpointName.ValueString() { newState.EndpointName = existingState.EndpointName } + newState.EffectiveRuleId = existingState.EffectiveRuleId if existingState.EffectiveRuleId.ValueString() == newState.RuleId.ValueString() { newState.RuleId = existingState.RuleId } + newState.EffectiveUpdatedTime = existingState.EffectiveUpdatedTime if existingState.EffectiveUpdatedTime.ValueInt64() == newState.UpdatedTime.ValueInt64() { newState.UpdatedTime = existingState.UpdatedTime } @@ -1672,12 +1679,15 @@ func (newState *NetworkConnectivityConfiguration) SyncEffectiveFieldsDuringCreat } func (newState *NetworkConnectivityConfiguration) SyncEffectiveFieldsDuringRead(existingState NetworkConnectivityConfiguration) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } + newState.EffectiveNetworkConnectivityConfigId = existingState.EffectiveNetworkConnectivityConfigId if existingState.EffectiveNetworkConnectivityConfigId.ValueString() == newState.NetworkConnectivityConfigId.ValueString() { newState.NetworkConnectivityConfigId = existingState.NetworkConnectivityConfigId } + newState.EffectiveUpdatedTime = existingState.EffectiveUpdatedTime if existingState.EffectiveUpdatedTime.ValueInt64() == newState.UpdatedTime.ValueInt64() { newState.UpdatedTime = existingState.UpdatedTime } diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index 1cb5022027..0192deeaaa 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -892,6 +892,7 @@ func (newState *ShareInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ShareInf } func (newState *ShareInfo) SyncEffectiveFieldsDuringRead(existingState ShareInfo) { + newState.EffectiveOwner = existingState.EffectiveOwner if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() { newState.Owner = existingState.Owner } @@ -1000,15 +1001,19 @@ func (newState *SharedDataObject) SyncEffectiveFieldsDuringCreateOrUpdate(plan S } func (newState *SharedDataObject) SyncEffectiveFieldsDuringRead(existingState SharedDataObject) { + 
newState.EffectiveCdfEnabled = existingState.EffectiveCdfEnabled if existingState.EffectiveCdfEnabled.ValueBool() == newState.CdfEnabled.ValueBool() { newState.CdfEnabled = existingState.CdfEnabled } + newState.EffectiveHistoryDataSharingStatus = existingState.EffectiveHistoryDataSharingStatus if existingState.EffectiveHistoryDataSharingStatus.ValueString() == newState.HistoryDataSharingStatus.ValueString() { newState.HistoryDataSharingStatus = existingState.HistoryDataSharingStatus } + newState.EffectiveSharedAs = existingState.EffectiveSharedAs if existingState.EffectiveSharedAs.ValueString() == newState.SharedAs.ValueString() { newState.SharedAs = existingState.SharedAs } + newState.EffectiveStartVersion = existingState.EffectiveStartVersion if existingState.EffectiveStartVersion.ValueInt64() == newState.StartVersion.ValueInt64() { newState.StartVersion = existingState.StartVersion } @@ -1130,6 +1135,7 @@ func (newState *UpdateShare) SyncEffectiveFieldsDuringCreateOrUpdate(plan Update } func (newState *UpdateShare) SyncEffectiveFieldsDuringRead(existingState UpdateShare) { + newState.EffectiveOwner = existingState.EffectiveOwner if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() { newState.Owner = existingState.Owner } From 92357dcf9cfb4acd64ace55b00e82e38300ec2ad Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Tue, 29 Oct 2024 06:17:42 -0400 Subject: [PATCH 76/99] [Fix] Handle edge case for `effective_properties` in `databricks_sql_table` (#4153) ## Changes It was reported in #4098 that some of the specified options, like, `multiLine`, `recursiveFileLookup` and potentially more, aren't returned as `option.multiLine`, etc., but instead are expanded into full names, like, `spark.sql.dataSourceOptions.multiLine`. This PR changes lookup logic a bit, and if we can't find `option.something`, then we're looking for all options ending with `.something` (only if there are no `.` in the name). Resolves #4098 ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- catalog/resource_sql_table.go | 29 +++++++++++++++++++++-------- catalog/resource_sql_table_test.go | 7 +++++-- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/catalog/resource_sql_table.go b/catalog/resource_sql_table.go index ce9d4dbd7a..710c8a20bf 100644 --- a/catalog/resource_sql_table.go +++ b/catalog/resource_sql_table.go @@ -21,6 +21,7 @@ import ( ) var MaxSqlExecWaitTimeout = 50 +var optionPrefixes = []string{"option.", "spark.sql.dataSourceOptions."} type SqlColumnInfo struct { Name string `json:"name"` @@ -67,7 +68,6 @@ type SqlTableInfo struct { } func (ti SqlTableInfo) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema { - caseInsensitiveFields := []string{"name", "catalog_name", "schema_name"} for _, field := range caseInsensitiveFields { s.SchemaPath(field).SetCustomSuppressDiff(common.EqualFoldDiffSuppress) @@ -598,18 +598,31 @@ func ResourceSqlTable() common.Resource { // If the user specified a property but the value of that property has changed, that will appear // as a change in the effective property/option. To cause a diff to be detected, we need to // reset the effective property/option to the requested value. 
- userSpecifiedProperties := d.Get("properties").(map[string]interface{}) - userSpecifiedOptions := d.Get("options").(map[string]interface{}) - effectiveProperties := d.Get("effective_properties").(map[string]interface{}) - diff := make(map[string]interface{}) + userSpecifiedProperties := d.Get("properties").(map[string]any) + userSpecifiedOptions := d.Get("options").(map[string]any) + effectiveProperties := d.Get("effective_properties").(map[string]any) + diff := make(map[string]any) for k, userSpecifiedValue := range userSpecifiedProperties { if effectiveValue, ok := effectiveProperties[k]; !ok || effectiveValue != userSpecifiedValue { diff[k] = userSpecifiedValue } } - for k, userSpecifiedValue := range userSpecifiedOptions { - if effectiveValue, ok := effectiveProperties["option."+k]; !ok || effectiveValue != userSpecifiedValue { - diff["option."+k] = userSpecifiedValue + for userOptName, userSpecifiedValue := range userSpecifiedOptions { + var found bool + var effectiveValue any + var effectOptName string + // If the option is not found, check if the user specified the option without the prefix + // i.e. if user specified `multiLine` for JSON, then backend returns `spark.sql.dataSourceOptions.multiLine` + for _, prefix := range optionPrefixes { + effectOptName = prefix + userOptName + if v, ok := effectiveProperties[effectOptName]; ok { + found = true + effectiveValue = v + break + } + } + if !found || effectiveValue != userSpecifiedValue { + diff[effectOptName] = userSpecifiedValue } } if len(diff) > 0 { diff --git a/catalog/resource_sql_table_test.go b/catalog/resource_sql_table_test.go index f2f0a6c5e2..b2495480cb 100644 --- a/catalog/resource_sql_table_test.go +++ b/catalog/resource_sql_table_test.go @@ -1625,15 +1625,18 @@ func TestResourceSqlTable_Diff_ExistingResource(t *testing.T) { } options = { "myopt" = "myval" + "multiLine" = "true" }`, map[string]string{ "properties.%": "1", "properties.myprop": "myval", - "options.%": "1", + "options.%": "2", "options.myopt": "myval", - "effective_properties.%": "2", + "options.multiLine": "true", + "effective_properties.%": "3", "effective_properties.myprop": "myval", "effective_properties.option.myopt": "myval", + "effective_properties.spark.sql.dataSourceOptions.multiLine": "true", }, nil, }, From 38eeb21e56f9e3ec3fe271010430dbdf6a62ec0b Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 29 Oct 2024 20:43:31 +0530 Subject: [PATCH 77/99] [Fix] Provide more prescriptive error when users fail to create a single node cluster (#4168) ## Changes A better error message is warranted because many DABs customers have reportedly run into this. Original issue: https://github.com/databricks/cli/issues/1546 ## Tests Unit test --- clusters/clusters_api.go | 2 +- clusters/resource_cluster.go | 21 ++++++++++++++++++- clusters/resource_cluster_test.go | 6 ++---- jobs/resource_job_test.go | 34 +++++++++++++++++++++++++++---- 4 files changed, 53 insertions(+), 10 deletions(-) diff --git a/clusters/clusters_api.go b/clusters/clusters_api.go index d47cfb6090..6a08a4a608 100644 --- a/clusters/clusters_api.go +++ b/clusters/clusters_api.go @@ -447,7 +447,7 @@ func (cluster Cluster) Validate() error { if profile == "singleNode" && strings.HasPrefix(master, "local") && resourceClass == "SingleNode" { return nil } - return fmt.Errorf("NumWorkers could be 0 only for SingleNode clusters. 
See https://docs.databricks.com/clusters/single-node.html for more details") + return errors.New(numWorkerErr) } // TODO: Remove this once all the resources using clusters are migrated to Go SDK. diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index fb77a5f76d..3c03502023 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -26,7 +26,26 @@ var clusterSchema = resourceClusterSchema() var clusterSchemaVersion = 4 const ( - numWorkerErr = "NumWorkers could be 0 only for SingleNode clusters. See https://docs.databricks.com/clusters/single-node.html for more details" + numWorkerErr = `num_workers may be 0 only for single-node clusters. To create a single node +cluster please include the following configuration in your cluster configuration: + + spark_conf = { + "spark.databricks.cluster.profile" : "singleNode" + "spark.master" : "local[*]" + } + + custom_tags = { + "ResourceClass" = "SingleNode" + } + +Please note that the Databricks Terraform provider cannot detect if the above configuration +is defined in a policy used by the cluster. Please define this in the cluster configuration +itself to create a single node cluster. + +For more details please see: + 1. https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster#fixed-size-or-autoscaling-cluster + 2. https://docs.databricks.com/clusters/single-node.html` + unsupportedExceptCreateEditClusterSpecErr = "unsupported type %T, must be one of %scompute.CreateCluster, %scompute.ClusterSpec or %scompute.EditCluster. Please report this issue to the GitHub repo" ) diff --git a/clusters/resource_cluster_test.go b/clusters/resource_cluster_test.go index 804067597b..240b62cb4e 100644 --- a/clusters/resource_cluster_test.go +++ b/clusters/resource_cluster_test.go @@ -1860,8 +1860,7 @@ func TestResourceClusterCreate_SingleNodeFail(t *testing.T) { "is_pinned": false, }, }.Apply(t) - assert.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) + assert.EqualError(t, err, numWorkerErr) } func TestResourceClusterCreate_NegativeNumWorkers(t *testing.T) { @@ -1900,8 +1899,7 @@ func TestResourceClusterUpdate_FailNumWorkersZero(t *testing.T) { "num_workers": 0, }, }.Apply(t) - assert.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) + assert.EqualError(t, err, numWorkerErr) } func TestModifyClusterRequestAws(t *testing.T) { diff --git a/jobs/resource_job_test.go b/jobs/resource_job_test.go index 95ffb03923..75a780c00a 100644 --- a/jobs/resource_job_test.go +++ b/jobs/resource_job_test.go @@ -2056,8 +2056,21 @@ func TestResourceJobCreateSingleNode_Fail(t *testing.T) { jar = "dbfs://ff/gg/hh.jar" }`, }.Apply(t) - assert.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) + assert.ErrorContains(t, err, `num_workers may be 0 only for single-node clusters. To create a single node +cluster please include the following configuration in your cluster configuration: + + spark_conf = { + "spark.databricks.cluster.profile" : "singleNode" + "spark.master" : "local[*]" + } + + custom_tags = { + "ResourceClass" = "SingleNode" + } + +Please note that the Databricks Terraform provider cannot detect if the above configuration +is defined in a policy used by the cluster. 
Please define this in the cluster configuration +itself to create a single node cluster.`) } func TestResourceJobRead(t *testing.T) { @@ -2946,8 +2959,21 @@ func TestResourceJobUpdate_FailNumWorkersZero(t *testing.T) { parameters = ["--cleanup", "full"] }`, }.Apply(t) - assert.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) + assert.ErrorContains(t, err, `num_workers may be 0 only for single-node clusters. To create a single node +cluster please include the following configuration in your cluster configuration: + + spark_conf = { + "spark.databricks.cluster.profile" : "singleNode" + "spark.master" : "local[*]" + } + + custom_tags = { + "ResourceClass" = "SingleNode" + } + +Please note that the Databricks Terraform provider cannot detect if the above configuration +is defined in a policy used by the cluster. Please define this in the cluster configuration +itself to create a single node cluster.`) } func TestJobsAPIList(t *testing.T) { From dfa6bc0bebf4541fc71b8b08a567d16b341f024d Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Wed, 30 Oct 2024 10:14:44 +0100 Subject: [PATCH 78/99] [Internal] Add test instructions for external contributors (#4169) ## Changes Add test instructions for external contributors ## Tests See Go Changes https://github.com/databricks/databricks-sdk-go/pull/1073 --- .github/workflows/external-message.yml | 114 ++++++++++++++++++++++++ .github/workflows/integration-tests.yml | 20 ++++- 2 files changed, 133 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/external-message.yml diff --git a/.github/workflows/external-message.yml b/.github/workflows/external-message.yml new file mode 100644 index 0000000000..b9534520a0 --- /dev/null +++ b/.github/workflows/external-message.yml @@ -0,0 +1,114 @@ +name: PR Comment + +# WARNING: +# THIS WORKFLOW ALWAYS RUNS FOR EXTERNAL CONTRIBUTORS WITHOUT ANY APPROVAL. +# THIS WORKFLOW RUNS FROM MAIN BRANCH, NOT FROM THE PR BRANCH. +# DO NOT PULL THE PR OR EXECUTE ANY CODE FROM THE PR. + +on: + pull_request_target: + types: [opened, reopened, synchronize] + branches: + - main + + +jobs: + comment-on-pr: + runs-on: ubuntu-latest + permissions: + pull-requests: write + + steps: + # NOTE: The following checks may not be accurate depending on Org or Repo settings. 
+ - name: Check user and potential secret access + id: check-secrets-access + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + USER_LOGIN="${{ github.event.pull_request.user.login }}" + REPO_OWNER="${{ github.repository_owner }}" + REPO_NAME="${{ github.event.repository.name }}" + + echo "Pull request opened by: $USER_LOGIN" + + # Check if PR is from a fork + IS_FORK=$([[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && echo "true" || echo "false") + + HAS_ACCESS="false" + + # Check user's permission level on the repository + USER_PERMISSION=$(gh api repos/$REPO_OWNER/$REPO_NAME/collaborators/$USER_LOGIN/permission --jq '.permission') + + if [[ "$USER_PERMISSION" == "admin" || "$USER_PERMISSION" == "write" ]]; then + HAS_ACCESS="true" + elif [[ "$USER_PERMISSION" == "read" ]]; then + # For read access, we need to check if the user has been explicitly granted secret access + # This information is not directly available via API, so we'll make an assumption + # that read access does not imply secret access + HAS_ACCESS="false" + fi + + # Check if repo owner is an organization + IS_ORG=$(gh api users/$REPO_OWNER --jq '.type == "Organization"') + + if [[ "$IS_ORG" == "true" && "$HAS_ACCESS" == "false" ]]; then + # Check if user is a member of any team with write or admin access to the repo + TEAMS_WITH_ACCESS=$(gh api repos/$REPO_OWNER/$REPO_NAME/teams --jq '.[] | select(.permission == "push" or .permission == "admin") | .slug') + for team in $TEAMS_WITH_ACCESS; do + IS_TEAM_MEMBER=$(gh api orgs/$REPO_OWNER/teams/$team/memberships/$USER_LOGIN --silent && echo "true" || echo "false") + if [[ "$IS_TEAM_MEMBER" == "true" ]]; then + HAS_ACCESS="true" + break + fi + done + fi + + # If it's a fork, set HAS_ACCESS to false regardless of other checks + if [[ "$IS_FORK" == "true" ]]; then + HAS_ACCESS="false" + fi + + echo "has_secrets_access=$HAS_ACCESS" >> $GITHUB_OUTPUT + if [[ "$HAS_ACCESS" == "true" ]]; then + echo "User $USER_LOGIN likely has access to secrets" + else + echo "User $USER_LOGIN likely does not have access to secrets" + fi + + + - uses: actions/checkout@v4 + + - name: Delete old comments + if: steps.check-secrets-access.outputs.has_secrets_access != 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Delete previous comment if it exists + previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \ + --jq '.[] | select(.body | startswith("")) | .id') + echo "Previous comment IDs: $previous_comment_ids" + # Iterate over each comment ID and delete the comment + if [ ! -z "$previous_comment_ids" ]; then + echo "$previous_comment_ids" | while read -r comment_id; do + echo "Deleting comment with ID: $comment_id" + gh api "repos/${{ github.repository }}/issues/comments/$comment_id" -X DELETE + done + fi + + - name: Comment on PR + if: steps.check-secrets-access.outputs.has_secrets_access != 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + COMMIT_SHA: ${{ github.event.pull_request.head.sha }} + run: | + gh pr comment ${{ github.event.pull_request.number }} --body \ + " + Run integration tests manually: + [go/deco-tests-run/terraform](https://go/deco-tests-run/terraform) + + Inputs: + * PR number: ${{github.event.pull_request.number}} + * Commit SHA: \`${{ env.COMMIT_SHA }}\` + + Checks will be approved automatically on success. 
+ " diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index b92be6da5f..67ed709365 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -9,10 +9,28 @@ on: jobs: + check-token: + name: Check secrets access + runs-on: ubuntu-latest + outputs: + has_token: ${{ steps.set-token-status.outputs.has_token }} + steps: + - name: Check if GITHUB_TOKEN is set + id: set-token-status + run: | + if [ -z "${{ secrets.GITHUB_TOKEN }}" ]; then + echo "GITHUB_TOKEN is empty. User has no access to tokens." + echo "::set-output name=has_token::false" + else + echo "GITHUB_TOKEN is set. User has access to tokens." + echo "::set-output name=has_token::true" + fi + trigger-tests: - if: github.event_name == 'pull_request' name: Trigger Tests runs-on: ubuntu-latest + needs: check-token + if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true' environment: "test-trigger-is" steps: From 613ed1ab6ff138601bbc83bf9c48a2a7f41b3f7e Mon Sep 17 00:00:00 2001 From: Omer Lachish <289488+rauchy@users.noreply.github.com> Date: Wed, 30 Oct 2024 13:36:09 +0100 Subject: [PATCH 79/99] [Internal] Migrate Share Data Source to Plugin Framework (#4161) ## Changes This PR migrates the share/shares data sources to the Plugin framework. The code was largely copied "as is" from the previous implementation of the share data source, with the necessary adaptations made for integration with the Plugin framework. ## Tests ~~Note: current tests create shares using the SDKv2 resource, but fetch them using the new plugin framework data source. Once the resource migration is merged, I will amend this.~~ Edit: Now that the resource itself is merged, the acceptance tests use the plugin framework's version of the resource.
- [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Omer Lachish --- internal/providers/pluginfw/pluginfw.go | 2 + .../pluginfw/resources/sharing/data_share.go | 79 +++++++++++++++ .../pluginfw/resources/sharing/data_shares.go | 67 +++++++++++++ .../resources/sharing/data_shares_acc_test.go | 98 +++++++++++++++++++ 4 files changed, 246 insertions(+) create mode 100644 internal/providers/pluginfw/resources/sharing/data_share.go create mode 100644 internal/providers/pluginfw/resources/sharing/data_shares.go create mode 100644 internal/providers/pluginfw/resources/sharing/data_shares_acc_test.go diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index 53b361f998..e813c94aa3 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -58,6 +58,8 @@ func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []f volume.DataSourceVolumes, registered_model.DataSourceRegisteredModel, notificationdestinations.DataSourceNotificationDestinations, + sharing.DataSourceShare, + sharing.DataSourceShares, } } diff --git a/internal/providers/pluginfw/resources/sharing/data_share.go b/internal/providers/pluginfw/resources/sharing/data_share.go new file mode 100644 index 0000000000..f96d56ac12 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/data_share.go @@ -0,0 +1,79 @@ +package sharing + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/sharing" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/sharing_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func DataSourceShare() datasource.DataSource { + return &ShareDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &ShareDataSource{} + +type ShareDataSource struct { + Client *common.DatabricksClient +} + +func (d *ShareDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = pluginfwcommon.GetDatabricksStagingName("share") +} + +func (d *ShareDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(sharing_tf.ShareInfo{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *ShareDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *ShareDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + var config sharing_tf.ShareInfo + diags = req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + share, err := w.Shares.Get(ctx, sharing.GetShareRequest{ + Name: config.Name.ValueString(), + IncludeSharedData: true, + }) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + } + + resp.Diagnostics.AddError("Failed to fetch share", err.Error()) + return + } + + var shareInfoTfSdk sharing_tf.ShareInfo + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, share, &shareInfoTfSdk)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, shareInfoTfSdk)...) +} diff --git a/internal/providers/pluginfw/resources/sharing/data_shares.go b/internal/providers/pluginfw/resources/sharing/data_shares.go new file mode 100644 index 0000000000..1753621192 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/data_shares.go @@ -0,0 +1,67 @@ +package sharing + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/databricks/databricks-sdk-go/service/sharing" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +type SharesList struct { + Shares []types.String `tfsdk:"shares" tf:"computed,optional,slice_set"` +} + +func DataSourceShares() datasource.DataSource { + return &SharesDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &SharesDataSource{} + +type SharesDataSource struct { + Client *common.DatabricksClient +} + +func (d *SharesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = pluginfwcommon.GetDatabricksStagingName("shares") +} + +func (d *SharesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(SharesList{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *SharesDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *SharesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + shares, err := w.Shares.ListAll(ctx, sharing.ListSharesRequest{}) + if err != nil { + resp.Diagnostics.AddError("Failed to fetch shares", err.Error()) + return + } + + shareNames := make([]types.String, len(shares)) + for i, share := range shares { + shareNames[i] = types.StringValue(share.Name) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, SharesList{Shares: shareNames})...) 
+} diff --git a/internal/providers/pluginfw/resources/sharing/data_shares_acc_test.go b/internal/providers/pluginfw/resources/sharing/data_shares_acc_test.go new file mode 100644 index 0000000000..9b0440e5d3 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/data_shares_acc_test.go @@ -0,0 +1,98 @@ +package sharing_test + +import ( + "strconv" + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func checkSharesDataSourcePopulated(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + _, ok := s.Modules[0].Resources["data.databricks_shares_pluginframework.this"] + require.True(t, ok, "data.databricks_shares_pluginframework.this has to be there") + num_shares, _ := strconv.Atoi(s.Modules[0].Outputs["shares"].Value.(string)) + assert.GreaterOrEqual(t, num_shares, 1) + return nil + } +} +func TestUcAccDataSourceShares(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + } + + resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things{var.RANDOM}" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } + } + + resource "databricks_table" "mytable" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = "{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } + + resource "databricks_table" "mytable_2" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar_2" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = "{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } + + resource "databricks_share_pluginframework" "myshare" { + name = "{var.RANDOM}-terraform-delta-share" + object { + name = databricks_table.mytable.id + comment = "c" + data_object_type = "TABLE" + } + object { + name = databricks_table.mytable_2.id + cdf_enabled = false + comment = "c" + data_object_type = "TABLE" + } + } + + data "databricks_shares_pluginframework" "this" { + depends_on = [databricks_share_pluginframework.myshare] + } + output "shares" { + value = length(data.databricks_shares_pluginframework.this.shares) + } + `, + Check: checkSharesDataSourcePopulated(t), + }) +} From 5daf2ed398531581329a5afb110a2b4ff5d36e76 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 30 Oct 2024 10:33:35 -0400 Subject: [PATCH 80/99] [Feature] Added `databricks_functions` data source (#4154) ## Changes It's now possible to fetch information about functions defined in a specific UC schema. No integration test yet because we don't have `databricks_function` resource yet. 
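The data source added in this patch is essentially a thin wrapper over the Go SDK's function-listing call. A minimal standalone sketch of that underlying call is below, mirroring the implementation that appears later in the patch; the catalog and schema names are placeholders, and client configuration is assumed to come from the environment:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	// Workspace client configured from the environment (e.g. DATABRICKS_HOST / DATABRICKS_TOKEN).
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}
	// Same call the data source issues: list every UDF registered in the given catalog.schema.
	fns, err := w.Functions.ListAll(ctx, catalog.ListFunctionsRequest{
		CatalogName: "main",    // placeholder catalog
		SchemaName:  "default", // placeholder schema
	})
	if err != nil {
		panic(err)
	}
	for _, fn := range fns {
		fmt.Println(fn.FullName)
	}
}
```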
Resolves #4111 ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Miles Yucht --- docs/data-sources/functions.md | 86 ++++++++++++++++++ internal/providers/pluginfw/pluginfw.go | 2 + .../resources/catalog/data_functions.go | 90 +++++++++++++++++++ 3 files changed, 178 insertions(+) create mode 100644 docs/data-sources/functions.md create mode 100644 internal/providers/pluginfw/resources/catalog/data_functions.go diff --git a/docs/data-sources/functions.md b/docs/data-sources/functions.md new file mode 100644 index 0000000000..9a02db5b3b --- /dev/null +++ b/docs/data-sources/functions.md @@ -0,0 +1,86 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_functions Data Source + +-> This data source can only be used with a workspace-level provider! + +Retrieves a list of [User-Defined Functions (UDFs) registered in the Unity Catalog](https://docs.databricks.com/en/udf/unity-catalog.html). + +## Example Usage + +List all functions defined in a specific schema (`main.default` in this example): + +```hcl +data "databricks_functions" "all" { + catalog_name = "main" + schema_name = "default" +} + +output "all_functions" { + value = data.databricks_functions.all.functions +} +``` + +## Argument Reference + +The following arguments are supported: + +* `catalog_name` - (Required) Name of [databricks_catalog](../resources/catalog.md). +* `schema_name` - (Required) Name of [databricks_schema](../resources/schema.md). +* `include_browse` - (Optional, Boolean) flag to specify whether to include UDFs in the response for which the principal can only access selective metadata. + +## Attribute Reference + +This data source exports the following attributes: + +* `functions` - list of objects, each describing an individual UDF. Each object consists of the following attributes (refer to [REST API documentation](https://docs.databricks.com/api/workspace/functions/list#functions) for up-to-date list of attributes. Default type is String): + * `name` - Name of function, relative to parent schema. + * `catalog_name` - Name of parent catalog. + * `schema_name` - Name of parent schema relative to its parent catalog. + * `input_params` - object describing input parameters. Consists of the single attribute: + * `parameters` - The array of definitions of the function's parameters: + * `name` - Name of parameter. + * `type_text` - Full data type spec, SQL/catalogString text. + * `type_json` - Full data type spec, JSON-serialized. + * `type_name` - Name of type (INT, STRUCT, MAP, etc.). + * `type_precision` - Digits of precision; required on Create for DecimalTypes. + * `type_scale` - Digits to right of decimal; Required on Create for DecimalTypes. + * `type_interval_type` - Format of IntervalType. + * `position` - Ordinal position of column (starting at position 0). + * `parameter_mode` - The mode of the function parameter. + * `parameter_type` - The type of function parameter (`PARAM` or `COLUMN`). + * `parameter_default` - Default value of the parameter. + * `comment` - User-provided free-form text description. + * `return_params` - Table function return parameters. See `input_params` for description. + * `data_type` - Scalar function return data type. + * `full_data_type` - Pretty printed function data type. + * `routine_body` - Function language (`SQL` or `EXTERNAL`).
When `EXTERNAL` is used, the language of the routine function should be specified in the `external_language` field, and the `return_params` of the function cannot be used (as `TABLE` return type is not supported), and the `sql_data_access` field must be `NO_SQL`. + * `routine_definition` - Function body. + * `routine_dependencies` - Function dependencies. + * `parameter_style` - Function parameter style. `S` is the value for SQL. + * `is_deterministic` - Boolean flag specifying whether the function is deterministic. + * `sql_data_access` - Function SQL data access (`CONTAINS_SQL`, `READS_SQL_DATA`, `NO_SQL`). + * `is_null_call` - Boolean flag whether function null call. + * `security_type` - Function security type. (Enum: `DEFINER`). + * `specific_name` - Specific name of the function; Reserved for future use. + * `external_name` - External function name. + * `external_language` - External function language. + * `sql_path` - List of schemas whose objects can be referenced without qualification. + * `owner` - Username of current owner of function. + * `comment` - User-provided free-form text description. + * `properties` - JSON-serialized key-value pair map, encoded (escaped) as a string. + * `metastore_id` - Unique identifier of parent metastore. + * `full_name` - Full name of function, in form of catalog_name.schema_name.function_name + * `created_at` - Time at which this function was created, in epoch milliseconds. + * `created_by` - Username of function creator. + * `updated_at` - Time at which this function was last updated, in epoch milliseconds. + * `updated_by` - Username of user who last modified function. + * `function_id` - Id of Function, relative to parent schema. + * `browse_only` - Indicates whether the principal is limited to retrieving metadata for the associated object through the `BROWSE` privilege when `include_browse` is enabled in the request.
+ +## Related Resources + +The following resources are used in the same context: + +* [databricks_schema](./schema.md) to get information about a single schema diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index e813c94aa3..5592e3e29b 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -16,6 +16,7 @@ import ( "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/catalog" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations" @@ -60,6 +61,7 @@ func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []f notificationdestinations.DataSourceNotificationDestinations, sharing.DataSourceShare, sharing.DataSourceShares, + catalog.DataSourceFunctions, } } diff --git a/internal/providers/pluginfw/resources/catalog/data_functions.go b/internal/providers/pluginfw/resources/catalog/data_functions.go new file mode 100644 index 0000000000..6837800b51 --- /dev/null +++ b/internal/providers/pluginfw/resources/catalog/data_functions.go @@ -0,0 +1,90 @@ +package catalog + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/catalog_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func DataSourceFunctions() datasource.DataSource { + return &FunctionsDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &FunctionsDataSource{} + +type FunctionsDataSource struct { + Client *common.DatabricksClient +} + +type FunctionsData struct { + CatalogName types.String `tfsdk:"catalog_name"` + SchemaName types.String `tfsdk:"schema_name"` + IncludeBrowse types.Bool `tfsdk:"include_browse" tf:"optional"` + Functions []catalog_tf.FunctionInfo `tfsdk:"functions" tf:"optional,computed"` +} + +func (d *FunctionsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "databricks_functions" +} + +func (d *FunctionsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(FunctionsData{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *FunctionsDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = 
pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *FunctionsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var functions FunctionsData + diags = req.Config.Get(ctx, &functions) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + catalogName := functions.CatalogName.ValueString() + schemaName := functions.SchemaName.ValueString() + functionsInfosSdk, err := w.Functions.ListAll(ctx, catalog.ListFunctionsRequest{ + CatalogName: catalogName, + SchemaName: schemaName, + IncludeBrowse: functions.IncludeBrowse.ValueBool(), + }) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + } + resp.Diagnostics.AddError(fmt.Sprintf("failed to get functions for %s.%s schema", catalogName, schemaName), err.Error()) + return + } + for _, functionSdk := range functionsInfosSdk { + var function catalog_tf.FunctionInfo + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, functionSdk, &function)...) + if resp.Diagnostics.HasError() { + return + } + functions.Functions = append(functions.Functions, function) + } + resp.Diagnostics.Append(resp.State.Set(ctx, functions)...) +} From 17641de1cd3110b33c4b0b1a17f1ac17b0d6835b Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 31 Oct 2024 21:27:09 +0100 Subject: [PATCH 81/99] [Dependency] Bump Go SDK to v0.50.0 (#4178) ## Changes Use the latest Go SDK in the Terraform provider. The main changes affect Dashboards and Online Tables, whose Create and Update RPCs now accept an instance of the resource, rather than inlining the fields of the resource. Additionally, Online Tables introduced a waiter configuration, so we can remove hand-written waiter logic used before. 
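To make the new request shape concrete, here is a minimal sketch of the nested-resource calls as they look after this bump, based on the call sites updated in the diff below; the display name, warehouse ID, paths and table names are placeholders:

```go
package main

import (
	"context"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// Lakeview dashboards: the dashboard is now nested in the request instead of inlined.
	_, err = w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
		Dashboard: &dashboards.Dashboard{
			DisplayName: "Example dashboard", // placeholder
			WarehouseId: "warehouse-id",      // placeholder
			ParentPath:  "/Shared/example",   // placeholder
		},
	})
	if err != nil {
		panic(err)
	}

	// Online tables: Create now returns a waiter, replacing hand-written polling.
	waiter, err := w.OnlineTables.Create(ctx, catalog.CreateOnlineTableRequest{
		Table: &catalog.OnlineTable{
			Name: "main.default.online_table", // placeholder
			Spec: &catalog.OnlineTableSpec{
				SourceTableFullName: "main.default.source_table", // placeholder
				PrimaryKeyColumns:   []string{"id"},
				RunTriggered:        &catalog.OnlineTableSpecTriggeredSchedulingPolicy{},
			},
		},
	})
	if err != nil {
		panic(err)
	}
	// Block until the online table reaches an active state or the timeout expires.
	if _, err := waiter.GetWithTimeout(90 * time.Minute); err != nil {
		panic(err)
	}
}
```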
## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .codegen/_openapi_sha | 2 +- catalog/resource_online_table.go | 31 +--- catalog/resource_online_table_test.go | 66 +++++--- dashboards/resource_dashboard.go | 27 +-- dashboards/resource_dashboard_test.go | 42 +++-- go.mod | 2 +- go.sum | 4 +- internal/acceptance/dashboard_test.go | 26 +-- internal/service/apps_tf/model.go | 57 +------ internal/service/catalog_tf/model.go | 14 -- internal/service/dashboards_tf/model.go | 198 ++-------------------- internal/service/jobs_tf/model.go | 154 +++++++++-------- internal/service/oauth2_tf/model.go | 12 ++ internal/service/provisioning_tf/model.go | 23 +++ internal/service/settings_tf/model.go | 156 ++++++++++++++++- 15 files changed, 395 insertions(+), 419 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 2d9cb6d86d..ecf041814d 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -cf9c61453990df0f9453670f2fe68e1b128647a2 \ No newline at end of file +25b2478e5a18c888f0d423249abde5499dc58424 \ No newline at end of file diff --git a/catalog/resource_online_table.go b/catalog/resource_online_table.go index ca24d5f76f..ee4aa44754 100644 --- a/catalog/resource_online_table.go +++ b/catalog/resource_online_table.go @@ -16,29 +16,6 @@ import ( const onlineTableDefaultProvisionTimeout = 90 * time.Minute -func waitForOnlineTableCreation(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error { - return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError { - endpoint, err := w.OnlineTables.GetByName(ctx, onlineTableName) - if err != nil { - return retry.NonRetryableError(err) - } - if endpoint.Status == nil { - return retry.RetryableError(fmt.Errorf("online table status is not available yet")) - } - switch endpoint.Status.DetailedState { - case catalog.OnlineTableStateOnline, catalog.OnlineTableStateOnlineContinuousUpdate, - catalog.OnlineTableStateOnlineNoPendingUpdate, catalog.OnlineTableStateOnlineTriggeredUpdate: - return nil - - // does catalog.OnlineTableStateOffline means that it's failed? - case catalog.OnlineTableStateOfflineFailed, catalog.OnlineTableStateOnlinePipelineFailed: - return retry.NonRetryableError(fmt.Errorf("online table status returned %s for online table: %s", - endpoint.Status.DetailedState.String(), onlineTableName)) - } - return retry.RetryableError(fmt.Errorf("online table %s is still pending", onlineTableName)) - }) -} - func waitForOnlineTableDeletion(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error { return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError { _, err := w.OnlineTables.GetByName(ctx, onlineTableName) @@ -75,9 +52,9 @@ func ResourceOnlineTable() common.Resource { if err != nil { return err } - var req catalog.CreateOnlineTableRequest - common.DataToStructPointer(d, s, &req) - res, err := w.OnlineTables.Create(ctx, req) + var table catalog.OnlineTable + common.DataToStructPointer(d, s, &table) + res, err := w.OnlineTables.Create(ctx, catalog.CreateOnlineTableRequest{Table: &table}) if err != nil { return err } @@ -85,7 +62,7 @@ func ResourceOnlineTable() common.Resource { // If the resource creation timeout is exceeded while waiting for the online table to be ready, this ensures the online table is persisted in the state. 
d.SetId(res.Name) // this should be specified in the API Spec - filed a ticket to add it - err = waitForOnlineTableCreation(w, ctx, res.Name) + _, err = res.GetWithTimeout(onlineTableDefaultProvisionTimeout) if err != nil { return err } diff --git a/catalog/resource_online_table_test.go b/catalog/resource_online_table_test.go index 1deddd02a3..9f19063b48 100644 --- a/catalog/resource_online_table_test.go +++ b/catalog/resource_online_table_test.go @@ -1,8 +1,10 @@ package catalog import ( + "errors" "fmt" "testing" + "time" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/experimental/mocks" @@ -47,6 +49,13 @@ func TestOnlineTableCreate(t *testing.T) { PrimaryKeyColumns: []string{"id"}, }, } + otStatusNotSetWait := &catalog.WaitGetOnlineTableActive[catalog.OnlineTable]{ + Response: otStatusNotSet, + Name: "main.default.online_table", + Poll: func(d time.Duration, f func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) { + return otStatusOnline, nil + }, + } // otStatusUnknown := &catalog.OnlineTable{ // Name: "main.default.online_table", // Spec: &catalog.OnlineTableSpec{ @@ -60,16 +69,15 @@ func TestOnlineTableCreate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, - }).Return(otStatusNotSet, nil) - // TODO: how to emulate the status change - // e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusNotSet, nil) - // e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusUnknown, nil) + }).Return(otStatusNotSetWait, nil) e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusOnline, nil) }, Resource: ResourceOnlineTable(), @@ -85,11 +93,13 @@ func TestOnlineTableCreate_ErrorImmediately(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, }).Return(nil, fmt.Errorf("error!")) }, @@ -100,33 +110,41 @@ func TestOnlineTableCreate_ErrorImmediately(t *testing.T) { } func TestOnlineTableCreate_ErrorInWait(t *testing.T) { - otStatusError := &catalog.OnlineTable{ + otStatusProvisioning := &catalog.OnlineTable{ Name: "main.default.online_table", Spec: &catalog.OnlineTableSpec{ RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, SourceTableFullName: "main.default.test", PrimaryKeyColumns: []string{"id"}, }, - Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateOfflineFailed}, + 
Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateProvisioning}, + } + otStatusErrorWait := &catalog.WaitGetOnlineTableActive[catalog.OnlineTable]{ + Response: otStatusProvisioning, + Name: "main.default.online_table", + Poll: func(d time.Duration, f func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) { + return nil, errors.New("failed to reach ACTIVE, got OFFLINE_FAILED: error!") + }, } d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, - }).Return(otStatusError, nil) - e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusError, nil) + }).Return(otStatusErrorWait, nil) }, Resource: ResourceOnlineTable(), HCL: onlineTableHcl, Create: true, }.Apply(t) - qa.AssertErrorStartsWith(t, err, "online table status returned OFFLINE_FAILED for online table: main.default.online_table") + qa.AssertErrorStartsWith(t, err, "failed to reach ACTIVE, got OFFLINE_FAILED: error!") assert.Equal(t, "main.default.online_table", d.Id()) } diff --git a/dashboards/resource_dashboard.go b/dashboards/resource_dashboard.go index d872b33f49..de61205243 100644 --- a/dashboards/resource_dashboard.go +++ b/dashboards/resource_dashboard.go @@ -68,22 +68,22 @@ func ResourceDashboard() common.Resource { if err != nil { return err } - var newDashboardRequest dashboards.CreateDashboardRequest - common.DataToStructPointer(d, dashboardSchema, &newDashboardRequest) + var dashboard dashboards.Dashboard + common.DataToStructPointer(d, dashboardSchema, &dashboard) content, md5Hash, err := common.ReadSerializedJsonContent(d.Get("serialized_dashboard").(string), d.Get("file_path").(string)) if err != nil { return err } d.Set("md5", md5Hash) - newDashboardRequest.SerializedDashboard = content - createdDashboard, err := w.Lakeview.Create(ctx, newDashboardRequest) + dashboard.SerializedDashboard = content + createdDashboard, err := w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{Dashboard: &dashboard}) if err != nil && isParentDoesntExistError(err) { - log.Printf("[DEBUG] Parent folder '%s' doesn't exist, creating...", newDashboardRequest.ParentPath) - err = w.Workspace.MkdirsByPath(ctx, newDashboardRequest.ParentPath) + log.Printf("[DEBUG] Parent folder '%s' doesn't exist, creating...", dashboard.ParentPath) + err = w.Workspace.MkdirsByPath(ctx, dashboard.ParentPath) if err != nil { return err } - createdDashboard, err = w.Lakeview.Create(ctx, newDashboardRequest) + createdDashboard, err = w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{Dashboard: &dashboard}) } if err != nil { return err @@ -132,16 +132,19 @@ func ResourceDashboard() common.Resource { if err != nil { return err } - var updateDashboardRequest dashboards.UpdateDashboardRequest - common.DataToStructPointer(d, dashboardSchema, &updateDashboardRequest) - updateDashboardRequest.DashboardId = d.Id() + var dashboard dashboards.Dashboard + common.DataToStructPointer(d, dashboardSchema, &dashboard) + 
dashboard.DashboardId = d.Id() content, md5Hash, err := common.ReadSerializedJsonContent(d.Get("serialized_dashboard").(string), d.Get("file_path").(string)) if err != nil { return err } d.Set("md5", md5Hash) - updateDashboardRequest.SerializedDashboard = content - updatedDashboard, err := w.Lakeview.Update(ctx, updateDashboardRequest) + dashboard.SerializedDashboard = content + updatedDashboard, err := w.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{ + DashboardId: dashboard.DashboardId, + Dashboard: &dashboard, + }) if err != nil { return err } diff --git a/dashboards/resource_dashboard_test.go b/dashboards/resource_dashboard_test.go index 0b450fdd7d..9016ce2dda 100644 --- a/dashboards/resource_dashboard_test.go +++ b/dashboards/resource_dashboard_test.go @@ -16,10 +16,12 @@ func TestDashboardCreate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockLakeviewAPI().EXPECT() e.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", @@ -67,17 +69,21 @@ func TestDashboardCreate_NoParent(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { lv := w.GetMockLakeviewAPI().EXPECT() lv.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(nil, fmt.Errorf("Path (/path) doesn't exist.")).Once() w.GetMockWorkspaceAPI().EXPECT().MkdirsByPath(mock.Anything, "/path").Return(nil) lv.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", @@ -154,10 +160,14 @@ func TestDashboardUpdate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockLakeviewAPI().EXPECT() e.Update(mock.Anything, dashboards.UpdateDashboardRequest{ - DashboardId: "xyz", - DisplayName: "Dashboard name", - WarehouseId: "abc", - SerializedDashboard: "serialized_dashboard_updated", + DashboardId: "xyz", + Dashboard: &dashboards.Dashboard{ + DashboardId: "xyz", + DisplayName: "Dashboard name", + WarehouseId: "abc", + SerializedDashboard: "serialized_dashboard_updated", + ParentPath: "/path", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", diff --git a/go.mod b/go.mod index 4e97cc0d23..1e72ea27a6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.49.0 + github.com/databricks/databricks-sdk-go v0.50.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 
e95a0ffe39..1188a3923d 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.49.0 h1:VBTeZZMLIuBSM4kxOCfUcW9z4FUQZY2QeNRD5qm9FUQ= -github.com/databricks/databricks-sdk-go v0.49.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.50.0 h1:Zl4uBhYMT5z6aDojCQJPT2zCYjjfqxBQSQn8uLTphpo= +github.com/databricks/databricks-sdk-go v0.50.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/acceptance/dashboard_test.go b/internal/acceptance/dashboard_test.go index 49118c9455..91c6335b9a 100644 --- a/internal/acceptance/dashboard_test.go +++ b/internal/acceptance/dashboard_test.go @@ -315,11 +315,14 @@ func TestAccDashboardWithRemoteChange(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) _, err = w.Lakeview.Update(context.Background(), dashboards.UpdateDashboardRequest{ - DashboardId: dashboard_id, - DisplayName: display_name, - Etag: etag, - WarehouseId: warehouse_id, - SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + DashboardId: dashboard_id, + Dashboard: &dashboards.Dashboard{ + DashboardId: dashboard_id, + DisplayName: display_name, + Etag: etag, + WarehouseId: warehouse_id, + SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + }, }) require.NoError(t, err) }, @@ -419,11 +422,14 @@ func TestAccDashboardTestAll(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) _, err = w.Lakeview.Update(context.Background(), dashboards.UpdateDashboardRequest{ - DashboardId: dashboard_id, - DisplayName: display_name, - Etag: etag, - WarehouseId: warehouse_id, - SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + DashboardId: dashboard_id, + Dashboard: &dashboards.Dashboard{ + DashboardId: dashboard_id, + DisplayName: display_name, + Etag: etag, + WarehouseId: warehouse_id, + SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + }, }) require.NoError(t, err) }, diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 4f90baf118..b5a602ba1f 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -144,6 +144,8 @@ func (newState *AppAccessControlResponse) SyncEffectiveFieldsDuringRead(existing } type AppDeployment struct { + // The name of the app. + AppName types.String `tfsdk:"-"` // The creation time of the deployment. Formatted timestamp in ISO 6801. 
CreateTime types.String `tfsdk:"create_time" tf:"optional"` EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` @@ -398,45 +400,6 @@ func (newState *ComputeStatus) SyncEffectiveFieldsDuringRead(existingState Compu } } -type CreateAppDeploymentRequest struct { - // The name of the app. - AppName types.String `tfsdk:"-"` - // The unique id of the deployment. - DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"` - // The mode of which the deployment will manage the source code. - Mode types.String `tfsdk:"mode" tf:"optional"` - // The workspace file system path of the source code used to create the app - // deployment. This is different from - // `deployment_artifacts.source_code_path`, which is the path used by the - // deployed app. The former refers to the original source code location of - // the app in the workspace during deployment creation, whereas the latter - // provides a system generated stable snapshotted source code path used by - // the deployment. - SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` -} - -func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppDeploymentRequest) { -} - -func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppDeploymentRequest) { -} - -type CreateAppRequest struct { - // The description of the app. - Description types.String `tfsdk:"description" tf:"optional"` - // The name of the app. The name must contain only lowercase alphanumeric - // characters and hyphens. It must be unique within the workspace. - Name types.String `tfsdk:"name" tf:""` - // Resources for the app. - Resources []AppResource `tfsdk:"resources" tf:"optional"` -} - -func (newState *CreateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppRequest) { -} - -func (newState *CreateAppRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppRequest) { -} - // Delete an app type DeleteAppRequest struct { // The name of the app. @@ -588,19 +551,3 @@ func (newState *StopAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan Sto func (newState *StopAppRequest) SyncEffectiveFieldsDuringRead(existingState StopAppRequest) { } - -type UpdateAppRequest struct { - // The description of the app. - Description types.String `tfsdk:"description" tf:"optional"` - // The name of the app. The name must contain only lowercase alphanumeric - // characters and hyphens. It must be unique within the workspace. - Name types.String `tfsdk:"name" tf:""` - // Resources for the app. - Resources []AppResource `tfsdk:"resources" tf:"optional"` -} - -func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAppRequest) { -} - -func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAppRequest) { -} diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index 55778dfa87..caf38f865c 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -728,20 +728,6 @@ func (newState *CreateMonitor) SyncEffectiveFieldsDuringCreateOrUpdate(plan Crea func (newState *CreateMonitor) SyncEffectiveFieldsDuringRead(existingState CreateMonitor) { } -// Online Table information. -type CreateOnlineTableRequest struct { - // Full three-part (catalog, schema, table) name of the table. - Name types.String `tfsdk:"name" tf:"optional"` - // Specification of the online table. 
- Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` -} - -func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateOnlineTableRequest) { -} - -func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringRead(existingState CreateOnlineTableRequest) { -} - type CreateRegisteredModelRequest struct { // The name of the catalog where the schema and the registered model reside CatalogName types.String `tfsdk:"catalog_name" tf:""` diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index b76126b18b..2066f6a422 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -15,93 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -type CreateDashboardRequest struct { - // The display name of the dashboard. - DisplayName types.String `tfsdk:"display_name" tf:""` - // The workspace path of the folder containing the dashboard. Includes - // leading slash and no trailing slash. This field is excluded in List - // Dashboards responses. - ParentPath types.String `tfsdk:"parent_path" tf:"optional"` - EffectiveParentPath types.String `tfsdk:"effective_parent_path" tf:"computed,optional"` - // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. Use the [get dashboard API] to - // retrieve an example response, which includes the `serialized_dashboard` - // field. This field provides the structure of the JSON string that - // represents the dashboard's layout and components. - // - // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` - // The warehouse ID used to run the dashboard. - WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` -} - -func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateDashboardRequest) { - newState.EffectiveParentPath = newState.ParentPath - newState.ParentPath = plan.ParentPath -} - -func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateDashboardRequest) { - newState.EffectiveParentPath = existingState.EffectiveParentPath - if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { - newState.ParentPath = existingState.ParentPath - } -} - -type CreateScheduleRequest struct { - // The cron expression describing the frequency of the periodic refresh for - // this schedule. - CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` - // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // The display name for schedule. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The status indicates whether this schedule is paused or not. 
- PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` -} - -func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateScheduleRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId -} - -func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState CreateScheduleRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } -} - -type CreateSubscriptionRequest struct { - // UUID identifying the dashboard to which the subscription belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // UUID identifying the schedule to which the subscription belongs. - ScheduleId types.String `tfsdk:"-"` - EffectiveScheduleId types.String `tfsdk:"-"` - // Subscriber details for users and destinations to be added as subscribers - // to the schedule. - Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"` -} - -func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateSubscriptionRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId - newState.EffectiveScheduleId = newState.ScheduleId - newState.ScheduleId = plan.ScheduleId -} - -func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState CreateSubscriptionRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } - newState.EffectiveScheduleId = existingState.EffectiveScheduleId - if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { - newState.ScheduleId = existingState.ScheduleId - } -} - type CronSchedule struct { // A cron expression using quartz syntax. EX: `0 0 8 * * ?` represents // everyday at 8am. See [Cron Trigger] for details. 
@@ -308,22 +221,6 @@ func (newState *DeleteSubscriptionResponse) SyncEffectiveFieldsDuringCreateOrUpd func (newState *DeleteSubscriptionResponse) SyncEffectiveFieldsDuringRead(existingState DeleteSubscriptionResponse) { } -// Execute SQL query in a conversation message -type ExecuteMessageQueryRequest struct { - // Conversation ID - ConversationId types.String `tfsdk:"-"` - // Message ID - MessageId types.String `tfsdk:"-"` - // Genie space ID - SpaceId types.String `tfsdk:"-"` -} - -func (newState *ExecuteMessageQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExecuteMessageQueryRequest) { -} - -func (newState *ExecuteMessageQueryRequest) SyncEffectiveFieldsDuringRead(existingState ExecuteMessageQueryRequest) { -} - // Genie AI Response type GenieAttachment struct { Query []QueryAttachment `tfsdk:"query" tf:"optional,object"` @@ -373,6 +270,22 @@ func (newState *GenieCreateConversationMessageRequest) SyncEffectiveFieldsDuring func (newState *GenieCreateConversationMessageRequest) SyncEffectiveFieldsDuringRead(existingState GenieCreateConversationMessageRequest) { } +// Execute SQL query in a conversation message +type GenieExecuteMessageQueryRequest struct { + // Conversation ID + ConversationId types.String `tfsdk:"-"` + // Message ID + MessageId types.String `tfsdk:"-"` + // Genie space ID + SpaceId types.String `tfsdk:"-"` +} + +func (newState *GenieExecuteMessageQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieExecuteMessageQueryRequest) { +} + +func (newState *GenieExecuteMessageQueryRequest) SyncEffectiveFieldsDuringRead(existingState GenieExecuteMessageQueryRequest) { +} + // Get conversation message type GenieGetConversationMessageRequest struct { // The ID associated with the target conversation. @@ -1112,82 +1025,3 @@ func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpd func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringRead(existingState UnpublishDashboardResponse) { } - -type UpdateDashboardRequest struct { - // UUID identifying the dashboard. - DashboardId types.String `tfsdk:"-"` - // The display name of the dashboard. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The etag for the dashboard. Can be optionally provided on updates to - // ensure that the dashboard has not been modified since the last read. This - // field is excluded in List Dashboards responses. - Etag types.String `tfsdk:"etag" tf:"optional"` - EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` - // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. Use the [get dashboard API] to - // retrieve an example response, which includes the `serialized_dashboard` - // field. This field provides the structure of the JSON string that - // represents the dashboard's layout and components. - // - // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` - // The warehouse ID used to run the dashboard. 
- WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` -} - -func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDashboardRequest) { - newState.EffectiveEtag = newState.Etag - newState.Etag = plan.Etag -} - -func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDashboardRequest) { - newState.EffectiveEtag = existingState.EffectiveEtag - if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { - newState.Etag = existingState.Etag - } -} - -type UpdateScheduleRequest struct { - // The cron expression describing the frequency of the periodic refresh for - // this schedule. - CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` - // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // The display name for schedule. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The etag for the schedule. Must be left empty on create, must be provided - // on updates to ensure that the schedule has not been modified since the - // last read, and can be optionally provided on delete. - Etag types.String `tfsdk:"etag" tf:"optional"` - EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` - // The status indicates whether this schedule is paused or not. - PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` - // UUID identifying the schedule. - ScheduleId types.String `tfsdk:"-"` - EffectiveScheduleId types.String `tfsdk:"-"` -} - -func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateScheduleRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId - newState.EffectiveEtag = newState.Etag - newState.Etag = plan.Etag - newState.EffectiveScheduleId = newState.ScheduleId - newState.ScheduleId = plan.ScheduleId -} - -func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState UpdateScheduleRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } - newState.EffectiveEtag = existingState.EffectiveEtag - if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { - newState.Etag = existingState.Etag - } - newState.EffectiveScheduleId = existingState.EffectiveScheduleId - if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { - newState.ScheduleId = existingState.ScheduleId - } -} diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 295d1e1a99..fe3918dabd 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -839,9 +839,8 @@ type GetRunRequest struct { IncludeHistory types.Bool `tfsdk:"-"` // Whether to include resolved parameter values in the response. IncludeResolvedValues types.Bool `tfsdk:"-"` - // To list the next page or the previous page of job tasks, set this field - // to the value of the `next_page_token` or `prev_page_token` returned in - // the GetJob response. + // To list the next page of job tasks, set this field to the value of the + // `next_page_token` returned in the GetJob response. PageToken types.String `tfsdk:"-"` // The canonical identifier of the run for which to retrieve the metadata. // This field is required. 
@@ -1727,8 +1726,10 @@ type RepairRun struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // Job-level parameters used in the run. for example `"param": // "overriding_val"` @@ -2040,8 +2041,6 @@ type Run struct { OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"` // The parameters used for this run. OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional,object"` - // A token that can be used to list the previous page of sub-resources. - PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` // The repair history of the run. @@ -2182,8 +2181,10 @@ type RunJobTask struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // ID of the job to trigger. JobId types.Int64 `tfsdk:"job_id" tf:""` @@ -2290,8 +2291,10 @@ type RunNow struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // The ID of the job to be executed JobId types.Int64 `tfsdk:"job_id" tf:""` @@ -2447,8 +2450,10 @@ type RunParameters struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // A map from keys to values for jobs with notebook task, for example // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed @@ -2584,13 +2589,14 @@ type RunTask struct { // cluster, this field is set once the Jobs service has requested a cluster // for the run. ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"` - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. 
+ // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []RunConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before @@ -2622,8 +2628,8 @@ type RunTask struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []RunForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by @@ -2643,16 +2649,17 @@ type RunTask struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` @@ -2668,7 +2675,7 @@ type RunTask struct { // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` RunPageUrl types.String `tfsdk:"run_page_url" tf:"optional"` @@ -2680,12 +2687,14 @@ type RunTask struct { // job runs. 
The total duration of a multitask job run is the value of the // `run_duration` field. SetupDuration types.Int64 `tfsdk:"setup_duration" tf:"optional"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -2702,7 +2711,8 @@ type RunTask struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job @@ -3112,13 +3122,14 @@ func (newState *SubmitRunResponse) SyncEffectiveFieldsDuringRead(existingState S } type SubmitTask struct { - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before @@ -3139,8 +3150,8 @@ type SubmitTask struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. 
Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` @@ -3150,30 +3161,33 @@ type SubmitTask struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -3190,7 +3204,8 @@ type SubmitTask struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. 
This field is required and must be unique within its parent @@ -3236,13 +3251,14 @@ func (newState *TableUpdateTriggerConfiguration) SyncEffectiveFieldsDuringRead(e } type Task struct { - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete before executing this @@ -3266,8 +3282,8 @@ type Task struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` @@ -3289,16 +3305,17 @@ type Task struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional policy to specify whether to retry a job when it times out. // The default behavior is to not retry on timeout. 
@@ -3313,14 +3330,16 @@ type Task struct { // `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: ALl // dependencies have failed RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -3337,7 +3356,8 @@ type Task struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent diff --git a/internal/service/oauth2_tf/model.go b/internal/service/oauth2_tf/model.go index e738e7f094..eacea7e75a 100755 --- a/internal/service/oauth2_tf/model.go +++ b/internal/service/oauth2_tf/model.go @@ -342,6 +342,16 @@ func (newState *ListPublishedAppIntegrationsRequest) SyncEffectiveFieldsDuringRe // List service principal secrets type ListServicePrincipalSecretsRequest struct { + // An opaque page token which was the `next_page_token` in the response of + // the previous request to list the secrets for this service principal. + // Provide this token to retrieve the next page of secret entries. When + // providing a `page_token`, all other parameters provided to the request + // must match the previous request. To list all of the secrets for a service + // principal, it is necessary to continue requesting pages of entries until + // the response contains no `next_page_token`. Note that the number of + // entries returned must not be used to determine when the listing is + // complete. + PageToken types.String `tfsdk:"-"` // The service principal ID. ServicePrincipalId types.Int64 `tfsdk:"-"` } @@ -353,6 +363,8 @@ func (newState *ListServicePrincipalSecretsRequest) SyncEffectiveFieldsDuringRea } type ListServicePrincipalSecretsResponse struct { + // A token, which can be sent as `page_token` to retrieve the next page. 
+ NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` // List of the secrets Secrets []SecretInfo `tfsdk:"secrets" tf:"optional"` } diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index 17d8bbc18e..188e8f48df 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -277,6 +277,8 @@ type CreateWorkspaceRequest struct { GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled types.Bool `tfsdk:"is_no_public_ip_enabled" tf:"optional"` // The Google Cloud region of the workspace data plane in your Google // account. For example, `us-east4`. Location types.String `tfsdk:"location" tf:"optional"` @@ -482,6 +484,21 @@ func (newState *DeleteWorkspaceRequest) SyncEffectiveFieldsDuringCreateOrUpdate( func (newState *DeleteWorkspaceRequest) SyncEffectiveFieldsDuringRead(existingState DeleteWorkspaceRequest) { } +type ExternalCustomerInfo struct { + // Email of the authoritative user. + AuthoritativeUserEmail types.String `tfsdk:"authoritative_user_email" tf:"optional"` + // The authoritative user full name. + AuthoritativeUserFullName types.String `tfsdk:"authoritative_user_full_name" tf:"optional"` + // The legal entity name for the external workspace + CustomerName types.String `tfsdk:"customer_name" tf:"optional"` +} + +func (newState *ExternalCustomerInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExternalCustomerInfo) { +} + +func (newState *ExternalCustomerInfo) SyncEffectiveFieldsDuringRead(existingState ExternalCustomerInfo) { +} + type GcpKeyInfo struct { // The GCP KMS key's resource name KmsKeyId types.String `tfsdk:"kms_key_id" tf:""` @@ -1063,6 +1080,10 @@ type Workspace struct { // This value must be unique across all non-deleted deployments across all // AWS regions. DeploymentName types.String `tfsdk:"deployment_name" tf:"optional"` + // If this workspace is for a external customer, then external_customer_info + // is populated. If this workspace is not for a external customer, then + // external_customer_info is empty. + ExternalCustomerInfo []ExternalCustomerInfo `tfsdk:"external_customer_info" tf:"optional,object"` // The network settings for the workspace. The configurations are only for // Databricks-managed VPCs. It is ignored if you specify a customer-managed // VPC in the `network_id` field.", All the IP range configurations must be @@ -1089,6 +1110,8 @@ type Workspace struct { GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled types.Bool `tfsdk:"is_no_public_ip_enabled" tf:"optional"` // The Google Cloud region of the workspace data plane in your Google // account (for example, `us-east4`). 
Location types.String `tfsdk:"location" tf:"optional"` diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 71b73ba253..4564aeb780 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -14,6 +14,74 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) +type AibiDashboardEmbeddingAccessPolicy struct { + AccessPolicyType types.String `tfsdk:"access_policy_type" tf:""` +} + +func (newState *AibiDashboardEmbeddingAccessPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingAccessPolicy) { +} + +func (newState *AibiDashboardEmbeddingAccessPolicy) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingAccessPolicy) { +} + +type AibiDashboardEmbeddingAccessPolicySetting struct { + AibiDashboardEmbeddingAccessPolicy []AibiDashboardEmbeddingAccessPolicy `tfsdk:"aibi_dashboard_embedding_access_policy" tf:"object"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +func (newState *AibiDashboardEmbeddingAccessPolicySetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingAccessPolicySetting) { +} + +func (newState *AibiDashboardEmbeddingAccessPolicySetting) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingAccessPolicySetting) { +} + +type AibiDashboardEmbeddingApprovedDomains struct { + ApprovedDomains []types.String `tfsdk:"approved_domains" tf:"optional"` +} + +func (newState *AibiDashboardEmbeddingApprovedDomains) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingApprovedDomains) { +} + +func (newState *AibiDashboardEmbeddingApprovedDomains) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingApprovedDomains) { +} + +type AibiDashboardEmbeddingApprovedDomainsSetting struct { + AibiDashboardEmbeddingApprovedDomains []AibiDashboardEmbeddingApprovedDomains `tfsdk:"aibi_dashboard_embedding_approved_domains" tf:"object"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. 
This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +func (newState *AibiDashboardEmbeddingApprovedDomainsSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingApprovedDomainsSetting) { +} + +func (newState *AibiDashboardEmbeddingApprovedDomainsSetting) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingApprovedDomainsSetting) { +} + type AutomaticClusterUpdateSetting struct { AutomaticClusterUpdateWorkspace []ClusterAutoRestartMessage `tfsdk:"automatic_cluster_update_workspace" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag @@ -962,6 +1030,42 @@ func (newState *GetAccountIpAccessListRequest) SyncEffectiveFieldsDuringCreateOr func (newState *GetAccountIpAccessListRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountIpAccessListRequest) { } +// Retrieve the AI/BI dashboard embedding access policy +type GetAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +func (newState *GetAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +func (newState *GetAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringRead(existingState GetAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +// Retrieve the list of domains approved to host embedded AI/BI dashboards +type GetAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +func (newState *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + +func (newState *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + // Get the automatic cluster update setting type GetAutomaticClusterUpdateSettingRequest struct { // etag used for versioning. 
The response is at least as fresh as the eTag @@ -1494,8 +1598,7 @@ type NccAzurePrivateEndpointRule struct { // DISCONNECTED: Connection was removed by the private link resource owner, // the private endpoint becomes informative and should be deleted for // clean-up. - ConnectionState types.String `tfsdk:"connection_state" tf:"optional"` - EffectiveConnectionState types.String `tfsdk:"effective_connection_state" tf:"computed,optional"` + ConnectionState types.String `tfsdk:"connection_state" tf:"optional"` // Time in epoch milliseconds when this object was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` @@ -1526,8 +1629,6 @@ type NccAzurePrivateEndpointRule struct { } func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccAzurePrivateEndpointRule) { - newState.EffectiveConnectionState = newState.ConnectionState - newState.ConnectionState = plan.ConnectionState newState.EffectiveCreationTime = newState.CreationTime newState.CreationTime = plan.CreationTime newState.EffectiveDeactivated = newState.Deactivated @@ -1543,10 +1644,6 @@ func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUp } func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringRead(existingState NccAzurePrivateEndpointRule) { - newState.EffectiveConnectionState = existingState.EffectiveConnectionState - if existingState.EffectiveConnectionState.ValueString() == newState.ConnectionState.ValueString() { - newState.ConnectionState = existingState.ConnectionState - } newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime @@ -1968,6 +2065,9 @@ type TokenInfo struct { CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // Timestamp when the token expires. ExpiryTime types.Int64 `tfsdk:"expiry_time" tf:"optional"` + // Approximate timestamp for the day the token was last used. Accurate up to + // 1 day. + LastUsedDay types.Int64 `tfsdk:"last_used_day" tf:"optional"` // User ID of the user that owns the token. OwnerId types.Int64 `tfsdk:"owner_id" tf:"optional"` // ID of the token. @@ -2032,6 +2132,46 @@ func (newState *TokenPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate func (newState *TokenPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState TokenPermissionsRequest) { } +// Details required to update a setting. +type UpdateAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). 
+ FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting []AibiDashboardEmbeddingAccessPolicySetting `tfsdk:"setting" tf:"object"` +} + +func (newState *UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +func (newState *UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +// Details required to update a setting. +type UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting []AibiDashboardEmbeddingApprovedDomainsSetting `tfsdk:"setting" tf:"object"` +} + +func (newState *UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + +func (newState *UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + // Details required to update a setting. type UpdateAutomaticClusterUpdateSettingRequest struct { // This should always be set to true for Settings API. Added for AIP From da1f7e440af4eb42ecf2487f34973a3d9a0897ee Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Thu, 31 Oct 2024 20:51:09 -0700 Subject: [PATCH 82/99] [Internal] Rollout Plugin Framework (#4134) ## Changes - Made it possible to use environment variable USE_SDK_V2 to control rollout - Bumped resource and data source names from staging names to production names (can debate on whether we want to do that now or in a more gradual fashion) - Proposed mechanism: https://docs.google.com/document/d/1zfTp8YesMe4GxkbIt9Sbwd5nQL7Y4mXed8dPDGmAoxU/edit?tab=t.0 - Added infra for switching providers from sdkv2 to plugin framework in integration tests and added test cases for those cases ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- docs/data-sources/volumes.md | 3 + docs/guides/troubleshooting.md | 12 + docs/resources/library.md | 3 + docs/resources/quality_monitor.md | 3 + internal/providers/pluginfw/pluginfw.go | 30 +- .../pluginfw/pluginfw_rollout_utils.go | 205 +++++++++++ .../resources/library/resource_library.go | 5 +- .../library/resource_library_acc_test.go | 131 ++++--- .../resource_quality_monitor.go | 8 +- .../resource_quality_monitor_acc_test.go | 104 +++++- .../pluginfw/resources/volume/data_volumes.go | 2 +- .../resources/volume/data_volumes_acc_test.go | 8 +- internal/providers/providers.go | 19 +- internal/providers/sdkv2/sdkv2.go | 330 ++++++++++-------- 14 files changed, 617 insertions(+), 246 deletions(-) create mode 100644 internal/providers/pluginfw/pluginfw_rollout_utils.go diff --git a/docs/data-sources/volumes.md b/docs/data-sources/volumes.md index 
89ee190f39..4c529185cd 100644
--- a/docs/data-sources/volumes.md
+++ b/docs/data-sources/volumes.md
@@ -7,6 +7,9 @@ subcategory: "Unity Catalog"
 
 Retrieves a list of [databricks_volume](../resources/volume.md) ids (full names), that were created by Terraform or manually.
 
+## Plugin Framework Migration
+The volumes data source has been migrated from sdkv2 to plugin framework in version 1.57. If you encounter any problem with this data source and suspect it is due to the migration, you can fall back to sdkv2 by setting the environment variable in the following way: `export USE_SDK_V2_DATA_SOURCES="databricks_volumes"`.
+
 ## Example Usage
 
 Listing all volumes in a _things_ [databricks_schema](../resources/schema.md) of a _sandbox_ [databricks_catalog](../resources/catalog.md):
diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md
index 93d9d89fec..dadd4a51c9 100644
--- a/docs/guides/troubleshooting.md
+++ b/docs/guides/troubleshooting.md
@@ -17,6 +17,18 @@ TF_LOG=DEBUG DATABRICKS_DEBUG_TRUNCATE_BYTES=250000 terraform apply -no-color 2>
 
 * Open a [new GitHub issue](https://github.com/databricks/terraform-provider-databricks/issues/new/choose) providing all information described in the issue template - debug logs, your Terraform code, Terraform & plugin versions, etc.
 
+## Plugin Framework Migration Problems
+The following resources and data sources have been migrated from sdkv2 to plugin framework. If you encounter any problem with those, you can fall back to sdkv2 by setting the `USE_SDK_V2_RESOURCES` and `USE_SDK_V2_DATA_SOURCES` environment variables.
+
+Example: `export USE_SDK_V2_RESOURCES="databricks_library,databricks_quality_monitor"`
+
+### Resources migrated
+ - databricks_quality_monitor
+ - databricks_library
+### Data sources migrated
+ - databricks_volumes
+
+
 ## Typical problems
 
 ### Data resources and Authentication is not configured errors
diff --git a/docs/resources/library.md b/docs/resources/library.md
index c693bfed8d..e03ad0ea40 100644
--- a/docs/resources/library.md
+++ b/docs/resources/library.md
@@ -7,6 +7,9 @@ Installs a [library](https://docs.databricks.com/libraries/index.html) on [datab
 
 -> `databricks_library` resource would always start the associated cluster if it's not running, so make sure to have auto-termination configured. It's not possible to atomically change the version of the same library without cluster restart. Libraries are fully removed from the cluster only after restart.
 
+## Plugin Framework Migration
+The library resource has been migrated from sdkv2 to plugin framework. If you encounter any problem with this resource and suspect it is due to the migration, you can fall back to sdkv2 by setting the environment variable in the following way: `export USE_SDK_V2_RESOURCES="databricks_library"`.
+
 ## Installing library on all clusters
 
 You can install libraries on all clusters with the help of [databricks_clusters](../data-sources/clusters.md) data resource:
diff --git a/docs/resources/quality_monitor.md b/docs/resources/quality_monitor.md
index 71613a6e0d..64e06f187a 100644
--- a/docs/resources/quality_monitor.md
+++ b/docs/resources/quality_monitor.md
@@ -7,6 +7,9 @@ This resource allows you to manage [Lakehouse Monitors](https://docs.databricks.
 
 A `databricks_quality_monitor` is attached to a [databricks_sql_table](sql_table.md) and can be of type timeseries, snapshot or inference.
 
+## Plugin Framework Migration
+The quality monitor resource has been migrated from sdkv2 to plugin framework. If you encounter any problem with this resource and suspect it is due to the migration, you can fall back to sdkv2 by setting the environment variable in the following way: `export USE_SDK_V2_RESOURCES="databricks_quality_monitor"`.
+
 ## Example Usage
 
 ```hcl
diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go
index 5592e3e29b..4eaecd9938 100644
--- a/internal/providers/pluginfw/pluginfw.go
+++ b/internal/providers/pluginfw/pluginfw.go
@@ -16,15 +16,6 @@ import (
 	"github.com/databricks/terraform-provider-databricks/commands"
 	"github.com/databricks/terraform-provider-databricks/common"
 	providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common"
-	"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/catalog"
-	"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster"
-	"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library"
-	"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations"
-	"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor"
-	"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/registered_model"
-	"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/sharing"
-	"github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume"
-	"github.com/hashicorp/terraform-plugin-framework/datasource"
 	"github.com/hashicorp/terraform-plugin-framework/diag"
 	"github.com/hashicorp/terraform-plugin-framework/path"
@@ -35,34 +26,23 @@ import (
 	"github.com/hashicorp/terraform-plugin-log/tflog"
 )
 
-func GetDatabricksProviderPluginFramework() provider.Provider {
-	p := &DatabricksProviderPluginFramework{}
+func GetDatabricksProviderPluginFramework(sdkV2FallbackOptions ...SdkV2FallbackOption) provider.Provider {
+	p := &DatabricksProviderPluginFramework{sdkV2Fallbacks: sdkV2FallbackOptions}
 	return p
 }
 
 type DatabricksProviderPluginFramework struct {
+	sdkV2Fallbacks []SdkV2FallbackOption
 }
 
 var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil)
 
 func (p *DatabricksProviderPluginFramework) Resources(ctx context.Context) []func() resource.Resource {
-	return []func() resource.Resource{
-		qualitymonitor.ResourceQualityMonitor,
-		library.ResourceLibrary,
-		sharing.ResourceShare,
-	}
+	return getPluginFrameworkResourcesToRegister(p.sdkV2Fallbacks...)
 }
 
 func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []func() datasource.DataSource {
-	return []func() datasource.DataSource{
-		cluster.DataSourceCluster,
-		volume.DataSourceVolumes,
-		registered_model.DataSourceRegisteredModel,
-		notificationdestinations.DataSourceNotificationDestinations,
-		sharing.DataSourceShare,
-		sharing.DataSourceShares,
-		catalog.DataSourceFunctions,
-	}
+	return getPluginFrameworkDataSourcesToRegister(p.sdkV2Fallbacks...)
} func (p *DatabricksProviderPluginFramework) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) { diff --git a/internal/providers/pluginfw/pluginfw_rollout_utils.go b/internal/providers/pluginfw/pluginfw_rollout_utils.go new file mode 100644 index 0000000000..90b782a511 --- /dev/null +++ b/internal/providers/pluginfw/pluginfw_rollout_utils.go @@ -0,0 +1,205 @@ +package pluginfw + +// This file contains all of the utils for controlling the plugin framework rollout. +// For migrated resources and data sources, we can add them to the two maps below to have them registered with the plugin framework. +// Users can manually specify resources and data sources to use SDK V2 instead of the plugin framework by setting the USE_SDK_V2_RESOURCES and USE_SDK_V2_DATA_SOURCES environment variables. +// +// Example: USE_SDK_V2_RESOURCES="databricks_library" would force the library resource to use SDK V2 instead of the plugin framework. + +import ( + "context" + "os" + "slices" + "strings" + + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/catalog" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/registered_model" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/sharing" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// List of resources that have been migrated from SDK V2 to plugin framework +var migratedResources = []func() resource.Resource{ + qualitymonitor.ResourceQualityMonitor, + library.ResourceLibrary, +} + +// List of data sources that have been migrated from SDK V2 to plugin framework +var migratedDataSources = []func() datasource.DataSource{ + volume.DataSourceVolumes, +} + +// List of resources that have been onboarded to the plugin framework - not migrated from sdkv2. +var pluginFwOnlyResources = []func() resource.Resource{ + // TODO Add resources here + sharing.ResourceShare, // Using the staging name (with pluginframework suffix) +} + +// List of data sources that have been onboarded to the plugin framework - not migrated from sdkv2. +var pluginFwOnlyDataSources = []func() datasource.DataSource{ + registered_model.DataSourceRegisteredModel, + notificationdestinations.DataSourceNotificationDestinations, + catalog.DataSourceFunctions, + // TODO: Add DataSourceCluster into migratedDataSources after fixing unit tests. 
+ cluster.DataSourceCluster, // Using the staging name (with pluginframework suffix) + sharing.DataSourceShare, // Using the staging name (with pluginframework suffix) + sharing.DataSourceShares, // Using the staging name (with pluginframework suffix) +} + +type sdkV2FallbackOptions struct { + resourceFallbacks []string + dataSourceFallbacks []string +} + +// SdkV2FallbackOption is an interface for acceptance tests to specify resources / data sources to fallback to SDK V2 +type SdkV2FallbackOption interface { + Apply(*sdkV2FallbackOptions) +} + +type sdkV2ResourceFallback struct { + resourceFallbacks []string +} + +func (o *sdkV2ResourceFallback) Apply(options *sdkV2FallbackOptions) { + options.resourceFallbacks = o.resourceFallbacks +} + +// WithSdkV2ResourceFallbacks is a helper function to specify resources to fallback to SDK V2 +func WithSdkV2ResourceFallbacks(fallbacks ...string) SdkV2FallbackOption { + return &sdkV2ResourceFallback{resourceFallbacks: fallbacks} +} + +type sdkv2DataSourceFallback struct { + dataSourceFallbacks []string +} + +func (o *sdkv2DataSourceFallback) Apply(options *sdkV2FallbackOptions) { + options.dataSourceFallbacks = o.dataSourceFallbacks +} + +// WithSdkV2DataSourceFallbacks is a helper function to specify data sources to fallback to SDK V2 +func WithSdkV2DataSourceFallbacks(fallbacks []string) SdkV2FallbackOption { + return &sdkv2DataSourceFallback{dataSourceFallbacks: fallbacks} +} + +// GetUseSdkV2DataSources is a helper function to get name of resources that should use SDK V2 instead of plugin framework +func getUseSdkV2Resources() []string { + useSdkV2 := os.Getenv("USE_SDK_V2_RESOURCES") + if useSdkV2 == "" { + return []string{} + } + return strings.Split(useSdkV2, ",") +} + +// GetUseSdkV2DataSources is a helper function to get name of data sources that should use SDK V2 instead of plugin framework +func getUseSdkV2DataSources() []string { + useSdkV2 := os.Getenv("USE_SDK_V2_DATA_SOURCES") + if useSdkV2 == "" { + return []string{} + } + return strings.Split(useSdkV2, ",") +} + +// Helper function to check if a resource should use be in SDK V2 instead of plugin framework +func shouldUseSdkV2Resource(resourceName string) bool { + useSdkV2Resources := getUseSdkV2Resources() + return slices.Contains(useSdkV2Resources, resourceName) +} + +// Helper function to check if a data source should use be in SDK V2 instead of plugin framework +func shouldUseSdkV2DataSource(dataSourceName string) bool { + sdkV2DataSources := getUseSdkV2DataSources() + return slices.Contains(sdkV2DataSources, dataSourceName) +} + +// getPluginFrameworkResourcesToRegister is a helper function to get the list of resources that are migrated away from sdkv2 to plugin framework +func getPluginFrameworkResourcesToRegister(sdkV2Fallbacks ...SdkV2FallbackOption) []func() resource.Resource { + fallbackOption := sdkV2FallbackOptions{} + for _, o := range sdkV2Fallbacks { + o.Apply(&fallbackOption) + } + + var resources []func() resource.Resource + + // Loop through the map and add resources if they're not specifically marked to use the SDK V2 + for _, resourceFunc := range migratedResources { + name := getResourceName(resourceFunc) + if !shouldUseSdkV2Resource(name) && !slices.Contains(fallbackOption.resourceFallbacks, name) { + resources = append(resources, resourceFunc) + } + } + + return append(resources, pluginFwOnlyResources...) 
+} + +// getPluginFrameworkDataSourcesToRegister is a helper function to get the list of data sources that are migrated away from sdkv2 to plugin framework +func getPluginFrameworkDataSourcesToRegister(sdkV2Fallbacks ...SdkV2FallbackOption) []func() datasource.DataSource { + fallbackOption := sdkV2FallbackOptions{} + for _, o := range sdkV2Fallbacks { + o.Apply(&fallbackOption) + } + + var dataSources []func() datasource.DataSource + + // Loop through the map and add data sources if they're not specifically marked to use the SDK V2 + for _, dataSourceFunc := range migratedDataSources { + name := getDataSourceName(dataSourceFunc) + if !shouldUseSdkV2DataSource(name) && !slices.Contains(fallbackOption.dataSourceFallbacks, name) { + dataSources = append(dataSources, dataSourceFunc) + } + } + + return append(dataSources, pluginFwOnlyDataSources...) +} + +func getResourceName(resourceFunc func() resource.Resource) string { + resp := resource.MetadataResponse{} + resourceFunc().Metadata(context.Background(), resource.MetadataRequest{ProviderTypeName: "databricks"}, &resp) + return resp.TypeName +} + +func getDataSourceName(dataSourceFunc func() datasource.DataSource) string { + resp := datasource.MetadataResponse{} + dataSourceFunc().Metadata(context.Background(), datasource.MetadataRequest{ProviderTypeName: "databricks"}, &resp) + return resp.TypeName +} + +// GetSdkV2ResourcesToRemove is a helper function to get the list of resources that are migrated away from sdkv2 to plugin framework +func GetSdkV2ResourcesToRemove(sdkV2Fallbacks ...SdkV2FallbackOption) []string { + fallbackOption := sdkV2FallbackOptions{} + for _, o := range sdkV2Fallbacks { + o.Apply(&fallbackOption) + } + + resourcesToRemove := []string{} + for _, resourceFunc := range migratedResources { + name := getResourceName(resourceFunc) + if !shouldUseSdkV2Resource(name) && !slices.Contains(fallbackOption.resourceFallbacks, name) { + resourcesToRemove = append(resourcesToRemove, name) + } + } + return resourcesToRemove +} + +// GetSdkV2DataSourcesToRemove is a helper function to get the list of data sources that are migrated away from sdkv2 to plugin framework +func GetSdkV2DataSourcesToRemove(sdkV2Fallbacks ...SdkV2FallbackOption) []string { + fallbackOption := sdkV2FallbackOptions{} + for _, o := range sdkV2Fallbacks { + o.Apply(&fallbackOption) + } + + dataSourcesToRemove := []string{} + for _, dataSourceFunc := range migratedDataSources { + name := getDataSourceName(dataSourceFunc) + if !shouldUseSdkV2DataSource(name) && !slices.Contains(fallbackOption.dataSourceFallbacks, name) { + dataSourcesToRemove = append(dataSourcesToRemove, name) + } + } + return dataSourcesToRemove +} diff --git a/internal/providers/pluginfw/resources/library/resource_library.go b/internal/providers/pluginfw/resources/library/resource_library.go index 1c999bd2ed..17ac722bfa 100644 --- a/internal/providers/pluginfw/resources/library/resource_library.go +++ b/internal/providers/pluginfw/resources/library/resource_library.go @@ -62,6 +62,7 @@ func readLibrary(ctx context.Context, w *databricks.WorkspaceClient, waitParams type LibraryExtended struct { compute_tf.Library ClusterId types.String `tfsdk:"cluster_id"` + ID types.String `tfsdk:"id" tf:"optional,computed"` // Adding ID field to stay compatible with SDKv2 } type LibraryResource struct { @@ -69,7 +70,7 @@ type LibraryResource struct { } func (r *LibraryResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = 
pluginfwcommon.GetDatabricksStagingName(resourceName) + resp.TypeName = pluginfwcommon.GetDatabricksProductionName(resourceName) } func (r *LibraryResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { @@ -139,6 +140,8 @@ func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest resp.Diagnostics.Append(readLibrary(ctx, w, waitParams, libraryRep, &installedLib)...) + installedLib.ID = types.StringValue(libGoSDK.String()) + if resp.Diagnostics.HasError() { return } diff --git a/internal/providers/pluginfw/resources/library/resource_library_acc_test.go b/internal/providers/pluginfw/resources/library/resource_library_acc_test.go index 153657ae41..138c803111 100644 --- a/internal/providers/pluginfw/resources/library/resource_library_acc_test.go +++ b/internal/providers/pluginfw/resources/library/resource_library_acc_test.go @@ -1,30 +1,37 @@ package library_test import ( + "context" "testing" "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/databricks/terraform-provider-databricks/internal/providers" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" ) -func TestAccLibraryCreationPluginFramework(t *testing.T) { +var commonClusterConfig = `data "databricks_spark_version" "latest" { +} +resource "databricks_cluster" "this" { + cluster_name = "test-library-{var.RANDOM}" + spark_version = data.databricks_spark_version.latest.id + instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" + autotermination_minutes = 10 + num_workers = 0 + spark_conf = { + "spark.databricks.cluster.profile" = "singleNode" + "spark.master" = "local[*]" + } + custom_tags = { + "ResourceClass" = "SingleNode" + } +} + +` + +func TestAccLibraryCreation(t *testing.T) { acceptance.WorkspaceLevel(t, acceptance.Step{ - Template: `data "databricks_spark_version" "latest" { - } - resource "databricks_cluster" "this" { - cluster_name = "test-library-{var.RANDOM}" - spark_version = data.databricks_spark_version.latest.id - instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" - autotermination_minutes = 10 - num_workers = 0 - spark_conf = { - "spark.databricks.cluster.profile" = "singleNode" - "spark.master" = "local[*]" - } - custom_tags = { - "ResourceClass" = "SingleNode" - } - } - resource "databricks_library_pluginframework" "new_library" { + Template: commonClusterConfig + `resource "databricks_library" "new_library" { cluster_id = databricks_cluster.this.id pypi { repo = "https://pypi.org/dummy" @@ -35,26 +42,10 @@ func TestAccLibraryCreationPluginFramework(t *testing.T) { }) } -func TestAccLibraryUpdatePluginFramework(t *testing.T) { +func TestAccLibraryUpdate(t *testing.T) { acceptance.WorkspaceLevel(t, acceptance.Step{ - Template: `data "databricks_spark_version" "latest" { - } - resource "databricks_cluster" "this" { - cluster_name = "cluster-{var.STICKY_RANDOM}" - spark_version = data.databricks_spark_version.latest.id - instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" - autotermination_minutes = 10 - num_workers = 0 - spark_conf = { - "spark.databricks.cluster.profile" = "singleNode" - "spark.master" = "local[*]" - } - custom_tags = { - "ResourceClass" = "SingleNode" - } - } - resource "databricks_library_pluginframework" "new_library" { + Template: commonClusterConfig + `resource "databricks_library" "new_library" { cluster_id = databricks_cluster.this.id pypi { repo = "https://pypi.org/simple" @@ -64,23 +55,65 @@ func 
TestAccLibraryUpdatePluginFramework(t *testing.T) { `, }, acceptance.Step{ - Template: `data "databricks_spark_version" "latest" { + Template: commonClusterConfig + `resource "databricks_library" "new_library" { + cluster_id = databricks_cluster.this.id + pypi { + package = "networkx" + } } - resource "databricks_cluster" "this" { - cluster_name = "cluster-{var.STICKY_RANDOM}" - spark_version = data.databricks_spark_version.latest.id - instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" - autotermination_minutes = 10 - num_workers = 0 - spark_conf = { - "spark.databricks.cluster.profile" = "singleNode" - "spark.master" = "local[*]" + `, + }, + ) +} + +var sdkV2FallbackFactory = map[string]func() (tfprotov6.ProviderServer, error){ + "databricks": func() (tfprotov6.ProviderServer, error) { + return providers.GetProviderServer(context.Background(), providers.WithSdkV2FallbackOptions(pluginfw.WithSdkV2ResourceFallbacks("databricks_library"))) + }, +} + +// Testing the transition from sdkv2 to plugin framework. +func TestAccLibraryUpdateTransitionFromSdkV2(t *testing.T) { + acceptance.WorkspaceLevel(t, + acceptance.Step{ + ProtoV6ProviderFactories: sdkV2FallbackFactory, + Template: commonClusterConfig + `resource "databricks_library" "new_library" { + cluster_id = databricks_cluster.this.id + pypi { + repo = "https://pypi.org/simple" + package = "databricks-sdk" + } } - custom_tags = { - "ResourceClass" = "SingleNode" + `, + }, + acceptance.Step{ + Template: commonClusterConfig + `resource "databricks_library" "new_library" { + cluster_id = databricks_cluster.this.id + pypi { + package = "networkx" } } - resource "databricks_library_pluginframework" "new_library" { + `, + }, + ) +} + +// Testing the transition from plugin framework to sdkv2. +func TestAccLibraryUpdateTransitionFromPluginFw(t *testing.T) { + acceptance.WorkspaceLevel(t, + acceptance.Step{ + Template: commonClusterConfig + `resource "databricks_library" "new_library" { + cluster_id = databricks_cluster.this.id + pypi { + repo = "https://pypi.org/simple" + package = "databricks-sdk" + } + } + `, + }, + acceptance.Step{ + ProtoV6ProviderFactories: sdkV2FallbackFactory, + Template: commonClusterConfig + `resource "databricks_library" "new_library" { cluster_id = databricks_cluster.this.id pypi { package = "networkx" diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go index c0047d55cc..7a0445ddbb 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go @@ -58,6 +58,7 @@ type MonitorInfoExtended struct { catalog_tf.MonitorInfo WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` SkipBuiltinDashboard types.Bool `tfsdk:"skip_builtin_dashboard" tf:"optional"` + ID types.String `tfsdk:"id" tf:"optional,computed"` // Adding ID field to stay compatible with SDKv2 } type QualityMonitorResource struct { @@ -65,7 +66,7 @@ type QualityMonitorResource struct { } func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = pluginfwcommon.GetDatabricksStagingName(resourceName) + resp.TypeName = pluginfwcommon.GetDatabricksProductionName(resourceName) } func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { @@ -131,6 +132,9 @@ func (r 
*QualityMonitorResource) Create(ctx context.Context, req resource.Create return } + // Set the ID to the table name + newMonitorInfoTfSDK.ID = newMonitorInfoTfSDK.TableName + resp.Diagnostics.Append(resp.State.Set(ctx, newMonitorInfoTfSDK)...) } @@ -162,6 +166,8 @@ func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequ return } + monitorInfoTfSDK.ID = monitorInfoTfSDK.TableName + resp.Diagnostics.Append(resp.State.Set(ctx, monitorInfoTfSDK)...) } diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go index 7f303d482e..bc87743cda 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go @@ -1,10 +1,14 @@ package qualitymonitor_test import ( + "context" "os" "testing" "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/databricks/terraform-provider-databricks/internal/providers" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" ) var commonPartQualityMonitoring = `resource "databricks_catalog" "sandbox" { @@ -55,7 +59,7 @@ func TestUcAccQualityMonitor(t *testing.T) { acceptance.UnityWorkspaceLevel(t, acceptance.Step{ Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id @@ -81,7 +85,7 @@ func TestUcAccQualityMonitor(t *testing.T) { } } - resource "databricks_quality_monitor_pluginframework" "testMonitorTimeseries" { + resource "databricks_quality_monitor" "testMonitorTimeseries" { table_name = databricks_sql_table.myTimeseries.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" output_schema_name = databricks_schema.things.id @@ -104,7 +108,7 @@ func TestUcAccQualityMonitor(t *testing.T) { } } - resource "databricks_quality_monitor_pluginframework" "testMonitorSnapshot" { + resource "databricks_quality_monitor" "testMonitorSnapshot" { table_name = databricks_sql_table.mySnapshot.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" output_schema_name = databricks_schema.things.id @@ -121,7 +125,7 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { } acceptance.UnityWorkspaceLevel(t, acceptance.Step{ Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id @@ -136,7 +140,91 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { `, }, acceptance.Step{ Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + resource "databricks_quality_monitor" "testMonitorInference" { + table_name = 
databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log { + granularities = ["1 hour"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }) +} + +var sdkV2FallbackFactory = map[string]func() (tfprotov6.ProviderServer, error){ + "databricks": func() (tfprotov6.ProviderServer, error) { + return providers.GetProviderServer(context.Background(), providers.WithSdkV2FallbackOptions(pluginfw.WithSdkV2ResourceFallbacks("databricks_quality_monitor"))) + }, +} + +// Testing the transition from sdkv2 to plugin framework. +func TestUcAccUpdateQualityMonitorTransitionFromSdkV2(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + t.Skipf("databricks_quality_monitor resource is not available on GCP") + } + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + ProtoV6ProviderFactories: sdkV2FallbackFactory, + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log { + granularities = ["1 day"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }, acceptance.Step{ + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log { + granularities = ["1 hour"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }) +} + +// Testing the transition from plugin framework back to SDK V2. 
+func TestUcAccUpdateQualityMonitorTransitionFromPluginFw(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + t.Skipf("databricks_quality_monitor resource is not available on GCP") + } + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log { + granularities = ["1 day"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }, acceptance.Step{ + ProtoV6ProviderFactories: sdkV2FallbackFactory, + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id @@ -160,7 +248,7 @@ func TestUcAccQualityMonitorImportPluginFramework(t *testing.T) { acceptance.Step{ Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id @@ -176,8 +264,8 @@ func TestUcAccQualityMonitorImportPluginFramework(t *testing.T) { }, acceptance.Step{ ImportState: true, - ResourceName: "databricks_quality_monitor_pluginframework.testMonitorInference", - ImportStateIdFunc: acceptance.BuildImportStateIdFunc("databricks_quality_monitor_pluginframework.testMonitorInference", "table_name"), + ResourceName: "databricks_quality_monitor.testMonitorInference", + ImportStateIdFunc: acceptance.BuildImportStateIdFunc("databricks_quality_monitor.testMonitorInference", "table_name"), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: "table_name", }, diff --git a/internal/providers/pluginfw/resources/volume/data_volumes.go b/internal/providers/pluginfw/resources/volume/data_volumes.go index 54eccf7bde..6a4af53ba0 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes.go @@ -35,7 +35,7 @@ type VolumesList struct { } func (d *VolumesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = pluginfwcommon.GetDatabricksStagingName(dataSourceName) + resp.TypeName = pluginfwcommon.GetDatabricksProductionName(dataSourceName) } func (d *VolumesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { diff --git a/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go b/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go index 0fdfc8aa50..3416d20f26 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go @@ -12,8 +12,8 @@ import ( func checkDataSourceVolumesPopulated(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { - _, ok := 
s.Modules[0].Resources["data.databricks_volumes_pluginframework.this"] - require.True(t, ok, "data.databricks_volumes_pluginframework.this has to be there") + _, ok := s.Modules[0].Resources["data.databricks_volumes.this"] + require.True(t, ok, "data.databricks_volumes.this has to be there") num_volumes, _ := strconv.Atoi(s.Modules[0].Outputs["volumes"].Value.(string)) assert.GreaterOrEqual(t, num_volumes, 1) return nil @@ -45,13 +45,13 @@ func TestUcAccDataSourceVolumes(t *testing.T) { schema_name = databricks_schema.things.name volume_type = "MANAGED" } - data "databricks_volumes_pluginframework" "this" { + data "databricks_volumes" "this" { catalog_name = databricks_catalog.sandbox.name schema_name = databricks_schema.things.name depends_on = [ databricks_volume.this ] } output "volumes" { - value = length(data.databricks_volumes_pluginframework.this.ids) + value = length(data.databricks_volumes.this.ids) } `, Check: checkDataSourceVolumesPopulated(t), diff --git a/internal/providers/providers.go b/internal/providers/providers.go index 681df6f74d..64a8296467 100644 --- a/internal/providers/providers.go +++ b/internal/providers/providers.go @@ -20,6 +20,7 @@ import ( type serverOptions struct { sdkV2Provider *schema.Provider pluginFrameworkProvider provider.Provider + sdkV2fallbacks []pluginfw.SdkV2FallbackOption } // ServerOption is a common interface for overriding providers in GetProviderServer functino call. @@ -41,6 +42,20 @@ func WithSdkV2Provider(sdkV2Provider *schema.Provider) ServerOption { return &sdkV2ProviderOption{sdkV2Provider: sdkV2Provider} } +type sdkV2FallbackOption struct { + sdkV2fallbacks []pluginfw.SdkV2FallbackOption +} + +func (o *sdkV2FallbackOption) Apply(options *serverOptions) { + options.sdkV2fallbacks = o.sdkV2fallbacks +} + +// WithSdkV2FallbackOptions allows overriding the SDKv2 fallback options used when creating a Terraform provider with muxing. +// This is typically used in acceptance test for testing the compatibility between sdkv2 and plugin framework. +func WithSdkV2FallbackOptions(options ...pluginfw.SdkV2FallbackOption) ServerOption { + return &sdkV2FallbackOption{sdkV2fallbacks: options} +} + // GetProviderServer initializes and returns a Terraform Protocol v6 ProviderServer. // The function begins by initializing the Databricks provider using the SDK plugin // and then upgrades this provider to be compatible with Terraform's Protocol v6 using @@ -60,11 +75,11 @@ func GetProviderServer(ctx context.Context, options ...ServerOption) (tfprotov6. } sdkPluginProvider := serverOptions.sdkV2Provider if sdkPluginProvider == nil { - sdkPluginProvider = sdkv2.DatabricksProvider() + sdkPluginProvider = sdkv2.DatabricksProvider(serverOptions.sdkV2fallbacks...) } pluginFrameworkProvider := serverOptions.pluginFrameworkProvider if pluginFrameworkProvider == nil { - pluginFrameworkProvider = pluginfw.GetDatabricksProviderPluginFramework() + pluginFrameworkProvider = pluginfw.GetDatabricksProviderPluginFramework(serverOptions.sdkV2fallbacks...) 
} upgradedSdkPluginProvider, err := tf5to6server.UpgradeServer( diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index d40d663ee2..e689b5b693 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -32,6 +32,7 @@ import ( "github.com/databricks/terraform-provider-databricks/dashboards" "github.com/databricks/terraform-provider-databricks/finops" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw" "github.com/databricks/terraform-provider-databricks/jobs" "github.com/databricks/terraform-provider-databricks/logger" "github.com/databricks/terraform-provider-databricks/mlflow" @@ -71,162 +72,181 @@ func init() { } // DatabricksProvider returns the entire terraform provider object -func DatabricksProvider() *schema.Provider { +func DatabricksProvider(sdkV2Fallbacks ...pluginfw.SdkV2FallbackOption) *schema.Provider { + dataSourceMap := map[string]*schema.Resource{ // must be in alphabetical order + "databricks_aws_crossaccount_policy": aws.DataAwsCrossaccountPolicy().ToResource(), + "databricks_aws_assume_role_policy": aws.DataAwsAssumeRolePolicy().ToResource(), + "databricks_aws_bucket_policy": aws.DataAwsBucketPolicy().ToResource(), + "databricks_aws_unity_catalog_assume_role_policy": aws.DataAwsUnityCatalogAssumeRolePolicy().ToResource(), + "databricks_aws_unity_catalog_policy": aws.DataAwsUnityCatalogPolicy().ToResource(), + "databricks_cluster": clusters.DataSourceCluster().ToResource(), + "databricks_clusters": clusters.DataSourceClusters().ToResource(), + "databricks_cluster_policy": policies.DataSourceClusterPolicy().ToResource(), + "databricks_catalog": catalog.DataSourceCatalog().ToResource(), + "databricks_catalogs": catalog.DataSourceCatalogs().ToResource(), + "databricks_current_config": mws.DataSourceCurrentConfiguration().ToResource(), + "databricks_current_metastore": catalog.DataSourceCurrentMetastore().ToResource(), + "databricks_current_user": scim.DataSourceCurrentUser().ToResource(), + "databricks_dbfs_file": storage.DataSourceDbfsFile().ToResource(), + "databricks_dbfs_file_paths": storage.DataSourceDbfsFilePaths().ToResource(), + "databricks_directory": workspace.DataSourceDirectory().ToResource(), + "databricks_external_location": catalog.DataSourceExternalLocation().ToResource(), + "databricks_external_locations": catalog.DataSourceExternalLocations().ToResource(), + "databricks_group": scim.DataSourceGroup().ToResource(), + "databricks_instance_pool": pools.DataSourceInstancePool().ToResource(), + "databricks_instance_profiles": aws.DataSourceInstanceProfiles().ToResource(), + "databricks_jobs": jobs.DataSourceJobs().ToResource(), + "databricks_job": jobs.DataSourceJob().ToResource(), + "databricks_metastore": catalog.DataSourceMetastore().ToResource(), + "databricks_metastores": catalog.DataSourceMetastores().ToResource(), + "databricks_mlflow_experiment": mlflow.DataSourceExperiment().ToResource(), + "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), + "databricks_mlflow_models": mlflow.DataSourceModels().ToResource(), + "databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), + "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), + "databricks_node_type": clusters.DataSourceNodeType().ToResource(), + "databricks_notebook": workspace.DataSourceNotebook().ToResource(), + "databricks_notebook_paths": 
workspace.DataSourceNotebookPaths().ToResource(), + "databricks_pipelines": pipelines.DataSourcePipelines().ToResource(), + "databricks_schema": catalog.DataSourceSchema().ToResource(), + "databricks_schemas": catalog.DataSourceSchemas().ToResource(), + "databricks_service_principal": scim.DataSourceServicePrincipal().ToResource(), + "databricks_service_principals": scim.DataSourceServicePrincipals().ToResource(), + "databricks_share": sharing.DataSourceShare().ToResource(), + "databricks_shares": sharing.DataSourceShares().ToResource(), + "databricks_spark_version": clusters.DataSourceSparkVersion().ToResource(), + "databricks_sql_warehouse": sql.DataSourceWarehouse().ToResource(), + "databricks_sql_warehouses": sql.DataSourceWarehouses().ToResource(), + "databricks_storage_credential": catalog.DataSourceStorageCredential().ToResource(), + "databricks_storage_credentials": catalog.DataSourceStorageCredentials().ToResource(), + "databricks_table": catalog.DataSourceTable().ToResource(), + "databricks_tables": catalog.DataSourceTables().ToResource(), + "databricks_views": catalog.DataSourceViews().ToResource(), + "databricks_volume": catalog.DataSourceVolume().ToResource(), + "databricks_volumes": catalog.DataSourceVolumes().ToResource(), + "databricks_user": scim.DataSourceUser().ToResource(), + "databricks_zones": clusters.DataSourceClusterZones().ToResource(), + } + + resourceMap := map[string]*schema.Resource{ // must be in alphabetical order + "databricks_access_control_rule_set": permissions.ResourceAccessControlRuleSet().ToResource(), + "databricks_alert": sql.ResourceAlert().ToResource(), + "databricks_artifact_allowlist": catalog.ResourceArtifactAllowlist().ToResource(), + "databricks_aws_s3_mount": storage.ResourceAWSS3Mount().ToResource(), + "databricks_azure_adls_gen1_mount": storage.ResourceAzureAdlsGen1Mount().ToResource(), + "databricks_azure_adls_gen2_mount": storage.ResourceAzureAdlsGen2Mount().ToResource(), + "databricks_azure_blob_mount": storage.ResourceAzureBlobMount().ToResource(), + "databricks_budget": finops.ResourceBudget().ToResource(), + "databricks_catalog": catalog.ResourceCatalog().ToResource(), + "databricks_catalog_workspace_binding": catalog.ResourceCatalogWorkspaceBinding().ToResource(), + "databricks_custom_app_integration": apps.ResourceCustomAppIntegration().ToResource(), + "databricks_connection": catalog.ResourceConnection().ToResource(), + "databricks_cluster": clusters.ResourceCluster().ToResource(), + "databricks_cluster_policy": policies.ResourceClusterPolicy().ToResource(), + "databricks_dashboard": dashboards.ResourceDashboard().ToResource(), + "databricks_dbfs_file": storage.ResourceDbfsFile().ToResource(), + "databricks_directory": workspace.ResourceDirectory().ToResource(), + "databricks_entitlements": scim.ResourceEntitlements().ToResource(), + "databricks_external_location": catalog.ResourceExternalLocation().ToResource(), + "databricks_file": storage.ResourceFile().ToResource(), + "databricks_git_credential": repos.ResourceGitCredential().ToResource(), + "databricks_global_init_script": workspace.ResourceGlobalInitScript().ToResource(), + "databricks_grant": catalog.ResourceGrant().ToResource(), + "databricks_grants": catalog.ResourceGrants().ToResource(), + "databricks_group": scim.ResourceGroup().ToResource(), + "databricks_group_instance_profile": aws.ResourceGroupInstanceProfile().ToResource(), + "databricks_group_member": scim.ResourceGroupMember().ToResource(), + "databricks_group_role": scim.ResourceGroupRole().ToResource(), + 
"databricks_instance_pool": pools.ResourceInstancePool().ToResource(), + "databricks_instance_profile": aws.ResourceInstanceProfile().ToResource(), + "databricks_ip_access_list": access.ResourceIPAccessList().ToResource(), + "databricks_job": jobs.ResourceJob().ToResource(), + "databricks_lakehouse_monitor": catalog.ResourceLakehouseMonitor().ToResource(), + "databricks_library": clusters.ResourceLibrary().ToResource(), + "databricks_metastore": catalog.ResourceMetastore().ToResource(), + "databricks_metastore_assignment": catalog.ResourceMetastoreAssignment().ToResource(), + "databricks_metastore_data_access": catalog.ResourceMetastoreDataAccess().ToResource(), + "databricks_mlflow_experiment": mlflow.ResourceMlflowExperiment().ToResource(), + "databricks_mlflow_model": mlflow.ResourceMlflowModel().ToResource(), + "databricks_mlflow_webhook": mlflow.ResourceMlflowWebhook().ToResource(), + "databricks_model_serving": serving.ResourceModelServing().ToResource(), + "databricks_mount": storage.ResourceMount().ToResource(), + "databricks_mws_customer_managed_keys": mws.ResourceMwsCustomerManagedKeys().ToResource(), + "databricks_mws_credentials": mws.ResourceMwsCredentials().ToResource(), + "databricks_mws_log_delivery": mws.ResourceMwsLogDelivery().ToResource(), + "databricks_mws_ncc_binding": mws.ResourceMwsNccBinding().ToResource(), + "databricks_mws_ncc_private_endpoint_rule": mws.ResourceMwsNccPrivateEndpointRule().ToResource(), + "databricks_mws_networks": mws.ResourceMwsNetworks().ToResource(), + "databricks_mws_network_connectivity_config": mws.ResourceMwsNetworkConnectivityConfig().ToResource(), + "databricks_mws_permission_assignment": mws.ResourceMwsPermissionAssignment().ToResource(), + "databricks_mws_private_access_settings": mws.ResourceMwsPrivateAccessSettings().ToResource(), + "databricks_mws_storage_configurations": mws.ResourceMwsStorageConfigurations().ToResource(), + "databricks_mws_vpc_endpoint": mws.ResourceMwsVpcEndpoint().ToResource(), + "databricks_mws_workspaces": mws.ResourceMwsWorkspaces().ToResource(), + "databricks_notebook": workspace.ResourceNotebook().ToResource(), + "databricks_notification_destination": settings.ResourceNotificationDestination().ToResource(), + "databricks_obo_token": tokens.ResourceOboToken().ToResource(), + "databricks_online_table": catalog.ResourceOnlineTable().ToResource(), + "databricks_permission_assignment": access.ResourcePermissionAssignment().ToResource(), + "databricks_permissions": permissions.ResourcePermissions().ToResource(), + "databricks_pipeline": pipelines.ResourcePipeline().ToResource(), + "databricks_provider": sharing.ResourceProvider().ToResource(), + "databricks_quality_monitor": catalog.ResourceQualityMonitor().ToResource(), + "databricks_query": sql.ResourceQuery().ToResource(), + "databricks_recipient": sharing.ResourceRecipient().ToResource(), + "databricks_registered_model": catalog.ResourceRegisteredModel().ToResource(), + "databricks_repo": repos.ResourceRepo().ToResource(), + "databricks_schema": catalog.ResourceSchema().ToResource(), + "databricks_secret": secrets.ResourceSecret().ToResource(), + "databricks_secret_scope": secrets.ResourceSecretScope().ToResource(), + "databricks_secret_acl": secrets.ResourceSecretACL().ToResource(), + "databricks_service_principal": scim.ResourceServicePrincipal().ToResource(), + "databricks_service_principal_role": aws.ResourceServicePrincipalRole().ToResource(), + "databricks_service_principal_secret": tokens.ResourceServicePrincipalSecret().ToResource(), + 
"databricks_share": sharing.ResourceShare().ToResource(), + "databricks_sql_dashboard": sql.ResourceSqlDashboard().ToResource(), + "databricks_sql_endpoint": sql.ResourceSqlEndpoint().ToResource(), + "databricks_sql_global_config": sql.ResourceSqlGlobalConfig().ToResource(), + "databricks_sql_permissions": access.ResourceSqlPermissions().ToResource(), + "databricks_sql_query": sql.ResourceSqlQuery().ToResource(), + "databricks_sql_alert": sql.ResourceSqlAlert().ToResource(), + "databricks_sql_table": catalog.ResourceSqlTable().ToResource(), + "databricks_sql_visualization": sql.ResourceSqlVisualization().ToResource(), + "databricks_sql_widget": sql.ResourceSqlWidget().ToResource(), + "databricks_storage_credential": catalog.ResourceStorageCredential().ToResource(), + "databricks_system_schema": catalog.ResourceSystemSchema().ToResource(), + "databricks_table": catalog.ResourceTable().ToResource(), + "databricks_token": tokens.ResourceToken().ToResource(), + "databricks_user": scim.ResourceUser().ToResource(), + "databricks_user_instance_profile": aws.ResourceUserInstanceProfile().ToResource(), + "databricks_user_role": aws.ResourceUserRole().ToResource(), + "databricks_vector_search_endpoint": vectorsearch.ResourceVectorSearchEndpoint().ToResource(), + "databricks_vector_search_index": vectorsearch.ResourceVectorSearchIndex().ToResource(), + "databricks_volume": catalog.ResourceVolume().ToResource(), + "databricks_workspace_binding": catalog.ResourceWorkspaceBinding().ToResource(), + "databricks_workspace_conf": workspace.ResourceWorkspaceConf().ToResource(), + "databricks_workspace_file": workspace.ResourceWorkspaceFile().ToResource(), + } + + // Remove the resources and data sources that are being migrated to plugin framework + for _, dataSourceToRemove := range pluginfw.GetSdkV2DataSourcesToRemove(sdkV2Fallbacks...) { + if _, ok := dataSourceMap[dataSourceToRemove]; !ok { + panic(fmt.Sprintf("data source %s not found", dataSourceToRemove)) + } + delete(dataSourceMap, dataSourceToRemove) + } + + for _, resourceToRemove := range pluginfw.GetSdkV2ResourcesToRemove(sdkV2Fallbacks...) 
{ + if _, ok := resourceMap[resourceToRemove]; !ok { + panic(fmt.Sprintf("resource %s not found", resourceToRemove)) + } + delete(resourceMap, resourceToRemove) + } + p := &schema.Provider{ - DataSourcesMap: map[string]*schema.Resource{ // must be in alphabetical order - "databricks_aws_crossaccount_policy": aws.DataAwsCrossaccountPolicy().ToResource(), - "databricks_aws_assume_role_policy": aws.DataAwsAssumeRolePolicy().ToResource(), - "databricks_aws_bucket_policy": aws.DataAwsBucketPolicy().ToResource(), - "databricks_aws_unity_catalog_assume_role_policy": aws.DataAwsUnityCatalogAssumeRolePolicy().ToResource(), - "databricks_aws_unity_catalog_policy": aws.DataAwsUnityCatalogPolicy().ToResource(), - "databricks_cluster": clusters.DataSourceCluster().ToResource(), - "databricks_clusters": clusters.DataSourceClusters().ToResource(), - "databricks_cluster_policy": policies.DataSourceClusterPolicy().ToResource(), - "databricks_catalog": catalog.DataSourceCatalog().ToResource(), - "databricks_catalogs": catalog.DataSourceCatalogs().ToResource(), - "databricks_current_config": mws.DataSourceCurrentConfiguration().ToResource(), - "databricks_current_metastore": catalog.DataSourceCurrentMetastore().ToResource(), - "databricks_current_user": scim.DataSourceCurrentUser().ToResource(), - "databricks_dbfs_file": storage.DataSourceDbfsFile().ToResource(), - "databricks_dbfs_file_paths": storage.DataSourceDbfsFilePaths().ToResource(), - "databricks_directory": workspace.DataSourceDirectory().ToResource(), - "databricks_external_location": catalog.DataSourceExternalLocation().ToResource(), - "databricks_external_locations": catalog.DataSourceExternalLocations().ToResource(), - "databricks_group": scim.DataSourceGroup().ToResource(), - "databricks_instance_pool": pools.DataSourceInstancePool().ToResource(), - "databricks_instance_profiles": aws.DataSourceInstanceProfiles().ToResource(), - "databricks_jobs": jobs.DataSourceJobs().ToResource(), - "databricks_job": jobs.DataSourceJob().ToResource(), - "databricks_metastore": catalog.DataSourceMetastore().ToResource(), - "databricks_metastores": catalog.DataSourceMetastores().ToResource(), - "databricks_mlflow_experiment": mlflow.DataSourceExperiment().ToResource(), - "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), - "databricks_mlflow_models": mlflow.DataSourceModels().ToResource(), - "databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), - "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), - "databricks_node_type": clusters.DataSourceNodeType().ToResource(), - "databricks_notebook": workspace.DataSourceNotebook().ToResource(), - "databricks_notebook_paths": workspace.DataSourceNotebookPaths().ToResource(), - "databricks_pipelines": pipelines.DataSourcePipelines().ToResource(), - "databricks_schema": catalog.DataSourceSchema().ToResource(), - "databricks_schemas": catalog.DataSourceSchemas().ToResource(), - "databricks_service_principal": scim.DataSourceServicePrincipal().ToResource(), - "databricks_service_principals": scim.DataSourceServicePrincipals().ToResource(), - "databricks_share": sharing.DataSourceShare().ToResource(), - "databricks_shares": sharing.DataSourceShares().ToResource(), - "databricks_spark_version": clusters.DataSourceSparkVersion().ToResource(), - "databricks_sql_warehouse": sql.DataSourceWarehouse().ToResource(), - "databricks_sql_warehouses": sql.DataSourceWarehouses().ToResource(), - "databricks_storage_credential": catalog.DataSourceStorageCredential().ToResource(), - 
"databricks_storage_credentials": catalog.DataSourceStorageCredentials().ToResource(), - "databricks_table": catalog.DataSourceTable().ToResource(), - "databricks_tables": catalog.DataSourceTables().ToResource(), - "databricks_views": catalog.DataSourceViews().ToResource(), - "databricks_volume": catalog.DataSourceVolume().ToResource(), - "databricks_volumes": catalog.DataSourceVolumes().ToResource(), - "databricks_user": scim.DataSourceUser().ToResource(), - "databricks_zones": clusters.DataSourceClusterZones().ToResource(), - }, - ResourcesMap: map[string]*schema.Resource{ // must be in alphabetical order - "databricks_access_control_rule_set": permissions.ResourceAccessControlRuleSet().ToResource(), - "databricks_alert": sql.ResourceAlert().ToResource(), - "databricks_artifact_allowlist": catalog.ResourceArtifactAllowlist().ToResource(), - "databricks_aws_s3_mount": storage.ResourceAWSS3Mount().ToResource(), - "databricks_azure_adls_gen1_mount": storage.ResourceAzureAdlsGen1Mount().ToResource(), - "databricks_azure_adls_gen2_mount": storage.ResourceAzureAdlsGen2Mount().ToResource(), - "databricks_azure_blob_mount": storage.ResourceAzureBlobMount().ToResource(), - "databricks_budget": finops.ResourceBudget().ToResource(), - "databricks_catalog": catalog.ResourceCatalog().ToResource(), - "databricks_catalog_workspace_binding": catalog.ResourceCatalogWorkspaceBinding().ToResource(), - "databricks_custom_app_integration": apps.ResourceCustomAppIntegration().ToResource(), - "databricks_connection": catalog.ResourceConnection().ToResource(), - "databricks_cluster": clusters.ResourceCluster().ToResource(), - "databricks_cluster_policy": policies.ResourceClusterPolicy().ToResource(), - "databricks_dashboard": dashboards.ResourceDashboard().ToResource(), - "databricks_dbfs_file": storage.ResourceDbfsFile().ToResource(), - "databricks_directory": workspace.ResourceDirectory().ToResource(), - "databricks_entitlements": scim.ResourceEntitlements().ToResource(), - "databricks_external_location": catalog.ResourceExternalLocation().ToResource(), - "databricks_file": storage.ResourceFile().ToResource(), - "databricks_git_credential": repos.ResourceGitCredential().ToResource(), - "databricks_global_init_script": workspace.ResourceGlobalInitScript().ToResource(), - "databricks_grant": catalog.ResourceGrant().ToResource(), - "databricks_grants": catalog.ResourceGrants().ToResource(), - "databricks_group": scim.ResourceGroup().ToResource(), - "databricks_group_instance_profile": aws.ResourceGroupInstanceProfile().ToResource(), - "databricks_group_member": scim.ResourceGroupMember().ToResource(), - "databricks_group_role": scim.ResourceGroupRole().ToResource(), - "databricks_instance_pool": pools.ResourceInstancePool().ToResource(), - "databricks_instance_profile": aws.ResourceInstanceProfile().ToResource(), - "databricks_ip_access_list": access.ResourceIPAccessList().ToResource(), - "databricks_job": jobs.ResourceJob().ToResource(), - "databricks_lakehouse_monitor": catalog.ResourceLakehouseMonitor().ToResource(), - "databricks_library": clusters.ResourceLibrary().ToResource(), - "databricks_metastore": catalog.ResourceMetastore().ToResource(), - "databricks_metastore_assignment": catalog.ResourceMetastoreAssignment().ToResource(), - "databricks_metastore_data_access": catalog.ResourceMetastoreDataAccess().ToResource(), - "databricks_mlflow_experiment": mlflow.ResourceMlflowExperiment().ToResource(), - "databricks_mlflow_model": mlflow.ResourceMlflowModel().ToResource(), - "databricks_mlflow_webhook": 
mlflow.ResourceMlflowWebhook().ToResource(), - "databricks_model_serving": serving.ResourceModelServing().ToResource(), - "databricks_mount": storage.ResourceMount().ToResource(), - "databricks_mws_customer_managed_keys": mws.ResourceMwsCustomerManagedKeys().ToResource(), - "databricks_mws_credentials": mws.ResourceMwsCredentials().ToResource(), - "databricks_mws_log_delivery": mws.ResourceMwsLogDelivery().ToResource(), - "databricks_mws_ncc_binding": mws.ResourceMwsNccBinding().ToResource(), - "databricks_mws_ncc_private_endpoint_rule": mws.ResourceMwsNccPrivateEndpointRule().ToResource(), - "databricks_mws_networks": mws.ResourceMwsNetworks().ToResource(), - "databricks_mws_network_connectivity_config": mws.ResourceMwsNetworkConnectivityConfig().ToResource(), - "databricks_mws_permission_assignment": mws.ResourceMwsPermissionAssignment().ToResource(), - "databricks_mws_private_access_settings": mws.ResourceMwsPrivateAccessSettings().ToResource(), - "databricks_mws_storage_configurations": mws.ResourceMwsStorageConfigurations().ToResource(), - "databricks_mws_vpc_endpoint": mws.ResourceMwsVpcEndpoint().ToResource(), - "databricks_mws_workspaces": mws.ResourceMwsWorkspaces().ToResource(), - "databricks_notebook": workspace.ResourceNotebook().ToResource(), - "databricks_notification_destination": settings.ResourceNotificationDestination().ToResource(), - "databricks_obo_token": tokens.ResourceOboToken().ToResource(), - "databricks_online_table": catalog.ResourceOnlineTable().ToResource(), - "databricks_permission_assignment": access.ResourcePermissionAssignment().ToResource(), - "databricks_permissions": permissions.ResourcePermissions().ToResource(), - "databricks_pipeline": pipelines.ResourcePipeline().ToResource(), - "databricks_provider": sharing.ResourceProvider().ToResource(), - "databricks_quality_monitor": catalog.ResourceQualityMonitor().ToResource(), - "databricks_query": sql.ResourceQuery().ToResource(), - "databricks_recipient": sharing.ResourceRecipient().ToResource(), - "databricks_registered_model": catalog.ResourceRegisteredModel().ToResource(), - "databricks_repo": repos.ResourceRepo().ToResource(), - "databricks_schema": catalog.ResourceSchema().ToResource(), - "databricks_secret": secrets.ResourceSecret().ToResource(), - "databricks_secret_scope": secrets.ResourceSecretScope().ToResource(), - "databricks_secret_acl": secrets.ResourceSecretACL().ToResource(), - "databricks_service_principal": scim.ResourceServicePrincipal().ToResource(), - "databricks_service_principal_role": aws.ResourceServicePrincipalRole().ToResource(), - "databricks_service_principal_secret": tokens.ResourceServicePrincipalSecret().ToResource(), - "databricks_share": sharing.ResourceShare().ToResource(), - "databricks_sql_dashboard": sql.ResourceSqlDashboard().ToResource(), - "databricks_sql_endpoint": sql.ResourceSqlEndpoint().ToResource(), - "databricks_sql_global_config": sql.ResourceSqlGlobalConfig().ToResource(), - "databricks_sql_permissions": access.ResourceSqlPermissions().ToResource(), - "databricks_sql_query": sql.ResourceSqlQuery().ToResource(), - "databricks_sql_alert": sql.ResourceSqlAlert().ToResource(), - "databricks_sql_table": catalog.ResourceSqlTable().ToResource(), - "databricks_sql_visualization": sql.ResourceSqlVisualization().ToResource(), - "databricks_sql_widget": sql.ResourceSqlWidget().ToResource(), - "databricks_storage_credential": catalog.ResourceStorageCredential().ToResource(), - "databricks_system_schema": catalog.ResourceSystemSchema().ToResource(), - 
"databricks_table": catalog.ResourceTable().ToResource(), - "databricks_token": tokens.ResourceToken().ToResource(), - "databricks_user": scim.ResourceUser().ToResource(), - "databricks_user_instance_profile": aws.ResourceUserInstanceProfile().ToResource(), - "databricks_user_role": aws.ResourceUserRole().ToResource(), - "databricks_vector_search_endpoint": vectorsearch.ResourceVectorSearchEndpoint().ToResource(), - "databricks_vector_search_index": vectorsearch.ResourceVectorSearchIndex().ToResource(), - "databricks_volume": catalog.ResourceVolume().ToResource(), - "databricks_workspace_binding": catalog.ResourceWorkspaceBinding().ToResource(), - "databricks_workspace_conf": workspace.ResourceWorkspaceConf().ToResource(), - "databricks_workspace_file": workspace.ResourceWorkspaceFile().ToResource(), - }, - Schema: providerSchema(), + DataSourcesMap: dataSourceMap, + ResourcesMap: resourceMap, + Schema: providerSchema(), } for name, resource := range settings.AllSettingsResources() { p.ResourcesMap[fmt.Sprintf("databricks_%s_setting", name)] = resource.ToResource() From 0fbfbf4741a1d69a9b62bc0457263c8d49c19bdc Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Fri, 1 Nov 2024 03:31:31 -0400 Subject: [PATCH 83/99] [Exporter] Allow to match resource names by regular expression (#4177) ## Changes In addition to the existing `-match` option, this PR allows the matching of names by regex during the listing operation. There are new options: - `-matchRegex` - checks if name matches a regex - this could be useful for exporting notebooks for only specific users, or something like that. - `-excludeRegex` - checks if name matches a regex, and skips processing of that object. For example, it could be used to exclude `databricks_automl` directories. This parameter has higher priority than the `-match` and `-matchRegex`. - `filterDirectoriesDuringWorkspaceWalking` - if we should apply match logic to directory names when we're performing workspace tree walking. *Note: be careful with it as it will be applied to all entries, so if you want to filter only specific users, then you will need to specify the condition for `/Users` as well, so regex will be `^(/Users|/Users/[a-c].*)$`* ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --------- Co-authored-by: Miles Yucht --- docs/guides/experimental-exporter.md | 3 + exporter/command.go | 8 ++ exporter/context.go | 67 ++++++++---- exporter/exporter_test.go | 151 ++++++++++++++++++++++++++- exporter/util.go | 8 +- exporter/util_test.go | 12 +-- exporter/util_workspace.go | 17 ++- 7 files changed, 230 insertions(+), 36 deletions(-) diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index 6f41bf6154..9e0f357c03 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -61,6 +61,9 @@ All arguments are optional, and they tune what code is being generated. * `-listing` - Comma-separated list of services to be listed and further passed on for importing. For each service specified, the exporter performs a listing of available resources using the `List` function and emits them for importing together with their dependencies. The `-services` parameter could be used to control which transitive dependencies will be also imported. * `-services` - Comma-separated list of services to import. By default, all services are imported. 
* `-match` - Match resource names during listing operation. This filter applies to all resources that are getting listed, so if you want to import all dependencies of just one cluster, specify `-match=autoscaling -listing=compute`. By default, it is empty, which matches everything. +* `-matchRegex` - Match resource names against a given regex during listing operation. Applicable to all resources selected for listing. +* `-excludeRegex` - Exclude resource names matching a given regex. Applied during the listing operation and has higher priority than `-match` and `-matchRegex`. Applicable to all resources selected for listing. Could be used to exclude things like `databricks_automl` notebooks, etc. +* `-filterDirectoriesDuringWorkspaceWalking` - if we should apply match logic to directory names when we're performing workspace tree walking. *Note: be careful with it as it will be applied to all entries, so if you want to filter only specific users, then you will need to specify condition for `/Users` as well, so regex will be `^(/Users|/Users/[a-c].*)$`*. * `-mounts` - List DBFS mount points, an extremely slow operation that would not trigger unless explicitly specified. * `-generateProviderDeclaration` - the flag that toggles the generation of `databricks.tf` file with the declaration of the Databricks Terraform provider that is necessary for Terraform versions since Terraform 0.13 (disabled by default). * `-prefix` - optional prefix that will be added to the name of all exported resources - that's useful for exporting resources from multiple workspaces for merging into a single one. diff --git a/exporter/command.go b/exporter/command.go index 5e40b9a039..72eb8f25dd 100644 --- a/exporter/command.go +++ b/exporter/command.go @@ -131,6 +131,8 @@ func Run(args ...string) error { flags.BoolVar(&ic.mounts, "mounts", false, "List DBFS mount points.") flags.BoolVar(&ic.generateDeclaration, "generateProviderDeclaration", true, "Generate Databricks provider declaration.") + flags.BoolVar(&ic.filterDirectoriesDuringWorkspaceWalking, "filterDirectoriesDuringWorkspaceWalking", false, + "Apply filtering to directory names during workspace walking") flags.StringVar(&ic.notebooksFormat, "notebooksFormat", "SOURCE", "Format to export notebooks: SOURCE, DBC, JUPYTER. Default: SOURCE") services, listing := ic.allServicesAndListing() @@ -145,6 +147,12 @@ func Run(args ...string) error { flags.StringVar(&ic.match, "match", "", "Match resource names during listing operation. "+ "This filter applies to all resources that are getting listed, so if you want to import "+ "all dependencies of just one cluster, specify -listing=compute") + flags.StringVar(&ic.matchRegexStr, "matchRegex", "", "Match resource names during listing operation against a regex. "+ + "This filter applies to all resources that are getting listed, so if you want to import "+ + "all dependencies of just one cluster, specify -listing=compute") + flags.StringVar(&ic.excludeRegexStr, "excludeRegex", "", "Exclude resource names matching regex during listing operation. 
"+ + "This filter applies to all resources that are getting listed, so if you want to import "+ + "all dependencies of just one cluster, specify -listing=compute") prefix := "" flags.StringVar(&prefix, "prefix", "", "Prefix that will be added to the name of all exported resources") newArgs := args diff --git a/exporter/context.go b/exporter/context.go index ffb230a4e8..bfba5d24f1 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -78,28 +78,33 @@ type importContext struct { Scope importedResources // command-line resources (immutable, or set by the single thread) - includeUserDomains bool - importAllUsers bool - exportDeletedUsersAssets bool - incremental bool - mounts bool - noFormat bool - nativeImportSupported bool - services map[string]struct{} - listing map[string]struct{} - match string - lastActiveDays int64 - lastActiveMs int64 - generateDeclaration bool - exportSecrets bool - meAdmin bool - meUserName string - prefix string - accountLevel bool - shImports map[string]bool - notebooksFormat string - updatedSinceStr string - updatedSinceMs int64 + includeUserDomains bool + importAllUsers bool + exportDeletedUsersAssets bool + incremental bool + mounts bool + noFormat bool + nativeImportSupported bool + services map[string]struct{} + listing map[string]struct{} + match string + matchRegexStr string + matchRegex *regexp.Regexp + excludeRegexStr string + excludeRegex *regexp.Regexp + filterDirectoriesDuringWorkspaceWalking bool + lastActiveDays int64 + lastActiveMs int64 + generateDeclaration bool + exportSecrets bool + meAdmin bool + meUserName string + prefix string + accountLevel bool + shImports map[string]bool + notebooksFormat string + updatedSinceStr string + updatedSinceMs int64 waitGroup *sync.WaitGroup @@ -297,6 +302,24 @@ func (ic *importContext) Run() error { return fmt.Errorf("no services to import") } + if ic.matchRegexStr != "" { + log.Printf("[DEBUG] Using regex '%s' to filter resources", ic.matchRegexStr) + re, err := regexp.Compile(ic.matchRegexStr) + if err != nil { + log.Printf("[ERROR] can't compile regex '%s': %v", ic.matchRegexStr, err) + return err + } + ic.matchRegex = re + } + if ic.excludeRegexStr != "" { + log.Printf("[DEBUG] Using regex '%s' to filter resources", ic.excludeRegexStr) + re, err := regexp.Compile(ic.excludeRegexStr) + if err != nil { + log.Printf("[ERROR] can't compile regex '%s': %v", ic.excludeRegexStr, err) + return err + } + ic.excludeRegex = re + } if ic.incremental { if ic.updatedSinceStr == "" { ic.updatedSinceStr = getLastRunString(statsFileName) diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 9c2f64cf15..ad485b9557 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -2349,7 +2349,7 @@ func TestImportingGlobalSqlConfig(t *testing.T) { }) } -func TestImportingNotebooksWorkspaceFiles(t *testing.T) { +func TestImportingNotebooksWorkspaceFilesWithFilter(t *testing.T) { fileStatus := workspace.ObjectStatus{ ObjectID: 123, ObjectType: workspace.File, @@ -2371,7 +2371,135 @@ func TestImportingNotebooksWorkspaceFiles(t *testing.T) { Method: "GET", Resource: "/api/2.0/workspace/list?path=%2F", Response: workspace.ObjectList{ - Objects: []workspace.ObjectStatus{notebookStatus, fileStatus}, + Objects: []workspace.ObjectStatus{notebookStatus, fileStatus, + { + ObjectID: 4567, + ObjectType: workspace.Notebook, + Path: "/UnmatchedNotebook", + Language: "PYTHON", + }, + { + ObjectID: 1234, + ObjectType: workspace.File, + Path: "/UnmatchedFile", + }, + { + ObjectID: 456, + ObjectType: 
workspace.Directory, + Path: "/databricks_automl", + }, + { + ObjectID: 456, + ObjectType: workspace.Directory, + Path: "/.bundle", + }, + }, + }, + ReuseRequest: true, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/list?path=%2Fdatabricks_automl", + Response: workspace.ObjectList{}, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/get-status?path=%2FNotebook", + Response: notebookStatus, + ReuseRequest: true, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/get-status?path=%2FFile", + Response: fileStatus, + ReuseRequest: true, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/export?format=AUTO&path=%2FFile", + Response: workspace.ExportPath{ + Content: "dGVzdA==", + }, + ReuseRequest: true, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/export?format=SOURCE&path=%2FNotebook", + Response: workspace.ExportPath{ + Content: "dGVzdA==", + }, + ReuseRequest: true, + }, + }, + func(ctx context.Context, client *common.DatabricksClient) { + tmpDir := fmt.Sprintf("/tmp/tf-%s", qa.RandomName()) + defer os.RemoveAll(tmpDir) + + ic := newImportContext(client) + ic.Directory = tmpDir + ic.enableListing("notebooks,wsfiles") + ic.excludeRegexStr = "databricks_automl" + ic.matchRegexStr = "^/[FN].*$" + + err := ic.Run() + assert.NoError(t, err) + // check generated code for notebooks + content, err := os.ReadFile(tmpDir + "/notebooks.tf") + assert.NoError(t, err) + contentStr := string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_notebook" "notebook_456"`)) + assert.True(t, strings.Contains(contentStr, `path = "/Notebook"`)) + assert.False(t, strings.Contains(contentStr, `/UnmatchedNotebook`)) + // check generated code for workspace files + content, err = os.ReadFile(tmpDir + "/wsfiles.tf") + assert.NoError(t, err) + contentStr = string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_workspace_file" "file_123"`)) + assert.True(t, strings.Contains(contentStr, `path = "/File"`)) + assert.False(t, strings.Contains(contentStr, `/UnmatchedFile`)) + }) +} + +func TestImportingNotebooksWorkspaceFilesWithFilterDuringWalking(t *testing.T) { + fileStatus := workspace.ObjectStatus{ + ObjectID: 123, + ObjectType: workspace.File, + Path: "/File", + } + notebookStatus := workspace.ObjectStatus{ + ObjectID: 456, + ObjectType: workspace.Notebook, + Path: "/Notebook", + Language: "PYTHON", + } + qa.HTTPFixturesApply(t, + []qa.HTTPFixture{ + meAdminFixture, + noCurrentMetastoreAttached, + emptyRepos, + emptyIpAccessLIst, + { + Method: "GET", + Resource: "/api/2.0/workspace/list?path=%2F", + Response: workspace.ObjectList{ + Objects: []workspace.ObjectStatus{notebookStatus, fileStatus, + { + ObjectID: 4567, + ObjectType: workspace.Notebook, + Path: "/UnmatchedNotebook", + Language: "PYTHON", + }, + { + ObjectID: 1234, + ObjectType: workspace.File, + Path: "/UnmatchedFile", + }, + { + ObjectID: 456, + ObjectType: workspace.Directory, + Path: "/databricks_automl", + }, + }, }, ReuseRequest: true, }, @@ -2410,10 +2538,27 @@ func TestImportingNotebooksWorkspaceFiles(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.enableListing("notebooks") + ic.enableListing("notebooks,wsfiles") + ic.excludeRegexStr = "databricks_automl" + ic.matchRegexStr = "^/[FN].*$" + ic.filterDirectoriesDuringWorkspaceWalking = true err := ic.Run() assert.NoError(t, err) + // check generated code for notebooks + content, err := os.ReadFile(tmpDir + "/notebooks.tf") + assert.NoError(t, err) + contentStr := string(content) 
+ assert.True(t, strings.Contains(contentStr, `resource "databricks_notebook" "notebook_456"`)) + assert.True(t, strings.Contains(contentStr, `path = "/Notebook"`)) + assert.False(t, strings.Contains(contentStr, `/UnmatchedNotebook`)) + // check generated code for workspace files + content, err = os.ReadFile(tmpDir + "/wsfiles.tf") + assert.NoError(t, err) + contentStr = string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_workspace_file" "file_123"`)) + assert.True(t, strings.Contains(contentStr, `path = "/File"`)) + assert.False(t, strings.Contains(contentStr, `/UnmatchedFile`)) }) } diff --git a/exporter/util.go b/exporter/util.go index e9380a9b56..5e4f53dcaa 100644 --- a/exporter/util.go +++ b/exporter/util.go @@ -35,9 +35,15 @@ func (ic *importContext) isServiceInListing(service string) bool { } func (ic *importContext) MatchesName(n string) bool { - if ic.match == "" { + if ic.match == "" && ic.matchRegex == nil && ic.excludeRegex == nil { return true } + if ic.excludeRegex != nil && ic.excludeRegex.MatchString(n) { + return false + } + if ic.matchRegex != nil { + return ic.matchRegex.MatchString(n) + } return strings.Contains(strings.ToLower(n), strings.ToLower(ic.match)) } diff --git a/exporter/util_test.go b/exporter/util_test.go index 588c831db7..912baa78b4 100644 --- a/exporter/util_test.go +++ b/exporter/util_test.go @@ -316,16 +316,16 @@ func TestGetEnvAsInt(t *testing.T) { } func TestExcludeAuxiliaryDirectories(t *testing.T) { - assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "", ObjectType: workspace.Directory})) - assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{ObjectType: workspace.File})) - assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc", + assert.False(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "", ObjectType: workspace.Directory})) + assert.False(t, isAuxiliaryDirectory(workspace.ObjectStatus{ObjectType: workspace.File})) + assert.False(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc", ObjectType: workspace.Directory})) // should be ignored - assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/.ide", + assert.True(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "/Users/user@domain.com/.ide", ObjectType: workspace.Directory})) - assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Shared/.bundle", + assert.True(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "/Shared/.bundle", ObjectType: workspace.Directory})) - assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc/__pycache__", + assert.True(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc/__pycache__", ObjectType: workspace.Directory})) } diff --git a/exporter/util_workspace.go b/exporter/util_workspace.go index 5a5621f806..8dcbefbaf0 100644 --- a/exporter/util_workspace.go +++ b/exporter/util_workspace.go @@ -93,17 +93,18 @@ func (ic *importContext) getAllDirectories() []workspace.ObjectStatus { var directoriesToIgnore = []string{".ide", ".bundle", "__pycache__"} // TODO: add ignoring directories of deleted users? This could potentially decrease the number of processed objects... 
-func excludeAuxiliaryDirectories(v workspace.ObjectStatus) bool { +func isAuxiliaryDirectory(v workspace.ObjectStatus) bool { if v.ObjectType != workspace.Directory { - return true + return false } // TODO: rewrite to use suffix check, etc., instead of split and slice contains? parts := strings.Split(v.Path, "/") result := len(parts) > 1 && slices.Contains[[]string, string](directoriesToIgnore, parts[len(parts)-1]) + log.Printf("[DEBUG] directory %s: %v", v.Path, result) if result { log.Printf("[DEBUG] Ignoring directory %s", v.Path) } - return !result + return result } func (ic *importContext) getAllWorkspaceObjects(visitor func([]workspace.ObjectStatus)) []workspace.ObjectStatus { @@ -113,7 +114,15 @@ func (ic *importContext) getAllWorkspaceObjects(visitor func([]workspace.ObjectS t1 := time.Now() log.Print("[INFO] Starting to list all workspace objects") notebooksAPI := workspace.NewNotebooksAPI(ic.Context, ic.Client) - ic.allWorkspaceObjects, _ = ListParallel(notebooksAPI, "/", excludeAuxiliaryDirectories, visitor) + shouldIncludeDirectory := func(v workspace.ObjectStatus) bool { + decision := !isAuxiliaryDirectory(v) + if decision && ic.filterDirectoriesDuringWorkspaceWalking { + decision = ic.MatchesName(v.Path) + } + // log.Printf("[DEBUG] decision of shouldIncludeDirectory for %s: %v", v.Path, decision) + return decision + } + ic.allWorkspaceObjects, _ = ListParallel(notebooksAPI, "/", shouldIncludeDirectory, visitor) log.Printf("[INFO] Finished listing of all workspace objects. %d objects in total. %v seconds", len(ic.allWorkspaceObjects), time.Since(t1).Seconds()) } From 28b8f4934c116d3d861703cf4dba55c0614ef535 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Fri, 1 Nov 2024 10:52:22 +0100 Subject: [PATCH 84/99] [Internal] Always write message for manual test integration (#4188) ## Changes Old script could not be run from master due to security restrictions and there is no reliable way to detect if a user as secrets. ## Tests Opened a PR in SDK Java from fork https://github.com/databricks/databricks-sdk-java/pull/375 --- .github/workflows/external-message.yml | 68 ++----------------------- .github/workflows/integration-tests.yml | 9 ++-- 2 files changed, 10 insertions(+), 67 deletions(-) diff --git a/.github/workflows/external-message.yml b/.github/workflows/external-message.yml index b9534520a0..d9a715d62f 100644 --- a/.github/workflows/external-message.yml +++ b/.github/workflows/external-message.yml @@ -11,7 +11,6 @@ on: branches: - main - jobs: comment-on-pr: runs-on: ubuntu-latest @@ -19,73 +18,15 @@ jobs: pull-requests: write steps: - # NOTE: The following checks may not be accurate depending on Org or Repo settings. 
- - name: Check user and potential secret access - id: check-secrets-access - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - USER_LOGIN="${{ github.event.pull_request.user.login }}" - REPO_OWNER="${{ github.repository_owner }}" - REPO_NAME="${{ github.event.repository.name }}" - - echo "Pull request opened by: $USER_LOGIN" - - # Check if PR is from a fork - IS_FORK=$([[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && echo "true" || echo "false") - - HAS_ACCESS="false" - - # Check user's permission level on the repository - USER_PERMISSION=$(gh api repos/$REPO_OWNER/$REPO_NAME/collaborators/$USER_LOGIN/permission --jq '.permission') - - if [[ "$USER_PERMISSION" == "admin" || "$USER_PERMISSION" == "write" ]]; then - HAS_ACCESS="true" - elif [[ "$USER_PERMISSION" == "read" ]]; then - # For read access, we need to check if the user has been explicitly granted secret access - # This information is not directly available via API, so we'll make an assumption - # that read access does not imply secret access - HAS_ACCESS="false" - fi - - # Check if repo owner is an organization - IS_ORG=$(gh api users/$REPO_OWNER --jq '.type == "Organization"') - - if [[ "$IS_ORG" == "true" && "$HAS_ACCESS" == "false" ]]; then - # Check if user is a member of any team with write or admin access to the repo - TEAMS_WITH_ACCESS=$(gh api repos/$REPO_OWNER/$REPO_NAME/teams --jq '.[] | select(.permission == "push" or .permission == "admin") | .slug') - for team in $TEAMS_WITH_ACCESS; do - IS_TEAM_MEMBER=$(gh api orgs/$REPO_OWNER/teams/$team/memberships/$USER_LOGIN --silent && echo "true" || echo "false") - if [[ "$IS_TEAM_MEMBER" == "true" ]]; then - HAS_ACCESS="true" - break - fi - done - fi - - # If it's a fork, set HAS_ACCESS to false regardless of other checks - if [[ "$IS_FORK" == "true" ]]; then - HAS_ACCESS="false" - fi - - echo "has_secrets_access=$HAS_ACCESS" >> $GITHUB_OUTPUT - if [[ "$HAS_ACCESS" == "true" ]]; then - echo "User $USER_LOGIN likely has access to secrets" - else - echo "User $USER_LOGIN likely does not have access to secrets" - fi - - - uses: actions/checkout@v4 - name: Delete old comments - if: steps.check-secrets-access.outputs.has_secrets_access != 'true' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | # Delete previous comment if it exists previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \ - --jq '.[] | select(.body | startswith("")) | .id') + --jq '.[] | select(.body | startswith("")) | .id') echo "Previous comment IDs: $previous_comment_ids" # Iterate over each comment ID and delete the comment if [ ! 
-z "$previous_comment_ids" ]; then @@ -96,14 +37,15 @@ jobs: fi - name: Comment on PR - if: steps.check-secrets-access.outputs.has_secrets_access != 'true' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} COMMIT_SHA: ${{ github.event.pull_request.head.sha }} run: | gh pr comment ${{ github.event.pull_request.number }} --body \ - " - Run integration tests manually: + " + If integration tests don't run automatically, an authorized user can run them manually by following the instructions below: + + Trigger: [go/deco-tests-run/terraform](https://go/deco-tests-run/terraform) Inputs: diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 67ed709365..653a36c644 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -12,17 +12,18 @@ jobs: check-token: name: Check secrets access runs-on: ubuntu-latest + environment: "test-trigger-is" outputs: has_token: ${{ steps.set-token-status.outputs.has_token }} steps: - - name: Check if GITHUB_TOKEN is set + - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set id: set-token-status run: | - if [ -z "${{ secrets.GITHUB_TOKEN }}" ]; then - echo "GITHUB_TOKEN is empty. User has no access to tokens." + if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then + echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets." echo "::set-output name=has_token::false" else - echo "GITHUB_TOKEN is set. User has no access to tokens." + echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets." echo "::set-output name=has_token::true" fi From 1e067f7e7914d4493fe4736aca8ee2f596ab0169 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Mon, 4 Nov 2024 14:21:08 -0500 Subject: [PATCH 85/99] [Internal] Make `Read` after `Create`/`Update` configurable (#4190) ## Changes This PR adds the ability for a resource to specify that it may not need to call `Read` after `Create` and `Update` operations so we can avoid performing another API call(s). The resource may implement `CanSkipReadAfterCreateAndUpdate` function that can decide if the `Read` operation should be skipped. 
I decided to move common part from #4173 to make it easier to review ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [ ] using Go SDK --- common/resource.go | 29 +++++++++----- common/resource_test.go | 89 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+), 11 deletions(-) diff --git a/common/resource.go b/common/resource.go index 4e357305db..fb8a09e5bb 100644 --- a/common/resource.go +++ b/common/resource.go @@ -16,17 +16,18 @@ import ( // Resource aims to simplify things like error & deleted entities handling type Resource struct { - Create func(ctx context.Context, d *schema.ResourceData, c *DatabricksClient) error - Read func(ctx context.Context, d *schema.ResourceData, c *DatabricksClient) error - Update func(ctx context.Context, d *schema.ResourceData, c *DatabricksClient) error - Delete func(ctx context.Context, d *schema.ResourceData, c *DatabricksClient) error - CustomizeDiff func(ctx context.Context, d *schema.ResourceDiff) error - StateUpgraders []schema.StateUpgrader - Schema map[string]*schema.Schema - SchemaVersion int - Timeouts *schema.ResourceTimeout - DeprecationMessage string - Importer *schema.ResourceImporter + Create func(ctx context.Context, d *schema.ResourceData, c *DatabricksClient) error + Read func(ctx context.Context, d *schema.ResourceData, c *DatabricksClient) error + Update func(ctx context.Context, d *schema.ResourceData, c *DatabricksClient) error + Delete func(ctx context.Context, d *schema.ResourceData, c *DatabricksClient) error + CustomizeDiff func(ctx context.Context, d *schema.ResourceDiff) error + StateUpgraders []schema.StateUpgrader + Schema map[string]*schema.Schema + SchemaVersion int + Timeouts *schema.ResourceTimeout + DeprecationMessage string + Importer *schema.ResourceImporter + CanSkipReadAfterCreateAndUpdate func(d *schema.ResourceData) bool } func nicerError(ctx context.Context, err error, action string) error { @@ -94,6 +95,9 @@ func (r Resource) ToResource() *schema.Resource { err = nicerError(ctx, err, "update") return diag.FromErr(err) } + if r.CanSkipReadAfterCreateAndUpdate != nil && r.CanSkipReadAfterCreateAndUpdate(d) { + return nil + } if err := recoverable(r.Read)(ctx, d, c); err != nil { err = nicerError(ctx, err, "read") return diag.FromErr(err) @@ -162,6 +166,9 @@ func (r Resource) ToResource() *schema.Resource { err = nicerError(ctx, err, "create") return diag.FromErr(err) } + if r.CanSkipReadAfterCreateAndUpdate != nil && r.CanSkipReadAfterCreateAndUpdate(d) { + return nil + } if err = recoverable(r.Read)(ctx, d, c); err != nil { err = nicerError(ctx, err, "read") return diag.FromErr(err) diff --git a/common/resource_test.go b/common/resource_test.go index f01f373ff5..2ece50d284 100644 --- a/common/resource_test.go +++ b/common/resource_test.go @@ -3,6 +3,7 @@ package common import ( "context" "fmt" + "log" "testing" "github.com/databricks/databricks-sdk-go/apierr" @@ -38,6 +39,94 @@ func TestImportingCallsRead(t *testing.T) { assert.Equal(t, 1, d.Get("foo")) } +func createTestResourceForSkipRead(skipRead bool) Resource { + res := Resource{ + Create: func(ctx context.Context, + d *schema.ResourceData, + c *DatabricksClient) error { + log.Println("[DEBUG] Create called") + return d.Set("foo", 1) + }, + Read: func(ctx context.Context, + d *schema.ResourceData, + c *DatabricksClient) error { + log.Println("[DEBUG] Read called") + d.Set("foo", 2) + return nil + }, 
+ Update: func(ctx context.Context, + d *schema.ResourceData, + c *DatabricksClient) error { + log.Println("[DEBUG] Update called") + return d.Set("foo", 3) + }, + Schema: map[string]*schema.Schema{ + "foo": { + Type: schema.TypeInt, + Required: true, + }, + }, + } + if skipRead { + res.CanSkipReadAfterCreateAndUpdate = func(d *schema.ResourceData) bool { + return true + } + } + return res +} + +func TestCreateSkipRead(t *testing.T) { + client := &DatabricksClient{} + ctx := context.Background() + r := createTestResourceForSkipRead(true).ToResource() + d := r.TestResourceData() + diags := r.CreateContext(ctx, d, client) + assert.False(t, diags.HasError()) + assert.Equal(t, 1, d.Get("foo")) +} + +func TestCreateDontSkipRead(t *testing.T) { + client := &DatabricksClient{} + ctx := context.Background() + r := createTestResourceForSkipRead(false).ToResource() + d := r.TestResourceData() + diags := r.CreateContext(ctx, d, client) + assert.False(t, diags.HasError()) + assert.Equal(t, 2, d.Get("foo")) +} + +func TestUpdateSkipRead(t *testing.T) { + client := &DatabricksClient{} + ctx := context.Background() + r := createTestResourceForSkipRead(true).ToResource() + d := r.TestResourceData() + datas, err := r.Importer.StateContext(ctx, d, client) + require.NoError(t, err) + assert.Len(t, datas, 1) + assert.False(t, r.Schema["foo"].ForceNew) + assert.Equal(t, "", d.Id()) + + diags := r.UpdateContext(ctx, d, client) + assert.False(t, diags.HasError()) + assert.Equal(t, 3, d.Get("foo")) +} + +func TestUpdateDontSkipRead(t *testing.T) { + client := &DatabricksClient{} + ctx := context.Background() + r := createTestResourceForSkipRead(false).ToResource() + d := r.TestResourceData() + datas, err := r.Importer.StateContext(ctx, d, client) + require.NoError(t, err) + assert.Len(t, datas, 1) + assert.False(t, r.Schema["foo"].ForceNew) + assert.Equal(t, "", d.Id()) + + diags := r.UpdateContext(ctx, d, client) + assert.False(t, diags.HasError()) + assert.Equal(t, 2, d.Get("foo")) +} + func TestHTTP404TriggersResourceRemovalForReadAndDelete(t *testing.T) { nope := func(ctx context.Context, d *schema.ResourceData, From f381fbf5a6aa37f9858fe8a1e2cb0bcd8ae82460 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Wed, 6 Nov 2024 15:38:19 +0530 Subject: [PATCH 86/99] [Release] Release v1.57.0 (#4193) ### New Features and Improvements * Added `databricks_functions` data source ([#4154](https://github.com/databricks/terraform-provider-databricks/pull/4154)). ### Bug Fixes * Handle edge case for `effective_properties` in `databricks_sql_table` ([#4153](https://github.com/databricks/terraform-provider-databricks/pull/4153)). * Provide more prescriptive error when users fail to create a single node cluster ([#4168](https://github.com/databricks/terraform-provider-databricks/pull/4168)). ### Internal Changes * Add test instructions for external contributors ([#4169](https://github.com/databricks/terraform-provider-databricks/pull/4169)). * Always write message for manual test integration ([#4188](https://github.com/databricks/terraform-provider-databricks/pull/4188)). * Make `Read` after `Create`/`Update` configurable ([#4190](https://github.com/databricks/terraform-provider-databricks/pull/4190)). * Migrate Share Data Source to Plugin Framework ([#4161](https://github.com/databricks/terraform-provider-databricks/pull/4161)). * Migrate Share Resource to Plugin Framework ([#4047](https://github.com/databricks/terraform-provider-databricks/pull/4047)). 
* Rollout Plugin Framework ([#4134](https://github.com/databricks/terraform-provider-databricks/pull/4134)). ### Dependency Updates * Bump Go SDK to v0.50.0 ([#4178](https://github.com/databricks/terraform-provider-databricks/pull/4178)). ### Exporter * Allow to match resource names by regular expression ([#4177](https://github.com/databricks/terraform-provider-databricks/pull/4177)). --- CHANGELOG.md | 33 +++++++++++++++++++++++++++++++++ common/version.go | 2 +- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f2de01332..d2d4139a7d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,38 @@ # Version changelog +## [Release] Release v1.57.0 + +### New Features and Improvements + + * Added `databricks_functions` data source ([#4154](https://github.com/databricks/terraform-provider-databricks/pull/4154)). + + +### Bug Fixes + + * Handle edge case for `effective_properties` in `databricks_sql_table` ([#4153](https://github.com/databricks/terraform-provider-databricks/pull/4153)). + * Provide more prescriptive error when users fail to create a single node cluster ([#4168](https://github.com/databricks/terraform-provider-databricks/pull/4168)). + + +### Internal Changes + + * Add test instructions for external contributors ([#4169](https://github.com/databricks/terraform-provider-databricks/pull/4169)). + * Always write message for manual test integration ([#4188](https://github.com/databricks/terraform-provider-databricks/pull/4188)). + * Make `Read` after `Create`/`Update` configurable ([#4190](https://github.com/databricks/terraform-provider-databricks/pull/4190)). + * Migrate Share Data Source to Plugin Framework ([#4161](https://github.com/databricks/terraform-provider-databricks/pull/4161)). + * Migrate Share Resource to Plugin Framework ([#4047](https://github.com/databricks/terraform-provider-databricks/pull/4047)). + * Rollout Plugin Framework ([#4134](https://github.com/databricks/terraform-provider-databricks/pull/4134)). + + +### Dependency Updates + + * Bump Go SDK to v0.50.0 ([#4178](https://github.com/databricks/terraform-provider-databricks/pull/4178)). + + +### Exporter + + * Allow to match resource names by regular expression ([#4177](https://github.com/databricks/terraform-provider-databricks/pull/4177)). + + ## [Release] Release v1.56.0 ### Bug Fixes diff --git a/common/version.go b/common/version.go index 44a7242a45..b8959caac0 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.56.0" + version = "1.57.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From 7c29ccae3c71c07b3b25407e98dfa98cb508fd0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 12:34:32 +0100 Subject: [PATCH 87/99] [Dependency] Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (#4191) Bumps [github.com/golang-jwt/jwt/v4](https://github.com/golang-jwt/jwt) from 4.5.0 to 4.5.1.
Release notes

Sourced from github.com/golang-jwt/jwt/v4's releases.

v4.5.1

Security

Unclear documentation of the error behavior in ParseWithClaims in <= 4.5.0 could lead to situations where users are potentially not checking errors in the way they should be. In particular, if a token is both expired and invalid, the errors returned by ParseWithClaims return both error codes. If users only check for jwt.ErrTokenExpired using errors.Is, they will ignore the embedded jwt.ErrTokenSignatureInvalid and thus potentially accept invalid tokens.

This issue was documented in https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r and fixed in this release.

Note: v5 was not affected by this issue, so upgrading to this release version is also recommended.
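
To make the pitfall above concrete, here is a minimal, hypothetical Go sketch (not taken from the advisory or from this repository) of token validation with `github.com/golang-jwt/jwt/v4`; the `validateToken` helper and the signing key are illustrative assumptions. The unsafe pattern is checking only `jwt.ErrTokenExpired` on the parse error; checking `jwt.ErrTokenSignatureInvalid` explicitly (or relying on `token.Valid`) avoids accepting a token whose signature does not verify.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

// validateToken is a hypothetical helper illustrating the advisory: with
// jwt/v4 <= 4.5.0 the error returned by ParseWithClaims can wrap both
// jwt.ErrTokenExpired and jwt.ErrTokenSignatureInvalid, so checking only
// the expiry sentinel can silently accept a token with a bad signature.
func validateToken(tokenString string, key []byte) error {
	claims := &jwt.RegisteredClaims{}
	token, err := jwt.ParseWithClaims(tokenString, claims, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	if err != nil {
		// Unsafe: `if errors.Is(err, jwt.ErrTokenExpired) { ...renew... }` on its
		// own would ignore an embedded signature error. Check the signature first.
		if errors.Is(err, jwt.ErrTokenSignatureInvalid) {
			return fmt.Errorf("rejecting token, signature is invalid: %w", err)
		}
		if errors.Is(err, jwt.ErrTokenExpired) {
			return fmt.Errorf("token is expired: %w", err)
		}
		return fmt.Errorf("token is invalid: %w", err)
	}
	if !token.Valid {
		return errors.New("token is invalid")
	}
	return nil
}

func main() {
	// Not a real token; this only demonstrates that parse errors are surfaced.
	fmt.Println(validateToken("not.a.token", []byte("example-key")))
}
```

The key point is simply that an expired-token branch must not be the only check performed on the error returned by `ParseWithClaims`.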

What's Changed

Full Changelog: https://github.com/golang-jwt/jwt/compare/v4.5.0...v4.5.1

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/golang-jwt/jwt/v4&package-manager=go_modules&previous-version=4.5.0&new-version=4.5.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1e72ea27a6..e449f753c8 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( github.com/databricks/databricks-sdk-go v0.50.0 - github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang-jwt/jwt/v4 v4.5.1 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl/v2 v2.22.0 diff --git a/go.sum b/go.sum index 1188a3923d..6f416791a4 100644 --- a/go.sum +++ b/go.sum @@ -55,8 +55,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= From 5058e50a964339a88a794539255617f11fe7f9b2 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:30:12 +0530 Subject: [PATCH 88/99] [Fix] Suppress equal fold diff for DLT pipeline resource (#4196) ## Changes Fixes https://github.com/databricks/cli/issues/1763. During creation the `catalog` name for a DLT pipeline is normalized to small case causing a persistent drift. ## Tests Manually with the following configuration: ``` resource "databricks_pipeline" "this" { name = "testing caps" catalog = "MaiN" library { notebook { path = "/a/b/c" } } } ``` Before: There'd be a persistent drift where terraform would try to convert "MaiN" -> "main" After: No diff detected. --- pipelines/resource_pipeline.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pipelines/resource_pipeline.go b/pipelines/resource_pipeline.go index ac18eef8ff..dc25e6266c 100644 --- a/pipelines/resource_pipeline.go +++ b/pipelines/resource_pipeline.go @@ -230,6 +230,11 @@ func (Pipeline) CustomizeSchema(s *common.CustomizableSchema) *common.Customizab s.SchemaPath("edition").SetCustomSuppressDiff(common.EqualFoldDiffSuppress) s.SchemaPath("storage").SetCustomSuppressDiff(suppressStorageDiff) + // As of 6th Nov 2024, the DLT API only normalizes the catalog name when creating + // a pipeline. So we only ignore the equal fold diff for the catalog name and not other + // UC resources like target, schema or ingestion_definition.connection_name. 
+ s.SchemaPath("catalog").SetCustomSuppressDiff(common.EqualFoldDiffSuppress) + // Deprecated fields s.SchemaPath("cluster", "init_scripts", "dbfs").SetDeprecated(clusters.DbfsDeprecationWarning) s.SchemaPath("library", "whl").SetDeprecated("The 'whl' field is deprecated") From b814ca01b40dec2e2ac306b16032bd5258e516ec Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 7 Nov 2024 04:49:23 -0500 Subject: [PATCH 89/99] [Fix] Always fill `cluster_name` in `databricks_cluster` data source (#4197) ## Changes ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- clusters/data_cluster.go | 1 + clusters/data_cluster_test.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/clusters/data_cluster.go b/clusters/data_cluster.go index 73ae4a1e19..aee0503619 100644 --- a/clusters/data_cluster.go +++ b/clusters/data_cluster.go @@ -46,6 +46,7 @@ func DataSourceCluster() common.Resource { } data.Id = data.ClusterInfo.ClusterId data.ClusterId = data.ClusterInfo.ClusterId + data.Name = data.ClusterInfo.ClusterName return nil }) diff --git a/clusters/data_cluster_test.go b/clusters/data_cluster_test.go index cd20edec0d..f7744c2ba2 100644 --- a/clusters/data_cluster_test.go +++ b/clusters/data_cluster_test.go @@ -37,6 +37,7 @@ func TestClusterDataByID(t *testing.T) { "cluster_info.0.node_type_id": "i3.xlarge", "cluster_info.0.autoscale.0.max_workers": 4, "cluster_info.0.state": "RUNNING", + "cluster_name": "Shared Autoscaling", }) } @@ -68,6 +69,7 @@ func TestClusterDataByName(t *testing.T) { "cluster_info.0.node_type_id": "i3.xlarge", "cluster_info.0.autoscale.0.max_workers": 4, "cluster_info.0.state": "RUNNING", + "cluster_id": "abc", }) } From 80514f54b75e36253518da20ec7118d33d71f2ad Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Thu, 7 Nov 2024 14:04:05 +0100 Subject: [PATCH 90/99] [Internal] Update to latest OpenAPI spec and bump Go SDK (#4199) ## Changes Update to latest OpenAPI spec and Bump go sdk. 
## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .codegen/_openapi_sha | 2 +- go.mod | 2 +- go.sum | 4 +- internal/service/apps_tf/model.go | 39 ++- internal/service/catalog_tf/model.go | 343 +++++++++++++++++++++- internal/service/compute_tf/model.go | 2 +- internal/service/dashboards_tf/model.go | 107 +++++++ internal/service/jobs_tf/model.go | 3 + internal/service/pipelines_tf/model.go | 59 +++- internal/service/provisioning_tf/model.go | 3 + internal/service/sharing_tf/model.go | 285 ------------------ 11 files changed, 535 insertions(+), 314 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index ecf041814d..5f4b508602 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -25b2478e5a18c888f0d423249abde5499dc58424 \ No newline at end of file +d25296d2f4aa7bd6195c816fdf82e0f960f775da \ No newline at end of file diff --git a/go.mod b/go.mod index e449f753c8..87e265f72e 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.50.0 + github.com/databricks/databricks-sdk-go v0.51.0 github.com/golang-jwt/jwt/v4 v4.5.1 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 6f416791a4..2fe2fa4ab3 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.50.0 h1:Zl4uBhYMT5z6aDojCQJPT2zCYjjfqxBQSQn8uLTphpo= -github.com/databricks/databricks-sdk-go v0.50.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.51.0 h1:tcvB9TID3oUl0O8npccB5c+33tarBiYMBFbq4U4AB6M= +github.com/databricks/databricks-sdk-go v0.51.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index b5a602ba1f..d52106d4a8 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -144,8 +144,6 @@ func (newState *AppAccessControlResponse) SyncEffectiveFieldsDuringRead(existing } type AppDeployment struct { - // The name of the app. - AppName types.String `tfsdk:"-"` // The creation time of the deployment. Formatted timestamp in ISO 6801. CreateTime types.String `tfsdk:"create_time" tf:"optional"` EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` @@ -400,6 +398,30 @@ func (newState *ComputeStatus) SyncEffectiveFieldsDuringRead(existingState Compu } } +// Create an app deployment +type CreateAppDeploymentRequest struct { + AppDeployment []AppDeployment `tfsdk:"app_deployment" tf:"optional,object"` + // The name of the app. 
+ AppName types.String `tfsdk:"-"` +} + +func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppDeploymentRequest) { +} + +func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppDeploymentRequest) { +} + +// Create an app +type CreateAppRequest struct { + App []App `tfsdk:"app" tf:"optional,object"` +} + +func (newState *CreateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppRequest) { +} + +func (newState *CreateAppRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppRequest) { +} + // Delete an app type DeleteAppRequest struct { // The name of the app. @@ -551,3 +573,16 @@ func (newState *StopAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan Sto func (newState *StopAppRequest) SyncEffectiveFieldsDuringRead(existingState StopAppRequest) { } + +// Update an app +type UpdateAppRequest struct { + App []App `tfsdk:"app" tf:"optional,object"` + // The name of the app. + Name types.String `tfsdk:"-"` +} + +func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAppRequest) { +} + +func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAppRequest) { +} diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index caf38f865c..a712a00dad 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -181,6 +181,25 @@ func (newState *AwsCredentials) SyncEffectiveFieldsDuringCreateOrUpdate(plan Aws func (newState *AwsCredentials) SyncEffectiveFieldsDuringRead(existingState AwsCredentials) { } +// The AWS IAM role configuration +type AwsIamRole struct { + // The external ID used in role assumption to prevent the confused deputy + // problem. + ExternalId types.String `tfsdk:"external_id" tf:"optional"` + // The Amazon Resource Name (ARN) of the AWS IAM role used to vend temporary + // credentials. + RoleArn types.String `tfsdk:"role_arn" tf:"optional"` + // The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks. + // This is the identity that is going to assume the AWS IAM role. + UnityCatalogIamArn types.String `tfsdk:"unity_catalog_iam_arn" tf:"optional"` +} + +func (newState *AwsIamRole) SyncEffectiveFieldsDuringCreateOrUpdate(plan AwsIamRole) { +} + +func (newState *AwsIamRole) SyncEffectiveFieldsDuringRead(existingState AwsIamRole) { +} + type AwsIamRoleRequest struct { // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access. RoleArn types.String `tfsdk:"role_arn" tf:""` @@ -209,6 +228,47 @@ func (newState *AwsIamRoleResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan func (newState *AwsIamRoleResponse) SyncEffectiveFieldsDuringRead(existingState AwsIamRoleResponse) { } +// Azure Active Directory token, essentially the Oauth token for Azure Service +// Principal or Managed Identity. Read more at +// https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token +type AzureActiveDirectoryToken struct { + // Opaque token that contains claims that you can use in Azure Active + // Directory to access cloud services. + AadToken types.String `tfsdk:"aad_token" tf:"optional"` +} + +func (newState *AzureActiveDirectoryToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureActiveDirectoryToken) { +} + +func (newState *AzureActiveDirectoryToken) SyncEffectiveFieldsDuringRead(existingState AzureActiveDirectoryToken) { +} + +// The Azure managed identity configuration. 
+type AzureManagedIdentity struct { + // The Azure resource ID of the Azure Databricks Access Connector. Use the + // format + // `/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}`. + AccessConnectorId types.String `tfsdk:"access_connector_id" tf:"optional"` + // The Databricks internal ID that represents this managed identity. This + // field is only used to persist the credential_id once it is fetched from + // the credentials manager - as we only use the protobuf serializer to store + // credentials, this ID gets persisted to the database. . + CredentialId types.String `tfsdk:"credential_id" tf:"optional"` + // The Azure resource ID of the managed identity. Use the format, + // `/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}` + // This is only available for user-assgined identities. For system-assigned + // identities, the access_connector_id is used to identify the identity. If + // this field is not provided, then we assume the AzureManagedIdentity is + // using the system-assigned identity. + ManagedIdentityId types.String `tfsdk:"managed_identity_id" tf:"optional"` +} + +func (newState *AzureManagedIdentity) SyncEffectiveFieldsDuringCreateOrUpdate(plan AzureManagedIdentity) { +} + +func (newState *AzureManagedIdentity) SyncEffectiveFieldsDuringRead(existingState AzureManagedIdentity) { +} + type AzureManagedIdentityRequest struct { // The Azure resource ID of the Azure Databricks Access Connector. Use the // format @@ -550,6 +610,29 @@ func (newState *CreateConnection) SyncEffectiveFieldsDuringCreateOrUpdate(plan C func (newState *CreateConnection) SyncEffectiveFieldsDuringRead(existingState CreateConnection) { } +type CreateCredentialRequest struct { + // The AWS IAM role configuration + AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` + // The Azure managed identity configuration. + AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment" tf:"optional"` + // The credential name. The name must be unique among storage and service + // credentials within the metastore. + Name types.String `tfsdk:"name" tf:"optional"` + // Indicates the purpose of the credential. + Purpose types.String `tfsdk:"purpose" tf:"optional"` + // Optional. Supplying true to this argument skips validation of the created + // set of credentials. + SkipValidation types.Bool `tfsdk:"skip_validation" tf:"optional"` +} + +func (newState *CreateCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCredentialRequest) { +} + +func (newState *CreateCredentialRequest) SyncEffectiveFieldsDuringRead(existingState CreateCredentialRequest) { +} + type CreateExternalLocation struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -728,6 +811,18 @@ func (newState *CreateMonitor) SyncEffectiveFieldsDuringCreateOrUpdate(plan Crea func (newState *CreateMonitor) SyncEffectiveFieldsDuringRead(existingState CreateMonitor) { } +// Create an Online Table +type CreateOnlineTableRequest struct { + // Online Table information. 
+ Table []OnlineTable `tfsdk:"table" tf:"optional,object"` +} + +func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateOnlineTableRequest) { +} + +func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringRead(existingState CreateOnlineTableRequest) { +} + type CreateRegisteredModelRequest struct { // The name of the catalog where the schema and the registered model reside CatalogName types.String `tfsdk:"catalog_name" tf:""` @@ -840,6 +935,58 @@ func (newState *CreateVolumeRequestContent) SyncEffectiveFieldsDuringCreateOrUpd func (newState *CreateVolumeRequestContent) SyncEffectiveFieldsDuringRead(existingState CreateVolumeRequestContent) { } +type CredentialInfo struct { + // The AWS IAM role configuration + AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` + // The Azure managed identity configuration. + AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment" tf:"optional"` + // Time at which this credential was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` + // Username of credential creator. + CreatedBy types.String `tfsdk:"created_by" tf:"optional"` + // The full name of the credential. + FullName types.String `tfsdk:"full_name" tf:"optional"` + // The unique identifier of the credential. + Id types.String `tfsdk:"id" tf:"optional"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` + // Unique identifier of the parent metastore. + MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` + // The credential name. The name must be unique among storage and service + // credentials within the metastore. + Name types.String `tfsdk:"name" tf:"optional"` + // Username of current owner of credential. + Owner types.String `tfsdk:"owner" tf:"optional"` + // Indicates the purpose of the credential. + Purpose types.String `tfsdk:"purpose" tf:"optional"` + // Time at which this credential was last modified, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` + // Username of user who last modified the credential. + UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` +} + +func (newState *CredentialInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CredentialInfo) { +} + +func (newState *CredentialInfo) SyncEffectiveFieldsDuringRead(existingState CredentialInfo) { +} + +type CredentialValidationResult struct { + // Error message would exist when the result does not equal to **PASS**. + Message types.String `tfsdk:"message" tf:"optional"` + // The results of the tested operation. + Result types.String `tfsdk:"result" tf:"optional"` +} + +func (newState *CredentialValidationResult) SyncEffectiveFieldsDuringCreateOrUpdate(plan CredentialValidationResult) { +} + +func (newState *CredentialValidationResult) SyncEffectiveFieldsDuringRead(existingState CredentialValidationResult) { +} + // Currently assigned workspaces type CurrentWorkspaceBindings struct { // A list of workspace IDs. 
@@ -969,6 +1116,29 @@ func (newState *DeleteConnectionRequest) SyncEffectiveFieldsDuringCreateOrUpdate func (newState *DeleteConnectionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteConnectionRequest) { } +// Delete a credential +type DeleteCredentialRequest struct { + // Force deletion even if there are dependent services. + Force types.Bool `tfsdk:"-"` + // Name of the credential. + NameArg types.String `tfsdk:"-"` +} + +func (newState *DeleteCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCredentialRequest) { +} + +func (newState *DeleteCredentialRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCredentialRequest) { +} + +type DeleteCredentialResponse struct { +} + +func (newState *DeleteCredentialResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCredentialResponse) { +} + +func (newState *DeleteCredentialResponse) SyncEffectiveFieldsDuringRead(existingState DeleteCredentialResponse) { +} + // Delete an external location type DeleteExternalLocationRequest struct { // Force deletion even if there are dependent external tables or mounts. @@ -1339,8 +1509,7 @@ type ExternalLocationInfo struct { // When fallback mode is enabled, the access to the location falls back to // cluster credentials if UC credentials are not sufficient. Fallback types.Bool `tfsdk:"fallback" tf:"optional"` - // Whether the current securable is accessible from all workspaces or a - // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` // Unique identifier of metastore hosting the external location. MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` @@ -1548,6 +1717,34 @@ func (newState *GcpOauthToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpO func (newState *GcpOauthToken) SyncEffectiveFieldsDuringRead(existingState GcpOauthToken) { } +// Options to customize the requested temporary credential +type GenerateTemporaryServiceCredentialAzureOptions struct { + // The resources to which the temporary Azure credential should apply. These + // resources are the scopes that are passed to the token provider (see + // https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential?view=azure-python) + Resources []types.String `tfsdk:"resources" tf:"optional"` +} + +func (newState *GenerateTemporaryServiceCredentialAzureOptions) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenerateTemporaryServiceCredentialAzureOptions) { +} + +func (newState *GenerateTemporaryServiceCredentialAzureOptions) SyncEffectiveFieldsDuringRead(existingState GenerateTemporaryServiceCredentialAzureOptions) { +} + +type GenerateTemporaryServiceCredentialRequest struct { + // Options to customize the requested temporary credential + AzureOptions []GenerateTemporaryServiceCredentialAzureOptions `tfsdk:"azure_options" tf:"optional,object"` + // The name of the service credential used to generate a temporary + // credential + CredentialName types.String `tfsdk:"credential_name" tf:"optional"` +} + +func (newState *GenerateTemporaryServiceCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenerateTemporaryServiceCredentialRequest) { +} + +func (newState *GenerateTemporaryServiceCredentialRequest) SyncEffectiveFieldsDuringRead(existingState GenerateTemporaryServiceCredentialRequest) { +} + type GenerateTemporaryTableCredentialRequest struct { // The operation performed against the table data, either READ or // READ_WRITE. 
If READ_WRITE is specified, the credentials returned will @@ -1567,6 +1764,10 @@ type GenerateTemporaryTableCredentialResponse struct { // AWS temporary credentials for API authentication. Read more at // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional,object"` + // Azure Active Directory token, essentially the Oauth token for Azure + // Service Principal or Managed Identity. Read more at + // https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token + AzureAad []AzureActiveDirectoryToken `tfsdk:"azure_aad" tf:"optional,object"` // Azure temporary credentials for API authentication. Read more at // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas AzureUserDelegationSas []AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional,object"` @@ -1706,6 +1907,18 @@ func (newState *GetConnectionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(pl func (newState *GetConnectionRequest) SyncEffectiveFieldsDuringRead(existingState GetConnectionRequest) { } +// Get a credential +type GetCredentialRequest struct { + // Name of the credential. + NameArg types.String `tfsdk:"-"` +} + +func (newState *GetCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCredentialRequest) { +} + +func (newState *GetCredentialRequest) SyncEffectiveFieldsDuringRead(existingState GetCredentialRequest) { +} + // Get effective permissions type GetEffectiveRequest struct { // Full name of securable. @@ -2116,6 +2329,40 @@ func (newState *ListConnectionsResponse) SyncEffectiveFieldsDuringCreateOrUpdate func (newState *ListConnectionsResponse) SyncEffectiveFieldsDuringRead(existingState ListConnectionsResponse) { } +// List credentials +type ListCredentialsRequest struct { + // Maximum number of credentials to return. - If not set, the default max + // page size is used. - When set to a value greater than 0, the page length + // is the minimum of this value and a server-configured value. - When set to + // 0, the page length is set to a server-configured value (recommended). - + // When set to a value less than 0, an invalid parameter error is returned. + MaxResults types.Int64 `tfsdk:"-"` + // Opaque token to retrieve the next page of results. + PageToken types.String `tfsdk:"-"` + // Return only credentials for the specified purpose. + Purpose types.String `tfsdk:"-"` +} + +func (newState *ListCredentialsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCredentialsRequest) { +} + +func (newState *ListCredentialsRequest) SyncEffectiveFieldsDuringRead(existingState ListCredentialsRequest) { +} + +type ListCredentialsResponse struct { + Credentials []CredentialInfo `tfsdk:"credentials" tf:"optional"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). 
+ NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` +} + +func (newState *ListCredentialsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCredentialsResponse) { +} + +func (newState *ListCredentialsResponse) SyncEffectiveFieldsDuringRead(existingState ListCredentialsResponse) { +} + // List external locations type ListExternalLocationsRequest struct { // Whether to include external locations in the response for which the @@ -2515,6 +2762,9 @@ type ListTablesRequest struct { OmitColumns types.Bool `tfsdk:"-"` // Whether to omit the properties of the table from the response or not. OmitProperties types.Bool `tfsdk:"-"` + // Whether to omit the username of the table (e.g. owner, updated_by, + // created_by) from the response or not. + OmitUsername types.Bool `tfsdk:"-"` // Opaque token to send for the next page of results (pagination). PageToken types.String `tfsdk:"-"` // Parent schema of tables. @@ -3445,10 +3695,11 @@ type StorageCredentialInfo struct { CreatedBy types.String `tfsdk:"created_by" tf:"optional"` // The Databricks managed GCP service account configuration. DatabricksGcpServiceAccount []DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` + // The full name of the credential. + FullName types.String `tfsdk:"full_name" tf:"optional"` // The unique identifier of the credential. Id types.String `tfsdk:"id" tf:"optional"` - // Whether the current securable is accessible from all workspaces or a - // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` // Unique identifier of parent metastore. MetastoreId types.String `tfsdk:"metastore_id" tf:"optional"` @@ -3642,6 +3893,25 @@ func (newState *TableSummary) SyncEffectiveFieldsDuringCreateOrUpdate(plan Table func (newState *TableSummary) SyncEffectiveFieldsDuringRead(existingState TableSummary) { } +type TemporaryCredentials struct { + // AWS temporary credentials for API authentication. Read more at + // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. + AwsTempCredentials []AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional,object"` + // Azure Active Directory token, essentially the Oauth token for Azure + // Service Principal or Managed Identity. Read more at + // https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token + AzureAad []AzureActiveDirectoryToken `tfsdk:"azure_aad" tf:"optional,object"` + // Server time when the credential will expire, in epoch milliseconds. The + // API client is advised to cache the credential given this expiration time. + ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` +} + +func (newState *TemporaryCredentials) SyncEffectiveFieldsDuringCreateOrUpdate(plan TemporaryCredentials) { +} + +func (newState *TemporaryCredentials) SyncEffectiveFieldsDuringRead(existingState TemporaryCredentials) { +} + // Detailed status of an online table. Shown if the online table is in the // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. type TriggeredUpdateStatus struct { @@ -3736,6 +4006,35 @@ func (newState *UpdateConnection) SyncEffectiveFieldsDuringCreateOrUpdate(plan U func (newState *UpdateConnection) SyncEffectiveFieldsDuringRead(existingState UpdateConnection) { } +type UpdateCredentialRequest struct { + // The AWS IAM role configuration + AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` + // The Azure managed identity configuration. 
+ AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment" tf:"optional"` + // Force update even if there are dependent services. + Force types.Bool `tfsdk:"force" tf:"optional"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` + // Name of the credential. + NameArg types.String `tfsdk:"-"` + // New name of credential. + NewName types.String `tfsdk:"new_name" tf:"optional"` + // Username of current owner of credential. + Owner types.String `tfsdk:"owner" tf:"optional"` + // Supply true to this argument to skip validation of the updated + // credential. + SkipValidation types.Bool `tfsdk:"skip_validation" tf:"optional"` +} + +func (newState *UpdateCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCredentialRequest) { +} + +func (newState *UpdateCredentialRequest) SyncEffectiveFieldsDuringRead(existingState UpdateCredentialRequest) { +} + type UpdateExternalLocation struct { // The AWS access point to use when accesing s3 for this external location. AccessPoint types.String `tfsdk:"access_point" tf:"optional"` @@ -3752,8 +4051,7 @@ type UpdateExternalLocation struct { // Force update even if changing url invalidates dependent external tables // or mounts. Force types.Bool `tfsdk:"force" tf:"optional"` - // Whether the current securable is accessible from all workspaces or a - // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` // Name of the external location. Name types.String `tfsdk:"-"` @@ -3970,8 +4268,7 @@ type UpdateStorageCredential struct { // Force update even if there are dependent external locations or external // tables. Force types.Bool `tfsdk:"force" tf:"optional"` - // Whether the current securable is accessible from all workspaces or a - // specific set of workspaces. + IsolationMode types.String `tfsdk:"isolation_mode" tf:"optional"` // Name of the storage credential. Name types.String `tfsdk:"-"` @@ -4055,6 +4352,36 @@ func (newState *UpdateWorkspaceBindingsParameters) SyncEffectiveFieldsDuringCrea func (newState *UpdateWorkspaceBindingsParameters) SyncEffectiveFieldsDuringRead(existingState UpdateWorkspaceBindingsParameters) { } +type ValidateCredentialRequest struct { + // The AWS IAM role configuration + AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` + // The Azure managed identity configuration. + AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` + // Required. The name of an existing credential or long-lived cloud + // credential to validate. + CredentialName types.String `tfsdk:"credential_name" tf:"optional"` + // The purpose of the credential. This should only be used when the + // credential is specified. + Purpose types.String `tfsdk:"purpose" tf:"optional"` +} + +func (newState *ValidateCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ValidateCredentialRequest) { +} + +func (newState *ValidateCredentialRequest) SyncEffectiveFieldsDuringRead(existingState ValidateCredentialRequest) { +} + +type ValidateCredentialResponse struct { + // The results of the validation check. 
+ Results []CredentialValidationResult `tfsdk:"results" tf:"optional"` +} + +func (newState *ValidateCredentialResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ValidateCredentialResponse) { +} + +func (newState *ValidateCredentialResponse) SyncEffectiveFieldsDuringRead(existingState ValidateCredentialResponse) { +} + type ValidateStorageCredential struct { // The AWS IAM role configuration. AwsIamRole []AwsIamRoleRequest `tfsdk:"aws_iam_role" tf:"optional,object"` diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index 653cfec24f..d1e67f00bb 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -1747,7 +1747,7 @@ type EditCluster struct { // Attributes related to clusters running on Microsoft Azure. If not // specified at cluster creation, a set of default values will be used. AzureAttributes []AzureAttributes `tfsdk:"azure_attributes" tf:"optional,object"` - // ID of the cluser + // ID of the cluster ClusterId types.String `tfsdk:"cluster_id" tf:""` // The configuration for delivering spark logs to a long-term storage // destination. Two kinds of destinations (dbfs and s3) are supported. Only diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index 2066f6a422..c49167cac7 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -15,6 +15,68 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) +// Create dashboard +type CreateDashboardRequest struct { + Dashboard []Dashboard `tfsdk:"dashboard" tf:"optional,object"` +} + +func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateDashboardRequest) { +} + +func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateDashboardRequest) { +} + +// Create dashboard schedule +type CreateScheduleRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` + + Schedule []Schedule `tfsdk:"schedule" tf:"optional,object"` +} + +func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateScheduleRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId +} + +func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState CreateScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } +} + +// Create schedule subscription +type CreateSubscriptionRequest struct { + // UUID identifying the dashboard to which the subscription belongs. + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` + // UUID identifying the schedule to which the subscription belongs. 
+ ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` + + Subscription []Subscription `tfsdk:"subscription" tf:"optional,object"` +} + +func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateSubscriptionRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState CreateSubscriptionRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } +} + type CronSchedule struct { // A cron expression using quartz syntax. EX: `0 0 8 * * ?` represents // everyday at 8am. See [Cron Trigger] for details. @@ -803,6 +865,8 @@ type Schedule struct { // A timestamp indicating when the schedule was last updated. UpdateTime types.String `tfsdk:"update_time" tf:"optional"` EffectiveUpdateTime types.String `tfsdk:"effective_update_time" tf:"computed,optional"` + // The warehouse id to run the dashboard with for the schedule. + WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` } func (newState *Schedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan Schedule) { @@ -1025,3 +1089,46 @@ func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpd func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringRead(existingState UnpublishDashboardResponse) { } + +// Update dashboard +type UpdateDashboardRequest struct { + Dashboard []Dashboard `tfsdk:"dashboard" tf:"optional,object"` + // UUID identifying the dashboard. + DashboardId types.String `tfsdk:"-"` +} + +func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDashboardRequest) { +} + +func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDashboardRequest) { +} + +// Update dashboard schedule +type UpdateScheduleRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-"` + EffectiveDashboardId types.String `tfsdk:"-"` + + Schedule []Schedule `tfsdk:"schedule" tf:"optional,object"` + // UUID identifying the schedule. 
+ ScheduleId types.String `tfsdk:"-"` + EffectiveScheduleId types.String `tfsdk:"-"` +} + +func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateScheduleRequest) { + newState.EffectiveDashboardId = newState.DashboardId + newState.DashboardId = plan.DashboardId + newState.EffectiveScheduleId = newState.ScheduleId + newState.ScheduleId = plan.ScheduleId +} + +func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState UpdateScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId + if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { + newState.DashboardId = existingState.DashboardId + } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId + if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { + newState.ScheduleId = existingState.ScheduleId + } +} diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index fe3918dabd..d4629abf94 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -2321,6 +2321,9 @@ type RunNow struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` + // A list of task keys to run inside of the job. If this field is not + // provided, all tasks in the job will be run. + Only []types.String `tfsdk:"only" tf:"optional"` // Controls whether the pipeline should perform a full refresh PipelineParams []PipelineParams `tfsdk:"pipeline_params" tf:"optional,object"` diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index 8adcfa0bfa..56f0b9d192 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -45,7 +45,7 @@ type CreatePipeline struct { Edition types.String `tfsdk:"edition" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. Filters []Filters `tfsdk:"filters" tf:"optional,object"` - // The definition of a gateway pipeline to support CDC. + // The definition of a gateway pipeline to support change data capture. GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` @@ -60,6 +60,8 @@ type CreatePipeline struct { Notifications []Notifications `tfsdk:"notifications" tf:"optional"` // Whether Photon is enabled for this pipeline. Photon types.Bool `tfsdk:"photon" tf:"optional"` + // Restart window of this pipeline. + RestartWindow []RestartWindow `tfsdk:"restart_window" tf:"optional,object"` // The default schema (database) where tables are read from or published to. // The presence of this field implies that the pipeline is in direct // publishing mode. @@ -173,7 +175,7 @@ type EditPipeline struct { ExpectedLastModified types.Int64 `tfsdk:"expected_last_modified" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. Filters []Filters `tfsdk:"filters" tf:"optional,object"` - // The definition of a gateway pipeline to support CDC. + // The definition of a gateway pipeline to support change data capture. GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. 
Id types.String `tfsdk:"id" tf:"optional"` @@ -190,6 +192,8 @@ type EditPipeline struct { Photon types.Bool `tfsdk:"photon" tf:"optional"` // Unique identifier for this pipeline. PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` + // Restart window of this pipeline. + RestartWindow []RestartWindow `tfsdk:"restart_window" tf:"optional,object"` // The default schema (database) where tables are read from or published to. // The presence of this field implies that the pipeline is in direct // publishing mode. @@ -365,11 +369,11 @@ func (newState *GetUpdateResponse) SyncEffectiveFieldsDuringRead(existingState G } type IngestionConfig struct { - // Select tables from a specific source report. + // Select a specific source report. Report []ReportSpec `tfsdk:"report" tf:"optional,object"` - // Select tables from a specific source schema. + // Select all tables from a specific source schema. Schema []SchemaSpec `tfsdk:"schema" tf:"optional,object"` - // Select tables from a specific source table. + // Select a specific source table. Table []TableSpec `tfsdk:"table" tf:"optional,object"` } @@ -380,9 +384,13 @@ func (newState *IngestionConfig) SyncEffectiveFieldsDuringRead(existingState Ing } type IngestionGatewayPipelineDefinition struct { - // Immutable. The Unity Catalog connection this gateway pipeline uses to - // communicate with the source. + // [Deprecated, use connection_name instead] Immutable. The Unity Catalog + // connection that this gateway pipeline uses to communicate with the + // source. ConnectionId types.String `tfsdk:"connection_id" tf:"optional"` + // Immutable. The Unity Catalog connection that this gateway pipeline uses + // to communicate with the source. + ConnectionName types.String `tfsdk:"connection_name" tf:"optional"` // Required, Immutable. The name of the catalog for the gateway pipeline's // storage location. GatewayStorageCatalog types.String `tfsdk:"gateway_storage_catalog" tf:"optional"` @@ -403,13 +411,13 @@ func (newState *IngestionGatewayPipelineDefinition) SyncEffectiveFieldsDuringRea } type IngestionPipelineDefinition struct { - // Immutable. The Unity Catalog connection this ingestion pipeline uses to - // communicate with the source. Specify either ingestion_gateway_id or - // connection_name. + // Immutable. The Unity Catalog connection that this ingestion pipeline uses + // to communicate with the source. This is used with connectors for + // applications like Salesforce, Workday, and so on. ConnectionName types.String `tfsdk:"connection_name" tf:"optional"` - // Immutable. Identifier for the ingestion gateway used by this ingestion - // pipeline to communicate with the source. Specify either - // ingestion_gateway_id or connection_name. + // Immutable. Identifier for the gateway that is used by this ingestion + // pipeline to communicate with the source database. This is used with + // connectors to databases like SQL Server. IngestionGatewayId types.String `tfsdk:"ingestion_gateway_id" tf:"optional"` // Required. Settings specifying tables to replicate and the destination for // the replicated tables. @@ -934,7 +942,7 @@ type PipelineSpec struct { Edition types.String `tfsdk:"edition" tf:"optional"` // Filters on which Pipeline packages to include in the deployed graph. Filters []Filters `tfsdk:"filters" tf:"optional,object"` - // The definition of a gateway pipeline to support CDC. + // The definition of a gateway pipeline to support change data capture. 
GatewayDefinition []IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition" tf:"optional,object"` // Unique identifier for this pipeline. Id types.String `tfsdk:"id" tf:"optional"` @@ -949,6 +957,8 @@ type PipelineSpec struct { Notifications []Notifications `tfsdk:"notifications" tf:"optional"` // Whether Photon is enabled for this pipeline. Photon types.Bool `tfsdk:"photon" tf:"optional"` + // Restart window of this pipeline. + RestartWindow []RestartWindow `tfsdk:"restart_window" tf:"optional,object"` // The default schema (database) where tables are read from or published to. // The presence of this field implies that the pipeline is in direct // publishing mode. @@ -1032,6 +1042,27 @@ func (newState *ReportSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan ReportS func (newState *ReportSpec) SyncEffectiveFieldsDuringRead(existingState ReportSpec) { } +type RestartWindow struct { + // Days of week in which the restart is allowed to happen (within a + // five-hour window starting at start_hour). If not specified all days of + // the week will be used. + DaysOfWeek types.String `tfsdk:"days_of_week" tf:"optional"` + // An integer between 0 and 23 denoting the start hour for the restart + // window in the 24-hour day. Continuous pipeline restart is triggered only + // within a five-hour window starting at this hour. + StartHour types.Int64 `tfsdk:"start_hour" tf:""` + // Time zone id of restart window. See + // https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html + // for details. If not specified, UTC will be used. + TimeZoneId types.String `tfsdk:"time_zone_id" tf:"optional"` +} + +func (newState *RestartWindow) SyncEffectiveFieldsDuringCreateOrUpdate(plan RestartWindow) { +} + +func (newState *RestartWindow) SyncEffectiveFieldsDuringRead(existingState RestartWindow) { +} + type SchemaSpec struct { // Required. Destination catalog to store tables. DestinationCatalog types.String `tfsdk:"destination_catalog" tf:"optional"` diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index 188e8f48df..49b5d02e78 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -947,6 +947,9 @@ type UpdateWorkspaceRequest struct { // switch from a Databricks-managed VPC to a customer-managed VPC by // updating the workspace to add a network configuration ID. NetworkId types.String `tfsdk:"network_id" tf:"optional"` + // The ID of the workspace's private access settings configuration object. + // This parameter is available only for updating failed workspaces. + PrivateAccessSettingsId types.String `tfsdk:"private_access_settings_id" tf:"optional"` // The ID of the workspace's storage configuration object. This parameter is // available only for updating failed workspaces. StorageConfigurationId types.String `tfsdk:"storage_configuration_id" tf:"optional"` diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index 0192deeaaa..6bde086372 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -15,214 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -type CentralCleanRoomInfo struct { - // All assets from all collaborators that are available in the clean room. - // Only one of table_info or notebook_info will be filled in. - CleanRoomAssets []CleanRoomAssetInfo `tfsdk:"clean_room_assets" tf:"optional"` - // All collaborators who are in the clean room. 
- Collaborators []CleanRoomCollaboratorInfo `tfsdk:"collaborators" tf:"optional"` - // The collaborator who created the clean room. - Creator []CleanRoomCollaboratorInfo `tfsdk:"creator" tf:"optional,object"` - // The cloud where clean room tasks will be run. - StationCloud types.String `tfsdk:"station_cloud" tf:"optional"` - // The region where clean room tasks will be run. - StationRegion types.String `tfsdk:"station_region" tf:"optional"` -} - -func (newState *CentralCleanRoomInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CentralCleanRoomInfo) { -} - -func (newState *CentralCleanRoomInfo) SyncEffectiveFieldsDuringRead(existingState CentralCleanRoomInfo) { -} - -type CleanRoomAssetInfo struct { - // Time at which this asset was added, in epoch milliseconds. - AddedAt types.Int64 `tfsdk:"added_at" tf:"optional"` - // Details about the notebook asset. - NotebookInfo []CleanRoomNotebookInfo `tfsdk:"notebook_info" tf:"optional,object"` - // The collaborator who owns the asset. - Owner []CleanRoomCollaboratorInfo `tfsdk:"owner" tf:"optional,object"` - // Details about the table asset. - TableInfo []CleanRoomTableInfo `tfsdk:"table_info" tf:"optional,object"` - // Time at which this asset was updated, in epoch milliseconds. - UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` -} - -func (newState *CleanRoomAssetInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetInfo) { -} - -func (newState *CleanRoomAssetInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetInfo) { -} - -type CleanRoomCatalog struct { - // Name of the catalog in the clean room station. Empty for notebooks. - CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` - // The details of the shared notebook files. - NotebookFiles []SharedDataObject `tfsdk:"notebook_files" tf:"optional"` - // The details of the shared tables. - Tables []SharedDataObject `tfsdk:"tables" tf:"optional"` -} - -func (newState *CleanRoomCatalog) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCatalog) { -} - -func (newState *CleanRoomCatalog) SyncEffectiveFieldsDuringRead(existingState CleanRoomCatalog) { -} - -type CleanRoomCatalogUpdate struct { - // The name of the catalog to update assets. - CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` - // The updates to the assets in the catalog. - Updates []SharedDataObjectUpdate `tfsdk:"updates" tf:"optional,object"` -} - -func (newState *CleanRoomCatalogUpdate) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCatalogUpdate) { -} - -func (newState *CleanRoomCatalogUpdate) SyncEffectiveFieldsDuringRead(existingState CleanRoomCatalogUpdate) { -} - -type CleanRoomCollaboratorInfo struct { - // The global Unity Catalog metastore id of the collaborator. Also known as - // the sharing identifier. The identifier is of format - // __cloud__:__region__:__metastore-uuid__. - GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` - // The organization name of the collaborator. This is configured in the - // metastore for Delta Sharing and is used to identify the organization to - // other collaborators. - OrganizationName types.String `tfsdk:"organization_name" tf:"optional"` -} - -func (newState *CleanRoomCollaboratorInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCollaboratorInfo) { -} - -func (newState *CleanRoomCollaboratorInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomCollaboratorInfo) { -} - -type CleanRoomInfo struct { - // User-provided free-form text description. 
- Comment types.String `tfsdk:"comment" tf:"optional"` - // Time at which this clean room was created, in epoch milliseconds. - CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` - // Username of clean room creator. - CreatedBy types.String `tfsdk:"created_by" tf:"optional"` - // Catalog aliases shared by the current collaborator with asset details. - LocalCatalogs []CleanRoomCatalog `tfsdk:"local_catalogs" tf:"optional"` - // Name of the clean room. - Name types.String `tfsdk:"name" tf:"optional"` - // Username of current owner of clean room. - Owner types.String `tfsdk:"owner" tf:"optional"` - // Central clean room details. - RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"optional,object"` - // Time at which this clean room was updated, in epoch milliseconds. - UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` - // Username of clean room updater. - UpdatedBy types.String `tfsdk:"updated_by" tf:"optional"` -} - -func (newState *CleanRoomInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomInfo) { -} - -func (newState *CleanRoomInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomInfo) { -} - -type CleanRoomNotebookInfo struct { - // The base64 representation of the notebook content in HTML. - NotebookContent types.String `tfsdk:"notebook_content" tf:"optional"` - // The name of the notebook. - NotebookName types.String `tfsdk:"notebook_name" tf:"optional"` -} - -func (newState *CleanRoomNotebookInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomNotebookInfo) { -} - -func (newState *CleanRoomNotebookInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomNotebookInfo) { -} - -type CleanRoomTableInfo struct { - // Name of parent catalog. - CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` - // The array of __ColumnInfo__ definitions of the table's columns. - Columns []ColumnInfo `tfsdk:"columns" tf:"optional"` - // Full name of table, in form of - // __catalog_name__.__schema_name__.__table_name__ - FullName types.String `tfsdk:"full_name" tf:"optional"` - // Name of table, relative to parent schema. - Name types.String `tfsdk:"name" tf:"optional"` - // Name of parent schema relative to its parent catalog. - SchemaName types.String `tfsdk:"schema_name" tf:"optional"` -} - -func (newState *CleanRoomTableInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomTableInfo) { -} - -func (newState *CleanRoomTableInfo) SyncEffectiveFieldsDuringRead(existingState CleanRoomTableInfo) { -} - -type ColumnInfo struct { - // User-provided free-form text description. - Comment types.String `tfsdk:"comment" tf:"optional"` - - Mask []ColumnMask `tfsdk:"mask" tf:"optional,object"` - // Name of Column. - Name types.String `tfsdk:"name" tf:"optional"` - // Whether field may be Null (default: true). - Nullable types.Bool `tfsdk:"nullable" tf:"optional"` - // Partition index for column. - PartitionIndex types.Int64 `tfsdk:"partition_index" tf:"optional"` - // Ordinal position of column (starting at position 0). - Position types.Int64 `tfsdk:"position" tf:"optional"` - // Format of IntervalType. - TypeIntervalType types.String `tfsdk:"type_interval_type" tf:"optional"` - // Full data type specification, JSON-serialized. - TypeJson types.String `tfsdk:"type_json" tf:"optional"` - // Name of type (INT, STRUCT, MAP, etc.). - TypeName types.String `tfsdk:"type_name" tf:"optional"` - // Digits of precision; required for DecimalTypes. 
- TypePrecision types.Int64 `tfsdk:"type_precision" tf:"optional"` - // Digits to right of decimal; Required for DecimalTypes. - TypeScale types.Int64 `tfsdk:"type_scale" tf:"optional"` - // Full data type specification as SQL/catalogString text. - TypeText types.String `tfsdk:"type_text" tf:"optional"` -} - -func (newState *ColumnInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnInfo) { -} - -func (newState *ColumnInfo) SyncEffectiveFieldsDuringRead(existingState ColumnInfo) { -} - -type ColumnMask struct { - // The full name of the column mask SQL UDF. - FunctionName types.String `tfsdk:"function_name" tf:"optional"` - // The list of additional table columns to be passed as input to the column - // mask function. The first arg of the mask function should be of the type - // of the column being masked and the types of the rest of the args should - // match the types of columns in 'using_column_names'. - UsingColumnNames []types.String `tfsdk:"using_column_names" tf:"optional"` -} - -func (newState *ColumnMask) SyncEffectiveFieldsDuringCreateOrUpdate(plan ColumnMask) { -} - -func (newState *ColumnMask) SyncEffectiveFieldsDuringRead(existingState ColumnMask) { -} - -type CreateCleanRoom struct { - // User-provided free-form text description. - Comment types.String `tfsdk:"comment" tf:"optional"` - // Name of the clean room. - Name types.String `tfsdk:"name" tf:""` - // Central clean room details. - RemoteDetailedInfo []CentralCleanRoomInfo `tfsdk:"remote_detailed_info" tf:"object"` -} - -func (newState *CreateCleanRoom) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCleanRoom) { -} - -func (newState *CreateCleanRoom) SyncEffectiveFieldsDuringRead(existingState CreateCleanRoom) { -} - type CreateProvider struct { // The delta sharing authentication type. AuthenticationType types.String `tfsdk:"authentication_type" tf:""` @@ -287,18 +79,6 @@ func (newState *CreateShare) SyncEffectiveFieldsDuringCreateOrUpdate(plan Create func (newState *CreateShare) SyncEffectiveFieldsDuringRead(existingState CreateShare) { } -// Delete a clean room -type DeleteCleanRoomRequest struct { - // The name of the clean room. - Name types.String `tfsdk:"-"` -} - -func (newState *DeleteCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCleanRoomRequest) { -} - -func (newState *DeleteCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCleanRoomRequest) { -} - // Delete a provider type DeleteProviderRequest struct { // Name of the provider. @@ -365,20 +145,6 @@ func (newState *GetActivationUrlInfoResponse) SyncEffectiveFieldsDuringCreateOrU func (newState *GetActivationUrlInfoResponse) SyncEffectiveFieldsDuringRead(existingState GetActivationUrlInfoResponse) { } -// Get a clean room -type GetCleanRoomRequest struct { - // Whether to include remote details (central) on the clean room. - IncludeRemoteDetails types.Bool `tfsdk:"-"` - // The name of the clean room. - Name types.String `tfsdk:"-"` -} - -func (newState *GetCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCleanRoomRequest) { -} - -func (newState *GetCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState GetCleanRoomRequest) { -} - // Get a provider type GetProviderRequest struct { // Name of the provider. 
@@ -443,40 +209,6 @@ func (newState *IpAccessList) SyncEffectiveFieldsDuringCreateOrUpdate(plan IpAcc func (newState *IpAccessList) SyncEffectiveFieldsDuringRead(existingState IpAccessList) { } -// List clean rooms -type ListCleanRoomsRequest struct { - // Maximum number of clean rooms to return. If not set, all the clean rooms - // are returned (not recommended). - when set to a value greater than 0, the - // page length is the minimum of this value and a server configured value; - - // when set to 0, the page length is set to a server configured value - // (recommended); - when set to a value less than 0, an invalid parameter - // error is returned; - MaxResults types.Int64 `tfsdk:"-"` - // Opaque pagination token to go to next page based on previous query. - PageToken types.String `tfsdk:"-"` -} - -func (newState *ListCleanRoomsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomsRequest) { -} - -func (newState *ListCleanRoomsRequest) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomsRequest) { -} - -type ListCleanRoomsResponse struct { - // An array of clean rooms. Remote details (central) are not included. - CleanRooms []CleanRoomInfo `tfsdk:"clean_rooms" tf:"optional"` - // Opaque token to retrieve the next page of results. Absent if there are no - // more pages. __page_token__ should be set to this value for the next - // request (for the next page of results). - NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` -} - -func (newState *ListCleanRoomsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomsResponse) { -} - -func (newState *ListCleanRoomsResponse) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomsResponse) { -} - type ListProviderSharesResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -1032,23 +764,6 @@ func (newState *SharedDataObjectUpdate) SyncEffectiveFieldsDuringCreateOrUpdate( func (newState *SharedDataObjectUpdate) SyncEffectiveFieldsDuringRead(existingState SharedDataObjectUpdate) { } -type UpdateCleanRoom struct { - // Array of shared data object updates. - CatalogUpdates []CleanRoomCatalogUpdate `tfsdk:"catalog_updates" tf:"optional"` - // User-provided free-form text description. - Comment types.String `tfsdk:"comment" tf:"optional"` - // The name of the clean room. - Name types.String `tfsdk:"-"` - // Username of current owner of clean room. - Owner types.String `tfsdk:"owner" tf:"optional"` -} - -func (newState *UpdateCleanRoom) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCleanRoom) { -} - -func (newState *UpdateCleanRoom) SyncEffectiveFieldsDuringRead(existingState UpdateCleanRoom) { -} - type UpdatePermissionsResponse struct { } From 7ddbeab4529a271a0c03fca248a4898e6145bb6b Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Fri, 8 Nov 2024 03:49:09 -0500 Subject: [PATCH 91/99] [Fix] Upload content `databricks_workspace_file` using raw format (#4200) ## Changes This fixes a problem with uploading zip-based files with the `databricks_workspace_file` resource. 
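For illustration, a minimal configuration that exercises the affected path is sketched below (the local source path is hypothetical; the new `TestAccWorkspaceFileZipFile` acceptance test added in this change uses the bundled `zipfile.zip` fixture in the same way):

```hcl
# Sketch only: uploading a binary payload (e.g. a zip archive) with
# databricks_workspace_file. Before this fix the content was not uploaded
# in raw format, which caused problems with zip-based files.
resource "databricks_workspace_file" "zipfile" {
  source = "${path.module}/testdata/zipfile.zip" # hypothetical local path
  path   = "/Shared/provider-test/zipfile.zip"   # destination in the workspace
}
```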
## Tests - [x] `make test` run locally - [ ] ~relevant change in `docs/` folder~ - [x] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [x] using Go SDK --- internal/acceptance/permissions_test.go | 30 ++++++++++++++------- internal/acceptance/workspace_file_test.go | 9 +++++++ workspace/acceptance/testdata/zipfile.zip | Bin 0 -> 171 bytes workspace/resource_workspace_file.go | 4 +-- workspace/resource_workspace_file_test.go | 16 +++++------ 5 files changed, 39 insertions(+), 20 deletions(-) create mode 100644 workspace/acceptance/testdata/zipfile.zip diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 325bc398fe..0fdd5553b8 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -466,17 +466,22 @@ func TestAccPermissions_WorkspaceFile_Path(t *testing.T) { } resource "databricks_workspace_file" "this" { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" - path = "${databricks_directory.this.path}/test_notebook" + path = "${databricks_directory.this.path}/test_ws_file" }` WorkspaceLevel(t, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", groupPermissions("CAN_RUN")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", + groupPermissions("CAN_RUN")), }, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", + currentPrincipalPermission(t, "CAN_MANAGE"), + allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), }, Step{ // The current user can be removed from permissions since they inherit permissions from the directory they created. 
- Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", + allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), }, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", currentPrincipalPermission(t, "CAN_READ")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", + currentPrincipalPermission(t, "CAN_READ")), ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for file, allowed levels: CAN_MANAGE"), }) } @@ -489,17 +494,22 @@ func TestAccPermissions_WorkspaceFile_Id(t *testing.T) { } resource "databricks_workspace_file" "this" { source = "{var.CWD}/../../storage/testdata/tf-test-python.py" - path = "${databricks_directory.this.path}/test_notebook" + path = "${databricks_directory.this.path}/test_ws_file" }` WorkspaceLevel(t, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", groupPermissions("CAN_RUN")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", + groupPermissions("CAN_RUN")), }, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", + currentPrincipalPermission(t, "CAN_MANAGE"), + allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), }, Step{ // The current user can be removed from permissions since they inherit permissions from the directory they created. 
- Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", + allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), }, Step{ - Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", currentPrincipalPermission(t, "CAN_READ")), + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", + currentPrincipalPermission(t, "CAN_READ")), ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for file, allowed levels: CAN_MANAGE"), }) } diff --git a/internal/acceptance/workspace_file_test.go b/internal/acceptance/workspace_file_test.go index 9a9a57c8e0..f0b8b27b45 100644 --- a/internal/acceptance/workspace_file_test.go +++ b/internal/acceptance/workspace_file_test.go @@ -27,6 +27,15 @@ func TestAccWorkspaceFileEmptyFile(t *testing.T) { }) } +func TestAccWorkspaceFileZipFile(t *testing.T) { + WorkspaceLevel(t, Step{ + Template: `resource "databricks_workspace_file" "zipfile" { + source = "{var.CWD}/../../workspace/acceptance/testdata/zipfile.zip" + path = "/Shared/provider-test/zipfile_{var.RANDOM}.zip" + }`, + }) +} + func TestAccWorkspaceFileBase64(t *testing.T) { WorkspaceLevel(t, Step{ Template: `resource "databricks_workspace_file" "this2" { diff --git a/workspace/acceptance/testdata/zipfile.zip b/workspace/acceptance/testdata/zipfile.zip new file mode 100644 index 0000000000000000000000000000000000000000..2be8cd176157ab85f01d3d5ee6ef14b32422b05b GIT binary patch literal 171 zcmWIWW@h1H0D-xQ>5)}c`EQwlY!K#TkYUJ3&B@8vE2$_64dG;9-nl?09fV6OxEUB( zzA-W|u!sN^W@K^&cr!A|G2=2v0;q_A0jQi|Nh64bWGXAfR5TL Date: Fri, 8 Nov 2024 19:51:21 +0530 Subject: [PATCH 92/99] [Release] Release v1.58.0 (#4202) ### Bug Fixes * Always fill `cluster_name` in `databricks_cluster` data source ([#4197](https://github.com/databricks/terraform-provider-databricks/pull/4197)). * Suppress equal fold diff for DLT pipeline resource ([#4196](https://github.com/databricks/terraform-provider-databricks/pull/4196)). * Upload content `databricks_workspace_file` using raw format ([#4200](https://github.com/databricks/terraform-provider-databricks/pull/4200)). ### Internal Changes * Update to latest OpenAPI spec and bump Go SDK ([#4199](https://github.com/databricks/terraform-provider-databricks/pull/4199)). ### Dependency Updates * Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 ([#4191](https://github.com/databricks/terraform-provider-databricks/pull/4191)). --- CHANGELOG.md | 19 +++++++++++++++++++ common/version.go | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2d4139a7d..a9e6a67691 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Version changelog +## [Release] Release v1.58.0 + +### Bug Fixes + + * Always fill `cluster_name` in `databricks_cluster` data source ([#4197](https://github.com/databricks/terraform-provider-databricks/pull/4197)). + * Suppress equal fold diff for DLT pipeline resource ([#4196](https://github.com/databricks/terraform-provider-databricks/pull/4196)). + * Upload content `databricks_workspace_file` using raw format ([#4200](https://github.com/databricks/terraform-provider-databricks/pull/4200)). 
+ + +### Internal Changes + + * Update to latest OpenAPI spec and bump Go SDK ([#4199](https://github.com/databricks/terraform-provider-databricks/pull/4199)). + + +### Dependency Updates + + * Bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 ([#4191](https://github.com/databricks/terraform-provider-databricks/pull/4191)). + + ## [Release] Release v1.57.0 ### New Features and Improvements diff --git a/common/version.go b/common/version.go index b8959caac0..75ecf50a60 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.57.0" + version = "1.58.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From ae65156df2b2e7fd683703e660055b0e15163860 Mon Sep 17 00:00:00 2001 From: Vibhor Nanavati Date: Tue, 12 Nov 2024 05:05:59 -0800 Subject: [PATCH 93/99] [Doc] Update "Databricks Workspace Creator" permissions on gcp-workspace.md (#4201) ## Changes Match the corresponding (upcoming) changes to https://docs.gcp.databricks.com/en/admin/cloud-configurations/gcp/permissions.html#required-permissions-for-the-workspace-creator ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/guides/gcp-workspace.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/guides/gcp-workspace.md b/docs/guides/gcp-workspace.md index e7d2044a12..0f0456c8c2 100644 --- a/docs/guides/gcp-workspace.md +++ b/docs/guides/gcp-workspace.md @@ -57,6 +57,8 @@ resource "google_project_iam_custom_role" "workspace_creator" { permissions = [ "iam.serviceAccounts.getIamPolicy", "iam.serviceAccounts.setIamPolicy", + "iam.serviceAccounts.create", + "iam.serviceAccounts.get", "iam.roles.create", "iam.roles.delete", "iam.roles.get", @@ -68,8 +70,13 @@ resource "google_project_iam_custom_role" "workspace_creator" { "serviceusage.services.list", "serviceusage.services.enable", "compute.networks.get", + "compute.networks.updatePolicy", "compute.projects.get", "compute.subnetworks.get", + "compute.subnetworks.getIamPolicy", + "compute.subnetworks.setIamPolicy", + "compute.firewalls.get", + "compute.firewalls.create", ] } From d4e461cb09d7480ede5c621cc1c9c82f520be233 Mon Sep 17 00:00:00 2001 From: Ashen Gunaratne Date: Tue, 12 Nov 2024 18:45:43 +0530 Subject: [PATCH 94/99] [Feature] Add support partitions in policy data sources (#4181) ## Changes - Resolves https://github.com/databricks/terraform-provider-databricks/issues/4054 - Resolves https://github.com/databricks/terraform-provider-databricks/issues/4152 - Add optional argument `aws_partition` to all aws policy data sources to allow usage in all aws partitions ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [ ] using Go SDK --- aws/constants.go | 17 +++++ aws/data_aws_assume_role_policy.go | 26 ++++++-- aws/data_aws_assume_role_policy_test.go | 49 ++++++++++++++ aws/data_aws_bucket_policy.go | 25 +++++-- aws/data_aws_bucket_policy_test.go | 16 +++++ aws/data_aws_crossaccount_policy.go | 51 ++++++++------- aws/data_aws_crossaccount_policy_test.go | 29 +++++++++ ...ta_aws_unity_catalog_assume_role_policy.go | 14 +++- ...s_unity_catalog_assume_role_policy_test.go | 65 +++++++++++++++++++ aws/data_aws_unity_catalog_policy.go | 17 +++-- 
aws/data_aws_unity_catalog_policy_test.go | 58 +++++++++++++++++ docs/data-sources/aws_assume_role_policy.md | 1 + docs/data-sources/aws_bucket_policy.md | 1 + docs/data-sources/aws_crossaccount_policy.md | 1 + .../aws_unity_catalog_assume_role_policy.md | 3 +- docs/data-sources/aws_unity_catalog_policy.md | 1 + 16 files changed, 330 insertions(+), 44 deletions(-) create mode 100644 aws/constants.go diff --git a/aws/constants.go b/aws/constants.go new file mode 100644 index 0000000000..36d9f84ea1 --- /dev/null +++ b/aws/constants.go @@ -0,0 +1,17 @@ +package aws + +var AwsConfig = map[string]map[string]string{ + "aws": { + "accountId": "414351767826", + "logDeliveryIamArn": "arn:aws:iam::414351767826:role/SaasUsageDeliveryRole-prod-IAMRole-3PLHICCRR1TK", + "unityCatalogueIamArn": "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL", + }, + "aws-us-gov": { + "accountId": "044793339203", + "logDeliveryIamArn": "arn:aws-us-gov:iam::044793339203:role/SaasUsageDeliveryRole-prod-aws-gov-IAMRole-L4QM0RCHYQ1G", + "unityCatalogueIamArn": "arn:aws-us-gov:iam::044793339203:role/unity-catalog-prod-UCMasterRole-1QRFA8SGY15OJ", + }, +} + +var AwsPartitions = []string{"aws", "aws-us-gov"} +var AwsPartitionsValidationError = "aws_partition must be either 'aws' or 'aws-us-gov'" diff --git a/aws/data_aws_assume_role_policy.go b/aws/data_aws_assume_role_policy.go index 576321d819..1cbbed669d 100644 --- a/aws/data_aws_assume_role_policy.go +++ b/aws/data_aws_assume_role_policy.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) type awsIamPolicy struct { @@ -31,6 +32,13 @@ func DataAwsAssumeRolePolicy() common.Resource { return common.Resource{ Read: func(ctx context.Context, d *schema.ResourceData, m *common.DatabricksClient) error { externalID := d.Get("external_id").(string) + awsPartition := d.Get("aws_partition").(string) + databricksAwsAccountId := d.Get("databricks_account_id").(string) + + if databricksAwsAccountId == "" { + databricksAwsAccountId = AwsConfig[awsPartition]["accountId"] + } + policy := awsIamPolicy{ Version: "2012-10-17", Statements: []*awsIamPolicyStatement{ @@ -43,16 +51,14 @@ func DataAwsAssumeRolePolicy() common.Resource { }, }, Principal: map[string]string{ - "AWS": fmt.Sprintf("arn:aws:iam::%s:root", d.Get("databricks_account_id").(string)), + "AWS": fmt.Sprintf("arn:%s:iam::%s:root", awsPartition, databricksAwsAccountId), }, }, }, } if v, ok := d.GetOk("for_log_delivery"); ok { if v.(bool) { - // this is production UsageDelivery IAM role, that is considered a constant - logDeliveryARN := "arn:aws:iam::414351767826:role/SaasUsageDeliveryRole-prod-IAMRole-3PLHICCRR1TK" - policy.Statements[0].Principal["AWS"] = logDeliveryARN + policy.Statements[0].Principal["AWS"] = AwsConfig[awsPartition]["logDeliveryIamArn"] } } policyJSON, err := json.MarshalIndent(policy, "", " ") @@ -65,10 +71,16 @@ func DataAwsAssumeRolePolicy() common.Resource { return nil }, Schema: map[string]*schema.Schema{ + "aws_partition": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(AwsPartitions, false), + Default: "aws", + }, "databricks_account_id": { - Type: schema.TypeString, - Default: "414351767826", - Optional: true, + Type: schema.TypeString, + Optional: true, + Deprecated: "databricks_account_id will be will be removed in the next major release.", }, "for_log_delivery": { 
Type: schema.TypeBool, diff --git a/aws/data_aws_assume_role_policy_test.go b/aws/data_aws_assume_role_policy_test.go index 7322660420..f4a1fa1998 100644 --- a/aws/data_aws_assume_role_policy_test.go +++ b/aws/data_aws_assume_role_policy_test.go @@ -19,3 +19,52 @@ func TestDataAwsAssumeRolePolicy(t *testing.T) { j := d.Get("json") assert.Lenf(t, j, 299, "Strange length for policy: %s", j) } + +func TestDataAwsAssumeRolePolicyGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_partition = "aws-us-gov" + external_id = "abc" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 306, "Strange length for policy: %s", j) +} + +func TestDataAwsAssumeRolePolicyLogDelivery(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + external_id = "abc" + for_log_delivery = true + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 347, "Strange length for policy: %s", j) +} + +func TestDataAwsAssumeRolePolicyLogDeliveryGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_partition = "aws-us-gov" + external_id = "abc" + for_log_delivery = true + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 362, "Strange length for policy: %s", j) +} diff --git a/aws/data_aws_bucket_policy.go b/aws/data_aws_bucket_policy.go index b1cc42a8f4..dc8394d85e 100644 --- a/aws/data_aws_bucket_policy.go +++ b/aws/data_aws_bucket_policy.go @@ -16,6 +16,13 @@ func DataAwsBucketPolicy() common.Resource { return common.Resource{ Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { bucket := d.Get("bucket").(string) + awsPartition := d.Get("aws_partition").(string) + databricksAwsAccountId := AwsConfig[awsPartition]["accountId"] + + if databricksAwsAccountId == "" { + databricksAwsAccountId = AwsConfig[awsPartition]["accountId"] + } + policy := awsIamPolicy{ Version: "2012-10-17", Statements: []*awsIamPolicyStatement{ @@ -30,11 +37,11 @@ func DataAwsBucketPolicy() common.Resource { "s3:GetBucketLocation", }, Resources: []string{ - fmt.Sprintf("arn:aws:s3:::%s/*", bucket), - fmt.Sprintf("arn:aws:s3:::%s", bucket), + fmt.Sprintf("arn:%s:s3:::%s/*", awsPartition, bucket), + fmt.Sprintf("arn:%s:s3:::%s", awsPartition, bucket), }, Principal: map[string]string{ - "AWS": fmt.Sprintf("arn:aws:iam::%s:root", d.Get("databricks_account_id").(string)), + "AWS": fmt.Sprintf("arn:%s:iam::%s:root", awsPartition, databricksAwsAccountId), }, }, }, @@ -60,10 +67,16 @@ func DataAwsBucketPolicy() common.Resource { return nil }, Schema: map[string]*schema.Schema{ + "aws_partition": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(AwsPartitions, false), + Default: "aws", + }, "databricks_account_id": { - Type: schema.TypeString, - Default: "414351767826", - Optional: true, + Type: schema.TypeString, + Optional: true, + Deprecated: "databricks_account_id will be will be removed in the next major release.", }, "databricks_e2_account_id": { Type: schema.TypeString, diff --git a/aws/data_aws_bucket_policy_test.go b/aws/data_aws_bucket_policy_test.go index 75f3a13645..5ec6c763b2 100644 --- a/aws/data_aws_bucket_policy_test.go +++ b/aws/data_aws_bucket_policy_test.go @@ -53,3 +53,19 @@ func 
TestDataAwsBucketPolicyConfusedDeputyProblem(t *testing.T) { j := d.Get("json") assert.Lenf(t, j, 575, "Strange length for policy: %s", j) } + +func TestDataAwsBucketPolicyPartitionGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsBucketPolicy(), + NonWritable: true, + ID: ".", + HCL: ` + bucket = "abc" + aws_partition = "aws-us-gov" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 461, "Strange length for policy: %s", j) +} diff --git a/aws/data_aws_crossaccount_policy.go b/aws/data_aws_crossaccount_policy.go index a5da5d9365..46ff5a6b9e 100644 --- a/aws/data_aws_crossaccount_policy.go +++ b/aws/data_aws_crossaccount_policy.go @@ -3,6 +3,7 @@ package aws import ( "context" "encoding/json" + "errors" "fmt" "regexp" "slices" @@ -17,11 +18,16 @@ func DataAwsCrossaccountPolicy() common.Resource { PassRole []string `json:"pass_roles,omitempty"` JSON string `json:"json" tf:"computed"` AwsAccountId string `json:"aws_account_id,omitempty"` + AwsPartition string `json:"aws_partition,omitempty" tf:"default:aws"` VpcId string `json:"vpc_id,omitempty"` Region string `json:"region,omitempty"` SecurityGroupId string `json:"security_group_id,omitempty"` } return common.NoClientData(func(ctx context.Context, data *AwsCrossAccountPolicy) error { + if !slices.Contains(AwsPartitions, data.AwsPartition) { + return errors.New(AwsPartitionsValidationError) + } + if !slices.Contains([]string{"managed", "customer", "restricted"}, data.PolicyType) { return fmt.Errorf("policy_type must be either 'managed', 'customer' or 'restricted'") } @@ -145,7 +151,7 @@ func DataAwsCrossaccountPolicy() common.Resource { "iam:CreateServiceLinkedRole", "iam:PutRolePolicy", }, - Resources: "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + Resources: fmt.Sprintf("arn:%s:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", data.AwsPartition), Condition: map[string]map[string]string{ "StringLike": { "iam:AWSServiceName": "spot.amazonaws.com", @@ -168,6 +174,7 @@ func DataAwsCrossaccountPolicy() common.Resource { if data.PolicyType == "restricted" { region := data.Region aws_account_id := data.AwsAccountId + awsPartition := data.AwsPartition vpc_id := data.VpcId security_group_id := data.SecurityGroupId policy.Statements = append(policy.Statements, @@ -179,7 +186,7 @@ func DataAwsCrossaccountPolicy() common.Resource { "ec2:DisassociateIamInstanceProfile", "ec2:ReplaceIamInstanceProfileAssociation", }, - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), + Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), Condition: map[string]map[string]string{ "StringEquals": { "ec2:ResourceTag/Vendor": "Databricks", @@ -191,8 +198,8 @@ func DataAwsCrossaccountPolicy() common.Resource { Effect: "Allow", Actions: "ec2:RunInstances", Resources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), }, Condition: map[string]map[string]string{ "StringEquals": { @@ -204,7 +211,7 @@ func DataAwsCrossaccountPolicy() common.Resource { Sid: "AllowEc2RunInstanceImagePerTag", Effect: "Allow", Actions: "ec2:RunInstances", - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:image/*", region, aws_account_id), + 
Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:image/*", awsPartition, region, aws_account_id), Condition: map[string]map[string]string{ "StringEquals": { "aws:ResourceTag/Vendor": "Databricks", @@ -216,13 +223,13 @@ func DataAwsCrossaccountPolicy() common.Resource { Effect: "Allow", Actions: "ec2:RunInstances", Resources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:network-interface/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:subnet/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:security-group/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:network-interface/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:subnet/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:security-group/*", awsPartition, region, aws_account_id), }, Condition: map[string]map[string]string{ "StringEquals": { - "ec2:vpc": fmt.Sprintf("arn:aws:ec2:%s:%s:vpc/%s", region, aws_account_id, vpc_id), + "ec2:vpc": fmt.Sprintf("arn:%s:ec2:%s:%s:vpc/%s", awsPartition, region, aws_account_id, vpc_id), }, }, }, @@ -231,19 +238,19 @@ func DataAwsCrossaccountPolicy() common.Resource { Effect: "Allow", Actions: "ec2:RunInstances", NotResources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:image/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:network-interface/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:subnet/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:security-group/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:image/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:network-interface/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:subnet/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:security-group/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), }, }, &awsIamPolicyStatement{ Sid: "EC2TerminateInstancesTag", Effect: "Allow", Actions: "ec2:TerminateInstances", - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), + Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), Condition: map[string]map[string]string{ "StringEquals": { "ec2:ResourceTag/Vendor": "Databricks", @@ -258,8 +265,8 @@ func DataAwsCrossaccountPolicy() common.Resource { "ec2:DetachVolume", }, Resources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:instance/*", region, aws_account_id), - fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:instance/*", awsPartition, region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), }, Condition: map[string]map[string]string{ "StringEquals": { @@ -271,7 +278,7 @@ func DataAwsCrossaccountPolicy() common.Resource { Sid: "EC2CreateVolumeByTag", Effect: "Allow", Actions: "ec2:CreateVolume", - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), + Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), Condition: map[string]map[string]string{ "StringEquals": { "aws:RequestTag/Vendor": "Databricks", @@ -283,7 +290,7 @@ func DataAwsCrossaccountPolicy() common.Resource { Effect: "Allow", Actions: 
"ec2:DeleteVolume", Resources: []string{ - fmt.Sprintf("arn:aws:ec2:%s:%s:volume/*", region, aws_account_id), + fmt.Sprintf("arn:%s:ec2:%s:%s:volume/*", awsPartition, region, aws_account_id), }, Condition: map[string]map[string]string{ "StringEquals": { @@ -300,10 +307,10 @@ func DataAwsCrossaccountPolicy() common.Resource { "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", }, - Resources: fmt.Sprintf("arn:aws:ec2:%s:%s:security-group/%s", region, aws_account_id, security_group_id), + Resources: fmt.Sprintf("arn:%s:ec2:%s:%s:security-group/%s", awsPartition, region, aws_account_id, security_group_id), Condition: map[string]map[string]string{ "StringEquals": { - "ec2:vpc": fmt.Sprintf("arn:aws:ec2:%s:%s:vpc/%s", region, aws_account_id, vpc_id), + "ec2:vpc": fmt.Sprintf("arn:%s:ec2:%s:%s:vpc/%s", awsPartition, region, aws_account_id, vpc_id), }, }, }, diff --git a/aws/data_aws_crossaccount_policy_test.go b/aws/data_aws_crossaccount_policy_test.go index 177cb166e9..6832807be5 100644 --- a/aws/data_aws_crossaccount_policy_test.go +++ b/aws/data_aws_crossaccount_policy_test.go @@ -530,6 +530,25 @@ func TestDataAwsCrossAccountRestrictedPolicy(t *testing.T) { assert.Lenf(t, j, 5725, "Strange length for policy: %s", j) } +func TestDataAwsCrossAccountRestrictedPolicyPartitionGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: ` + policy_type = "restricted" + aws_account_id = "123456789012" + aws_partition = "aws-us-gov" + vpc_id = "vpc-12345678" + region = "us-west-2" + security_group_id = "sg-12345678"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json") + assert.Lenf(t, j, 5879, "Strange length for policy: %s", j) +} + func TestDataAwsCrossAccountInvalidPolicy(t *testing.T) { qa.ResourceFixture{ Read: true, @@ -552,6 +571,16 @@ func TestDataAwsCrossAccountInvalidAccountId(t *testing.T) { }.ExpectError(t, "aws_account_id must be a 12 digit number") } +func TestDataAwsCrossAccountInvalidPartition(t *testing.T) { + qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `aws_partition = "something"`, + ID: ".", + }.ExpectError(t, AwsPartitionsValidationError) +} + func TestDataAwsCrossAccountInvalidVpcId(t *testing.T) { qa.ResourceFixture{ Read: true, diff --git a/aws/data_aws_unity_catalog_assume_role_policy.go b/aws/data_aws_unity_catalog_assume_role_policy.go index d4706bdca5..a90ab98505 100644 --- a/aws/data_aws_unity_catalog_assume_role_policy.go +++ b/aws/data_aws_unity_catalog_assume_role_policy.go @@ -3,7 +3,9 @@ package aws import ( "context" "encoding/json" + "errors" "fmt" + "slices" "github.com/databricks/terraform-provider-databricks/common" ) @@ -14,13 +16,19 @@ func DataAwsUnityCatalogAssumeRolePolicy() common.Resource { UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty" tf:"computed"` ExternalId string `json:"external_id"` AwsAccountId string `json:"aws_account_id"` + AwsPartition string `json:"aws_partition,omitempty" tf:"default:aws"` JSON string `json:"json" tf:"computed"` Id string `json:"id" tf:"computed"` } return common.NoClientData(func(ctx context.Context, data *AwsUcAssumeRolePolicy) error { + if !slices.Contains(AwsPartitions, data.AwsPartition) { + return errors.New(AwsPartitionsValidationError) + } + if data.UnityCatalogIamArn == "" { - data.UnityCatalogIamArn = "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" + data.UnityCatalogIamArn = 
AwsConfig[data.AwsPartition]["unityCatalogueIamArn"] } + policy := awsIamPolicy{ Version: "2012-10-17", Statements: []*awsIamPolicyStatement{ @@ -43,11 +51,11 @@ func DataAwsUnityCatalogAssumeRolePolicy() common.Resource { Actions: "sts:AssumeRole", Condition: map[string]map[string]string{ "ArnLike": { - "aws:PrincipalArn": fmt.Sprintf("arn:aws:iam::%s:role/%s", data.AwsAccountId, data.RoleName), + "aws:PrincipalArn": fmt.Sprintf("arn:%s:iam::%s:role/%s", data.AwsPartition, data.AwsAccountId, data.RoleName), }, }, Principal: map[string]string{ - "AWS": fmt.Sprintf("arn:aws:iam::%s:root", data.AwsAccountId), + "AWS": fmt.Sprintf("arn:%s:iam::%s:root", data.AwsPartition, data.AwsAccountId), }, }, }, diff --git a/aws/data_aws_unity_catalog_assume_role_policy_test.go b/aws/data_aws_unity_catalog_assume_role_policy_test.go index 30c1d89f2d..4f2da8932e 100644 --- a/aws/data_aws_unity_catalog_assume_role_policy_test.go +++ b/aws/data_aws_unity_catalog_assume_role_policy_test.go @@ -103,3 +103,68 @@ func TestDataAwsUnityCatalogAssumeRolePolicyWithoutUcArn(t *testing.T) { }` compareJSON(t, j, p) } + +func TestDataAwsUnityCatalogAssumeRolePolicyGovWithoutUcArn(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsUnityCatalogAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_account_id = "123456789098" + aws_partition = "aws-us-gov" + role_name = "databricks-role" + external_id = "12345" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json").(string) + p := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "UnityCatalogAssumeRole", + "Effect": "Allow", + "Action": "sts:AssumeRole", + "Principal": { + "AWS": "arn:aws-us-gov:iam::044793339203:role/unity-catalog-prod-UCMasterRole-1QRFA8SGY15OJ" + }, + "Condition": { + "StringEquals": { + "sts:ExternalId": "12345" + } + } + }, + { + "Sid": "ExplicitSelfRoleAssumption", + "Effect": "Allow", + "Action": "sts:AssumeRole", + "Principal": { + "AWS": "arn:aws-us-gov:iam::123456789098:root" + }, + "Condition": { + "ArnLike": { + "aws:PrincipalArn": "arn:aws-us-gov:iam::123456789098:role/databricks-role" + } + } + } + ] + }` + compareJSON(t, j, p) +} + +func TestDataAwsUnityCatalogAssumeRolePolicyInvalidPartition(t *testing.T) { + qa.ResourceFixture{ + Read: true, + Resource: DataAwsUnityCatalogAssumeRolePolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_account_id = "123456789098" + aws_partition = "something" + role_name = "databricks-role" + unity_catalog_iam_arn = "arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL" + external_id = "12345" + `, + }.ExpectError(t, AwsPartitionsValidationError) +} diff --git a/aws/data_aws_unity_catalog_policy.go b/aws/data_aws_unity_catalog_policy.go index d332b84348..5dbc565b38 100644 --- a/aws/data_aws_unity_catalog_policy.go +++ b/aws/data_aws_unity_catalog_policy.go @@ -15,6 +15,7 @@ import ( func generateReadContext(ctx context.Context, d *schema.ResourceData, m *common.DatabricksClient) error { bucket := d.Get("bucket_name").(string) awsAccountId := d.Get("aws_account_id").(string) + awsPartition := d.Get("aws_partition").(string) roleName := d.Get("role_name").(string) policy := awsIamPolicy{ Version: "2012-10-17", @@ -29,8 +30,8 @@ func generateReadContext(ctx context.Context, d *schema.ResourceData, m *common. 
"s3:GetBucketLocation", }, Resources: []string{ - fmt.Sprintf("arn:aws:s3:::%s/*", bucket), - fmt.Sprintf("arn:aws:s3:::%s", bucket), + fmt.Sprintf("arn:%s:s3:::%s/*", awsPartition, bucket), + fmt.Sprintf("arn:%s:s3:::%s", awsPartition, bucket), }, }, { @@ -39,14 +40,14 @@ func generateReadContext(ctx context.Context, d *schema.ResourceData, m *common. "sts:AssumeRole", }, Resources: []string{ - fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountId, roleName), + fmt.Sprintf("arn:%s:iam::%s:role/%s", awsPartition, awsAccountId, roleName), }, }, }, } if kmsKey, ok := d.GetOk("kms_name"); ok { - kmsArn := fmt.Sprintf("arn:aws:kms:%s", kmsKey) - if strings.HasPrefix(kmsKey.(string), "arn:aws") { + kmsArn := fmt.Sprintf("arn:%s:kms:%s", awsPartition, kmsKey) + if strings.HasPrefix(kmsKey.(string), fmt.Sprintf("arn:%s", awsPartition)) { kmsArn = kmsKey.(string) } policy.Statements = append(policy.Statements, &awsIamPolicyStatement{ @@ -92,6 +93,12 @@ func validateSchema() map[string]*schema.Schema { Type: schema.TypeString, Required: true, }, + "aws_partition": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(AwsPartitions, false), + Default: "aws", + }, "json": { Type: schema.TypeString, Computed: true, diff --git a/aws/data_aws_unity_catalog_policy_test.go b/aws/data_aws_unity_catalog_policy_test.go index 28a45a4f16..6ca159e290 100644 --- a/aws/data_aws_unity_catalog_policy_test.go +++ b/aws/data_aws_unity_catalog_policy_test.go @@ -167,6 +167,64 @@ func TestDataAwsUnityCatalogPolicyWithoutKMS(t *testing.T) { compareJSON(t, j, p) } +func TestDataAwsUnityCatalogPolicyPartionGov(t *testing.T) { + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsUnityCatalogPolicy(), + NonWritable: true, + ID: ".", + HCL: ` + aws_account_id = "123456789098" + aws_partition = "aws-us-gov" + bucket_name = "databricks-bucket" + role_name = "databricks-role" + kms_name = "databricks-kms" + `, + }.Apply(t) + assert.NoError(t, err) + j := d.Get("json").(string) + p := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": [ + "arn:aws-us-gov:s3:::databricks-bucket/*", + "arn:aws-us-gov:s3:::databricks-bucket" + ] + }, + { + "Effect": "Allow", + "Action": [ + "sts:AssumeRole" + ], + "Resource": [ + "arn:aws-us-gov:iam::123456789098:role/databricks-role" + ] + }, + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:Encrypt", + "kms:GenerateDataKey*" + ], + "Resource": [ + "arn:aws-us-gov:kms:databricks-kms" + ] + } + ] + }` + compareJSON(t, j, p) +} + func compareJSON(t *testing.T, json1 string, json2 string) { var i1 interface{} var i2 interface{} diff --git a/docs/data-sources/aws_assume_role_policy.md b/docs/data-sources/aws_assume_role_policy.md index 73d6fb0e11..d46f3520b7 100644 --- a/docs/data-sources/aws_assume_role_policy.md +++ b/docs/data-sources/aws_assume_role_policy.md @@ -49,6 +49,7 @@ resource "databricks_mws_credentials" "this" { ## Argument Reference * `external_id` (Required) Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `for_log_delivery` (Optional) Either or not this assume role policy should be created for usage log delivery. Defaults to false. 
## Attribute Reference diff --git a/docs/data-sources/aws_bucket_policy.md b/docs/data-sources/aws_bucket_policy.md index e42949e06a..f2e99edc2a 100644 --- a/docs/data-sources/aws_bucket_policy.md +++ b/docs/data-sources/aws_bucket_policy.md @@ -75,6 +75,7 @@ resource "aws_s3_bucket_policy" "ds" { ## Argument Reference * `bucket` - (Required) AWS S3 Bucket name for which to generate the policy document. +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `full_access_role` - (Optional) Data access role that can have full access for this bucket * `databricks_e2_account_id` - (Optional) Your Databricks account ID. Used to generate restrictive IAM policies that will increase the security of your root bucket diff --git a/docs/data-sources/aws_crossaccount_policy.md b/docs/data-sources/aws_crossaccount_policy.md index 715cf59b15..883bd1b490 100644 --- a/docs/data-sources/aws_crossaccount_policy.md +++ b/docs/data-sources/aws_crossaccount_policy.md @@ -21,6 +21,7 @@ data "databricks_aws_crossaccount_policy" "this" {} * `pass_roles` (Optional) (List) List of Data IAM role ARNs that are explicitly granted `iam:PassRole` action. The below arguments are only valid for `restricted` policy type * `aws_account_id` — Your AWS account ID, which is a number. +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `vpc_id` — ID of the AWS VPC where you want to launch workspaces. * `region` — AWS Region name for your VPC deployment, for example `us-west-2`. * `security_group_id` — ID of your AWS security group. When you add a security group restriction, you cannot reuse the cross-account IAM role or reference a credentials ID (`credentials_id`) for any other workspaces. For those other workspaces, you must create separate roles, policies, and credentials objects. diff --git a/docs/data-sources/aws_unity_catalog_assume_role_policy.md b/docs/data-sources/aws_unity_catalog_assume_role_policy.md index c5f66ddec9..1619855ca9 100644 --- a/docs/data-sources/aws_unity_catalog_assume_role_policy.md +++ b/docs/data-sources/aws_unity_catalog_assume_role_policy.md @@ -38,9 +38,10 @@ resource "aws_iam_role" "metastore_data_access" { ## Argument Reference * `aws_account_id` (Required) The Account ID of the current AWS account (not your Databricks account). +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `external_id` (Required) The [storage credential](../resources/storage_credential.md) external id. * `role_name` (Required) The name of the AWS IAM role to be created for Unity Catalog. -* `unity_catalog_iam_arn` (Optional) The Databricks Unity Catalog IAM Role ARN. Defaults to `arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL` +* `unity_catalog_iam_arn` (Optional) The Databricks Unity Catalog IAM Role ARN. 
Defaults to `arn:aws:iam::414351767826:role/unity-catalog-prod-UCMasterRole-14S5ZJVKOTYTL` on standard AWS partition selection and `arn:aws-us-gov:iam::044793339203:role/unity-catalog-prod-UCMasterRole-1QRFA8SGY15OJ` on GovCloud partition selection ## Attribute Reference diff --git a/docs/data-sources/aws_unity_catalog_policy.md b/docs/data-sources/aws_unity_catalog_policy.md index 3804b1d5fa..2e65039d57 100644 --- a/docs/data-sources/aws_unity_catalog_policy.md +++ b/docs/data-sources/aws_unity_catalog_policy.md @@ -38,6 +38,7 @@ resource "aws_iam_role" "metastore_data_access" { ## Argument Reference * `aws_account_id` (Required) The Account ID of the current AWS account (not your Databricks account). +* `aws_partition` - (Optional) AWS partition. The options are `aws` or `aws-us-gov`. Defaults to `aws` * `bucket_name` (Required) The name of the S3 bucket used as root storage location for [managed tables](https://docs.databricks.com/data-governance/unity-catalog/index.html#managed-table) in Unity Catalog. * `role_name` (Required) The name of the AWS IAM role that you created in the previous step in the [official documentation](https://docs.databricks.com/data-governance/unity-catalog/get-started.html#configure-a-storage-bucket-and-iam-role-in-aws). * `kms_name` (Optional) If encryption is enabled, provide the ARN of the KMS key that encrypts the S3 bucket contents. If encryption is disabled, do not provide this argument. From 9178630bda6a4163c6248758263f607c8b9a2ee5 Mon Sep 17 00:00:00 2001 From: Brandon Grams <21249739+bgrams@users.noreply.github.com> Date: Tue, 12 Nov 2024 07:22:09 -0600 Subject: [PATCH 95/99] [Fix] Use cluster list API to determine pinned cluster status (#4203) ## Changes Modify `setPinnedStatus` to use the clusters list API internally for determining the pinning status of a cluster. The existing implementation using the cluster events API subjects the resource to drift as events expire after a period of time. Closes #3616 ## Tests * Coverage added to `TestResourceClusterCreate` and `TestResourceClusterCreatePinned`. * Fixtures modified to mock the necessary API calls in all other relevant tests. 
* `TestAccClusterResource_PinAndUnpin` acceptance test added - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- clusters/resource_cluster.go | 27 +-- clusters/resource_cluster_test.go | 320 ++++------------------------ exporter/exporter_test.go | 44 +--- internal/acceptance/cluster_test.go | 22 +- 4 files changed, 81 insertions(+), 332 deletions(-) diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index 3c03502023..a6100ae071 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -513,20 +513,23 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, c *commo } func setPinnedStatus(ctx context.Context, d *schema.ResourceData, clusterAPI compute.ClustersInterface) error { - events, err := clusterAPI.EventsAll(ctx, compute.GetEvents{ - ClusterId: d.Id(), - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, + clusterDetails := clusterAPI.List(ctx, compute.ListClustersRequest{ + FilterBy: &compute.ListClustersFilterBy{ + IsPinned: true, + }, + PageSize: 100, // pinned cluster limit - just get all of them }) - if err != nil { - return err - } - pinnedEvent := compute.EventTypeUnpinned - if len(events) > 0 { - pinnedEvent = events[0].Type + + for clusterDetails.HasNext(ctx) { + detail, err := clusterDetails.Next(ctx) + if err != nil { + return err + } + if detail.ClusterId == d.Id() { + return d.Set("is_pinned", true) + } } - return d.Set("is_pinned", pinnedEvent == compute.EventTypePinned) + return d.Set("is_pinned", false) } func resourceClusterRead(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/clusters/resource_cluster_test.go b/clusters/resource_cluster_test.go index 240b62cb4e..1ef37126d7 100644 --- a/clusters/resource_cluster_test.go +++ b/clusters/resource_cluster_test.go @@ -12,9 +12,18 @@ import ( "github.com/stretchr/testify/require" ) +var nothingPinned = qa.HTTPFixture{ + Method: "GET", + Resource: "/api/2.1/clusters/list?filter_by.is_pinned=true&page_size=100", + Response: compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{}, + }, +} + func TestResourceClusterCreate(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -44,20 +53,6 @@ func TestResourceClusterCreate(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=abc", @@ -79,6 +74,7 @@ func TestResourceClusterCreate(t *testing.T) { }.Apply(t) assert.NoError(t, err) assert.Equal(t, "abc", d.Id()) + assert.Equal(t, false, d.Get("is_pinned")) } func TestResourceClusterCreatePinned(t *testing.T) { @@ -128,24 +124,18 @@ func TestResourceClusterCreatePinned(t *testing.T) { }, }, { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: 
[]compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{ - { - ClusterId: "abc", - Timestamp: int64(123), - Type: compute.EventTypePinned, - Details: &compute.EventDetails{}, - }, - }, - TotalCount: 1, + Method: "GET", + Resource: "/api/2.1/clusters/list?filter_by.is_pinned=true&page_size=100", + Response: compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{{ + ClusterId: "abc", + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + State: compute.StateRunning, + }}, }, }, }, @@ -162,6 +152,7 @@ func TestResourceClusterCreatePinned(t *testing.T) { }.Apply(t) assert.NoError(t, err) assert.Equal(t, "abc", d.Id()) + assert.Equal(t, true, d.Get("is_pinned")) } func TestResourceClusterCreateErrorFollowedByDeletion(t *testing.T) { @@ -278,6 +269,7 @@ func TestResourceClusterCreateErrorFollowedByDeletionError(t *testing.T) { func TestResourceClusterCreate_WithLibraries(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -306,20 +298,6 @@ func TestResourceClusterCreate_WithLibraries(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.0/libraries/install", @@ -474,6 +452,7 @@ func TestResourceClusterCreate_WithLibraries(t *testing.T) { func TestResourceClusterCreatePhoton(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -505,20 +484,6 @@ func TestResourceClusterCreatePhoton(t *testing.T) { RuntimeEngine: "PHOTON", }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=abc", @@ -546,6 +511,7 @@ func TestResourceClusterCreatePhoton(t *testing.T) { func TestResourceClusterCreateNoWait_WithLibraries(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -573,20 +539,6 @@ func TestResourceClusterCreateNoWait_WithLibraries(t *testing.T) { State: compute.StateUnknown, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.0/libraries/install", @@ -638,6 +590,7 @@ func TestResourceClusterCreateNoWait_WithLibraries(t *testing.T) { func TestResourceClusterCreateNoWait(t *testing.T) { d, err := 
qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -667,20 +620,6 @@ func TestResourceClusterCreateNoWait(t *testing.T) { State: compute.StateUnknown, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, }, Create: true, Resource: ResourceCluster(), @@ -728,6 +667,7 @@ func TestResourceClusterCreate_Error(t *testing.T) { func TestResourceClusterRead(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -744,20 +684,6 @@ func TestResourceClusterRead(t *testing.T) { }, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, }, Resource: ResourceCluster(), Read: true, @@ -825,6 +751,7 @@ func TestResourceClusterRead_Error(t *testing.T) { func TestResourceClusterUpdate_ResizeForAutoscalingToNumWorkersCluster(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -842,20 +769,6 @@ func TestResourceClusterUpdate_ResizeForAutoscalingToNumWorkersCluster(t *testin State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/resize", @@ -892,6 +805,7 @@ func TestResourceClusterUpdate_ResizeForAutoscalingToNumWorkersCluster(t *testin func TestResourceClusterUpdate_ResizeForNumWorkersToAutoscalingCluster(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -906,20 +820,6 @@ func TestResourceClusterUpdate_ResizeForNumWorkersToAutoscalingCluster(t *testin State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/resize", @@ -959,6 +859,7 @@ func TestResourceClusterUpdate_ResizeForNumWorkersToAutoscalingCluster(t *testin func TestResourceClusterUpdate_EditNumWorkersWhenClusterTerminated(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -973,20 +874,6 @@ func TestResourceClusterUpdate_EditNumWorkersWhenClusterTerminated(t *testing.T) State: compute.StateTerminated, 
}, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/edit", @@ -1023,6 +910,7 @@ func TestResourceClusterUpdate_EditNumWorkersWhenClusterTerminated(t *testing.T) func TestResourceClusterUpdate_ResizeAutoscale(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1049,20 +937,6 @@ func TestResourceClusterUpdate_ResizeAutoscale(t *testing.T) { }, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, }, ID: "abc", Update: true, @@ -1094,6 +968,7 @@ func TestResourceClusterUpdate_ResizeAutoscale(t *testing.T) { func TestResourceClusterUpdate_ResizeNumWorkers(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1108,20 +983,6 @@ func TestResourceClusterUpdate_ResizeNumWorkers(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/resize", @@ -1154,6 +1015,7 @@ func TestResourceClusterUpdate_ResizeNumWorkers(t *testing.T) { func TestResourceClusterUpdate(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1168,20 +1030,6 @@ func TestResourceClusterUpdate(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1234,6 +1082,7 @@ func TestResourceClusterUpdate(t *testing.T) { func TestResourceClusterUpdate_WhileScaling(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1248,20 +1097,6 @@ func TestResourceClusterUpdate_WhileScaling(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - 
TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1330,6 +1165,7 @@ func TestResourceClusterUpdate_WhileScaling(t *testing.T) { func TestResourceClusterUpdateWithPinned(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1344,20 +1180,6 @@ func TestResourceClusterUpdateWithPinned(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1446,6 +1268,7 @@ func TestResourceClusterUpdate_LibrariesChangeOnTerminatedCluster(t *testing.T) } d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, terminated, // 1 of ... { Method: "POST", @@ -1492,20 +1315,6 @@ func TestResourceClusterUpdate_LibrariesChangeOnTerminatedCluster(t *testing.T) State: compute.StateTerminated, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { // start cluster before libs install Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1613,6 +1422,7 @@ func TestResourceClusterUpdate_Error(t *testing.T) { func TestResourceClusterUpdate_AutoAz(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -1632,20 +1442,6 @@ func TestResourceClusterUpdate_AutoAz(t *testing.T) { }, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/start", @@ -1762,6 +1558,7 @@ func TestResourceClusterDelete_Error(t *testing.T) { func TestResourceClusterCreate_SingleNode(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "POST", Resource: "/api/2.1/clusters/create", @@ -1785,20 +1582,6 @@ func TestResourceClusterCreate_SingleNode(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "GET", ReuseRequest: true, @@ -1962,6 +1745,7 @@ func TestReadOnStoppedClusterWithLibrariesDoesNotFail(t *testing.T) { qa.ResourceFixture{ Resource: ResourceCluster(), Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=foo", @@ -1969,10 +1753,6 @@ func 
TestReadOnStoppedClusterWithLibrariesDoesNotFail(t *testing.T) { State: compute.StateTerminated, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - }, { Method: "GET", ReuseRequest: true, @@ -2007,10 +1787,6 @@ func TestRefreshOnRunningClusterWithFailedLibraryUninstallsIt(t *testing.T) { State: compute.StateRunning, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=foo", @@ -2045,6 +1821,7 @@ func TestRefreshOnRunningClusterWithFailedLibraryUninstallsIt(t *testing.T) { }, }, }, + nothingPinned, }, Read: true, ID: "foo", @@ -2054,6 +1831,7 @@ func TestRefreshOnRunningClusterWithFailedLibraryUninstallsIt(t *testing.T) { func TestResourceClusterUpdate_LocalSsdCount(t *testing.T) { _, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ + nothingPinned, { Method: "GET", Resource: "/api/2.1/clusters/get?cluster_id=abc", @@ -2071,20 +1849,6 @@ func TestResourceClusterUpdate_LocalSsdCount(t *testing.T) { }, }, }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "abc", - Limit: 1, - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - }, - Response: compute.GetEventsResponse{ - Events: []compute.ClusterEvent{}, - TotalCount: 0, - }, - }, { Method: "POST", Resource: "/api/2.1/clusters/edit", diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index ad485b9557..120538efd1 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -836,9 +836,12 @@ func TestImportingClusters(t *testing.T) { ReuseRequest: true, }, { - Method: "POST", - Resource: "/api/2.1/clusters/events", - Response: compute.GetEvents{}, + Method: "GET", + Resource: "/api/2.1/clusters/list?filter_by.is_pinned=true&page_size=100", + Response: compute.ListClustersResponse{ + Clusters: []compute.ClusterDetails{}, + }, + ReuseRequest: true, }, { Method: "GET", @@ -868,30 +871,6 @@ func TestImportingClusters(t *testing.T) { Resource: "/api/2.1/clusters/get?cluster_id=test2", Response: getJSONObject("test-data/get-cluster-test2-response.json"), }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "test2", - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - Limit: 1, - }, - Response: compute.EventDetails{}, - ReuseRequest: true, - }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "test1", - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - Limit: 1, - }, - Response: compute.EventDetails{}, - ReuseRequest: true, - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=test2", @@ -917,17 +896,6 @@ func TestImportingClusters(t *testing.T) { Resource: "/api/2.1/clusters/get?cluster_id=awscluster", Response: getJSONObject("test-data/get-cluster-awscluster-response.json"), }, - { - Method: "POST", - Resource: "/api/2.1/clusters/events", - ExpectedRequest: compute.GetEvents{ - ClusterId: "awscluster", - Order: compute.GetEventsOrderDesc, - EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned}, - Limit: 1, - }, - Response: compute.EventDetails{}, - }, { Method: "GET", Resource: "/api/2.0/libraries/cluster-status?cluster_id=awscluster", diff --git 
a/internal/acceptance/cluster_test.go b/internal/acceptance/cluster_test.go index f399eece3f..bc1c4023d9 100644 --- a/internal/acceptance/cluster_test.go +++ b/internal/acceptance/cluster_test.go @@ -51,7 +51,7 @@ func TestAccClusterResource_CreateClusterWithLibraries(t *testing.T) { }) } -func singleNodeClusterTemplate(autoTerminationMinutes string) string { +func singleNodeClusterTemplate(autoTerminationMinutes string, isPinned bool) string { return fmt.Sprintf(` data "databricks_spark_version" "latest" { } @@ -61,6 +61,7 @@ func singleNodeClusterTemplate(autoTerminationMinutes string) string { instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" num_workers = 0 autotermination_minutes = %s + is_pinned = %t spark_conf = { "spark.databricks.cluster.profile" = "singleNode" "spark.master" = "local[*]" @@ -69,14 +70,14 @@ func singleNodeClusterTemplate(autoTerminationMinutes string) string { "ResourceClass" = "SingleNode" } } - `, autoTerminationMinutes) + `, autoTerminationMinutes, isPinned) } func TestAccClusterResource_CreateSingleNodeCluster(t *testing.T) { WorkspaceLevel(t, Step{ - Template: singleNodeClusterTemplate("10"), + Template: singleNodeClusterTemplate("10", false), }, Step{ - Template: singleNodeClusterTemplate("20"), + Template: singleNodeClusterTemplate("20", false), }) } @@ -176,6 +177,19 @@ func TestAccClusterResource_WorkloadType(t *testing.T) { }) } +func TestAccClusterResource_PinAndUnpin(t *testing.T) { + WorkspaceLevel(t, Step{ + Template: singleNodeClusterTemplate("10", false), + Check: resource.TestCheckResourceAttr("databricks_cluster.this", "is_pinned", "false"), + }, Step{ + Template: singleNodeClusterTemplate("10", true), + Check: resource.TestCheckResourceAttr("databricks_cluster.this", "is_pinned", "true"), + }, Step{ + Template: singleNodeClusterTemplate("10", false), + Check: resource.TestCheckResourceAttr("databricks_cluster.this", "is_pinned", "false"), + }) +} + func testAccClusterResourceWorkloadTypeTemplate(workloadType string) string { return fmt.Sprintf(` data "databricks_spark_version" "latest" {} From 2b381b07a2148ee4871e83ee321479da7a79cec2 Mon Sep 17 00:00:00 2001 From: ryan-gord-db <60911136+ryan-gord-db@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:07:05 -0500 Subject: [PATCH 96/99] [Doc] Clarify workspace provider config (#4208) ## Changes Specifying the `account_id` field within a workspace provider block may cause an error when creating workspace resources (e.g. [#3495](https://github.com/databricks/terraform-provider-databricks/issues/3495)). This change explicitly calls out this misconfiguration in the docs. ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/guides/troubleshooting.md | 17 +++++++++++++++++ docs/index.md | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md index dadd4a51c9..f5ed88c3ad 100644 --- a/docs/guides/troubleshooting.md +++ b/docs/guides/troubleshooting.md @@ -228,3 +228,20 @@ There could be different reasons for this error: ### Provider "registry.terraform.io/databricks/databricks" planned an invalid value for ...: planned value ... for a non-computed attribute. Starting with version v1.51.0, the Terraform provider for Databricks supports `terraform` versions 1.1.5 and later. Older versions of `terraform`, such as v0.15.5, are known to erroneously generate this error. 
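If in doubt, a minimal `terraform` settings block can pin the documented minimum version so that older binaries fail fast instead of producing this error (the constraint below simply reflects the v1.1.5 minimum mentioned above):

```hcl
terraform {
  # Require a terraform CLI version that the Databricks provider supports.
  required_version = ">= 1.1.5"

  required_providers {
    databricks = {
      source = "databricks/databricks"
    }
  }
}
```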
Check the version of `terraform` that you're using by running `terraform version` and upgrade it if necessary. + +### Error: cannot create ....: invalid Databricks Account configuration + +`....` is the descriptive name of a resource such as `access control rule set`. The error occurs when creating a workspace resource with a provider containing the `account_id` argument e.g.: + +```hcl +provider "databricks" { + host = "https://.cloud.databricks.com" + client_id = "..." + client_secret = "..." + + # This line is the problem + account_id = "..." +} +``` + +Remove the `account_id` argument from the workspace provider to resolve the error. diff --git a/docs/index.md b/docs/index.md index 05081f57ff..f9e14b935b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -237,7 +237,7 @@ Alternatively, you can provide this value as an environment variable `DATABRICKS * `config_file` - (optional) Location of the Databricks CLI credentials file created by `databricks configure --token` command (~/.databrickscfg by default). Check [Databricks CLI documentation](https://docs.databricks.com/dev-tools/cli/index.html#set-up-authentication) for more details. The provider uses configuration file credentials when you don't specify host/token/azure attributes. Alternatively, you can provide this value as an environment variable `DATABRICKS_CONFIG_FILE`. This field defaults to `~/.databrickscfg`. * `profile` - (optional) Connection profile specified within ~/.databrickscfg. Please check [connection profiles section](https://docs.databricks.com/dev-tools/cli/index.html#connection-profiles) for more details. This field defaults to `DEFAULT`. -* `account_id` - (optional for workspace-level operations, but required for account-level) Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). Alternatively, you can provide this value as an environment variable `DATABRICKS_ACCOUNT_ID`. Only has effect when `host = "https://accounts.cloud.databricks.com/"`, and is currently used to provision account admins via [databricks_user](resources/user.md). In the future releases of the provider this property will also be used specify account for `databricks_mws_*` resources as well. +* `account_id` - (required for account-level operations) Account ID found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). Alternatively, you can provide this value as an environment variable `DATABRICKS_ACCOUNT_ID`. Only has effect when `host = "https://accounts.cloud.databricks.com/"`, and is currently used to provision account admins via [databricks_user](resources/user.md). **Note: do NOT use in the workspace-level provider to avoid `...invalid Databricks Account configuration` errors**. * `auth_type` - (optional) enforce specific auth type to be used in very rare cases, where a single Terraform state manages Databricks workspaces on more than one cloud and `more than one authorization method configured` error is a false positive. Valid values are `pat`, `basic`, `oauth-m2m`, `azure-client-secret`, `azure-msi`, `azure-cli`, `github-oidc-azure`, `google-credentials`, and `google-id`. 
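In practice this usually means keeping two provider blocks and giving `account_id` only to the account-level one. A minimal sketch (host values, variables, and the `account` alias are illustrative):

```hcl
# Account-level provider: account_id belongs here and only takes effect
# against the accounts console host.
provider "databricks" {
  alias         = "account"
  host          = "https://accounts.cloud.databricks.com"
  account_id    = var.databricks_account_id
  client_id     = var.client_id
  client_secret = var.client_secret
}

# Workspace-level provider: deliberately omits account_id to avoid the
# "invalid Databricks Account configuration" error described in the
# troubleshooting guide.
provider "databricks" {
  host          = var.workspace_url
  client_id     = var.client_id
  client_secret = var.client_secret
}
```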
## Special configurations for Azure From e3b25617d9c9357eccc10fba2eacbd26ac3e7118 Mon Sep 17 00:00:00 2001 From: zgcalebp <142928130+zgcalebp@users.noreply.github.com> Date: Wed, 13 Nov 2024 05:15:05 -0500 Subject: [PATCH 97/99] [Feature] Update databricks_permissions resource to support vector-search-endpoints (#4209) ## Changes Databricks permissions API has been updated to support `vector-search-endpoints`. This corresponding change to the permissions_definitions would enable the use of the `databricks_permissions` resource to manage ACLs for Vector Search Endpoints leveraging the existing APIs under the hood. Example CLI call confirming support: ``` databricks permissions get vector-search-endpoints {endpoint-id} --debug 18:56:25 INFO start pid=77800 version=0.224.1 args="databricks, permissions, get, vector-search-endpoints, {endpoint-id}, --debug" 18:56:25 INFO Ignoring pat auth, because databricks-cli is preferred pid=77800 sdk=true 18:56:25 INFO Ignoring basic auth, because databricks-cli is preferred pid=77800 sdk=true 18:56:25 INFO Ignoring oauth-m2m auth, because databricks-cli is preferred pid=77800 sdk=true 18:56:25 INFO Refreshed OAuth token from Databricks CLI, expires on 2024-11-11 19:00:53.515729 -0500 EST pid=77800 sdk=true 18:56:25 DEBUG Using Databricks CLI authentication with Databricks OAuth tokens pid=77800 sdk=true 18:56:25 INFO Refreshed OAuth token from Databricks CLI, expires on 2024-11-11 19:00:53.515729 -0500 EST pid=77800 sdk=true 18:56:26 DEBUG GET /api/2.0/permissions/vector-search-endpoints/{endpoint-id} < HTTP/2.0 200 OK < { < "access_control_list": [ .... ``` ## Tests - [X] `make test` run locally - [X] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [X] using Go SDK --------- Co-authored-by: Alex Ott --- docs/resources/permissions.md | 30 +++++++++++++++++++++++ internal/acceptance/permissions_test.go | 23 +++++++++++++++++ internal/acceptance/vector_search_test.go | 11 ++++++++- permissions/permission_definitions.go | 11 +++++++++ permissions/resource_permissions_test.go | 2 +- 5 files changed, 75 insertions(+), 2 deletions(-) diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md index 868e2aa835..9696df577e 100644 --- a/docs/resources/permissions.md +++ b/docs/resources/permissions.md @@ -639,6 +639,35 @@ resource "databricks_permissions" "ml_serving_usage" { } ``` +## Mosaic AI Vector Search usage + +Valid permission levels for [databricks_vector_search_endpoint](vector_search_endpoint.md) are: `CAN_USE` and `CAN_MANAGE`. + +```hcl +resource "databricks_vector_search_endpoint" "this" { + name = "vector-search-test" + endpoint_type = "STANDARD" +} + +resource "databricks_group" "eng" { + display_name = "Engineering" +} + +resource "databricks_permissions" "vector_search_endpoint_usage" { + vector_search_endpoint_id = databricks_vector_search_endpoint.this.endpoint_id + + access_control { + group_name = "users" + permission_level = "CAN_USE" + } + + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_MANAGE" + } +} +``` + ## Passwords usage By default on AWS deployments, all admin users can sign in to Databricks using either SSO or their username and password, and all API users can authenticate to the Databricks REST APIs using their username and password. 
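For example, password-based authentication can be restricted to the `admins` group with a configuration along these lines (the resource name is illustrative):

```hcl
resource "databricks_permissions" "password_usage" {
  authorization = "passwords"

  access_control {
    group_name       = "admins"
    permission_level = "CAN_USE"
  }
}
```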
As an admin, you [can limit](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#optional-configure-password-access-control) admin users’ and API users’ ability to authenticate with their username and password by configuring `CAN_USE` permissions using password access control. @@ -895,6 +924,7 @@ Exactly one of the following arguments is required: - `experiment_id` - [MLflow experiment](mlflow_experiment.md) id - `registered_model_id` - [MLflow registered model](mlflow_model.md) id - `serving_endpoint_id` - [Model Serving](model_serving.md) endpoint id. +- `vector_search_endpoint_id` - [Vector Search](vector_search_endpoint.md) endpoint id. - `authorization` - either [`tokens`](https://docs.databricks.com/administration-guide/access-control/tokens.html) or [`passwords`](https://docs.databricks.com/administration-guide/users-groups/single-sign-on/index.html#configure-password-permission). - `sql_endpoint_id` - [SQL warehouse](sql_endpoint.md) id - `sql_dashboard_id` - [SQL dashboard](sql_dashboard.md) id diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 0fdd5553b8..20dfb564d0 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -866,6 +866,29 @@ func TestAccPermissions_ServingEndpoint(t *testing.T) { }) } +// AlexOtt: Temporary disable as it takes too long to create a new vector search endpoint +// Testing is done in the `vector_search_test.go` +// func TestAccPermissions_VectorSearchEndpoint(t *testing.T) { +// loadDebugEnvIfRunsFromIDE(t, "workspace") +// if isGcp(t) { +// skipf(t)("Vector Search endpoints are not supported on GCP") +// } +// endpointTemplate := ` +// resource "databricks_vector_search_endpoint" "endpoint" { +// name = "{var.STICKY_RANDOM}" +// endpoint_type = "STANDARD" +// } +// ` +// WorkspaceLevel(t, Step{ +// Template: endpointTemplate + makePermissionsTestStage("vector_search_endpoint_id", "databricks_vector_search_endpoint.endpoint.endpoint_id", groupPermissions("CAN_USE")), +// }, Step{ +// Template: endpointTemplate + makePermissionsTestStage("vector_search_endpoint_id", "databricks_vector_search_endpoint.endpoint.endpoint_id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_USE")), +// }, Step{ +// Template: endpointTemplate + makePermissionsTestStage("vector_search_endpoint_id", "databricks_vector_search_endpoint.endpoint.endpoint_id", currentPrincipalPermission(t, "CAN_USE"), groupPermissions("CAN_USE")), +// ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for mlflowExperiment, allowed levels: CAN_MANAGE"), +// }) +// } + func TestAccPermissions_Alert(t *testing.T) { loadDebugEnvIfRunsFromIDE(t, "workspace") alertTemplate := ` diff --git a/internal/acceptance/vector_search_test.go b/internal/acceptance/vector_search_test.go index 2442d0fa05..890f36ca34 100644 --- a/internal/acceptance/vector_search_test.go +++ b/internal/acceptance/vector_search_test.go @@ -20,7 +20,16 @@ func TestUcAccVectorSearchEndpoint(t *testing.T) { resource "databricks_vector_search_endpoint" "this" { name = "%s" endpoint_type = "STANDARD" - } + } + + resource "databricks_permissions" "this" { + vector_search_endpoint_id = databricks_vector_search_endpoint.this.endpoint_id + + access_control { + group_name = "users" + permission_level = "CAN_USE" + } + } `, name), }, ) diff --git a/permissions/permission_definitions.go b/permissions/permission_definitions.go index 48e6d7a56f..398b032a64 
100644 --- a/permissions/permission_definitions.go +++ b/permissions/permission_definitions.go @@ -732,5 +732,16 @@ func allResourcePermissions() []resourcePermissions { updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, }, + { + field: "vector_search_endpoint_id", + objectType: "vector-search-endpoints", + requestObjectType: "vector-search-endpoints", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + }, } } diff --git a/permissions/resource_permissions_test.go b/permissions/resource_permissions_test.go index 7019ae5c56..983b59fbc1 100644 --- a/permissions/resource_permissions_test.go +++ b/permissions/resource_permissions_test.go @@ -593,7 +593,7 @@ func TestResourcePermissionsCreate_invalid(t *testing.T) { qa.ResourceFixture{ Resource: ResourcePermissions(), Create: true, - }.ExpectError(t, "at least one type of resource identifier must be set; allowed fields: authorization, cluster_id, cluster_policy_id, dashboard_id, directory_id, directory_path, experiment_id, instance_pool_id, job_id, notebook_id, notebook_path, pipeline_id, registered_model_id, repo_id, repo_path, serving_endpoint_id, sql_alert_id, sql_dashboard_id, sql_endpoint_id, sql_query_id, workspace_file_id, workspace_file_path") + }.ExpectError(t, "at least one type of resource identifier must be set; allowed fields: authorization, cluster_id, cluster_policy_id, dashboard_id, directory_id, directory_path, experiment_id, instance_pool_id, job_id, notebook_id, notebook_path, pipeline_id, registered_model_id, repo_id, repo_path, serving_endpoint_id, sql_alert_id, sql_dashboard_id, sql_endpoint_id, sql_query_id, vector_search_endpoint_id, workspace_file_id, workspace_file_path") } func TestResourcePermissionsCreate_no_access_control(t *testing.T) { From 8f68baadac798ba407cee32b9912447604916c3e Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 13 Nov 2024 17:22:18 +0530 Subject: [PATCH 98/99] [Fix] Remove single-node validation from jobs clusters (#4216) ## Changes Fixes https://github.com/databricks/cli/issues/1896. Introducing this validation has caused a regression for both DABs and TF customers. This PR removes the validation for job clusters. ## Tests Unit tests. --- clusters/clusters_api.go | 16 ------ clusters/resource_cluster.go | 2 - jobs/jobs_api_go_sdk.go | 18 +++--- jobs/resource_job.go | 13 ----- jobs/resource_job_test.go | 104 +++++++---------------------------- 5 files changed, 28 insertions(+), 125 deletions(-) diff --git a/clusters/clusters_api.go b/clusters/clusters_api.go index 6a08a4a608..308016bda1 100644 --- a/clusters/clusters_api.go +++ b/clusters/clusters_api.go @@ -434,22 +434,6 @@ type Cluster struct { ClusterMounts []MountInfo `json:"cluster_mount_infos,omitempty" tf:"alias:cluster_mount_info"` } -// TODO: Remove this once all the resources using clusters are migrated to Go SDK. -// They would then be using Validate(cluster compute.CreateCluster) defined in resource_cluster.go that is a duplicate of this method but uses Go SDK. 
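// For context: a minimal sketch of the single-node check kept in clusters/resource_cluster.go,
// which after this change is enforced only for the cluster resource and no longer for job
// clusters (helper name is illustrative; field names follow the Go SDK's compute.ClusterSpec):
//
//	func isSingleNodeSpec(spec compute.ClusterSpec) bool {
//		return spec.SparkConf["spark.databricks.cluster.profile"] == "singleNode" &&
//			strings.HasPrefix(spec.SparkConf["spark.master"], "local") &&
//			spec.CustomTags["ResourceClass"] == "SingleNode"
//	}
//
// Job clusters are exempted because these settings may be supplied by a cluster policy
// that the provider cannot inspect.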
-func (cluster Cluster) Validate() error { - // TODO: rewrite with CustomizeDiff - if cluster.NumWorkers > 0 || cluster.Autoscale != nil { - return nil - } - profile := cluster.SparkConf["spark.databricks.cluster.profile"] - master := cluster.SparkConf["spark.master"] - resourceClass := cluster.CustomTags["ResourceClass"] - if profile == "singleNode" && strings.HasPrefix(master, "local") && resourceClass == "SingleNode" { - return nil - } - return errors.New(numWorkerErr) -} - // TODO: Remove this once all the resources using clusters are migrated to Go SDK. // They would then be using ModifyRequestOnInstancePool(cluster *compute.CreateCluster) defined in resource_cluster.go that is a duplicate of this method but uses Go SDK. // ModifyRequestOnInstancePool helps remove all request fields that should not be submitted when instance pool is selected. diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index a6100ae071..28672e2962 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -130,8 +130,6 @@ func ZoneDiffSuppress(k, old, new string, d *schema.ResourceData) bool { return false } -// This method is a duplicate of Validate() in clusters/clusters_api.go that uses Go SDK. -// Long term, Validate() in clusters_api.go will be removed once all the resources using clusters are migrated to Go SDK. func Validate(cluster any) error { var profile, master, resourceClass string switch c := cluster.(type) { diff --git a/jobs/jobs_api_go_sdk.go b/jobs/jobs_api_go_sdk.go index 6051bafae5..15ac33dac7 100644 --- a/jobs/jobs_api_go_sdk.go +++ b/jobs/jobs_api_go_sdk.go @@ -156,12 +156,8 @@ func (c controlRunStateLifecycleManagerGoSdk) OnUpdate(ctx context.Context) erro return StopActiveRun(jobID, c.d.Timeout(schema.TimeoutUpdate), w, ctx) } -func updateAndValidateJobClusterSpec(clusterSpec *compute.ClusterSpec, d *schema.ResourceData) error { - err := clusters.Validate(*clusterSpec) - if err != nil { - return err - } - err = clusters.ModifyRequestOnInstancePool(clusterSpec) +func updateJobClusterSpec(clusterSpec *compute.ClusterSpec, d *schema.ResourceData) error { + err := clusters.ModifyRequestOnInstancePool(clusterSpec) if err != nil { return err } @@ -178,21 +174,21 @@ func updateAndValidateJobClusterSpec(clusterSpec *compute.ClusterSpec, d *schema func prepareJobSettingsForUpdateGoSdk(d *schema.ResourceData, js *JobSettingsResource) error { if js.NewCluster != nil { - err := updateAndValidateJobClusterSpec(js.NewCluster, d) + err := updateJobClusterSpec(js.NewCluster, d) if err != nil { return err } } for _, task := range js.Tasks { if task.NewCluster != nil { - err := updateAndValidateJobClusterSpec(task.NewCluster, d) + err := updateJobClusterSpec(task.NewCluster, d) if err != nil { return err } } } for i := range js.JobClusters { - err := updateAndValidateJobClusterSpec(&js.JobClusters[i].NewCluster, d) + err := updateJobClusterSpec(&js.JobClusters[i].NewCluster, d) if err != nil { return err } @@ -205,14 +201,14 @@ func prepareJobSettingsForCreateGoSdk(d *schema.ResourceData, jc *JobCreateStruc // Before the go-sdk migration, the field `num_workers` was required, so we always sent it. 
for _, task := range jc.Tasks { if task.NewCluster != nil { - err := updateAndValidateJobClusterSpec(task.NewCluster, d) + err := updateJobClusterSpec(task.NewCluster, d) if err != nil { return err } } } for i := range jc.JobClusters { - err := updateAndValidateJobClusterSpec(&jc.JobClusters[i].NewCluster, d) + err := updateJobClusterSpec(&jc.JobClusters[i].NewCluster, d) if err != nil { return err } diff --git a/jobs/resource_job.go b/jobs/resource_job.go index be2b982a79..e619ceac49 100644 --- a/jobs/resource_job.go +++ b/jobs/resource_job.go @@ -1068,19 +1068,6 @@ func ResourceJob() common.Resource { return fmt.Errorf("`control_run_state` must be specified only with `max_concurrent_runs = 1`") } } - for _, task := range js.Tasks { - if task.NewCluster == nil { - continue - } - if err := clusters.Validate(*task.NewCluster); err != nil { - return fmt.Errorf("task %s invalid: %w", task.TaskKey, err) - } - } - if js.NewCluster != nil { - if err := clusters.Validate(*js.NewCluster); err != nil { - return fmt.Errorf("invalid job cluster: %w", err) - } - } return nil }, Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/jobs/resource_job_test.go b/jobs/resource_job_test.go index 75a780c00a..17a8abc08a 100644 --- a/jobs/resource_job_test.go +++ b/jobs/resource_job_test.go @@ -823,6 +823,14 @@ func TestResourceJobCreate_JobClusters(t *testing.T) { NotebookPath: "/Stuff", }, }, + { + TaskKey: "c", + NewCluster: &clusters.Cluster{ + SparkVersion: "d", + NodeTypeID: "e", + NumWorkers: 0, + }, + }, }, MaxConcurrentRuns: 1, JobClusters: []JobCluster{ @@ -839,7 +847,7 @@ func TestResourceJobCreate_JobClusters(t *testing.T) { NewCluster: &clusters.Cluster{ SparkVersion: "x", NodeTypeID: "y", - NumWorkers: 9, + NumWorkers: 0, }, }, }, @@ -883,7 +891,7 @@ func TestResourceJobCreate_JobClusters(t *testing.T) { job_cluster { job_cluster_key = "k" new_cluster { - num_workers = 9 + num_workers = 0 spark_version = "x" node_type_id = "y" } @@ -910,7 +918,17 @@ func TestResourceJobCreate_JobClusters(t *testing.T) { notebook_task { notebook_path = "/Stuff" } - }`, + } + + task { + task_key = "c" + new_cluster { + spark_version = "d" + node_type_id = "e" + num_workers = 0 + } + } + `, }.Apply(t) assert.NoError(t, err) assert.Equal(t, "17", d.Id()) @@ -2031,48 +2049,6 @@ func TestResourceJobCreateFromGitSourceWithoutProviderFail(t *testing.T) { }.ExpectError(t, "git source is not empty but Git Provider is not specified and cannot be guessed by url &{GitBranch: GitCommit: GitProvider: GitSnapshot: GitTag:0.4.8 GitUrl:https://custom.git.hosting.com/databricks/terraform-provider-databricks JobSource: ForceSendFields:[]}") } -func TestResourceJobCreateSingleNode_Fail(t *testing.T) { - _, err := qa.ResourceFixture{ - Create: true, - Resource: ResourceJob(), - HCL: `new_cluster { - num_workers = 0 - spark_version = "7.3.x-scala2.12" - node_type_id = "Standard_DS3_v2" - } - max_concurrent_runs = 1 - max_retries = 3 - min_retry_interval_millis = 5000 - name = "Featurizer" - retry_on_timeout = true - - spark_jar_task { - main_class_name = "com.labs.BarMain" - } - library { - jar = "dbfs://aa/bb/cc.jar" - } - library { - jar = "dbfs://ff/gg/hh.jar" - }`, - }.Apply(t) - assert.ErrorContains(t, err, `num_workers may be 0 only for single-node clusters. 
To create a single node -cluster please include the following configuration in your cluster configuration: - - spark_conf = { - "spark.databricks.cluster.profile" : "singleNode" - "spark.master" : "local[*]" - } - - custom_tags = { - "ResourceClass" = "SingleNode" - } - -Please note that the Databricks Terraform provider cannot detect if the above configuration -is defined in a policy used by the cluster. Please define this in the cluster configuration -itself to create a single node cluster.`) -} - func TestResourceJobRead(t *testing.T) { d, err := qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ @@ -2938,44 +2914,6 @@ func TestResourceJobDelete(t *testing.T) { assert.Equal(t, "789", d.Id()) } -func TestResourceJobUpdate_FailNumWorkersZero(t *testing.T) { - _, err := qa.ResourceFixture{ - ID: "789", - Update: true, - Resource: ResourceJob(), - HCL: `new_cluster { - num_workers = 0 - spark_version = "7.3.x-scala2.12" - node_type_id = "Standard_DS3_v2" - } - max_concurrent_runs = 1 - max_retries = 3 - min_retry_interval_millis = 5000 - name = "Featurizer New" - retry_on_timeout = true - - spark_jar_task { - main_class_name = "com.labs.BarMain" - parameters = ["--cleanup", "full"] - }`, - }.Apply(t) - assert.ErrorContains(t, err, `num_workers may be 0 only for single-node clusters. To create a single node -cluster please include the following configuration in your cluster configuration: - - spark_conf = { - "spark.databricks.cluster.profile" : "singleNode" - "spark.master" : "local[*]" - } - - custom_tags = { - "ResourceClass" = "SingleNode" - } - -Please note that the Databricks Terraform provider cannot detect if the above configuration -is defined in a policy used by the cluster. Please define this in the cluster configuration -itself to create a single node cluster.`) -} - func TestJobsAPIList(t *testing.T) { qa.HTTPFixturesApply(t, []qa.HTTPFixture{ { From 714e78cc604db37fa75f6d97de9fe1ae93597be1 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 14 Nov 2024 02:43:19 -0500 Subject: [PATCH 99/99] [Dependency] Bump dependencies for Plugin Framework and SDK v2 (#4215) ## Changes ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- go.mod | 40 ++++++++++++++++--------------- go.sum | 76 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 59 insertions(+), 57 deletions(-) diff --git a/go.mod b/go.mod index 87e265f72e..46f813bfe0 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/databricks/terraform-provider-databricks -go 1.22 +go 1.22.0 + +toolchain go1.22.5 require ( github.com/databricks/databricks-sdk-go v0.51.0 @@ -8,12 +10,12 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl/v2 v2.22.0 - github.com/hashicorp/terraform-plugin-framework v1.11.0 - github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 - github.com/hashicorp/terraform-plugin-go v0.23.0 + github.com/hashicorp/terraform-plugin-framework v1.13.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.15.0 + github.com/hashicorp/terraform-plugin-go v0.25.0 github.com/hashicorp/terraform-plugin-log v0.9.0 - github.com/hashicorp/terraform-plugin-mux v0.16.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 + github.com/hashicorp/terraform-plugin-mux v0.17.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.35.0 
github.com/hashicorp/terraform-plugin-testing v1.10.0 github.com/stretchr/testify v1.9.0 github.com/zclconf/go-cty v1.15.0 @@ -23,7 +25,7 @@ require ( require ( cloud.google.com/go/auth v0.4.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -45,14 +47,14 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-plugin v1.6.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect - github.com/hashicorp/hc-install v0.8.0 // indirect + github.com/hashicorp/hc-install v0.9.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect - github.com/hashicorp/terraform-json v0.22.1 // indirect + github.com/hashicorp/terraform-json v0.23.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -74,20 +76,20 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect - google.golang.org/grpc v1.64.1 // indirect - google.golang.org/protobuf v1.34.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 2fe2fa4ab3..c2a71615c8 100644 --- a/go.sum +++ b/go.sum @@ -3,8 +3,8 @@ cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0 
h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -111,8 +111,8 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1 github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-plugin v1.6.2 h1:zdGAEd0V1lCaU0u+MxWQhtSDQmahpkwOun8U8EiRVog= +github.com/hashicorp/go-plugin v1.6.2/go.mod h1:CkgLQ5CZqNmdL9U9JzM532t8ZiYQ35+pj3b1FD37R0Q= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -120,8 +120,8 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.8.0 h1:LdpZeXkZYMQhoKPCecJHlKvUkQFixN/nvyR1CdfOLjI= -github.com/hashicorp/hc-install v0.8.0/go.mod h1:+MwJYjDfCruSD/udvBmRB22Nlkwwkwf5sAB6uTIhSaU= +github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6eLhghE= +github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M= @@ -130,20 +130,20 @@ github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= -github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= -github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-framework v1.11.0 h1:M7+9zBArexHFXDx/pKTxjE6n/2UCXY6b8FIq9ZYhwfE= -github.com/hashicorp/terraform-plugin-framework v1.11.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= -github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= -github.com/hashicorp/terraform-plugin-framework-validators v0.13.0/go.mod h1:wGeI02gEhj9nPANU62F2jCaHjXulejm/X+af4PdZaNo= -github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= 
-github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ=
+github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI=
+github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c=
+github.com/hashicorp/terraform-plugin-framework v1.13.0 h1:8OTG4+oZUfKgnfTdPTJwZ532Bh2BobF4H+yBiYJ/scw=
+github.com/hashicorp/terraform-plugin-framework v1.13.0/go.mod h1:j64rwMGpgM3NYXTKuxrCnyubQb/4VKldEKlcG8cvmjU=
+github.com/hashicorp/terraform-plugin-framework-validators v0.15.0 h1:RXMmu7JgpFjnI1a5QjMCBb11usrW2OtAG+iOTIj5c9Y=
+github.com/hashicorp/terraform-plugin-framework-validators v0.15.0/go.mod h1:Bh89/hNmqsEWug4/XWKYBwtnw3tbz5BAy1L1OgvbIaY=
+github.com/hashicorp/terraform-plugin-go v0.25.0 h1:oi13cx7xXA6QciMcpcFi/rwA974rdTxjqEhXJjbAyks=
+github.com/hashicorp/terraform-plugin-go v0.25.0/go.mod h1:+SYagMYadJP86Kvn+TGeV+ofr/R3g4/If0O5sO96MVw=
 github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0=
 github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow=
-github.com/hashicorp/terraform-plugin-mux v0.16.0 h1:RCzXHGDYwUwwqfYYWJKBFaS3fQsWn/ZECEiW7p2023I=
-github.com/hashicorp/terraform-plugin-mux v0.16.0/go.mod h1:PF79mAsPc8CpusXPfEVa4X8PtkB+ngWoiUClMrNZlYo=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE=
-github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg=
+github.com/hashicorp/terraform-plugin-mux v0.17.0 h1:/J3vv3Ps2ISkbLPiZOLspFcIZ0v5ycUXCEQScudGCCw=
+github.com/hashicorp/terraform-plugin-mux v0.17.0/go.mod h1:yWuM9U1Jg8DryNfvCp+lH70WcYv6D8aooQxxxIzFDsE=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.35.0 h1:wyKCCtn6pBBL46c1uIIBNUOWlNfYXfXpVo16iDyLp8Y=
+github.com/hashicorp/terraform-plugin-sdk/v2 v2.35.0/go.mod h1:B0Al8NyYVr8Mp/KLwssKXG1RqnTk7FySqSn4fRuLNgw=
 github.com/hashicorp/terraform-plugin-testing v1.10.0 h1:2+tmRNhvnfE4Bs8rB6v58S/VpqzGC6RCh9Y8ujdn+aw=
 github.com/hashicorp/terraform-plugin-testing v1.10.0/go.mod h1:iWRW3+loP33WMch2P/TEyCxxct/ZEcCGMquSLSCVsrc=
 github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI=
@@ -233,8 +233,8 @@ go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
@@ -242,8 +242,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
-golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -253,11 +253,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
-golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -279,19 +279,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
-golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
-golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
-golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -314,15 +314,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
-google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -334,8 +334,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=