From e504790393db332f22cefbffdec742adca1ec0f9 Mon Sep 17 00:00:00 2001
From: Miles Yucht
Date: Mon, 28 Oct 2024 11:22:23 +0100
Subject: [PATCH 01/14] [Fix] Tolerate `databricks_permissions` resources for SQL warehouses with `/warehouses/...` IDs (#4158)

## Changes

#4143 reported a regression in the `databricks_permissions` resource caused by https://github.com/databricks/terraform-provider-databricks/pull/3956. Normally, the ID for this resource when configured for a SQL warehouse is `/sql/warehouses/`. However, it seems that at some point in the past, some users may have had an ID of `/warehouses/`. Importing such a resource likely still worked because the permissions REST API returns the same permissions whether it is called with object type `sql/warehouses` or `warehouses`:

```
15:13:01 DEBUG GET /api/2.0/permissions/sql/warehouses/
< HTTP/2.0 200 OK
< {
<   "access_control_list": [
<     {
<       "all_permissions": [
<         {
<           "inherited": false,
<           "permission_level": "IS_OWNER"
<         }
<       ],
<       "display_name": "",
<       "user_name": ""
<     },
<     {
<       "all_permissions": [
<         {
<           "inherited": true,
<           "inherited_from_object": [
<             "/sql/warehouses/"
<           ],
<           "permission_level": "CAN_MANAGE"
<         }
<       ],
<       "group_name": "admins"
<     }
<   ],
<   "object_id": "/sql/warehouses/",
<   "object_type": "warehouses"
< } pid=53287 sdk=true
...
15:12:56 DEBUG GET /api/2.0/permissions/warehouses/
< HTTP/2.0 200 OK
< {
<   "access_control_list": [
<     {
<       "all_permissions": [
<         {
<           "inherited": false,
<           "permission_level": "IS_OWNER"
<         }
<       ],
<       "display_name": "",
<       "user_name": ""
<     },
<     {
<       "all_permissions": [
<         {
<           "inherited": true,
<           "inherited_from_object": [
<             "/sql/warehouses/"
<           ],
<           "permission_level": "CAN_MANAGE"
<         }
<       ],
<       "group_name": "admins"
<     }
<   ],
<   "object_id": "/sql/warehouses/",
<   "object_type": "warehouses"
< } pid=53248 sdk=true
```

This PR modifies the SQL warehouse configuration for `databricks_permissions` so that it is also chosen for instances with an ID of the form `/warehouses/...`.

## Tests

The additional integration test ensures that a resource can be imported with the `/warehouses/` format.
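For context, the tolerance this patch adds boils down to a single prefix check. Below is a minimal, self-contained Go sketch of that check; the `warehousePermissionsID` helper name and the IDs used in `main` are illustrative only, but the predicate body mirrors the `idMatcher` added to `permissions/permission_definitions.go` in this patch.

```go
package main

import (
	"fmt"
	"strings"
)

// warehousePermissionsID reports whether a databricks_permissions ID refers to a
// SQL warehouse, accepting both the canonical "/sql/warehouses/..." form and the
// legacy "/warehouses/..." form that some older imports produced.
func warehousePermissionsID(id string) bool {
	return strings.HasPrefix(id, "/sql/warehouses/") || strings.HasPrefix(id, "/warehouses/")
}

func main() {
	for _, id := range []string{
		"/sql/warehouses/abc123", // canonical ID written by the provider
		"/warehouses/abc123",     // legacy ID that must still be accepted
		"/clusters/abc123",       // unrelated object type
	} {
		fmt.Printf("%-24s -> %v\n", id, warehousePermissionsID(id))
	}
}
```

Because no `idRetriever` is defined for this resource, newly created warehouse permissions still get the canonical `/sql/warehouses/` prefix; the relaxed matcher only affects which configuration is selected for an existing ID.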
--- internal/acceptance/permissions_test.go | 26 +++++++++++++++++++++---- permissions/permission_definitions.go | 5 +++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 2033a100ad..325bc398fe 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -617,7 +617,16 @@ func TestAccPermissions_SqlWarehouses(t *testing.T) { resource "databricks_sql_endpoint" "this" { name = "{var.STICKY_RANDOM}" cluster_size = "2X-Small" + tags { + custom_tags { + key = "Owner" + value = "eng-dev-ecosystem-team_at_databricks.com" + } + } }` + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + var warehouseId string WorkspaceLevel(t, Step{ Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", groupPermissions("CAN_USE")), }, Step{ @@ -638,15 +647,24 @@ func TestAccPermissions_SqlWarehouses(t *testing.T) { }, Step{ Template: sqlWarehouseTemplate, Check: func(s *terraform.State) error { - w := databricks.Must(databricks.NewWorkspaceClient()) - id := s.RootModule().Resources["databricks_sql_endpoint.this"].Primary.ID - warehouse, err := w.Warehouses.GetById(context.Background(), id) + warehouseId = s.RootModule().Resources["databricks_sql_endpoint.this"].Primary.ID + warehouse, err := w.Warehouses.GetById(ctx, warehouseId) assert.NoError(t, err) - permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "warehouses", id) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "warehouses", warehouseId) assert.NoError(t, err) assertContainsPermission(t, permissions, currentPrincipalType(t), warehouse.CreatorName, iam.PermissionLevelIsOwner) return nil }, + }, Step{ + // To test import, a new permission must be added to the warehouse, as it is not possible to import databricks_permissions + // for a warehouse that has the default permissions (i.e. current user has IS_OWNER and admins have CAN_MANAGE). + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", groupPermissions("CAN_USE")), + }, Step{ + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", groupPermissions("CAN_USE")), + // Verify that we can use "/warehouses/" instead of "/sql/warehouses/" + ResourceName: "databricks_permissions.this", + ImportState: true, + ImportStateIdFunc: func(s *terraform.State) (string, error) { return "/warehouses/" + warehouseId, nil }, }) } diff --git a/permissions/permission_definitions.go b/permissions/permission_definitions.go index fbc9158517..48e6d7a56f 100644 --- a/permissions/permission_definitions.go +++ b/permissions/permission_definitions.go @@ -558,6 +558,11 @@ func allResourcePermissions() []resourcePermissions { field: "sql_endpoint_id", objectType: "warehouses", requestObjectType: "sql/warehouses", + // ISSUE-4143: some older warehouse permissions have an ID that starts with "/warehouses" instead of "/sql/warehouses" + // Because no idRetriever is defined, any warehouse permissions resources will be created with the "/sql/warehouses" prefix. 
+ idMatcher: func(id string) bool { + return strings.HasPrefix(id, "/sql/warehouses/") || strings.HasPrefix(id, "/warehouses/") + }, allowedPermissionLevels: map[string]permissionLevelOptions{ "CAN_USE": {isManagementPermission: false}, "CAN_MANAGE": {isManagementPermission: true}, From 948bf08769c1cccc82a85e747ea3cb03c2033a5a Mon Sep 17 00:00:00 2001 From: Kohei Watanabe Date: Mon, 28 Oct 2024 22:46:16 +0900 Subject: [PATCH 02/14] [Doc] Fix `databricks_grant` regarding metastore_id description (#4164) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes I found some mistakes in the doc (https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/grant), so let me fix them 🙇 ## Tests - [ ] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- docs/resources/grant.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/resources/grant.md b/docs/resources/grant.md index 5f2f8b4326..9cd9bc0163 100644 --- a/docs/resources/grant.md +++ b/docs/resources/grant.md @@ -30,11 +30,15 @@ See [databricks_grants Metastore grants](grants.md#metastore-grants) for the lis ```hcl resource "databricks_grant" "sandbox_data_engineers" { + metastore = "metastore_id" + principal = "Data Engineers" privileges = ["CREATE_CATALOG", "CREATE_EXTERNAL_LOCATION"] } resource "databricks_grant" "sandbox_data_sharer" { + metastore = "metastore_id" + principal = "Data Sharer" privileges = ["CREATE_RECIPIENT", "CREATE_SHARE"] } @@ -46,7 +50,6 @@ See [databricks_grants Catalog grants](grants.md#catalog-grants) for the list of ```hcl resource "databricks_catalog" "sandbox" { - metastore_id = databricks_metastore.this.id name = "sandbox" comment = "this catalog is managed by terraform" properties = { From 2c13e8df5c75ea782ac58a946dba99efc2059fb9 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Mon, 28 Oct 2024 10:06:19 -0400 Subject: [PATCH 03/14] [Exporter] Improving reliability of `Emit` function (#4163) ## Changes During large-scale testing I found that emitting the same resource is not always handled reliably, which may lead to the generation of duplicate resources (a very small number, but still) - I hit this in a very specific case where notebooks were listed without directories. This PR fixes the problem: - by tracking whether a resource is already in the importing queue (see the sketch below) - by detecting duplicates during code generation It also may improve performance a bit (2-3%).
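To illustrate the first point, here is a minimal sketch of the queue-tracking idea. `importTracker` and `markQueued` are illustrative names rather than the exporter's actual API, and the resource key in `main` is made up, but the pattern - a mutex-guarded map consulted before enqueueing, with an early return for keys that are already present - is the same one this patch applies inside `Emit` and `Add` in `exporter/context.go`.

```go
package main

import (
	"fmt"
	"sync"
)

// importTracker is a simplified, hypothetical stand-in for the exporter's
// `importing` map: it remembers every resource key that has ever been queued,
// so emitting the same resource twice becomes a no-op.
type importTracker struct {
	mu        sync.Mutex
	importing map[string]bool // key -> false while queued, true once fully added
}

// markQueued records that a resource is about to be imported. It returns false
// if the key was already present, mirroring the early return added to Emit so
// that duplicates are skipped instead of being generated twice.
func (t *importTracker) markQueued(key string) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if _, ok := t.importing[key]; ok {
		return false // already queued or added; caller should skip it
	}
	t.importing[key] = false // queued, but not yet fully added
	return true
}

func main() {
	t := &importTracker{importing: map[string]bool{}}
	fmt.Println(t.markQueued("databricks_notebook./Shared/report")) // true: first emit wins
	fmt.Println(t.markQueued("databricks_notebook./Shared/report")) // false: duplicate is skipped
}
```

The boolean distinguishes "queued" (`false`) from "added" (`true`), mirroring the values the patch writes in `Emit` ("we're starting to add a new resource") and `Add` ("mark resource as added").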
## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- exporter/codegen.go | 25 +++++++++++++++---------- exporter/context.go | 41 ++++++++++++++++++++++++++++------------- 2 files changed, 43 insertions(+), 23 deletions(-) diff --git a/exporter/codegen.go b/exporter/codegen.go index dee6f12fd4..549b836ce4 100644 --- a/exporter/codegen.go +++ b/exporter/codegen.go @@ -904,22 +904,27 @@ func (ic *importContext) handleResourceWrite(generatedFile string, ch dataWriteC return } - // newResources := make(map[string]struct{}, 100) log.Printf("[DEBUG] started processing new writes for %s", generatedFile) for f := range ch { if f != nil { - log.Printf("[DEBUG] started writing resource body for %s", f.BlockName) - _, err = tf.WriteString(f.ResourceBody) - if err == nil { - newResources[f.BlockName] = struct{}{} - if f.ImportCommand != "" { - ic.waitGroup.Add(1) - importChan <- f.ImportCommand + // check if we have the same blockname already written. To avoid duplicates + _, exists := newResources[f.BlockName] + if !exists { + log.Printf("[DEBUG] started writing resource body for %s", f.BlockName) + _, err = tf.WriteString(f.ResourceBody) + if err == nil { + newResources[f.BlockName] = struct{}{} + if f.ImportCommand != "" { + ic.waitGroup.Add(1) + importChan <- f.ImportCommand + } + log.Printf("[DEBUG] finished writing resource body for %s", f.BlockName) + } else { + log.Printf("[ERROR] Error when writing to %s: %v", generatedFile, err) } - log.Printf("[DEBUG] finished writing resource body for %s", f.BlockName) } else { - log.Printf("[ERROR] Error when writing to %s: %v", generatedFile, err) + log.Printf("[WARN] Found duplicate resource: '%s'", f.BlockName) } } else { log.Print("[WARN] got nil as resourceWriteData!") diff --git a/exporter/context.go b/exporter/context.go index c7f2b18235..ffb230a4e8 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -204,7 +204,7 @@ var goroutinesNumber = map[string]int{ "databricks_sql_dashboard": 3, "databricks_sql_widget": 4, "databricks_sql_visualization": 4, - "databricks_query": 4, + "databricks_query": 6, "databricks_alert": 2, "databricks_permissions": 11, } @@ -615,17 +615,20 @@ func (ic *importContext) HasInState(r *resource, onlyAdded bool) bool { return ic.State.Has(r) } -func (ic *importContext) setImportingState(s string, state bool) { - ic.importingMutex.Lock() - defer ic.importingMutex.Unlock() - ic.importing[s] = state -} - func (ic *importContext) Add(r *resource) { if ic.HasInState(r, true) { // resource must exist and already marked as added return } - ic.setImportingState(r.String(), true) // mark resource as added + rString := r.String() + ic.importingMutex.Lock() + _, ok := ic.importing[rString] + if ok { + ic.importingMutex.Unlock() + log.Printf("[DEBUG] %s already being added", rString) + return + } + ic.importing[rString] = true // mark resource as added + ic.importingMutex.Unlock() state := r.Data.State() if state == nil { log.Printf("[ERROR] state is nil for %s", r) @@ -648,7 +651,6 @@ func (ic *importContext) Add(r *resource) { Instances: []instanceApproximation{inst}, Resource: r, }) - // in single-threaded scenario scope is toposorted ic.Scope.Append(r) } @@ -727,14 +729,25 @@ func (ic *importContext) Emit(r *resource) { log.Printf("[DEBUG] %s already imported", r) return } + rString := r.String() if ic.testEmits != nil { log.Printf("[INFO] %s is emitted in test mode", r) 
ic.testEmitsMutex.Lock() - ic.testEmits[r.String()] = true + ic.testEmits[rString] = true ic.testEmitsMutex.Unlock() return } - ic.setImportingState(r.String(), false) // we're starting to add a new resource + // we need to check that we're not importing the same resource twice - this may happen under high concurrency + // for specific resources, for example, directories when they aren't part of the listing + ic.importingMutex.Lock() + res, ok := ic.importing[rString] + if ok { + ic.importingMutex.Unlock() + log.Printf("[DEBUG] %s already being imported: %v", rString, res) + return + } + ic.importing[rString] = false // // we're starting to add a new resource + ic.importingMutex.Unlock() _, ok = ic.Resources[r.Resource] if !ok { log.Printf("[ERROR] %s is not available in provider", r) @@ -745,8 +758,10 @@ func (ic *importContext) Emit(r *resource) { log.Printf("[DEBUG] %s (%s service) is not part of the account level export", r.Resource, ir.Service) return } - // TODO: add similar condition for checking workspace-level objects only. After new ACLs import is merged - + if !ic.accountLevel && !ir.WorkspaceLevel { + log.Printf("[DEBUG] %s (%s service) is not part of the workspace level export", r.Resource, ir.Service) + return + } // from here, it should be done by the goroutine... send resource into the channel ch, exists := ic.channels[r.Resource] if exists { From f382e4fb1dc7cc5701b22cd32c597c011c38cb5e Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Mon, 28 Oct 2024 17:58:33 +0100 Subject: [PATCH 04/14] [Release] Release v1.56.0 (#4167) ### Bug Fixes * Recreate missing system schema ([#4068](https://github.com/databricks/terraform-provider-databricks/pull/4068)). * Tolerate `databricks_permissions` resources for SQL warehouses with `/warehouses/...` IDs ([#4158](https://github.com/databricks/terraform-provider-databricks/pull/4158)). ### Documentation * Fix `databricks_grant` regarding metastore_id description ([#4164](https://github.com/databricks/terraform-provider-databricks/pull/4164)). ### Internal Changes * Automatically trigger integration tests on PR ([#4149](https://github.com/databricks/terraform-provider-databricks/pull/4149)). ### Exporter * **Breaking change** Use new query and alert resources instead of legacy resources ([#4150](https://github.com/databricks/terraform-provider-databricks/pull/4150)). * Improve exporting of `databricks_pipeline` resources ([#4142](https://github.com/databricks/terraform-provider-databricks/pull/4142)). * Improving reliability of `Emit` function ([#4163](https://github.com/databricks/terraform-provider-databricks/pull/4163)). --- CHANGELOG.md | 25 +++++++++++++++++++++++++ common/version.go | 2 +- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77a6da9f8d..7f2de01332 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Version changelog +## [Release] Release v1.56.0 + +### Bug Fixes + + * Recreate missing system schema ([#4068](https://github.com/databricks/terraform-provider-databricks/pull/4068)). + * Tolerate `databricks_permissions` resources for SQL warehouses with `/warehouses/...` IDs ([#4158](https://github.com/databricks/terraform-provider-databricks/pull/4158)). + + +### Documentation + + * Fix `databricks_grant` regarding metastore_id description ([#4164](https://github.com/databricks/terraform-provider-databricks/pull/4164)). 
+ + +### Internal Changes + + * Automatically trigger integration tests on PR ([#4149](https://github.com/databricks/terraform-provider-databricks/pull/4149)). + + +### Exporter + + * **Breaking change** Use new query and alert resources instead of legacy resources ([#4150](https://github.com/databricks/terraform-provider-databricks/pull/4150)). + * Improve exporting of `databricks_pipeline` resources ([#4142](https://github.com/databricks/terraform-provider-databricks/pull/4142)). + * Improving reliability of `Emit` function ([#4163](https://github.com/databricks/terraform-provider-databricks/pull/4163)). + + ## [Release] Release v1.55.0 ### New Features and Improvements diff --git a/common/version.go b/common/version.go index c3770f13cd..44a7242a45 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.55.0" + version = "1.56.0" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From 0975310040346b75843993e92f76f8abc891b70c Mon Sep 17 00:00:00 2001 From: Omer Lachish <289488+rauchy@users.noreply.github.com> Date: Tue, 29 Oct 2024 10:26:54 +0100 Subject: [PATCH 05/14] [Internal] Migrate Share Resource to Plugin Framework (#4047) ## Changes This PR migrates the share resource to the Plugin framework. The code was largely copied "as is" from the previous implementation of the share resource, with the necessary adaptations made for integration with the Plugin framework. This implementation utilizes the newly generated Effective fields to provide the functionality that was previously achieved through diff suppression. ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Omer Lachish --- .codegen/model.go.tmpl | 1 + .../providers/pluginfw/converters/tf_to_go.go | 2 +- internal/providers/pluginfw/pluginfw.go | 2 + .../resources/sharing/resource_acc_test.go | 204 +++++++++ .../resources/sharing/resource_share.go | 401 ++++++++++++++++++ internal/service/apps_tf/model.go | 13 + internal/service/catalog_tf/model.go | 2 + internal/service/dashboards_tf/model.go | 51 +++ internal/service/jobs_tf/model.go | 2 + internal/service/provisioning_tf/model.go | 9 + internal/service/settings_tf/model.go | 10 + internal/service/sharing_tf/model.go | 6 + 12 files changed, 702 insertions(+), 1 deletion(-) create mode 100644 internal/providers/pluginfw/resources/sharing/resource_acc_test.go create mode 100644 internal/providers/pluginfw/resources/sharing/resource_share.go diff --git a/.codegen/model.go.tmpl b/.codegen/model.go.tmpl index 7d20bea4e7..714401729e 100644 --- a/.codegen/model.go.tmpl +++ b/.codegen/model.go.tmpl @@ -53,6 +53,7 @@ func (newState *{{.PascalName}}) SyncEffectiveFieldsDuringRead(existingState {{. 
{{- if .Entity.IsFloat64}}{{$type = "Float64"}}{{end}} {{- if .Entity.IsInt}}{{$type = "Int64"}}{{end}} {{- if .Entity.Enum}}{{$type = "String"}}{{end}} + newState.Effective{{.PascalName}} = existingState.Effective{{.PascalName}} if existingState.Effective{{.PascalName}}.Value{{$type}}() == newState.{{.PascalName}}.Value{{$type}}() { newState.{{.PascalName}} = existingState.{{.PascalName}} } diff --git a/internal/providers/pluginfw/converters/tf_to_go.go b/internal/providers/pluginfw/converters/tf_to_go.go index 70efd92a36..27eb02d915 100644 --- a/internal/providers/pluginfw/converters/tf_to_go.go +++ b/internal/providers/pluginfw/converters/tf_to_go.go @@ -184,7 +184,7 @@ func tfsdkToGoSdkStructField(srcField reflect.Value, destField reflect.Value, sr // This is the case for enum. // Skip unset value. - if srcField.IsZero() { + if srcField.IsZero() || v.ValueString() == "" { return } diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index db811d5ae2..53b361f998 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -21,6 +21,7 @@ import ( "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/registered_model" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/sharing" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" "github.com/hashicorp/terraform-plugin-framework/datasource" @@ -47,6 +48,7 @@ func (p *DatabricksProviderPluginFramework) Resources(ctx context.Context) []fun return []func() resource.Resource{ qualitymonitor.ResourceQualityMonitor, library.ResourceLibrary, + sharing.ResourceShare, } } diff --git a/internal/providers/pluginfw/resources/sharing/resource_acc_test.go b/internal/providers/pluginfw/resources/sharing/resource_acc_test.go new file mode 100644 index 0000000000..7018e0b402 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/resource_acc_test.go @@ -0,0 +1,204 @@ +package sharing_test + +import ( + "fmt" + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" +) + +const preTestTemplate = ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.STICKY_RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + } + + resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things{var.STICKY_RANDOM}" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } + } + + resource "databricks_table" "mytable" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = "{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } + + resource "databricks_table" "mytable_2" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar_2" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = 
"{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } + + resource "databricks_table" "mytable_3" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar_3" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = "{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } +` + +const preTestTemplateUpdate = ` + resource "databricks_grants" "some" { + catalog = databricks_catalog.sandbox.id + grant { + principal = "account users" + privileges = ["ALL_PRIVILEGES"] + } + grant { + principal = "{env.TEST_METASTORE_ADMIN_GROUP_NAME}" + privileges = ["ALL_PRIVILEGES"] + } + } +` + +func TestUcAccCreateShare(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplate + ` + resource "databricks_share_pluginframework" "myshare" { + name = "{var.STICKY_RANDOM}-terraform-delta-share" + owner = "account users" + object { + name = databricks_table.mytable.id + comment = "c" + data_object_type = "TABLE" + } + object { + name = databricks_table.mytable_2.id + cdf_enabled = false + comment = "c" + data_object_type = "TABLE" + } + } + + resource "databricks_recipient" "db2open" { + name = "{var.STICKY_RANDOM}-terraform-db2open-recipient" + comment = "made by terraform" + authentication_type = "TOKEN" + sharing_code = "{var.STICKY_RANDOM}" + ip_access_list { + // using private ip for acc testing + allowed_ip_addresses = ["10.0.0.0/16"] + } + } + + resource "databricks_grants" "some" { + share = databricks_share_pluginframework.myshare.name + grant { + principal = databricks_recipient.db2open.name + privileges = ["SELECT"] + } + } + `, + }) +} + +func shareTemplateWithOwner(comment string, owner string) string { + return fmt.Sprintf(` + resource "databricks_share_pluginframework" "myshare" { + name = "{var.STICKY_RANDOM}-terraform-delta-share" + owner = "%s" + object { + name = databricks_table.mytable.id + comment = "%s" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + + }`, owner, comment) +} + +func TestUcAccUpdateShare(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("c", "account users"), + }, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("e", "account users"), + }, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("e", "{env.TEST_DATA_ENG_GROUP}"), + }, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + shareTemplateWithOwner("f", "{env.TEST_METASTORE_ADMIN_GROUP_NAME}"), + }) +} + +func TestUcAccUpdateShareAddObject(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + + `resource "databricks_share_pluginframework" "myshare" { + name = "{var.STICKY_RANDOM}-terraform-delta-share" + owner = "account users" + object { + name = databricks_table.mytable.id + comment = "A" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + object { + name = databricks_table.mytable_3.id + comment = "C" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + + }`, + }, acceptance.Step{ + Template: preTestTemplate + preTestTemplateUpdate + + `resource "databricks_share_pluginframework" "myshare" { + name = 
"{var.STICKY_RANDOM}-terraform-delta-share" + owner = "account users" + object { + name = databricks_table.mytable.id + comment = "AA" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + object { + name = databricks_table.mytable_2.id + comment = "BB" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + object { + name = databricks_table.mytable_3.id + comment = "CC" + data_object_type = "TABLE" + history_data_sharing_status = "DISABLED" + } + }`, + }) +} diff --git a/internal/providers/pluginfw/resources/sharing/resource_share.go b/internal/providers/pluginfw/resources/sharing/resource_share.go new file mode 100644 index 0000000000..b96cd0e976 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/resource_share.go @@ -0,0 +1,401 @@ +package sharing + +import ( + "context" + "reflect" + "sort" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/sharing" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + pluginfwcontext "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/context" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/sharing_tf" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +const resourceName = "share" + +var _ resource.ResourceWithConfigure = &ShareResource{} + +func ResourceShare() resource.Resource { + return &ShareResource{} +} + +type ShareInfoExtended struct { + sharing_tf.ShareInfo +} + +func matchOrder[T any, K comparable](target, reference []T, keyFunc func(T) K) { + // Create a map to store the index positions of each key in the reference slice. + orderMap := make(map[K]int) + for index, item := range reference { + orderMap[keyFunc(item)] = index + } + + // Sort the target slice based on the order defined in the orderMap. + sort.Slice(target, func(i, j int) bool { + return orderMap[keyFunc(target[i])] < orderMap[keyFunc(target[j])] + }) +} + +func suppressCDFEnabledDiff(si *sharing.ShareInfo) { + //suppress diff for CDF Enabled if HistoryDataSharingStatus is enabled , as API does not accept both fields to be set + for i := range si.Objects { + if si.Objects[i].HistoryDataSharingStatus == "ENABLED" { + si.Objects[i].CdfEnabled = false + } + } +} + +func resourceShareMap(si sharing.ShareInfo) map[string]sharing.SharedDataObject { + m := make(map[string]sharing.SharedDataObject, len(si.Objects)) + for _, sdo := range si.Objects { + m[sdo.Name] = sdo + } + return m +} + +func equal(this sharing.SharedDataObject, other sharing.SharedDataObject) bool { + if other.SharedAs == "" { + other.SharedAs = this.SharedAs + } + //don't compare computed fields + other.AddedAt = this.AddedAt + other.AddedBy = this.AddedBy + other.Status = this.Status + other.HistoryDataSharingStatus = this.HistoryDataSharingStatus + other.ForceSendFields = this.ForceSendFields // TODO: is this the right thing to do? 
+ return reflect.DeepEqual(this, other) +} + +func diff(beforeSi sharing.ShareInfo, afterSi sharing.ShareInfo) []sharing.SharedDataObjectUpdate { + beforeMap := resourceShareMap(beforeSi) + afterMap := resourceShareMap(afterSi) + changes := []sharing.SharedDataObjectUpdate{} + // not in after so remove + for _, beforeSdo := range beforeSi.Objects { + if _, ok := afterMap[beforeSdo.Name]; ok { + continue + } + changes = append(changes, sharing.SharedDataObjectUpdate{ + Action: sharing.SharedDataObjectUpdateActionRemove, + DataObject: &beforeSdo, + }) + } + + // not in before so add + // if in before but diff then update + for _, afterSdo := range afterSi.Objects { + if beforeSdo, ok := beforeMap[afterSdo.Name]; ok { + if !equal(beforeSdo, afterSdo) { + // do not send SharedAs + afterSdo.SharedAs = "" + changes = append(changes, sharing.SharedDataObjectUpdate{ + Action: sharing.SharedDataObjectUpdateActionUpdate, + DataObject: &afterSdo, + }) + } + continue + } + changes = append(changes, sharing.SharedDataObjectUpdate{ + Action: sharing.SharedDataObjectUpdateActionAdd, + DataObject: &afterSdo, + }) + } + return changes +} + +func shareChanges(si sharing.ShareInfo, action string) sharing.UpdateShare { + var changes []sharing.SharedDataObjectUpdate + for _, obj := range si.Objects { + changes = append(changes, sharing.SharedDataObjectUpdate{ + Action: sharing.SharedDataObjectUpdateAction(action), + DataObject: &obj, + }, + ) + } + return sharing.UpdateShare{ + Name: si.Name, + Owner: si.Owner, + Updates: changes, + } +} + +type ShareResource struct { + Client *common.DatabricksClient +} + +func (r *ShareResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = pluginfwcommon.GetDatabricksStagingName(resourceName) +} + +func (r *ShareResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + attrs, blocks := tfschema.ResourceStructToSchemaMap(ShareInfoExtended{}, func(c tfschema.CustomizableSchema) tfschema.CustomizableSchema { + c.SetRequired("name") + + c.AddPlanModifier(stringplanmodifier.RequiresReplace(), "name") // ForceNew + c.AddPlanModifier(int64planmodifier.UseStateForUnknown(), "created_at") + c.AddPlanModifier(stringplanmodifier.UseStateForUnknown(), "created_by") + + c.SetRequired("object", "data_object_type") + c.SetRequired("object", "partitions", "values", "op") + c.SetRequired("object", "partitions", "values", "name") + return c + }) + resp.Schema = schema.Schema{ + Description: "Terraform schema for Databricks Share", + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *ShareResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if d.Client == nil && req.ProviderData != nil { + d.Client = pluginfwcommon.ConfigureResource(req, resp) + } +} + +func (r *ShareResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) + + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + var plan ShareInfoExtended + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var planGoSDK sharing.ShareInfo + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, plan, &planGoSDK)...) 
+ if resp.Diagnostics.HasError() { + return + } + + var createShare sharing.CreateShare + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, plan, &createShare)...) + if resp.Diagnostics.HasError() { + return + } + shareInfo, err := w.Shares.Create(ctx, createShare) + if err != nil { + resp.Diagnostics.AddError("failed to create share", err.Error()) + return + } + + shareChanges := shareChanges(planGoSDK, string(sharing.SharedDataObjectUpdateActionAdd)) + + updatedShareInfo, err := w.Shares.Update(ctx, shareChanges) + if err != nil { + // delete orphaned share if update fails + if d_err := w.Shares.DeleteByName(ctx, shareInfo.Name); d_err != nil { + resp.Diagnostics.AddError("failed to delete orphaned share", d_err.Error()) + return + } + resp.Diagnostics.AddError("failed to update share", err.Error()) + return + } + + matchOrder(updatedShareInfo.Objects, planGoSDK.Objects, func(obj sharing.SharedDataObject) string { return obj.Name }) + + var newState ShareInfoExtended + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, updatedShareInfo, &newState)...) + if resp.Diagnostics.HasError() { + return + } + + newState.SyncEffectiveFieldsDuringCreateOrUpdate(plan.ShareInfo) + for i := range newState.Objects { + newState.Objects[i].SyncEffectiveFieldsDuringCreateOrUpdate(plan.Objects[i]) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, newState)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *ShareResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) + + var existingState ShareInfoExtended + resp.Diagnostics.Append(req.State.Get(ctx, &existingState)...) + if resp.Diagnostics.HasError() { + return + } + + var stateGoSDK sharing.ShareInfo + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, existingState, &stateGoSDK)...) + if resp.Diagnostics.HasError() { + return + } + + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var getShareRequest sharing.GetShareRequest + getShareRequest.IncludeSharedData = true + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("name"), &getShareRequest.Name)...) + if resp.Diagnostics.HasError() { + return + } + + shareInfo, err := w.Shares.Get(ctx, getShareRequest) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + return + } + resp.Diagnostics.AddError("failed to get share", err.Error()) + return + } + + matchOrder(shareInfo.Objects, stateGoSDK.Objects, func(obj sharing.SharedDataObject) string { return obj.Name }) + suppressCDFEnabledDiff(shareInfo) + + var newState ShareInfoExtended + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, shareInfo, &newState)...) + if resp.Diagnostics.HasError() { + return + } + + newState.SyncEffectiveFieldsDuringRead(existingState.ShareInfo) + for i := range newState.Objects { + newState.Objects[i].SyncEffectiveFieldsDuringRead(existingState.Objects[i]) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, newState)...) +} + +func (r *ShareResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) + + var state ShareInfoExtended + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + client, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + var plan ShareInfoExtended + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + var planGoSDK sharing.ShareInfo + resp.Diagnostics.Append(converters.TfSdkToGoSdkStruct(ctx, plan, &planGoSDK)...) + if resp.Diagnostics.HasError() { + return + } + + var getShareRequest sharing.GetShareRequest + getShareRequest.Name = state.Name.ValueString() + getShareRequest.IncludeSharedData = true + + currentShareInfo, err := client.Shares.Get(ctx, getShareRequest) + if err != nil { + resp.Diagnostics.AddError("failed to get current share info", err.Error()) + return + } + + matchOrder(currentShareInfo.Objects, planGoSDK.Objects, func(obj sharing.SharedDataObject) string { return obj.Name }) + suppressCDFEnabledDiff(currentShareInfo) + + changes := diff(*currentShareInfo, planGoSDK) + + // if owner has changed, update the share owner + if !plan.Owner.IsNull() { + updatedShareInfo, err := client.Shares.Update(ctx, sharing.UpdateShare{ + Name: state.Name.ValueString(), + Owner: plan.Owner.ValueString(), + }) + if err == nil { + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, updatedShareInfo, &state)...) + if resp.Diagnostics.HasError() { + return + } + } else { + resp.Diagnostics.AddError("failed to update share owner", err.Error()) + return + } + } + + if len(changes) > 0 { + // if there are any other changes, update the share with the changes + updatedShareInfo, err := client.Shares.Update(ctx, sharing.UpdateShare{ + Name: plan.Name.ValueString(), + Updates: changes, + }) + + if err != nil { + resp.Diagnostics.AddError("failed to update share", err.Error()) + + rollbackShareInfo, rollbackErr := client.Shares.Update(ctx, sharing.UpdateShare{ + Name: currentShareInfo.Name, + Owner: currentShareInfo.Owner, + }) + if rollbackErr != nil { + resp.Diagnostics.AddError("failed to roll back", common.OwnerRollbackError(err, rollbackErr, currentShareInfo.Owner, plan.Owner.ValueString()).Error()) + return + } + + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, rollbackShareInfo, &state)...) + if resp.Diagnostics.HasError() { + return + } + } + + matchOrder(updatedShareInfo.Objects, planGoSDK.Objects, func(obj sharing.SharedDataObject) string { return obj.Name }) + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, updatedShareInfo, &state)...) + if resp.Diagnostics.HasError() { + return + } + } + + state.SyncEffectiveFieldsDuringCreateOrUpdate(plan.ShareInfo) + for i := range state.Objects { + state.Objects[i].SyncEffectiveFieldsDuringCreateOrUpdate(plan.Objects[i]) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, state)...) +} + +func (r *ShareResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + ctx = pluginfwcontext.SetUserAgentInResourceContext(ctx, resourceName) + + w, diags := r.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var deleteShareRequest sharing_tf.DeleteShareRequest + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("name"), &deleteShareRequest.Name)...) 
+ if resp.Diagnostics.HasError() { + return + } + err := w.Shares.DeleteByName(ctx, deleteShareRequest.Name.ValueString()) + if err != nil && !apierr.IsMissing(err) { + resp.Diagnostics.AddError("failed to delete share", err.Error()) + return + } +} diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 2c5594a1ec..4f90baf118 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -77,24 +77,31 @@ func (newState *App) SyncEffectiveFieldsDuringCreateOrUpdate(plan App) { } func (newState *App) SyncEffectiveFieldsDuringRead(existingState App) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveCreator = existingState.EffectiveCreator if existingState.EffectiveCreator.ValueString() == newState.Creator.ValueString() { newState.Creator = existingState.Creator } + newState.EffectiveServicePrincipalId = existingState.EffectiveServicePrincipalId if existingState.EffectiveServicePrincipalId.ValueInt64() == newState.ServicePrincipalId.ValueInt64() { newState.ServicePrincipalId = existingState.ServicePrincipalId } + newState.EffectiveServicePrincipalName = existingState.EffectiveServicePrincipalName if existingState.EffectiveServicePrincipalName.ValueString() == newState.ServicePrincipalName.ValueString() { newState.ServicePrincipalName = existingState.ServicePrincipalName } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } + newState.EffectiveUpdater = existingState.EffectiveUpdater if existingState.EffectiveUpdater.ValueString() == newState.Updater.ValueString() { newState.Updater = existingState.Updater } + newState.EffectiveUrl = existingState.EffectiveUrl if existingState.EffectiveUrl.ValueString() == newState.Url.ValueString() { newState.Url = existingState.Url } @@ -174,12 +181,15 @@ func (newState *AppDeployment) SyncEffectiveFieldsDuringCreateOrUpdate(plan AppD } func (newState *AppDeployment) SyncEffectiveFieldsDuringRead(existingState AppDeployment) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveCreator = existingState.EffectiveCreator if existingState.EffectiveCreator.ValueString() == newState.Creator.ValueString() { newState.Creator = existingState.Creator } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } @@ -211,6 +221,7 @@ func (newState *AppDeploymentStatus) SyncEffectiveFieldsDuringCreateOrUpdate(pla } func (newState *AppDeploymentStatus) SyncEffectiveFieldsDuringRead(existingState AppDeploymentStatus) { + newState.EffectiveMessage = existingState.EffectiveMessage if existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { newState.Message = existingState.Message } @@ -361,6 +372,7 @@ func (newState *ApplicationStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan } func (newState *ApplicationStatus) SyncEffectiveFieldsDuringRead(existingState ApplicationStatus) { + newState.EffectiveMessage = existingState.EffectiveMessage if 
existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { newState.Message = existingState.Message } @@ -380,6 +392,7 @@ func (newState *ComputeStatus) SyncEffectiveFieldsDuringCreateOrUpdate(plan Comp } func (newState *ComputeStatus) SyncEffectiveFieldsDuringRead(existingState ComputeStatus) { + newState.EffectiveMessage = existingState.EffectiveMessage if existingState.EffectiveMessage.ValueString() == newState.Message.ValueString() { newState.Message = existingState.Message } diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index d064881534..55778dfa87 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -3012,6 +3012,7 @@ func (newState *OnlineTable) SyncEffectiveFieldsDuringCreateOrUpdate(plan Online } func (newState *OnlineTable) SyncEffectiveFieldsDuringRead(existingState OnlineTable) { + newState.EffectiveTableServingUrl = existingState.EffectiveTableServingUrl if existingState.EffectiveTableServingUrl.ValueString() == newState.TableServingUrl.ValueString() { newState.TableServingUrl = existingState.TableServingUrl } @@ -3051,6 +3052,7 @@ func (newState *OnlineTableSpec) SyncEffectiveFieldsDuringCreateOrUpdate(plan On } func (newState *OnlineTableSpec) SyncEffectiveFieldsDuringRead(existingState OnlineTableSpec) { + newState.EffectivePipelineId = existingState.EffectivePipelineId if existingState.EffectivePipelineId.ValueString() == newState.PipelineId.ValueString() { newState.PipelineId = existingState.PipelineId } diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index 85023fe5e2..b76126b18b 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -41,6 +41,7 @@ func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate( } func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateDashboardRequest) { + newState.EffectiveParentPath = existingState.EffectiveParentPath if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { newState.ParentPath = existingState.ParentPath } @@ -65,6 +66,7 @@ func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState CreateScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } @@ -90,9 +92,11 @@ func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpda } func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState CreateSubscriptionRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } @@ -175,21 +179,27 @@ func (newState *Dashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan Dashboar } func (newState *Dashboard) SyncEffectiveFieldsDuringRead(existingState Dashboard) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if 
existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveParentPath = existingState.EffectiveParentPath if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { newState.ParentPath = existingState.ParentPath } + newState.EffectivePath = existingState.EffectivePath if existingState.EffectivePath.ValueString() == newState.Path.ValueString() { newState.Path = existingState.Path } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } @@ -219,12 +229,15 @@ func (newState *DeleteScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *DeleteScheduleRequest) SyncEffectiveFieldsDuringRead(existingState DeleteScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } @@ -268,15 +281,19 @@ func (newState *DeleteSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpda } func (newState *DeleteSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState DeleteSubscriptionRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } + newState.EffectiveSubscriptionId = existingState.EffectiveSubscriptionId if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { newState.SubscriptionId = existingState.SubscriptionId } @@ -520,9 +537,11 @@ func (newState *GetScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan } func (newState *GetScheduleRequest) SyncEffectiveFieldsDuringRead(existingState GetScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = 
existingState.ScheduleId } @@ -551,12 +570,15 @@ func (newState *GetSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate( } func (newState *GetSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState GetSubscriptionRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } + newState.EffectiveSubscriptionId = existingState.EffectiveSubscriptionId if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { newState.SubscriptionId = existingState.SubscriptionId } @@ -583,6 +605,7 @@ func (newState *ListDashboardsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *ListDashboardsRequest) SyncEffectiveFieldsDuringRead(existingState ListDashboardsRequest) { + newState.EffectivePageToken = existingState.EffectivePageToken if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { newState.PageToken = existingState.PageToken } @@ -602,6 +625,7 @@ func (newState *ListDashboardsResponse) SyncEffectiveFieldsDuringCreateOrUpdate( } func (newState *ListDashboardsResponse) SyncEffectiveFieldsDuringRead(existingState ListDashboardsResponse) { + newState.EffectiveNextPageToken = existingState.EffectiveNextPageToken if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { newState.NextPageToken = existingState.NextPageToken } @@ -628,9 +652,11 @@ func (newState *ListSchedulesRequest) SyncEffectiveFieldsDuringCreateOrUpdate(pl } func (newState *ListSchedulesRequest) SyncEffectiveFieldsDuringRead(existingState ListSchedulesRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectivePageToken = existingState.EffectivePageToken if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { newState.PageToken = existingState.PageToken } @@ -652,6 +678,7 @@ func (newState *ListSchedulesResponse) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *ListSchedulesResponse) SyncEffectiveFieldsDuringRead(existingState ListSchedulesResponse) { + newState.EffectiveNextPageToken = existingState.EffectiveNextPageToken if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { newState.NextPageToken = existingState.NextPageToken } @@ -683,12 +710,15 @@ func (newState *ListSubscriptionsRequest) SyncEffectiveFieldsDuringCreateOrUpdat } func (newState *ListSubscriptionsRequest) SyncEffectiveFieldsDuringRead(existingState ListSubscriptionsRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectivePageToken = existingState.EffectivePageToken if existingState.EffectivePageToken.ValueString() == newState.PageToken.ValueString() { newState.PageToken = existingState.PageToken } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == 
newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } @@ -710,6 +740,7 @@ func (newState *ListSubscriptionsResponse) SyncEffectiveFieldsDuringCreateOrUpda } func (newState *ListSubscriptionsResponse) SyncEffectiveFieldsDuringRead(existingState ListSubscriptionsResponse) { + newState.EffectiveNextPageToken = existingState.EffectiveNextPageToken if existingState.EffectiveNextPageToken.ValueString() == newState.NextPageToken.ValueString() { newState.NextPageToken = existingState.NextPageToken } @@ -782,9 +813,11 @@ func (newState *PublishedDashboard) SyncEffectiveFieldsDuringCreateOrUpdate(plan } func (newState *PublishedDashboard) SyncEffectiveFieldsDuringRead(existingState PublishedDashboard) { + newState.EffectiveDisplayName = existingState.EffectiveDisplayName if existingState.EffectiveDisplayName.ValueString() == newState.DisplayName.ValueString() { newState.DisplayName = existingState.DisplayName } + newState.EffectiveRevisionCreateTime = existingState.EffectiveRevisionCreateTime if existingState.EffectiveRevisionCreateTime.ValueString() == newState.RevisionCreateTime.ValueString() { newState.RevisionCreateTime = existingState.RevisionCreateTime } @@ -873,18 +906,23 @@ func (newState *Schedule) SyncEffectiveFieldsDuringCreateOrUpdate(plan Schedule) } func (newState *Schedule) SyncEffectiveFieldsDuringRead(existingState Schedule) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } @@ -953,24 +991,31 @@ func (newState *Subscription) SyncEffectiveFieldsDuringCreateOrUpdate(plan Subsc } func (newState *Subscription) SyncEffectiveFieldsDuringRead(existingState Subscription) { + newState.EffectiveCreateTime = existingState.EffectiveCreateTime if existingState.EffectiveCreateTime.ValueString() == newState.CreateTime.ValueString() { newState.CreateTime = existingState.CreateTime } + newState.EffectiveCreatedByUserId = existingState.EffectiveCreatedByUserId if existingState.EffectiveCreatedByUserId.ValueInt64() == newState.CreatedByUserId.ValueInt64() { newState.CreatedByUserId = existingState.CreatedByUserId } + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == 
newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } + newState.EffectiveSubscriptionId = existingState.EffectiveSubscriptionId if existingState.EffectiveSubscriptionId.ValueString() == newState.SubscriptionId.ValueString() { newState.SubscriptionId = existingState.SubscriptionId } + newState.EffectiveUpdateTime = existingState.EffectiveUpdateTime if existingState.EffectiveUpdateTime.ValueString() == newState.UpdateTime.ValueString() { newState.UpdateTime = existingState.UpdateTime } @@ -989,6 +1034,7 @@ func (newState *SubscriptionSubscriberDestination) SyncEffectiveFieldsDuringCrea } func (newState *SubscriptionSubscriberDestination) SyncEffectiveFieldsDuringRead(existingState SubscriptionSubscriberDestination) { + newState.EffectiveDestinationId = existingState.EffectiveDestinationId if existingState.EffectiveDestinationId.ValueString() == newState.DestinationId.ValueString() { newState.DestinationId = existingState.DestinationId } @@ -1006,6 +1052,7 @@ func (newState *SubscriptionSubscriberUser) SyncEffectiveFieldsDuringCreateOrUpd } func (newState *SubscriptionSubscriberUser) SyncEffectiveFieldsDuringRead(existingState SubscriptionSubscriberUser) { + newState.EffectiveUserId = existingState.EffectiveUserId if existingState.EffectiveUserId.ValueInt64() == newState.UserId.ValueInt64() { newState.UserId = existingState.UserId } @@ -1094,6 +1141,7 @@ func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate( } func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDashboardRequest) { + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } @@ -1130,12 +1178,15 @@ func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p } func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState UpdateScheduleRequest) { + newState.EffectiveDashboardId = existingState.EffectiveDashboardId if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { newState.DashboardId = existingState.DashboardId } + newState.EffectiveEtag = existingState.EffectiveEtag if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { newState.Etag = existingState.Etag } + newState.EffectiveScheduleId = existingState.EffectiveScheduleId if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { newState.ScheduleId = existingState.ScheduleId } diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 35f110fbe2..295d1e1a99 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -43,6 +43,7 @@ func (newState *BaseJob) SyncEffectiveFieldsDuringCreateOrUpdate(plan BaseJob) { } func (newState *BaseJob) SyncEffectiveFieldsDuringRead(existingState BaseJob) { + newState.EffectiveEffectiveBudgetPolicyId = existingState.EffectiveEffectiveBudgetPolicyId if existingState.EffectiveEffectiveBudgetPolicyId.ValueString() == newState.EffectiveBudgetPolicyId.ValueString() { newState.EffectiveBudgetPolicyId = existingState.EffectiveBudgetPolicyId } @@ -944,6 +945,7 @@ func (newState *Job) SyncEffectiveFieldsDuringCreateOrUpdate(plan Job) { } func (newState *Job) SyncEffectiveFieldsDuringRead(existingState Job) { + newState.EffectiveEffectiveBudgetPolicyId = existingState.EffectiveEffectiveBudgetPolicyId if 
existingState.EffectiveEffectiveBudgetPolicyId.ValueString() == newState.EffectiveBudgetPolicyId.ValueString() { newState.EffectiveBudgetPolicyId = existingState.EffectiveBudgetPolicyId } diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index be8e5f0c2f..17d8bbc18e 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -342,6 +342,7 @@ func (newState *Credential) SyncEffectiveFieldsDuringCreateOrUpdate(plan Credent } func (newState *Credential) SyncEffectiveFieldsDuringRead(existingState Credential) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } @@ -382,6 +383,7 @@ func (newState *CustomerManagedKey) SyncEffectiveFieldsDuringCreateOrUpdate(plan } func (newState *CustomerManagedKey) SyncEffectiveFieldsDuringRead(existingState CustomerManagedKey) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } @@ -739,9 +741,11 @@ func (newState *Network) SyncEffectiveFieldsDuringCreateOrUpdate(plan Network) { } func (newState *Network) SyncEffectiveFieldsDuringRead(existingState Network) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } + newState.EffectiveVpcStatus = existingState.EffectiveVpcStatus if existingState.EffectiveVpcStatus.ValueString() == newState.VpcStatus.ValueString() { newState.VpcStatus = existingState.VpcStatus } @@ -871,9 +875,11 @@ func (newState *StorageConfiguration) SyncEffectiveFieldsDuringCreateOrUpdate(pl } func (newState *StorageConfiguration) SyncEffectiveFieldsDuringRead(existingState StorageConfiguration) { + newState.EffectiveAccountId = existingState.EffectiveAccountId if existingState.EffectiveAccountId.ValueString() == newState.AccountId.ValueString() { newState.AccountId = existingState.AccountId } + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } @@ -1135,12 +1141,15 @@ func (newState *Workspace) SyncEffectiveFieldsDuringCreateOrUpdate(plan Workspac } func (newState *Workspace) SyncEffectiveFieldsDuringRead(existingState Workspace) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } + newState.EffectiveWorkspaceStatus = existingState.EffectiveWorkspaceStatus if existingState.EffectiveWorkspaceStatus.ValueString() == newState.WorkspaceStatus.ValueString() { newState.WorkspaceStatus = existingState.WorkspaceStatus } + newState.EffectiveWorkspaceStatusMessage = existingState.EffectiveWorkspaceStatusMessage if existingState.EffectiveWorkspaceStatusMessage.ValueString() == newState.WorkspaceStatusMessage.ValueString() { newState.WorkspaceStatusMessage = existingState.WorkspaceStatusMessage } diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 3ca9895b89..71b73ba253 100755 --- 
a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -1543,24 +1543,31 @@ func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUp } func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringRead(existingState NccAzurePrivateEndpointRule) { + newState.EffectiveConnectionState = existingState.EffectiveConnectionState if existingState.EffectiveConnectionState.ValueString() == newState.ConnectionState.ValueString() { newState.ConnectionState = existingState.ConnectionState } + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } + newState.EffectiveDeactivated = existingState.EffectiveDeactivated if existingState.EffectiveDeactivated.ValueBool() == newState.Deactivated.ValueBool() { newState.Deactivated = existingState.Deactivated } + newState.EffectiveDeactivatedAt = existingState.EffectiveDeactivatedAt if existingState.EffectiveDeactivatedAt.ValueInt64() == newState.DeactivatedAt.ValueInt64() { newState.DeactivatedAt = existingState.DeactivatedAt } + newState.EffectiveEndpointName = existingState.EffectiveEndpointName if existingState.EffectiveEndpointName.ValueString() == newState.EndpointName.ValueString() { newState.EndpointName = existingState.EndpointName } + newState.EffectiveRuleId = existingState.EffectiveRuleId if existingState.EffectiveRuleId.ValueString() == newState.RuleId.ValueString() { newState.RuleId = existingState.RuleId } + newState.EffectiveUpdatedTime = existingState.EffectiveUpdatedTime if existingState.EffectiveUpdatedTime.ValueInt64() == newState.UpdatedTime.ValueInt64() { newState.UpdatedTime = existingState.UpdatedTime } @@ -1672,12 +1679,15 @@ func (newState *NetworkConnectivityConfiguration) SyncEffectiveFieldsDuringCreat } func (newState *NetworkConnectivityConfiguration) SyncEffectiveFieldsDuringRead(existingState NetworkConnectivityConfiguration) { + newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime } + newState.EffectiveNetworkConnectivityConfigId = existingState.EffectiveNetworkConnectivityConfigId if existingState.EffectiveNetworkConnectivityConfigId.ValueString() == newState.NetworkConnectivityConfigId.ValueString() { newState.NetworkConnectivityConfigId = existingState.NetworkConnectivityConfigId } + newState.EffectiveUpdatedTime = existingState.EffectiveUpdatedTime if existingState.EffectiveUpdatedTime.ValueInt64() == newState.UpdatedTime.ValueInt64() { newState.UpdatedTime = existingState.UpdatedTime } diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index 1cb5022027..0192deeaaa 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -892,6 +892,7 @@ func (newState *ShareInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ShareInf } func (newState *ShareInfo) SyncEffectiveFieldsDuringRead(existingState ShareInfo) { + newState.EffectiveOwner = existingState.EffectiveOwner if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() { newState.Owner = existingState.Owner } @@ -1000,15 +1001,19 @@ func (newState *SharedDataObject) SyncEffectiveFieldsDuringCreateOrUpdate(plan S } func (newState *SharedDataObject) SyncEffectiveFieldsDuringRead(existingState SharedDataObject) { + 
newState.EffectiveCdfEnabled = existingState.EffectiveCdfEnabled if existingState.EffectiveCdfEnabled.ValueBool() == newState.CdfEnabled.ValueBool() { newState.CdfEnabled = existingState.CdfEnabled } + newState.EffectiveHistoryDataSharingStatus = existingState.EffectiveHistoryDataSharingStatus if existingState.EffectiveHistoryDataSharingStatus.ValueString() == newState.HistoryDataSharingStatus.ValueString() { newState.HistoryDataSharingStatus = existingState.HistoryDataSharingStatus } + newState.EffectiveSharedAs = existingState.EffectiveSharedAs if existingState.EffectiveSharedAs.ValueString() == newState.SharedAs.ValueString() { newState.SharedAs = existingState.SharedAs } + newState.EffectiveStartVersion = existingState.EffectiveStartVersion if existingState.EffectiveStartVersion.ValueInt64() == newState.StartVersion.ValueInt64() { newState.StartVersion = existingState.StartVersion } @@ -1130,6 +1135,7 @@ func (newState *UpdateShare) SyncEffectiveFieldsDuringCreateOrUpdate(plan Update } func (newState *UpdateShare) SyncEffectiveFieldsDuringRead(existingState UpdateShare) { + newState.EffectiveOwner = existingState.EffectiveOwner if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() { newState.Owner = existingState.Owner } From 92357dcf9cfb4acd64ace55b00e82e38300ec2ad Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Tue, 29 Oct 2024 06:17:42 -0400 Subject: [PATCH 06/14] [Fix] Handle edge case for `effective_properties` in `databricks_sql_table` (#4153) ## Changes It was reported in #4098 that some of the specified options, like, `multiLine`, `recursiveFileLookup` and potentially more, aren't returned as `option.multiLine`, etc., but instead are expanded into full names, like, `spark.sql.dataSourceOptions.multiLine`. This PR changes lookup logic a bit, and if we can't find `option.something`, then we're looking for all options ending with `.something` (only if there are no `.` in the name). Resolves #4098 ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- catalog/resource_sql_table.go | 29 +++++++++++++++++++++-------- catalog/resource_sql_table_test.go | 7 +++++-- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/catalog/resource_sql_table.go b/catalog/resource_sql_table.go index ce9d4dbd7a..710c8a20bf 100644 --- a/catalog/resource_sql_table.go +++ b/catalog/resource_sql_table.go @@ -21,6 +21,7 @@ import ( ) var MaxSqlExecWaitTimeout = 50 +var optionPrefixes = []string{"option.", "spark.sql.dataSourceOptions."} type SqlColumnInfo struct { Name string `json:"name"` @@ -67,7 +68,6 @@ type SqlTableInfo struct { } func (ti SqlTableInfo) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema { - caseInsensitiveFields := []string{"name", "catalog_name", "schema_name"} for _, field := range caseInsensitiveFields { s.SchemaPath(field).SetCustomSuppressDiff(common.EqualFoldDiffSuppress) @@ -598,18 +598,31 @@ func ResourceSqlTable() common.Resource { // If the user specified a property but the value of that property has changed, that will appear // as a change in the effective property/option. To cause a diff to be detected, we need to // reset the effective property/option to the requested value. 
- userSpecifiedProperties := d.Get("properties").(map[string]interface{}) - userSpecifiedOptions := d.Get("options").(map[string]interface{}) - effectiveProperties := d.Get("effective_properties").(map[string]interface{}) - diff := make(map[string]interface{}) + userSpecifiedProperties := d.Get("properties").(map[string]any) + userSpecifiedOptions := d.Get("options").(map[string]any) + effectiveProperties := d.Get("effective_properties").(map[string]any) + diff := make(map[string]any) for k, userSpecifiedValue := range userSpecifiedProperties { if effectiveValue, ok := effectiveProperties[k]; !ok || effectiveValue != userSpecifiedValue { diff[k] = userSpecifiedValue } } - for k, userSpecifiedValue := range userSpecifiedOptions { - if effectiveValue, ok := effectiveProperties["option."+k]; !ok || effectiveValue != userSpecifiedValue { - diff["option."+k] = userSpecifiedValue + for userOptName, userSpecifiedValue := range userSpecifiedOptions { + var found bool + var effectiveValue any + var effectOptName string + // If the option is not found, check if the user specified the option without the prefix + // i.e. if user specified `multiLine` for JSON, then backend returns `spark.sql.dataSourceOptions.multiLine` + for _, prefix := range optionPrefixes { + effectOptName = prefix + userOptName + if v, ok := effectiveProperties[effectOptName]; ok { + found = true + effectiveValue = v + break + } + } + if !found || effectiveValue != userSpecifiedValue { + diff[effectOptName] = userSpecifiedValue } } if len(diff) > 0 { diff --git a/catalog/resource_sql_table_test.go b/catalog/resource_sql_table_test.go index f2f0a6c5e2..b2495480cb 100644 --- a/catalog/resource_sql_table_test.go +++ b/catalog/resource_sql_table_test.go @@ -1625,15 +1625,18 @@ func TestResourceSqlTable_Diff_ExistingResource(t *testing.T) { } options = { "myopt" = "myval" + "multiLine" = "true" }`, map[string]string{ "properties.%": "1", "properties.myprop": "myval", - "options.%": "1", + "options.%": "2", "options.myopt": "myval", - "effective_properties.%": "2", + "options.multiLine": "true", + "effective_properties.%": "3", "effective_properties.myprop": "myval", "effective_properties.option.myopt": "myval", + "effective_properties.spark.sql.dataSourceOptions.multiLine": "true", }, nil, }, From 38eeb21e56f9e3ec3fe271010430dbdf6a62ec0b Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 29 Oct 2024 20:43:31 +0530 Subject: [PATCH 07/14] [Fix] Provide more prescriptive error when users fail to create a single node cluster (#4168) ## Changes A better error message is warranted because many DABs customers have reportedly run into this. Original issue: https://github.com/databricks/cli/issues/1546 ## Tests Unit test --- clusters/clusters_api.go | 2 +- clusters/resource_cluster.go | 21 ++++++++++++++++++- clusters/resource_cluster_test.go | 6 ++---- jobs/resource_job_test.go | 34 +++++++++++++++++++++++++++---- 4 files changed, 53 insertions(+), 10 deletions(-) diff --git a/clusters/clusters_api.go b/clusters/clusters_api.go index d47cfb6090..6a08a4a608 100644 --- a/clusters/clusters_api.go +++ b/clusters/clusters_api.go @@ -447,7 +447,7 @@ func (cluster Cluster) Validate() error { if profile == "singleNode" && strings.HasPrefix(master, "local") && resourceClass == "SingleNode" { return nil } - return fmt.Errorf("NumWorkers could be 0 only for SingleNode clusters. 
See https://docs.databricks.com/clusters/single-node.html for more details") + return errors.New(numWorkerErr) } // TODO: Remove this once all the resources using clusters are migrated to Go SDK. diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go index fb77a5f76d..3c03502023 100644 --- a/clusters/resource_cluster.go +++ b/clusters/resource_cluster.go @@ -26,7 +26,26 @@ var clusterSchema = resourceClusterSchema() var clusterSchemaVersion = 4 const ( - numWorkerErr = "NumWorkers could be 0 only for SingleNode clusters. See https://docs.databricks.com/clusters/single-node.html for more details" + numWorkerErr = `num_workers may be 0 only for single-node clusters. To create a single node +cluster please include the following configuration in your cluster configuration: + + spark_conf = { + "spark.databricks.cluster.profile" : "singleNode" + "spark.master" : "local[*]" + } + + custom_tags = { + "ResourceClass" = "SingleNode" + } + +Please note that the Databricks Terraform provider cannot detect if the above configuration +is defined in a policy used by the cluster. Please define this in the cluster configuration +itself to create a single node cluster. + +For more details please see: + 1. https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/cluster#fixed-size-or-autoscaling-cluster + 2. https://docs.databricks.com/clusters/single-node.html` + unsupportedExceptCreateEditClusterSpecErr = "unsupported type %T, must be one of %scompute.CreateCluster, %scompute.ClusterSpec or %scompute.EditCluster. Please report this issue to the GitHub repo" ) diff --git a/clusters/resource_cluster_test.go b/clusters/resource_cluster_test.go index 804067597b..240b62cb4e 100644 --- a/clusters/resource_cluster_test.go +++ b/clusters/resource_cluster_test.go @@ -1860,8 +1860,7 @@ func TestResourceClusterCreate_SingleNodeFail(t *testing.T) { "is_pinned": false, }, }.Apply(t) - assert.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) + assert.EqualError(t, err, numWorkerErr) } func TestResourceClusterCreate_NegativeNumWorkers(t *testing.T) { @@ -1900,8 +1899,7 @@ func TestResourceClusterUpdate_FailNumWorkersZero(t *testing.T) { "num_workers": 0, }, }.Apply(t) - assert.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) + assert.EqualError(t, err, numWorkerErr) } func TestModifyClusterRequestAws(t *testing.T) { diff --git a/jobs/resource_job_test.go b/jobs/resource_job_test.go index 95ffb03923..75a780c00a 100644 --- a/jobs/resource_job_test.go +++ b/jobs/resource_job_test.go @@ -2056,8 +2056,21 @@ func TestResourceJobCreateSingleNode_Fail(t *testing.T) { jar = "dbfs://ff/gg/hh.jar" }`, }.Apply(t) - assert.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) + assert.ErrorContains(t, err, `num_workers may be 0 only for single-node clusters. To create a single node +cluster please include the following configuration in your cluster configuration: + + spark_conf = { + "spark.databricks.cluster.profile" : "singleNode" + "spark.master" : "local[*]" + } + + custom_tags = { + "ResourceClass" = "SingleNode" + } + +Please note that the Databricks Terraform provider cannot detect if the above configuration +is defined in a policy used by the cluster. 
Please define this in the cluster configuration +itself to create a single node cluster.`) } func TestResourceJobRead(t *testing.T) { @@ -2946,8 +2959,21 @@ func TestResourceJobUpdate_FailNumWorkersZero(t *testing.T) { parameters = ["--cleanup", "full"] }`, }.Apply(t) - assert.Error(t, err) - require.Equal(t, true, strings.Contains(err.Error(), "NumWorkers could be 0 only for SingleNode clusters")) + assert.ErrorContains(t, err, `num_workers may be 0 only for single-node clusters. To create a single node +cluster please include the following configuration in your cluster configuration: + + spark_conf = { + "spark.databricks.cluster.profile" : "singleNode" + "spark.master" : "local[*]" + } + + custom_tags = { + "ResourceClass" = "SingleNode" + } + +Please note that the Databricks Terraform provider cannot detect if the above configuration +is defined in a policy used by the cluster. Please define this in the cluster configuration +itself to create a single node cluster.`) } func TestJobsAPIList(t *testing.T) { From dfa6bc0bebf4541fc71b8b08a567d16b341f024d Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Wed, 30 Oct 2024 10:14:44 +0100 Subject: [PATCH 08/14] [Internal] Add test instructions for external contributors (#4169) ## Changes Add test instructions for external contributors ## Tests See Go Changes https://github.com/databricks/databricks-sdk-go/pull/1073 --- .github/workflows/external-message.yml | 114 ++++++++++++++++++++++++ .github/workflows/integration-tests.yml | 20 ++++- 2 files changed, 133 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/external-message.yml diff --git a/.github/workflows/external-message.yml b/.github/workflows/external-message.yml new file mode 100644 index 0000000000..b9534520a0 --- /dev/null +++ b/.github/workflows/external-message.yml @@ -0,0 +1,114 @@ +name: PR Comment + +# WARNING: +# THIS WORKFLOW ALWAYS RUNS FOR EXTERNAL CONTRIBUTORS WITHOUT ANY APPROVAL. +# THIS WORKFLOW RUNS FROM MAIN BRANCH, NOT FROM THE PR BRANCH. +# DO NOT PULL THE PR OR EXECUTE ANY CODE FROM THE PR. + +on: + pull_request_target: + types: [opened, reopened, synchronize] + branches: + - main + + +jobs: + comment-on-pr: + runs-on: ubuntu-latest + permissions: + pull-requests: write + + steps: + # NOTE: The following checks may not be accurate depending on Org or Repo settings. 
+ - name: Check user and potential secret access + id: check-secrets-access + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + USER_LOGIN="${{ github.event.pull_request.user.login }}" + REPO_OWNER="${{ github.repository_owner }}" + REPO_NAME="${{ github.event.repository.name }}" + + echo "Pull request opened by: $USER_LOGIN" + + # Check if PR is from a fork + IS_FORK=$([[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && echo "true" || echo "false") + + HAS_ACCESS="false" + + # Check user's permission level on the repository + USER_PERMISSION=$(gh api repos/$REPO_OWNER/$REPO_NAME/collaborators/$USER_LOGIN/permission --jq '.permission') + + if [[ "$USER_PERMISSION" == "admin" || "$USER_PERMISSION" == "write" ]]; then + HAS_ACCESS="true" + elif [[ "$USER_PERMISSION" == "read" ]]; then + # For read access, we need to check if the user has been explicitly granted secret access + # This information is not directly available via API, so we'll make an assumption + # that read access does not imply secret access + HAS_ACCESS="false" + fi + + # Check if repo owner is an organization + IS_ORG=$(gh api users/$REPO_OWNER --jq '.type == "Organization"') + + if [[ "$IS_ORG" == "true" && "$HAS_ACCESS" == "false" ]]; then + # Check if user is a member of any team with write or admin access to the repo + TEAMS_WITH_ACCESS=$(gh api repos/$REPO_OWNER/$REPO_NAME/teams --jq '.[] | select(.permission == "push" or .permission == "admin") | .slug') + for team in $TEAMS_WITH_ACCESS; do + IS_TEAM_MEMBER=$(gh api orgs/$REPO_OWNER/teams/$team/memberships/$USER_LOGIN --silent && echo "true" || echo "false") + if [[ "$IS_TEAM_MEMBER" == "true" ]]; then + HAS_ACCESS="true" + break + fi + done + fi + + # If it's a fork, set HAS_ACCESS to false regardless of other checks + if [[ "$IS_FORK" == "true" ]]; then + HAS_ACCESS="false" + fi + + echo "has_secrets_access=$HAS_ACCESS" >> $GITHUB_OUTPUT + if [[ "$HAS_ACCESS" == "true" ]]; then + echo "User $USER_LOGIN likely has access to secrets" + else + echo "User $USER_LOGIN likely does not have access to secrets" + fi + + + - uses: actions/checkout@v4 + + - name: Delete old comments + if: steps.check-secrets-access.outputs.has_secrets_access != 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Delete previous comment if it exists + previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \ + --jq '.[] | select(.body | startswith("")) | .id') + echo "Previous comment IDs: $previous_comment_ids" + # Iterate over each comment ID and delete the comment + if [ ! -z "$previous_comment_ids" ]; then + echo "$previous_comment_ids" | while read -r comment_id; do + echo "Deleting comment with ID: $comment_id" + gh api "repos/${{ github.repository }}/issues/comments/$comment_id" -X DELETE + done + fi + + - name: Comment on PR + if: steps.check-secrets-access.outputs.has_secrets_access != 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + COMMIT_SHA: ${{ github.event.pull_request.head.sha }} + run: | + gh pr comment ${{ github.event.pull_request.number }} --body \ + " + Run integration tests manually: + [go/deco-tests-run/terraform](https://go/deco-tests-run/terraform) + + Inputs: + * PR number: ${{github.event.pull_request.number}} + * Commit SHA: \`${{ env.COMMIT_SHA }}\` + + Checks will be approved automatically on success. 
+ " diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index b92be6da5f..67ed709365 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -9,10 +9,28 @@ on: jobs: + check-token: + name: Check secrets access + runs-on: ubuntu-latest + outputs: + has_token: ${{ steps.set-token-status.outputs.has_token }} + steps: + - name: Check if GITHUB_TOKEN is set + id: set-token-status + run: | + if [ -z "${{ secrets.GITHUB_TOKEN }}" ]; then + echo "GITHUB_TOKEN is empty. User has no access to tokens." + echo "::set-output name=has_token::false" + else + echo "GITHUB_TOKEN is set. User has no access to tokens." + echo "::set-output name=has_token::true" + fi + trigger-tests: - if: github.event_name == 'pull_request' name: Trigger Tests runs-on: ubuntu-latest + needs: check-token + if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true' environment: "test-trigger-is" steps: From 613ed1ab6ff138601bbc83bf9c48a2a7f41b3f7e Mon Sep 17 00:00:00 2001 From: Omer Lachish <289488+rauchy@users.noreply.github.com> Date: Wed, 30 Oct 2024 13:36:09 +0100 Subject: [PATCH 09/14] [Internal] Migrate Share Data Source to Plugin Framework (#4161) ## Changes This PR migrates the share/shares data sources to the Plugin framework. The code was largely copied "as is" from the previous implementation of the share data source, with the necessary adaptations made for integration with the Plugin framework. ## Tests ~~Note: current tests create shares using the SDKv2 resource, but fetch them using the new plugin framework data source. Once the resource migration will be merged, I will amend this.~~ Edit: Now that the resource itself is merged, the acceptance tests use the plugin framework's version of the resource. 
- [x] `make test` run locally - [ ] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Omer Lachish --- internal/providers/pluginfw/pluginfw.go | 2 + .../pluginfw/resources/sharing/data_share.go | 79 +++++++++++++++ .../pluginfw/resources/sharing/data_shares.go | 67 +++++++++++++ .../resources/sharing/data_shares_acc_test.go | 98 +++++++++++++++++++ 4 files changed, 246 insertions(+) create mode 100644 internal/providers/pluginfw/resources/sharing/data_share.go create mode 100644 internal/providers/pluginfw/resources/sharing/data_shares.go create mode 100644 internal/providers/pluginfw/resources/sharing/data_shares_acc_test.go diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index 53b361f998..e813c94aa3 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -58,6 +58,8 @@ func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []f volume.DataSourceVolumes, registered_model.DataSourceRegisteredModel, notificationdestinations.DataSourceNotificationDestinations, + sharing.DataSourceShare, + sharing.DataSourceShares, } } diff --git a/internal/providers/pluginfw/resources/sharing/data_share.go b/internal/providers/pluginfw/resources/sharing/data_share.go new file mode 100644 index 0000000000..f96d56ac12 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/data_share.go @@ -0,0 +1,79 @@ +package sharing + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/sharing" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/sharing_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func DataSourceShare() datasource.DataSource { + return &ShareDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &ShareDataSource{} + +type ShareDataSource struct { + Client *common.DatabricksClient +} + +func (d *ShareDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = pluginfwcommon.GetDatabricksStagingName("share") +} + +func (d *ShareDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(sharing_tf.ShareInfo{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *ShareDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *ShareDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + var config sharing_tf.ShareInfo + diags = req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + share, err := w.Shares.Get(ctx, sharing.GetShareRequest{ + Name: config.Name.ValueString(), + IncludeSharedData: true, + }) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + } + + resp.Diagnostics.AddError("Failed to fetch share", err.Error()) + return + } + + var shareInfoTfSdk sharing_tf.ShareInfo + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, share, &shareInfoTfSdk)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, shareInfoTfSdk)...) +} diff --git a/internal/providers/pluginfw/resources/sharing/data_shares.go b/internal/providers/pluginfw/resources/sharing/data_shares.go new file mode 100644 index 0000000000..1753621192 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/data_shares.go @@ -0,0 +1,67 @@ +package sharing + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/databricks/databricks-sdk-go/service/sharing" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +type SharesList struct { + Shares []types.String `tfsdk:"shares" tf:"computed,optional,slice_set"` +} + +func DataSourceShares() datasource.DataSource { + return &SharesDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &SharesDataSource{} + +type SharesDataSource struct { + Client *common.DatabricksClient +} + +func (d *SharesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = pluginfwcommon.GetDatabricksStagingName("shares") +} + +func (d *SharesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(SharesList{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *SharesDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *SharesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + shares, err := w.Shares.ListAll(ctx, sharing.ListSharesRequest{}) + if err != nil { + resp.Diagnostics.AddError("Failed to fetch shares", err.Error()) + return + } + + shareNames := make([]types.String, len(shares)) + for i, share := range shares { + shareNames[i] = types.StringValue(share.Name) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, SharesList{Shares: shareNames})...) 
+} diff --git a/internal/providers/pluginfw/resources/sharing/data_shares_acc_test.go b/internal/providers/pluginfw/resources/sharing/data_shares_acc_test.go new file mode 100644 index 0000000000..9b0440e5d3 --- /dev/null +++ b/internal/providers/pluginfw/resources/sharing/data_shares_acc_test.go @@ -0,0 +1,98 @@ +package sharing_test + +import ( + "strconv" + "testing" + + "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func checkSharesDataSourcePopulated(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + _, ok := s.Modules[0].Resources["data.databricks_shares_pluginframework.this"] + require.True(t, ok, "data.databricks_shares_pluginframework.this has to be there") + num_shares, _ := strconv.Atoi(s.Modules[0].Outputs["shares"].Value.(string)) + assert.GreaterOrEqual(t, num_shares, 1) + return nil + } +} +func TestUcAccDataSourceShares(t *testing.T) { + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + } + + resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things{var.RANDOM}" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } + } + + resource "databricks_table" "mytable" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = "{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } + + resource "databricks_table" "mytable_2" { + catalog_name = databricks_catalog.sandbox.id + schema_name = databricks_schema.things.name + name = "bar_2" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "id" + position = 0 + type_name = "INT" + type_text = "int" + type_json = "{\"name\":\"id\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}" + } + } + + resource "databricks_share_pluginframework" "myshare" { + name = "{var.RANDOM}-terraform-delta-share" + object { + name = databricks_table.mytable.id + comment = "c" + data_object_type = "TABLE" + } + object { + name = databricks_table.mytable_2.id + cdf_enabled = false + comment = "c" + data_object_type = "TABLE" + } + } + + data "databricks_shares_pluginframework" "this" { + depends_on = [databricks_share_pluginframework.myshare] + } + output "shares" { + value = length(data.databricks_shares_pluginframework.this.shares) + } + `, + Check: checkSharesDataSourcePopulated(t), + }) +} From 5daf2ed398531581329a5afb110a2b4ff5d36e76 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Wed, 30 Oct 2024 10:33:35 -0400 Subject: [PATCH 10/14] [Feature] Added `databricks_functions` data source (#4154) ## Changes It's now possible to fetch information about functions defined in a specific UC schema. No integration test yet because we don't have `databricks_function` resource yet. 
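As a quick illustration, the new data source can be used along these lines (catalog and schema names are placeholders; see the documentation added in this PR for the full attribute list):

```hcl
data "databricks_functions" "all" {
  catalog_name = "main"
  schema_name  = "default"
}

# Expose just the fully qualified function names.
output "udf_names" {
  value = [for f in data.databricks_functions.all.functions : f.full_name]
}
```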
Resolves #4111 ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [x] using Go SDK --------- Co-authored-by: Miles Yucht --- docs/data-sources/functions.md | 86 ++++++++++++++++++ internal/providers/pluginfw/pluginfw.go | 2 + .../resources/catalog/data_functions.go | 90 +++++++++++++++++++ 3 files changed, 178 insertions(+) create mode 100644 docs/data-sources/functions.md create mode 100644 internal/providers/pluginfw/resources/catalog/data_functions.go diff --git a/docs/data-sources/functions.md b/docs/data-sources/functions.md new file mode 100644 index 0000000000..9a02db5b3b --- /dev/null +++ b/docs/data-sources/functions.md @@ -0,0 +1,86 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_functions Data Source + +-> This data source can only be used with a workspace-level provider! + +Retrieves a list of [User-Defined Functions (UDFs) registered in the Unity Catalog](https://docs.databricks.com/en/udf/unity-catalog.html). + +## Example Usage + +List all functions defined in a specific schema (`main.default` in this example): + +```hcl +data "databricks_functions" "all" { + catalog_name = "main" + schema_name = "default" +} + +output "all_functions" { + value = data.databricks_functions.all.functions +} +``` + +## Argument Reference + +The following arguments are supported: + +* `catalog_name` - (Required) Name of [databricks_catalog](../resources/catalog.md). +* `schema_name` - (Required) Name of [databricks_schema](../resources/schema.md). +* `include_browse` - (Optional, Boolean) flag specifying whether to include UDFs for which the principal can only access selective metadata. + +## Attribute Reference + +This data source exports the following attributes: + +* `functions` - list of objects describing individual UDFs. Each object consists of the following attributes (refer to the [REST API documentation](https://docs.databricks.com/api/workspace/functions/list#functions) for an up-to-date list of attributes; the default type is String): + * `name` - Name of function, relative to parent schema. + * `catalog_name` - Name of parent catalog. + * `schema_name` - Name of parent schema relative to its parent catalog. + * `input_params` - object describing input parameters. Consists of the single attribute: + * `parameters` - The array of definitions of the function's parameters: + * `name` - Name of parameter. + * `type_text` - Full data type spec, SQL/catalogString text. + * `type_json` - Full data type spec, JSON-serialized. + * `type_name` - Name of type (INT, STRUCT, MAP, etc.). + * `type_precision` - Digits of precision; required on Create for DecimalTypes. + * `type_scale` - Digits to right of decimal; Required on Create for DecimalTypes. + * `type_interval_type` - Format of IntervalType. + * `position` - Ordinal position of column (starting at position 0). + * `parameter_mode` - The mode of the function parameter. + * `parameter_type` - The type of function parameter (`PARAM` or `COLUMN`). + * `parameter_default` - Default value of the parameter. + * `comment` - User-provided free-form text description. + * `return_params` - Table function return parameters. See `input_params` for description. + * `data_type` - Scalar function return data type. + * `full_data_type` - Pretty printed function data type. + * `routine_body` - Function language (`SQL` or `EXTERNAL`).
When `EXTERNAL` is used, the language of the routine function should be specified in the `external_language` field, and the `return_params` of the function cannot be used (as `TABLE` return type is not supported), and the `sql_data_access` field must be `NO_SQL`. + * `routine_definition` - Function body. + * `routine_dependencies` - Function dependencies. + * `parameter_style` - Function parameter style. `S` is the value for SQL. + * `is_deterministic` - Boolean flag specifying whether the function is deterministic. + * `sql_data_access` - Function SQL data access (`CONTAINS_SQL`, `READS_SQL_DATA`, `NO_SQL`). + * `is_null_call` - Boolean flag indicating whether the function is a null call. + * `security_type` - Function security type (enum: `DEFINER`). + * `specific_name` - Specific name of the function; Reserved for future use. + * `external_name` - External function name. + * `external_language` - External function language. + * `sql_path` - List of schemes whose objects can be referenced without qualification. + * `owner` - Username of current owner of function. + * `comment` - User-provided free-form text description. + * `properties` - JSON-serialized key-value pair map, encoded (escaped) as a string. + * `metastore_id` - Unique identifier of parent metastore. + * `full_name` - Full name of function, in the form catalog_name.schema_name.function_name + * `created_at` - Time at which this function was created, in epoch milliseconds. + * `created_by` - Username of function creator. + * `updated_at` - Time at which this function was last updated, in epoch milliseconds. + * `updated_by` - Username of user who last modified function. + * `function_id` - Id of Function, relative to parent schema. + * `browse_only` - Indicates whether the principal is limited to retrieving metadata for the associated object through the `BROWSE` privilege when `include_browse` is enabled in the request.
+ +## Related Resources + +The following resources are used in the same context: + +* [databricks_schema](./schema.md) to get information about a single schema diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index e813c94aa3..5592e3e29b 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -16,6 +16,7 @@ import ( "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/catalog" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations" @@ -60,6 +61,7 @@ func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []f notificationdestinations.DataSourceNotificationDestinations, sharing.DataSourceShare, sharing.DataSourceShares, + catalog.DataSourceFunctions, } } diff --git a/internal/providers/pluginfw/resources/catalog/data_functions.go b/internal/providers/pluginfw/resources/catalog/data_functions.go new file mode 100644 index 0000000000..6837800b51 --- /dev/null +++ b/internal/providers/pluginfw/resources/catalog/data_functions.go @@ -0,0 +1,90 @@ +package catalog + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" + pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/converters" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/tfschema" + "github.com/databricks/terraform-provider-databricks/internal/service/catalog_tf" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func DataSourceFunctions() datasource.DataSource { + return &FunctionsDataSource{} +} + +var _ datasource.DataSourceWithConfigure = &FunctionsDataSource{} + +type FunctionsDataSource struct { + Client *common.DatabricksClient +} + +type FunctionsData struct { + CatalogName types.String `tfsdk:"catalog_name"` + SchemaName types.String `tfsdk:"schema_name"` + IncludeBrowse types.Bool `tfsdk:"include_browse" tf:"optional"` + Functions []catalog_tf.FunctionInfo `tfsdk:"functions" tf:"optional,computed"` +} + +func (d *FunctionsDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = "databricks_functions" +} + +func (d *FunctionsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + attrs, blocks := tfschema.DataSourceStructToSchemaMap(FunctionsData{}, nil) + resp.Schema = schema.Schema{ + Attributes: attrs, + Blocks: blocks, + } +} + +func (d *FunctionsDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if d.Client == nil { + d.Client = 
pluginfwcommon.ConfigureDataSource(req, resp) + } +} + +func (d *FunctionsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + w, diags := d.Client.GetWorkspaceClient() + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var functions FunctionsData + diags = req.Config.Get(ctx, &functions) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + catalogName := functions.CatalogName.ValueString() + schemaName := functions.SchemaName.ValueString() + functionsInfosSdk, err := w.Functions.ListAll(ctx, catalog.ListFunctionsRequest{ + CatalogName: catalogName, + SchemaName: schemaName, + IncludeBrowse: functions.IncludeBrowse.ValueBool(), + }) + if err != nil { + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + } + resp.Diagnostics.AddError(fmt.Sprintf("failed to get functions for %s.%s schema", catalogName, schemaName), err.Error()) + return + } + for _, functionSdk := range functionsInfosSdk { + var function catalog_tf.FunctionInfo + resp.Diagnostics.Append(converters.GoSdkToTfSdkStruct(ctx, functionSdk, &function)...) + if resp.Diagnostics.HasError() { + return + } + functions.Functions = append(functions.Functions, function) + } + resp.Diagnostics.Append(resp.State.Set(ctx, functions)...) +} From 17641de1cd3110b33c4b0b1a17f1ac17b0d6835b Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 31 Oct 2024 21:27:09 +0100 Subject: [PATCH 11/14] [Dependency] Bump Go SDK to v0.50.0 (#4178) ## Changes Use the latest Go SDK in the Terraform provider. The main changes affect Dashboards and Online Tables, whose Create and Update RPCs now accept an instance of the resource, rather than inlining the fields of the resource. Additionally, Online Tables introduced a waiter configuration, so we can remove hand-written waiter logic used before. 
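As a rough sketch of the new call shape (based on the provider changes below; table names and the timeout are placeholders, error handling trimmed):

```go
package example

import (
	"context"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func createOnlineTable(ctx context.Context, w *databricks.WorkspaceClient) (*catalog.OnlineTable, error) {
	// The resource is now passed as a nested struct instead of inlined request fields.
	wait, err := w.OnlineTables.Create(ctx, catalog.CreateOnlineTableRequest{
		Table: &catalog.OnlineTable{
			Name: "main.default.online_table",
			Spec: &catalog.OnlineTableSpec{
				SourceTableFullName: "main.default.test",
				PrimaryKeyColumns:   []string{"id"},
			},
		},
	})
	if err != nil {
		return nil, err
	}
	// The generated waiter replaces the hand-written polling loop.
	return wait.GetWithTimeout(90 * time.Minute)
}
```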
## Tests - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .codegen/_openapi_sha | 2 +- catalog/resource_online_table.go | 31 +--- catalog/resource_online_table_test.go | 66 +++++--- dashboards/resource_dashboard.go | 27 +-- dashboards/resource_dashboard_test.go | 42 +++-- go.mod | 2 +- go.sum | 4 +- internal/acceptance/dashboard_test.go | 26 +-- internal/service/apps_tf/model.go | 57 +------ internal/service/catalog_tf/model.go | 14 -- internal/service/dashboards_tf/model.go | 198 ++-------------------- internal/service/jobs_tf/model.go | 154 +++++++++-------- internal/service/oauth2_tf/model.go | 12 ++ internal/service/provisioning_tf/model.go | 23 +++ internal/service/settings_tf/model.go | 156 ++++++++++++++++- 15 files changed, 395 insertions(+), 419 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 2d9cb6d86d..ecf041814d 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -cf9c61453990df0f9453670f2fe68e1b128647a2 \ No newline at end of file +25b2478e5a18c888f0d423249abde5499dc58424 \ No newline at end of file diff --git a/catalog/resource_online_table.go b/catalog/resource_online_table.go index ca24d5f76f..ee4aa44754 100644 --- a/catalog/resource_online_table.go +++ b/catalog/resource_online_table.go @@ -16,29 +16,6 @@ import ( const onlineTableDefaultProvisionTimeout = 90 * time.Minute -func waitForOnlineTableCreation(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error { - return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError { - endpoint, err := w.OnlineTables.GetByName(ctx, onlineTableName) - if err != nil { - return retry.NonRetryableError(err) - } - if endpoint.Status == nil { - return retry.RetryableError(fmt.Errorf("online table status is not available yet")) - } - switch endpoint.Status.DetailedState { - case catalog.OnlineTableStateOnline, catalog.OnlineTableStateOnlineContinuousUpdate, - catalog.OnlineTableStateOnlineNoPendingUpdate, catalog.OnlineTableStateOnlineTriggeredUpdate: - return nil - - // does catalog.OnlineTableStateOffline means that it's failed? - case catalog.OnlineTableStateOfflineFailed, catalog.OnlineTableStateOnlinePipelineFailed: - return retry.NonRetryableError(fmt.Errorf("online table status returned %s for online table: %s", - endpoint.Status.DetailedState.String(), onlineTableName)) - } - return retry.RetryableError(fmt.Errorf("online table %s is still pending", onlineTableName)) - }) -} - func waitForOnlineTableDeletion(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error { return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError { _, err := w.OnlineTables.GetByName(ctx, onlineTableName) @@ -75,9 +52,9 @@ func ResourceOnlineTable() common.Resource { if err != nil { return err } - var req catalog.CreateOnlineTableRequest - common.DataToStructPointer(d, s, &req) - res, err := w.OnlineTables.Create(ctx, req) + var table catalog.OnlineTable + common.DataToStructPointer(d, s, &table) + res, err := w.OnlineTables.Create(ctx, catalog.CreateOnlineTableRequest{Table: &table}) if err != nil { return err } @@ -85,7 +62,7 @@ func ResourceOnlineTable() common.Resource { // If the resource creation timeout is exceeded while waiting for the online table to be ready, this ensures the online table is persisted in the state. 
d.SetId(res.Name) // this should be specified in the API Spec - filed a ticket to add it - err = waitForOnlineTableCreation(w, ctx, res.Name) + _, err = res.GetWithTimeout(onlineTableDefaultProvisionTimeout) if err != nil { return err } diff --git a/catalog/resource_online_table_test.go b/catalog/resource_online_table_test.go index 1deddd02a3..9f19063b48 100644 --- a/catalog/resource_online_table_test.go +++ b/catalog/resource_online_table_test.go @@ -1,8 +1,10 @@ package catalog import ( + "errors" "fmt" "testing" + "time" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/experimental/mocks" @@ -47,6 +49,13 @@ func TestOnlineTableCreate(t *testing.T) { PrimaryKeyColumns: []string{"id"}, }, } + otStatusNotSetWait := &catalog.WaitGetOnlineTableActive[catalog.OnlineTable]{ + Response: otStatusNotSet, + Name: "main.default.online_table", + Poll: func(d time.Duration, f func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) { + return otStatusOnline, nil + }, + } // otStatusUnknown := &catalog.OnlineTable{ // Name: "main.default.online_table", // Spec: &catalog.OnlineTableSpec{ @@ -60,16 +69,15 @@ func TestOnlineTableCreate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, - }).Return(otStatusNotSet, nil) - // TODO: how to emulate the status change - // e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusNotSet, nil) - // e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusUnknown, nil) + }).Return(otStatusNotSetWait, nil) e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusOnline, nil) }, Resource: ResourceOnlineTable(), @@ -85,11 +93,13 @@ func TestOnlineTableCreate_ErrorImmediately(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, }).Return(nil, fmt.Errorf("error!")) }, @@ -100,33 +110,41 @@ func TestOnlineTableCreate_ErrorImmediately(t *testing.T) { } func TestOnlineTableCreate_ErrorInWait(t *testing.T) { - otStatusError := &catalog.OnlineTable{ + otStatusProvisioning := &catalog.OnlineTable{ Name: "main.default.online_table", Spec: &catalog.OnlineTableSpec{ RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, SourceTableFullName: "main.default.test", PrimaryKeyColumns: []string{"id"}, }, - Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateOfflineFailed}, + 
Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateProvisioning}, + } + otStatusErrorWait := &catalog.WaitGetOnlineTableActive[catalog.OnlineTable]{ + Response: otStatusProvisioning, + Name: "main.default.online_table", + Poll: func(d time.Duration, f func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) { + return nil, errors.New("failed to reach ACTIVE, got OFFLINE_FAILED: error!") + }, } d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, - }).Return(otStatusError, nil) - e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusError, nil) + }).Return(otStatusErrorWait, nil) }, Resource: ResourceOnlineTable(), HCL: onlineTableHcl, Create: true, }.Apply(t) - qa.AssertErrorStartsWith(t, err, "online table status returned OFFLINE_FAILED for online table: main.default.online_table") + qa.AssertErrorStartsWith(t, err, "failed to reach ACTIVE, got OFFLINE_FAILED: error!") assert.Equal(t, "main.default.online_table", d.Id()) } diff --git a/dashboards/resource_dashboard.go b/dashboards/resource_dashboard.go index d872b33f49..de61205243 100644 --- a/dashboards/resource_dashboard.go +++ b/dashboards/resource_dashboard.go @@ -68,22 +68,22 @@ func ResourceDashboard() common.Resource { if err != nil { return err } - var newDashboardRequest dashboards.CreateDashboardRequest - common.DataToStructPointer(d, dashboardSchema, &newDashboardRequest) + var dashboard dashboards.Dashboard + common.DataToStructPointer(d, dashboardSchema, &dashboard) content, md5Hash, err := common.ReadSerializedJsonContent(d.Get("serialized_dashboard").(string), d.Get("file_path").(string)) if err != nil { return err } d.Set("md5", md5Hash) - newDashboardRequest.SerializedDashboard = content - createdDashboard, err := w.Lakeview.Create(ctx, newDashboardRequest) + dashboard.SerializedDashboard = content + createdDashboard, err := w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{Dashboard: &dashboard}) if err != nil && isParentDoesntExistError(err) { - log.Printf("[DEBUG] Parent folder '%s' doesn't exist, creating...", newDashboardRequest.ParentPath) - err = w.Workspace.MkdirsByPath(ctx, newDashboardRequest.ParentPath) + log.Printf("[DEBUG] Parent folder '%s' doesn't exist, creating...", dashboard.ParentPath) + err = w.Workspace.MkdirsByPath(ctx, dashboard.ParentPath) if err != nil { return err } - createdDashboard, err = w.Lakeview.Create(ctx, newDashboardRequest) + createdDashboard, err = w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{Dashboard: &dashboard}) } if err != nil { return err @@ -132,16 +132,19 @@ func ResourceDashboard() common.Resource { if err != nil { return err } - var updateDashboardRequest dashboards.UpdateDashboardRequest - common.DataToStructPointer(d, dashboardSchema, &updateDashboardRequest) - updateDashboardRequest.DashboardId = d.Id() + var dashboard dashboards.Dashboard + common.DataToStructPointer(d, dashboardSchema, &dashboard) + 
dashboard.DashboardId = d.Id() content, md5Hash, err := common.ReadSerializedJsonContent(d.Get("serialized_dashboard").(string), d.Get("file_path").(string)) if err != nil { return err } d.Set("md5", md5Hash) - updateDashboardRequest.SerializedDashboard = content - updatedDashboard, err := w.Lakeview.Update(ctx, updateDashboardRequest) + dashboard.SerializedDashboard = content + updatedDashboard, err := w.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{ + DashboardId: dashboard.DashboardId, + Dashboard: &dashboard, + }) if err != nil { return err } diff --git a/dashboards/resource_dashboard_test.go b/dashboards/resource_dashboard_test.go index 0b450fdd7d..9016ce2dda 100644 --- a/dashboards/resource_dashboard_test.go +++ b/dashboards/resource_dashboard_test.go @@ -16,10 +16,12 @@ func TestDashboardCreate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockLakeviewAPI().EXPECT() e.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", @@ -67,17 +69,21 @@ func TestDashboardCreate_NoParent(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { lv := w.GetMockLakeviewAPI().EXPECT() lv.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(nil, fmt.Errorf("Path (/path) doesn't exist.")).Once() w.GetMockWorkspaceAPI().EXPECT().MkdirsByPath(mock.Anything, "/path").Return(nil) lv.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", @@ -154,10 +160,14 @@ func TestDashboardUpdate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockLakeviewAPI().EXPECT() e.Update(mock.Anything, dashboards.UpdateDashboardRequest{ - DashboardId: "xyz", - DisplayName: "Dashboard name", - WarehouseId: "abc", - SerializedDashboard: "serialized_dashboard_updated", + DashboardId: "xyz", + Dashboard: &dashboards.Dashboard{ + DashboardId: "xyz", + DisplayName: "Dashboard name", + WarehouseId: "abc", + SerializedDashboard: "serialized_dashboard_updated", + ParentPath: "/path", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", diff --git a/go.mod b/go.mod index 4e97cc0d23..1e72ea27a6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.49.0 + github.com/databricks/databricks-sdk-go v0.50.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 
e95a0ffe39..1188a3923d 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.49.0 h1:VBTeZZMLIuBSM4kxOCfUcW9z4FUQZY2QeNRD5qm9FUQ= -github.com/databricks/databricks-sdk-go v0.49.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.50.0 h1:Zl4uBhYMT5z6aDojCQJPT2zCYjjfqxBQSQn8uLTphpo= +github.com/databricks/databricks-sdk-go v0.50.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/acceptance/dashboard_test.go b/internal/acceptance/dashboard_test.go index 49118c9455..91c6335b9a 100644 --- a/internal/acceptance/dashboard_test.go +++ b/internal/acceptance/dashboard_test.go @@ -315,11 +315,14 @@ func TestAccDashboardWithRemoteChange(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) _, err = w.Lakeview.Update(context.Background(), dashboards.UpdateDashboardRequest{ - DashboardId: dashboard_id, - DisplayName: display_name, - Etag: etag, - WarehouseId: warehouse_id, - SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + DashboardId: dashboard_id, + Dashboard: &dashboards.Dashboard{ + DashboardId: dashboard_id, + DisplayName: display_name, + Etag: etag, + WarehouseId: warehouse_id, + SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + }, }) require.NoError(t, err) }, @@ -419,11 +422,14 @@ func TestAccDashboardTestAll(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) _, err = w.Lakeview.Update(context.Background(), dashboards.UpdateDashboardRequest{ - DashboardId: dashboard_id, - DisplayName: display_name, - Etag: etag, - WarehouseId: warehouse_id, - SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + DashboardId: dashboard_id, + Dashboard: &dashboards.Dashboard{ + DashboardId: dashboard_id, + DisplayName: display_name, + Etag: etag, + WarehouseId: warehouse_id, + SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + }, }) require.NoError(t, err) }, diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 4f90baf118..b5a602ba1f 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -144,6 +144,8 @@ func (newState *AppAccessControlResponse) SyncEffectiveFieldsDuringRead(existing } type AppDeployment struct { + // The name of the app. + AppName types.String `tfsdk:"-"` // The creation time of the deployment. Formatted timestamp in ISO 6801. 
CreateTime types.String `tfsdk:"create_time" tf:"optional"` EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` @@ -398,45 +400,6 @@ func (newState *ComputeStatus) SyncEffectiveFieldsDuringRead(existingState Compu } } -type CreateAppDeploymentRequest struct { - // The name of the app. - AppName types.String `tfsdk:"-"` - // The unique id of the deployment. - DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"` - // The mode of which the deployment will manage the source code. - Mode types.String `tfsdk:"mode" tf:"optional"` - // The workspace file system path of the source code used to create the app - // deployment. This is different from - // `deployment_artifacts.source_code_path`, which is the path used by the - // deployed app. The former refers to the original source code location of - // the app in the workspace during deployment creation, whereas the latter - // provides a system generated stable snapshotted source code path used by - // the deployment. - SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` -} - -func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppDeploymentRequest) { -} - -func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppDeploymentRequest) { -} - -type CreateAppRequest struct { - // The description of the app. - Description types.String `tfsdk:"description" tf:"optional"` - // The name of the app. The name must contain only lowercase alphanumeric - // characters and hyphens. It must be unique within the workspace. - Name types.String `tfsdk:"name" tf:""` - // Resources for the app. - Resources []AppResource `tfsdk:"resources" tf:"optional"` -} - -func (newState *CreateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppRequest) { -} - -func (newState *CreateAppRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppRequest) { -} - // Delete an app type DeleteAppRequest struct { // The name of the app. @@ -588,19 +551,3 @@ func (newState *StopAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan Sto func (newState *StopAppRequest) SyncEffectiveFieldsDuringRead(existingState StopAppRequest) { } - -type UpdateAppRequest struct { - // The description of the app. - Description types.String `tfsdk:"description" tf:"optional"` - // The name of the app. The name must contain only lowercase alphanumeric - // characters and hyphens. It must be unique within the workspace. - Name types.String `tfsdk:"name" tf:""` - // Resources for the app. - Resources []AppResource `tfsdk:"resources" tf:"optional"` -} - -func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAppRequest) { -} - -func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAppRequest) { -} diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index 55778dfa87..caf38f865c 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -728,20 +728,6 @@ func (newState *CreateMonitor) SyncEffectiveFieldsDuringCreateOrUpdate(plan Crea func (newState *CreateMonitor) SyncEffectiveFieldsDuringRead(existingState CreateMonitor) { } -// Online Table information. -type CreateOnlineTableRequest struct { - // Full three-part (catalog, schema, table) name of the table. - Name types.String `tfsdk:"name" tf:"optional"` - // Specification of the online table. 
- Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` -} - -func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateOnlineTableRequest) { -} - -func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringRead(existingState CreateOnlineTableRequest) { -} - type CreateRegisteredModelRequest struct { // The name of the catalog where the schema and the registered model reside CatalogName types.String `tfsdk:"catalog_name" tf:""` diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index b76126b18b..2066f6a422 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -15,93 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -type CreateDashboardRequest struct { - // The display name of the dashboard. - DisplayName types.String `tfsdk:"display_name" tf:""` - // The workspace path of the folder containing the dashboard. Includes - // leading slash and no trailing slash. This field is excluded in List - // Dashboards responses. - ParentPath types.String `tfsdk:"parent_path" tf:"optional"` - EffectiveParentPath types.String `tfsdk:"effective_parent_path" tf:"computed,optional"` - // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. Use the [get dashboard API] to - // retrieve an example response, which includes the `serialized_dashboard` - // field. This field provides the structure of the JSON string that - // represents the dashboard's layout and components. - // - // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` - // The warehouse ID used to run the dashboard. - WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` -} - -func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateDashboardRequest) { - newState.EffectiveParentPath = newState.ParentPath - newState.ParentPath = plan.ParentPath -} - -func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateDashboardRequest) { - newState.EffectiveParentPath = existingState.EffectiveParentPath - if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { - newState.ParentPath = existingState.ParentPath - } -} - -type CreateScheduleRequest struct { - // The cron expression describing the frequency of the periodic refresh for - // this schedule. - CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` - // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // The display name for schedule. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The status indicates whether this schedule is paused or not. 
- PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` -} - -func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateScheduleRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId -} - -func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState CreateScheduleRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } -} - -type CreateSubscriptionRequest struct { - // UUID identifying the dashboard to which the subscription belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // UUID identifying the schedule to which the subscription belongs. - ScheduleId types.String `tfsdk:"-"` - EffectiveScheduleId types.String `tfsdk:"-"` - // Subscriber details for users and destinations to be added as subscribers - // to the schedule. - Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"` -} - -func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateSubscriptionRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId - newState.EffectiveScheduleId = newState.ScheduleId - newState.ScheduleId = plan.ScheduleId -} - -func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState CreateSubscriptionRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } - newState.EffectiveScheduleId = existingState.EffectiveScheduleId - if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { - newState.ScheduleId = existingState.ScheduleId - } -} - type CronSchedule struct { // A cron expression using quartz syntax. EX: `0 0 8 * * ?` represents // everyday at 8am. See [Cron Trigger] for details. 
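Editor's note: the removals above and below track the databricks-sdk-go v0.50.0 change in which the Lakeview create/update requests nest a full `Dashboard` object instead of flattening its fields, as the `resource_dashboard.go` hunks earlier in this patch show. The following is a minimal sketch of the new call shape, not part of this patch; the display name, warehouse ID, and parent path are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// SDK >= 0.50.0: the dashboard payload lives in the nested Dashboard struct.
	created, err := w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
		Dashboard: &dashboards.Dashboard{
			DisplayName: "Example dashboard",  // placeholder
			WarehouseId: "1234567890abcdef",   // placeholder warehouse ID
			ParentPath:  "/Shared/dashboards", // placeholder workspace folder
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(created.DashboardId)
}
```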
@@ -308,22 +221,6 @@ func (newState *DeleteSubscriptionResponse) SyncEffectiveFieldsDuringCreateOrUpd func (newState *DeleteSubscriptionResponse) SyncEffectiveFieldsDuringRead(existingState DeleteSubscriptionResponse) { } -// Execute SQL query in a conversation message -type ExecuteMessageQueryRequest struct { - // Conversation ID - ConversationId types.String `tfsdk:"-"` - // Message ID - MessageId types.String `tfsdk:"-"` - // Genie space ID - SpaceId types.String `tfsdk:"-"` -} - -func (newState *ExecuteMessageQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExecuteMessageQueryRequest) { -} - -func (newState *ExecuteMessageQueryRequest) SyncEffectiveFieldsDuringRead(existingState ExecuteMessageQueryRequest) { -} - // Genie AI Response type GenieAttachment struct { Query []QueryAttachment `tfsdk:"query" tf:"optional,object"` @@ -373,6 +270,22 @@ func (newState *GenieCreateConversationMessageRequest) SyncEffectiveFieldsDuring func (newState *GenieCreateConversationMessageRequest) SyncEffectiveFieldsDuringRead(existingState GenieCreateConversationMessageRequest) { } +// Execute SQL query in a conversation message +type GenieExecuteMessageQueryRequest struct { + // Conversation ID + ConversationId types.String `tfsdk:"-"` + // Message ID + MessageId types.String `tfsdk:"-"` + // Genie space ID + SpaceId types.String `tfsdk:"-"` +} + +func (newState *GenieExecuteMessageQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieExecuteMessageQueryRequest) { +} + +func (newState *GenieExecuteMessageQueryRequest) SyncEffectiveFieldsDuringRead(existingState GenieExecuteMessageQueryRequest) { +} + // Get conversation message type GenieGetConversationMessageRequest struct { // The ID associated with the target conversation. @@ -1112,82 +1025,3 @@ func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpd func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringRead(existingState UnpublishDashboardResponse) { } - -type UpdateDashboardRequest struct { - // UUID identifying the dashboard. - DashboardId types.String `tfsdk:"-"` - // The display name of the dashboard. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The etag for the dashboard. Can be optionally provided on updates to - // ensure that the dashboard has not been modified since the last read. This - // field is excluded in List Dashboards responses. - Etag types.String `tfsdk:"etag" tf:"optional"` - EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` - // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. Use the [get dashboard API] to - // retrieve an example response, which includes the `serialized_dashboard` - // field. This field provides the structure of the JSON string that - // represents the dashboard's layout and components. - // - // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` - // The warehouse ID used to run the dashboard. 
- WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` -} - -func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDashboardRequest) { - newState.EffectiveEtag = newState.Etag - newState.Etag = plan.Etag -} - -func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDashboardRequest) { - newState.EffectiveEtag = existingState.EffectiveEtag - if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { - newState.Etag = existingState.Etag - } -} - -type UpdateScheduleRequest struct { - // The cron expression describing the frequency of the periodic refresh for - // this schedule. - CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` - // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // The display name for schedule. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The etag for the schedule. Must be left empty on create, must be provided - // on updates to ensure that the schedule has not been modified since the - // last read, and can be optionally provided on delete. - Etag types.String `tfsdk:"etag" tf:"optional"` - EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` - // The status indicates whether this schedule is paused or not. - PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` - // UUID identifying the schedule. - ScheduleId types.String `tfsdk:"-"` - EffectiveScheduleId types.String `tfsdk:"-"` -} - -func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateScheduleRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId - newState.EffectiveEtag = newState.Etag - newState.Etag = plan.Etag - newState.EffectiveScheduleId = newState.ScheduleId - newState.ScheduleId = plan.ScheduleId -} - -func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState UpdateScheduleRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } - newState.EffectiveEtag = existingState.EffectiveEtag - if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { - newState.Etag = existingState.Etag - } - newState.EffectiveScheduleId = existingState.EffectiveScheduleId - if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { - newState.ScheduleId = existingState.ScheduleId - } -} diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 295d1e1a99..fe3918dabd 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -839,9 +839,8 @@ type GetRunRequest struct { IncludeHistory types.Bool `tfsdk:"-"` // Whether to include resolved parameter values in the response. IncludeResolvedValues types.Bool `tfsdk:"-"` - // To list the next page or the previous page of job tasks, set this field - // to the value of the `next_page_token` or `prev_page_token` returned in - // the GetJob response. + // To list the next page of job tasks, set this field to the value of the + // `next_page_token` returned in the GetJob response. PageToken types.String `tfsdk:"-"` // The canonical identifier of the run for which to retrieve the metadata. // This field is required. 
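Editor's note: the hunk above reflects the databricks-sdk-go v0.50.0 change that makes task pagination for `GetRun` forward-only (the `prev_page_token` field is dropped from `Run` in the next hunk). Below is a minimal sketch, not part of this patch, of how a caller might walk all tasks of a large run by following `next_page_token`; the run ID is a placeholder and the field names assume the v0.50.0 Go SDK.

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	const runID int64 = 123456 // placeholder run ID

	pageToken := ""
	for {
		run, err := w.Jobs.GetRun(ctx, jobs.GetRunRequest{
			RunId:     runID,
			PageToken: pageToken,
		})
		if err != nil {
			panic(err)
		}
		for _, task := range run.Tasks {
			fmt.Println(task.TaskKey)
		}
		// Forward-only pagination: keep following next_page_token until it is empty.
		if run.NextPageToken == "" {
			break
		}
		pageToken = run.NextPageToken
	}
}
```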
@@ -1727,8 +1726,10 @@ type RepairRun struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // Job-level parameters used in the run. for example `"param": // "overriding_val"` @@ -2040,8 +2041,6 @@ type Run struct { OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"` // The parameters used for this run. OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional,object"` - // A token that can be used to list the previous page of sub-resources. - PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` // The repair history of the run. @@ -2182,8 +2181,10 @@ type RunJobTask struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // ID of the job to trigger. JobId types.Int64 `tfsdk:"job_id" tf:""` @@ -2290,8 +2291,10 @@ type RunNow struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // The ID of the job to be executed JobId types.Int64 `tfsdk:"job_id" tf:""` @@ -2447,8 +2450,10 @@ type RunParameters struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // A map from keys to values for jobs with notebook task, for example // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed @@ -2584,13 +2589,14 @@ type RunTask struct { // cluster, this field is set once the Jobs service has requested a cluster // for the run. ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"` - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. 
+ // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []RunConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before @@ -2622,8 +2628,8 @@ type RunTask struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []RunForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by @@ -2643,16 +2649,17 @@ type RunTask struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` @@ -2668,7 +2675,7 @@ type RunTask struct { // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` RunPageUrl types.String `tfsdk:"run_page_url" tf:"optional"` @@ -2680,12 +2687,14 @@ type RunTask struct { // job runs. 
The total duration of a multitask job run is the value of the // `run_duration` field. SetupDuration types.Int64 `tfsdk:"setup_duration" tf:"optional"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -2702,7 +2711,8 @@ type RunTask struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job @@ -3112,13 +3122,14 @@ func (newState *SubmitRunResponse) SyncEffectiveFieldsDuringRead(existingState S } type SubmitTask struct { - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before @@ -3139,8 +3150,8 @@ type SubmitTask struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. 
Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` @@ -3150,30 +3161,33 @@ type SubmitTask struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -3190,7 +3204,8 @@ type SubmitTask struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. 
This field is required and must be unique within its parent @@ -3236,13 +3251,14 @@ func (newState *TableUpdateTriggerConfiguration) SyncEffectiveFieldsDuringRead(e } type Task struct { - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete before executing this @@ -3266,8 +3282,8 @@ type Task struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` @@ -3289,16 +3305,17 @@ type Task struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered more are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional policy to specify whether to retry a job when it times out. // The default behavior is to not retry on timeout. 
@@ -3313,14 +3330,16 @@ type Task struct { // `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: ALl // dependencies have failed RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -3337,7 +3356,8 @@ type Task struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent diff --git a/internal/service/oauth2_tf/model.go b/internal/service/oauth2_tf/model.go index e738e7f094..eacea7e75a 100755 --- a/internal/service/oauth2_tf/model.go +++ b/internal/service/oauth2_tf/model.go @@ -342,6 +342,16 @@ func (newState *ListPublishedAppIntegrationsRequest) SyncEffectiveFieldsDuringRe // List service principal secrets type ListServicePrincipalSecretsRequest struct { + // An opaque page token which was the `next_page_token` in the response of + // the previous request to list the secrets for this service principal. + // Provide this token to retrieve the next page of secret entries. When + // providing a `page_token`, all other parameters provided to the request + // must match the previous request. To list all of the secrets for a service + // principal, it is necessary to continue requesting pages of entries until + // the response contains no `next_page_token`. Note that the number of + // entries returned must not be used to determine when the listing is + // complete. + PageToken types.String `tfsdk:"-"` // The service principal ID. ServicePrincipalId types.Int64 `tfsdk:"-"` } @@ -353,6 +363,8 @@ func (newState *ListServicePrincipalSecretsRequest) SyncEffectiveFieldsDuringRea } type ListServicePrincipalSecretsResponse struct { + // A token, which can be sent as `page_token` to retrieve the next page. 
+ NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` // List of the secrets Secrets []SecretInfo `tfsdk:"secrets" tf:"optional"` } diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index 17d8bbc18e..188e8f48df 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -277,6 +277,8 @@ type CreateWorkspaceRequest struct { GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled types.Bool `tfsdk:"is_no_public_ip_enabled" tf:"optional"` // The Google Cloud region of the workspace data plane in your Google // account. For example, `us-east4`. Location types.String `tfsdk:"location" tf:"optional"` @@ -482,6 +484,21 @@ func (newState *DeleteWorkspaceRequest) SyncEffectiveFieldsDuringCreateOrUpdate( func (newState *DeleteWorkspaceRequest) SyncEffectiveFieldsDuringRead(existingState DeleteWorkspaceRequest) { } +type ExternalCustomerInfo struct { + // Email of the authoritative user. + AuthoritativeUserEmail types.String `tfsdk:"authoritative_user_email" tf:"optional"` + // The authoritative user full name. + AuthoritativeUserFullName types.String `tfsdk:"authoritative_user_full_name" tf:"optional"` + // The legal entity name for the external workspace + CustomerName types.String `tfsdk:"customer_name" tf:"optional"` +} + +func (newState *ExternalCustomerInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExternalCustomerInfo) { +} + +func (newState *ExternalCustomerInfo) SyncEffectiveFieldsDuringRead(existingState ExternalCustomerInfo) { +} + type GcpKeyInfo struct { // The GCP KMS key's resource name KmsKeyId types.String `tfsdk:"kms_key_id" tf:""` @@ -1063,6 +1080,10 @@ type Workspace struct { // This value must be unique across all non-deleted deployments across all // AWS regions. DeploymentName types.String `tfsdk:"deployment_name" tf:"optional"` + // If this workspace is for a external customer, then external_customer_info + // is populated. If this workspace is not for a external customer, then + // external_customer_info is empty. + ExternalCustomerInfo []ExternalCustomerInfo `tfsdk:"external_customer_info" tf:"optional,object"` // The network settings for the workspace. The configurations are only for // Databricks-managed VPCs. It is ignored if you specify a customer-managed // VPC in the `network_id` field.", All the IP range configurations must be @@ -1089,6 +1110,8 @@ type Workspace struct { GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled types.Bool `tfsdk:"is_no_public_ip_enabled" tf:"optional"` // The Google Cloud region of the workspace data plane in your Google // account (for example, `us-east4`). 
Location types.String `tfsdk:"location" tf:"optional"` diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 71b73ba253..4564aeb780 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -14,6 +14,74 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) +type AibiDashboardEmbeddingAccessPolicy struct { + AccessPolicyType types.String `tfsdk:"access_policy_type" tf:""` +} + +func (newState *AibiDashboardEmbeddingAccessPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingAccessPolicy) { +} + +func (newState *AibiDashboardEmbeddingAccessPolicy) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingAccessPolicy) { +} + +type AibiDashboardEmbeddingAccessPolicySetting struct { + AibiDashboardEmbeddingAccessPolicy []AibiDashboardEmbeddingAccessPolicy `tfsdk:"aibi_dashboard_embedding_access_policy" tf:"object"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +func (newState *AibiDashboardEmbeddingAccessPolicySetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingAccessPolicySetting) { +} + +func (newState *AibiDashboardEmbeddingAccessPolicySetting) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingAccessPolicySetting) { +} + +type AibiDashboardEmbeddingApprovedDomains struct { + ApprovedDomains []types.String `tfsdk:"approved_domains" tf:"optional"` +} + +func (newState *AibiDashboardEmbeddingApprovedDomains) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingApprovedDomains) { +} + +func (newState *AibiDashboardEmbeddingApprovedDomains) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingApprovedDomains) { +} + +type AibiDashboardEmbeddingApprovedDomainsSetting struct { + AibiDashboardEmbeddingApprovedDomains []AibiDashboardEmbeddingApprovedDomains `tfsdk:"aibi_dashboard_embedding_approved_domains" tf:"object"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. 
This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +func (newState *AibiDashboardEmbeddingApprovedDomainsSetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingApprovedDomainsSetting) { +} + +func (newState *AibiDashboardEmbeddingApprovedDomainsSetting) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingApprovedDomainsSetting) { +} + type AutomaticClusterUpdateSetting struct { AutomaticClusterUpdateWorkspace []ClusterAutoRestartMessage `tfsdk:"automatic_cluster_update_workspace" tf:"object"` // etag used for versioning. The response is at least as fresh as the eTag @@ -962,6 +1030,42 @@ func (newState *GetAccountIpAccessListRequest) SyncEffectiveFieldsDuringCreateOr func (newState *GetAccountIpAccessListRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountIpAccessListRequest) { } +// Retrieve the AI/BI dashboard embedding access policy +type GetAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +func (newState *GetAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +func (newState *GetAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringRead(existingState GetAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +// Retrieve the list of domains approved to host embedded AI/BI dashboards +type GetAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +func (newState *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + +func (newState *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + // Get the automatic cluster update setting type GetAutomaticClusterUpdateSettingRequest struct { // etag used for versioning. 
The response is at least as fresh as the eTag @@ -1494,8 +1598,7 @@ type NccAzurePrivateEndpointRule struct { // DISCONNECTED: Connection was removed by the private link resource owner, // the private endpoint becomes informative and should be deleted for // clean-up. - ConnectionState types.String `tfsdk:"connection_state" tf:"optional"` - EffectiveConnectionState types.String `tfsdk:"effective_connection_state" tf:"computed,optional"` + ConnectionState types.String `tfsdk:"connection_state" tf:"optional"` // Time in epoch milliseconds when this object was created. CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"` @@ -1526,8 +1629,6 @@ type NccAzurePrivateEndpointRule struct { } func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccAzurePrivateEndpointRule) { - newState.EffectiveConnectionState = newState.ConnectionState - newState.ConnectionState = plan.ConnectionState newState.EffectiveCreationTime = newState.CreationTime newState.CreationTime = plan.CreationTime newState.EffectiveDeactivated = newState.Deactivated @@ -1543,10 +1644,6 @@ func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUp } func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringRead(existingState NccAzurePrivateEndpointRule) { - newState.EffectiveConnectionState = existingState.EffectiveConnectionState - if existingState.EffectiveConnectionState.ValueString() == newState.ConnectionState.ValueString() { - newState.ConnectionState = existingState.ConnectionState - } newState.EffectiveCreationTime = existingState.EffectiveCreationTime if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() { newState.CreationTime = existingState.CreationTime @@ -1968,6 +2065,9 @@ type TokenInfo struct { CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"` // Timestamp when the token expires. ExpiryTime types.Int64 `tfsdk:"expiry_time" tf:"optional"` + // Approximate timestamp for the day the token was last used. Accurate up to + // 1 day. + LastUsedDay types.Int64 `tfsdk:"last_used_day" tf:"optional"` // User ID of the user that owns the token. OwnerId types.Int64 `tfsdk:"owner_id" tf:"optional"` // ID of the token. @@ -2032,6 +2132,46 @@ func (newState *TokenPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate func (newState *TokenPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState TokenPermissionsRequest) { } +// Details required to update a setting. +type UpdateAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). 
+ FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting []AibiDashboardEmbeddingAccessPolicySetting `tfsdk:"setting" tf:"object"` +} + +func (newState *UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +func (newState *UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +// Details required to update a setting. +type UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting []AibiDashboardEmbeddingApprovedDomainsSetting `tfsdk:"setting" tf:"object"` +} + +func (newState *UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + +func (newState *UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + // Details required to update a setting. type UpdateAutomaticClusterUpdateSettingRequest struct { // This should always be set to true for Settings API. Added for AIP From da1f7e440af4eb42ecf2487f34973a3d9a0897ee Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Thu, 31 Oct 2024 20:51:09 -0700 Subject: [PATCH 12/14] [Internal] Rollout Plugin Framework (#4134) ## Changes - Made it possible to use environment variable USE_SDK_V2 to control rollout - Bumped resource and data source names from staging names to production names (can debate on whether we want to do that now or in a more gradual fashion) - Proposed mechanism: https://docs.google.com/document/d/1zfTp8YesMe4GxkbIt9Sbwd5nQL7Y4mXed8dPDGmAoxU/edit?tab=t.0 - Added infra for switching providers from sdkv2 to plugin framework in integration tests and added test cases for those cases ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [x] covered with integration tests in `internal/acceptance` - [x] relevant acceptance tests are passing - [x] using Go SDK --- docs/data-sources/volumes.md | 3 + docs/guides/troubleshooting.md | 12 + docs/resources/library.md | 3 + docs/resources/quality_monitor.md | 3 + internal/providers/pluginfw/pluginfw.go | 30 +- .../pluginfw/pluginfw_rollout_utils.go | 205 +++++++++++ .../resources/library/resource_library.go | 5 +- .../library/resource_library_acc_test.go | 131 ++++--- .../resource_quality_monitor.go | 8 +- .../resource_quality_monitor_acc_test.go | 104 +++++- .../pluginfw/resources/volume/data_volumes.go | 2 +- .../resources/volume/data_volumes_acc_test.go | 8 +- internal/providers/providers.go | 19 +- internal/providers/sdkv2/sdkv2.go | 330 ++++++++++-------- 14 files changed, 617 insertions(+), 246 deletions(-) create mode 100644 internal/providers/pluginfw/pluginfw_rollout_utils.go diff --git a/docs/data-sources/volumes.md b/docs/data-sources/volumes.md index 
89ee190f39..4c529185cd 100644 --- a/docs/data-sources/volumes.md +++ b/docs/data-sources/volumes.md @@ -7,6 +7,9 @@ subcategory: "Unity Catalog" Retrieves a list of [databricks_volume](../resources/volume.md) ids (full names), that were created by Terraform or manually. +## Plugin Framework Migration +The volumes data source has been migrated from sdkv2 to plugin framework in version 1.57. If you encounter any problem with this data source and suspect it is due to the migration, you can fall back to sdkv2 by setting the environment variable as follows: `export USE_SDK_V2_DATA_SOURCES="databricks_volumes"`. + ## Example Usage Listing all volumes in a _things_ [databricks_schema](../resources/schema.md) of a _sandbox_ [databricks_catalog](../resources/catalog.md): diff --git a/docs/guides/troubleshooting.md b/docs/guides/troubleshooting.md index 93d9d89fec..dadd4a51c9 100644 --- a/docs/guides/troubleshooting.md +++ b/docs/guides/troubleshooting.md @@ -17,6 +17,18 @@ TF_LOG=DEBUG DATABRICKS_DEBUG_TRUNCATE_BYTES=250000 terraform apply -no-color 2> * Open a [new GitHub issue](https://github.com/databricks/terraform-provider-databricks/issues/new/choose) providing all information described in the issue template - debug logs, your Terraform code, Terraform & plugin versions, etc. +## Plugin Framework Migration Problems +The following resources and data sources have been migrated from sdkv2 to plugin framework. If you encounter any problem with them, you can fall back to sdkv2 by setting the `USE_SDK_V2_RESOURCES` and `USE_SDK_V2_DATA_SOURCES` environment variables. + +Example: `export USE_SDK_V2_RESOURCES="databricks_library,databricks_quality_monitor"` + +### Resources migrated + - databricks_quality_monitor + - databricks_library +### Data sources migrated + - databricks_volumes + + ## Typical problems ### Data resources and Authentication is not configured errors diff --git a/docs/resources/library.md b/docs/resources/library.md index c693bfed8d..e03ad0ea40 100644 --- a/docs/resources/library.md +++ b/docs/resources/library.md @@ -7,6 +7,9 @@ Installs a [library](https://docs.databricks.com/libraries/index.html) on [datab -> `databricks_library` resource would always start the associated cluster if it's not running, so make sure to have auto-termination configured. It's not possible to atomically change the version of the same library without cluster restart. Libraries are fully removed from the cluster only after restart. +## Plugin Framework Migration +The library resource has been migrated from sdkv2 to plugin framework. If you encounter any problem with this resource and suspect it is due to the migration, you can fall back to sdkv2 by setting the environment variable as follows: `export USE_SDK_V2_RESOURCES="databricks_library"`. + ## Installing library on all clusters You can install libraries on all clusters with the help of [databricks_clusters](../data-sources/clusters.md) data resource: diff --git a/docs/resources/quality_monitor.md b/docs/resources/quality_monitor.md index 71613a6e0d..64e06f187a 100644 --- a/docs/resources/quality_monitor.md +++ b/docs/resources/quality_monitor.md @@ -7,6 +7,9 @@ This resource allows you to manage [Lakehouse Monitors](https://docs.databricks. A `databricks_quality_monitor` is attached to a [databricks_sql_table](sql_table.md) and can be of type timeseries, snapshot or inference. 
+## Plugin Framework Migration +The quality monitor resource has been migrated from sdkv2 to the plugin framework. If you encounter any problem with this resource and suspect it is due to the migration, you can fall back to sdkv2 by setting the environment variable as follows: `export USE_SDK_V2_RESOURCES="databricks_quality_monitor"`. + ## Example Usage ```hcl diff --git a/internal/providers/pluginfw/pluginfw.go b/internal/providers/pluginfw/pluginfw.go index 5592e3e29b..4eaecd9938 100644 --- a/internal/providers/pluginfw/pluginfw.go +++ b/internal/providers/pluginfw/pluginfw.go @@ -16,15 +16,6 @@ import ( "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" - "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/catalog" - "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster" - "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" - "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations" - "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" - "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/registered_model" - "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/sharing" - "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" - "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" @@ -35,34 +26,23 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" ) -func GetDatabricksProviderPluginFramework() provider.Provider { - p := &DatabricksProviderPluginFramework{} +func GetDatabricksProviderPluginFramework(sdkV2FallbackOptions ...SdkV2FallbackOption) provider.Provider { + p := &DatabricksProviderPluginFramework{sdkV2Fallbacks: sdkV2FallbackOptions} return p } type DatabricksProviderPluginFramework struct { + sdkV2Fallbacks []SdkV2FallbackOption } var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) func (p *DatabricksProviderPluginFramework) Resources(ctx context.Context) []func() resource.Resource { - return []func() resource.Resource{ - qualitymonitor.ResourceQualityMonitor, - library.ResourceLibrary, - sharing.ResourceShare, - } + return getPluginFrameworkResourcesToRegister(p.sdkV2Fallbacks...) } func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []func() datasource.DataSource { - return []func() datasource.DataSource{ - cluster.DataSourceCluster, - volume.DataSourceVolumes, - registered_model.DataSourceRegisteredModel, - notificationdestinations.DataSourceNotificationDestinations, - sharing.DataSourceShare, - sharing.DataSourceShares, - catalog.DataSourceFunctions, - } + return getPluginFrameworkDataSourcesToRegister(p.sdkV2Fallbacks...)
} func (p *DatabricksProviderPluginFramework) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) { diff --git a/internal/providers/pluginfw/pluginfw_rollout_utils.go b/internal/providers/pluginfw/pluginfw_rollout_utils.go new file mode 100644 index 0000000000..90b782a511 --- /dev/null +++ b/internal/providers/pluginfw/pluginfw_rollout_utils.go @@ -0,0 +1,205 @@ +package pluginfw + +// This file contains all of the utils for controlling the plugin framework rollout. +// For migrated resources and data sources, we can add them to the two lists below to have them registered with the plugin framework. +// Users can manually specify resources and data sources to use SDK V2 instead of the plugin framework by setting the USE_SDK_V2_RESOURCES and USE_SDK_V2_DATA_SOURCES environment variables. +// +// Example: USE_SDK_V2_RESOURCES="databricks_library" would force the library resource to use SDK V2 instead of the plugin framework. + +import ( + "context" + "os" + "slices" + "strings" + + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/catalog" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/cluster" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/library" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/notificationdestinations" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/qualitymonitor" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/registered_model" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/sharing" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/resources/volume" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +// List of resources that have been migrated from SDK V2 to plugin framework +var migratedResources = []func() resource.Resource{ + qualitymonitor.ResourceQualityMonitor, + library.ResourceLibrary, +} + +// List of data sources that have been migrated from SDK V2 to plugin framework +var migratedDataSources = []func() datasource.DataSource{ + volume.DataSourceVolumes, +} + +// List of resources that have been onboarded to the plugin framework - not migrated from sdkv2. +var pluginFwOnlyResources = []func() resource.Resource{ + // TODO Add resources here + sharing.ResourceShare, // Using the staging name (with pluginframework suffix) +} + +// List of data sources that have been onboarded to the plugin framework - not migrated from sdkv2. +var pluginFwOnlyDataSources = []func() datasource.DataSource{ + registered_model.DataSourceRegisteredModel, + notificationdestinations.DataSourceNotificationDestinations, + catalog.DataSourceFunctions, + // TODO: Add DataSourceCluster into migratedDataSources after fixing unit tests.
+ cluster.DataSourceCluster, // Using the staging name (with pluginframework suffix) + sharing.DataSourceShare, // Using the staging name (with pluginframework suffix) + sharing.DataSourceShares, // Using the staging name (with pluginframework suffix) +} + +type sdkV2FallbackOptions struct { + resourceFallbacks []string + dataSourceFallbacks []string +} + +// SdkV2FallbackOption is an interface for acceptance tests to specify resources / data sources to fall back to SDK V2 +type SdkV2FallbackOption interface { + Apply(*sdkV2FallbackOptions) +} + +type sdkV2ResourceFallback struct { + resourceFallbacks []string +} + +func (o *sdkV2ResourceFallback) Apply(options *sdkV2FallbackOptions) { + options.resourceFallbacks = o.resourceFallbacks +} + +// WithSdkV2ResourceFallbacks is a helper function to specify resources to fall back to SDK V2 +func WithSdkV2ResourceFallbacks(fallbacks ...string) SdkV2FallbackOption { + return &sdkV2ResourceFallback{resourceFallbacks: fallbacks} +} + +type sdkv2DataSourceFallback struct { + dataSourceFallbacks []string +} + +func (o *sdkv2DataSourceFallback) Apply(options *sdkV2FallbackOptions) { + options.dataSourceFallbacks = o.dataSourceFallbacks +} + +// WithSdkV2DataSourceFallbacks is a helper function to specify data sources to fall back to SDK V2 +func WithSdkV2DataSourceFallbacks(fallbacks []string) SdkV2FallbackOption { + return &sdkv2DataSourceFallback{dataSourceFallbacks: fallbacks} +} + +// getUseSdkV2Resources is a helper function to get the names of resources that should use SDK V2 instead of the plugin framework +func getUseSdkV2Resources() []string { + useSdkV2 := os.Getenv("USE_SDK_V2_RESOURCES") + if useSdkV2 == "" { + return []string{} + } + return strings.Split(useSdkV2, ",") +} + +// getUseSdkV2DataSources is a helper function to get the names of data sources that should use SDK V2 instead of the plugin framework +func getUseSdkV2DataSources() []string { + useSdkV2 := os.Getenv("USE_SDK_V2_DATA_SOURCES") + if useSdkV2 == "" { + return []string{} + } + return strings.Split(useSdkV2, ",") +} + +// Helper function to check if a resource should use SDK V2 instead of the plugin framework +func shouldUseSdkV2Resource(resourceName string) bool { + useSdkV2Resources := getUseSdkV2Resources() + return slices.Contains(useSdkV2Resources, resourceName) +} + +// Helper function to check if a data source should use SDK V2 instead of the plugin framework +func shouldUseSdkV2DataSource(dataSourceName string) bool { + sdkV2DataSources := getUseSdkV2DataSources() + return slices.Contains(sdkV2DataSources, dataSourceName) +} + +// getPluginFrameworkResourcesToRegister is a helper function to get the list of resources that are migrated away from sdkv2 to plugin framework +func getPluginFrameworkResourcesToRegister(sdkV2Fallbacks ...SdkV2FallbackOption) []func() resource.Resource { + fallbackOption := sdkV2FallbackOptions{} + for _, o := range sdkV2Fallbacks { + o.Apply(&fallbackOption) + } + + var resources []func() resource.Resource + + // Loop through the list and add resources if they're not specifically marked to use the SDK V2 + for _, resourceFunc := range migratedResources { + name := getResourceName(resourceFunc) + if !shouldUseSdkV2Resource(name) && !slices.Contains(fallbackOption.resourceFallbacks, name) { + resources = append(resources, resourceFunc) + } + } + + return append(resources, pluginFwOnlyResources...)
+} + +// getPluginFrameworkDataSourcesToRegister is a helper function to get the list of data sources that are migrated away from sdkv2 to plugin framework +func getPluginFrameworkDataSourcesToRegister(sdkV2Fallbacks ...SdkV2FallbackOption) []func() datasource.DataSource { + fallbackOption := sdkV2FallbackOptions{} + for _, o := range sdkV2Fallbacks { + o.Apply(&fallbackOption) + } + + var dataSources []func() datasource.DataSource + + // Loop through the list and add data sources if they're not specifically marked to use the SDK V2 + for _, dataSourceFunc := range migratedDataSources { + name := getDataSourceName(dataSourceFunc) + if !shouldUseSdkV2DataSource(name) && !slices.Contains(fallbackOption.dataSourceFallbacks, name) { + dataSources = append(dataSources, dataSourceFunc) + } + } + + return append(dataSources, pluginFwOnlyDataSources...) +} + +func getResourceName(resourceFunc func() resource.Resource) string { + resp := resource.MetadataResponse{} + resourceFunc().Metadata(context.Background(), resource.MetadataRequest{ProviderTypeName: "databricks"}, &resp) + return resp.TypeName +} + +func getDataSourceName(dataSourceFunc func() datasource.DataSource) string { + resp := datasource.MetadataResponse{} + dataSourceFunc().Metadata(context.Background(), datasource.MetadataRequest{ProviderTypeName: "databricks"}, &resp) + return resp.TypeName +} + +// GetSdkV2ResourcesToRemove is a helper function to get the list of resources that are migrated away from sdkv2 to plugin framework +func GetSdkV2ResourcesToRemove(sdkV2Fallbacks ...SdkV2FallbackOption) []string { + fallbackOption := sdkV2FallbackOptions{} + for _, o := range sdkV2Fallbacks { + o.Apply(&fallbackOption) + } + + resourcesToRemove := []string{} + for _, resourceFunc := range migratedResources { + name := getResourceName(resourceFunc) + if !shouldUseSdkV2Resource(name) && !slices.Contains(fallbackOption.resourceFallbacks, name) { + resourcesToRemove = append(resourcesToRemove, name) + } + } + return resourcesToRemove +} + +// GetSdkV2DataSourcesToRemove is a helper function to get the list of data sources that are migrated away from sdkv2 to plugin framework +func GetSdkV2DataSourcesToRemove(sdkV2Fallbacks ...SdkV2FallbackOption) []string { + fallbackOption := sdkV2FallbackOptions{} + for _, o := range sdkV2Fallbacks { + o.Apply(&fallbackOption) + } + + dataSourcesToRemove := []string{} + for _, dataSourceFunc := range migratedDataSources { + name := getDataSourceName(dataSourceFunc) + if !shouldUseSdkV2DataSource(name) && !slices.Contains(fallbackOption.dataSourceFallbacks, name) { + dataSourcesToRemove = append(dataSourcesToRemove, name) + } + } + return dataSourcesToRemove +} diff --git a/internal/providers/pluginfw/resources/library/resource_library.go b/internal/providers/pluginfw/resources/library/resource_library.go index 1c999bd2ed..17ac722bfa 100644 --- a/internal/providers/pluginfw/resources/library/resource_library.go +++ b/internal/providers/pluginfw/resources/library/resource_library.go @@ -62,6 +62,7 @@ func readLibrary(ctx context.Context, w *databricks.WorkspaceClient, waitParams type LibraryExtended struct { compute_tf.Library ClusterId types.String `tfsdk:"cluster_id"` + ID types.String `tfsdk:"id" tf:"optional,computed"` // Adding ID field to stay compatible with SDKv2 } type LibraryResource struct { @@ -69,7 +70,7 @@ } func (r *LibraryResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = 
pluginfwcommon.GetDatabricksStagingName(resourceName) + resp.TypeName = pluginfwcommon.GetDatabricksProductionName(resourceName) } func (r *LibraryResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { @@ -139,6 +140,8 @@ func (r *LibraryResource) Create(ctx context.Context, req resource.CreateRequest resp.Diagnostics.Append(readLibrary(ctx, w, waitParams, libraryRep, &installedLib)...) + installedLib.ID = types.StringValue(libGoSDK.String()) + if resp.Diagnostics.HasError() { return } diff --git a/internal/providers/pluginfw/resources/library/resource_library_acc_test.go b/internal/providers/pluginfw/resources/library/resource_library_acc_test.go index 153657ae41..138c803111 100644 --- a/internal/providers/pluginfw/resources/library/resource_library_acc_test.go +++ b/internal/providers/pluginfw/resources/library/resource_library_acc_test.go @@ -1,30 +1,37 @@ package library_test import ( + "context" "testing" "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/databricks/terraform-provider-databricks/internal/providers" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" ) -func TestAccLibraryCreationPluginFramework(t *testing.T) { +var commonClusterConfig = `data "databricks_spark_version" "latest" { +} +resource "databricks_cluster" "this" { + cluster_name = "test-library-{var.RANDOM}" + spark_version = data.databricks_spark_version.latest.id + instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" + autotermination_minutes = 10 + num_workers = 0 + spark_conf = { + "spark.databricks.cluster.profile" = "singleNode" + "spark.master" = "local[*]" + } + custom_tags = { + "ResourceClass" = "SingleNode" + } +} + +` + +func TestAccLibraryCreation(t *testing.T) { acceptance.WorkspaceLevel(t, acceptance.Step{ - Template: `data "databricks_spark_version" "latest" { - } - resource "databricks_cluster" "this" { - cluster_name = "test-library-{var.RANDOM}" - spark_version = data.databricks_spark_version.latest.id - instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" - autotermination_minutes = 10 - num_workers = 0 - spark_conf = { - "spark.databricks.cluster.profile" = "singleNode" - "spark.master" = "local[*]" - } - custom_tags = { - "ResourceClass" = "SingleNode" - } - } - resource "databricks_library_pluginframework" "new_library" { + Template: commonClusterConfig + `resource "databricks_library" "new_library" { cluster_id = databricks_cluster.this.id pypi { repo = "https://pypi.org/dummy" @@ -35,26 +42,10 @@ func TestAccLibraryCreationPluginFramework(t *testing.T) { }) } -func TestAccLibraryUpdatePluginFramework(t *testing.T) { +func TestAccLibraryUpdate(t *testing.T) { acceptance.WorkspaceLevel(t, acceptance.Step{ - Template: `data "databricks_spark_version" "latest" { - } - resource "databricks_cluster" "this" { - cluster_name = "cluster-{var.STICKY_RANDOM}" - spark_version = data.databricks_spark_version.latest.id - instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" - autotermination_minutes = 10 - num_workers = 0 - spark_conf = { - "spark.databricks.cluster.profile" = "singleNode" - "spark.master" = "local[*]" - } - custom_tags = { - "ResourceClass" = "SingleNode" - } - } - resource "databricks_library_pluginframework" "new_library" { + Template: commonClusterConfig + `resource "databricks_library" "new_library" { cluster_id = databricks_cluster.this.id pypi { repo = "https://pypi.org/simple" @@ -64,23 +55,65 @@ func 
TestAccLibraryUpdatePluginFramework(t *testing.T) { `, }, acceptance.Step{ - Template: `data "databricks_spark_version" "latest" { + Template: commonClusterConfig + `resource "databricks_library" "new_library" { + cluster_id = databricks_cluster.this.id + pypi { + package = "networkx" + } } - resource "databricks_cluster" "this" { - cluster_name = "cluster-{var.STICKY_RANDOM}" - spark_version = data.databricks_spark_version.latest.id - instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" - autotermination_minutes = 10 - num_workers = 0 - spark_conf = { - "spark.databricks.cluster.profile" = "singleNode" - "spark.master" = "local[*]" + `, + }, + ) +} + +var sdkV2FallbackFactory = map[string]func() (tfprotov6.ProviderServer, error){ + "databricks": func() (tfprotov6.ProviderServer, error) { + return providers.GetProviderServer(context.Background(), providers.WithSdkV2FallbackOptions(pluginfw.WithSdkV2ResourceFallbacks("databricks_library"))) + }, +} + +// Testing the transition from sdkv2 to plugin framework. +func TestAccLibraryUpdateTransitionFromSdkV2(t *testing.T) { + acceptance.WorkspaceLevel(t, + acceptance.Step{ + ProtoV6ProviderFactories: sdkV2FallbackFactory, + Template: commonClusterConfig + `resource "databricks_library" "new_library" { + cluster_id = databricks_cluster.this.id + pypi { + repo = "https://pypi.org/simple" + package = "databricks-sdk" + } } - custom_tags = { - "ResourceClass" = "SingleNode" + `, + }, + acceptance.Step{ + Template: commonClusterConfig + `resource "databricks_library" "new_library" { + cluster_id = databricks_cluster.this.id + pypi { + package = "networkx" } } - resource "databricks_library_pluginframework" "new_library" { + `, + }, + ) +} + +// Testing the transition from plugin framework to sdkv2. +func TestAccLibraryUpdateTransitionFromPluginFw(t *testing.T) { + acceptance.WorkspaceLevel(t, + acceptance.Step{ + Template: commonClusterConfig + `resource "databricks_library" "new_library" { + cluster_id = databricks_cluster.this.id + pypi { + repo = "https://pypi.org/simple" + package = "databricks-sdk" + } + } + `, + }, + acceptance.Step{ + ProtoV6ProviderFactories: sdkV2FallbackFactory, + Template: commonClusterConfig + `resource "databricks_library" "new_library" { cluster_id = databricks_cluster.this.id pypi { package = "networkx" diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go index c0047d55cc..7a0445ddbb 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go @@ -58,6 +58,7 @@ type MonitorInfoExtended struct { catalog_tf.MonitorInfo WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` SkipBuiltinDashboard types.Bool `tfsdk:"skip_builtin_dashboard" tf:"optional"` + ID types.String `tfsdk:"id" tf:"optional,computed"` // Adding ID field to stay compatible with SDKv2 } type QualityMonitorResource struct { @@ -65,7 +66,7 @@ type QualityMonitorResource struct { } func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = pluginfwcommon.GetDatabricksStagingName(resourceName) + resp.TypeName = pluginfwcommon.GetDatabricksProductionName(resourceName) } func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { @@ -131,6 +132,9 @@ func (r 
*QualityMonitorResource) Create(ctx context.Context, req resource.Create return } + // Set the ID to the table name + newMonitorInfoTfSDK.ID = newMonitorInfoTfSDK.TableName + resp.Diagnostics.Append(resp.State.Set(ctx, newMonitorInfoTfSDK)...) } @@ -162,6 +166,8 @@ func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequ return } + monitorInfoTfSDK.ID = monitorInfoTfSDK.TableName + resp.Diagnostics.Append(resp.State.Set(ctx, monitorInfoTfSDK)...) } diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go index 7f303d482e..bc87743cda 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor_acc_test.go @@ -1,10 +1,14 @@ package qualitymonitor_test import ( + "context" "os" "testing" "github.com/databricks/terraform-provider-databricks/internal/acceptance" + "github.com/databricks/terraform-provider-databricks/internal/providers" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" ) var commonPartQualityMonitoring = `resource "databricks_catalog" "sandbox" { @@ -55,7 +59,7 @@ func TestUcAccQualityMonitor(t *testing.T) { acceptance.UnityWorkspaceLevel(t, acceptance.Step{ Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id @@ -81,7 +85,7 @@ func TestUcAccQualityMonitor(t *testing.T) { } } - resource "databricks_quality_monitor_pluginframework" "testMonitorTimeseries" { + resource "databricks_quality_monitor" "testMonitorTimeseries" { table_name = databricks_sql_table.myTimeseries.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" output_schema_name = databricks_schema.things.id @@ -104,7 +108,7 @@ func TestUcAccQualityMonitor(t *testing.T) { } } - resource "databricks_quality_monitor_pluginframework" "testMonitorSnapshot" { + resource "databricks_quality_monitor" "testMonitorSnapshot" { table_name = databricks_sql_table.mySnapshot.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myTimeseries.name}" output_schema_name = databricks_schema.things.id @@ -121,7 +125,7 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { } acceptance.UnityWorkspaceLevel(t, acceptance.Step{ Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id @@ -136,7 +140,91 @@ func TestUcAccUpdateQualityMonitor(t *testing.T) { `, }, acceptance.Step{ Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + resource "databricks_quality_monitor" "testMonitorInference" { + table_name = 
databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log { + granularities = ["1 hour"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }) +} + +var sdkV2FallbackFactory = map[string]func() (tfprotov6.ProviderServer, error){ + "databricks": func() (tfprotov6.ProviderServer, error) { + return providers.GetProviderServer(context.Background(), providers.WithSdkV2FallbackOptions(pluginfw.WithSdkV2ResourceFallbacks("databricks_quality_monitor"))) + }, +} + +// Testing the transition from sdkv2 to plugin framework. +func TestUcAccUpdateQualityMonitorTransitionFromSdkV2(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + t.Skipf("databricks_quality_monitor resource is not available on GCP") + } + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + ProtoV6ProviderFactories: sdkV2FallbackFactory, + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log { + granularities = ["1 day"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }, acceptance.Step{ + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log { + granularities = ["1 hour"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }) +} + +// Testing the transition from plugin framework back to SDK V2. 
+func TestUcAccUpdateQualityMonitorTransitionFromPluginFw(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + t.Skipf("databricks_quality_monitor resource is not available on GCP") + } + acceptance.UnityWorkspaceLevel(t, acceptance.Step{ + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor" "testMonitorInference" { + table_name = databricks_sql_table.myInferenceTable.id + assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" + output_schema_name = databricks_schema.things.id + inference_log { + granularities = ["1 day"] + timestamp_col = "timestamp" + prediction_col = "prediction" + model_id_col = "model_id" + problem_type = "PROBLEM_TYPE_REGRESSION" + } + } + `, + }, acceptance.Step{ + ProtoV6ProviderFactories: sdkV2FallbackFactory, + Template: commonPartQualityMonitoring + ` + resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id @@ -160,7 +248,7 @@ func TestUcAccQualityMonitorImportPluginFramework(t *testing.T) { acceptance.Step{ Template: commonPartQualityMonitoring + ` - resource "databricks_quality_monitor_pluginframework" "testMonitorInference" { + resource "databricks_quality_monitor" "testMonitorInference" { table_name = databricks_sql_table.myInferenceTable.id assets_dir = "/Shared/provider-test/databricks_quality_monitoring/${databricks_sql_table.myInferenceTable.name}" output_schema_name = databricks_schema.things.id @@ -176,8 +264,8 @@ func TestUcAccQualityMonitorImportPluginFramework(t *testing.T) { }, acceptance.Step{ ImportState: true, - ResourceName: "databricks_quality_monitor_pluginframework.testMonitorInference", - ImportStateIdFunc: acceptance.BuildImportStateIdFunc("databricks_quality_monitor_pluginframework.testMonitorInference", "table_name"), + ResourceName: "databricks_quality_monitor.testMonitorInference", + ImportStateIdFunc: acceptance.BuildImportStateIdFunc("databricks_quality_monitor.testMonitorInference", "table_name"), ImportStateVerify: true, ImportStateVerifyIdentifierAttribute: "table_name", }, diff --git a/internal/providers/pluginfw/resources/volume/data_volumes.go b/internal/providers/pluginfw/resources/volume/data_volumes.go index 54eccf7bde..6a4af53ba0 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes.go @@ -35,7 +35,7 @@ type VolumesList struct { } func (d *VolumesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = pluginfwcommon.GetDatabricksStagingName(dataSourceName) + resp.TypeName = pluginfwcommon.GetDatabricksProductionName(dataSourceName) } func (d *VolumesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { diff --git a/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go b/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go index 0fdfc8aa50..3416d20f26 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes_acc_test.go @@ -12,8 +12,8 @@ import ( func checkDataSourceVolumesPopulated(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { - _, ok := 
s.Modules[0].Resources["data.databricks_volumes_pluginframework.this"] - require.True(t, ok, "data.databricks_volumes_pluginframework.this has to be there") + _, ok := s.Modules[0].Resources["data.databricks_volumes.this"] + require.True(t, ok, "data.databricks_volumes.this has to be there") num_volumes, _ := strconv.Atoi(s.Modules[0].Outputs["volumes"].Value.(string)) assert.GreaterOrEqual(t, num_volumes, 1) return nil @@ -45,13 +45,13 @@ func TestUcAccDataSourceVolumes(t *testing.T) { schema_name = databricks_schema.things.name volume_type = "MANAGED" } - data "databricks_volumes_pluginframework" "this" { + data "databricks_volumes" "this" { catalog_name = databricks_catalog.sandbox.name schema_name = databricks_schema.things.name depends_on = [ databricks_volume.this ] } output "volumes" { - value = length(data.databricks_volumes_pluginframework.this.ids) + value = length(data.databricks_volumes.this.ids) } `, Check: checkDataSourceVolumesPopulated(t), diff --git a/internal/providers/providers.go b/internal/providers/providers.go index 681df6f74d..64a8296467 100644 --- a/internal/providers/providers.go +++ b/internal/providers/providers.go @@ -20,6 +20,7 @@ import ( type serverOptions struct { sdkV2Provider *schema.Provider pluginFrameworkProvider provider.Provider + sdkV2fallbacks []pluginfw.SdkV2FallbackOption } // ServerOption is a common interface for overriding providers in GetProviderServer functino call. @@ -41,6 +42,20 @@ func WithSdkV2Provider(sdkV2Provider *schema.Provider) ServerOption { return &sdkV2ProviderOption{sdkV2Provider: sdkV2Provider} } +type sdkV2FallbackOption struct { + sdkV2fallbacks []pluginfw.SdkV2FallbackOption +} + +func (o *sdkV2FallbackOption) Apply(options *serverOptions) { + options.sdkV2fallbacks = o.sdkV2fallbacks +} + +// WithSdkV2FallbackOptions allows overriding the SDKv2 fallback options used when creating a Terraform provider with muxing. +// This is typically used in acceptance test for testing the compatibility between sdkv2 and plugin framework. +func WithSdkV2FallbackOptions(options ...pluginfw.SdkV2FallbackOption) ServerOption { + return &sdkV2FallbackOption{sdkV2fallbacks: options} +} + // GetProviderServer initializes and returns a Terraform Protocol v6 ProviderServer. // The function begins by initializing the Databricks provider using the SDK plugin // and then upgrades this provider to be compatible with Terraform's Protocol v6 using @@ -60,11 +75,11 @@ func GetProviderServer(ctx context.Context, options ...ServerOption) (tfprotov6. } sdkPluginProvider := serverOptions.sdkV2Provider if sdkPluginProvider == nil { - sdkPluginProvider = sdkv2.DatabricksProvider() + sdkPluginProvider = sdkv2.DatabricksProvider(serverOptions.sdkV2fallbacks...) } pluginFrameworkProvider := serverOptions.pluginFrameworkProvider if pluginFrameworkProvider == nil { - pluginFrameworkProvider = pluginfw.GetDatabricksProviderPluginFramework() + pluginFrameworkProvider = pluginfw.GetDatabricksProviderPluginFramework(serverOptions.sdkV2fallbacks...) 
} upgradedSdkPluginProvider, err := tf5to6server.UpgradeServer( diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index d40d663ee2..e689b5b693 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -32,6 +32,7 @@ import ( "github.com/databricks/terraform-provider-databricks/dashboards" "github.com/databricks/terraform-provider-databricks/finops" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" + "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw" "github.com/databricks/terraform-provider-databricks/jobs" "github.com/databricks/terraform-provider-databricks/logger" "github.com/databricks/terraform-provider-databricks/mlflow" @@ -71,162 +72,181 @@ func init() { } // DatabricksProvider returns the entire terraform provider object -func DatabricksProvider() *schema.Provider { +func DatabricksProvider(sdkV2Fallbacks ...pluginfw.SdkV2FallbackOption) *schema.Provider { + dataSourceMap := map[string]*schema.Resource{ // must be in alphabetical order + "databricks_aws_crossaccount_policy": aws.DataAwsCrossaccountPolicy().ToResource(), + "databricks_aws_assume_role_policy": aws.DataAwsAssumeRolePolicy().ToResource(), + "databricks_aws_bucket_policy": aws.DataAwsBucketPolicy().ToResource(), + "databricks_aws_unity_catalog_assume_role_policy": aws.DataAwsUnityCatalogAssumeRolePolicy().ToResource(), + "databricks_aws_unity_catalog_policy": aws.DataAwsUnityCatalogPolicy().ToResource(), + "databricks_cluster": clusters.DataSourceCluster().ToResource(), + "databricks_clusters": clusters.DataSourceClusters().ToResource(), + "databricks_cluster_policy": policies.DataSourceClusterPolicy().ToResource(), + "databricks_catalog": catalog.DataSourceCatalog().ToResource(), + "databricks_catalogs": catalog.DataSourceCatalogs().ToResource(), + "databricks_current_config": mws.DataSourceCurrentConfiguration().ToResource(), + "databricks_current_metastore": catalog.DataSourceCurrentMetastore().ToResource(), + "databricks_current_user": scim.DataSourceCurrentUser().ToResource(), + "databricks_dbfs_file": storage.DataSourceDbfsFile().ToResource(), + "databricks_dbfs_file_paths": storage.DataSourceDbfsFilePaths().ToResource(), + "databricks_directory": workspace.DataSourceDirectory().ToResource(), + "databricks_external_location": catalog.DataSourceExternalLocation().ToResource(), + "databricks_external_locations": catalog.DataSourceExternalLocations().ToResource(), + "databricks_group": scim.DataSourceGroup().ToResource(), + "databricks_instance_pool": pools.DataSourceInstancePool().ToResource(), + "databricks_instance_profiles": aws.DataSourceInstanceProfiles().ToResource(), + "databricks_jobs": jobs.DataSourceJobs().ToResource(), + "databricks_job": jobs.DataSourceJob().ToResource(), + "databricks_metastore": catalog.DataSourceMetastore().ToResource(), + "databricks_metastores": catalog.DataSourceMetastores().ToResource(), + "databricks_mlflow_experiment": mlflow.DataSourceExperiment().ToResource(), + "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), + "databricks_mlflow_models": mlflow.DataSourceModels().ToResource(), + "databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), + "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), + "databricks_node_type": clusters.DataSourceNodeType().ToResource(), + "databricks_notebook": workspace.DataSourceNotebook().ToResource(), + "databricks_notebook_paths": 
workspace.DataSourceNotebookPaths().ToResource(), + "databricks_pipelines": pipelines.DataSourcePipelines().ToResource(), + "databricks_schema": catalog.DataSourceSchema().ToResource(), + "databricks_schemas": catalog.DataSourceSchemas().ToResource(), + "databricks_service_principal": scim.DataSourceServicePrincipal().ToResource(), + "databricks_service_principals": scim.DataSourceServicePrincipals().ToResource(), + "databricks_share": sharing.DataSourceShare().ToResource(), + "databricks_shares": sharing.DataSourceShares().ToResource(), + "databricks_spark_version": clusters.DataSourceSparkVersion().ToResource(), + "databricks_sql_warehouse": sql.DataSourceWarehouse().ToResource(), + "databricks_sql_warehouses": sql.DataSourceWarehouses().ToResource(), + "databricks_storage_credential": catalog.DataSourceStorageCredential().ToResource(), + "databricks_storage_credentials": catalog.DataSourceStorageCredentials().ToResource(), + "databricks_table": catalog.DataSourceTable().ToResource(), + "databricks_tables": catalog.DataSourceTables().ToResource(), + "databricks_views": catalog.DataSourceViews().ToResource(), + "databricks_volume": catalog.DataSourceVolume().ToResource(), + "databricks_volumes": catalog.DataSourceVolumes().ToResource(), + "databricks_user": scim.DataSourceUser().ToResource(), + "databricks_zones": clusters.DataSourceClusterZones().ToResource(), + } + + resourceMap := map[string]*schema.Resource{ // must be in alphabetical order + "databricks_access_control_rule_set": permissions.ResourceAccessControlRuleSet().ToResource(), + "databricks_alert": sql.ResourceAlert().ToResource(), + "databricks_artifact_allowlist": catalog.ResourceArtifactAllowlist().ToResource(), + "databricks_aws_s3_mount": storage.ResourceAWSS3Mount().ToResource(), + "databricks_azure_adls_gen1_mount": storage.ResourceAzureAdlsGen1Mount().ToResource(), + "databricks_azure_adls_gen2_mount": storage.ResourceAzureAdlsGen2Mount().ToResource(), + "databricks_azure_blob_mount": storage.ResourceAzureBlobMount().ToResource(), + "databricks_budget": finops.ResourceBudget().ToResource(), + "databricks_catalog": catalog.ResourceCatalog().ToResource(), + "databricks_catalog_workspace_binding": catalog.ResourceCatalogWorkspaceBinding().ToResource(), + "databricks_custom_app_integration": apps.ResourceCustomAppIntegration().ToResource(), + "databricks_connection": catalog.ResourceConnection().ToResource(), + "databricks_cluster": clusters.ResourceCluster().ToResource(), + "databricks_cluster_policy": policies.ResourceClusterPolicy().ToResource(), + "databricks_dashboard": dashboards.ResourceDashboard().ToResource(), + "databricks_dbfs_file": storage.ResourceDbfsFile().ToResource(), + "databricks_directory": workspace.ResourceDirectory().ToResource(), + "databricks_entitlements": scim.ResourceEntitlements().ToResource(), + "databricks_external_location": catalog.ResourceExternalLocation().ToResource(), + "databricks_file": storage.ResourceFile().ToResource(), + "databricks_git_credential": repos.ResourceGitCredential().ToResource(), + "databricks_global_init_script": workspace.ResourceGlobalInitScript().ToResource(), + "databricks_grant": catalog.ResourceGrant().ToResource(), + "databricks_grants": catalog.ResourceGrants().ToResource(), + "databricks_group": scim.ResourceGroup().ToResource(), + "databricks_group_instance_profile": aws.ResourceGroupInstanceProfile().ToResource(), + "databricks_group_member": scim.ResourceGroupMember().ToResource(), + "databricks_group_role": scim.ResourceGroupRole().ToResource(), + 
"databricks_instance_pool": pools.ResourceInstancePool().ToResource(), + "databricks_instance_profile": aws.ResourceInstanceProfile().ToResource(), + "databricks_ip_access_list": access.ResourceIPAccessList().ToResource(), + "databricks_job": jobs.ResourceJob().ToResource(), + "databricks_lakehouse_monitor": catalog.ResourceLakehouseMonitor().ToResource(), + "databricks_library": clusters.ResourceLibrary().ToResource(), + "databricks_metastore": catalog.ResourceMetastore().ToResource(), + "databricks_metastore_assignment": catalog.ResourceMetastoreAssignment().ToResource(), + "databricks_metastore_data_access": catalog.ResourceMetastoreDataAccess().ToResource(), + "databricks_mlflow_experiment": mlflow.ResourceMlflowExperiment().ToResource(), + "databricks_mlflow_model": mlflow.ResourceMlflowModel().ToResource(), + "databricks_mlflow_webhook": mlflow.ResourceMlflowWebhook().ToResource(), + "databricks_model_serving": serving.ResourceModelServing().ToResource(), + "databricks_mount": storage.ResourceMount().ToResource(), + "databricks_mws_customer_managed_keys": mws.ResourceMwsCustomerManagedKeys().ToResource(), + "databricks_mws_credentials": mws.ResourceMwsCredentials().ToResource(), + "databricks_mws_log_delivery": mws.ResourceMwsLogDelivery().ToResource(), + "databricks_mws_ncc_binding": mws.ResourceMwsNccBinding().ToResource(), + "databricks_mws_ncc_private_endpoint_rule": mws.ResourceMwsNccPrivateEndpointRule().ToResource(), + "databricks_mws_networks": mws.ResourceMwsNetworks().ToResource(), + "databricks_mws_network_connectivity_config": mws.ResourceMwsNetworkConnectivityConfig().ToResource(), + "databricks_mws_permission_assignment": mws.ResourceMwsPermissionAssignment().ToResource(), + "databricks_mws_private_access_settings": mws.ResourceMwsPrivateAccessSettings().ToResource(), + "databricks_mws_storage_configurations": mws.ResourceMwsStorageConfigurations().ToResource(), + "databricks_mws_vpc_endpoint": mws.ResourceMwsVpcEndpoint().ToResource(), + "databricks_mws_workspaces": mws.ResourceMwsWorkspaces().ToResource(), + "databricks_notebook": workspace.ResourceNotebook().ToResource(), + "databricks_notification_destination": settings.ResourceNotificationDestination().ToResource(), + "databricks_obo_token": tokens.ResourceOboToken().ToResource(), + "databricks_online_table": catalog.ResourceOnlineTable().ToResource(), + "databricks_permission_assignment": access.ResourcePermissionAssignment().ToResource(), + "databricks_permissions": permissions.ResourcePermissions().ToResource(), + "databricks_pipeline": pipelines.ResourcePipeline().ToResource(), + "databricks_provider": sharing.ResourceProvider().ToResource(), + "databricks_quality_monitor": catalog.ResourceQualityMonitor().ToResource(), + "databricks_query": sql.ResourceQuery().ToResource(), + "databricks_recipient": sharing.ResourceRecipient().ToResource(), + "databricks_registered_model": catalog.ResourceRegisteredModel().ToResource(), + "databricks_repo": repos.ResourceRepo().ToResource(), + "databricks_schema": catalog.ResourceSchema().ToResource(), + "databricks_secret": secrets.ResourceSecret().ToResource(), + "databricks_secret_scope": secrets.ResourceSecretScope().ToResource(), + "databricks_secret_acl": secrets.ResourceSecretACL().ToResource(), + "databricks_service_principal": scim.ResourceServicePrincipal().ToResource(), + "databricks_service_principal_role": aws.ResourceServicePrincipalRole().ToResource(), + "databricks_service_principal_secret": tokens.ResourceServicePrincipalSecret().ToResource(), + 
"databricks_share": sharing.ResourceShare().ToResource(), + "databricks_sql_dashboard": sql.ResourceSqlDashboard().ToResource(), + "databricks_sql_endpoint": sql.ResourceSqlEndpoint().ToResource(), + "databricks_sql_global_config": sql.ResourceSqlGlobalConfig().ToResource(), + "databricks_sql_permissions": access.ResourceSqlPermissions().ToResource(), + "databricks_sql_query": sql.ResourceSqlQuery().ToResource(), + "databricks_sql_alert": sql.ResourceSqlAlert().ToResource(), + "databricks_sql_table": catalog.ResourceSqlTable().ToResource(), + "databricks_sql_visualization": sql.ResourceSqlVisualization().ToResource(), + "databricks_sql_widget": sql.ResourceSqlWidget().ToResource(), + "databricks_storage_credential": catalog.ResourceStorageCredential().ToResource(), + "databricks_system_schema": catalog.ResourceSystemSchema().ToResource(), + "databricks_table": catalog.ResourceTable().ToResource(), + "databricks_token": tokens.ResourceToken().ToResource(), + "databricks_user": scim.ResourceUser().ToResource(), + "databricks_user_instance_profile": aws.ResourceUserInstanceProfile().ToResource(), + "databricks_user_role": aws.ResourceUserRole().ToResource(), + "databricks_vector_search_endpoint": vectorsearch.ResourceVectorSearchEndpoint().ToResource(), + "databricks_vector_search_index": vectorsearch.ResourceVectorSearchIndex().ToResource(), + "databricks_volume": catalog.ResourceVolume().ToResource(), + "databricks_workspace_binding": catalog.ResourceWorkspaceBinding().ToResource(), + "databricks_workspace_conf": workspace.ResourceWorkspaceConf().ToResource(), + "databricks_workspace_file": workspace.ResourceWorkspaceFile().ToResource(), + } + + // Remove the resources and data sources that are being migrated to plugin framework + for _, dataSourceToRemove := range pluginfw.GetSdkV2DataSourcesToRemove(sdkV2Fallbacks...) { + if _, ok := dataSourceMap[dataSourceToRemove]; !ok { + panic(fmt.Sprintf("data source %s not found", dataSourceToRemove)) + } + delete(dataSourceMap, dataSourceToRemove) + } + + for _, resourceToRemove := range pluginfw.GetSdkV2ResourcesToRemove(sdkV2Fallbacks...) 
{ + if _, ok := resourceMap[resourceToRemove]; !ok { + panic(fmt.Sprintf("resource %s not found", resourceToRemove)) + } + delete(resourceMap, resourceToRemove) + } + p := &schema.Provider{ - DataSourcesMap: map[string]*schema.Resource{ // must be in alphabetical order - "databricks_aws_crossaccount_policy": aws.DataAwsCrossaccountPolicy().ToResource(), - "databricks_aws_assume_role_policy": aws.DataAwsAssumeRolePolicy().ToResource(), - "databricks_aws_bucket_policy": aws.DataAwsBucketPolicy().ToResource(), - "databricks_aws_unity_catalog_assume_role_policy": aws.DataAwsUnityCatalogAssumeRolePolicy().ToResource(), - "databricks_aws_unity_catalog_policy": aws.DataAwsUnityCatalogPolicy().ToResource(), - "databricks_cluster": clusters.DataSourceCluster().ToResource(), - "databricks_clusters": clusters.DataSourceClusters().ToResource(), - "databricks_cluster_policy": policies.DataSourceClusterPolicy().ToResource(), - "databricks_catalog": catalog.DataSourceCatalog().ToResource(), - "databricks_catalogs": catalog.DataSourceCatalogs().ToResource(), - "databricks_current_config": mws.DataSourceCurrentConfiguration().ToResource(), - "databricks_current_metastore": catalog.DataSourceCurrentMetastore().ToResource(), - "databricks_current_user": scim.DataSourceCurrentUser().ToResource(), - "databricks_dbfs_file": storage.DataSourceDbfsFile().ToResource(), - "databricks_dbfs_file_paths": storage.DataSourceDbfsFilePaths().ToResource(), - "databricks_directory": workspace.DataSourceDirectory().ToResource(), - "databricks_external_location": catalog.DataSourceExternalLocation().ToResource(), - "databricks_external_locations": catalog.DataSourceExternalLocations().ToResource(), - "databricks_group": scim.DataSourceGroup().ToResource(), - "databricks_instance_pool": pools.DataSourceInstancePool().ToResource(), - "databricks_instance_profiles": aws.DataSourceInstanceProfiles().ToResource(), - "databricks_jobs": jobs.DataSourceJobs().ToResource(), - "databricks_job": jobs.DataSourceJob().ToResource(), - "databricks_metastore": catalog.DataSourceMetastore().ToResource(), - "databricks_metastores": catalog.DataSourceMetastores().ToResource(), - "databricks_mlflow_experiment": mlflow.DataSourceExperiment().ToResource(), - "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), - "databricks_mlflow_models": mlflow.DataSourceModels().ToResource(), - "databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), - "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), - "databricks_node_type": clusters.DataSourceNodeType().ToResource(), - "databricks_notebook": workspace.DataSourceNotebook().ToResource(), - "databricks_notebook_paths": workspace.DataSourceNotebookPaths().ToResource(), - "databricks_pipelines": pipelines.DataSourcePipelines().ToResource(), - "databricks_schema": catalog.DataSourceSchema().ToResource(), - "databricks_schemas": catalog.DataSourceSchemas().ToResource(), - "databricks_service_principal": scim.DataSourceServicePrincipal().ToResource(), - "databricks_service_principals": scim.DataSourceServicePrincipals().ToResource(), - "databricks_share": sharing.DataSourceShare().ToResource(), - "databricks_shares": sharing.DataSourceShares().ToResource(), - "databricks_spark_version": clusters.DataSourceSparkVersion().ToResource(), - "databricks_sql_warehouse": sql.DataSourceWarehouse().ToResource(), - "databricks_sql_warehouses": sql.DataSourceWarehouses().ToResource(), - "databricks_storage_credential": catalog.DataSourceStorageCredential().ToResource(), - 
"databricks_storage_credentials": catalog.DataSourceStorageCredentials().ToResource(), - "databricks_table": catalog.DataSourceTable().ToResource(), - "databricks_tables": catalog.DataSourceTables().ToResource(), - "databricks_views": catalog.DataSourceViews().ToResource(), - "databricks_volume": catalog.DataSourceVolume().ToResource(), - "databricks_volumes": catalog.DataSourceVolumes().ToResource(), - "databricks_user": scim.DataSourceUser().ToResource(), - "databricks_zones": clusters.DataSourceClusterZones().ToResource(), - }, - ResourcesMap: map[string]*schema.Resource{ // must be in alphabetical order - "databricks_access_control_rule_set": permissions.ResourceAccessControlRuleSet().ToResource(), - "databricks_alert": sql.ResourceAlert().ToResource(), - "databricks_artifact_allowlist": catalog.ResourceArtifactAllowlist().ToResource(), - "databricks_aws_s3_mount": storage.ResourceAWSS3Mount().ToResource(), - "databricks_azure_adls_gen1_mount": storage.ResourceAzureAdlsGen1Mount().ToResource(), - "databricks_azure_adls_gen2_mount": storage.ResourceAzureAdlsGen2Mount().ToResource(), - "databricks_azure_blob_mount": storage.ResourceAzureBlobMount().ToResource(), - "databricks_budget": finops.ResourceBudget().ToResource(), - "databricks_catalog": catalog.ResourceCatalog().ToResource(), - "databricks_catalog_workspace_binding": catalog.ResourceCatalogWorkspaceBinding().ToResource(), - "databricks_custom_app_integration": apps.ResourceCustomAppIntegration().ToResource(), - "databricks_connection": catalog.ResourceConnection().ToResource(), - "databricks_cluster": clusters.ResourceCluster().ToResource(), - "databricks_cluster_policy": policies.ResourceClusterPolicy().ToResource(), - "databricks_dashboard": dashboards.ResourceDashboard().ToResource(), - "databricks_dbfs_file": storage.ResourceDbfsFile().ToResource(), - "databricks_directory": workspace.ResourceDirectory().ToResource(), - "databricks_entitlements": scim.ResourceEntitlements().ToResource(), - "databricks_external_location": catalog.ResourceExternalLocation().ToResource(), - "databricks_file": storage.ResourceFile().ToResource(), - "databricks_git_credential": repos.ResourceGitCredential().ToResource(), - "databricks_global_init_script": workspace.ResourceGlobalInitScript().ToResource(), - "databricks_grant": catalog.ResourceGrant().ToResource(), - "databricks_grants": catalog.ResourceGrants().ToResource(), - "databricks_group": scim.ResourceGroup().ToResource(), - "databricks_group_instance_profile": aws.ResourceGroupInstanceProfile().ToResource(), - "databricks_group_member": scim.ResourceGroupMember().ToResource(), - "databricks_group_role": scim.ResourceGroupRole().ToResource(), - "databricks_instance_pool": pools.ResourceInstancePool().ToResource(), - "databricks_instance_profile": aws.ResourceInstanceProfile().ToResource(), - "databricks_ip_access_list": access.ResourceIPAccessList().ToResource(), - "databricks_job": jobs.ResourceJob().ToResource(), - "databricks_lakehouse_monitor": catalog.ResourceLakehouseMonitor().ToResource(), - "databricks_library": clusters.ResourceLibrary().ToResource(), - "databricks_metastore": catalog.ResourceMetastore().ToResource(), - "databricks_metastore_assignment": catalog.ResourceMetastoreAssignment().ToResource(), - "databricks_metastore_data_access": catalog.ResourceMetastoreDataAccess().ToResource(), - "databricks_mlflow_experiment": mlflow.ResourceMlflowExperiment().ToResource(), - "databricks_mlflow_model": mlflow.ResourceMlflowModel().ToResource(), - "databricks_mlflow_webhook": 
mlflow.ResourceMlflowWebhook().ToResource(), - "databricks_model_serving": serving.ResourceModelServing().ToResource(), - "databricks_mount": storage.ResourceMount().ToResource(), - "databricks_mws_customer_managed_keys": mws.ResourceMwsCustomerManagedKeys().ToResource(), - "databricks_mws_credentials": mws.ResourceMwsCredentials().ToResource(), - "databricks_mws_log_delivery": mws.ResourceMwsLogDelivery().ToResource(), - "databricks_mws_ncc_binding": mws.ResourceMwsNccBinding().ToResource(), - "databricks_mws_ncc_private_endpoint_rule": mws.ResourceMwsNccPrivateEndpointRule().ToResource(), - "databricks_mws_networks": mws.ResourceMwsNetworks().ToResource(), - "databricks_mws_network_connectivity_config": mws.ResourceMwsNetworkConnectivityConfig().ToResource(), - "databricks_mws_permission_assignment": mws.ResourceMwsPermissionAssignment().ToResource(), - "databricks_mws_private_access_settings": mws.ResourceMwsPrivateAccessSettings().ToResource(), - "databricks_mws_storage_configurations": mws.ResourceMwsStorageConfigurations().ToResource(), - "databricks_mws_vpc_endpoint": mws.ResourceMwsVpcEndpoint().ToResource(), - "databricks_mws_workspaces": mws.ResourceMwsWorkspaces().ToResource(), - "databricks_notebook": workspace.ResourceNotebook().ToResource(), - "databricks_notification_destination": settings.ResourceNotificationDestination().ToResource(), - "databricks_obo_token": tokens.ResourceOboToken().ToResource(), - "databricks_online_table": catalog.ResourceOnlineTable().ToResource(), - "databricks_permission_assignment": access.ResourcePermissionAssignment().ToResource(), - "databricks_permissions": permissions.ResourcePermissions().ToResource(), - "databricks_pipeline": pipelines.ResourcePipeline().ToResource(), - "databricks_provider": sharing.ResourceProvider().ToResource(), - "databricks_quality_monitor": catalog.ResourceQualityMonitor().ToResource(), - "databricks_query": sql.ResourceQuery().ToResource(), - "databricks_recipient": sharing.ResourceRecipient().ToResource(), - "databricks_registered_model": catalog.ResourceRegisteredModel().ToResource(), - "databricks_repo": repos.ResourceRepo().ToResource(), - "databricks_schema": catalog.ResourceSchema().ToResource(), - "databricks_secret": secrets.ResourceSecret().ToResource(), - "databricks_secret_scope": secrets.ResourceSecretScope().ToResource(), - "databricks_secret_acl": secrets.ResourceSecretACL().ToResource(), - "databricks_service_principal": scim.ResourceServicePrincipal().ToResource(), - "databricks_service_principal_role": aws.ResourceServicePrincipalRole().ToResource(), - "databricks_service_principal_secret": tokens.ResourceServicePrincipalSecret().ToResource(), - "databricks_share": sharing.ResourceShare().ToResource(), - "databricks_sql_dashboard": sql.ResourceSqlDashboard().ToResource(), - "databricks_sql_endpoint": sql.ResourceSqlEndpoint().ToResource(), - "databricks_sql_global_config": sql.ResourceSqlGlobalConfig().ToResource(), - "databricks_sql_permissions": access.ResourceSqlPermissions().ToResource(), - "databricks_sql_query": sql.ResourceSqlQuery().ToResource(), - "databricks_sql_alert": sql.ResourceSqlAlert().ToResource(), - "databricks_sql_table": catalog.ResourceSqlTable().ToResource(), - "databricks_sql_visualization": sql.ResourceSqlVisualization().ToResource(), - "databricks_sql_widget": sql.ResourceSqlWidget().ToResource(), - "databricks_storage_credential": catalog.ResourceStorageCredential().ToResource(), - "databricks_system_schema": catalog.ResourceSystemSchema().ToResource(), - 
"databricks_table": catalog.ResourceTable().ToResource(), - "databricks_token": tokens.ResourceToken().ToResource(), - "databricks_user": scim.ResourceUser().ToResource(), - "databricks_user_instance_profile": aws.ResourceUserInstanceProfile().ToResource(), - "databricks_user_role": aws.ResourceUserRole().ToResource(), - "databricks_vector_search_endpoint": vectorsearch.ResourceVectorSearchEndpoint().ToResource(), - "databricks_vector_search_index": vectorsearch.ResourceVectorSearchIndex().ToResource(), - "databricks_volume": catalog.ResourceVolume().ToResource(), - "databricks_workspace_binding": catalog.ResourceWorkspaceBinding().ToResource(), - "databricks_workspace_conf": workspace.ResourceWorkspaceConf().ToResource(), - "databricks_workspace_file": workspace.ResourceWorkspaceFile().ToResource(), - }, - Schema: providerSchema(), + DataSourcesMap: dataSourceMap, + ResourcesMap: resourceMap, + Schema: providerSchema(), } for name, resource := range settings.AllSettingsResources() { p.ResourcesMap[fmt.Sprintf("databricks_%s_setting", name)] = resource.ToResource() From 0fbfbf4741a1d69a9b62bc0457263c8d49c19bdc Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Fri, 1 Nov 2024 03:31:31 -0400 Subject: [PATCH 13/14] [Exporter] Allow to match resource names by regular expression (#4177) ## Changes In addition to the existing `-match` option, this PR allows the matching of names by regex during the listing operation. There are new options: - `-matchRegex` - checks if name matches a regex - this could be useful for exporting notebooks for only specific users, or something like that. - `-excludeRegex` - checks if name matches a regex, and skips processing of that object. For example, it could be used to exclude `databricks_automl` directories. This parameter has higher priority than the `-match` and `-matchRegex`. - `filterDirectoriesDuringWorkspaceWalking` - if we should apply match logic to directory names when we're performing workspace tree walking. *Note: be careful with it as it will be applied to all entries, so if you want to filter only specific users, then you will need to specify the condition for `/Users` as well, so regex will be `^(/Users|/Users/[a-c].*)$`* ## Tests - [x] `make test` run locally - [x] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --------- Co-authored-by: Miles Yucht --- docs/guides/experimental-exporter.md | 3 + exporter/command.go | 8 ++ exporter/context.go | 67 ++++++++---- exporter/exporter_test.go | 151 ++++++++++++++++++++++++++- exporter/util.go | 8 +- exporter/util_test.go | 12 +-- exporter/util_workspace.go | 17 ++- 7 files changed, 230 insertions(+), 36 deletions(-) diff --git a/docs/guides/experimental-exporter.md b/docs/guides/experimental-exporter.md index 6f41bf6154..9e0f357c03 100644 --- a/docs/guides/experimental-exporter.md +++ b/docs/guides/experimental-exporter.md @@ -61,6 +61,9 @@ All arguments are optional, and they tune what code is being generated. * `-listing` - Comma-separated list of services to be listed and further passed on for importing. For each service specified, the exporter performs a listing of available resources using the `List` function and emits them for importing together with their dependencies. The `-services` parameter could be used to control which transitive dependencies will be also imported. * `-services` - Comma-separated list of services to import. By default, all services are imported. 
* `-match` - Match resource names during listing operation. This filter applies to all resources that are getting listed, so if you want to import all dependencies of just one cluster, specify `-match=autoscaling -listing=compute`. By default, it is empty, which matches everything. +* `-matchRegex` - Match resource names against a given regex during the listing operation. Applicable to all resources selected for listing. +* `-excludeRegex` - Exclude resource names matching a given regex. Applied during the listing operation and has higher priority than `-match` and `-matchRegex`. Applicable to all resources selected for listing. Could be used to exclude objects such as `databricks_automl` notebooks. +* `-filterDirectoriesDuringWorkspaceWalking` - Apply the match/exclude logic to directory names during workspace tree walking. *Note: use this with care, as the filter is applied to every entry; if you want to restrict the walk to specific users, the regex must also match `/Users` itself, e.g. `^(/Users|/Users/[a-c].*)$`*. * `-mounts` - List DBFS mount points, an extremely slow operation that would not trigger unless explicitly specified. * `-generateProviderDeclaration` - the flag that toggles the generation of `databricks.tf` file with the declaration of the Databricks Terraform provider that is necessary for Terraform versions since Terraform 0.13 (disabled by default). * `-prefix` - optional prefix that will be added to the name of all exported resources - that's useful for exporting resources from multiple workspaces for merging into a single one. diff --git a/exporter/command.go b/exporter/command.go index 5e40b9a039..72eb8f25dd 100644 --- a/exporter/command.go +++ b/exporter/command.go @@ -131,6 +131,8 @@ func Run(args ...string) error { flags.BoolVar(&ic.mounts, "mounts", false, "List DBFS mount points.") flags.BoolVar(&ic.generateDeclaration, "generateProviderDeclaration", true, "Generate Databricks provider declaration.") + flags.BoolVar(&ic.filterDirectoriesDuringWorkspaceWalking, "filterDirectoriesDuringWorkspaceWalking", false, + "Apply filtering to directory names during workspace walking") flags.StringVar(&ic.notebooksFormat, "notebooksFormat", "SOURCE", "Format to export notebooks: SOURCE, DBC, JUPYTER. Default: SOURCE") services, listing := ic.allServicesAndListing() @@ -145,6 +147,12 @@ func Run(args ...string) error { flags.StringVar(&ic.match, "match", "", "Match resource names during listing operation. "+ "This filter applies to all resources that are getting listed, so if you want to import "+ "all dependencies of just one cluster, specify -listing=compute") + flags.StringVar(&ic.matchRegexStr, "matchRegex", "", "Match resource names during listing operation against a regex. "+ "This filter applies to all resources that are getting listed, so if you want to import "+ "all dependencies of just one cluster, specify -listing=compute") + flags.StringVar(&ic.excludeRegexStr, "excludeRegex", "", "Exclude resource names matching regex during listing operation. 
"+ + "This filter applies to all resources that are getting listed, so if you want to import "+ + "all dependencies of just one cluster, specify -listing=compute") prefix := "" flags.StringVar(&prefix, "prefix", "", "Prefix that will be added to the name of all exported resources") newArgs := args diff --git a/exporter/context.go b/exporter/context.go index ffb230a4e8..bfba5d24f1 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -78,28 +78,33 @@ type importContext struct { Scope importedResources // command-line resources (immutable, or set by the single thread) - includeUserDomains bool - importAllUsers bool - exportDeletedUsersAssets bool - incremental bool - mounts bool - noFormat bool - nativeImportSupported bool - services map[string]struct{} - listing map[string]struct{} - match string - lastActiveDays int64 - lastActiveMs int64 - generateDeclaration bool - exportSecrets bool - meAdmin bool - meUserName string - prefix string - accountLevel bool - shImports map[string]bool - notebooksFormat string - updatedSinceStr string - updatedSinceMs int64 + includeUserDomains bool + importAllUsers bool + exportDeletedUsersAssets bool + incremental bool + mounts bool + noFormat bool + nativeImportSupported bool + services map[string]struct{} + listing map[string]struct{} + match string + matchRegexStr string + matchRegex *regexp.Regexp + excludeRegexStr string + excludeRegex *regexp.Regexp + filterDirectoriesDuringWorkspaceWalking bool + lastActiveDays int64 + lastActiveMs int64 + generateDeclaration bool + exportSecrets bool + meAdmin bool + meUserName string + prefix string + accountLevel bool + shImports map[string]bool + notebooksFormat string + updatedSinceStr string + updatedSinceMs int64 waitGroup *sync.WaitGroup @@ -297,6 +302,24 @@ func (ic *importContext) Run() error { return fmt.Errorf("no services to import") } + if ic.matchRegexStr != "" { + log.Printf("[DEBUG] Using regex '%s' to filter resources", ic.matchRegexStr) + re, err := regexp.Compile(ic.matchRegexStr) + if err != nil { + log.Printf("[ERROR] can't compile regex '%s': %v", ic.matchRegexStr, err) + return err + } + ic.matchRegex = re + } + if ic.excludeRegexStr != "" { + log.Printf("[DEBUG] Using regex '%s' to filter resources", ic.excludeRegexStr) + re, err := regexp.Compile(ic.excludeRegexStr) + if err != nil { + log.Printf("[ERROR] can't compile regex '%s': %v", ic.excludeRegexStr, err) + return err + } + ic.excludeRegex = re + } if ic.incremental { if ic.updatedSinceStr == "" { ic.updatedSinceStr = getLastRunString(statsFileName) diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 9c2f64cf15..ad485b9557 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -2349,7 +2349,7 @@ func TestImportingGlobalSqlConfig(t *testing.T) { }) } -func TestImportingNotebooksWorkspaceFiles(t *testing.T) { +func TestImportingNotebooksWorkspaceFilesWithFilter(t *testing.T) { fileStatus := workspace.ObjectStatus{ ObjectID: 123, ObjectType: workspace.File, @@ -2371,7 +2371,135 @@ func TestImportingNotebooksWorkspaceFiles(t *testing.T) { Method: "GET", Resource: "/api/2.0/workspace/list?path=%2F", Response: workspace.ObjectList{ - Objects: []workspace.ObjectStatus{notebookStatus, fileStatus}, + Objects: []workspace.ObjectStatus{notebookStatus, fileStatus, + { + ObjectID: 4567, + ObjectType: workspace.Notebook, + Path: "/UnmatchedNotebook", + Language: "PYTHON", + }, + { + ObjectID: 1234, + ObjectType: workspace.File, + Path: "/UnmatchedFile", + }, + { + ObjectID: 456, + ObjectType: 
workspace.Directory, + Path: "/databricks_automl", + }, + { + ObjectID: 456, + ObjectType: workspace.Directory, + Path: "/.bundle", + }, + }, + }, + ReuseRequest: true, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/list?path=%2Fdatabricks_automl", + Response: workspace.ObjectList{}, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/get-status?path=%2FNotebook", + Response: notebookStatus, + ReuseRequest: true, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/get-status?path=%2FFile", + Response: fileStatus, + ReuseRequest: true, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/export?format=AUTO&path=%2FFile", + Response: workspace.ExportPath{ + Content: "dGVzdA==", + }, + ReuseRequest: true, + }, + { + Method: "GET", + Resource: "/api/2.0/workspace/export?format=SOURCE&path=%2FNotebook", + Response: workspace.ExportPath{ + Content: "dGVzdA==", + }, + ReuseRequest: true, + }, + }, + func(ctx context.Context, client *common.DatabricksClient) { + tmpDir := fmt.Sprintf("/tmp/tf-%s", qa.RandomName()) + defer os.RemoveAll(tmpDir) + + ic := newImportContext(client) + ic.Directory = tmpDir + ic.enableListing("notebooks,wsfiles") + ic.excludeRegexStr = "databricks_automl" + ic.matchRegexStr = "^/[FN].*$" + + err := ic.Run() + assert.NoError(t, err) + // check generated code for notebooks + content, err := os.ReadFile(tmpDir + "/notebooks.tf") + assert.NoError(t, err) + contentStr := string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_notebook" "notebook_456"`)) + assert.True(t, strings.Contains(contentStr, `path = "/Notebook"`)) + assert.False(t, strings.Contains(contentStr, `/UnmatchedNotebook`)) + // check generated code for workspace files + content, err = os.ReadFile(tmpDir + "/wsfiles.tf") + assert.NoError(t, err) + contentStr = string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_workspace_file" "file_123"`)) + assert.True(t, strings.Contains(contentStr, `path = "/File"`)) + assert.False(t, strings.Contains(contentStr, `/UnmatchedFile`)) + }) +} + +func TestImportingNotebooksWorkspaceFilesWithFilterDuringWalking(t *testing.T) { + fileStatus := workspace.ObjectStatus{ + ObjectID: 123, + ObjectType: workspace.File, + Path: "/File", + } + notebookStatus := workspace.ObjectStatus{ + ObjectID: 456, + ObjectType: workspace.Notebook, + Path: "/Notebook", + Language: "PYTHON", + } + qa.HTTPFixturesApply(t, + []qa.HTTPFixture{ + meAdminFixture, + noCurrentMetastoreAttached, + emptyRepos, + emptyIpAccessLIst, + { + Method: "GET", + Resource: "/api/2.0/workspace/list?path=%2F", + Response: workspace.ObjectList{ + Objects: []workspace.ObjectStatus{notebookStatus, fileStatus, + { + ObjectID: 4567, + ObjectType: workspace.Notebook, + Path: "/UnmatchedNotebook", + Language: "PYTHON", + }, + { + ObjectID: 1234, + ObjectType: workspace.File, + Path: "/UnmatchedFile", + }, + { + ObjectID: 456, + ObjectType: workspace.Directory, + Path: "/databricks_automl", + }, + }, }, ReuseRequest: true, }, @@ -2410,10 +2538,27 @@ func TestImportingNotebooksWorkspaceFiles(t *testing.T) { ic := newImportContext(client) ic.Directory = tmpDir - ic.enableListing("notebooks") + ic.enableListing("notebooks,wsfiles") + ic.excludeRegexStr = "databricks_automl" + ic.matchRegexStr = "^/[FN].*$" + ic.filterDirectoriesDuringWorkspaceWalking = true err := ic.Run() assert.NoError(t, err) + // check generated code for notebooks + content, err := os.ReadFile(tmpDir + "/notebooks.tf") + assert.NoError(t, err) + contentStr := string(content) 
+ assert.True(t, strings.Contains(contentStr, `resource "databricks_notebook" "notebook_456"`)) + assert.True(t, strings.Contains(contentStr, `path = "/Notebook"`)) + assert.False(t, strings.Contains(contentStr, `/UnmatchedNotebook`)) + // check generated code for workspace files + content, err = os.ReadFile(tmpDir + "/wsfiles.tf") + assert.NoError(t, err) + contentStr = string(content) + assert.True(t, strings.Contains(contentStr, `resource "databricks_workspace_file" "file_123"`)) + assert.True(t, strings.Contains(contentStr, `path = "/File"`)) + assert.False(t, strings.Contains(contentStr, `/UnmatchedFile`)) }) } diff --git a/exporter/util.go b/exporter/util.go index e9380a9b56..5e4f53dcaa 100644 --- a/exporter/util.go +++ b/exporter/util.go @@ -35,9 +35,15 @@ func (ic *importContext) isServiceInListing(service string) bool { } func (ic *importContext) MatchesName(n string) bool { - if ic.match == "" { + if ic.match == "" && ic.matchRegex == nil && ic.excludeRegex == nil { return true } + if ic.excludeRegex != nil && ic.excludeRegex.MatchString(n) { + return false + } + if ic.matchRegex != nil { + return ic.matchRegex.MatchString(n) + } return strings.Contains(strings.ToLower(n), strings.ToLower(ic.match)) } diff --git a/exporter/util_test.go b/exporter/util_test.go index 588c831db7..912baa78b4 100644 --- a/exporter/util_test.go +++ b/exporter/util_test.go @@ -316,16 +316,16 @@ func TestGetEnvAsInt(t *testing.T) { } func TestExcludeAuxiliaryDirectories(t *testing.T) { - assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "", ObjectType: workspace.Directory})) - assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{ObjectType: workspace.File})) - assert.True(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc", + assert.False(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "", ObjectType: workspace.Directory})) + assert.False(t, isAuxiliaryDirectory(workspace.ObjectStatus{ObjectType: workspace.File})) + assert.False(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc", ObjectType: workspace.Directory})) // should be ignored - assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/.ide", + assert.True(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "/Users/user@domain.com/.ide", ObjectType: workspace.Directory})) - assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Shared/.bundle", + assert.True(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "/Shared/.bundle", ObjectType: workspace.Directory})) - assert.False(t, excludeAuxiliaryDirectories(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc/__pycache__", + assert.True(t, isAuxiliaryDirectory(workspace.ObjectStatus{Path: "/Users/user@domain.com/abc/__pycache__", ObjectType: workspace.Directory})) } diff --git a/exporter/util_workspace.go b/exporter/util_workspace.go index 5a5621f806..8dcbefbaf0 100644 --- a/exporter/util_workspace.go +++ b/exporter/util_workspace.go @@ -93,17 +93,18 @@ func (ic *importContext) getAllDirectories() []workspace.ObjectStatus { var directoriesToIgnore = []string{".ide", ".bundle", "__pycache__"} // TODO: add ignoring directories of deleted users? This could potentially decrease the number of processed objects... 
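As a rough, standalone sketch of the precedence that `MatchesName` above implements (the exclude regex wins, then the match regex, then the case-insensitive `-match` substring check): the helper below is illustrative only, and the sample paths are modelled on the test fixtures rather than taken from a real workspace.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchesName mirrors the order of checks in importContext.MatchesName:
// an exclude regex always wins, a match regex is consulted next, and the
// plain -match substring filter is used only when no match regex is set.
func matchesName(name, match string, matchRe, excludeRe *regexp.Regexp) bool {
	if match == "" && matchRe == nil && excludeRe == nil {
		return true
	}
	if excludeRe != nil && excludeRe.MatchString(name) {
		return false
	}
	if matchRe != nil {
		return matchRe.MatchString(name)
	}
	return strings.Contains(strings.ToLower(name), strings.ToLower(match))
}

func main() {
	matchRe := regexp.MustCompile(`^/[FN].*$`)           // same pattern as in the tests above
	excludeRe := regexp.MustCompile(`databricks_automl`) // drop AutoML artifacts

	for _, name := range []string{"/Notebook", "/File", "/UnmatchedNotebook", "/databricks_automl/nb"} {
		fmt.Printf("%-25s exported=%v\n", name, matchesName(name, "", matchRe, excludeRe))
	}
}
```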
-func excludeAuxiliaryDirectories(v workspace.ObjectStatus) bool { +func isAuxiliaryDirectory(v workspace.ObjectStatus) bool { if v.ObjectType != workspace.Directory { - return true + return false } // TODO: rewrite to use suffix check, etc., instead of split and slice contains? parts := strings.Split(v.Path, "/") result := len(parts) > 1 && slices.Contains[[]string, string](directoriesToIgnore, parts[len(parts)-1]) + log.Printf("[DEBUG] directory %s: %v", v.Path, result) if result { log.Printf("[DEBUG] Ignoring directory %s", v.Path) } - return !result + return result } func (ic *importContext) getAllWorkspaceObjects(visitor func([]workspace.ObjectStatus)) []workspace.ObjectStatus { @@ -113,7 +114,15 @@ func (ic *importContext) getAllWorkspaceObjects(visitor func([]workspace.ObjectS t1 := time.Now() log.Print("[INFO] Starting to list all workspace objects") notebooksAPI := workspace.NewNotebooksAPI(ic.Context, ic.Client) - ic.allWorkspaceObjects, _ = ListParallel(notebooksAPI, "/", excludeAuxiliaryDirectories, visitor) + shouldIncludeDirectory := func(v workspace.ObjectStatus) bool { + decision := !isAuxiliaryDirectory(v) + if decision && ic.filterDirectoriesDuringWorkspaceWalking { + decision = ic.MatchesName(v.Path) + } + // log.Printf("[DEBUG] decision of shouldIncludeDirectory for %s: %v", v.Path, decision) + return decision + } + ic.allWorkspaceObjects, _ = ListParallel(notebooksAPI, "/", shouldIncludeDirectory, visitor) log.Printf("[INFO] Finished listing of all workspace objects. %d objects in total. %v seconds", len(ic.allWorkspaceObjects), time.Since(t1).Seconds()) } From 28b8f4934c116d3d861703cf4dba55c0614ef535 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Fri, 1 Nov 2024 10:52:22 +0100 Subject: [PATCH 14/14] [Internal] Always write message for manual test integration (#4188) ## Changes The old script could not be run from `master` due to security restrictions, and there is no reliable way to detect whether a user has access to secrets. ## Tests Opened a PR in the Java SDK from a fork: https://github.com/databricks/databricks-sdk-java/pull/375 --- .github/workflows/external-message.yml | 68 ++----------------------- .github/workflows/integration-tests.yml | 9 ++-- 2 files changed, 10 insertions(+), 67 deletions(-) diff --git a/.github/workflows/external-message.yml b/.github/workflows/external-message.yml index b9534520a0..d9a715d62f 100644 --- a/.github/workflows/external-message.yml +++ b/.github/workflows/external-message.yml @@ -11,7 +11,6 @@ on: branches: - main - jobs: comment-on-pr: runs-on: ubuntu-latest @@ -19,73 +18,15 @@ jobs: pull-requests: write steps: - # NOTE: The following checks may not be accurate depending on Org or Repo settings. 
- - name: Check user and potential secret access - id: check-secrets-access - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - USER_LOGIN="${{ github.event.pull_request.user.login }}" - REPO_OWNER="${{ github.repository_owner }}" - REPO_NAME="${{ github.event.repository.name }}" - - echo "Pull request opened by: $USER_LOGIN" - - # Check if PR is from a fork - IS_FORK=$([[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && echo "true" || echo "false") - - HAS_ACCESS="false" - - # Check user's permission level on the repository - USER_PERMISSION=$(gh api repos/$REPO_OWNER/$REPO_NAME/collaborators/$USER_LOGIN/permission --jq '.permission') - - if [[ "$USER_PERMISSION" == "admin" || "$USER_PERMISSION" == "write" ]]; then - HAS_ACCESS="true" - elif [[ "$USER_PERMISSION" == "read" ]]; then - # For read access, we need to check if the user has been explicitly granted secret access - # This information is not directly available via API, so we'll make an assumption - # that read access does not imply secret access - HAS_ACCESS="false" - fi - - # Check if repo owner is an organization - IS_ORG=$(gh api users/$REPO_OWNER --jq '.type == "Organization"') - - if [[ "$IS_ORG" == "true" && "$HAS_ACCESS" == "false" ]]; then - # Check if user is a member of any team with write or admin access to the repo - TEAMS_WITH_ACCESS=$(gh api repos/$REPO_OWNER/$REPO_NAME/teams --jq '.[] | select(.permission == "push" or .permission == "admin") | .slug') - for team in $TEAMS_WITH_ACCESS; do - IS_TEAM_MEMBER=$(gh api orgs/$REPO_OWNER/teams/$team/memberships/$USER_LOGIN --silent && echo "true" || echo "false") - if [[ "$IS_TEAM_MEMBER" == "true" ]]; then - HAS_ACCESS="true" - break - fi - done - fi - - # If it's a fork, set HAS_ACCESS to false regardless of other checks - if [[ "$IS_FORK" == "true" ]]; then - HAS_ACCESS="false" - fi - - echo "has_secrets_access=$HAS_ACCESS" >> $GITHUB_OUTPUT - if [[ "$HAS_ACCESS" == "true" ]]; then - echo "User $USER_LOGIN likely has access to secrets" - else - echo "User $USER_LOGIN likely does not have access to secrets" - fi - - - uses: actions/checkout@v4 - name: Delete old comments - if: steps.check-secrets-access.outputs.has_secrets_access != 'true' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | # Delete previous comment if it exists previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \ - --jq '.[] | select(.body | startswith("")) | .id') + --jq '.[] | select(.body | startswith("")) | .id') echo "Previous comment IDs: $previous_comment_ids" # Iterate over each comment ID and delete the comment if [ ! 
-z "$previous_comment_ids" ]; then @@ -96,14 +37,15 @@ jobs: fi - name: Comment on PR - if: steps.check-secrets-access.outputs.has_secrets_access != 'true' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} COMMIT_SHA: ${{ github.event.pull_request.head.sha }} run: | gh pr comment ${{ github.event.pull_request.number }} --body \ - " - Run integration tests manually: + " + If integration tests don't run automatically, an authorized user can run them manually by following the instructions below: + + Trigger: [go/deco-tests-run/terraform](https://go/deco-tests-run/terraform) Inputs: diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 67ed709365..653a36c644 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -12,17 +12,18 @@ jobs: check-token: name: Check secrets access runs-on: ubuntu-latest + environment: "test-trigger-is" outputs: has_token: ${{ steps.set-token-status.outputs.has_token }} steps: - - name: Check if GITHUB_TOKEN is set + - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set id: set-token-status run: | - if [ -z "${{ secrets.GITHUB_TOKEN }}" ]; then - echo "GITHUB_TOKEN is empty. User has no access to tokens." + if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then + echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets." echo "::set-output name=has_token::false" else - echo "GITHUB_TOKEN is set. User has no access to tokens." + echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets." echo "::set-output name=has_token::true" fi