diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 2d9cb6d86d..ecf041814d 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -cf9c61453990df0f9453670f2fe68e1b128647a2 \ No newline at end of file +25b2478e5a18c888f0d423249abde5499dc58424 \ No newline at end of file diff --git a/catalog/resource_online_table.go b/catalog/resource_online_table.go index ca24d5f76f..ee4aa44754 100644 --- a/catalog/resource_online_table.go +++ b/catalog/resource_online_table.go @@ -16,29 +16,6 @@ import ( const onlineTableDefaultProvisionTimeout = 90 * time.Minute -func waitForOnlineTableCreation(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error { - return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError { - endpoint, err := w.OnlineTables.GetByName(ctx, onlineTableName) - if err != nil { - return retry.NonRetryableError(err) - } - if endpoint.Status == nil { - return retry.RetryableError(fmt.Errorf("online table status is not available yet")) - } - switch endpoint.Status.DetailedState { - case catalog.OnlineTableStateOnline, catalog.OnlineTableStateOnlineContinuousUpdate, - catalog.OnlineTableStateOnlineNoPendingUpdate, catalog.OnlineTableStateOnlineTriggeredUpdate: - return nil - - // does catalog.OnlineTableStateOffline means that it's failed? - case catalog.OnlineTableStateOfflineFailed, catalog.OnlineTableStateOnlinePipelineFailed: - return retry.NonRetryableError(fmt.Errorf("online table status returned %s for online table: %s", - endpoint.Status.DetailedState.String(), onlineTableName)) - } - return retry.RetryableError(fmt.Errorf("online table %s is still pending", onlineTableName)) - }) -} - func waitForOnlineTableDeletion(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error { return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError { _, err := w.OnlineTables.GetByName(ctx, onlineTableName) @@ -75,9 +52,9 @@ func ResourceOnlineTable() common.Resource { if err != nil { return err } - var req catalog.CreateOnlineTableRequest - common.DataToStructPointer(d, s, &req) - res, err := w.OnlineTables.Create(ctx, req) + var table catalog.OnlineTable + common.DataToStructPointer(d, s, &table) + res, err := w.OnlineTables.Create(ctx, catalog.CreateOnlineTableRequest{Table: &table}) if err != nil { return err } @@ -85,7 +62,7 @@ func ResourceOnlineTable() common.Resource { // If the resource creation timeout is exceeded while waiting for the online table to be ready, this ensures the online table is persisted in the state. 
d.SetId(res.Name) // this should be specified in the API Spec - filed a ticket to add it - err = waitForOnlineTableCreation(w, ctx, res.Name) + _, err = res.GetWithTimeout(onlineTableDefaultProvisionTimeout) if err != nil { return err } diff --git a/catalog/resource_online_table_test.go b/catalog/resource_online_table_test.go index 1deddd02a3..9f19063b48 100644 --- a/catalog/resource_online_table_test.go +++ b/catalog/resource_online_table_test.go @@ -1,8 +1,10 @@ package catalog import ( + "errors" "fmt" "testing" + "time" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/experimental/mocks" @@ -47,6 +49,13 @@ func TestOnlineTableCreate(t *testing.T) { PrimaryKeyColumns: []string{"id"}, }, } + otStatusNotSetWait := &catalog.WaitGetOnlineTableActive[catalog.OnlineTable]{ + Response: otStatusNotSet, + Name: "main.default.online_table", + Poll: func(d time.Duration, f func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) { + return otStatusOnline, nil + }, + } // otStatusUnknown := &catalog.OnlineTable{ // Name: "main.default.online_table", // Spec: &catalog.OnlineTableSpec{ @@ -60,16 +69,15 @@ func TestOnlineTableCreate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, - }).Return(otStatusNotSet, nil) - // TODO: how to emulate the status change - // e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusNotSet, nil) - // e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusUnknown, nil) + }).Return(otStatusNotSetWait, nil) e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusOnline, nil) }, Resource: ResourceOnlineTable(), @@ -85,11 +93,13 @@ func TestOnlineTableCreate_ErrorImmediately(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, }).Return(nil, fmt.Errorf("error!")) }, @@ -100,33 +110,41 @@ func TestOnlineTableCreate_ErrorImmediately(t *testing.T) { } func TestOnlineTableCreate_ErrorInWait(t *testing.T) { - otStatusError := &catalog.OnlineTable{ + otStatusProvisioning := &catalog.OnlineTable{ Name: "main.default.online_table", Spec: &catalog.OnlineTableSpec{ RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, SourceTableFullName: "main.default.test", PrimaryKeyColumns: []string{"id"}, }, - Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateOfflineFailed}, + 
Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateProvisioning}, + } + otStatusErrorWait := &catalog.WaitGetOnlineTableActive[catalog.OnlineTable]{ + Response: otStatusProvisioning, + Name: "main.default.online_table", + Poll: func(d time.Duration, f func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) { + return nil, errors.New("failed to reach ACTIVE, got OFFLINE_FAILED: error!") + }, } d, err := qa.ResourceFixture{ MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockOnlineTablesAPI().EXPECT() e.Create(mock.Anything, catalog.CreateOnlineTableRequest{ - Name: "main.default.online_table", - Spec: &catalog.OnlineTableSpec{ - RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, - SourceTableFullName: "main.default.test", - PrimaryKeyColumns: []string{"id"}, + Table: &catalog.OnlineTable{ + Name: "main.default.online_table", + Spec: &catalog.OnlineTableSpec{ + RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{}, + SourceTableFullName: "main.default.test", + PrimaryKeyColumns: []string{"id"}, + }, }, - }).Return(otStatusError, nil) - e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusError, nil) + }).Return(otStatusErrorWait, nil) }, Resource: ResourceOnlineTable(), HCL: onlineTableHcl, Create: true, }.Apply(t) - qa.AssertErrorStartsWith(t, err, "online table status returned OFFLINE_FAILED for online table: main.default.online_table") + qa.AssertErrorStartsWith(t, err, "failed to reach ACTIVE, got OFFLINE_FAILED: error!") assert.Equal(t, "main.default.online_table", d.Id()) } diff --git a/dashboards/resource_dashboard.go b/dashboards/resource_dashboard.go index d872b33f49..de61205243 100644 --- a/dashboards/resource_dashboard.go +++ b/dashboards/resource_dashboard.go @@ -68,22 +68,22 @@ func ResourceDashboard() common.Resource { if err != nil { return err } - var newDashboardRequest dashboards.CreateDashboardRequest - common.DataToStructPointer(d, dashboardSchema, &newDashboardRequest) + var dashboard dashboards.Dashboard + common.DataToStructPointer(d, dashboardSchema, &dashboard) content, md5Hash, err := common.ReadSerializedJsonContent(d.Get("serialized_dashboard").(string), d.Get("file_path").(string)) if err != nil { return err } d.Set("md5", md5Hash) - newDashboardRequest.SerializedDashboard = content - createdDashboard, err := w.Lakeview.Create(ctx, newDashboardRequest) + dashboard.SerializedDashboard = content + createdDashboard, err := w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{Dashboard: &dashboard}) if err != nil && isParentDoesntExistError(err) { - log.Printf("[DEBUG] Parent folder '%s' doesn't exist, creating...", newDashboardRequest.ParentPath) - err = w.Workspace.MkdirsByPath(ctx, newDashboardRequest.ParentPath) + log.Printf("[DEBUG] Parent folder '%s' doesn't exist, creating...", dashboard.ParentPath) + err = w.Workspace.MkdirsByPath(ctx, dashboard.ParentPath) if err != nil { return err } - createdDashboard, err = w.Lakeview.Create(ctx, newDashboardRequest) + createdDashboard, err = w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{Dashboard: &dashboard}) } if err != nil { return err @@ -132,16 +132,19 @@ func ResourceDashboard() common.Resource { if err != nil { return err } - var updateDashboardRequest dashboards.UpdateDashboardRequest - common.DataToStructPointer(d, dashboardSchema, &updateDashboardRequest) - updateDashboardRequest.DashboardId = d.Id() + var dashboard dashboards.Dashboard + common.DataToStructPointer(d, dashboardSchema, &dashboard) + 
dashboard.DashboardId = d.Id() content, md5Hash, err := common.ReadSerializedJsonContent(d.Get("serialized_dashboard").(string), d.Get("file_path").(string)) if err != nil { return err } d.Set("md5", md5Hash) - updateDashboardRequest.SerializedDashboard = content - updatedDashboard, err := w.Lakeview.Update(ctx, updateDashboardRequest) + dashboard.SerializedDashboard = content + updatedDashboard, err := w.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{ + DashboardId: dashboard.DashboardId, + Dashboard: &dashboard, + }) if err != nil { return err } diff --git a/dashboards/resource_dashboard_test.go b/dashboards/resource_dashboard_test.go index 0b450fdd7d..9016ce2dda 100644 --- a/dashboards/resource_dashboard_test.go +++ b/dashboards/resource_dashboard_test.go @@ -16,10 +16,12 @@ func TestDashboardCreate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockLakeviewAPI().EXPECT() e.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", @@ -67,17 +69,21 @@ func TestDashboardCreate_NoParent(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { lv := w.GetMockLakeviewAPI().EXPECT() lv.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(nil, fmt.Errorf("Path (/path) doesn't exist.")).Once() w.GetMockWorkspaceAPI().EXPECT().MkdirsByPath(mock.Anything, "/path").Return(nil) lv.Create(mock.Anything, dashboards.CreateDashboardRequest{ - DisplayName: "Dashboard name", - WarehouseId: "abc", - ParentPath: "/path", - SerializedDashboard: "serialized_json", + Dashboard: &dashboards.Dashboard{ + DisplayName: "Dashboard name", + WarehouseId: "abc", + ParentPath: "/path", + SerializedDashboard: "serialized_json", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", @@ -154,10 +160,14 @@ func TestDashboardUpdate(t *testing.T) { MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { e := w.GetMockLakeviewAPI().EXPECT() e.Update(mock.Anything, dashboards.UpdateDashboardRequest{ - DashboardId: "xyz", - DisplayName: "Dashboard name", - WarehouseId: "abc", - SerializedDashboard: "serialized_dashboard_updated", + DashboardId: "xyz", + Dashboard: &dashboards.Dashboard{ + DashboardId: "xyz", + DisplayName: "Dashboard name", + WarehouseId: "abc", + SerializedDashboard: "serialized_dashboard_updated", + ParentPath: "/path", + }, }).Return(&dashboards.Dashboard{ DashboardId: "xyz", DisplayName: "Dashboard name", diff --git a/go.mod b/go.mod index 4e97cc0d23..1e72ea27a6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.49.0 + github.com/databricks/databricks-sdk-go v0.50.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 
e95a0ffe39..1188a3923d 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.49.0 h1:VBTeZZMLIuBSM4kxOCfUcW9z4FUQZY2QeNRD5qm9FUQ= -github.com/databricks/databricks-sdk-go v0.49.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.50.0 h1:Zl4uBhYMT5z6aDojCQJPT2zCYjjfqxBQSQn8uLTphpo= +github.com/databricks/databricks-sdk-go v0.50.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/acceptance/dashboard_test.go b/internal/acceptance/dashboard_test.go index 49118c9455..91c6335b9a 100644 --- a/internal/acceptance/dashboard_test.go +++ b/internal/acceptance/dashboard_test.go @@ -315,11 +315,14 @@ func TestAccDashboardWithRemoteChange(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) _, err = w.Lakeview.Update(context.Background(), dashboards.UpdateDashboardRequest{ - DashboardId: dashboard_id, - DisplayName: display_name, - Etag: etag, - WarehouseId: warehouse_id, - SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + DashboardId: dashboard_id, + Dashboard: &dashboards.Dashboard{ + DashboardId: dashboard_id, + DisplayName: display_name, + Etag: etag, + WarehouseId: warehouse_id, + SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + }, }) require.NoError(t, err) }, @@ -419,11 +422,14 @@ func TestAccDashboardTestAll(t *testing.T) { w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) _, err = w.Lakeview.Update(context.Background(), dashboards.UpdateDashboardRequest{ - DashboardId: dashboard_id, - DisplayName: display_name, - Etag: etag, - WarehouseId: warehouse_id, - SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + DashboardId: dashboard_id, + Dashboard: &dashboards.Dashboard{ + DashboardId: dashboard_id, + DisplayName: display_name, + Etag: etag, + WarehouseId: warehouse_id, + SerializedDashboard: "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page Modified Remote\"}]}", + }, }) require.NoError(t, err) }, diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 4f90baf118..b5a602ba1f 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -144,6 +144,8 @@ func (newState *AppAccessControlResponse) SyncEffectiveFieldsDuringRead(existing } type AppDeployment struct { + // The name of the app. + AppName types.String `tfsdk:"-"` // The creation time of the deployment. Formatted timestamp in ISO 6801. 
CreateTime types.String `tfsdk:"create_time" tf:"optional"` EffectiveCreateTime types.String `tfsdk:"effective_create_time" tf:"computed,optional"` @@ -398,45 +400,6 @@ func (newState *ComputeStatus) SyncEffectiveFieldsDuringRead(existingState Compu } } -type CreateAppDeploymentRequest struct { - // The name of the app. - AppName types.String `tfsdk:"-"` - // The unique id of the deployment. - DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"` - // The mode of which the deployment will manage the source code. - Mode types.String `tfsdk:"mode" tf:"optional"` - // The workspace file system path of the source code used to create the app - // deployment. This is different from - // `deployment_artifacts.source_code_path`, which is the path used by the - // deployed app. The former refers to the original source code location of - // the app in the workspace during deployment creation, whereas the latter - // provides a system generated stable snapshotted source code path used by - // the deployment. - SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` -} - -func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppDeploymentRequest) { -} - -func (newState *CreateAppDeploymentRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppDeploymentRequest) { -} - -type CreateAppRequest struct { - // The description of the app. - Description types.String `tfsdk:"description" tf:"optional"` - // The name of the app. The name must contain only lowercase alphanumeric - // characters and hyphens. It must be unique within the workspace. - Name types.String `tfsdk:"name" tf:""` - // Resources for the app. - Resources []AppResource `tfsdk:"resources" tf:"optional"` -} - -func (newState *CreateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateAppRequest) { -} - -func (newState *CreateAppRequest) SyncEffectiveFieldsDuringRead(existingState CreateAppRequest) { -} - // Delete an app type DeleteAppRequest struct { // The name of the app. @@ -588,19 +551,3 @@ func (newState *StopAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan Sto func (newState *StopAppRequest) SyncEffectiveFieldsDuringRead(existingState StopAppRequest) { } - -type UpdateAppRequest struct { - // The description of the app. - Description types.String `tfsdk:"description" tf:"optional"` - // The name of the app. The name must contain only lowercase alphanumeric - // characters and hyphens. It must be unique within the workspace. - Name types.String `tfsdk:"name" tf:""` - // Resources for the app. - Resources []AppResource `tfsdk:"resources" tf:"optional"` -} - -func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAppRequest) { -} - -func (newState *UpdateAppRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAppRequest) { -} diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index 55778dfa87..caf38f865c 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -728,20 +728,6 @@ func (newState *CreateMonitor) SyncEffectiveFieldsDuringCreateOrUpdate(plan Crea func (newState *CreateMonitor) SyncEffectiveFieldsDuringRead(existingState CreateMonitor) { } -// Online Table information. -type CreateOnlineTableRequest struct { - // Full three-part (catalog, schema, table) name of the table. - Name types.String `tfsdk:"name" tf:"optional"` - // Specification of the online table. 
- Spec []OnlineTableSpec `tfsdk:"spec" tf:"optional,object"` -} - -func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateOnlineTableRequest) { -} - -func (newState *CreateOnlineTableRequest) SyncEffectiveFieldsDuringRead(existingState CreateOnlineTableRequest) { -} - type CreateRegisteredModelRequest struct { // The name of the catalog where the schema and the registered model reside CatalogName types.String `tfsdk:"catalog_name" tf:""` diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index b76126b18b..2066f6a422 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -15,93 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -type CreateDashboardRequest struct { - // The display name of the dashboard. - DisplayName types.String `tfsdk:"display_name" tf:""` - // The workspace path of the folder containing the dashboard. Includes - // leading slash and no trailing slash. This field is excluded in List - // Dashboards responses. - ParentPath types.String `tfsdk:"parent_path" tf:"optional"` - EffectiveParentPath types.String `tfsdk:"effective_parent_path" tf:"computed,optional"` - // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. Use the [get dashboard API] to - // retrieve an example response, which includes the `serialized_dashboard` - // field. This field provides the structure of the JSON string that - // represents the dashboard's layout and components. - // - // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` - // The warehouse ID used to run the dashboard. - WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` -} - -func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateDashboardRequest) { - newState.EffectiveParentPath = newState.ParentPath - newState.ParentPath = plan.ParentPath -} - -func (newState *CreateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState CreateDashboardRequest) { - newState.EffectiveParentPath = existingState.EffectiveParentPath - if existingState.EffectiveParentPath.ValueString() == newState.ParentPath.ValueString() { - newState.ParentPath = existingState.ParentPath - } -} - -type CreateScheduleRequest struct { - // The cron expression describing the frequency of the periodic refresh for - // this schedule. - CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` - // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // The display name for schedule. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The status indicates whether this schedule is paused or not. 
- PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` -} - -func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateScheduleRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId -} - -func (newState *CreateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState CreateScheduleRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } -} - -type CreateSubscriptionRequest struct { - // UUID identifying the dashboard to which the subscription belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // UUID identifying the schedule to which the subscription belongs. - ScheduleId types.String `tfsdk:"-"` - EffectiveScheduleId types.String `tfsdk:"-"` - // Subscriber details for users and destinations to be added as subscribers - // to the schedule. - Subscriber []Subscriber `tfsdk:"subscriber" tf:"object"` -} - -func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateSubscriptionRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId - newState.EffectiveScheduleId = newState.ScheduleId - newState.ScheduleId = plan.ScheduleId -} - -func (newState *CreateSubscriptionRequest) SyncEffectiveFieldsDuringRead(existingState CreateSubscriptionRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } - newState.EffectiveScheduleId = existingState.EffectiveScheduleId - if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { - newState.ScheduleId = existingState.ScheduleId - } -} - type CronSchedule struct { // A cron expression using quartz syntax. EX: `0 0 8 * * ?` represents // everyday at 8am. See [Cron Trigger] for details. 
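The CreateDashboardRequest, CreateScheduleRequest, and CreateSubscriptionRequest models removed in this file mirror the resource changes earlier in the diff: with SDK v0.50.0, Lakeview create and update calls take the payload as an embedded Dashboard struct instead of top-level request fields. A minimal sketch of the new call shape, assuming a configured WorkspaceClient; the display name, warehouse ID, and paths are placeholders:

package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

// createExampleDashboard shows the v0.50.0 request shape: the payload lives
// in the embedded Dashboard struct, not on the request itself.
func createExampleDashboard(ctx context.Context, w *databricks.WorkspaceClient) (*dashboards.Dashboard, error) {
	return w.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{
		Dashboard: &dashboards.Dashboard{
			DisplayName:         "Dashboard name", // placeholder
			WarehouseId:         "abc",            // placeholder
			ParentPath:          "/path",          // placeholder
			SerializedDashboard: `{"pages":[]}`,   // placeholder
		},
	})
}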
@@ -308,22 +221,6 @@ func (newState *DeleteSubscriptionResponse) SyncEffectiveFieldsDuringCreateOrUpd func (newState *DeleteSubscriptionResponse) SyncEffectiveFieldsDuringRead(existingState DeleteSubscriptionResponse) { } -// Execute SQL query in a conversation message -type ExecuteMessageQueryRequest struct { - // Conversation ID - ConversationId types.String `tfsdk:"-"` - // Message ID - MessageId types.String `tfsdk:"-"` - // Genie space ID - SpaceId types.String `tfsdk:"-"` -} - -func (newState *ExecuteMessageQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExecuteMessageQueryRequest) { -} - -func (newState *ExecuteMessageQueryRequest) SyncEffectiveFieldsDuringRead(existingState ExecuteMessageQueryRequest) { -} - // Genie AI Response type GenieAttachment struct { Query []QueryAttachment `tfsdk:"query" tf:"optional,object"` @@ -373,6 +270,22 @@ func (newState *GenieCreateConversationMessageRequest) SyncEffectiveFieldsDuring func (newState *GenieCreateConversationMessageRequest) SyncEffectiveFieldsDuringRead(existingState GenieCreateConversationMessageRequest) { } +// Execute SQL query in a conversation message +type GenieExecuteMessageQueryRequest struct { + // Conversation ID + ConversationId types.String `tfsdk:"-"` + // Message ID + MessageId types.String `tfsdk:"-"` + // Genie space ID + SpaceId types.String `tfsdk:"-"` +} + +func (newState *GenieExecuteMessageQueryRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenieExecuteMessageQueryRequest) { +} + +func (newState *GenieExecuteMessageQueryRequest) SyncEffectiveFieldsDuringRead(existingState GenieExecuteMessageQueryRequest) { +} + // Get conversation message type GenieGetConversationMessageRequest struct { // The ID associated with the target conversation. @@ -1112,82 +1025,3 @@ func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringCreateOrUpd func (newState *UnpublishDashboardResponse) SyncEffectiveFieldsDuringRead(existingState UnpublishDashboardResponse) { } - -type UpdateDashboardRequest struct { - // UUID identifying the dashboard. - DashboardId types.String `tfsdk:"-"` - // The display name of the dashboard. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The etag for the dashboard. Can be optionally provided on updates to - // ensure that the dashboard has not been modified since the last read. This - // field is excluded in List Dashboards responses. - Etag types.String `tfsdk:"etag" tf:"optional"` - EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` - // The contents of the dashboard in serialized string form. This field is - // excluded in List Dashboards responses. Use the [get dashboard API] to - // retrieve an example response, which includes the `serialized_dashboard` - // field. This field provides the structure of the JSON string that - // represents the dashboard's layout and components. - // - // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"` - // The warehouse ID used to run the dashboard. 
- WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` -} - -func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateDashboardRequest) { - newState.EffectiveEtag = newState.Etag - newState.Etag = plan.Etag -} - -func (newState *UpdateDashboardRequest) SyncEffectiveFieldsDuringRead(existingState UpdateDashboardRequest) { - newState.EffectiveEtag = existingState.EffectiveEtag - if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { - newState.Etag = existingState.Etag - } -} - -type UpdateScheduleRequest struct { - // The cron expression describing the frequency of the periodic refresh for - // this schedule. - CronSchedule []CronSchedule `tfsdk:"cron_schedule" tf:"object"` - // UUID identifying the dashboard to which the schedule belongs. - DashboardId types.String `tfsdk:"-"` - EffectiveDashboardId types.String `tfsdk:"-"` - // The display name for schedule. - DisplayName types.String `tfsdk:"display_name" tf:"optional"` - // The etag for the schedule. Must be left empty on create, must be provided - // on updates to ensure that the schedule has not been modified since the - // last read, and can be optionally provided on delete. - Etag types.String `tfsdk:"etag" tf:"optional"` - EffectiveEtag types.String `tfsdk:"effective_etag" tf:"computed,optional"` - // The status indicates whether this schedule is paused or not. - PauseStatus types.String `tfsdk:"pause_status" tf:"optional"` - // UUID identifying the schedule. - ScheduleId types.String `tfsdk:"-"` - EffectiveScheduleId types.String `tfsdk:"-"` -} - -func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateScheduleRequest) { - newState.EffectiveDashboardId = newState.DashboardId - newState.DashboardId = plan.DashboardId - newState.EffectiveEtag = newState.Etag - newState.Etag = plan.Etag - newState.EffectiveScheduleId = newState.ScheduleId - newState.ScheduleId = plan.ScheduleId -} - -func (newState *UpdateScheduleRequest) SyncEffectiveFieldsDuringRead(existingState UpdateScheduleRequest) { - newState.EffectiveDashboardId = existingState.EffectiveDashboardId - if existingState.EffectiveDashboardId.ValueString() == newState.DashboardId.ValueString() { - newState.DashboardId = existingState.DashboardId - } - newState.EffectiveEtag = existingState.EffectiveEtag - if existingState.EffectiveEtag.ValueString() == newState.Etag.ValueString() { - newState.Etag = existingState.Etag - } - newState.EffectiveScheduleId = existingState.EffectiveScheduleId - if existingState.EffectiveScheduleId.ValueString() == newState.ScheduleId.ValueString() { - newState.ScheduleId = existingState.ScheduleId - } -} diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 295d1e1a99..fe3918dabd 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -839,9 +839,8 @@ type GetRunRequest struct { IncludeHistory types.Bool `tfsdk:"-"` // Whether to include resolved parameter values in the response. IncludeResolvedValues types.Bool `tfsdk:"-"` - // To list the next page or the previous page of job tasks, set this field - // to the value of the `next_page_token` or `prev_page_token` returned in - // the GetJob response. + // To list the next page of job tasks, set this field to the value of the + // `next_page_token` returned in the GetJob response. PageToken types.String `tfsdk:"-"` // The canonical identifier of the run for which to retrieve the metadata. // This field is required. 
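With `prev_page_token` gone (its removal from Run appears just below), task pagination for a job run is forward-only. A short sketch of the loop this implies, assuming the Go SDK's Jobs.GetRun method and the Run.NextPageToken field:

package example

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

// allRunTasks pages through a run's tasks using only next_page_token;
// there is no longer a token for walking backwards.
func allRunTasks(ctx context.Context, w *databricks.WorkspaceClient, runId int64) ([]jobs.RunTask, error) {
	var tasks []jobs.RunTask
	pageToken := ""
	for {
		run, err := w.Jobs.GetRun(ctx, jobs.GetRunRequest{RunId: runId, PageToken: pageToken})
		if err != nil {
			return nil, err
		}
		tasks = append(tasks, run.Tasks...)
		if run.NextPageToken == "" {
			return tasks, nil
		}
		pageToken = run.NextPageToken
	}
}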
@@ -1727,8 +1726,10 @@ type RepairRun struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // Job-level parameters used in the run. for example `"param": // "overriding_val"` @@ -2040,8 +2041,6 @@ type Run struct { OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id" tf:"optional"` // The parameters used for this run. OverridingParameters []RunParameters `tfsdk:"overriding_parameters" tf:"optional,object"` - // A token that can be used to list the previous page of sub-resources. - PrevPageToken types.String `tfsdk:"prev_page_token" tf:"optional"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` // The repair history of the run. @@ -2182,8 +2181,10 @@ type RunJobTask struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // ID of the job to trigger. JobId types.Int64 `tfsdk:"job_id" tf:""` @@ -2290,8 +2291,10 @@ type RunNow struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // The ID of the job to be executed JobId types.Int64 `tfsdk:"job_id" tf:""` @@ -2447,8 +2450,10 @@ type RunParameters struct { // of this field (for example `{"jar_params":["john doe","35"]}`) cannot // exceed 10,000 bytes. // - // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set - // parameters containing information about job runs. + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables JarParams []types.String `tfsdk:"jar_params" tf:"optional"` // A map from keys to values for jobs with notebook task, for example // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed @@ -2584,13 +2589,14 @@ type RunTask struct { // cluster, this field is set once the Jobs service has requested a cluster // for the run. ClusterInstance []ClusterInstance `tfsdk:"cluster_instance" tf:"optional,object"` - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. 
+ // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []RunConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before @@ -2622,8 +2628,8 @@ type RunTask struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []RunForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by @@ -2643,16 +2649,17 @@ // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered mode are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // The time in milliseconds that the run has spent in the queue. QueueDuration types.Int64 `tfsdk:"queue_duration" tf:"optional"` @@ -2668,7 +2675,7 @@ // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` RunPageUrl types.String `tfsdk:"run_page_url" tf:"optional"` @@ -2680,12 +2687,14 @@ // job runs.
The total duration of a multitask job run is the value of the // `run_duration` field. SetupDuration types.Int64 `tfsdk:"setup_duration" tf:"optional"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -2702,7 +2711,8 @@ type RunTask struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // The time at which this run was started in epoch milliseconds // (milliseconds since 1/1/1970 UTC). This may not be the time when the job @@ -3112,13 +3122,14 @@ func (newState *SubmitRunResponse) SyncEffectiveFieldsDuringRead(existingState S } type SubmitTask struct { - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before @@ -3139,8 +3150,8 @@ type SubmitTask struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. 
Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` @@ -3150,30 +3161,33 @@ type SubmitTask struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task run. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered mode are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional value indicating the condition that determines whether the // task should be run once its dependencies have been completed. When // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of // possible values. RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -3190,7 +3204,8 @@ // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks.
This field is required and must be unique within its parent @@ -3236,13 +3251,14 @@ func (newState *TableUpdateTriggerConfiguration) SyncEffectiveFieldsDuringRead(e } type Task struct { - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. ConditionTask []ConditionTask `tfsdk:"condition_task" tf:"optional,object"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. DbtTask []DbtTask `tfsdk:"dbt_task" tf:"optional,object"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete before executing this @@ -3266,8 +3282,8 @@ type Task struct { // to manually restart the cluster if it stops responding. We suggest // running jobs and tasks on new clusters for greater reliability ExistingClusterId types.String `tfsdk:"existing_cluster_id" tf:"optional"` - // If for_each_task, indicates that this task must execute the nested task - // within it. + // The task executes a nested task for every input provided when the + // `for_each_task` field is present. ForEachTask []ForEachTask `tfsdk:"for_each_task" tf:"optional,object"` // An optional set of health rules that can be defined for this job. Health []JobsHealthRules `tfsdk:"health" tf:"optional,object"` @@ -3289,16 +3305,17 @@ type Task struct { // If new_cluster, a description of a new cluster that is created for each // run. NewCluster compute.ClusterSpec `tfsdk:"new_cluster" tf:"optional,object"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. + // The task runs a notebook when the `notebook_task` field is present. NotebookTask []NotebookTask `tfsdk:"notebook_task" tf:"optional,object"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // task. NotificationSettings []TaskNotificationSettings `tfsdk:"notification_settings" tf:"optional,object"` - // If pipeline_task, indicates that this task must execute a Pipeline. + // The task triggers a pipeline update when the `pipeline_task` field is + // present. Only pipelines configured to use triggered mode are supported. PipelineTask []PipelineTask `tfsdk:"pipeline_task" tf:"optional,object"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. + // The task runs a Python wheel when the `python_wheel_task` field is + // present. PythonWheelTask []PythonWheelTask `tfsdk:"python_wheel_task" tf:"optional,object"` // An optional policy to specify whether to retry a job when it times out. // The default behavior is to not retry on timeout.
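The reworded comments above and below all describe the same one-of convention: a task's type is determined by which single task-type field is present. A brief illustration with the Go SDK's jobs types; the task key and notebook path are placeholders:

package example

import "github.com/databricks/databricks-sdk-go/service/jobs"

// exampleTask is a notebook task: exactly one task-type field
// (NotebookTask here) is set, and the others are left nil.
var exampleTask = jobs.Task{
	TaskKey: "example",
	NotebookTask: &jobs.NotebookTask{
		NotebookPath: "/Workspace/Users/someone@example.com/notebook",
	},
}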
@@ -3313,14 +3330,16 @@ type Task struct { // `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: ALl // dependencies have failed RunIf types.String `tfsdk:"run_if" tf:"optional"` - // If run_job_task, indicates that this task must execute another job. + // The task triggers another job when the `run_job_task` field is present. RunJobTask []RunJobTask `tfsdk:"run_job_task" tf:"optional,object"` - // If spark_jar_task, indicates that this task must run a JAR. + // The task runs a JAR when the `spark_jar_task` field is present. SparkJarTask []SparkJarTask `tfsdk:"spark_jar_task" tf:"optional,object"` - // If spark_python_task, indicates that this task must run a Python file. + // The task runs a Python file when the `spark_python_task` field is + // present. SparkPythonTask []SparkPythonTask `tfsdk:"spark_python_task" tf:"optional,object"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. + // (Legacy) The task runs the spark-submit script when the + // `spark_submit_task` field is present. This task can run only on new + // clusters and is not compatible with serverless compute. // // In the `new_cluster` specification, `libraries` and `spark_conf` are not // supported. Instead, use `--jars` and `--py-files` to add Java and Python @@ -3337,7 +3356,8 @@ type Task struct { // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 // paths. SparkSubmitTask []SparkSubmitTask `tfsdk:"spark_submit_task" tf:"optional,object"` - // If sql_task, indicates that this job must execute a SQL task. + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. SqlTask []SqlTask `tfsdk:"sql_task" tf:"optional,object"` // A unique name for the task. This field is used to refer to this task from // other tasks. This field is required and must be unique within its parent diff --git a/internal/service/oauth2_tf/model.go b/internal/service/oauth2_tf/model.go index e738e7f094..eacea7e75a 100755 --- a/internal/service/oauth2_tf/model.go +++ b/internal/service/oauth2_tf/model.go @@ -342,6 +342,16 @@ func (newState *ListPublishedAppIntegrationsRequest) SyncEffectiveFieldsDuringRe // List service principal secrets type ListServicePrincipalSecretsRequest struct { + // An opaque page token which was the `next_page_token` in the response of + // the previous request to list the secrets for this service principal. + // Provide this token to retrieve the next page of secret entries. When + // providing a `page_token`, all other parameters provided to the request + // must match the previous request. To list all of the secrets for a service + // principal, it is necessary to continue requesting pages of entries until + // the response contains no `next_page_token`. Note that the number of + // entries returned must not be used to determine when the listing is + // complete. + PageToken types.String `tfsdk:"-"` // The service principal ID. ServicePrincipalId types.Int64 `tfsdk:"-"` } @@ -353,6 +363,8 @@ func (newState *ListServicePrincipalSecretsRequest) SyncEffectiveFieldsDuringRea } type ListServicePrincipalSecretsResponse struct { + // A token, which can be sent as `page_token` to retrieve the next page. 
+ NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` // List of the secrets Secrets []SecretInfo `tfsdk:"secrets" tf:"optional"` } diff --git a/internal/service/provisioning_tf/model.go b/internal/service/provisioning_tf/model.go index 17d8bbc18e..188e8f48df 100755 --- a/internal/service/provisioning_tf/model.go +++ b/internal/service/provisioning_tf/model.go @@ -277,6 +277,8 @@ type CreateWorkspaceRequest struct { GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled types.Bool `tfsdk:"is_no_public_ip_enabled" tf:"optional"` // The Google Cloud region of the workspace data plane in your Google // account. For example, `us-east4`. Location types.String `tfsdk:"location" tf:"optional"` @@ -482,6 +484,21 @@ func (newState *DeleteWorkspaceRequest) SyncEffectiveFieldsDuringCreateOrUpdate( func (newState *DeleteWorkspaceRequest) SyncEffectiveFieldsDuringRead(existingState DeleteWorkspaceRequest) { } +type ExternalCustomerInfo struct { + // Email of the authoritative user. + AuthoritativeUserEmail types.String `tfsdk:"authoritative_user_email" tf:"optional"` + // The authoritative user full name. + AuthoritativeUserFullName types.String `tfsdk:"authoritative_user_full_name" tf:"optional"` + // The legal entity name for the external workspace. + CustomerName types.String `tfsdk:"customer_name" tf:"optional"` +} + +func (newState *ExternalCustomerInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ExternalCustomerInfo) { +} + +func (newState *ExternalCustomerInfo) SyncEffectiveFieldsDuringRead(existingState ExternalCustomerInfo) { +} + type GcpKeyInfo struct { // The GCP KMS key's resource name KmsKeyId types.String `tfsdk:"kms_key_id" tf:""` @@ -1063,6 +1080,10 @@ type Workspace struct { // This value must be unique across all non-deleted deployments across all // AWS regions. DeploymentName types.String `tfsdk:"deployment_name" tf:"optional"` + // If this workspace is for an external customer, then external_customer_info + // is populated. If this workspace is not for an external customer, then + // external_customer_info is empty. + ExternalCustomerInfo []ExternalCustomerInfo `tfsdk:"external_customer_info" tf:"optional,object"` // The network settings for the workspace. The configurations are only for // Databricks-managed VPCs. It is ignored if you specify a customer-managed // VPC in the `network_id` field.", All the IP range configurations must be @@ -1089,6 +1110,8 @@ type Workspace struct { GcpManagedNetworkConfig []GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config" tf:"optional,object"` // The configurations for the GKE cluster of a Databricks workspace. GkeConfig []GkeConfig `tfsdk:"gke_config" tf:"optional,object"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled types.Bool `tfsdk:"is_no_public_ip_enabled" tf:"optional"` // The Google Cloud region of the workspace data plane in your Google // account (for example, `us-east4`).
Location types.String `tfsdk:"location" tf:"optional"` diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 71b73ba253..4564aeb780 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -14,6 +14,74 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) +type AibiDashboardEmbeddingAccessPolicy struct { + AccessPolicyType types.String `tfsdk:"access_policy_type" tf:""` +} + +func (newState *AibiDashboardEmbeddingAccessPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingAccessPolicy) { +} + +func (newState *AibiDashboardEmbeddingAccessPolicy) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingAccessPolicy) { +} + +type AibiDashboardEmbeddingAccessPolicySetting struct { + AibiDashboardEmbeddingAccessPolicy []AibiDashboardEmbeddingAccessPolicy `tfsdk:"aibi_dashboard_embedding_access_policy" tf:"object"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +func (newState *AibiDashboardEmbeddingAccessPolicySetting) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingAccessPolicySetting) { +} + +func (newState *AibiDashboardEmbeddingAccessPolicySetting) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingAccessPolicySetting) { +} + +type AibiDashboardEmbeddingApprovedDomains struct { + ApprovedDomains []types.String `tfsdk:"approved_domains" tf:"optional"` +} + +func (newState *AibiDashboardEmbeddingApprovedDomains) SyncEffectiveFieldsDuringCreateOrUpdate(plan AibiDashboardEmbeddingApprovedDomains) { +} + +func (newState *AibiDashboardEmbeddingApprovedDomains) SyncEffectiveFieldsDuringRead(existingState AibiDashboardEmbeddingApprovedDomains) { +} + +type AibiDashboardEmbeddingApprovedDomainsSetting struct { + AibiDashboardEmbeddingApprovedDomains []AibiDashboardEmbeddingApprovedDomains `tfsdk:"aibi_dashboard_embedding_approved_domains" tf:"object"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. 
@@ -962,6 +1030,42 @@ func (newState *GetAccountIpAccessListRequest) SyncEffectiveFieldsDuringCreateOr
 func (newState *GetAccountIpAccessListRequest) SyncEffectiveFieldsDuringRead(existingState GetAccountIpAccessListRequest) {
 }
 
+// Retrieve the AI/BI dashboard embedding access policy
+type GetAibiDashboardEmbeddingAccessPolicySettingRequest struct {
+	// etag used for versioning. The response is at least as fresh as the eTag
+	// provided. This is used for optimistic concurrency control as a way to
+	// help prevent simultaneous writes of a setting overwriting each other. It
+	// is strongly suggested that systems make use of the etag in the read ->
+	// delete pattern to perform setting deletions in order to avoid race
+	// conditions. That is, get an etag from a GET request, and pass it with the
+	// DELETE request to identify the rule set version you are deleting.
+	Etag types.String `tfsdk:"-"`
+}
+
+func (newState *GetAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAibiDashboardEmbeddingAccessPolicySettingRequest) {
+}
+
+func (newState *GetAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringRead(existingState GetAibiDashboardEmbeddingAccessPolicySettingRequest) {
+}
+
+// Retrieve the list of domains approved to host embedded AI/BI dashboards
+type GetAibiDashboardEmbeddingApprovedDomainsSettingRequest struct {
+	// etag used for versioning. The response is at least as fresh as the eTag
+	// provided. This is used for optimistic concurrency control as a way to
+	// help prevent simultaneous writes of a setting overwriting each other. It
+	// is strongly suggested that systems make use of the etag in the read ->
+	// delete pattern to perform setting deletions in order to avoid race
+	// conditions. That is, get an etag from a GET request, and pass it with the
+	// DELETE request to identify the rule set version you are deleting.
+	Etag types.String `tfsdk:"-"`
+}
+
+func (newState *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) {
+}
+
+func (newState *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringRead(existingState GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) {
+}
+
 // Get the automatic cluster update setting
 type GetAutomaticClusterUpdateSettingRequest struct {
 	// etag used for versioning. The response is at least as fresh as the eTag
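The same etag text recurs on the new Get request types because the pattern only pays off when callers handle the failure case: if another writer lands between the GET and the PATCH, the write is rejected and the caller should re-read and retry. A sketch of that retry loop under the same assumptions as above, with a hypothetical `errConflict` sentinel standing in for whatever conflict error the real API actually returns:

```go
package settingsretry

import (
	"context"
	"errors"
	"fmt"
)

// domainsSetting mirrors the approved-domains setting in plain Go.
type domainsSetting struct {
	ApprovedDomains []string
	Etag            string
}

// domainsClient is a placeholder, not the real SDK interface.
type domainsClient interface {
	Get(ctx context.Context, etag string) (*domainsSetting, error)
	Update(ctx context.Context, s domainsSetting) (*domainsSetting, error)
}

// errConflict stands in for whatever error the real API returns when the
// supplied etag is stale; the concrete error type is an assumption here.
var errConflict = errors.New("etag conflict")

// updateWithRetry re-reads and retries when a concurrent writer wins the
// race, the exact scenario the etag doc comments warn about.
func updateWithRetry(ctx context.Context, c domainsClient, domains []string, attempts int) error {
	etag := "" // first read just asks for the latest version
	for i := 0; i < attempts; i++ {
		current, err := c.Get(ctx, etag)
		if err != nil {
			return fmt.Errorf("read setting: %w", err)
		}
		if _, err := c.Update(ctx, domainsSetting{ApprovedDomains: domains, Etag: current.Etag}); err == nil {
			return nil
		} else if !errors.Is(err, errConflict) {
			return err
		}
		etag = current.Etag // stale write lost the race; re-read and retry
	}
	return fmt.Errorf("update still conflicting after %d attempts", attempts)
}
```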
@@ -1494,8 +1598,7 @@ type NccAzurePrivateEndpointRule struct {
 	// DISCONNECTED: Connection was removed by the private link resource owner,
 	// the private endpoint becomes informative and should be deleted for
 	// clean-up.
-	ConnectionState types.String `tfsdk:"connection_state" tf:"optional"`
-	EffectiveConnectionState types.String `tfsdk:"effective_connection_state" tf:"computed,optional"`
+	ConnectionState types.String `tfsdk:"connection_state" tf:"optional"`
 	// Time in epoch milliseconds when this object was created.
 	CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"`
 	EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"`
@@ -1526,8 +1629,6 @@ type NccAzurePrivateEndpointRule struct {
 }
 
 func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUpdate(plan NccAzurePrivateEndpointRule) {
-	newState.EffectiveConnectionState = newState.ConnectionState
-	newState.ConnectionState = plan.ConnectionState
 	newState.EffectiveCreationTime = newState.CreationTime
 	newState.CreationTime = plan.CreationTime
 	newState.EffectiveDeactivated = newState.Deactivated
@@ -1543,10 +1644,6 @@ func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringCreateOrUp
 }
 
 func (newState *NccAzurePrivateEndpointRule) SyncEffectiveFieldsDuringRead(existingState NccAzurePrivateEndpointRule) {
-	newState.EffectiveConnectionState = existingState.EffectiveConnectionState
-	if existingState.EffectiveConnectionState.ValueString() == newState.ConnectionState.ValueString() {
-		newState.ConnectionState = existingState.ConnectionState
-	}
 	newState.EffectiveCreationTime = existingState.EffectiveCreationTime
 	if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() {
 		newState.CreationTime = existingState.CreationTime
@@ -1968,6 +2065,9 @@ type TokenInfo struct {
 	CreationTime types.Int64 `tfsdk:"creation_time" tf:"optional"`
 	// Timestamp when the token expires.
 	ExpiryTime types.Int64 `tfsdk:"expiry_time" tf:"optional"`
+	// Approximate timestamp for the day the token was last used. Accurate up to
+	// 1 day.
+	LastUsedDay types.Int64 `tfsdk:"last_used_day" tf:"optional"`
 	// User ID of the user that owns the token.
 	OwnerId types.Int64 `tfsdk:"owner_id" tf:"optional"`
 	// ID of the token.
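The `NccAzurePrivateEndpointRule` hunks above drop `ConnectionState` from the effective-field machinery but leave the pattern itself intact: each server-controlled value keeps a computed `Effective*` twin, and the user-supplied field survives a refresh only while it still matches what the server reports. A stripped-down sketch of that reconciliation for a single field, modeled directly on the `CreationTime` handling that remains in the diff:

```go
package effectivefields

import "github.com/hashicorp/terraform-plugin-framework/types"

// rule keeps one user-settable field alongside its computed twin, mirroring
// the CreationTime / EffectiveCreationTime pair in the generated model.
type rule struct {
	CreationTime          types.Int64 `tfsdk:"creation_time" tf:"optional"`
	EffectiveCreationTime types.Int64 `tfsdk:"effective_creation_time" tf:"computed,optional"`
}

// syncDuringCreateOrUpdate stores the server's answer in the Effective
// twin, then restores the user's planned value so plan and state agree.
func (newState *rule) syncDuringCreateOrUpdate(plan rule) {
	newState.EffectiveCreationTime = newState.CreationTime
	newState.CreationTime = plan.CreationTime
}

// syncDuringRead carries the stored user value forward only while the
// server still reports the same effective value; once the server drifts,
// the raw server value surfaces and shows up as a diff.
func (newState *rule) syncDuringRead(existingState rule) {
	newState.EffectiveCreationTime = existingState.EffectiveCreationTime
	if existingState.EffectiveCreationTime.ValueInt64() == newState.CreationTime.ValueInt64() {
		newState.CreationTime = existingState.CreationTime
	}
}
```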
@@ -2032,6 +2132,46 @@ func (newState *TokenPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate
 func (newState *TokenPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState TokenPermissionsRequest) {
 }
 
+// Details required to update a setting.
+type UpdateAibiDashboardEmbeddingAccessPolicySettingRequest struct {
+	// This should always be set to true for Settings API. Added for AIP
+	// compliance.
+	AllowMissing types.Bool `tfsdk:"allow_missing" tf:""`
+	// Field mask is required to be passed into the PATCH request. Field mask
+	// specifies which fields of the setting payload will be updated. The field
+	// mask needs to be supplied as a single string. To specify multiple fields in
+	// the field mask, use comma as the separator (no space).
+	FieldMask types.String `tfsdk:"field_mask" tf:""`
+
+	Setting []AibiDashboardEmbeddingAccessPolicySetting `tfsdk:"setting" tf:"object"`
+}
+
+func (newState *UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) {
+}
+
+func (newState *UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) {
+}
+
+// Details required to update a setting.
+type UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest struct {
+	// This should always be set to true for Settings API. Added for AIP
+	// compliance.
+	AllowMissing types.Bool `tfsdk:"allow_missing" tf:""`
+	// Field mask is required to be passed into the PATCH request. Field mask
+	// specifies which fields of the setting payload will be updated. The field
+	// mask needs to be supplied as a single string. To specify multiple fields in
+	// the field mask, use comma as the separator (no space).
+	FieldMask types.String `tfsdk:"field_mask" tf:""`
+
+	Setting []AibiDashboardEmbeddingApprovedDomainsSetting `tfsdk:"setting" tf:"object"`
+}
+
+func (newState *UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) {
+}
+
+func (newState *UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringRead(existingState UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) {
+}
+
 // Details required to update a setting.
 type UpdateAutomaticClusterUpdateSettingRequest struct {
 	// This should always be set to true for Settings API. Added for AIP
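Finally, the `FieldMask` comments on the new update requests prescribe a specific wire format: one string, comma-separated, no spaces. A small sketch of building such a mask; the field paths shown are illustrative only, since the diff does not spell out which paths the settings PATCH endpoints accept:

```go
package fieldmaskexample

import "strings"

// buildFieldMask joins field paths into the format the FieldMask doc
// comment requires: a single string, comma-separated, no spaces.
func buildFieldMask(paths ...string) string {
	return strings.Join(paths, ",")
}

// The concrete paths accepted by the settings PATCH endpoints are not
// spelled out in this diff; the two below are purely illustrative.
var exampleMask = buildFieldMask(
	"aibi_dashboard_embedding_access_policy.access_policy_type",
	"etag",
)
```

Per the `AllowMissing` comment on the same structs, callers would also set `allow_missing` to true on every such PATCH.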