diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index e61d5779c..c4b47ca14 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -69e3a18d087fcce417b2f8d71d2f336f679ded5f \ No newline at end of file +7437dabb9dadee402c1fc060df4c1ce8cc5369f0 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 18364a940..1a9ea08c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,53 @@ # Version changelog +## 0.43.0 + +Major Changes and Improvements: + * Support partners in user agent for SDK ([#925](https://github.com/databricks/databricks-sdk-go/pull/925)). + * Add `serverless_compute_id` field to the config ([#952](https://github.com/databricks/databricks-sdk-go/pull/952)). + +Other Changes: + + * Generate from latest spec ([#944](https://github.com/databricks/databricks-sdk-go/pull/944)) and ([#947](https://github.com/databricks/databricks-sdk-go/pull/947)). + +API Changes: + + * Changed `IsolationMode` field for [catalog.CatalogInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogInfo) to [catalog.CatalogIsolationMode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogIsolationMode). + * Added `IsolationMode` field for [catalog.ExternalLocationInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ExternalLocationInfo). + * Added `MaxResults` and `PageToken` fields for [catalog.ListCatalogsRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ListCatalogsRequest). + * Added `NextPageToken` field for [catalog.ListCatalogsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ListCatalogsResponse). + * Added `TableServingUrl` field for [catalog.OnlineTable](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#OnlineTable). + * Added `IsolationMode` field for [catalog.StorageCredentialInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#StorageCredentialInfo). + * Changed `IsolationMode` field for [catalog.UpdateCatalog](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#UpdateCatalog) to [catalog.CatalogIsolationMode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogIsolationMode). + * Added `IsolationMode` field for [catalog.UpdateExternalLocation](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#UpdateExternalLocation). + * Added `IsolationMode` field for [catalog.UpdateStorageCredential](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#UpdateStorageCredential). + * Added [catalog.CatalogIsolationMode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogIsolationMode). + * Added `CreateSchedule`, `CreateSubscription`, `DeleteSchedule`, `DeleteSubscription`, `GetSchedule`, `GetSubscription`, `List`, `ListSchedules`, `ListSubscriptions` and `UpdateSchedule` methods for [w.Lakeview](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewAPI) workspace-level service. 
+ * Added [dashboards.CreateScheduleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CreateScheduleRequest), [dashboards.CreateSubscriptionRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CreateSubscriptionRequest), [dashboards.CronSchedule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#CronSchedule), [dashboards.DashboardView](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#DashboardView), [dashboards.DeleteScheduleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#DeleteScheduleRequest), [dashboards.DeleteSubscriptionRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#DeleteSubscriptionRequest), [dashboards.GetScheduleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GetScheduleRequest), [dashboards.GetSubscriptionRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GetSubscriptionRequest), [dashboards.ListDashboardsRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#ListDashboardsRequest), [dashboards.ListDashboardsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#ListDashboardsResponse), [dashboards.ListSchedulesRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#ListSchedulesRequest), [dashboards.ListSchedulesResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#ListSchedulesResponse), [dashboards.ListSubscriptionsRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#ListSubscriptionsRequest), [dashboards.ListSubscriptionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#ListSubscriptionsResponse), [dashboards.Schedule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#Schedule), [dashboards.SchedulePauseStatus](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#SchedulePauseStatus), [dashboards.Subscriber](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#Subscriber), [dashboards.Subscription](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#Subscription), [dashboards.SubscriptionSubscriberDestination](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#SubscriptionSubscriberDestination), [dashboards.SubscriptionSubscriberUser](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#SubscriptionSubscriberUser) and [dashboards.UpdateScheduleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#UpdateScheduleRequest) structs. + * Added `OnStreamingBacklogExceeded` field for [jobs.JobEmailNotifications](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#JobEmailNotifications). + * Added `EnvironmentKey` field for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). + * Removed `ConditionTask`, `DbtTask`, `NotebookTask`, `PipelineTask`, `PythonWheelTask`, `RunJobTask`, `SparkJarTask`, `SparkPythonTask`, `SparkSubmitTask`, `SqlTask` and `Environments` fields for [jobs.SubmitRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitRun). 
+ * Added `DbtTask` and `EnvironmentKey` fields for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). + * Added `OnStreamingBacklogExceeded` field for [jobs.TaskEmailNotifications](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TaskEmailNotifications). + * Added `Periodic` field for [jobs.TriggerSettings](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TriggerSettings). + * Added `OnStreamingBacklogExceeded` field for [jobs.WebhookNotifications](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#WebhookNotifications). + * Added [jobs.PeriodicTriggerConfiguration](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PeriodicTriggerConfiguration). + * Added [jobs.PeriodicTriggerConfigurationTimeUnit](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PeriodicTriggerConfigurationTimeUnit). + * Added `ProviderSummary` field for [marketplace.Listing](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#Listing). + * Added [marketplace.ProviderIconFile](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#ProviderIconFile). + * Added [marketplace.ProviderIconType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#ProviderIconType). + * Added [marketplace.ProviderListingSummaryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#ProviderListingSummaryInfo). + * Added `Start` method for [w.Apps](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AppsAPI) workspace-level service. + * Added [w.ServingEndpointsDataPlane](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsDataPlaneAPI) workspace-level service. + * Added `ServicePrincipalId` field for [serving.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#App). + * Added `ServicePrincipalName` field for [serving.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#App). + * Added [serving.StartAppRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#StartAppRequest). + * Added `QueryNextPage` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service. + * Added `QueryType` field for [vectorsearch.QueryVectorIndexRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#QueryVectorIndexRequest). + * Added `NextPageToken` field for [vectorsearch.QueryVectorIndexResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#QueryVectorIndexResponse). + * Added [vectorsearch.QueryVectorIndexNextPageRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#QueryVectorIndexNextPageRequest). + +OpenAPI SHA: 7437dabb9dadee402c1fc060df4c1ce8cc5369f0, Date: 2024-06-25 ## 0.42.0 * Ignore additional flaky test ([#930](https://github.com/databricks/databricks-sdk-go/pull/930)).
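The headline addition in this release is dashboard listing on the Lakeview service, with a lazy `List` iterator and an eager, page-draining `ListAll` (both generated below). A minimal sketch of calling it, assuming credentials are resolved from the environment or `~/.databrickscfg`, and assuming `dashboards.Dashboard` exposes a `DisplayName` field (that struct is outside this diff):

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	// Picks up DATABRICKS_HOST/DATABRICKS_TOKEN or ~/.databrickscfg.
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// Eager: ListAll drains every page of GET /api/2.0/lakeview/dashboards.
	all, err := w.Lakeview.ListAll(ctx, dashboards.ListDashboardsRequest{PageSize: 100})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d dashboards\n", len(all))

	// Lazy: the iterator fetches subsequent pages on demand via next_page_token.
	it := w.Lakeview.List(ctx, dashboards.ListDashboardsRequest{PageSize: 100})
	for it.HasNext(ctx) {
		d, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		// DisplayName is assumed from the pre-existing Dashboard struct.
		fmt.Println(d.DisplayName)
	}
}
```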
diff --git a/experimental/mocks/service/dashboards/mock_lakeview_interface.go b/experimental/mocks/service/dashboards/mock_lakeview_interface.go index 48a91c8a8..69f6bf1bc 100644 --- a/experimental/mocks/service/dashboards/mock_lakeview_interface.go +++ b/experimental/mocks/service/dashboards/mock_lakeview_interface.go @@ -914,6 +914,114 @@ func (_c *MockLakeviewInterface_Impl_Call) RunAndReturn(run func() dashboards.La return _c } +// List provides a mock function with given fields: ctx, request +func (_m *MockLakeviewInterface) List(ctx context.Context, request dashboards.ListDashboardsRequest) listing.Iterator[dashboards.Dashboard] { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for List") + } + + var r0 listing.Iterator[dashboards.Dashboard] + if rf, ok := ret.Get(0).(func(context.Context, dashboards.ListDashboardsRequest) listing.Iterator[dashboards.Dashboard]); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(listing.Iterator[dashboards.Dashboard]) + } + } + + return r0 +} + +// MockLakeviewInterface_List_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'List' +type MockLakeviewInterface_List_Call struct { + *mock.Call +} + +// List is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.ListDashboardsRequest +func (_e *MockLakeviewInterface_Expecter) List(ctx interface{}, request interface{}) *MockLakeviewInterface_List_Call { + return &MockLakeviewInterface_List_Call{Call: _e.mock.On("List", ctx, request)} +} + +func (_c *MockLakeviewInterface_List_Call) Run(run func(ctx context.Context, request dashboards.ListDashboardsRequest)) *MockLakeviewInterface_List_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.ListDashboardsRequest)) + }) + return _c +} + +func (_c *MockLakeviewInterface_List_Call) Return(_a0 listing.Iterator[dashboards.Dashboard]) *MockLakeviewInterface_List_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockLakeviewInterface_List_Call) RunAndReturn(run func(context.Context, dashboards.ListDashboardsRequest) listing.Iterator[dashboards.Dashboard]) *MockLakeviewInterface_List_Call { + _c.Call.Return(run) + return _c +} + +// ListAll provides a mock function with given fields: ctx, request +func (_m *MockLakeviewInterface) ListAll(ctx context.Context, request dashboards.ListDashboardsRequest) ([]dashboards.Dashboard, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListAll") + } + + var r0 []dashboards.Dashboard + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.ListDashboardsRequest) ([]dashboards.Dashboard, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.ListDashboardsRequest) []dashboards.Dashboard); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]dashboards.Dashboard) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.ListDashboardsRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockLakeviewInterface_ListAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAll' +type MockLakeviewInterface_ListAll_Call struct { + *mock.Call +} + +// ListAll is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.ListDashboardsRequest 
+func (_e *MockLakeviewInterface_Expecter) ListAll(ctx interface{}, request interface{}) *MockLakeviewInterface_ListAll_Call { + return &MockLakeviewInterface_ListAll_Call{Call: _e.mock.On("ListAll", ctx, request)} +} + +func (_c *MockLakeviewInterface_ListAll_Call) Run(run func(ctx context.Context, request dashboards.ListDashboardsRequest)) *MockLakeviewInterface_ListAll_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.ListDashboardsRequest)) + }) + return _c +} + +func (_c *MockLakeviewInterface_ListAll_Call) Return(_a0 []dashboards.Dashboard, _a1 error) *MockLakeviewInterface_ListAll_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockLakeviewInterface_ListAll_Call) RunAndReturn(run func(context.Context, dashboards.ListDashboardsRequest) ([]dashboards.Dashboard, error)) *MockLakeviewInterface_ListAll_Call { + _c.Call.Return(run) + return _c +} + // ListSchedules provides a mock function with given fields: ctx, request func (_m *MockLakeviewInterface) ListSchedules(ctx context.Context, request dashboards.ListSchedulesRequest) listing.Iterator[dashboards.Schedule] { ret := _m.Called(ctx, request) diff --git a/experimental/mocks/service/serving/mock_apps_interface.go b/experimental/mocks/service/serving/mock_apps_interface.go index 7ebbff688..ddd2c9954 100644 --- a/experimental/mocks/service/serving/mock_apps_interface.go +++ b/experimental/mocks/service/serving/mock_apps_interface.go @@ -1065,6 +1065,65 @@ func (_c *MockAppsInterface_ListDeploymentsByAppName_Call) RunAndReturn(run func return _c } +// Start provides a mock function with given fields: ctx, request +func (_m *MockAppsInterface) Start(ctx context.Context, request serving.StartAppRequest) (*serving.AppDeployment, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 *serving.AppDeployment + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, serving.StartAppRequest) (*serving.AppDeployment, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, serving.StartAppRequest) *serving.AppDeployment); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*serving.AppDeployment) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, serving.StartAppRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAppsInterface_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type MockAppsInterface_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +// - ctx context.Context +// - request serving.StartAppRequest +func (_e *MockAppsInterface_Expecter) Start(ctx interface{}, request interface{}) *MockAppsInterface_Start_Call { + return &MockAppsInterface_Start_Call{Call: _e.mock.On("Start", ctx, request)} +} + +func (_c *MockAppsInterface_Start_Call) Run(run func(ctx context.Context, request serving.StartAppRequest)) *MockAppsInterface_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(serving.StartAppRequest)) + }) + return _c +} + +func (_c *MockAppsInterface_Start_Call) Return(_a0 *serving.AppDeployment, _a1 error) *MockAppsInterface_Start_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAppsInterface_Start_Call) RunAndReturn(run func(context.Context, serving.StartAppRequest) (*serving.AppDeployment, error)) 
*MockAppsInterface_Start_Call { + _c.Call.Return(run) + return _c +} + // Stop provides a mock function with given fields: ctx, request func (_m *MockAppsInterface) Stop(ctx context.Context, request serving.StopAppRequest) error { ret := _m.Called(ctx, request) diff --git a/service/catalog/model.go b/service/catalog/model.go index 3f9a6be44..6c03eddc1 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -955,9 +955,10 @@ func (f *CreateFunctionSqlDataAccess) Type() string { type CreateMetastore struct { // The user-specified name of the metastore. Name string `json:"name"` - // Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). If - // this field is omitted, the region of the workspace receiving the request - // will be used. + // Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). + // The field can be omitted in the __workspace-level__ __API__ but not in + // the __account-level__ __API__. If this field is omitted, the region of + // the workspace receiving the request will be used. Region string `json:"region,omitempty"` // The storage root URL for metastore StorageRoot string `json:"storage_root,omitempty"` @@ -2474,16 +2475,15 @@ type ListCatalogsRequest struct { // Whether to include catalogs in the response for which the principal can // only access selective metadata for IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` - // Maximum number of catalogs to return. - If not set, all valid catalogs - // are returned (not recommended). - when set to a value greater than 0, the - // page length is the minimum of this value and a server configured value; - - // when set to 0, the page length is set to a server configured value - // (recommended); - when set to a value less than 0, an invalid parameter - // error is returned; - Note: The number of returned catalogs might be less - // than the specified max_results size, even reaching zero. Reaching zero - // does not necessarily signify reaching the end. The definitive indication + // Maximum number of catalogs to return. - when set to 0, the page length is + // set to a server configured value (recommended); - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to a value less than 0, an invalid parameter + // error is returned; - If not set, all valid catalogs are returned (not + // recommended). - Note: The number of returned catalogs might be less than + // the specified max_results size, even zero. The only definitive indication // that no further catalogs can be fetched is when the next_page_token is - // unset from response. + // unset from the response. MaxResults int `json:"-" url:"max_results,omitempty"` // Opaque pagination token to go to next page based on previous query. 
PageToken string `json:"-" url:"page_token,omitempty"` @@ -3682,6 +3682,8 @@ type OnlineTable struct { Spec *OnlineTableSpec `json:"spec,omitempty"` // Online Table status Status *OnlineTableStatus `json:"status,omitempty"` + // Data serving REST API URL for this table + TableServingUrl string `json:"table_serving_url,omitempty"` ForceSendFields []string `json:"-"` } @@ -3939,8 +3941,6 @@ const PrivilegeSelect Privilege = `SELECT` const PrivilegeSetSharePermission Privilege = `SET_SHARE_PERMISSION` -const PrivilegeSingleUserAccess Privilege = `SINGLE_USER_ACCESS` - const PrivilegeUsage Privilege = `USAGE` const PrivilegeUseCatalog Privilege = `USE_CATALOG` @@ -3971,11 +3971,11 @@ func (f *Privilege) String() string { // Set raw string value and validate it against allowed values func (f *Privilege) Set(v string) error { switch v { - case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `SINGLE_USER_ACCESS`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: *f = Privilege(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "SINGLE_USER_ACCESS", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", 
"CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) } } diff --git a/service/compute/model.go b/service/compute/model.go index 3f4fba20a..7a7f04464 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2355,9 +2355,9 @@ type EditPolicyResponse struct { type EditResponse struct { } -// The a environment entity used to preserve serverless environment side panel -// and jobs' environment for non-notebook task. In this minimal environment -// spec, only pip dependencies are supported. Next ID: 5 +// The environment entity used to preserve serverless environment side panel and +// jobs' environment for non-notebook task. In this minimal environment spec, +// only pip dependencies are supported. type Environment struct { // Client version used by the environment The client is the user-facing // environment of the runtime. Each client comes with a specific set of diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 3e23829a1..96e0e82b7 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -76,6 +76,16 @@ type LakeviewInterface interface { // Get schedule subscription. GetSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) (*Subscription, error) + // List dashboards. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] + + // List dashboards. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) + // List dashboard schedules. // // This method is generated by Databricks SDK Code Generator. @@ -275,6 +285,41 @@ func (a *LakeviewAPI) GetSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId }) } +// List dashboards. +// +// This method is generated by Databricks SDK Code Generator. +func (a *LakeviewAPI) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { + + getNextPage := func(ctx context.Context, req ListDashboardsRequest) (*ListDashboardsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.impl.List(ctx, req) + } + getItems := func(resp *ListDashboardsResponse) []Dashboard { + return resp.Dashboards + } + getNextReq := func(resp *ListDashboardsResponse) *ListDashboardsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List dashboards. +// +// This method is generated by Databricks SDK Code Generator. +func (a *LakeviewAPI) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Dashboard](ctx, iterator) +} + // List dashboard schedules. 
// // This method is generated by Databricks SDK Code Generator. diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index ea15b310d..46a756120 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -99,6 +99,15 @@ func (a *lakeviewImpl) GetSubscription(ctx context.Context, request GetSubscript return &subscription, err } +func (a *lakeviewImpl) List(ctx context.Context, request ListDashboardsRequest) (*ListDashboardsResponse, error) { + var listDashboardsResponse ListDashboardsResponse + path := "/api/2.0/lakeview/dashboards" + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listDashboardsResponse) + return &listDashboardsResponse, err +} + func (a *lakeviewImpl) ListSchedules(ctx context.Context, request ListSchedulesRequest) (*ListSchedulesResponse, error) { var listSchedulesResponse ListSchedulesResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules", request.DashboardId) diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index e8e491caa..94edde5ef 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -44,6 +44,11 @@ type LakeviewService interface { // Get schedule subscription. GetSubscription(ctx context.Context, request GetSubscriptionRequest) (*Subscription, error) + // List dashboards. + // + // Use ListAll() to get all Dashboard instances, which will iterate over every result page. + List(ctx context.Context, request ListDashboardsRequest) (*ListDashboardsResponse, error) + // List dashboard schedules. // // Use ListSchedulesAll() to get all Schedule instances, which will iterate over every result page. diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 3b8d07f52..ad8537099 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -110,6 +110,33 @@ func (s Dashboard) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type DashboardView string + +const DashboardViewDashboardViewBasic DashboardView = `DASHBOARD_VIEW_BASIC` + +const DashboardViewDashboardViewFull DashboardView = `DASHBOARD_VIEW_FULL` + +// String representation for [fmt.Print] +func (f *DashboardView) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DashboardView) Set(v string) error { + switch v { + case `DASHBOARD_VIEW_BASIC`, `DASHBOARD_VIEW_FULL`: + *f = DashboardView(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DASHBOARD_VIEW_BASIC", "DASHBOARD_VIEW_FULL"`, v) + } +} + +// Type always returns DashboardView to satisfy [pflag.Value] interface +func (f *DashboardView) Type() string { + return "DashboardView" +} + // Delete dashboard schedule type DeleteScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. @@ -217,6 +244,49 @@ func (f *LifecycleState) Type() string { return "LifecycleState" } +// List dashboards +type ListDashboardsRequest struct { + // The number of dashboards to return per page. + PageSize int `json:"-" url:"page_size,omitempty"` + // A page token, received from a previous `ListDashboards` call. This token + // can be used to retrieve the subsequent page. + PageToken string `json:"-" url:"page_token,omitempty"` + // The flag to include dashboards located in the trash. If unspecified, only + // active dashboards will be returned. 
+ ShowTrashed bool `json:"-" url:"show_trashed,omitempty"` + // Indicates whether to include all metadata from the dashboard in the + // response. If unset, the response defaults to `DASHBOARD_VIEW_BASIC` which + // only includes summary metadata from the dashboard. + View DashboardView `json:"-" url:"view,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListDashboardsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListDashboardsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListDashboardsResponse struct { + Dashboards []Dashboard `json:"dashboards,omitempty"` + // A token, which can be sent as `page_token` to retrieve the next page. If + // this field is omitted, there are no subsequent dashboards. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListDashboardsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListDashboardsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // List dashboard schedules type ListSchedulesRequest struct { // UUID identifying the dashboard to which the schedule belongs. diff --git a/service/iam/model.go b/service/iam/model.go index 55e4ccf6b..52d3de756 100755 --- a/service/iam/model.go +++ b/service/iam/model.go @@ -1231,7 +1231,7 @@ type ServicePrincipal struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks service principal ID. - Id string `json:"id,omitempty"` + Id string `json:"id,omitempty" url:"-"` // Corresponds to AWS instance profile/arn role. Roles []ComplexValue `json:"roles,omitempty"` // The schema of the List response. @@ -1317,7 +1317,7 @@ type User struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks user ID. This is automatically set by Databricks. Any value // provided by the client will be ignored. - Id string `json:"id,omitempty" url:"-"` + Id string `json:"id,omitempty"` Name *Name `json:"name,omitempty"` // Corresponds to AWS instance profile/arn role. diff --git a/service/jobs/model.go b/service/jobs/model.go index 7a17a360d..bec612f48 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -1053,6 +1053,14 @@ type JobEmailNotifications struct { // specified on job creation, reset, or update, the list is empty, and // notifications are not sent. OnStart []string `json:"on_start,omitempty"` + // A list of email addresses to notify when any streaming backlog thresholds + // are exceeded for any stream. Streaming backlog thresholds can be set in + // the `health` field using the following metrics: + // `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, + // `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is + // based on the 10-minute average of these metrics. If the issue persists, + // notifications are resent every 30 minutes. + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` // A list of email addresses to be notified when a run successfully // completes. A run is considered to have completed successfully if it ends // with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If @@ -1074,9 +1082,9 @@ func (s JobEmailNotifications) MarshalJSON() ([]byte, error) { type JobEnvironment struct { // The key of an environment. It has to be unique within a job. 
EnvironmentKey string `json:"environment_key"` - // The a environment entity used to preserve serverless environment side - // panel and jobs' environment for non-notebook task. In this minimal - // environment spec, only pip dependencies are supported. Next ID: 5 + // The environment entity used to preserve serverless environment side panel + // and jobs' environment for non-notebook task. In this minimal environment + // spec, only pip dependencies are supported. Spec *compute.Environment `json:"spec,omitempty"` } @@ -1407,10 +1415,37 @@ func (f *JobSourceDirtyState) Type() string { // Specifies the health metric that is being evaluated for a particular health // rule. +// +// * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * +// `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting +// to be consumed across all streams. This metric is in Private Preview. * +// `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all +// streams. This metric is in Private Preview. * `STREAMING_BACKLOG_SECONDS`: An +// estimate of the maximum consumer delay across all streams. This metric is in +// Private Preview. * `STREAMING_BACKLOG_FILES`: An estimate of the maximum +// number of outstanding files across all streams. This metric is in Private +// Preview. type JobsHealthMetric string +// Expected total time for a run in seconds. const JobsHealthMetricRunDurationSeconds JobsHealthMetric = `RUN_DURATION_SECONDS` +// An estimate of the maximum bytes of data waiting to be consumed across all +// streams. This metric is in Private Preview. +const JobsHealthMetricStreamingBacklogBytes JobsHealthMetric = `STREAMING_BACKLOG_BYTES` + +// An estimate of the maximum number of outstanding files across all streams. +// This metric is in Private Preview. +const JobsHealthMetricStreamingBacklogFiles JobsHealthMetric = `STREAMING_BACKLOG_FILES` + +// An estimate of the maximum offset lag across all streams. This metric is in +// Private Preview. +const JobsHealthMetricStreamingBacklogRecords JobsHealthMetric = `STREAMING_BACKLOG_RECORDS` + +// An estimate of the maximum consumer delay across all streams. This metric is +// in Private Preview. +const JobsHealthMetricStreamingBacklogSeconds JobsHealthMetric = `STREAMING_BACKLOG_SECONDS` + // String representation for [fmt.Print] func (f *JobsHealthMetric) String() string { return string(*f) @@ -1419,11 +1454,11 @@ func (f *JobsHealthMetric) String() string { // Set raw string value and validate it against allowed values func (f *JobsHealthMetric) Set(v string) error { switch v { - case `RUN_DURATION_SECONDS`: + case `RUN_DURATION_SECONDS`, `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_FILES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`: *f = JobsHealthMetric(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "RUN_DURATION_SECONDS"`, v) + return fmt.Errorf(`value "%s" is not one of "RUN_DURATION_SECONDS", "STREAMING_BACKLOG_BYTES", "STREAMING_BACKLOG_FILES", "STREAMING_BACKLOG_RECORDS", "STREAMING_BACKLOG_SECONDS"`, v) } } @@ -1462,6 +1497,16 @@ func (f *JobsHealthOperator) Type() string { type JobsHealthRule struct { // Specifies the health metric that is being evaluated for a particular // health rule. + // + // * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * + // `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data + // waiting to be consumed across all streams. This metric is in Private + // Preview. 
* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset + // lag across all streams. This metric is in Private Preview. * + // `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay + // across all streams. This metric is in Private Preview. * + // `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of + // outstanding files across all streams. This metric is in Private Preview. Metric JobsHealthMetric `json:"metric"` // Specifies the operator used to compare the health metric value with the // specified threshold. @@ -1705,8 +1750,9 @@ func (f *PauseStatus) Type() string { } type PeriodicTriggerConfiguration struct { + // The interval at which the trigger should run. Interval int `json:"interval"` - + // The unit of time for the interval. Unit PeriodicTriggerConfigurationTimeUnit `json:"unit"` } @@ -2872,6 +2918,10 @@ type RunTask struct { // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime int64 `json:"end_time,omitempty"` + // The key that references an environment spec in a job. This field is + // required for Python script, Python wheel and dbt tasks when using + // serverless compute. + EnvironmentKey string `json:"environment_key,omitempty"` // The time in milliseconds it took to execute the commands in the JAR or // notebook until they completed, failed, timed out, were cancelled, or // encountered an unexpected error. The duration of a task run is the sum of @@ -3447,17 +3497,12 @@ func (s SqlTaskSubscription) MarshalJSON() ([]byte, error) { type SubmitRun struct { // List of permissions to set on the job. AccessControlList []iam.AccessControlRequest `json:"access_control_list,omitempty"` - // If condition_task, specifies a condition with an outcome that can be used - // to control the execution of other tasks. Does not require a cluster to - // execute and does not support retries or notifications. - ConditionTask *ConditionTask `json:"condition_task,omitempty"` - // If dbt_task, indicates that this must execute a dbt task. It requires - // both Databricks SQL and the ability to use a serverless or a pro SQL - // warehouse. - DbtTask *DbtTask `json:"dbt_task,omitempty"` // An optional set of email addresses notified when the run begins or // completes. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` + // A list of task execution environment specifications that can be + // referenced by tasks of this run. + Environments []JobEnvironment `json:"environments,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. @@ -3487,50 +3532,17 @@ type SubmitRun struct { // // [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html IdempotencyToken string `json:"idempotency_token,omitempty"` - // If notebook_task, indicates that this task must run a notebook. This - // field may not be specified in conjunction with spark_jar_task. - NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications // to each of the `email_notifications` and `webhook_notifications` for this // run. NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` - // If pipeline_task, indicates that this task must execute a Pipeline. 
- PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` - // If python_wheel_task, indicates that this job must execute a PythonWheel. - PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // The queue settings of the one-time run. Queue *QueueSettings `json:"queue,omitempty"` // Specifies the user or service principal that the job runs as. If not // specified, the job runs as the user who submits the request. RunAs *JobRunAs `json:"run_as,omitempty"` - // If run_job_task, indicates that this task must execute another job. - RunJobTask *RunJobTask `json:"run_job_task,omitempty"` // An optional name for the run. The default value is `Untitled`. RunName string `json:"run_name,omitempty"` - // If spark_jar_task, indicates that this task must run a JAR. - SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"` - // If spark_python_task, indicates that this task must run a Python file. - SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"` - // If `spark_submit_task`, indicates that this task must be launched by the - // spark submit script. This task can run only on new clusters. - // - // In the `new_cluster` specification, `libraries` and `spark_conf` are not - // supported. Instead, use `--jars` and `--py-files` to add Java and Python - // libraries and `--conf` to set the Spark configurations. - // - // `master`, `deploy-mode`, and `executor-cores` are automatically - // configured by Databricks; you _cannot_ specify them in parameters. - // - // By default, the Spark submit job uses all available memory (excluding - // reserved memory for Databricks services). You can set `--driver-memory`, - // and `--executor-memory` to a smaller value to leave some room for - // off-heap usage. - // - // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 - // paths. - SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` - // If sql_task, indicates that this job must execute a SQL task. - SqlTask *SqlTask `json:"sql_task,omitempty"` Tasks []SubmitTask `json:"tasks,omitempty"` // An optional timeout applied to each run of this job. A value of `0` means @@ -3572,6 +3584,10 @@ type SubmitTask struct { // to control the execution of other tasks. Does not require a cluster to // execute and does not support retries or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` + // If dbt_task, indicates that this must execute a dbt task. It requires + // both Databricks SQL and the ability to use a serverless or a pro SQL + // warehouse. + DbtTask *DbtTask `json:"dbt_task,omitempty"` // An optional array of objects specifying the dependency graph of the task. // All tasks specified in this field must complete successfully before // executing this task. The key is `task_key`, and the value is the name @@ -3582,6 +3598,10 @@ type SubmitTask struct { // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` + // The key that references an environment spec in a job. This field is + // required for Python script, Python wheel and dbt tasks when using + // serverless compute. + EnvironmentKey string `json:"environment_key,omitempty"` // If existing_cluster_id, the ID of an existing cluster that is used for // all runs. When running jobs or tasks on an existing cluster, you may need // to manually restart the cluster if it stops responding. 
We suggest @@ -3856,6 +3876,14 @@ type TaskEmailNotifications struct { // specified on job creation, reset, or update, the list is empty, and // notifications are not sent. OnStart []string `json:"on_start,omitempty"` + // A list of email addresses to notify when any streaming backlog thresholds + // are exceeded for any stream. Streaming backlog thresholds can be set in + // the `health` field using the following metrics: + // `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, + // `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is + // based on the 10-minute average of these metrics. If the issue persists, + // notifications are resent every 30 minutes. + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` // A list of email addresses to be notified when a run successfully // completes. A run is considered to have completed successfully if it ends // with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If @@ -3918,7 +3946,7 @@ type TriggerSettings struct { FileArrival *FileArrivalTriggerConfiguration `json:"file_arrival,omitempty"` // Whether this trigger is paused or not. PauseStatus PauseStatus `json:"pause_status,omitempty"` - + // Periodic trigger settings. Periodic *PeriodicTriggerConfiguration `json:"periodic,omitempty"` // Old table trigger settings name. Deprecated in favor of `table_update`. Table *TableUpdateTriggerConfiguration `json:"table,omitempty"` @@ -4104,6 +4132,15 @@ type WebhookNotifications struct { // An optional list of system notification IDs to call when the run starts. // A maximum of 3 destinations can be specified for the `on_start` property. OnStart []Webhook `json:"on_start,omitempty"` + // An optional list of system notification IDs to call when any streaming + // backlog thresholds are exceeded for any stream. Streaming backlog + // thresholds can be set in the `health` field using the following metrics: + // `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, + // `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is + // based on the 10-minute average of these metrics. If the issue persists, + // notifications are resent every 30 minutes. A maximum of 3 destinations + // can be specified for the `on_streaming_backlog_exceeded` property. + OnStreamingBacklogExceeded []Webhook `json:"on_streaming_backlog_exceeded,omitempty"` // An optional list of system notification IDs to call when the run // completes successfully. A maximum of 3 destinations can be specified for // the `on_success` property. 
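Taken together, the jobs changes above let a job alert on streaming backlog and run on a fixed cadence. Below is a sketch of the relevant settings fragments. The `OnStreamingBacklogExceeded` fields, streaming-backlog metrics, and `PeriodicTriggerConfiguration` come from this diff; the `jobs.JobsHealthRules` shape, `JobsHealthOperatorGreaterThan`, and the `PeriodicTriggerConfigurationTimeUnitHours` constant name are assumed to follow the SDK's existing conventions, and the email address is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

// backlogGuardrails wires the new streaming-backlog surface together: a
// health rule on one of the new metrics, the matching
// on_streaming_backlog_exceeded email list, and the new periodic trigger.
func backlogGuardrails() (*jobs.JobsHealthRules, *jobs.JobEmailNotifications, *jobs.TriggerSettings) {
	health := &jobs.JobsHealthRules{
		Rules: []jobs.JobsHealthRule{{
			// Fires when the 10-minute average of the maximum consumer
			// delay across all streams exceeds 600 seconds.
			Metric: jobs.JobsHealthMetricStreamingBacklogSeconds,
			Op:     jobs.JobsHealthOperatorGreaterThan,
			Value:  600,
		}},
	}
	emails := &jobs.JobEmailNotifications{
		// Per the field docs, notifications are resent every 30 minutes
		// while the issue persists.
		OnStreamingBacklogExceeded: []string{"oncall@example.com"}, // hypothetical address
	}
	trigger := &jobs.TriggerSettings{
		// Periodic trigger added in this diff: run every 4 hours.
		Periodic: &jobs.PeriodicTriggerConfiguration{
			Interval: 4,
			Unit:     jobs.PeriodicTriggerConfigurationTimeUnitHours,
		},
	}
	return health, emails, trigger
}

func main() {
	health, emails, trigger := backlogGuardrails()
	fmt.Println(health, emails, trigger)
}
```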
diff --git a/service/marketplace/model.go b/service/marketplace/model.go index efc78aa57..968515d9d 100755 --- a/service/marketplace/model.go +++ b/service/marketplace/model.go @@ -1378,6 +1378,9 @@ type Listing struct { Detail *ListingDetail `json:"detail,omitempty"` Id string `json:"id,omitempty"` + // We cannot use ProviderListingSummary here since the same name already + // exists on the entity side of the state. + ProviderSummary *ProviderListingSummaryInfo `json:"provider_summary,omitempty"` // Next Number: 26 Summary ListingSummary `json:"summary"` @@ -1756,6 +1759,53 @@ type ProviderAnalyticsDashboard struct { Id string `json:"id"` } +type ProviderIconFile struct { + IconFileId string `json:"icon_file_id,omitempty"` + + IconFilePath string `json:"icon_file_path,omitempty"` + + IconType ProviderIconType `json:"icon_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ProviderIconFile) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ProviderIconFile) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ProviderIconType string + +const ProviderIconTypeDark ProviderIconType = `DARK` + +const ProviderIconTypePrimary ProviderIconType = `PRIMARY` + +const ProviderIconTypeProviderIconTypeUnspecified ProviderIconType = `PROVIDER_ICON_TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *ProviderIconType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ProviderIconType) Set(v string) error { + switch v { + case `DARK`, `PRIMARY`, `PROVIDER_ICON_TYPE_UNSPECIFIED`: + *f = ProviderIconType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DARK", "PRIMARY", "PROVIDER_ICON_TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns ProviderIconType to satisfy [pflag.Value] interface +func (f *ProviderIconType) Type() string { + return "ProviderIconType" +} + type ProviderInfo struct { BusinessContactEmail string `json:"business_contact_email"` @@ -1796,6 +1846,26 @@ func (s ProviderInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// We cannot use ProviderListingSummary here since the same name already exists +// on the entity side of the state. +type ProviderListingSummaryInfo struct { + Description string `json:"description,omitempty"` + + IconFiles []ProviderIconFile `json:"icon_files,omitempty"` + + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ProviderListingSummaryInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ProviderListingSummaryInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type RegionInfo struct { Cloud string `json:"cloud,omitempty"` diff --git a/service/serving/api.go b/service/serving/api.go index 64e30bc0d..06b12f339 100755 --- a/service/serving/api.go +++ b/service/serving/api.go @@ -133,6 +133,11 @@ type AppsInterface interface { // Lists all app deployments for the app with the supplied name. ListDeploymentsByAppName(ctx context.Context, appName string) (*ListAppDeploymentsResponse, error) + // Start an app. + // + // Start the last active deployment of the app in the workspace. + Start(ctx context.Context, request StartAppRequest) (*AppDeployment, error) + // Stop an app. // // Stops the active deployment of the app in the workspace. @@ -538,6 +543,13 @@ func (a *AppsAPI) ListDeploymentsByAppName(ctx context.Context, appName string) }) } +// Start an app.
+// +// Start the last active deployment of the app in the workspace. +func (a *AppsAPI) Start(ctx context.Context, request StartAppRequest) (*AppDeployment, error) { + return a.impl.Start(ctx, request) +} + // Stop an app. // // Stops the active deployment of the app in the workspace. diff --git a/service/serving/impl.go b/service/serving/impl.go index c267d2002..bfef540f9 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -89,6 +89,16 @@ func (a *appsImpl) ListDeployments(ctx context.Context, request ListAppDeploymen return &listAppDeploymentsResponse, err } +func (a *appsImpl) Start(ctx context.Context, request StartAppRequest) (*AppDeployment, error) { + var appDeployment AppDeployment + path := fmt.Sprintf("/api/2.0/preview/apps/%v/start", request.Name) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, request, &appDeployment) + return &appDeployment, err +} + func (a *appsImpl) Stop(ctx context.Context, request StopAppRequest) error { var stopAppResponse StopAppResponse path := fmt.Sprintf("/api/2.0/preview/apps/%v/stop", request.Name) diff --git a/service/serving/interface.go b/service/serving/interface.go index 61350c543..0c4766af3 100755 --- a/service/serving/interface.go +++ b/service/serving/interface.go @@ -56,6 +56,11 @@ type AppsService interface { // Use ListDeploymentsAll() to get all AppDeployment instances, which will iterate over every result page. ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) + // Start an app. + // + // Start the last active deployment of the app in the workspace. + Start(ctx context.Context, request StartAppRequest) (*AppDeployment, error) + // Stop an app. // // Stops the active deployment of the app in the workspace. diff --git a/service/serving/model.go b/service/serving/model.go index 472d7a34d..44b9bb9f5 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -298,16 +298,15 @@ func (s AppStatus) MarshalJSON() ([]byte, error) { type AutoCaptureConfigInput struct { // The name of the catalog in Unity Catalog. NOTE: On update, you cannot - // change the catalog name if it was already set. + // change the catalog name if the inference table is already enabled. CatalogName string `json:"catalog_name,omitempty"` - // If inference tables are enabled or not. NOTE: If you have already - // disabled payload logging once, you cannot enable again. + // Indicates whether the inference table is enabled. Enabled bool `json:"enabled,omitempty"` // The name of the schema in Unity Catalog. NOTE: On update, you cannot - // change the schema name if it was already set. + // change the schema name if the inference table is already enabled. SchemaName string `json:"schema_name,omitempty"` // The prefix of the table in Unity Catalog. NOTE: On update, you cannot - // change the prefix name if it was already set. + // change the prefix name if the inference table is already enabled. TableNamePrefix string `json:"table_name_prefix,omitempty"` ForceSendFields []string `json:"-"` @@ -324,7 +323,7 @@ func (s AutoCaptureConfigInput) MarshalJSON() ([]byte, error) { type AutoCaptureConfigOutput struct { // The name of the catalog in Unity Catalog. CatalogName string `json:"catalog_name,omitempty"` - // If inference tables are enabled or not. + // Indicates whether the inference table is enabled. 
Enabled bool `json:"enabled,omitempty"` // The name of the schema in Unity Catalog. SchemaName string `json:"schema_name,omitempty"` @@ -2013,6 +2012,11 @@ type ServingEndpointPermissionsRequest struct { ServingEndpointId string `json:"-" url:"-"` } +type StartAppRequest struct { + // The name of the app. + Name string `json:"-" url:"-"` +} + type StopAppRequest struct { // The name of the app. Name string `json:"-" url:"-"` diff --git a/service/settings/model.go b/service/settings/model.go index 7c096b930..418e66ab5 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -247,6 +247,8 @@ type ComplianceStandard string const ComplianceStandardComplianceStandardUnspecified ComplianceStandard = `COMPLIANCE_STANDARD_UNSPECIFIED` +const ComplianceStandardCyberEssentialPlus ComplianceStandard = `CYBER_ESSENTIAL_PLUS` + const ComplianceStandardFedrampHigh ComplianceStandard = `FEDRAMP_HIGH` const ComplianceStandardFedrampIl5 ComplianceStandard = `FEDRAMP_IL5` @@ -271,11 +273,11 @@ func (f *ComplianceStandard) String() string { // Set raw string value and validate it against allowed values func (f *ComplianceStandard) Set(v string) error { switch v { - case `COMPLIANCE_STANDARD_UNSPECIFIED`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `IRAP_PROTECTED`, `ITAR_EAR`, `NONE`, `PCI_DSS`: + case `COMPLIANCE_STANDARD_UNSPECIFIED`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `IRAP_PROTECTED`, `ITAR_EAR`, `NONE`, `PCI_DSS`: *f = ComplianceStandard(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "COMPLIANCE_STANDARD_UNSPECIFIED", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "IRAP_PROTECTED", "ITAR_EAR", "NONE", "PCI_DSS"`, v) + return fmt.Errorf(`value "%s" is not one of "COMPLIANCE_STANDARD_UNSPECIFIED", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "IRAP_PROTECTED", "ITAR_EAR", "NONE", "PCI_DSS"`, v) } } diff --git a/service/sharing/model.go b/service/sharing/model.go index 617fefb13..f6385d15d 100755 --- a/service/sharing/model.go +++ b/service/sharing/model.go @@ -749,8 +749,6 @@ const PrivilegeSelect Privilege = `SELECT` const PrivilegeSetSharePermission Privilege = `SET_SHARE_PERMISSION` -const PrivilegeSingleUserAccess Privilege = `SINGLE_USER_ACCESS` - const PrivilegeUsage Privilege = `USAGE` const PrivilegeUseCatalog Privilege = `USE_CATALOG` @@ -781,11 +779,11 @@ func (f *Privilege) String() string { // Set raw string value and validate it against allowed values func (f *Privilege) Set(v string) error { switch v { - case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `SINGLE_USER_ACCESS`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, 
`CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: *f = Privilege(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "SINGLE_USER_ACCESS", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) } } diff --git a/service/sql/api.go b/service/sql/api.go index 09a8f0547..bd18788f6 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -29,35 +29,65 @@ type AlertsInterface interface { // Creates an alert. An alert is a Databricks SQL object that periodically runs // a query, evaluates a condition of its result, and notifies users or // notification destinations if the condition was met. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Create(ctx context.Context, request CreateAlert) (*Alert, error) // Delete an alert. // // Deletes an alert. Deleted alerts are no longer accessible and cannot be - // restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to + // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to // the trash. + // + // **Note**: A new version of the Databricks SQL API will soon be available. 
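Both enum changes surface through the validating `Set` setters shown above. A small sketch: the new `CYBER_ESSENTIAL_PLUS` standard now round-trips, while the removed `SINGLE_USER_ACCESS` privilege falls through to the "is not one of" error:

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/settings"
	"github.com/databricks/databricks-sdk-go/service/sharing"
)

func main() {
	// Accepted as of this release.
	var cs settings.ComplianceStandard
	if err := cs.Set("CYBER_ESSENTIAL_PLUS"); err != nil {
		panic(err)
	}
	fmt.Println(cs.String()) // CYBER_ESSENTIAL_PLUS

	// Removed in this release: Set now returns the validation error.
	var p sharing.Privilege
	if err := p.Set("SINGLE_USER_ACCESS"); err != nil {
		fmt.Println(err)
	}
}
```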
+ // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Delete(ctx context.Context, request DeleteAlertRequest) error // Delete an alert. // // Deletes an alert. Deleted alerts are no longer accessible and cannot be - // restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to + // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to // the trash. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources DeleteByAlertId(ctx context.Context, alertId string) error // Get an alert. // // Gets an alert. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Get(ctx context.Context, request GetAlertRequest) (*Alert, error) // Get an alert. // // Gets an alert. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources GetByAlertId(ctx context.Context, alertId string) (*Alert, error) // Get alerts. // // Gets a list of alerts. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources List(ctx context.Context) ([]Alert, error) // AlertNameToIdMap calls [AlertsAPI.List] and creates a map of results with [Alert].Name as key and [Alert].Id as value. @@ -81,6 +111,11 @@ type AlertsInterface interface { // Update an alert. // // Updates an alert. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Update(ctx context.Context, request EditAlert) error } @@ -97,6 +132,11 @@ func NewAlerts(client *client.DatabricksClient) *AlertsAPI { // of its result, and notifies one or more users and/or notification // destinations if the condition was met. Alerts can be scheduled using the // `sql_task` type of the Jobs API, e.g. :method:jobs/create. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources type AlertsAPI struct { // impl contains low-level REST API interface, that could be overridden // through WithImpl(AlertsService) @@ -122,6 +162,11 @@ func (a *AlertsAPI) Impl() AlertsService { // Creates an alert. An alert is a Databricks SQL object that periodically runs // a query, evaluates a condition of its result, and notifies users or // notification destinations if the condition was met. +// +// **Note**: A new version of the Databricks SQL API will soon be available. 
+// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *AlertsAPI) Create(ctx context.Context, request CreateAlert) (*Alert, error) { return a.impl.Create(ctx, request) } @@ -129,8 +174,13 @@ func (a *AlertsAPI) Create(ctx context.Context, request CreateAlert) (*Alert, er // Delete an alert. // // Deletes an alert. Deleted alerts are no longer accessible and cannot be -// restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to +// restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to // the trash. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *AlertsAPI) Delete(ctx context.Context, request DeleteAlertRequest) error { return a.impl.Delete(ctx, request) } @@ -138,8 +188,13 @@ func (a *AlertsAPI) Delete(ctx context.Context, request DeleteAlertRequest) erro // Delete an alert. // // Deletes an alert. Deleted alerts are no longer accessible and cannot be -// restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to +// restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to // the trash. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *AlertsAPI) DeleteByAlertId(ctx context.Context, alertId string) error { return a.impl.Delete(ctx, DeleteAlertRequest{ AlertId: alertId, @@ -149,6 +204,11 @@ func (a *AlertsAPI) DeleteByAlertId(ctx context.Context, alertId string) error { // Get an alert. // // Gets an alert. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *AlertsAPI) Get(ctx context.Context, request GetAlertRequest) (*Alert, error) { return a.impl.Get(ctx, request) } @@ -156,6 +216,11 @@ func (a *AlertsAPI) Get(ctx context.Context, request GetAlertRequest) (*Alert, e // Get an alert. // // Gets an alert. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *AlertsAPI) GetByAlertId(ctx context.Context, alertId string) (*Alert, error) { return a.impl.Get(ctx, GetAlertRequest{ AlertId: alertId, @@ -165,6 +230,11 @@ func (a *AlertsAPI) GetByAlertId(ctx context.Context, alertId string) (*Alert, e // Get alerts. // // Gets a list of alerts. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *AlertsAPI) List(ctx context.Context) ([]Alert, error) { return a.impl.List(ctx) } @@ -225,6 +295,11 @@ func (a *AlertsAPI) GetByName(ctx context.Context, name string) (*Alert, error) // Update an alert. // // Updates an alert. 
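All of these alert methods keep working while the deprecation note applies. A sketch of the common list-then-fetch pattern; the `sketches` package name is arbitrary, and `w` is a `*databricks.WorkspaceClient` built as in the app example above:

```go
package sketches

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

// printAlerts lists every alert, then re-fetches each one through the
// GetByAlertId convenience wrapper over Get.
func printAlerts(ctx context.Context, w *databricks.WorkspaceClient) error {
	alerts, err := w.Alerts.List(ctx)
	if err != nil {
		return err
	}
	for _, a := range alerts {
		full, err := w.Alerts.GetByAlertId(ctx, a.Id)
		if err != nil {
			return err
		}
		fmt.Println(full.Name)
	}
	return nil
}
```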
+// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *AlertsAPI) Update(ctx context.Context, request EditAlert) error { return a.impl.Update(ctx, request) } @@ -346,8 +421,8 @@ type DashboardsInterface interface { // // Fetch a paginated list of dashboard objects. // - // ### **Warning: Calling this API concurrently 10 or more times could result in - // throttling, service degradation, or a temporary ban.** + // **Warning**: Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. // // This method is generated by Databricks SDK Code Generator. List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] @@ -356,8 +431,8 @@ type DashboardsInterface interface { // // Fetch a paginated list of dashboard objects. // - // ### **Warning: Calling this API concurrently 10 or more times could result in - // throttling, service degradation, or a temporary ban.** + // **Warning**: Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. // // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) @@ -473,8 +548,8 @@ func (a *DashboardsAPI) GetByDashboardId(ctx context.Context, dashboardId string // // Fetch a paginated list of dashboard objects. // -// ### **Warning: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban.** +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. // // This method is generated by Databricks SDK Code Generator. func (a *DashboardsAPI) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { @@ -512,8 +587,8 @@ func (a *DashboardsAPI) List(ctx context.Context, request ListDashboardsRequest) // // Fetch a paginated list of dashboard objects. // -// ### **Warning: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban.** +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. // // This method is generated by Databricks SDK Code Generator. func (a *DashboardsAPI) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { @@ -607,6 +682,11 @@ type DataSourcesInterface interface { // Retrieves a full list of SQL warehouses available in this workspace. All // fields that appear in this API response are enumerated for clarity. However, // you need only a SQL warehouse's `id` to create new queries against it. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources List(ctx context.Context) ([]DataSource, error) // DataSourceNameToIdMap calls [DataSourcesAPI.List] and creates a map of results with [DataSource].Name as key and [DataSource].Id as value. @@ -646,6 +726,11 @@ func NewDataSources(client *client.DatabricksClient) *DataSourcesAPI { // warehouses in your workspace. 
We advise you to use any text editor, REST // client, or `grep` to search the response from this API for the name of your // SQL warehouse as it appears in Databricks SQL. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources type DataSourcesAPI struct { // impl contains low-level REST API interface, that could be overridden // through WithImpl(DataSourcesService) @@ -671,6 +756,11 @@ func (a *DataSourcesAPI) Impl() DataSourcesService { // Retrieves a full list of SQL warehouses available in this workspace. All // fields that appear in this API response are enumerated for clarity. However, // you need only a SQL warehouse's `id` to create new queries against it. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *DataSourcesAPI) List(ctx context.Context) ([]DataSource, error) { return a.impl.List(ctx) } @@ -742,24 +832,44 @@ type DbsqlPermissionsInterface interface { // // Gets a JSON representation of the access control list (ACL) for a specified // object. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error) // Get object ACL. // // Gets a JSON representation of the access control list (ACL) for a specified // object. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error) // Set object ACL. // // Sets the access control list (ACL) for a specified object. This operation // will complete rewrite the ACL. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Set(ctx context.Context, request SetRequest) (*SetResponse, error) // Transfer object ownership. // // Transfers ownership of a dashboard, query, or alert to an active user. // Requires an admin API key. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error) } @@ -784,6 +894,11 @@ func NewDbsqlPermissions(client *client.DatabricksClient) *DbsqlPermissionsAPI { // // - `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify // permissions (superset of `CAN_RUN`) +// +// **Note**: A new version of the Databricks SQL API will soon be available. 
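The lookup the doc comment recommends doing with `grep` is straightforward in code. A sketch that resolves a warehouse display name to its data source ID; both the `Name` and `Id` fields are confirmed by the `DataSourceNameToIdMap` helper mentioned earlier:

```go
package sketches

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

// dataSourceIdFor returns the data source ID for the SQL warehouse with
// the given display name. This ID is distinct from the warehouse ID.
func dataSourceIdFor(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) {
	sources, err := w.DataSources.List(ctx)
	if err != nil {
		return "", err
	}
	for _, ds := range sources {
		if ds.Name == name {
			return ds.Id, nil
		}
	}
	return "", fmt.Errorf("no data source named %q", name)
}
```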
+// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources type DbsqlPermissionsAPI struct { // impl contains low-level REST API interface, that could be overridden // through WithImpl(DbsqlPermissionsService) @@ -808,6 +923,11 @@ func (a *DbsqlPermissionsAPI) Impl() DbsqlPermissionsService { // // Gets a JSON representation of the access control list (ACL) for a specified // object. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *DbsqlPermissionsAPI) Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error) { return a.impl.Get(ctx, request) } @@ -816,6 +936,11 @@ func (a *DbsqlPermissionsAPI) Get(ctx context.Context, request GetDbsqlPermissio // // Gets a JSON representation of the access control list (ACL) for a specified // object. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *DbsqlPermissionsAPI) GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error) { return a.impl.Get(ctx, GetDbsqlPermissionRequest{ ObjectType: objectType, @@ -827,6 +952,11 @@ func (a *DbsqlPermissionsAPI) GetByObjectTypeAndObjectId(ctx context.Context, ob // // Sets the access control list (ACL) for a specified object. This operation // will complete rewrite the ACL. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *DbsqlPermissionsAPI) Set(ctx context.Context, request SetRequest) (*SetResponse, error) { return a.impl.Set(ctx, request) } @@ -835,6 +965,11 @@ func (a *DbsqlPermissionsAPI) Set(ctx context.Context, request SetRequest) (*Set // // Transfers ownership of a dashboard, query, or alert to an active user. // Requires an admin API key. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *DbsqlPermissionsAPI) TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error) { return a.impl.TransferOwnership(ctx, request) } @@ -860,6 +995,11 @@ type QueriesInterface interface { // existing query. // // **Note**: You cannot add a visualization until you create the query. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Create(ctx context.Context, request QueryPostContent) (*Query, error) // Delete a query. @@ -867,6 +1007,11 @@ type QueriesInterface interface { // Moves a query to the trash. Trashed queries immediately disappear from // searches and list views, and they cannot be used for alerts. The trash is // deleted after 30 days. 
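A sketch of reading an object ACL through the by-type-and-ID wrapper above. The `sql.ObjectTypePluralQueries` constant name is an assumption based on this package's enum naming convention and is not shown in this diff:

```go
package sketches

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sql"
)

// showQueryAcl prints the access control list for a single query object.
func showQueryAcl(ctx context.Context, w *databricks.WorkspaceClient, queryId string) error {
	acl, err := w.DbsqlPermissions.GetByObjectTypeAndObjectId(ctx, sql.ObjectTypePluralQueries, queryId)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", acl)
	return nil
}
```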
+ // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Delete(ctx context.Context, request DeleteQueryRequest) error // Delete a query. @@ -874,18 +1019,33 @@ type QueriesInterface interface { // Moves a query to the trash. Trashed queries immediately disappear from // searches and list views, and they cannot be used for alerts. The trash is // deleted after 30 days. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources DeleteByQueryId(ctx context.Context, queryId string) error // Get a query definition. // // Retrieve a query object definition along with contextual permissions // information about the currently authenticated user. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Get(ctx context.Context, request GetQueryRequest) (*Query, error) // Get a query definition. // // Retrieve a query object definition along with contextual permissions // information about the currently authenticated user. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources GetByQueryId(ctx context.Context, queryId string) (*Query, error) // Get a list of queries. @@ -893,8 +1053,13 @@ type QueriesInterface interface { // Gets a list of queries. Optionally, this list can be filtered by a search // term. // - // ### **Warning: Calling this API concurrently 10 or more times could result in - // throttling, service degradation, or a temporary ban.** + // **Warning**: Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources // // This method is generated by Databricks SDK Code Generator. List(ctx context.Context, request ListQueriesRequest) listing.Iterator[Query] @@ -904,8 +1069,13 @@ type QueriesInterface interface { // Gets a list of queries. Optionally, this list can be filtered by a search // term. // - // ### **Warning: Calling this API concurrently 10 or more times could result in - // throttling, service degradation, or a temporary ban.** + // **Warning**: Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources // // This method is generated by Databricks SDK Code Generator. 
ListAll(ctx context.Context, request ListQueriesRequest) ([]Query, error) @@ -932,6 +1102,11 @@ type QueriesInterface interface { // // Restore a query that has been moved to the trash. A restored query appears in // list views and searches. You can use restored queries for alerts. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Restore(ctx context.Context, request RestoreQueryRequest) error // Change a query definition. @@ -939,6 +1114,11 @@ type QueriesInterface interface { // Modify this query definition. // // **Note**: You cannot undo this operation. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Update(ctx context.Context, request QueryEditContent) (*Query, error) } @@ -954,6 +1134,11 @@ func NewQueries(client *client.DatabricksClient) *QueriesAPI { // definitions include the target SQL warehouse, query text, name, description, // tags, parameters, and visualizations. Queries can be scheduled using the // `sql_task` type of the Jobs API, e.g. :method:jobs/create. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources type QueriesAPI struct { // impl contains low-level REST API interface, that could be overridden // through WithImpl(QueriesService) @@ -985,6 +1170,11 @@ func (a *QueriesAPI) Impl() QueriesService { // existing query. // // **Note**: You cannot add a visualization until you create the query. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) Create(ctx context.Context, request QueryPostContent) (*Query, error) { return a.impl.Create(ctx, request) } @@ -994,6 +1184,11 @@ func (a *QueriesAPI) Create(ctx context.Context, request QueryPostContent) (*Que // Moves a query to the trash. Trashed queries immediately disappear from // searches and list views, and they cannot be used for alerts. The trash is // deleted after 30 days. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) Delete(ctx context.Context, request DeleteQueryRequest) error { return a.impl.Delete(ctx, request) } @@ -1003,6 +1198,11 @@ func (a *QueriesAPI) Delete(ctx context.Context, request DeleteQueryRequest) err // Moves a query to the trash. Trashed queries immediately disappear from // searches and list views, and they cannot be used for alerts. The trash is // deleted after 30 days. +// +// **Note**: A new version of the Databricks SQL API will soon be available. 
+// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) DeleteByQueryId(ctx context.Context, queryId string) error { return a.impl.Delete(ctx, DeleteQueryRequest{ QueryId: queryId, @@ -1013,6 +1213,11 @@ func (a *QueriesAPI) DeleteByQueryId(ctx context.Context, queryId string) error // // Retrieve a query object definition along with contextual permissions // information about the currently authenticated user. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) Get(ctx context.Context, request GetQueryRequest) (*Query, error) { return a.impl.Get(ctx, request) } @@ -1021,6 +1226,11 @@ func (a *QueriesAPI) Get(ctx context.Context, request GetQueryRequest) (*Query, // // Retrieve a query object definition along with contextual permissions // information about the currently authenticated user. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) GetByQueryId(ctx context.Context, queryId string) (*Query, error) { return a.impl.Get(ctx, GetQueryRequest{ QueryId: queryId, @@ -1032,10 +1242,15 @@ func (a *QueriesAPI) GetByQueryId(ctx context.Context, queryId string) (*Query, // Gets a list of queries. Optionally, this list can be filtered by a search // term. // -// ### **Warning: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban.** +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] // // This method is generated by Databricks SDK Code Generator. +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) List(ctx context.Context, request ListQueriesRequest) listing.Iterator[Query] { request.Page = 1 // start iterating from the first page @@ -1072,10 +1287,15 @@ func (a *QueriesAPI) List(ctx context.Context, request ListQueriesRequest) listi // Gets a list of queries. Optionally, this list can be filtered by a search // term. // -// ### **Warning: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban.** +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] // // This method is generated by Databricks SDK Code Generator. 
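Given the concurrency warning, the paginated `List` iterator is best drained from a single goroutine. A sketch; the `HasNext`/`Next` surface comes from this SDK's `listing` package rather than from this diff:

```go
package sketches

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sql"
)

// walkQueries pages through all queries sequentially. Avoid running ten or
// more of these listings concurrently: per the warning above, that risks
// throttling, service degradation, or a temporary ban.
func walkQueries(ctx context.Context, w *databricks.WorkspaceClient) error {
	it := w.Queries.List(ctx, sql.ListQueriesRequest{PageSize: 25})
	for it.HasNext(ctx) {
		q, err := it.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Println(q.Name)
	}
	return nil
}
```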
+// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) ListAll(ctx context.Context, request ListQueriesRequest) ([]Query, error) { iterator := a.List(ctx, request) return listing.ToSliceN[Query, int](ctx, iterator, request.PageSize) @@ -1139,6 +1359,11 @@ func (a *QueriesAPI) GetByName(ctx context.Context, name string) (*Query, error) // // Restore a query that has been moved to the trash. A restored query appears in // list views and searches. You can use restored queries for alerts. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) Restore(ctx context.Context, request RestoreQueryRequest) error { return a.impl.Restore(ctx, request) } @@ -1148,6 +1373,11 @@ func (a *QueriesAPI) Restore(ctx context.Context, request RestoreQueryRequest) e // Modify this query definition. // // **Note**: You cannot undo this operation. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources func (a *QueriesAPI) Update(ctx context.Context, request QueryEditContent) (*Query, error) { return a.impl.Update(ctx, request) } diff --git a/service/sql/interface.go b/service/sql/interface.go index d7df019aa..be39220e3 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -11,6 +11,11 @@ import ( // of its result, and notifies one or more users and/or notification // destinations if the condition was met. Alerts can be scheduled using the // `sql_task` type of the Jobs API, e.g. :method:jobs/create. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources type AlertsService interface { // Create an alert. @@ -18,28 +23,53 @@ type AlertsService interface { // Creates an alert. An alert is a Databricks SQL object that periodically // runs a query, evaluates a condition of its result, and notifies users or // notification destinations if the condition was met. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Create(ctx context.Context, request CreateAlert) (*Alert, error) // Delete an alert. // // Deletes an alert. Deleted alerts are no longer accessible and cannot be - // restored. **Note:** Unlike queries and dashboards, alerts cannot be moved + // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved // to the trash. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Delete(ctx context.Context, request DeleteAlertRequest) error // Get an alert. // // Gets an alert. + // + // **Note**: A new version of the Databricks SQL API will soon be available. 
+ // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Get(ctx context.Context, request GetAlertRequest) (*Alert, error) // Get alerts. // // Gets a list of alerts. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources List(ctx context.Context) ([]Alert, error) // Update an alert. // // Updates an alert. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Update(ctx context.Context, request EditAlert) error } @@ -85,8 +115,8 @@ type DashboardsService interface { // // Fetch a paginated list of dashboard objects. // - // ### **Warning: Calling this API concurrently 10 or more times could - // result in throttling, service degradation, or a temporary ban.** + // **Warning**: Calling this API concurrently 10 or more times could result + // in throttling, service degradation, or a temporary ban. // // Use ListAll() to get all Dashboard instances, which will iterate over every result page. List(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error) @@ -116,6 +146,11 @@ type DashboardsService interface { // warehouses in your workspace. We advise you to use any text editor, REST // client, or `grep` to search the response from this API for the name of your // SQL warehouse as it appears in Databricks SQL. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources type DataSourcesService interface { // Get a list of SQL warehouses. @@ -124,6 +159,11 @@ type DataSourcesService interface { // fields that appear in this API response are enumerated for clarity. // However, you need only a SQL warehouse's `id` to create new queries // against it. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources List(ctx context.Context) ([]DataSource, error) } @@ -140,24 +180,44 @@ type DataSourcesService interface { // // - `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify // permissions (superset of `CAN_RUN`) +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources type DbsqlPermissionsService interface { // Get object ACL. // // Gets a JSON representation of the access control list (ACL) for a // specified object. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error) // Set object ACL. 
// // Sets the access control list (ACL) for a specified object. This operation // will complete rewrite the ACL. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Set(ctx context.Context, request SetRequest) (*SetResponse, error) // Transfer object ownership. // // Transfers ownership of a dashboard, query, or alert to an active user. // Requires an admin API key. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error) } @@ -165,6 +225,11 @@ type DbsqlPermissionsService interface { // definitions include the target SQL warehouse, query text, name, description, // tags, parameters, and visualizations. Queries can be scheduled using the // `sql_task` type of the Jobs API, e.g. :method:jobs/create. +// +// **Note**: A new version of the Databricks SQL API will soon be available. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources type QueriesService interface { // Create a new query definition. @@ -178,6 +243,11 @@ type QueriesService interface { // from an existing query. // // **Note**: You cannot add a visualization until you create the query. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Create(ctx context.Context, request QueryPostContent) (*Query, error) // Delete a query. @@ -185,12 +255,22 @@ type QueriesService interface { // Moves a query to the trash. Trashed queries immediately disappear from // searches and list views, and they cannot be used for alerts. The trash is // deleted after 30 days. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Delete(ctx context.Context, request DeleteQueryRequest) error // Get a query definition. // // Retrieve a query object definition along with contextual permissions // information about the currently authenticated user. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Get(ctx context.Context, request GetQueryRequest) (*Query, error) // Get a list of queries. @@ -198,8 +278,13 @@ type QueriesService interface { // Gets a list of queries. Optionally, this list can be filtered by a search // term. // - // ### **Warning: Calling this API concurrently 10 or more times could - // result in throttling, service degradation, or a temporary ban.** + // **Warning**: Calling this API concurrently 10 or more times could result + // in throttling, service degradation, or a temporary ban. 
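The trash semantics documented above compose into a simple round trip. A sketch; the `QueryId` field on `RestoreQueryRequest` is an assumption mirroring `DeleteQueryRequest`:

```go
package sketches

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sql"
)

// trashAndRestore moves a query to the trash and immediately restores it.
// While trashed, the query disappears from searches and cannot drive
// alerts; the trash itself is purged after 30 days.
func trashAndRestore(ctx context.Context, w *databricks.WorkspaceClient, id string) error {
	if err := w.Queries.DeleteByQueryId(ctx, id); err != nil {
		return err
	}
	return w.Queries.Restore(ctx, sql.RestoreQueryRequest{QueryId: id})
}
```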
+ // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources // // Use ListAll() to get all Query instances, which will iterate over every result page. List(ctx context.Context, request ListQueriesRequest) (*QueryList, error) @@ -209,6 +294,11 @@ type QueriesService interface { // Restore a query that has been moved to the trash. A restored query // appears in list views and searches. You can use restored queries for // alerts. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Restore(ctx context.Context, request RestoreQueryRequest) error // Change a query definition. @@ -216,6 +306,11 @@ type QueriesService interface { // Modify this query definition. // // **Note**: You cannot undo this operation. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Update(ctx context.Context, request QueryEditContent) (*Query, error) } diff --git a/service/sql/model.go b/service/sql/model.go index 6904aef74..1a21e941d 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -138,7 +138,7 @@ type AlertQuery struct { // The timestamp when this query was created. CreatedAt string `json:"created_at,omitempty"` // Data source ID maps to the ID of the data source used by the resource and - // is distinct from the warehouse ID. [Learn more]. + // is distinct from the warehouse ID. [Learn more] // // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list DataSourceId string `json:"data_source_id,omitempty"` @@ -287,7 +287,6 @@ func (s ChannelInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Name of the channel type ChannelName string const ChannelNameChannelNameCurrent ChannelName = `CHANNEL_NAME_CURRENT` @@ -744,7 +743,7 @@ func (s DashboardPostContent) MarshalJSON() ([]byte, error) { // A JSON object representing a DBSQL data source / SQL warehouse. type DataSource struct { // Data source ID maps to the ID of the data source used by the resource and - // is distinct from the warehouse ID. [Learn more]. + // is distinct from the warehouse ID. [Learn more] // // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list Id string `json:"id,omitempty"` @@ -2225,7 +2224,7 @@ type Query struct { // The timestamp when this query was created. CreatedAt string `json:"created_at,omitempty"` // Data source ID maps to the ID of the data source used by the resource and - // is distinct from the warehouse ID. [Learn more]. + // is distinct from the warehouse ID. [Learn more] // // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list DataSourceId string `json:"data_source_id,omitempty"` @@ -2301,7 +2300,7 @@ func (s Query) MarshalJSON() ([]byte, error) { type QueryEditContent struct { // Data source ID maps to the ID of the data source used by the resource and - // is distinct from the warehouse ID. [Learn more]. + // is distinct from the warehouse ID. 
[Learn more] // // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list DataSourceId string `json:"data_source_id,omitempty"` @@ -2535,7 +2534,7 @@ func (s QueryOptions) MarshalJSON() ([]byte, error) { type QueryPostContent struct { // Data source ID maps to the ID of the data source used by the resource and - // is distinct from the warehouse ID. [Learn more]. + // is distinct from the warehouse ID. [Learn more] // // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list DataSourceId string `json:"data_source_id,omitempty"` diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index 28300fe85..d1267893e 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -603,8 +603,8 @@ type QueryVectorIndexResponse struct { // Metadata about the result set. Manifest *ResultManifest `json:"manifest,omitempty"` // [Optional] Token that can be used in `QueryVectorIndexNextPage` API to - // get next page of results. If more than 100 results satisfy the query, - // they are returned in groups of 100. Empty value means no more results. + // get next page of results. If more than 1000 results satisfy the query, + // they are returned in groups of 1000. Empty value means no more results. NextPageToken string `json:"next_page_token,omitempty"` // Data returned in the query result. Result *ResultData `json:"result,omitempty"` diff --git a/version/version.go b/version/version.go index b0a5ef53a..76b6831f0 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.42.0" +const Version = "0.43.0" diff --git a/workspace_client.go b/workspace_client.go index 9d176242b..79beef7c5 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -44,6 +44,11 @@ type WorkspaceClient struct { // notification destinations if the condition was met. Alerts can be // scheduled using the `sql_task` type of the Jobs API, e.g. // :method:jobs/create. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Alerts sql.AlertsInterface // Apps run directly on a customer’s Databricks instance, integrate with @@ -196,6 +201,11 @@ type WorkspaceClient struct { // warehouses in your workspace. We advise you to use any text editor, REST // client, or `grep` to search the response from this API for the name of // your SQL warehouse as it appears in Databricks SQL. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources DataSources sql.DataSourcesInterface // DBFS API makes it simple to interact with various data sources without @@ -215,6 +225,11 @@ type WorkspaceClient struct { // // - `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify // permissions (superset of `CAN_RUN`) + // + // **Note**: A new version of the Databricks SQL API will soon be available. 
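The vector search change above is comment-only (pages now come in groups of up to 1000), but it is worth showing how the token is read. A hedged sketch: the `QueryIndex` call and its request fields (`IndexName`, `Columns`, `QueryVector`, `NumResults`) are assumed from this SDK's vector search service and are not part of this diff:

```go
package sketches

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

// firstPage runs one similarity query and reports whether more pages exist.
// An empty NextPageToken means the result set is exhausted.
func firstPage(ctx context.Context, w *databricks.WorkspaceClient) error {
	resp, err := w.VectorSearchIndexes.QueryIndex(ctx, vectorsearch.QueryVectorIndexRequest{
		IndexName:   "main.default.my_index", // hypothetical index
		Columns:     []string{"id"},
		QueryVector: []float64{0.1, 0.2, 0.3},
		NumResults:  1000,
	})
	if err != nil {
		return err
	}
	if resp.NextPageToken != "" {
		fmt.Println("more results: pass the token to the QueryVectorIndexNextPage endpoint")
	}
	return nil
}
```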
+ // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources DbsqlPermissions sql.DbsqlPermissionsInterface // Experiments are the primary unit of organization in MLflow; all MLflow @@ -581,6 +596,11 @@ type WorkspaceClient struct { // description, tags, parameters, and visualizations. Queries can be // scheduled using the `sql_task` type of the Jobs API, e.g. // :method:jobs/create. + // + // **Note**: A new version of the Databricks SQL API will soon be available. + // [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources Queries sql.QueriesInterface // Access the history of queries through SQL warehouses.
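With the workspace client's `Alerts`, `DataSources`, `DbsqlPermissions`, and `Queries` accessors all carrying the same deprecation note, existing name-based helpers such as `QueriesAPI.GetByName` (visible in the hunks above) continue to target the legacy endpoints. A closing sketch; both fields printed here are confirmed by the model changes above:

```go
package sketches

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

// lookupQuery resolves a query by display name. By convention the
// name-based helpers scan the (throttle-sensitive) list endpoint, so cache
// the result rather than calling this in a loop.
func lookupQuery(ctx context.Context, w *databricks.WorkspaceClient, name string) error {
	q, err := w.Queries.GetByName(ctx, name)
	if err != nil {
		return err
	}
	fmt.Printf("query %q uses data source %s\n", q.Name, q.DataSourceId)
	return nil
}
```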