From 2f56ab84318f28ddc284038887ee7546e1a199a3 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Wed, 24 Apr 2024 09:51:16 +0200 Subject: [PATCH] Update SDK to OpenAPI spec (#899) ## Changes There are some backwards incompatible changes around cluster library structs -- they needed to be fixed in library utils. Done those in this PR also. ## Tests - [ ] `make test` passing - [ ] `make fmt` applied - [ ] relevant integration tests applied --- .codegen/_openapi_sha | 2 +- .../compute/mock_libraries_interface.go | 56 ++--- .../mock_serving_endpoints_interface.go | 94 +++++++++ service/catalog/model.go | 114 ++-------- service/compute/api.go | 196 ++++++------------ service/compute/impl.go | 8 +- service/compute/interface.go | 51 ++--- service/compute/library_utilities.go | 14 +- service/compute/model.go | 111 +++++++--- service/iam/model.go | 4 +- service/jobs/model.go | 23 +- service/pipelines/model.go | 141 +++++++++++++ service/serving/api.go | 34 +++ service/serving/impl.go | 9 + service/serving/interface.go | 7 + service/serving/model.go | 12 ++ service/sharing/model.go | 8 +- service/sql/model.go | 1 + workspace_client.go | 8 +- 19 files changed, 551 insertions(+), 342 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 0aa4b1028..1f11c17bf 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -94684175b8bd65f8701f89729351f8069e8309c9 \ No newline at end of file +21f9f1482f9d0d15228da59f2cd9f0863d2a6d55 \ No newline at end of file diff --git a/experimental/mocks/service/compute/mock_libraries_interface.go b/experimental/mocks/service/compute/mock_libraries_interface.go index d49421deb..040355032 100644 --- a/experimental/mocks/service/compute/mock_libraries_interface.go +++ b/experimental/mocks/service/compute/mock_libraries_interface.go @@ -86,7 +86,7 @@ func (_c *MockLibrariesInterface_AllClusterStatuses_Call) RunAndReturn(run func( } // ClusterStatus provides a mock 
function with given fields: ctx, request -func (_m *MockLibrariesInterface) ClusterStatus(ctx context.Context, request compute.ClusterStatusRequest) listing.Iterator[compute.LibraryFullStatus] { +func (_m *MockLibrariesInterface) ClusterStatus(ctx context.Context, request compute.ClusterStatus) listing.Iterator[compute.LibraryFullStatus] { ret := _m.Called(ctx, request) if len(ret) == 0 { @@ -94,7 +94,7 @@ func (_m *MockLibrariesInterface) ClusterStatus(ctx context.Context, request com } var r0 listing.Iterator[compute.LibraryFullStatus] - if rf, ok := ret.Get(0).(func(context.Context, compute.ClusterStatusRequest) listing.Iterator[compute.LibraryFullStatus]); ok { + if rf, ok := ret.Get(0).(func(context.Context, compute.ClusterStatus) listing.Iterator[compute.LibraryFullStatus]); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { @@ -112,14 +112,14 @@ type MockLibrariesInterface_ClusterStatus_Call struct { // ClusterStatus is a helper method to define mock.On call // - ctx context.Context -// - request compute.ClusterStatusRequest +// - request compute.ClusterStatus func (_e *MockLibrariesInterface_Expecter) ClusterStatus(ctx interface{}, request interface{}) *MockLibrariesInterface_ClusterStatus_Call { return &MockLibrariesInterface_ClusterStatus_Call{Call: _e.mock.On("ClusterStatus", ctx, request)} } -func (_c *MockLibrariesInterface_ClusterStatus_Call) Run(run func(ctx context.Context, request compute.ClusterStatusRequest)) *MockLibrariesInterface_ClusterStatus_Call { +func (_c *MockLibrariesInterface_ClusterStatus_Call) Run(run func(ctx context.Context, request compute.ClusterStatus)) *MockLibrariesInterface_ClusterStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(compute.ClusterStatusRequest)) + run(args[0].(context.Context), args[1].(compute.ClusterStatus)) }) return _c } @@ -129,13 +129,13 @@ func (_c *MockLibrariesInterface_ClusterStatus_Call) Return(_a0 listing.Iterator return _c } -func (_c 
*MockLibrariesInterface_ClusterStatus_Call) RunAndReturn(run func(context.Context, compute.ClusterStatusRequest) listing.Iterator[compute.LibraryFullStatus]) *MockLibrariesInterface_ClusterStatus_Call { +func (_c *MockLibrariesInterface_ClusterStatus_Call) RunAndReturn(run func(context.Context, compute.ClusterStatus) listing.Iterator[compute.LibraryFullStatus]) *MockLibrariesInterface_ClusterStatus_Call { _c.Call.Return(run) return _c } // ClusterStatusAll provides a mock function with given fields: ctx, request -func (_m *MockLibrariesInterface) ClusterStatusAll(ctx context.Context, request compute.ClusterStatusRequest) ([]compute.LibraryFullStatus, error) { +func (_m *MockLibrariesInterface) ClusterStatusAll(ctx context.Context, request compute.ClusterStatus) ([]compute.LibraryFullStatus, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { @@ -144,10 +144,10 @@ func (_m *MockLibrariesInterface) ClusterStatusAll(ctx context.Context, request var r0 []compute.LibraryFullStatus var r1 error - if rf, ok := ret.Get(0).(func(context.Context, compute.ClusterStatusRequest) ([]compute.LibraryFullStatus, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, compute.ClusterStatus) ([]compute.LibraryFullStatus, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, compute.ClusterStatusRequest) []compute.LibraryFullStatus); ok { + if rf, ok := ret.Get(0).(func(context.Context, compute.ClusterStatus) []compute.LibraryFullStatus); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { @@ -155,7 +155,7 @@ func (_m *MockLibrariesInterface) ClusterStatusAll(ctx context.Context, request } } - if rf, ok := ret.Get(1).(func(context.Context, compute.ClusterStatusRequest) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, compute.ClusterStatus) error); ok { r1 = rf(ctx, request) } else { r1 = ret.Error(1) @@ -171,14 +171,14 @@ type MockLibrariesInterface_ClusterStatusAll_Call struct { // ClusterStatusAll is a helper 
method to define mock.On call // - ctx context.Context -// - request compute.ClusterStatusRequest +// - request compute.ClusterStatus func (_e *MockLibrariesInterface_Expecter) ClusterStatusAll(ctx interface{}, request interface{}) *MockLibrariesInterface_ClusterStatusAll_Call { return &MockLibrariesInterface_ClusterStatusAll_Call{Call: _e.mock.On("ClusterStatusAll", ctx, request)} } -func (_c *MockLibrariesInterface_ClusterStatusAll_Call) Run(run func(ctx context.Context, request compute.ClusterStatusRequest)) *MockLibrariesInterface_ClusterStatusAll_Call { +func (_c *MockLibrariesInterface_ClusterStatusAll_Call) Run(run func(ctx context.Context, request compute.ClusterStatus)) *MockLibrariesInterface_ClusterStatusAll_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(compute.ClusterStatusRequest)) + run(args[0].(context.Context), args[1].(compute.ClusterStatus)) }) return _c } @@ -188,29 +188,29 @@ func (_c *MockLibrariesInterface_ClusterStatusAll_Call) Return(_a0 []compute.Lib return _c } -func (_c *MockLibrariesInterface_ClusterStatusAll_Call) RunAndReturn(run func(context.Context, compute.ClusterStatusRequest) ([]compute.LibraryFullStatus, error)) *MockLibrariesInterface_ClusterStatusAll_Call { +func (_c *MockLibrariesInterface_ClusterStatusAll_Call) RunAndReturn(run func(context.Context, compute.ClusterStatus) ([]compute.LibraryFullStatus, error)) *MockLibrariesInterface_ClusterStatusAll_Call { _c.Call.Return(run) return _c } // ClusterStatusByClusterId provides a mock function with given fields: ctx, clusterId -func (_m *MockLibrariesInterface) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*compute.ClusterLibraryStatuses, error) { +func (_m *MockLibrariesInterface) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*compute.ClusterStatusResponse, error) { ret := _m.Called(ctx, clusterId) if len(ret) == 0 { panic("no return value specified for ClusterStatusByClusterId") } - var r0 
*compute.ClusterLibraryStatuses + var r0 *compute.ClusterStatusResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (*compute.ClusterLibraryStatuses, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) (*compute.ClusterStatusResponse, error)); ok { return rf(ctx, clusterId) } - if rf, ok := ret.Get(0).(func(context.Context, string) *compute.ClusterLibraryStatuses); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) *compute.ClusterStatusResponse); ok { r0 = rf(ctx, clusterId) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*compute.ClusterLibraryStatuses) + r0 = ret.Get(0).(*compute.ClusterStatusResponse) } } @@ -242,12 +242,12 @@ func (_c *MockLibrariesInterface_ClusterStatusByClusterId_Call) Run(run func(ctx return _c } -func (_c *MockLibrariesInterface_ClusterStatusByClusterId_Call) Return(_a0 *compute.ClusterLibraryStatuses, _a1 error) *MockLibrariesInterface_ClusterStatusByClusterId_Call { +func (_c *MockLibrariesInterface_ClusterStatusByClusterId_Call) Return(_a0 *compute.ClusterStatusResponse, _a1 error) *MockLibrariesInterface_ClusterStatusByClusterId_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockLibrariesInterface_ClusterStatusByClusterId_Call) RunAndReturn(run func(context.Context, string) (*compute.ClusterLibraryStatuses, error)) *MockLibrariesInterface_ClusterStatusByClusterId_Call { +func (_c *MockLibrariesInterface_ClusterStatusByClusterId_Call) RunAndReturn(run func(context.Context, string) (*compute.ClusterStatusResponse, error)) *MockLibrariesInterface_ClusterStatusByClusterId_Call { _c.Call.Return(run) return _c } @@ -394,7 +394,7 @@ func (_c *MockLibrariesInterface_Uninstall_Call) RunAndReturn(run func(context.C } // UpdateAndWait provides a mock function with given fields: ctx, update, options -func (_m *MockLibrariesInterface) UpdateAndWait(ctx context.Context, update compute.Update, options ...retries.Option[compute.ClusterLibraryStatuses]) error { +func (_m 
*MockLibrariesInterface) UpdateAndWait(ctx context.Context, update compute.Update, options ...retries.Option[compute.ClusterStatusResponse]) error { _va := make([]interface{}, len(options)) for _i := range options { _va[_i] = options[_i] @@ -409,7 +409,7 @@ func (_m *MockLibrariesInterface) UpdateAndWait(ctx context.Context, update comp } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, compute.Update, ...retries.Option[compute.ClusterLibraryStatuses]) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, compute.Update, ...retries.Option[compute.ClusterStatusResponse]) error); ok { r0 = rf(ctx, update, options...) } else { r0 = ret.Error(0) @@ -426,18 +426,18 @@ type MockLibrariesInterface_UpdateAndWait_Call struct { // UpdateAndWait is a helper method to define mock.On call // - ctx context.Context // - update compute.Update -// - options ...retries.Option[compute.ClusterLibraryStatuses] +// - options ...retries.Option[compute.ClusterStatusResponse] func (_e *MockLibrariesInterface_Expecter) UpdateAndWait(ctx interface{}, update interface{}, options ...interface{}) *MockLibrariesInterface_UpdateAndWait_Call { return &MockLibrariesInterface_UpdateAndWait_Call{Call: _e.mock.On("UpdateAndWait", append([]interface{}{ctx, update}, options...)...)} } -func (_c *MockLibrariesInterface_UpdateAndWait_Call) Run(run func(ctx context.Context, update compute.Update, options ...retries.Option[compute.ClusterLibraryStatuses])) *MockLibrariesInterface_UpdateAndWait_Call { +func (_c *MockLibrariesInterface_UpdateAndWait_Call) Run(run func(ctx context.Context, update compute.Update, options ...retries.Option[compute.ClusterStatusResponse])) *MockLibrariesInterface_UpdateAndWait_Call { _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]retries.Option[compute.ClusterLibraryStatuses], len(args)-2) + variadicArgs := make([]retries.Option[compute.ClusterStatusResponse], len(args)-2) for i, a := range args[2:] { if a != nil { - variadicArgs[i] = 
a.(retries.Option[compute.ClusterLibraryStatuses]) + variadicArgs[i] = a.(retries.Option[compute.ClusterStatusResponse]) } } run(args[0].(context.Context), args[1].(compute.Update), variadicArgs...) @@ -450,7 +450,7 @@ func (_c *MockLibrariesInterface_UpdateAndWait_Call) Return(_a0 error) *MockLibr return _c } -func (_c *MockLibrariesInterface_UpdateAndWait_Call) RunAndReturn(run func(context.Context, compute.Update, ...retries.Option[compute.ClusterLibraryStatuses]) error) *MockLibrariesInterface_UpdateAndWait_Call { +func (_c *MockLibrariesInterface_UpdateAndWait_Call) RunAndReturn(run func(context.Context, compute.Update, ...retries.Option[compute.ClusterStatusResponse]) error) *MockLibrariesInterface_UpdateAndWait_Call { _c.Call.Return(run) return _c } diff --git a/experimental/mocks/service/serving/mock_serving_endpoints_interface.go b/experimental/mocks/service/serving/mock_serving_endpoints_interface.go index 491d3af29..e2bdb3d06 100644 --- a/experimental/mocks/service/serving/mock_serving_endpoints_interface.go +++ b/experimental/mocks/service/serving/mock_serving_endpoints_interface.go @@ -586,6 +586,100 @@ func (_c *MockServingEndpointsInterface_GetByName_Call) RunAndReturn(run func(co return _c } +// GetOpenApi provides a mock function with given fields: ctx, request +func (_m *MockServingEndpointsInterface) GetOpenApi(ctx context.Context, request serving.GetOpenApiRequest) error { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetOpenApi") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, serving.GetOpenApiRequest) error); ok { + r0 = rf(ctx, request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockServingEndpointsInterface_GetOpenApi_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOpenApi' +type MockServingEndpointsInterface_GetOpenApi_Call struct { + *mock.Call +} + +// GetOpenApi is a helper method to define mock.On call 
+// - ctx context.Context +// - request serving.GetOpenApiRequest +func (_e *MockServingEndpointsInterface_Expecter) GetOpenApi(ctx interface{}, request interface{}) *MockServingEndpointsInterface_GetOpenApi_Call { + return &MockServingEndpointsInterface_GetOpenApi_Call{Call: _e.mock.On("GetOpenApi", ctx, request)} +} + +func (_c *MockServingEndpointsInterface_GetOpenApi_Call) Run(run func(ctx context.Context, request serving.GetOpenApiRequest)) *MockServingEndpointsInterface_GetOpenApi_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(serving.GetOpenApiRequest)) + }) + return _c +} + +func (_c *MockServingEndpointsInterface_GetOpenApi_Call) Return(_a0 error) *MockServingEndpointsInterface_GetOpenApi_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockServingEndpointsInterface_GetOpenApi_Call) RunAndReturn(run func(context.Context, serving.GetOpenApiRequest) error) *MockServingEndpointsInterface_GetOpenApi_Call { + _c.Call.Return(run) + return _c +} + +// GetOpenApiByName provides a mock function with given fields: ctx, name +func (_m *MockServingEndpointsInterface) GetOpenApiByName(ctx context.Context, name string) error { + ret := _m.Called(ctx, name) + + if len(ret) == 0 { + panic("no return value specified for GetOpenApiByName") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, name) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockServingEndpointsInterface_GetOpenApiByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOpenApiByName' +type MockServingEndpointsInterface_GetOpenApiByName_Call struct { + *mock.Call +} + +// GetOpenApiByName is a helper method to define mock.On call +// - ctx context.Context +// - name string +func (_e *MockServingEndpointsInterface_Expecter) GetOpenApiByName(ctx interface{}, name interface{}) *MockServingEndpointsInterface_GetOpenApiByName_Call { + return 
&MockServingEndpointsInterface_GetOpenApiByName_Call{Call: _e.mock.On("GetOpenApiByName", ctx, name)} +} + +func (_c *MockServingEndpointsInterface_GetOpenApiByName_Call) Run(run func(ctx context.Context, name string)) *MockServingEndpointsInterface_GetOpenApiByName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockServingEndpointsInterface_GetOpenApiByName_Call) Return(_a0 error) *MockServingEndpointsInterface_GetOpenApiByName_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockServingEndpointsInterface_GetOpenApiByName_Call) RunAndReturn(run func(context.Context, string) error) *MockServingEndpointsInterface_GetOpenApiByName_Call { + _c.Call.Return(run) + return _c +} + // GetPermissionLevels provides a mock function with given fields: ctx, request func (_m *MockServingEndpointsInterface) GetPermissionLevels(ctx context.Context, request serving.GetServingEndpointPermissionLevelsRequest) (*serving.GetServingEndpointPermissionLevelsResponse, error) { ret := _m.Called(ctx, request) diff --git a/service/catalog/model.go b/service/catalog/model.go index 0a886bffa..a9347203d 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -3807,6 +3807,8 @@ type PrimaryKeyConstraint struct { type Privilege string +const PrivilegeAccess Privilege = `ACCESS` + const PrivilegeAllPrivileges Privilege = `ALL_PRIVILEGES` const PrivilegeApplyTag Privilege = `APPLY_TAG` @@ -3839,6 +3841,8 @@ const PrivilegeCreateRecipient Privilege = `CREATE_RECIPIENT` const PrivilegeCreateSchema Privilege = `CREATE_SCHEMA` +const PrivilegeCreateServiceCredential Privilege = `CREATE_SERVICE_CREDENTIAL` + const PrivilegeCreateShare Privilege = `CREATE_SHARE` const PrivilegeCreateStorageCredential Privilege = `CREATE_STORAGE_CREDENTIAL` @@ -3897,11 +3901,11 @@ func (f *Privilege) String() string { // Set raw string value and validate it against allowed values func (f *Privilege) Set(v string) 
error { switch v { - case `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: *f = Privilege(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SHARE", 
"CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) } } @@ -4095,6 +4099,8 @@ type SchemaInfo struct { Owner string `json:"owner,omitempty"` // A map of key-value properties attached to the securable. Properties map[string]string `json:"properties,omitempty"` + // The unique identifier of the schema. + SchemaId string `json:"schema_id,omitempty"` // Storage location for managed tables within schema. StorageLocation string `json:"storage_location,omitempty"` // Storage root URL for managed tables within schema. @@ -4427,7 +4433,7 @@ type TableInfo struct { // List of table constraints. Note: this field is not set in the output of // the __listTables__ API. TableConstraints []TableConstraint `json:"table_constraints,omitempty"` - // Name of table, relative to parent schema. 
+ // The unique identifier of the table. TableId string `json:"table_id,omitempty"` TableType TableType `json:"table_type,omitempty"` @@ -5004,14 +5010,10 @@ func (s ValidateStorageCredentialResponse) MarshalJSON() ([]byte, error) { } type ValidationResult struct { - // The operation tested. - AwsOperation ValidationResultAwsOperation `json:"aws_operation,omitempty"` - // The operation tested. - AzureOperation ValidationResultAzureOperation `json:"azure_operation,omitempty"` - // The operation tested. - GcpOperation ValidationResultGcpOperation `json:"gcp_operation,omitempty"` // Error message would exist when the result does not equal to **PASS**. Message string `json:"message,omitempty"` + // The operation tested. + Operation ValidationResultOperation `json:"operation,omitempty"` // The results of the tested operation. Result ValidationResultResult `json:"result,omitempty"` @@ -5027,107 +5029,37 @@ func (s ValidationResult) MarshalJSON() ([]byte, error) { } // The operation tested. -type ValidationResultAwsOperation string - -const ValidationResultAwsOperationDelete ValidationResultAwsOperation = `DELETE` - -const ValidationResultAwsOperationList ValidationResultAwsOperation = `LIST` - -const ValidationResultAwsOperationPathExists ValidationResultAwsOperation = `PATH_EXISTS` - -const ValidationResultAwsOperationRead ValidationResultAwsOperation = `READ` - -const ValidationResultAwsOperationWrite ValidationResultAwsOperation = `WRITE` - -// String representation for [fmt.Print] -func (f *ValidationResultAwsOperation) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *ValidationResultAwsOperation) Set(v string) error { - switch v { - case `DELETE`, `LIST`, `PATH_EXISTS`, `READ`, `WRITE`: - *f = ValidationResultAwsOperation(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "DELETE", "LIST", "PATH_EXISTS", "READ", "WRITE"`, v) - } -} - -// Type always returns 
ValidationResultAwsOperation to satisfy [pflag.Value] interface -func (f *ValidationResultAwsOperation) Type() string { - return "ValidationResultAwsOperation" -} - -// The operation tested. -type ValidationResultAzureOperation string - -const ValidationResultAzureOperationDelete ValidationResultAzureOperation = `DELETE` - -const ValidationResultAzureOperationHierarchicalNamespaceEnabled ValidationResultAzureOperation = `HIERARCHICAL_NAMESPACE_ENABLED` - -const ValidationResultAzureOperationList ValidationResultAzureOperation = `LIST` - -const ValidationResultAzureOperationPathExists ValidationResultAzureOperation = `PATH_EXISTS` - -const ValidationResultAzureOperationRead ValidationResultAzureOperation = `READ` - -const ValidationResultAzureOperationWrite ValidationResultAzureOperation = `WRITE` - -// String representation for [fmt.Print] -func (f *ValidationResultAzureOperation) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *ValidationResultAzureOperation) Set(v string) error { - switch v { - case `DELETE`, `HIERARCHICAL_NAMESPACE_ENABLED`, `LIST`, `PATH_EXISTS`, `READ`, `WRITE`: - *f = ValidationResultAzureOperation(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "DELETE", "HIERARCHICAL_NAMESPACE_ENABLED", "LIST", "PATH_EXISTS", "READ", "WRITE"`, v) - } -} - -// Type always returns ValidationResultAzureOperation to satisfy [pflag.Value] interface -func (f *ValidationResultAzureOperation) Type() string { - return "ValidationResultAzureOperation" -} - -// The operation tested. 
-type ValidationResultGcpOperation string +type ValidationResultOperation string -const ValidationResultGcpOperationDelete ValidationResultGcpOperation = `DELETE` +const ValidationResultOperationDelete ValidationResultOperation = `DELETE` -const ValidationResultGcpOperationList ValidationResultGcpOperation = `LIST` +const ValidationResultOperationList ValidationResultOperation = `LIST` -const ValidationResultGcpOperationPathExists ValidationResultGcpOperation = `PATH_EXISTS` +const ValidationResultOperationPathExists ValidationResultOperation = `PATH_EXISTS` -const ValidationResultGcpOperationRead ValidationResultGcpOperation = `READ` +const ValidationResultOperationRead ValidationResultOperation = `READ` -const ValidationResultGcpOperationWrite ValidationResultGcpOperation = `WRITE` +const ValidationResultOperationWrite ValidationResultOperation = `WRITE` // String representation for [fmt.Print] -func (f *ValidationResultGcpOperation) String() string { +func (f *ValidationResultOperation) String() string { return string(*f) } // Set raw string value and validate it against allowed values -func (f *ValidationResultGcpOperation) Set(v string) error { +func (f *ValidationResultOperation) Set(v string) error { switch v { case `DELETE`, `LIST`, `PATH_EXISTS`, `READ`, `WRITE`: - *f = ValidationResultGcpOperation(v) + *f = ValidationResultOperation(v) return nil default: return fmt.Errorf(`value "%s" is not one of "DELETE", "LIST", "PATH_EXISTS", "READ", "WRITE"`, v) } } -// Type always returns ValidationResultGcpOperation to satisfy [pflag.Value] interface -func (f *ValidationResultGcpOperation) Type() string { - return "ValidationResultGcpOperation" +// Type always returns ValidationResultOperation to satisfy [pflag.Value] interface +func (f *ValidationResultOperation) Type() string { + return "ValidationResultOperation" } // The results of the tested operation. 
diff --git a/service/compute/api.go b/service/compute/api.go index 68f5f9d11..4712770a9 100755 --- a/service/compute/api.go +++ b/service/compute/api.go @@ -2827,88 +2827,58 @@ type LibrariesInterface interface { // Get all statuses. // - // Get the status of all libraries on all clusters. A status will be available - // for all libraries installed on this cluster via the API or the libraries UI - // as well as libraries set to be installed on all clusters via the libraries - // UI. + // Get the status of all libraries on all clusters. A status is returned for all + // libraries installed on this cluster via the API or the libraries UI. AllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) // Get status. // - // Get the status of libraries on a cluster. A status will be available for all - // libraries installed on this cluster via the API or the libraries UI as well - // as libraries set to be installed on all clusters via the libraries UI. The - // order of returned libraries will be as follows. - // - // 1. Libraries set to be installed on this cluster will be returned first. - // Within this group, the final order will be order in which the libraries were - // added to the cluster. - // - // 2. Libraries set to be installed on all clusters are returned next. Within - // this group there is no order guarantee. - // - // 3. Libraries that were previously requested on this cluster or on all - // clusters, but now marked for removal. Within this group there is no order - // guarantee. + // Get the status of libraries on a cluster. A status is returned for all + // libraries installed on this cluster via the API or the libraries UI. The + // order of returned libraries is as follows: 1. Libraries set to be installed + // on this cluster, in the order that the libraries were added to the cluster, + // are returned first. 2. 
Libraries that were previously requested to be + // installed on this cluster or, but are now marked for removal, in no + // particular order, are returned last. // // This method is generated by Databricks SDK Code Generator. - ClusterStatus(ctx context.Context, request ClusterStatusRequest) listing.Iterator[LibraryFullStatus] + ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus] // Get status. // - // Get the status of libraries on a cluster. A status will be available for all - // libraries installed on this cluster via the API or the libraries UI as well - // as libraries set to be installed on all clusters via the libraries UI. The - // order of returned libraries will be as follows. - // - // 1. Libraries set to be installed on this cluster will be returned first. - // Within this group, the final order will be order in which the libraries were - // added to the cluster. - // - // 2. Libraries set to be installed on all clusters are returned next. Within - // this group there is no order guarantee. - // - // 3. Libraries that were previously requested on this cluster or on all - // clusters, but now marked for removal. Within this group there is no order - // guarantee. + // Get the status of libraries on a cluster. A status is returned for all + // libraries installed on this cluster via the API or the libraries UI. The + // order of returned libraries is as follows: 1. Libraries set to be installed + // on this cluster, in the order that the libraries were added to the cluster, + // are returned first. 2. Libraries that were previously requested to be + // installed on this cluster or, but are now marked for removal, in no + // particular order, are returned last. // // This method is generated by Databricks SDK Code Generator. 
- ClusterStatusAll(ctx context.Context, request ClusterStatusRequest) ([]LibraryFullStatus, error) + ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error) // Get status. // - // Get the status of libraries on a cluster. A status will be available for all - // libraries installed on this cluster via the API or the libraries UI as well - // as libraries set to be installed on all clusters via the libraries UI. The - // order of returned libraries will be as follows. - // - // 1. Libraries set to be installed on this cluster will be returned first. - // Within this group, the final order will be order in which the libraries were - // added to the cluster. - // - // 2. Libraries set to be installed on all clusters are returned next. Within - // this group there is no order guarantee. - // - // 3. Libraries that were previously requested on this cluster or on all - // clusters, but now marked for removal. Within this group there is no order - // guarantee. - ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error) + // Get the status of libraries on a cluster. A status is returned for all + // libraries installed on this cluster via the API or the libraries UI. The + // order of returned libraries is as follows: 1. Libraries set to be installed + // on this cluster, in the order that the libraries were added to the cluster, + // are returned first. 2. Libraries that were previously requested to be + // installed on this cluster or, but are now marked for removal, in no + // particular order, are returned last. + ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterStatusResponse, error) // Add a library. // - // Add libraries to be installed on a cluster. The installation is asynchronous; - // it happens in the background after the completion of this request. 
- // - // **Note**: The actual set of libraries to be installed on a cluster is the - // union of the libraries specified via this method and the libraries set to be - // installed on all clusters via the libraries UI. + // Add libraries to install on a cluster. The installation is asynchronous; it + // happens in the background after the completion of this request. Install(ctx context.Context, request InstallLibraries) error // Uninstall libraries. // - // Set libraries to be uninstalled on a cluster. The libraries won't be - // uninstalled until the cluster is restarted. Uninstalling libraries that are - // not installed on the cluster will have no impact but is not an error. + // Set libraries to uninstall from a cluster. The libraries won't be uninstalled + // until the cluster is restarted. A request to uninstall a library that is not + // currently installed is ignored. Uninstall(ctx context.Context, request UninstallLibraries) error } @@ -2925,7 +2895,7 @@ func NewLibraries(client *client.DatabricksClient) *LibrariesAPI { // // To make third-party or custom code available to notebooks and jobs running on // your clusters, you can install a library. Libraries can be written in Python, -// Java, Scala, and R. You can upload Java, Scala, and Python libraries and +// Java, Scala, and R. You can upload Python, Java, Scala and R libraries and // point to external packages in PyPI, Maven, and CRAN repositories. // // Cluster libraries can be used by all notebooks running on a cluster. You can @@ -2933,10 +2903,6 @@ func NewLibraries(client *client.DatabricksClient) *LibrariesAPI { // Maven, using a previously installed workspace library, or using an init // script. // -// When you install a library on a cluster, a notebook already attached to that -// cluster will not immediately see the new library. You must first detach and -// then reattach the notebook to the cluster. 
-// // When you uninstall a library from a cluster, the library is removed only when // you restart the cluster. Until you restart the cluster, the status of the // uninstalled library appears as Uninstall pending restart. @@ -2962,40 +2928,30 @@ func (a *LibrariesAPI) Impl() LibrariesService { // Get all statuses. // -// Get the status of all libraries on all clusters. A status will be available -// for all libraries installed on this cluster via the API or the libraries UI -// as well as libraries set to be installed on all clusters via the libraries -// UI. +// Get the status of all libraries on all clusters. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. func (a *LibrariesAPI) AllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) { return a.impl.AllClusterStatuses(ctx) } // Get status. // -// Get the status of libraries on a cluster. A status will be available for all -// libraries installed on this cluster via the API or the libraries UI as well -// as libraries set to be installed on all clusters via the libraries UI. The -// order of returned libraries will be as follows. -// -// 1. Libraries set to be installed on this cluster will be returned first. -// Within this group, the final order will be order in which the libraries were -// added to the cluster. -// -// 2. Libraries set to be installed on all clusters are returned next. Within -// this group there is no order guarantee. -// -// 3. Libraries that were previously requested on this cluster or on all -// clusters, but now marked for removal. Within this group there is no order -// guarantee. +// Get the status of libraries on a cluster. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. The +// order of returned libraries is as follows: 1. 
Libraries set to be installed +// on this cluster, in the order that the libraries were added to the cluster, +// are returned first. 2. Libraries that were previously requested to be +// installed on this cluster or, but are now marked for removal, in no +// particular order, are returned last. // // This method is generated by Databricks SDK Code Generator. -func (a *LibrariesAPI) ClusterStatus(ctx context.Context, request ClusterStatusRequest) listing.Iterator[LibraryFullStatus] { +func (a *LibrariesAPI) ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus] { - getNextPage := func(ctx context.Context, req ClusterStatusRequest) (*ClusterLibraryStatuses, error) { + getNextPage := func(ctx context.Context, req ClusterStatus) (*ClusterStatusResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.impl.ClusterStatus(ctx, req) } - getItems := func(resp *ClusterLibraryStatuses) []LibraryFullStatus { + getItems := func(resp *ClusterStatusResponse) []LibraryFullStatus { return resp.LibraryStatuses } @@ -3009,68 +2965,48 @@ func (a *LibrariesAPI) ClusterStatus(ctx context.Context, request ClusterStatusR // Get status. // -// Get the status of libraries on a cluster. A status will be available for all -// libraries installed on this cluster via the API or the libraries UI as well -// as libraries set to be installed on all clusters via the libraries UI. The -// order of returned libraries will be as follows. -// -// 1. Libraries set to be installed on this cluster will be returned first. -// Within this group, the final order will be order in which the libraries were -// added to the cluster. -// -// 2. Libraries set to be installed on all clusters are returned next. Within -// this group there is no order guarantee. -// -// 3. Libraries that were previously requested on this cluster or on all -// clusters, but now marked for removal. Within this group there is no order -// guarantee. 
+// Get the status of libraries on a cluster. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. The +// order of returned libraries is as follows: 1. Libraries set to be installed +// on this cluster, in the order that the libraries were added to the cluster, +// are returned first. 2. Libraries that were previously requested to be +// installed on this cluster or, but are now marked for removal, in no +// particular order, are returned last. // // This method is generated by Databricks SDK Code Generator. -func (a *LibrariesAPI) ClusterStatusAll(ctx context.Context, request ClusterStatusRequest) ([]LibraryFullStatus, error) { +func (a *LibrariesAPI) ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error) { iterator := a.ClusterStatus(ctx, request) return listing.ToSlice[LibraryFullStatus](ctx, iterator) } // Get status. // -// Get the status of libraries on a cluster. A status will be available for all -// libraries installed on this cluster via the API or the libraries UI as well -// as libraries set to be installed on all clusters via the libraries UI. The -// order of returned libraries will be as follows. -// -// 1. Libraries set to be installed on this cluster will be returned first. -// Within this group, the final order will be order in which the libraries were -// added to the cluster. -// -// 2. Libraries set to be installed on all clusters are returned next. Within -// this group there is no order guarantee. -// -// 3. Libraries that were previously requested on this cluster or on all -// clusters, but now marked for removal. Within this group there is no order -// guarantee. -func (a *LibrariesAPI) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error) { - return a.impl.ClusterStatus(ctx, ClusterStatusRequest{ +// Get the status of libraries on a cluster. 
A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. The +// order of returned libraries is as follows: 1. Libraries set to be installed +// on this cluster, in the order that the libraries were added to the cluster, +// are returned first. 2. Libraries that were previously requested to be +// installed on this cluster or, but are now marked for removal, in no +// particular order, are returned last. +func (a *LibrariesAPI) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterStatusResponse, error) { + return a.impl.ClusterStatus(ctx, ClusterStatus{ ClusterId: clusterId, }) } // Add a library. // -// Add libraries to be installed on a cluster. The installation is asynchronous; -// it happens in the background after the completion of this request. -// -// **Note**: The actual set of libraries to be installed on a cluster is the -// union of the libraries specified via this method and the libraries set to be -// installed on all clusters via the libraries UI. +// Add libraries to install on a cluster. The installation is asynchronous; it +// happens in the background after the completion of this request. func (a *LibrariesAPI) Install(ctx context.Context, request InstallLibraries) error { return a.impl.Install(ctx, request) } // Uninstall libraries. // -// Set libraries to be uninstalled on a cluster. The libraries won't be -// uninstalled until the cluster is restarted. Uninstalling libraries that are -// not installed on the cluster will have no impact but is not an error. +// Set libraries to uninstall from a cluster. The libraries won't be uninstalled +// until the cluster is restarted. A request to uninstall a library that is not +// currently installed is ignored. 
func (a *LibrariesAPI) Uninstall(ctx context.Context, request UninstallLibraries) error { return a.impl.Uninstall(ctx, request) } diff --git a/service/compute/impl.go b/service/compute/impl.go index 2866a7e0b..bf78f6260 100755 --- a/service/compute/impl.go +++ b/service/compute/impl.go @@ -561,13 +561,13 @@ func (a *librariesImpl) AllClusterStatuses(ctx context.Context) (*ListAllCluster return &listAllClusterLibraryStatusesResponse, err } -func (a *librariesImpl) ClusterStatus(ctx context.Context, request ClusterStatusRequest) (*ClusterLibraryStatuses, error) { - var clusterLibraryStatuses ClusterLibraryStatuses +func (a *librariesImpl) ClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterStatusResponse, error) { + var clusterStatusResponse ClusterStatusResponse path := "/api/2.0/libraries/cluster-status" headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &clusterLibraryStatuses) - return &clusterLibraryStatuses, err + err := a.client.Do(ctx, http.MethodGet, path, headers, request, &clusterStatusResponse) + return &clusterStatusResponse, err } func (a *librariesImpl) Install(ctx context.Context, request InstallLibraries) error { diff --git a/service/compute/interface.go b/service/compute/interface.go index d7679c604..70c263b49 100755 --- a/service/compute/interface.go +++ b/service/compute/interface.go @@ -500,7 +500,7 @@ type InstanceProfilesService interface { // // To make third-party or custom code available to notebooks and jobs running on // your clusters, you can install a library. Libraries can be written in Python, -// Java, Scala, and R. You can upload Java, Scala, and Python libraries and +// Java, Scala, and R. You can upload Python, Java, Scala and R libraries and // point to external packages in PyPI, Maven, and CRAN repositories. // // Cluster libraries can be used by all notebooks running on a cluster. 
You can @@ -508,10 +508,6 @@ type InstanceProfilesService interface { // Maven, using a previously installed workspace library, or using an init // script. // -// When you install a library on a cluster, a notebook already attached to that -// cluster will not immediately see the new library. You must first detach and -// then reattach the notebook to the cluster. -// // When you uninstall a library from a cluster, the library is removed only when // you restart the cluster. Until you restart the cluster, the status of the // uninstalled library appears as Uninstall pending restart. @@ -519,49 +515,34 @@ type LibrariesService interface { // Get all statuses. // - // Get the status of all libraries on all clusters. A status will be - // available for all libraries installed on this cluster via the API or the - // libraries UI as well as libraries set to be installed on all clusters via - // the libraries UI. + // Get the status of all libraries on all clusters. A status is returned for + // all libraries installed on this cluster via the API or the libraries UI. AllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) // Get status. // - // Get the status of libraries on a cluster. A status will be available for - // all libraries installed on this cluster via the API or the libraries UI - // as well as libraries set to be installed on all clusters via the - // libraries UI. The order of returned libraries will be as follows. - // - // 1. Libraries set to be installed on this cluster will be returned first. - // Within this group, the final order will be order in which the libraries - // were added to the cluster. - // - // 2. Libraries set to be installed on all clusters are returned next. - // Within this group there is no order guarantee. - // - // 3. Libraries that were previously requested on this cluster or on all - // clusters, but now marked for removal. Within this group there is no order - // guarantee. 
+ // Get the status of libraries on a cluster. A status is returned for all + // libraries installed on this cluster via the API or the libraries UI. The + // order of returned libraries is as follows: 1. Libraries set to be + // installed on this cluster, in the order that the libraries were added to + // the cluster, are returned first. 2. Libraries that were previously + // requested to be installed on this cluster or, but are now marked for + // removal, in no particular order, are returned last. // // Use ClusterStatusAll() to get all LibraryFullStatus instances - ClusterStatus(ctx context.Context, request ClusterStatusRequest) (*ClusterLibraryStatuses, error) + ClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterStatusResponse, error) // Add a library. // - // Add libraries to be installed on a cluster. The installation is - // asynchronous; it happens in the background after the completion of this - // request. - // - // **Note**: The actual set of libraries to be installed on a cluster is the - // union of the libraries specified via this method and the libraries set to - // be installed on all clusters via the libraries UI. + // Add libraries to install on a cluster. The installation is asynchronous; + // it happens in the background after the completion of this request. Install(ctx context.Context, request InstallLibraries) error // Uninstall libraries. // - // Set libraries to be uninstalled on a cluster. The libraries won't be - // uninstalled until the cluster is restarted. Uninstalling libraries that - // are not installed on the cluster will have no impact but is not an error. + // Set libraries to uninstall from a cluster. The libraries won't be + // uninstalled until the cluster is restarted. A request to uninstall a + // library that is not currently installed is ignored. 
Uninstall(ctx context.Context, request UninstallLibraries) error } diff --git a/service/compute/library_utilities.go b/service/compute/library_utilities.go index 9babc7537..d43eecb6f 100644 --- a/service/compute/library_utilities.go +++ b/service/compute/library_utilities.go @@ -77,7 +77,7 @@ func (w *Wait) IsNotInScope(lib *Library) bool { // IsRetryNeeded returns first bool if there needs to be retry. // If there needs to be retry, error message will explain why. // If retry does not need to happen and error is not nil - it failed. -func (cls ClusterLibraryStatuses) IsRetryNeeded(w Wait) (bool, error) { +func (cls ClusterStatusResponse) IsRetryNeeded(w Wait) (bool, error) { pending := 0 ready := 0 errors := []string{} @@ -136,11 +136,11 @@ type Update struct { } type librariesAPIUtilities interface { - UpdateAndWait(ctx context.Context, update Update, options ...retries.Option[ClusterLibraryStatuses]) error + UpdateAndWait(ctx context.Context, update Update, options ...retries.Option[ClusterStatusResponse]) error } func (a *LibrariesAPI) UpdateAndWait(ctx context.Context, update Update, - options ...retries.Option[ClusterLibraryStatuses]) error { + options ...retries.Option[ClusterStatusResponse]) error { ctx = useragent.InContext(ctx, "sdk-feature", "update-libraries") if len(update.Uninstall) > 0 { err := a.Uninstall(ctx, UninstallLibraries{ @@ -176,20 +176,20 @@ func (a *LibrariesAPI) UpdateAndWait(ctx context.Context, update Update, // clusterID string, timeout time.Duration, isActive bool, refresh bool func (a *LibrariesAPI) Wait(ctx context.Context, wait Wait, - options ...retries.Option[ClusterLibraryStatuses]) (*ClusterLibraryStatuses, error) { + options ...retries.Option[ClusterStatusResponse]) (*ClusterStatusResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "wait-for-libraries") - i := retries.Info[ClusterLibraryStatuses]{Timeout: 30 * time.Minute} + i := retries.Info[ClusterStatusResponse]{Timeout: 30 * time.Minute} for _, o := range 
options { o(&i) } - result, err := retries.Poll(ctx, i.Timeout, func() (*ClusterLibraryStatuses, *retries.Err) { + result, err := retries.Poll(ctx, i.Timeout, func() (*ClusterStatusResponse, *retries.Err) { status, err := a.ClusterStatusByClusterId(ctx, wait.ClusterID) if apierr.IsMissing(err) { // eventual consistency error return nil, retries.Continue(err) } for _, o := range options { - o(&retries.Info[ClusterLibraryStatuses]{ + o(&retries.Info[ClusterStatusResponse]{ Timeout: i.Timeout, Info: status, }) diff --git a/service/compute/model.go b/service/compute/model.go index d6f91c4c5..10c64aee0 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -1248,11 +1248,28 @@ func (s ClusterSpec) MarshalJSON() ([]byte, error) { } // Get status -type ClusterStatusRequest struct { +type ClusterStatus struct { // Unique identifier of the cluster whose status should be retrieved. ClusterId string `json:"-" url:"cluster_id"` } +type ClusterStatusResponse struct { + // Unique identifier for the cluster. + ClusterId string `json:"cluster_id,omitempty"` + // Status of all libraries on the cluster. + LibraryStatuses []LibraryFullStatus `json:"library_statuses,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterStatusResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterStatusResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type Command struct { // Running cluster id ClusterId string `json:"clusterId,omitempty"` @@ -2338,8 +2355,10 @@ type EditResponse struct { // and jobs' environment for non-notebook task. In this minimal environment // spec, only pip dependencies are supported. Next ID: 5 type Environment struct { - // * User-friendly name for the client version: “client”: “1” The - // version is a string, consisting of the major client version + // Client version used by the environment The client is the user-facing + // environment of the runtime. 
Each client comes with a specific set of + // pre-installed libraries. The version is a string, consisting of the major + // client version. Client string `json:"client"` // List of pip dependencies, as supported by the version of pip in this // environment. Each dependency is a pip requirement file line @@ -3374,6 +3393,15 @@ type InstancePoolGcpAttributes struct { // workspace resides in the "us-east1" region. This is an optional field at // instance pool creation, and if not specified, a default zone will be // used. + // + // This field can be one of the following: - "HA" => High availability, + // spread nodes across availability zones for a Databricks deployment region + // - A GCP availability zone => Pick One of the available zones for (machine + // type + region) from https://cloud.google.com/compute/docs/regions-zones + // (e.g. "us-west1-a"). + // + // If empty, Databricks picks an availability zone to schedule the cluster + // on. ZoneId string `json:"zone_id,omitempty"` ForceSendFields []string `json:"-"` @@ -3595,17 +3623,21 @@ func (f *Language) Type() string { type Library struct { // Specification of a CRAN library to be installed as part of the library Cran *RCranLibrary `json:"cran,omitempty"` - // URI of the egg to be installed. Currently only DBFS and S3 URIs are - // supported. For example: `{ "egg": "dbfs:/my/egg" }` or `{ "egg": - // "s3://my-bucket/egg" }`. If S3 is used, please make sure the cluster has - // read access on the library. You may need to launch the cluster with an - // IAM role to access the S3 URI. + // URI of the egg library to install. Supported URIs include Workspace + // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "egg": + // "/Workspace/path/to/library.egg" }`, `{ "egg" : + // "/Volumes/path/to/library.egg" }` or `{ "egg": + // "s3://my-bucket/library.egg" }`. If S3 is used, please make sure the + // cluster has read access on the library. 
You may need to launch the + // cluster with an IAM role to access the S3 URI. Egg string `json:"egg,omitempty"` - // URI of the jar to be installed. Currently only DBFS and S3 URIs are - // supported. For example: `{ "jar": "dbfs:/mnt/databricks/library.jar" }` - // or `{ "jar": "s3://my-bucket/library.jar" }`. If S3 is used, please make - // sure the cluster has read access on the library. You may need to launch - // the cluster with an IAM role to access the S3 URI. + // URI of the JAR library to install. Supported URIs include Workspace + // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "jar": + // "/Workspace/path/to/library.jar" }`, `{ "jar" : + // "/Volumes/path/to/library.jar" }` or `{ "jar": + // "s3://my-bucket/library.jar" }`. If S3 is used, please make sure the + // cluster has read access on the library. You may need to launch the + // cluster with an IAM role to access the S3 URI. Jar string `json:"jar,omitempty"` // Specification of a maven library to be installed. For example: `{ // "coordinates": "org.jsoup:jsoup:1.7.2" }` @@ -3613,9 +3645,17 @@ type Library struct { // Specification of a PyPi library to be installed. For example: `{ // "package": "simplejson" }` Pypi *PythonPyPiLibrary `json:"pypi,omitempty"` - // URI of the wheel to be installed. For example: `{ "whl": "dbfs:/my/whl" - // }` or `{ "whl": "s3://my-bucket/whl" }`. If S3 is used, please make sure - // the cluster has read access on the library. You may need to launch the + // URI of the requirements.txt file to install. Only Workspace paths and + // Unity Catalog Volumes paths are supported. For example: `{ + // "requirements": "/Workspace/path/to/requirements.txt" }` or `{ + // "requirements" : "/Volumes/path/to/requirements.txt" }` + Requirements string `json:"requirements,omitempty"` + // URI of the wheel library to install. Supported URIs include Workspace + // paths, Unity Catalog Volumes paths, and S3 URIs. 
For example: `{ "whl": + // "/Workspace/path/to/library.whl" }`, `{ "whl" : + // "/Volumes/path/to/library.whl" }` or `{ "whl": + // "s3://my-bucket/library.whl" }`. If S3 is used, please make sure the + // cluster has read access on the library. You may need to launch the // cluster with an IAM role to access the S3 URI. Whl string `json:"whl,omitempty"` @@ -3630,6 +3670,7 @@ func (s Library) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// The status of the library on a specific cluster. type LibraryFullStatus struct { // Whether the library was set to be installed on all clusters via the // libraries UI. @@ -3640,7 +3681,7 @@ type LibraryFullStatus struct { // library. Messages []string `json:"messages,omitempty"` // Status of installing the library on the cluster. - Status LibraryFullStatusStatus `json:"status,omitempty"` + Status LibraryInstallStatus `json:"status,omitempty"` ForceSendFields []string `json:"-"` } @@ -3653,42 +3694,44 @@ func (s LibraryFullStatus) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Status of installing the library on the cluster. -type LibraryFullStatusStatus string +// The status of a library on a specific cluster. 
+type LibraryInstallStatus string + +const LibraryInstallStatusFailed LibraryInstallStatus = `FAILED` -const LibraryFullStatusStatusFailed LibraryFullStatusStatus = `FAILED` +const LibraryInstallStatusInstalled LibraryInstallStatus = `INSTALLED` -const LibraryFullStatusStatusInstalled LibraryFullStatusStatus = `INSTALLED` +const LibraryInstallStatusInstalling LibraryInstallStatus = `INSTALLING` -const LibraryFullStatusStatusInstalling LibraryFullStatusStatus = `INSTALLING` +const LibraryInstallStatusPending LibraryInstallStatus = `PENDING` -const LibraryFullStatusStatusPending LibraryFullStatusStatus = `PENDING` +const LibraryInstallStatusResolving LibraryInstallStatus = `RESOLVING` -const LibraryFullStatusStatusResolving LibraryFullStatusStatus = `RESOLVING` +const LibraryInstallStatusRestored LibraryInstallStatus = `RESTORED` -const LibraryFullStatusStatusSkipped LibraryFullStatusStatus = `SKIPPED` +const LibraryInstallStatusSkipped LibraryInstallStatus = `SKIPPED` -const LibraryFullStatusStatusUninstallOnRestart LibraryFullStatusStatus = `UNINSTALL_ON_RESTART` +const LibraryInstallStatusUninstallOnRestart LibraryInstallStatus = `UNINSTALL_ON_RESTART` // String representation for [fmt.Print] -func (f *LibraryFullStatusStatus) String() string { +func (f *LibraryInstallStatus) String() string { return string(*f) } // Set raw string value and validate it against allowed values -func (f *LibraryFullStatusStatus) Set(v string) error { +func (f *LibraryInstallStatus) Set(v string) error { switch v { - case `FAILED`, `INSTALLED`, `INSTALLING`, `PENDING`, `RESOLVING`, `SKIPPED`, `UNINSTALL_ON_RESTART`: - *f = LibraryFullStatusStatus(v) + case `FAILED`, `INSTALLED`, `INSTALLING`, `PENDING`, `RESOLVING`, `RESTORED`, `SKIPPED`, `UNINSTALL_ON_RESTART`: + *f = LibraryInstallStatus(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "FAILED", "INSTALLED", "INSTALLING", "PENDING", "RESOLVING", "SKIPPED", "UNINSTALL_ON_RESTART"`, v) + return fmt.Errorf(`value 
"%s" is not one of "FAILED", "INSTALLED", "INSTALLING", "PENDING", "RESOLVING", "RESTORED", "SKIPPED", "UNINSTALL_ON_RESTART"`, v) } } -// Type always returns LibraryFullStatusStatus to satisfy [pflag.Value] interface -func (f *LibraryFullStatusStatus) Type() string { - return "LibraryFullStatusStatus" +// Type always returns LibraryInstallStatus to satisfy [pflag.Value] interface +func (f *LibraryInstallStatus) Type() string { + return "LibraryInstallStatus" } type ListAllClusterLibraryStatusesResponse struct { diff --git a/service/iam/model.go b/service/iam/model.go index b4aa4acd5..52d3de756 100755 --- a/service/iam/model.go +++ b/service/iam/model.go @@ -329,7 +329,7 @@ type Group struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks group ID - Id string `json:"id,omitempty" url:"-"` + Id string `json:"id,omitempty"` Members []ComplexValue `json:"members,omitempty"` // Container for the group identifier. Workspace local versus account. @@ -1231,7 +1231,7 @@ type ServicePrincipal struct { Groups []ComplexValue `json:"groups,omitempty"` // Databricks service principal ID. - Id string `json:"id,omitempty"` + Id string `json:"id,omitempty" url:"-"` // Corresponds to AWS instance profile/arn role. Roles []ComplexValue `json:"roles,omitempty"` // The schema of the List response. diff --git a/service/jobs/model.go b/service/jobs/model.go index 684be1160..ee21965ac 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -1656,6 +1656,23 @@ type NotebookTask struct { // Notebook is located in Databricks workspace. * `GIT`: Notebook is located // in cloud Git provider. Source Source `json:"source,omitempty"` + // Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic + // SQL warehouses are NOT supported, please use serverless or pro SQL + // warehouses. + // + // Note that SQL warehouses only support SQL cells; if the notebook contains + // non-SQL cells, the run will fail. 
+ WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *NotebookTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NotebookTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type PauseStatus string @@ -3298,8 +3315,7 @@ type SqlTask struct { // If dashboard, indicates that this job must refresh a SQL dashboard. Dashboard *SqlTaskDashboard `json:"dashboard,omitempty"` // If file, indicates that this job runs a SQL file in a remote Git - // repository. Only one SQL statement is supported in a file. Multiple SQL - // statements separated by semicolons (;) are not permitted. + // repository. File *SqlTaskFile `json:"file,omitempty"` // Parameters to be used for each run of this job. The SQL alert task does // not support custom parameters. @@ -3452,6 +3468,9 @@ type SubmitRun struct { PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` // The queue settings of the one-time run. Queue *QueueSettings `json:"queue,omitempty"` + // Specifies the user or service principal that the job runs as. If not + // specified, the job runs as the user who submits the request. + RunAs *JobRunAs `json:"run_as,omitempty"` // If run_job_task, indicates that this task must execute another job. RunJobTask *RunJobTask `json:"run_job_task,omitempty"` // An optional name for the run. The default value is `Untitled`. diff --git a/service/pipelines/model.go b/service/pipelines/model.go index 83646234f..14e089034 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -27,6 +27,8 @@ type CreatePipeline struct { Configuration map[string]string `json:"configuration,omitempty"` // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous bool `json:"continuous,omitempty"` + // Deployment type of this pipeline. + Deployment *PipelineDeployment `json:"deployment,omitempty"` // Whether the pipeline is in Development mode. 
Defaults to false. Development bool `json:"development,omitempty"` @@ -37,6 +39,9 @@ type CreatePipeline struct { Filters *Filters `json:"filters,omitempty"` // Unique identifier for this pipeline. Id string `json:"id,omitempty"` + // The configuration for a managed ingestion pipeline. These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. + IngestionDefinition *ManagedIngestionPipelineDefinition `json:"ingestion_definition,omitempty"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `json:"libraries,omitempty"` // Friendly identifier for this pipeline. @@ -126,6 +131,33 @@ type DeletePipelineRequest struct { type DeletePipelineResponse struct { } +// The deployment method that manages the pipeline: - BUNDLE: The pipeline is +// managed by a Databricks Asset Bundle. +type DeploymentKind string + +const DeploymentKindBundle DeploymentKind = `BUNDLE` + +// String representation for [fmt.Print] +func (f *DeploymentKind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeploymentKind) Set(v string) error { + switch v { + case `BUNDLE`: + *f = DeploymentKind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BUNDLE"`, v) + } +} + +// Type always returns DeploymentKind to satisfy [pflag.Value] interface +func (f *DeploymentKind) Type() string { + return "DeploymentKind" +} + type EditPipeline struct { // If false, deployment will fail if name has changed and conflicts the name // of another pipeline. @@ -144,6 +176,8 @@ type EditPipeline struct { Configuration map[string]string `json:"configuration,omitempty"` // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous bool `json:"continuous,omitempty"` + // Deployment type of this pipeline. + Deployment *PipelineDeployment `json:"deployment,omitempty"` // Whether the pipeline is in Development mode. Defaults to false. 
Development bool `json:"development,omitempty"` // Pipeline product edition. @@ -156,6 +190,9 @@ type EditPipeline struct { Filters *Filters `json:"filters,omitempty"` // Unique identifier for this pipeline. Id string `json:"id,omitempty"` + // The configuration for a managed ingestion pipeline. These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. + IngestionDefinition *ManagedIngestionPipelineDefinition `json:"ingestion_definition,omitempty"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `json:"libraries,omitempty"` // Friendly identifier for this pipeline. @@ -362,6 +399,13 @@ type GetUpdateResponse struct { Update *UpdateInfo `json:"update,omitempty"` } +type IngestionConfig struct { + // Select tables from a specific source schema. + Schema *SchemaSpec `json:"schema,omitempty"` + // Select tables from a specific source table. + Table *TableSpec `json:"table,omitempty"` +} + // List pipeline events type ListPipelineEventsRequest struct { // Criteria to select a subset of results, expressed using a SQL-like @@ -513,6 +557,30 @@ func (s ListUpdatesResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type ManagedIngestionPipelineDefinition struct { + // Immutable. The Unity Catalog connection this ingestion pipeline uses to + // communicate with the source. Specify either ingestion_gateway_id or + // connection_name. + ConnectionName string `json:"connection_name,omitempty"` + // Immutable. Identifier for the ingestion gateway used by this ingestion + // pipeline to communicate with the source. Specify either + // ingestion_gateway_id or connection_name. + IngestionGatewayId string `json:"ingestion_gateway_id,omitempty"` + // Required. Settings specifying tables to replicate and the destination for + // the replicated tables. 
+ Objects []IngestionConfig `json:"objects,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ManagedIngestionPipelineDefinition) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ManagedIngestionPipelineDefinition) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type ManualTrigger struct { } @@ -819,6 +887,23 @@ func (f *PipelineClusterAutoscaleMode) Type() string { return "PipelineClusterAutoscaleMode" } +type PipelineDeployment struct { + // The deployment method that manages the pipeline. + Kind DeploymentKind `json:"kind,omitempty"` + // The path to the file containing metadata about the deployment. + MetadataFilePath string `json:"metadata_file_path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineDeployment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineDeployment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type PipelineEvent struct { // Information about an error captured by the event. Error *ErrorDetail `json:"error,omitempty"` @@ -978,6 +1063,8 @@ type PipelineSpec struct { Configuration map[string]string `json:"configuration,omitempty"` // Whether the pipeline is continuous or triggered. This replaces `trigger`. Continuous bool `json:"continuous,omitempty"` + // Deployment type of this pipeline. + Deployment *PipelineDeployment `json:"deployment,omitempty"` // Whether the pipeline is in Development mode. Defaults to false. Development bool `json:"development,omitempty"` // Pipeline product edition. @@ -986,6 +1073,9 @@ type PipelineSpec struct { Filters *Filters `json:"filters,omitempty"` // Unique identifier for this pipeline. Id string `json:"id,omitempty"` + // The configuration for a managed ingestion pipeline. These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. 
+ IngestionDefinition *ManagedIngestionPipelineDefinition `json:"ingestion_definition,omitempty"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `json:"libraries,omitempty"` // Friendly identifier for this pipeline. @@ -1093,6 +1183,30 @@ type PipelineTrigger struct { Manual *ManualTrigger `json:"manual,omitempty"` } +type SchemaSpec struct { + // Required. Destination catalog to store tables. + DestinationCatalog string `json:"destination_catalog,omitempty"` + // Required. Destination schema to store tables in. Tables with the same + // name as the source tables are created in this destination schema. The + // pipeline fails if a table with the same name already exists. + DestinationSchema string `json:"destination_schema,omitempty"` + // The source catalog name. Might be optional depending on the type of + // source. + SourceCatalog string `json:"source_catalog,omitempty"` + // Required. Schema name in the source database. + SourceSchema string `json:"source_schema,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SchemaSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SchemaSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type Sequencing struct { // A sequence number, unique and increasing within the control plane. ControlPlaneSeqNo int `json:"control_plane_seq_no,omitempty"` @@ -1238,6 +1352,33 @@ type StopRequest struct { PipelineId string `json:"-" url:"-"` } +type TableSpec struct { + // Required. Destination catalog to store table. + DestinationCatalog string `json:"destination_catalog,omitempty"` + // Required. Destination schema to store table. + DestinationSchema string `json:"destination_schema,omitempty"` + // Optional. Destination table name. The pipeline fails if a table with that + // name already exists. If not set, the source table name is used. + DestinationTable string `json:"destination_table,omitempty"` + // Source catalog name.
Might be optional depending on the type of source. + SourceCatalog string `json:"source_catalog,omitempty"` + // Schema name in the source database. Might be optional depending on the + // type of source. + SourceSchema string `json:"source_schema,omitempty"` + // Required. Table name in the source database. + SourceTable string `json:"source_table,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TableSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TableSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type UpdateInfo struct { // What triggered this update. Cause UpdateInfoCause `json:"cause,omitempty"` diff --git a/service/serving/api.go b/service/serving/api.go index 2ca7b738a..7d67daed0 100755 --- a/service/serving/api.go +++ b/service/serving/api.go @@ -247,6 +247,20 @@ type ServingEndpointsInterface interface { // Retrieves the details for a single serving endpoint. GetByName(ctx context.Context, name string) (*ServingEndpointDetailed, error) + // Get the schema for a serving endpoint. + // + // Get the query schema of the serving endpoint in OpenAPI format. The schema + // contains information for the supported paths, input and output format and + // datatypes. + GetOpenApi(ctx context.Context, request GetOpenApiRequest) error + + // Get the schema for a serving endpoint. + // + // Get the query schema of the serving endpoint in OpenAPI format. The schema + // contains information for the supported paths, input and output format and + // datatypes. + GetOpenApiByName(ctx context.Context, name string) error + // Get serving endpoint permission levels. // // Gets the permission levels that a user can have on an object. @@ -535,6 +549,26 @@ func (a *ServingEndpointsAPI) GetByName(ctx context.Context, name string) (*Serv }) } +// Get the schema for a serving endpoint. +// +// Get the query schema of the serving endpoint in OpenAPI format. 
The schema +// contains information for the supported paths, input and output format and +// datatypes. +func (a *ServingEndpointsAPI) GetOpenApi(ctx context.Context, request GetOpenApiRequest) error { + return a.impl.GetOpenApi(ctx, request) +} + +// Get the schema for a serving endpoint. +// +// Get the query schema of the serving endpoint in OpenAPI format. The schema +// contains information for the supported paths, input and output format and +// datatypes. +func (a *ServingEndpointsAPI) GetOpenApiByName(ctx context.Context, name string) error { + return a.impl.GetOpenApi(ctx, GetOpenApiRequest{ + Name: name, + }) +} + // Get serving endpoint permission levels. // // Gets the permission levels that a user can have on an object. diff --git a/service/serving/impl.go b/service/serving/impl.go index 07d94cb7b..01d13fa20 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -120,6 +120,15 @@ func (a *servingEndpointsImpl) Get(ctx context.Context, request GetServingEndpoi return &servingEndpointDetailed, err } +func (a *servingEndpointsImpl) GetOpenApi(ctx context.Context, request GetOpenApiRequest) error { + var getOpenApiResponse GetOpenApiResponse + path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/openapi", request.Name) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getOpenApiResponse) + return err +} + func (a *servingEndpointsImpl) GetPermissionLevels(ctx context.Context, request GetServingEndpointPermissionLevelsRequest) (*GetServingEndpointPermissionLevelsResponse, error) { var getServingEndpointPermissionLevelsResponse GetServingEndpointPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/serving-endpoints/%v/permissionLevels", request.ServingEndpointId) diff --git a/service/serving/interface.go b/service/serving/interface.go index fdb884910..39d9b2613 100755 --- a/service/serving/interface.go +++ b/service/serving/interface.go 
@@ -79,6 +79,13 @@ type ServingEndpointsService interface { // Retrieves the details for a single serving endpoint. Get(ctx context.Context, request GetServingEndpointRequest) (*ServingEndpointDetailed, error) + // Get the schema for a serving endpoint. + // + // Get the query schema of the serving endpoint in OpenAPI format. The + // schema contains information for the supported paths, input and output + // format and datatypes. + GetOpenApi(ctx context.Context, request GetOpenApiRequest) error + // Get serving endpoint permission levels. // // Gets the permission levels that a user can have on an object. diff --git a/service/serving/model.go b/service/serving/model.go index 27a845c41..9dd4eaecd 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -759,6 +759,18 @@ type GetEventsRequest struct { Name string `json:"-" url:"-"` } +// Get the schema for a serving endpoint +type GetOpenApiRequest struct { + // The name of the serving endpoint that the served model belongs to. This + // field is required. + Name string `json:"-" url:"-"` +} + +// The response is an OpenAPI spec in JSON format that typically includes fields +// like openapi, info, servers and paths, etc. +type GetOpenApiResponse struct { +} + // Get serving endpoint permission levels type GetServingEndpointPermissionLevelsRequest struct { // The serving endpoint for which to get or manage permissions. 
diff --git a/service/sharing/model.go b/service/sharing/model.go index 5ff54968b..62a22a170 100755 --- a/service/sharing/model.go +++ b/service/sharing/model.go @@ -683,6 +683,8 @@ func (f *PartitionValueOp) Type() string { type Privilege string +const PrivilegeAccess Privilege = `ACCESS` + const PrivilegeAllPrivileges Privilege = `ALL_PRIVILEGES` const PrivilegeApplyTag Privilege = `APPLY_TAG` @@ -715,6 +717,8 @@ const PrivilegeCreateRecipient Privilege = `CREATE_RECIPIENT` const PrivilegeCreateSchema Privilege = `CREATE_SCHEMA` +const PrivilegeCreateServiceCredential Privilege = `CREATE_SERVICE_CREDENTIAL` + const PrivilegeCreateShare Privilege = `CREATE_SHARE` const PrivilegeCreateStorageCredential Privilege = `CREATE_STORAGE_CREDENTIAL` @@ -773,11 +777,11 @@ func (f *Privilege) String() string { // Set raw string value and validate it against allowed values func (f *Privilege) Set(v string) error { switch v { - case `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, 
`CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: *f = Privilege(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", 
"USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) } } diff --git a/service/sql/model.go b/service/sql/model.go index 702b8d1af..f69dd35f3 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -287,6 +287,7 @@ func (s ChannelInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Name of the channel type ChannelName string const ChannelNameChannelNameCurrent ChannelName = `CHANNEL_NAME_CURRENT` diff --git a/workspace_client.go b/workspace_client.go index 1411f0ba1..b0a425c65 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -403,8 +403,8 @@ type WorkspaceClient struct { // // To make third-party or custom code available to notebooks and jobs // running on your clusters, you can install a library. Libraries can be - // written in Python, Java, Scala, and R. You can upload Java, Scala, and - // Python libraries and point to external packages in PyPI, Maven, and CRAN + // written in Python, Java, Scala, and R. You can upload Python, Java, Scala + // and R libraries and point to external packages in PyPI, Maven, and CRAN // repositories. // // Cluster libraries can be used by all notebooks running on a cluster. You @@ -412,10 +412,6 @@ type WorkspaceClient struct { // PyPI or Maven, using a previously installed workspace library, or using // an init script. // - // When you install a library on a cluster, a notebook already attached to - // that cluster will not immediately see the new library. You must first - // detach and then reattach the notebook to the cluster. - // // When you uninstall a library from a cluster, the library is removed only // when you restart the cluster. Until you restart the cluster, the status // of the uninstalled library appears as Uninstall pending restart.