From c55a2dcdb6695fd5a78412eb91741729cef4eac0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 20 Aug 2024 14:08:15 +0200 Subject: [PATCH 1/6] Added support for creating all-purpose clusters --- bundle/config/mutator/apply_presets.go | 17 +++- .../mutator/process_target_mode_test.go | 10 ++ bundle/config/mutator/run_as_test.go | 2 + bundle/config/resources.go | 1 + bundle/config/resources/clusters.go | 39 ++++++++ bundle/deploy/terraform/convert.go | 22 +++++ bundle/deploy/terraform/convert_test.go | 56 +++++++++++ bundle/deploy/terraform/interpolate.go | 2 + bundle/deploy/terraform/interpolate_test.go | 2 + .../deploy/terraform/tfdyn/convert_cluster.go | 46 +++++++++ .../terraform/tfdyn/convert_cluster_test.go | 96 +++++++++++++++++++ bundle/tests/clusters/databricks.yml | 36 +++++++ bundle/tests/clusters_test.go | 36 +++++++ .../databricks_template_schema.json | 16 ++++ .../template/databricks.yml.tmpl | 18 ++++ .../clusters copy/template/hello_world.py | 1 + .../clusters/databricks_template_schema.json | 16 ++++ .../clusters/template/databricks.yml.tmpl | 27 ++++++ .../bundles/clusters/template/hello_world.py | 1 + internal/bundle/clusters_test.go | 51 ++++++++++ 20 files changed, 493 insertions(+), 2 deletions(-) create mode 100644 bundle/config/resources/clusters.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_cluster.go create mode 100644 bundle/deploy/terraform/tfdyn/convert_cluster_test.go create mode 100644 bundle/tests/clusters/databricks.yml create mode 100644 bundle/tests/clusters_test.go create mode 100644 internal/bundle/bundles/clusters copy/databricks_template_schema.json create mode 100644 internal/bundle/bundles/clusters copy/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/clusters copy/template/hello_world.py create mode 100644 internal/bundle/bundles/clusters/databricks_template_schema.json create mode 100644 internal/bundle/bundles/clusters/template/databricks.yml.tmpl create mode 100644 internal/bundle/bundles/clusters/template/hello_world.py create mode 100644 internal/bundle/clusters_test.go diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index 42e6ab95f5..583ba0031f 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -155,12 +155,25 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos // Schemas: Prefix for i := range r.Schemas { - prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" - r.Schemas[i].Name = prefix + r.Schemas[i].Name + schemaPrefix := "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" + r.Schemas[i].Name = schemaPrefix + r.Schemas[i].Name // HTTP API for schemas doesn't yet support tags. It's only supported in // the Databricks UI and via the SQL API. 
} + // Clusters: Prefix, Tags + for _, c := range r.Clusters { + c.ClusterName = prefix + c.ClusterName + if c.CustomTags == nil { + c.CustomTags = make(map[string]string) + } + for _, tag := range tags { + if c.CustomTags[tag.Key] == "" { + c.CustomTags[tag.Key] = tag.Value + } + } + } + return nil } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 1c8671b4c5..326648e405 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -13,6 +13,7 @@ import ( "github.com/databricks/cli/libs/tags" sdkconfig "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" @@ -119,6 +120,9 @@ func mockBundle(mode config.Mode) *bundle.Bundle { Schemas: map[string]*resources.Schema{ "schema1": {CreateSchema: &catalog.CreateSchema{Name: "schema1"}}, }, + Clusters: map[string]*resources.Cluster{ + "cluster1": {ClusterSpec: &compute.ClusterSpec{ClusterName: "cluster1", SparkVersion: "13.2.x", NumWorkers: 1}}, + }, }, }, // Use AWS implementation for testing. @@ -177,6 +181,9 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Schema 1 assert.Equal(t, "dev_lennart_schema1", b.Config.Resources.Schemas["schema1"].Name) + + // Clusters + assert.Equal(t, "[dev lennart] cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName) } func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { @@ -271,6 +278,7 @@ func TestProcessTargetModeDefault(t *testing.T) { assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) + assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName) } func TestProcessTargetModeProduction(t *testing.T) { @@ -302,6 +310,7 @@ func TestProcessTargetModeProduction(t *testing.T) { b.Config.Resources.Experiments["experiment2"].Permissions = permissions b.Config.Resources.Models["model1"].Permissions = permissions b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions + b.Config.Resources.Clusters["cluster1"].Permissions = permissions diags = validateProductionMode(context.Background(), b, false) require.NoError(t, diags.Error()) @@ -312,6 +321,7 @@ func TestProcessTargetModeProduction(t *testing.T) { assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) + assert.Equal(t, "cluster1", b.Config.Resources.Clusters["cluster1"].ClusterName) } func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { diff --git a/bundle/config/mutator/run_as_test.go b/bundle/config/mutator/run_as_test.go index e6cef9ba45..abeea45d0a 100644 --- a/bundle/config/mutator/run_as_test.go +++ b/bundle/config/mutator/run_as_test.go @@ -32,6 +32,7 @@ func allResourceTypes(t *testing.T) []string { // the dyn library gives us the correct list of all resources 
supported. Please // also update this check when adding a new resource require.Equal(t, []string{ + "clusters", "experiments", "jobs", "model_serving_endpoints", @@ -133,6 +134,7 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) { // some point in the future. These resources are (implicitly) on the deny list, since // they are not on the allow list below. allowList := []string{ + "clusters", "jobs", "models", "registered_models", diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 22d69ffb53..a3afb7fc36 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -19,6 +19,7 @@ type Resources struct { RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"` QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"` Schemas map[string]*resources.Schema `json:"schemas,omitempty"` + Clusters map[string]*resources.Cluster `json:"clusters,omitempty"` } type ConfigResource interface { diff --git a/bundle/config/resources/clusters.go b/bundle/config/resources/clusters.go new file mode 100644 index 0000000000..6323456669 --- /dev/null +++ b/bundle/config/resources/clusters.go @@ -0,0 +1,39 @@ +package resources + +import ( + "context" + + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/marshal" + "github.com/databricks/databricks-sdk-go/service/compute" +) + +type Cluster struct { + ID string `json:"id,omitempty" bundle:"readonly"` + Permissions []Permission `json:"permissions,omitempty"` + ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` + + *compute.ClusterSpec +} + +func (s *Cluster) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Cluster) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +func (s *Cluster) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.Clusters.GetByClusterId(ctx, id) + if err != nil { + log.Debugf(ctx, "cluster %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *Cluster) TerraformResourceName() string { + return "databricks_cluster" +} diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index f13c241cee..5a548e3b50 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -231,6 +231,13 @@ func BundleToTerraform(config *config.Root) *schema.Root { tfroot.Resource.QualityMonitor[k] = &dst } + for k, src := range config.Resources.Clusters { + noResources = false + var dst schema.ResourceCluster + conv(src, &dst) + tfroot.Resource.Cluster[k] = &dst + } + // We explicitly set "resource" to nil to omit it from a JSON encoding. // This is required because the terraform CLI requires >= 1 resources defined // if the "resource" property is used in a .tf.json file. 
@@ -394,6 +401,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error { } cur.ID = instance.Attributes.ID config.Resources.Schemas[resource.Name] = cur + case "databricks_cluster": + if config.Resources.Clusters == nil { + config.Resources.Clusters = make(map[string]*resources.Cluster) + } + cur := config.Resources.Clusters[resource.Name] + if cur == nil { + cur = &resources.Cluster{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID + config.Resources.Clusters[resource.Name] = cur case "databricks_permissions": case "databricks_grants": // Ignore; no need to pull these back into the configuration. @@ -443,6 +460,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error { src.ModifiedStatus = resources.ModifiedStatusCreated } } + for _, src := range config.Resources.Clusters { + if src.ModifiedStatus == "" && src.ID == "" { + src.ModifiedStatus = resources.ModifiedStatusCreated + } + } return nil } diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index e4ef6114a9..4c6866d9d8 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -663,6 +663,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { {Attributes: stateInstanceAttributes{ID: "1"}}, }, }, + { + Type: "databricks_cluster", + Mode: "managed", + Name: "test_cluster", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, }, } err := TerraformToBundle(&tfState, &config) @@ -692,6 +700,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { assert.Equal(t, "1", config.Resources.Schemas["test_schema"].ID) assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Schemas["test_schema"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } @@ -754,6 +765,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, }, + Clusters: map[string]*resources.Cluster{ + "test_cluster": { + ClusterSpec: &compute.ClusterSpec{ + ClusterName: "test_cluster", + }, + }, + }, }, } var tfState = resourcesState{ @@ -786,6 +804,9 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { assert.Equal(t, "", config.Resources.Schemas["test_schema"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } @@ -888,6 +909,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, }, + Clusters: map[string]*resources.Cluster{ + "test_cluster": { + ClusterSpec: &compute.ClusterSpec{ + ClusterName: "test_cluster", + }, + }, + "test_cluster_new": { + ClusterSpec: &compute.ClusterSpec{ + ClusterName: "test_cluster_new", + }, + }, + }, }, } var tfState = resourcesState{ @@ -1020,6 +1053,22 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { {Attributes: stateInstanceAttributes{ID: "2"}}, }, }, + { + Type: "databricks_cluster", + Mode: "managed", + Name: "test_cluster", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_cluster", + Mode: "managed", + Name: 
"test_cluster_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, }, } err := TerraformToBundle(&tfState, &config) @@ -1081,6 +1130,13 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { assert.Equal(t, "", config.Resources.Schemas["test_schema_new"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Schemas["test_schema_new"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.Clusters["test_cluster"].ID) + assert.Equal(t, "", config.Resources.Clusters["test_cluster"].ModifiedStatus) + assert.Equal(t, "2", config.Resources.Clusters["test_cluster_old"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.Clusters["test_cluster_old"].ModifiedStatus) + assert.Equal(t, "", config.Resources.Clusters["test_cluster_new"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.Clusters["test_cluster_new"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index faa098e1cc..12894c6843 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -58,6 +58,8 @@ func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.D path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...) case dyn.Key("schemas"): path = dyn.NewPath(dyn.Key("databricks_schema")).Append(path[2:]...) + case dyn.Key("clusters"): + path = dyn.NewPath(dyn.Key("databricks_cluster")).Append(path[2:]...) default: // Trigger "key not found" for unknown resource types. return dyn.GetByPath(root, path) diff --git a/bundle/deploy/terraform/interpolate_test.go b/bundle/deploy/terraform/interpolate_test.go index 5ceb243bcd..630a904acd 100644 --- a/bundle/deploy/terraform/interpolate_test.go +++ b/bundle/deploy/terraform/interpolate_test.go @@ -31,6 +31,7 @@ func TestInterpolate(t *testing.T) { "other_model_serving": "${resources.model_serving_endpoints.other_model_serving.id}", "other_registered_model": "${resources.registered_models.other_registered_model.id}", "other_schema": "${resources.schemas.other_schema.id}", + "other_cluster": "${resources.clusters.other_cluster.id}", }, Tasks: []jobs.Task{ { @@ -67,6 +68,7 @@ func TestInterpolate(t *testing.T) { assert.Equal(t, "${databricks_model_serving.other_model_serving.id}", j.Tags["other_model_serving"]) assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"]) assert.Equal(t, "${databricks_schema.other_schema.id}", j.Tags["other_schema"]) + assert.Equal(t, "${databricks_cluster.other_cluster.id}", j.Tags["other_cluster"]) m := b.Config.Resources.Models["my_model"] assert.Equal(t, "my_model", m.Model.Name) diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster.go b/bundle/deploy/terraform/tfdyn/convert_cluster.go new file mode 100644 index 0000000000..30ee53f017 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_cluster.go @@ -0,0 +1,46 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go/service/compute" +) + +func convertClusterResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. 
+ vout, diags := convert.Normalize(compute.ClusterSpec{}, vin)
+ for _, diag := range diags {
+ log.Debugf(ctx, "cluster normalization diagnostic: %s", diag.Summary)
+ }
+
+ return vout, nil
+}
+
+type clusterConverter struct{}
+
+func (clusterConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
+ vout, err := convertClusterResource(ctx, vin)
+ if err != nil {
+ return err
+ }
+
+ // Add the converted resource to the output.
+ out.Cluster[key] = vout.AsAny()
+
+ // Configure permissions for this resource.
+ if permissions := convertPermissionsResource(ctx, vin); permissions != nil {
+ permissions.ClusterId = fmt.Sprintf("${databricks_cluster.%s.id}", key)
+ out.Permissions["cluster_"+key] = permissions
+ }
+
+ return nil
+}
+
+func init() {
+ registerConverter("clusters", clusterConverter{})
+}
diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go
new file mode 100644
index 0000000000..68872a421a
--- /dev/null
+++ b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go
@@ -0,0 +1,96 @@
+package tfdyn
+
+import (
+ "context"
+ "testing"
+
+ "github.com/databricks/cli/bundle/config/resources"
+ "github.com/databricks/cli/bundle/internal/tf/schema"
+ "github.com/databricks/cli/libs/dyn"
+ "github.com/databricks/cli/libs/dyn/convert"
+ "github.com/databricks/databricks-sdk-go/service/compute"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestConvertCluster(t *testing.T) {
+ var src = resources.Cluster{
+ ClusterSpec: &compute.ClusterSpec{
+ NumWorkers: 3,
+ SparkVersion: "13.3.x-scala2.12",
+ ClusterName: "cluster",
+ SparkConf: map[string]string{
+ "spark.executor.memory": "2g",
+ },
+ AwsAttributes: &compute.AwsAttributes{
+ Availability: "ON_DEMAND",
+ },
+ AzureAttributes: &compute.AzureAttributes{
+ Availability: "SPOT",
+ },
+ DataSecurityMode: "USER_ISOLATION",
+ NodeTypeId: "m5.xlarge",
+ Autoscale: &compute.AutoScale{
+ MinWorkers: 1,
+ MaxWorkers: 10,
+ },
+ },
+
+ Permissions: []resources.Permission{
+ {
+ Level: "CAN_RUN",
+ UserName: "jack@gmail.com",
+ },
+ {
+ Level: "CAN_MANAGE",
+ ServicePrincipalName: "sp",
+ },
+ },
+ }
+
+ vin, err := convert.FromTyped(src, dyn.NilValue)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+ out := schema.NewResources()
+ err = clusterConverter{}.Convert(ctx, "my_cluster", vin, out)
+ require.NoError(t, err)
+
+ cluster := out.Cluster["my_cluster"]
+ assert.Equal(t, map[string]any{
+ "num_workers": int64(3),
+ "spark_version": "13.3.x-scala2.12",
+ "cluster_name": "cluster",
+ "spark_conf": map[string]any{
+ "spark.executor.memory": "2g",
+ },
+ "aws_attributes": map[string]any{
+ "availability": "ON_DEMAND",
+ },
+ "azure_attributes": map[string]any{
+ "availability": "SPOT",
+ },
+ "data_security_mode": "USER_ISOLATION",
+ "node_type_id": "m5.xlarge",
+ "autoscale": map[string]any{
+ "min_workers": int64(1),
+ "max_workers": int64(10),
+ },
+ }, cluster)
+
+ // Assert equality on the permissions
+ assert.Equal(t, &schema.ResourcePermissions{
+ ClusterId: "${databricks_cluster.my_cluster.id}",
+ AccessControl: []schema.ResourcePermissionsAccessControl{
+ {
+ PermissionLevel: "CAN_RUN",
+ UserName: "jack@gmail.com",
+ },
+ {
+ PermissionLevel: "CAN_MANAGE",
+ ServicePrincipalName: "sp",
+ },
+ },
+ }, out.Permissions["cluster_my_cluster"])
+
+}
diff --git a/bundle/tests/clusters/databricks.yml b/bundle/tests/clusters/databricks.yml
new file mode 100644
index 0000000000..1074462a63
---
/dev/null +++ b/bundle/tests/clusters/databricks.yml @@ -0,0 +1,36 @@ +bundle: + name: clusters + +workspace: + host: https://acme.cloud.databricks.com/ + +resources: + clusters: + foo: + cluster_name: foo + num_workers: 2 + node_type_id: "i3.xlarge" + autoscale: + min_workers: 2 + max_workers: 7 + spark_version: "13.3.x-scala2.12" + spark_conf: + "spark.executor.memory": "2g" + +targets: + default: + + development: + resources: + clusters: + foo: + cluster_name: foo-override + num_workers: 3 + node_type_id: "m5.xlarge" + autoscale: + min_workers: 1 + max_workers: 3 + spark_version: "15.2.x-scala2.12" + spark_conf: + "spark.executor.memory": "4g" + "spark.executor.memory2": "4g" diff --git a/bundle/tests/clusters_test.go b/bundle/tests/clusters_test.go new file mode 100644 index 0000000000..def8a2a314 --- /dev/null +++ b/bundle/tests/clusters_test.go @@ -0,0 +1,36 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestClusters(t *testing.T) { + b := load(t, "./clusters") + assert.Equal(t, "clusters", b.Config.Bundle.Name) + + cluster := b.Config.Resources.Clusters["foo"] + assert.Equal(t, "foo", cluster.ClusterName) + assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion) + assert.Equal(t, "i3.xlarge", cluster.NodeTypeId) + assert.Equal(t, 2, cluster.NumWorkers) + assert.Equal(t, "2g", cluster.SparkConf["spark.executor.memory"]) + assert.Equal(t, 2, cluster.Autoscale.MinWorkers) + assert.Equal(t, 7, cluster.Autoscale.MaxWorkers) +} + +func TestClustersOverride(t *testing.T) { + b := loadTarget(t, "./clusters", "development") + assert.Equal(t, "clusters", b.Config.Bundle.Name) + + cluster := b.Config.Resources.Clusters["foo"] + assert.Equal(t, "foo-override", cluster.ClusterName) + assert.Equal(t, "15.2.x-scala2.12", cluster.SparkVersion) + assert.Equal(t, "m5.xlarge", cluster.NodeTypeId) + assert.Equal(t, 3, cluster.NumWorkers) + assert.Equal(t, "4g", cluster.SparkConf["spark.executor.memory"]) + assert.Equal(t, "4g", cluster.SparkConf["spark.executor.memory2"]) + assert.Equal(t, 1, cluster.Autoscale.MinWorkers) + assert.Equal(t, 3, cluster.Autoscale.MaxWorkers) +} diff --git a/internal/bundle/bundles/clusters copy/databricks_template_schema.json b/internal/bundle/bundles/clusters copy/databricks_template_schema.json new file mode 100644 index 0000000000..c1c5cf12eb --- /dev/null +++ b/internal/bundle/bundles/clusters copy/databricks_template_schema.json @@ -0,0 +1,16 @@ +{ + "properties": { + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" + } + } +} diff --git a/internal/bundle/bundles/clusters copy/template/databricks.yml.tmpl b/internal/bundle/bundles/clusters copy/template/databricks.yml.tmpl new file mode 100644 index 0000000000..a88cbd30ee --- /dev/null +++ b/internal/bundle/bundles/clusters copy/template/databricks.yml.tmpl @@ -0,0 +1,18 @@ +bundle: + name: basic + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + +resources: + jobs: + foo: + name: test-job-basic-{{.unique_id}} + tasks: + - task_key: my_notebook_task + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + spark_python_task: + python_file: ./hello_world.py diff --git a/internal/bundle/bundles/clusters copy/template/hello_world.py b/internal/bundle/bundles/clusters 
copy/template/hello_world.py
new file mode 100644
index 0000000000..f301245e24
--- /dev/null
+++ b/internal/bundle/bundles/clusters copy/template/hello_world.py
@@ -0,0 +1 @@
+print("Hello World!")
diff --git a/internal/bundle/bundles/clusters/databricks_template_schema.json b/internal/bundle/bundles/clusters/databricks_template_schema.json
new file mode 100644
index 0000000000..c1c5cf12eb
--- /dev/null
+++ b/internal/bundle/bundles/clusters/databricks_template_schema.json
@@ -0,0 +1,16 @@
+{
+ "properties": {
+ "unique_id": {
+ "type": "string",
+ "description": "Unique ID for job name"
+ },
+ "spark_version": {
+ "type": "string",
+ "description": "Spark version used for job cluster"
+ },
+ "node_type_id": {
+ "type": "string",
+ "description": "Node type id for job cluster"
+ }
+ }
+}
diff --git a/internal/bundle/bundles/clusters/template/databricks.yml.tmpl b/internal/bundle/bundles/clusters/template/databricks.yml.tmpl
new file mode 100644
index 0000000000..d820d7d264
--- /dev/null
+++ b/internal/bundle/bundles/clusters/template/databricks.yml.tmpl
@@ -0,0 +1,27 @@
+bundle:
+ name: basic
+
+workspace:
+ root_path: "~/.bundle/{{.unique_id}}"
+
+resources:
+ clusters:
+ test_cluster:
+ cluster_name: "test-cluster-{{.unique_id}}"
+ spark_version: "{{.spark_version}}"
+ node_type_id: "{{.node_type_id}}"
+ num_workers: 2
+ autoscale:
+ min_workers: 2
+ max_workers: 6
+ spark_conf:
+ "spark.executor.memory": "2g"
+
+ jobs:
+ foo:
+ name: test-job-with-cluster-{{.unique_id}}
+ tasks:
+ - task_key: my_notebook_task
+ existing_cluster_id: "${resources.clusters.test_cluster.cluster_id}"
+ spark_python_task:
+ python_file: ./hello_world.py
diff --git a/internal/bundle/bundles/clusters/template/hello_world.py b/internal/bundle/bundles/clusters/template/hello_world.py
new file mode 100644
index 0000000000..f301245e24
--- /dev/null
+++ b/internal/bundle/bundles/clusters/template/hello_world.py
@@ -0,0 +1 @@
+print("Hello World!")
diff --git a/internal/bundle/clusters_test.go b/internal/bundle/clusters_test.go
new file mode 100644
index 0000000000..cb6536be41
--- /dev/null
+++ b/internal/bundle/clusters_test.go
@@ -0,0 +1,51 @@
+package bundle
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/databricks/cli/internal"
+ "github.com/databricks/cli/internal/acc"
+ "github.com/databricks/cli/libs/env"
+ "github.com/databricks/databricks-sdk-go/service/compute"
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAccDeployBundleWithCluster(t *testing.T) {
+ ctx, wt := acc.WorkspaceTest(t)
+
+ nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV"))
+ uniqueId := uuid.New().String()
+ root, err := initTestTemplate(t, ctx, "clusters", map[string]any{
+ "unique_id": uniqueId,
+ "node_type_id": nodeTypeId,
+ "spark_version": defaultSparkVersion,
+ })
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ err = destroyBundle(t, ctx, root)
+ require.NoError(t, err)
+
+ cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
+ if err != nil {
+ require.ErrorContains(t, err, "does not exist")
+ } else {
+ require.Contains(t, []compute.State{compute.StateTerminated, compute.StateTerminating}, cluster.State)
+ }
+
+ })
+
+ err = deployBundle(t, ctx, root)
+ require.NoError(t, err)
+
+ // Cluster should exist after bundle deployment
+ cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
+ require.NoError(t, err)
+ require.NotNil(t, cluster)
+
+ out, err := runResource(t, ctx, root, "foo")
+
require.NoError(t, err) + require.Contains(t, out, "Hello World!") +} From 7366c699af0185b5c076bb0d194c225372473210 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 20 Aug 2024 14:56:45 +0200 Subject: [PATCH 2/6] skip e2e test on aws --- internal/bundle/clusters_test.go | 5 +++++ internal/testutil/cloud.go | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/internal/bundle/clusters_test.go b/internal/bundle/clusters_test.go index cb6536be41..a961f3ea8f 100644 --- a/internal/bundle/clusters_test.go +++ b/internal/bundle/clusters_test.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/google/uuid" @@ -15,6 +16,10 @@ import ( func TestAccDeployBundleWithCluster(t *testing.T) { ctx, wt := acc.WorkspaceTest(t) + if testutil.IsAWSCloud(wt.T) { + t.Skip("Skipping test for AWS cloud because it is not permitted to create clusters") + } + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) uniqueId := uuid.New().String() root, err := initTestTemplate(t, ctx, "clusters", map[string]any{ diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go index e547069f38..ba5b75ecff 100644 --- a/internal/testutil/cloud.go +++ b/internal/testutil/cloud.go @@ -49,3 +49,7 @@ func GetCloud(t *testing.T) Cloud { } return -1 } + +func IsAWSCloud(t *testing.T) bool { + return GetCloud(t) == AWS +} From f667e127df14f482bd86a7a808ea772b2d8d4123 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 20 Aug 2024 16:00:30 +0200 Subject: [PATCH 3/6] deprecate compute_id in favor of cluster_id --- bundle/config/bundle.go | 7 +- bundle/config/mutator/compute_id_compat.go | 68 +++++++++++++++++++ .../config/mutator/compute_id_compate_test.go | 31 +++++++++ bundle/config/mutator/mutator.go | 1 + bundle/config/mutator/override_compute.go | 8 +-- .../config/mutator/override_compute_test.go | 4 +- bundle/config/root.go | 6 +- bundle/config/target.go | 7 +- cmd/bundle/deploy.go | 11 ++- .../databricks_template_schema.json | 16 ----- .../template/databricks.yml.tmpl | 18 ----- .../clusters copy/template/hello_world.py | 1 - 12 files changed, 127 insertions(+), 51 deletions(-) create mode 100644 bundle/config/mutator/compute_id_compat.go create mode 100644 bundle/config/mutator/compute_id_compate_test.go delete mode 100644 internal/bundle/bundles/clusters copy/databricks_template_schema.json delete mode 100644 internal/bundle/bundles/clusters copy/template/databricks.yml.tmpl delete mode 100644 internal/bundle/bundles/clusters copy/template/hello_world.py diff --git a/bundle/config/bundle.go b/bundle/config/bundle.go index 78648dfd7f..f533c4d184 100644 --- a/bundle/config/bundle.go +++ b/bundle/config/bundle.go @@ -38,8 +38,11 @@ type Bundle struct { // Annotated readonly as this should be set at the target level. Mode Mode `json:"mode,omitempty" bundle:"readonly"` - // Overrides the compute used for jobs and other supported assets. - ComputeID string `json:"compute_id,omitempty"` + // DEPRECATED: Overrides the compute used for jobs and other supported assets. + ComputeId string `json:"compute_id,omitempty"` + + // Overrides the cluster used for jobs and other supported assets. 
+ ClusterId string `json:"cluster_id,omitempty"` // Deployment section specifies deployment related configuration for bundle Deployment Deployment `json:"deployment,omitempty"` diff --git a/bundle/config/mutator/compute_id_compat.go b/bundle/config/mutator/compute_id_compat.go new file mode 100644 index 0000000000..eabfc9aa72 --- /dev/null +++ b/bundle/config/mutator/compute_id_compat.go @@ -0,0 +1,68 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type computeIdToClusterId struct{} + +func ComputeIdToClusterId() bundle.Mutator { + return &computeIdToClusterId{} +} + +func (m *computeIdToClusterId) Name() string { + return "ComputeIdToClusterId" +} + +func (m *computeIdToClusterId) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + // If the "compute_id" key is not set, just skip + if b.Config.Bundle.ComputeId == "" { + return nil + } + + var diags diag.Diagnostics + + // The "compute_id" key is set; rewrite it to "cluster_id". + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + computeId, err := dyn.Get(v, "bundle.compute_id") + if err != nil { + return v, err + } + + if computeId.Kind() != dyn.KindInvalid { + p := dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")) + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: "compute_id is deprecated, please use cluster_id instead", + Locations: computeId.Locations(), + Paths: []dyn.Path{p}, + }) + + nv, err := dyn.Set(v, "bundle.cluster_id", computeId) + if err != nil { + return dyn.InvalidValue, err + } + // Drop the "compute_id" key. + return dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + switch len(p) { + case 0, 1: + return v, nil + case 2: + if p[1] == dyn.Key("compute_id") { + return v, dyn.ErrDrop + } + } + return v, dyn.ErrSkip + }) + } + + return v, nil + }) + + diags = diags.Extend(diag.FromErr(err)) + return diags +} diff --git a/bundle/config/mutator/compute_id_compate_test.go b/bundle/config/mutator/compute_id_compate_test.go new file mode 100644 index 0000000000..f6a8b7eb08 --- /dev/null +++ b/bundle/config/mutator/compute_id_compate_test.go @@ -0,0 +1,31 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/libs/diag" + "github.com/stretchr/testify/assert" +) + +func TestComputeIdToClusterId(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + ComputeId: "compute-id", + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId()) + assert.NoError(t, diags.Error()) + assert.Equal(t, "compute-id", b.Config.Bundle.ClusterId) + assert.Empty(t, b.Config.Bundle.ComputeId) + + assert.Len(t, diags, 1) + assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary) + assert.Equal(t, diag.Warning, diags[0].Severity) +} diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index 0458beff44..faf50ae6e3 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -23,6 +23,7 @@ func DefaultMutators() []bundle.Mutator { VerifyCliVersion(), EnvironmentsToTargets(), + ComputeIdToClusterId(), InitializeVariables(), DefineDefaultTarget(), LoadGitDetails(), diff --git a/bundle/config/mutator/override_compute.go 
b/bundle/config/mutator/override_compute.go index 73fbad364e..5700cdf261 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -39,22 +39,22 @@ func overrideJobCompute(j *resources.Job, compute string) { func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Bundle.Mode != config.Development { - if b.Config.Bundle.ComputeID != "" { + if b.Config.Bundle.ClusterId != "" { return diag.Errorf("cannot override compute for an target that does not use 'mode: development'") } return nil } if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" { - b.Config.Bundle.ComputeID = v + b.Config.Bundle.ClusterId = v } - if b.Config.Bundle.ComputeID == "" { + if b.Config.Bundle.ClusterId == "" { return nil } r := b.Config.Resources for i := range r.Jobs { - overrideJobCompute(r.Jobs[i], b.Config.Bundle.ComputeID) + overrideJobCompute(r.Jobs[i], b.Config.Bundle.ClusterId) } return nil diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 152ee543ea..369447d7e2 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -20,7 +20,7 @@ func TestOverrideDevelopment(t *testing.T) { Config: config.Root{ Bundle: config.Bundle{ Mode: config.Development, - ComputeID: "newClusterID", + ClusterId: "newClusterID", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -144,7 +144,7 @@ func TestOverrideProduction(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ - ComputeID: "newClusterID", + ClusterId: "newClusterID", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ diff --git a/bundle/config/root.go b/bundle/config/root.go index 86dc33921d..91ab467799 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -366,9 +366,9 @@ func (r *Root) MergeTargetOverrides(name string) error { } } - // Merge `compute_id`. This field must be overwritten if set, not merged. - if v := target.Get("compute_id"); v.Kind() != dyn.KindInvalid { - root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v) + // Merge `cluster_id`. This field must be overwritten if set, not merged. + if v := target.Get("cluster_id"); v.Kind() != dyn.KindInvalid { + root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("cluster_id")), v) if err != nil { return err } diff --git a/bundle/config/target.go b/bundle/config/target.go index a2ef4d7356..cfc81c7318 100644 --- a/bundle/config/target.go +++ b/bundle/config/target.go @@ -24,8 +24,11 @@ type Target struct { // name prefix of deployed resources. Presets Presets `json:"presets,omitempty"` - // Overrides the compute used for jobs and other supported assets. - ComputeID string `json:"compute_id,omitempty"` + // DEPRECATED: Overrides the compute used for jobs and other supported assets. + ComputeId string `json:"compute_id,omitempty"` + + // Overrides the cluster used for jobs and other supported assets. 
+ ClusterId string `json:"cluster_id,omitempty"` Bundle *Bundle `json:"bundle,omitempty"` diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 1166875ab3..0c9eb623ca 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -23,13 +23,15 @@ func newDeployCommand() *cobra.Command { var force bool var forceLock bool var failOnActiveRuns bool - var computeID string + var clusterId string var autoApprove bool cmd.Flags().BoolVar(&force, "force", false, "Force-override Git branch validation.") cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") cmd.Flags().BoolVar(&failOnActiveRuns, "fail-on-active-runs", false, "Fail if there are running jobs or pipelines in the deployment.") - cmd.Flags().StringVarP(&computeID, "compute-id", "c", "", "Override compute in the deployment with the given compute ID.") + cmd.Flags().StringVar(&clusterId, "compute-id", "", "Override cluster in the deployment with the given compute ID.") + cmd.Flags().StringVarP(&clusterId, "cluster-id", "c", "", "Override cluster in the deployment with the given cluster ID.") cmd.Flags().BoolVar(&autoApprove, "auto-approve", false, "Skip interactive approvals that might be required for deployment.") + cmd.Flags().MarkDeprecated("compute-id", "use --cluster-id instead") cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -42,7 +44,10 @@ func newDeployCommand() *cobra.Command { b.AutoApprove = autoApprove if cmd.Flag("compute-id").Changed { - b.Config.Bundle.ComputeID = computeID + b.Config.Bundle.ClusterId = clusterId + } + if cmd.Flag("cluster-id").Changed { + b.Config.Bundle.ClusterId = clusterId } if cmd.Flag("fail-on-active-runs").Changed { b.Config.Bundle.Deployment.FailOnActiveRuns = failOnActiveRuns diff --git a/internal/bundle/bundles/clusters copy/databricks_template_schema.json b/internal/bundle/bundles/clusters copy/databricks_template_schema.json deleted file mode 100644 index c1c5cf12eb..0000000000 --- a/internal/bundle/bundles/clusters copy/databricks_template_schema.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "properties": { - "unique_id": { - "type": "string", - "description": "Unique ID for job name" - }, - "spark_version": { - "type": "string", - "description": "Spark version used for job cluster" - }, - "node_type_id": { - "type": "string", - "description": "Node type id for job cluster" - } - } -} diff --git a/internal/bundle/bundles/clusters copy/template/databricks.yml.tmpl b/internal/bundle/bundles/clusters copy/template/databricks.yml.tmpl deleted file mode 100644 index a88cbd30ee..0000000000 --- a/internal/bundle/bundles/clusters copy/template/databricks.yml.tmpl +++ /dev/null @@ -1,18 +0,0 @@ -bundle: - name: basic - -workspace: - root_path: "~/.bundle/{{.unique_id}}" - -resources: - jobs: - foo: - name: test-job-basic-{{.unique_id}} - tasks: - - task_key: my_notebook_task - new_cluster: - num_workers: 1 - spark_version: "{{.spark_version}}" - node_type_id: "{{.node_type_id}}" - spark_python_task: - python_file: ./hello_world.py diff --git a/internal/bundle/bundles/clusters copy/template/hello_world.py b/internal/bundle/bundles/clusters copy/template/hello_world.py deleted file mode 100644 index f301245e24..0000000000 --- a/internal/bundle/bundles/clusters copy/template/hello_world.py +++ /dev/null @@ -1 +0,0 @@ -print("Hello World!") From 9783b958b988222f21a83aa024ee00983576fabe Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 6 Sep 2024 16:19:04 +0200 Subject: [PATCH 4/6] fixes for tags --- 
bundle/config/mutator/apply_presets.go | 6 ++++-- .../bundle/bundles/clusters/template/databricks.yml.tmpl | 3 --- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index 583ba0031f..2527d6d4a2 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -168,8 +168,10 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos c.CustomTags = make(map[string]string) } for _, tag := range tags { - if c.CustomTags[tag.Key] == "" { - c.CustomTags[tag.Key] = tag.Value + normalisedKey := b.Tagging.NormalizeKey(tag.Key) + normalisedValue := b.Tagging.NormalizeValue(tag.Value) + if c.CustomTags[normalisedKey] == "" { + c.CustomTags[normalisedKey] = normalisedValue } } } diff --git a/internal/bundle/bundles/clusters/template/databricks.yml.tmpl b/internal/bundle/bundles/clusters/template/databricks.yml.tmpl index d820d7d264..e0d6320a39 100644 --- a/internal/bundle/bundles/clusters/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/clusters/template/databricks.yml.tmpl @@ -11,9 +11,6 @@ resources: spark_version: "{{.spark_version}}" node_type_id: "{{.node_type_id}}" num_workers: 2 - autoscale: - min_workers: 2 - max_workers: 6 spark_conf: "spark.executor.memory": "2g" From bb58bd134190a9b34781732a27e14d4a5b1ca903 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 20 Sep 2024 11:57:44 +0200 Subject: [PATCH 5/6] fixes --- bundle/config/mutator/apply_presets.go | 2 +- bundle/config/mutator/compute_id_compat.go | 84 +++++++++++-------- .../config/mutator/compute_id_compate_test.go | 21 +++++ .../deploy/terraform/tfdyn/convert_cluster.go | 6 ++ 4 files changed, 77 insertions(+), 36 deletions(-) diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index 87e7cf4bf1..27af82e54c 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -169,7 +169,7 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos for _, tag := range tags { normalisedKey := b.Tagging.NormalizeKey(tag.Key) normalisedValue := b.Tagging.NormalizeValue(tag.Value) - if c.CustomTags[normalisedKey] == "" { + if _, ok := c.CustomTags[normalisedKey]; !ok { c.CustomTags[normalisedKey] = normalisedValue } } diff --git a/bundle/config/mutator/compute_id_compat.go b/bundle/config/mutator/compute_id_compat.go index eabfc9aa72..fb095d952a 100644 --- a/bundle/config/mutator/compute_id_compat.go +++ b/bundle/config/mutator/compute_id_compat.go @@ -19,50 +19,64 @@ func (m *computeIdToClusterId) Name() string { } func (m *computeIdToClusterId) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - // If the "compute_id" key is not set, just skip - if b.Config.Bundle.ComputeId == "" { - return nil - } - var diags diag.Diagnostics // The "compute_id" key is set; rewrite it to "cluster_id". 
err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - computeId, err := dyn.Get(v, "bundle.compute_id") - if err != nil { - return v, err - } + v, d := rewriteComputeIdToClusterId(v, dyn.NewPath(dyn.Key("bundle"))) + diags = diags.Extend(d) - if computeId.Kind() != dyn.KindInvalid { - p := dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")) - diags = diags.Append(diag.Diagnostic{ - Severity: diag.Warning, - Summary: "compute_id is deprecated, please use cluster_id instead", - Locations: computeId.Locations(), - Paths: []dyn.Path{p}, - }) + // Check if the "compute_id" key is set in any target overrides. + return dyn.MapByPattern(v, dyn.NewPattern(dyn.Key("targets"), dyn.AnyKey()), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + v, d := rewriteComputeIdToClusterId(v, dyn.Path{}) + diags = diags.Extend(d) + return v, nil + }) + }) - nv, err := dyn.Set(v, "bundle.cluster_id", computeId) - if err != nil { - return dyn.InvalidValue, err - } - // Drop the "compute_id" key. - return dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { - switch len(p) { - case 0, 1: - return v, nil - case 2: - if p[1] == dyn.Key("compute_id") { - return v, dyn.ErrDrop - } - } - return v, dyn.ErrSkip - }) - } + diags = diags.Extend(diag.FromErr(err)) + return diags +} + +func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagnostics) { + var diags diag.Diagnostics + computeIdPath := p.Append(dyn.Key("compute_id")) + computeId, err := dyn.GetByPath(v, computeIdPath) + // If the "compute_id" key is not set, we don't need to do anything. + if err != nil { return v, nil + } + + if computeId.Kind() == dyn.KindInvalid { + return v, nil + } + + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: "compute_id is deprecated, please use cluster_id instead", + Locations: computeId.Locations(), + Paths: []dyn.Path{computeIdPath}, + }) + + clusterIdPath := p.Append(dyn.Key("cluster_id")) + nv, err := dyn.SetByPath(v, clusterIdPath, computeId) + if err != nil { + return dyn.InvalidValue, diag.FromErr(err) + } + // Drop the "compute_id" key. 
+ vout, err := dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + switch len(p) { + case 0, 1: + return v, nil + case 2: + if p[1] == dyn.Key("compute_id") { + return v, dyn.ErrDrop + } + } + return v, dyn.ErrSkip }) diags = diags.Extend(diag.FromErr(err)) - return diags + return vout, diags } diff --git a/bundle/config/mutator/compute_id_compate_test.go b/bundle/config/mutator/compute_id_compate_test.go index f6a8b7eb08..ff051ac492 100644 --- a/bundle/config/mutator/compute_id_compate_test.go +++ b/bundle/config/mutator/compute_id_compate_test.go @@ -29,3 +29,24 @@ func TestComputeIdToClusterId(t *testing.T) { assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary) assert.Equal(t, diag.Warning, diags[0].Severity) } + +func TestComputeIdToClusterIdInTargetOverride(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{ + "dev": { + ComputeId: "compute-id-dev", + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, bundle.Seq(mutator.ComputeIdToClusterId(), mutator.SelectTarget("dev"))) + assert.NoError(t, diags.Error()) + assert.Equal(t, "compute-id-dev", b.Config.Bundle.ClusterId) + assert.Empty(t, b.Config.Bundle.ComputeId) + + assert.Len(t, diags, 1) + assert.Equal(t, "compute_id is deprecated, please use cluster_id instead", diags[0].Summary) + assert.Equal(t, diag.Warning, diags[0].Severity) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster.go b/bundle/deploy/terraform/tfdyn/convert_cluster.go index 30ee53f017..f25f09ea8d 100644 --- a/bundle/deploy/terraform/tfdyn/convert_cluster.go +++ b/bundle/deploy/terraform/tfdyn/convert_cluster.go @@ -29,6 +29,12 @@ func (clusterConverter) Convert(ctx context.Context, key string, vin dyn.Value, return err } + // We always set no_wait as it allows DABs not to wait for cluster to be started. + vout, err = dyn.Set(vout, "no_wait", dyn.V(true)) + if err != nil { + return err + } + // Add the converted resource to the output. out.Cluster[key] = vout.AsAny() From 67557b7c84dd4a452de0e7c31a66a0eee230343d Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 23 Sep 2024 12:29:11 +0200 Subject: [PATCH 6/6] added and fixed tests --- bundle/config/mutator/compute_id_compat.go | 7 ++++++- bundle/config/mutator/compute_id_compate_test.go | 7 ++++++- bundle/deploy/terraform/tfdyn/convert_cluster_test.go | 1 + 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/bundle/config/mutator/compute_id_compat.go b/bundle/config/mutator/compute_id_compat.go index fb095d952a..3afe02e9e8 100644 --- a/bundle/config/mutator/compute_id_compat.go +++ b/bundle/config/mutator/compute_id_compat.go @@ -67,7 +67,12 @@ func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagn // Drop the "compute_id" key. 
vout, err := dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { switch len(p) { - case 0, 1: + case 0: + return v, nil + case 1: + if p[0] == dyn.Key("compute_id") { + return v, dyn.ErrDrop + } return v, nil case 2: if p[1] == dyn.Key("compute_id") { diff --git a/bundle/config/mutator/compute_id_compate_test.go b/bundle/config/mutator/compute_id_compate_test.go index ff051ac492..e59d37e39b 100644 --- a/bundle/config/mutator/compute_id_compate_test.go +++ b/bundle/config/mutator/compute_id_compate_test.go @@ -41,8 +41,13 @@ func TestComputeIdToClusterIdInTargetOverride(t *testing.T) { }, } - diags := bundle.Apply(context.Background(), b, bundle.Seq(mutator.ComputeIdToClusterId(), mutator.SelectTarget("dev"))) + diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId()) + assert.NoError(t, diags.Error()) + assert.Empty(t, b.Config.Targets["dev"].ComputeId) + + diags = diags.Extend(bundle.Apply(context.Background(), b, mutator.SelectTarget("dev"))) assert.NoError(t, diags.Error()) + assert.Equal(t, "compute-id-dev", b.Config.Bundle.ClusterId) assert.Empty(t, b.Config.Bundle.ComputeId) diff --git a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go index 68872a421a..e7d2542fdd 100644 --- a/bundle/deploy/terraform/tfdyn/convert_cluster_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_cluster_test.go @@ -71,6 +71,7 @@ func TestConvertCluster(t *testing.T) { "availability": "SPOT", }, "data_security_mode": "USER_ISOLATION", + "no_wait": true, "node_type_id": "m5.xlarge", "autoscale": map[string]any{ "min_workers": int64(1),