Skip to content

Commit

Permalink
[Internal] Remove unnecessary test
Browse files Browse the repository at this point in the history
  • Loading branch information
hectorcast-db committed Oct 28, 2024
1 parent dac9a9e commit 78d4296
Showing 1 changed file with 0 additions and 144 deletions.
144 changes: 0 additions & 144 deletions internal/clusters_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ import (
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/retries"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
Expand Down Expand Up @@ -89,146 +88,3 @@ func TestAccAwsInstanceProfiles(t *testing.T) {
require.NoError(t, err)
assert.True(t, len(all) >= 1)
}

// TestAccClustersApiIntegration exercises the full lifecycle of a cluster
// through the Clusters API: create, pin/unpin, edit, get by id/name,
// terminate, start, resize, restart, event listing, workspace listing,
// owner change, and node-type/zone listings. The test is skipped unless
// TEST_INSTANCE_POOL_ID is set in the environment.
func TestAccClustersApiIntegration(t *testing.T) {
	ctx, w := workspaceTest(t)

	// Read the instance pool once, up front: this skips the test before any
	// resources are created and guarantees create and edit use the same pool.
	instancePoolId := GetEnvOrSkipTest(t, "TEST_INSTANCE_POOL_ID")

	clusterName := RandomName("sdk-go-cluster-")

	// Select the latest LTS version without Photon
	latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
		Latest:          true,
		LongTermSupport: true,
	})
	require.NoError(t, err)

	// Create cluster and wait for it to start properly
	clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
		ClusterName:            clusterName,
		SparkVersion:           latest,
		InstancePoolId:         instancePoolId,
		AutoterminationMinutes: 15,
		NumWorkers:             1,
	})
	require.NoError(t, err)

	t.Cleanup(func() {
		// Permanently delete the cluster
		err := w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
		require.NoError(t, err)
	})

	byId, err := w.Clusters.GetByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)
	assert.Equal(t, clusterName, byId.ClusterName)
	assert.Equal(t, compute.StateRunning, byId.State)

	// Pin the cluster in the list
	err = w.Clusters.PinByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// Unpin the cluster
	err = w.Clusters.UnpinByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// Edit the cluster: change auto-termination and number of workers
	_, err = w.Clusters.EditAndWait(ctx, compute.EditCluster{
		ClusterId:      clstr.ClusterId,
		SparkVersion:   latest,
		ClusterName:    clusterName,
		InstancePoolId: instancePoolId,

		// change auto-termination and number of workers
		AutoterminationMinutes: 10,
		NumWorkers:             2,
	})
	require.NoError(t, err)

	// Assert edit changes are reflected in the cluster
	byId, err = w.Clusters.GetByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)
	assert.Equal(t, 10, byId.AutoterminationMinutes)
	assert.Equal(t, 2, byId.NumWorkers)

	// Test getting the cluster by name
	byName, err := w.Clusters.GetByClusterName(ctx, byId.ClusterName)
	require.NoError(t, err)
	assert.Equal(t, byId.ClusterId, byName.ClusterId)

	// Terminate the cluster
	_, err = w.Clusters.DeleteByClusterIdAndWait(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// Assert that the cluster we've just deleted has Terminated state
	// (expected value first, per testify convention).
	byId, err = w.Clusters.GetByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)
	assert.Equal(t, compute.StateTerminated, byId.State)

	// Start cluster and wait until it's running again
	_, err = w.Clusters.StartByClusterIdAndWait(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// Resize the cluster back to 1 worker and wait till completion
	byId, err = w.Clusters.ResizeAndWait(ctx, compute.ResizeCluster{
		ClusterId:  clstr.ClusterId,
		NumWorkers: 1,
	})
	require.NoError(t, err)
	assert.Equal(t, 1, byId.NumWorkers)

	// Restart the cluster and wait for it to run again
	_, err = w.Clusters.RestartAndWait(ctx, compute.RestartCluster{
		ClusterId: clstr.ClusterId,
	})
	require.NoError(t, err)

	// Get events for the cluster and assert it's non-empty
	events, err := w.Clusters.EventsAll(ctx, compute.GetEvents{
		ClusterId: clstr.ClusterId,
	})
	require.NoError(t, err)
	assert.True(t, len(events) > 0)

	// List clusters in workspace and verify ours is present
	all, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{})
	require.NoError(t, err)
	foundCluster := false
	for _, info := range all {
		if info.ClusterName == clusterName {
			foundCluster = true
		}
	}
	assert.True(t, foundCluster)

	// Get cluster by name and assert it still exists
	clusterDetails, err := w.Clusters.GetByClusterName(ctx, clusterName)
	require.NoError(t, err)
	assert.Equal(t, clusterName, clusterDetails.ClusterName)

	// Create a second user to hand the cluster over to.
	otherOwner, err := w.Users.Create(ctx, iam.User{
		UserName: RandomEmail(),
	})
	require.NoError(t, err)
	// Best-effort cleanup; the error is intentionally ignored because the
	// test outcome does not depend on user deletion succeeding.
	defer w.Users.DeleteById(ctx, otherOwner.Id)

	// terminate the cluster
	_, err = w.Clusters.DeleteByClusterIdAndWait(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// cluster must be terminated to change the owner
	err = w.Clusters.ChangeOwner(ctx, compute.ChangeClusterOwner{
		ClusterId:     clstr.ClusterId,
		OwnerUsername: otherOwner.UserName,
	})
	require.NoError(t, err)

	// The workspace should offer more than one node type.
	nodes, err := w.Clusters.ListNodeTypes(ctx)
	require.NoError(t, err)
	assert.True(t, len(nodes.NodeTypes) > 1)

	// Availability zones are only listable on AWS workspaces.
	if w.Config.IsAws() {
		zones, err := w.Clusters.ListZones(ctx)
		require.NoError(t, err)
		assert.True(t, len(zones.Zones) > 1)
	}
}

0 comments on commit 78d4296

Please sign in to comment.