Skip to content

Commit

Permalink
[Internal] Remove unnecessary test
Browse files Browse the repository at this point in the history
  • Loading branch information
hectorcast-db committed Oct 28, 2024
1 parent dac9a9e commit acf757c
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 285 deletions.
144 changes: 0 additions & 144 deletions internal/clusters_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ import (
"github.com/databricks/databricks-sdk-go"
"github.com/databricks/databricks-sdk-go/retries"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/iam"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
Expand Down Expand Up @@ -89,146 +88,3 @@ func TestAccAwsInstanceProfiles(t *testing.T) {
require.NoError(t, err)
assert.True(t, len(all) >= 1)
}

// TestAccClustersApiIntegration exercises the full lifecycle of a cluster
// through the Clusters API against a live workspace: create, read, pin/unpin,
// edit, terminate, start, resize, restart, events, listing, and changing the
// cluster owner.
//
// The test is skipped unless TEST_INSTANCE_POOL_ID is set in the environment.
func TestAccClustersApiIntegration(t *testing.T) {
	ctx, w := workspaceTest(t)

	clusterName := RandomName("sdk-go-cluster-")

	// Select the latest LTS version without Photon
	latest, err := w.Clusters.SelectSparkVersion(ctx, compute.SparkVersionRequest{
		Latest:          true,
		LongTermSupport: true,
	})
	require.NoError(t, err)

	// Create cluster and wait for it to start properly
	clstr, err := w.Clusters.CreateAndWait(ctx, compute.CreateCluster{
		ClusterName:            clusterName,
		SparkVersion:           latest,
		InstancePoolId:         GetEnvOrSkipTest(t, "TEST_INSTANCE_POOL_ID"),
		AutoterminationMinutes: 15,
		NumWorkers:             1,
	})
	require.NoError(t, err)

	t.Cleanup(func() {
		// Permanently delete the cluster
		err := w.Clusters.PermanentDeleteByClusterId(ctx, clstr.ClusterId)
		require.NoError(t, err)
	})

	byId, err := w.Clusters.GetByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)
	assert.Equal(t, clusterName, byId.ClusterName)
	assert.Equal(t, compute.StateRunning, byId.State)

	// Pin the cluster in the list
	err = w.Clusters.PinByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// Unpin the cluster
	err = w.Clusters.UnpinByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// Edit the cluster: change auto-termination and number of workers
	_, err = w.Clusters.EditAndWait(ctx, compute.EditCluster{
		ClusterId:      clstr.ClusterId,
		SparkVersion:   latest,
		ClusterName:    clusterName,
		InstancePoolId: GetEnvOrSkipTest(t, "TEST_INSTANCE_POOL_ID"),

		// change auto-termination and number of workers
		AutoterminationMinutes: 10,
		NumWorkers:             2,
	})
	require.NoError(t, err)

	// Assert edit changes are reflected in the cluster
	byId, err = w.Clusters.GetByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)
	assert.Equal(t, 10, byId.AutoterminationMinutes)
	assert.Equal(t, 2, byId.NumWorkers)

	// Test getting the cluster by name
	byName, err := w.Clusters.GetByClusterName(ctx, byId.ClusterName)
	require.NoError(t, err)
	assert.Equal(t, byId.ClusterId, byName.ClusterId)

	// Terminate the cluster
	_, err = w.Clusters.DeleteByClusterIdAndWait(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// Assert that the cluster we've just deleted has Terminated state
	byId, err = w.Clusters.GetByClusterId(ctx, clstr.ClusterId)
	require.NoError(t, err)
	assert.Equal(t, compute.StateTerminated, byId.State)

	// Start cluster and wait until it's running again
	_, err = w.Clusters.StartByClusterIdAndWait(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// Resize the cluster back to 1 worker and wait till completion
	byId, err = w.Clusters.ResizeAndWait(ctx, compute.ResizeCluster{
		ClusterId:  clstr.ClusterId,
		NumWorkers: 1,
	})
	require.NoError(t, err)
	assert.Equal(t, 1, byId.NumWorkers)

	// Restart the cluster and wait for it to run again
	_, err = w.Clusters.RestartAndWait(ctx, compute.RestartCluster{
		ClusterId: clstr.ClusterId,
	})
	require.NoError(t, err)

	// Get events for the cluster and assert it's non-empty
	events, err := w.Clusters.EventsAll(ctx, compute.GetEvents{
		ClusterId: clstr.ClusterId,
	})
	require.NoError(t, err)
	assert.True(t, len(events) > 0)

	// List clusters in workspace and check ours is among them
	all, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{})
	require.NoError(t, err)
	foundCluster := false
	for _, info := range all {
		if info.ClusterName == clusterName {
			foundCluster = true
		}
	}
	assert.True(t, foundCluster)

	// Get cluster by name and assert it still exists
	clusterDetails, err := w.Clusters.GetByClusterName(ctx, clusterName)
	require.NoError(t, err)
	assert.Equal(t, clusterName, clusterDetails.ClusterName)

	// Create a temporary user to transfer cluster ownership to.
	otherOwner, err := w.Users.Create(ctx, iam.User{
		UserName: RandomEmail(),
	})
	require.NoError(t, err)
	defer func() {
		// Best-effort cleanup of the temporary user; a deletion failure
		// should not fail the test.
		_ = w.Users.DeleteById(ctx, otherOwner.Id)
	}()

	// terminate the cluster
	_, err = w.Clusters.DeleteByClusterIdAndWait(ctx, clstr.ClusterId)
	require.NoError(t, err)

	// cluster must be terminated to change the owner
	err = w.Clusters.ChangeOwner(ctx, compute.ChangeClusterOwner{
		ClusterId:     clstr.ClusterId,
		OwnerUsername: otherOwner.UserName,
	})
	require.NoError(t, err)

	// Listing node types verifies the workspace-level metadata endpoint.
	nodes, err := w.Clusters.ListNodeTypes(ctx)
	require.NoError(t, err)
	assert.True(t, len(nodes.NodeTypes) > 1)

	// Availability zones are only exposed on AWS workspaces.
	if w.Config.IsAws() {
		zones, err := w.Clusters.ListZones(ctx)
		require.NoError(t, err)
		assert.True(t, len(zones.Zones) > 1)
	}
}
141 changes: 0 additions & 141 deletions internal/jobs_test.go
Original file line number Diff line number Diff line change
@@ -1,156 +1,15 @@
package internal

import (
"encoding/base64"
"testing"

"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/databricks/databricks-sdk-go/service/workspace"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

// TestAccJobsApiFullIntegration exercises the Jobs API end to end against a
// live workspace: one-off submitted runs, job creation, run-now, run export,
// run listing, cancellation, repair, update, and reset.
//
// It relies on a shared running cluster and imports a small Python notebook
// that sleeps briefly and exits with the value "hello".
func TestAccJobsApiFullIntegration(t *testing.T) {
	ctx, w := workspaceTest(t)
	clusterId := sharedRunningCluster(t, ctx, w)
	notebookPath := myNotebookPath(t, w)

	// Import the trivial notebook used by every run in this test.
	err := w.Workspace.Import(ctx, workspace.Import{
		Path:      notebookPath,
		Overwrite: true,
		Format:    workspace.ImportFormatSource,
		Language:  workspace.LanguagePython,
		Content: base64.StdEncoding.EncodeToString([]byte(`
import time
time.sleep(10)
dbutils.notebook.exit('hello')`)),
	})
	require.NoError(t, err)

	// Submit a one-time run and wait for it to finish.
	run, err := w.Jobs.SubmitAndWait(ctx, jobs.SubmitRun{
		RunName: RandomName("go-sdk-SubmitAndWait-"),
		Tasks: []jobs.SubmitTask{{
			ExistingClusterId: clusterId,
			NotebookTask: &jobs.NotebookTask{
				NotebookPath: notebookPath,
			},
			TaskKey: RandomName(),
		}},
	})
	require.NoError(t, err)
	defer func() {
		// Best-effort cleanup; run deletion failure should not fail the test.
		_ = w.Jobs.DeleteRunByRunId(ctx, run.RunId)
	}()

	// The notebook exits with 'hello'; verify it surfaces in the run output.
	output, err := w.Jobs.GetRunOutputByRunId(ctx, run.Tasks[0].RunId)
	require.NoError(t, err)
	assert.Equal(t, "hello", output.NotebookOutput.Result)

	// Create a persistent job running the same notebook.
	createdJob, err := w.Jobs.Create(ctx, jobs.CreateJob{
		Name: RandomName("go-sdk-Create-"),
		Tasks: []jobs.Task{{
			Description:       "test",
			ExistingClusterId: clusterId,
			NotebookTask: &jobs.NotebookTask{
				NotebookPath: notebookPath,
			},
			TaskKey:        "test",
			TimeoutSeconds: 0,
		}},
	})
	require.NoError(t, err)
	defer func() {
		// Best-effort cleanup of the job itself.
		_ = w.Jobs.DeleteByJobId(ctx, createdJob.JobId)
	}()

	// Trigger the job and wait for its run to complete.
	runById, err := w.Jobs.RunNowAndWait(ctx, jobs.RunNow{
		JobId: createdJob.JobId,
	})
	require.NoError(t, err)
	assert.NotEmpty(t, runById.Tasks)

	// Export the notebook code view of the finished run.
	exportedView, err := w.Jobs.ExportRun(ctx, jobs.ExportRunRequest{
		RunId:         runById.Tasks[0].RunId,
		ViewsToExport: "CODE",
	})
	require.NoError(t, err)
	assert.NotEmpty(t, exportedView.Views)
	assert.Equal(t, jobs.ViewTypeNotebook, exportedView.Views[0].Type)
	assert.NotEmpty(t, exportedView.Views[0].Content)

	// Start another run without waiting, so there is something to cancel.
	_, err = w.Jobs.RunNow(ctx, jobs.RunNow{
		JobId: createdJob.JobId,
	})
	require.NoError(t, err)

	runList, err := w.Jobs.ListRunsAll(ctx, jobs.ListRunsRequest{
		JobId: createdJob.JobId,
	})
	require.NoError(t, err)
	assert.Equal(t, createdJob.JobId, runList[0].JobId)

	// Cancel everything in flight for this job.
	err = w.Jobs.CancelAllRuns(ctx, jobs.CancelAllRuns{
		JobId: createdJob.JobId,
	})
	require.NoError(t, err)

	// Start a fresh run, cancel it, then repair (re-run) its task.
	runNowResponse, err := w.Jobs.RunNow(ctx, jobs.RunNow{
		JobId: createdJob.JobId,
	})
	require.NoError(t, err)

	cancelledRun, err := w.Jobs.CancelRunAndWait(ctx, jobs.CancelRun{
		RunId: runNowResponse.Response.RunId,
	})
	require.NoError(t, err)

	repairedRun, err := w.Jobs.RepairRunAndWait(ctx, jobs.RepairRun{
		RerunTasks: []string{cancelledRun.Tasks[0].TaskKey},
		RunId:      runNowResponse.Response.RunId,
	})
	require.NoError(t, err)
	assert.GreaterOrEqual(t, len(repairedRun.Tasks), 1)

	// Update only touches the settings fields provided.
	newName := RandomName("updated")
	err = w.Jobs.Update(ctx, jobs.UpdateJob{
		JobId: createdJob.JobId,
		NewSettings: &jobs.JobSettings{
			Name:              newName,
			MaxConcurrentRuns: 5,
		},
	})
	require.NoError(t, err)

	byId, err := w.Jobs.GetByJobId(ctx, createdJob.JobId)
	require.NoError(t, err)

	assert.Equal(t, newName, byId.Settings.Name)
	assert.Equal(t, 5, byId.Settings.MaxConcurrentRuns)

	// Reset replaces the full settings object, unlike Update.
	newName = RandomName("updated-for-reset")
	err = w.Jobs.Reset(ctx, jobs.ResetJob{
		JobId: byId.JobId,
		NewSettings: jobs.JobSettings{
			Name:  newName,
			Tasks: byId.Settings.Tasks,
		},
	})
	require.NoError(t, err)

	byId, err = w.Jobs.GetByJobId(ctx, createdJob.JobId)
	require.NoError(t, err)
	assert.Equal(t, newName, byId.Settings.Name)

	// Look the job up by its (new) name.
	byName, err := w.Jobs.GetBySettingsName(ctx, newName)
	require.NoError(t, err)
	assert.Equal(t, byId.JobId, byName.JobId)

	// The job must appear in the workspace-wide listing.
	jobList, err := w.Jobs.ListAll(ctx, jobs.ListJobsRequest{
		ExpandTasks: false,
	})
	require.NoError(t, err)
	assert.True(t, len(jobList) >= 1)
}

func TestAccJobsGetCorrectErrorNoTranspile(t *testing.T) {
ctx, w := workspaceTest(t)
_, err := w.Jobs.GetByJobId(ctx, 123456789)
Expand Down

0 comments on commit acf757c

Please sign in to comment.