Skip to content

Commit

Permalink
Merge branch 'main' into feature/uc-sql-table-primary-and-foreign-key…
Browse files Browse the repository at this point in the history
…s-support
  • Loading branch information
hshahconsulting authored Nov 4, 2024
2 parents ff5aaf3 + 28b8f49 commit a689c02
Show file tree
Hide file tree
Showing 60 changed files with 2,608 additions and 737 deletions.
2 changes: 1 addition & 1 deletion .codegen/_openapi_sha
Original file line number Diff line number Diff line change
@@ -1 +1 @@
cf9c61453990df0f9453670f2fe68e1b128647a2
25b2478e5a18c888f0d423249abde5499dc58424
1 change: 1 addition & 0 deletions .codegen/model.go.tmpl
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ func (newState *{{.PascalName}}) SyncEffectiveFieldsDuringRead(existingState {{.
{{- if .Entity.IsFloat64}}{{$type = "Float64"}}{{end}}
{{- if .Entity.IsInt}}{{$type = "Int64"}}{{end}}
{{- if .Entity.Enum}}{{$type = "String"}}{{end}}
newState.Effective{{.PascalName}} = existingState.Effective{{.PascalName}}
if existingState.Effective{{.PascalName}}.Value{{$type}}() == newState.{{.PascalName}}.Value{{$type}}() {
newState.{{.PascalName}} = existingState.{{.PascalName}}
}
Expand Down
56 changes: 56 additions & 0 deletions .github/workflows/external-message.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
name: PR Comment

# WARNING:
# THIS WORKFLOW ALWAYS RUNS FOR EXTERNAL CONTRIBUTORS WITHOUT ANY APPROVAL.
# THIS WORKFLOW RUNS FROM MAIN BRANCH, NOT FROM THE PR BRANCH.
# DO NOT PULL THE PR OR EXECUTE ANY CODE FROM THE PR.

on:
pull_request_target:
types: [opened, reopened, synchronize]
branches:
- main

jobs:
comment-on-pr:
runs-on: ubuntu-latest
permissions:
pull-requests: write

steps:
- uses: actions/checkout@v4

- name: Delete old comments
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Delete previous comment if it exists
previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
--jq '.[] | select(.body | startswith("<!-- INTEGRATION_TESTS_MANUAL -->")) | .id')
echo "Previous comment IDs: $previous_comment_ids"
# Iterate over each comment ID and delete the comment
if [ ! -z "$previous_comment_ids" ]; then
echo "$previous_comment_ids" | while read -r comment_id; do
echo "Deleting comment with ID: $comment_id"
gh api "repos/${{ github.repository }}/issues/comments/$comment_id" -X DELETE
done
fi
- name: Comment on PR
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
run: |
gh pr comment ${{ github.event.pull_request.number }} --body \
"<!-- INTEGRATION_TESTS_MANUAL -->
If integration tests don't run automatically, an authorized user can run them manually by following the instructions below:
Trigger:
[go/deco-tests-run/terraform](https://go/deco-tests-run/terraform)
Inputs:
* PR number: ${{github.event.pull_request.number}}
* Commit SHA: \`${{ env.COMMIT_SHA }}\`
Checks will be approved automatically on success.
"
21 changes: 20 additions & 1 deletion .github/workflows/integration-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,29 @@ on:


jobs:
check-token:
name: Check secrets access
runs-on: ubuntu-latest
environment: "test-trigger-is"
outputs:
has_token: ${{ steps.set-token-status.outputs.has_token }}
steps:
- name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set
id: set-token-status
run: |
if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then
echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets."
echo "::set-output name=has_token::false"
else
echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets."
echo "::set-output name=has_token::true"
fi
trigger-tests:
if: github.event_name == 'pull_request'
name: Trigger Tests
runs-on: ubuntu-latest
needs: check-token
if: github.event_name == 'pull_request' && needs.check-token.outputs.has_token == 'true'
environment: "test-trigger-is"

steps:
Expand Down
25 changes: 25 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,30 @@
# Version changelog

## [Release] Release v1.56.0

### Bug Fixes

* Recreate missing system schema ([#4068](https://github.com/databricks/terraform-provider-databricks/pull/4068)).
* Tolerate `databricks_permissions` resources for SQL warehouses with `/warehouses/...` IDs ([#4158](https://github.com/databricks/terraform-provider-databricks/pull/4158)).


### Documentation

* Fix `databricks_grant` regarding metastore_id description ([#4164](https://github.com/databricks/terraform-provider-databricks/pull/4164)).


### Internal Changes

* Automatically trigger integration tests on PR ([#4149](https://github.com/databricks/terraform-provider-databricks/pull/4149)).


### Exporter

* **Breaking change** Use new query and alert resources instead of legacy resources ([#4150](https://github.com/databricks/terraform-provider-databricks/pull/4150)).
* Improve exporting of `databricks_pipeline` resources ([#4142](https://github.com/databricks/terraform-provider-databricks/pull/4142)).
* Improve reliability of `Emit` function ([#4163](https://github.com/databricks/terraform-provider-databricks/pull/4163)).


## [Release] Release v1.55.0

### New Features and Improvements
Expand Down
31 changes: 4 additions & 27 deletions catalog/resource_online_table.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,29 +16,6 @@ import (

const onlineTableDefaultProvisionTimeout = 90 * time.Minute

func waitForOnlineTableCreation(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error {
return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError {
endpoint, err := w.OnlineTables.GetByName(ctx, onlineTableName)
if err != nil {
return retry.NonRetryableError(err)
}
if endpoint.Status == nil {
return retry.RetryableError(fmt.Errorf("online table status is not available yet"))
}
switch endpoint.Status.DetailedState {
case catalog.OnlineTableStateOnline, catalog.OnlineTableStateOnlineContinuousUpdate,
catalog.OnlineTableStateOnlineNoPendingUpdate, catalog.OnlineTableStateOnlineTriggeredUpdate:
return nil

// does catalog.OnlineTableStateOffline mean that it's failed?
case catalog.OnlineTableStateOfflineFailed, catalog.OnlineTableStateOnlinePipelineFailed:
return retry.NonRetryableError(fmt.Errorf("online table status returned %s for online table: %s",
endpoint.Status.DetailedState.String(), onlineTableName))
}
return retry.RetryableError(fmt.Errorf("online table %s is still pending", onlineTableName))
})
}

func waitForOnlineTableDeletion(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error {
return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError {
_, err := w.OnlineTables.GetByName(ctx, onlineTableName)
Expand Down Expand Up @@ -75,17 +52,17 @@ func ResourceOnlineTable() common.Resource {
if err != nil {
return err
}
var req catalog.CreateOnlineTableRequest
common.DataToStructPointer(d, s, &req)
res, err := w.OnlineTables.Create(ctx, req)
var table catalog.OnlineTable
common.DataToStructPointer(d, s, &table)
res, err := w.OnlineTables.Create(ctx, catalog.CreateOnlineTableRequest{Table: &table})
if err != nil {
return err
}
// Note: We should set the id right after creation and before waiting for online table to be available.
// If the resource creation timeout is exceeded while waiting for the online table to be ready, this ensures the online table is persisted in the state.
d.SetId(res.Name)
// this should be specified in the API Spec - filed a ticket to add it
err = waitForOnlineTableCreation(w, ctx, res.Name)
_, err = res.GetWithTimeout(onlineTableDefaultProvisionTimeout)
if err != nil {
return err
}
Expand Down
66 changes: 42 additions & 24 deletions catalog/resource_online_table_test.go
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
package catalog

import (
"errors"
"fmt"
"testing"
"time"

"github.com/databricks/databricks-sdk-go/apierr"
"github.com/databricks/databricks-sdk-go/experimental/mocks"
Expand Down Expand Up @@ -47,6 +49,13 @@ func TestOnlineTableCreate(t *testing.T) {
PrimaryKeyColumns: []string{"id"},
},
}
otStatusNotSetWait := &catalog.WaitGetOnlineTableActive[catalog.OnlineTable]{
Response: otStatusNotSet,
Name: "main.default.online_table",
Poll: func(d time.Duration, f func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) {
return otStatusOnline, nil
},
}
// otStatusUnknown := &catalog.OnlineTable{
// Name: "main.default.online_table",
// Spec: &catalog.OnlineTableSpec{
Expand All @@ -60,16 +69,15 @@ func TestOnlineTableCreate(t *testing.T) {
MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) {
e := w.GetMockOnlineTablesAPI().EXPECT()
e.Create(mock.Anything, catalog.CreateOnlineTableRequest{
Name: "main.default.online_table",
Spec: &catalog.OnlineTableSpec{
RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{},
SourceTableFullName: "main.default.test",
PrimaryKeyColumns: []string{"id"},
Table: &catalog.OnlineTable{
Name: "main.default.online_table",
Spec: &catalog.OnlineTableSpec{
RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{},
SourceTableFullName: "main.default.test",
PrimaryKeyColumns: []string{"id"},
},
},
}).Return(otStatusNotSet, nil)
// TODO: how to emulate the status change
// e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusNotSet, nil)
// e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusUnknown, nil)
}).Return(otStatusNotSetWait, nil)
e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusOnline, nil)
},
Resource: ResourceOnlineTable(),
Expand All @@ -85,11 +93,13 @@ func TestOnlineTableCreate_ErrorImmediately(t *testing.T) {
MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) {
e := w.GetMockOnlineTablesAPI().EXPECT()
e.Create(mock.Anything, catalog.CreateOnlineTableRequest{
Name: "main.default.online_table",
Spec: &catalog.OnlineTableSpec{
RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{},
SourceTableFullName: "main.default.test",
PrimaryKeyColumns: []string{"id"},
Table: &catalog.OnlineTable{
Name: "main.default.online_table",
Spec: &catalog.OnlineTableSpec{
RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{},
SourceTableFullName: "main.default.test",
PrimaryKeyColumns: []string{"id"},
},
},
}).Return(nil, fmt.Errorf("error!"))
},
Expand All @@ -100,33 +110,41 @@ func TestOnlineTableCreate_ErrorImmediately(t *testing.T) {
}

func TestOnlineTableCreate_ErrorInWait(t *testing.T) {
otStatusError := &catalog.OnlineTable{
otStatusProvisioning := &catalog.OnlineTable{
Name: "main.default.online_table",
Spec: &catalog.OnlineTableSpec{
RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{},
SourceTableFullName: "main.default.test",
PrimaryKeyColumns: []string{"id"},
},
Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateOfflineFailed},
Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateProvisioning},
}
otStatusErrorWait := &catalog.WaitGetOnlineTableActive[catalog.OnlineTable]{
Response: otStatusProvisioning,
Name: "main.default.online_table",
Poll: func(d time.Duration, f func(*catalog.OnlineTable)) (*catalog.OnlineTable, error) {
return nil, errors.New("failed to reach ACTIVE, got OFFLINE_FAILED: error!")
},
}
d, err := qa.ResourceFixture{
MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) {
e := w.GetMockOnlineTablesAPI().EXPECT()
e.Create(mock.Anything, catalog.CreateOnlineTableRequest{
Name: "main.default.online_table",
Spec: &catalog.OnlineTableSpec{
RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{},
SourceTableFullName: "main.default.test",
PrimaryKeyColumns: []string{"id"},
Table: &catalog.OnlineTable{
Name: "main.default.online_table",
Spec: &catalog.OnlineTableSpec{
RunTriggered: &catalog.OnlineTableSpecTriggeredSchedulingPolicy{},
SourceTableFullName: "main.default.test",
PrimaryKeyColumns: []string{"id"},
},
},
}).Return(otStatusError, nil)
e.GetByName(mock.Anything, "main.default.online_table").Return(otStatusError, nil)
}).Return(otStatusErrorWait, nil)
},
Resource: ResourceOnlineTable(),
HCL: onlineTableHcl,
Create: true,
}.Apply(t)
qa.AssertErrorStartsWith(t, err, "online table status returned OFFLINE_FAILED for online table: main.default.online_table")
qa.AssertErrorStartsWith(t, err, "failed to reach ACTIVE, got OFFLINE_FAILED: error!")
assert.Equal(t, "main.default.online_table", d.Id())
}

Expand Down
29 changes: 21 additions & 8 deletions catalog/resource_sql_table.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
)

var MaxSqlExecWaitTimeout = 50
var optionPrefixes = []string{"option.", "spark.sql.dataSourceOptions."}

type SqlColumnInfo struct {
Name string `json:"name"`
Expand Down Expand Up @@ -170,7 +171,6 @@ type SqlTableInfo struct {
}

func (ti SqlTableInfo) CustomizeSchema(s *common.CustomizableSchema) *common.CustomizableSchema {

caseInsensitiveFields := []string{"name", "catalog_name", "schema_name"}
for _, field := range caseInsensitiveFields {
s.SchemaPath(field).SetCustomSuppressDiff(common.EqualFoldDiffSuppress)
Expand Down Expand Up @@ -743,18 +743,31 @@ func ResourceSqlTable() common.Resource {
// If the user specified a property but the value of that property has changed, that will appear
// as a change in the effective property/option. To cause a diff to be detected, we need to
// reset the effective property/option to the requested value.
userSpecifiedProperties := d.Get("properties").(map[string]interface{})
userSpecifiedOptions := d.Get("options").(map[string]interface{})
effectiveProperties := d.Get("effective_properties").(map[string]interface{})
diff := make(map[string]interface{})
userSpecifiedProperties := d.Get("properties").(map[string]any)
userSpecifiedOptions := d.Get("options").(map[string]any)
effectiveProperties := d.Get("effective_properties").(map[string]any)
diff := make(map[string]any)
for k, userSpecifiedValue := range userSpecifiedProperties {
if effectiveValue, ok := effectiveProperties[k]; !ok || effectiveValue != userSpecifiedValue {
diff[k] = userSpecifiedValue
}
}
for k, userSpecifiedValue := range userSpecifiedOptions {
if effectiveValue, ok := effectiveProperties["option."+k]; !ok || effectiveValue != userSpecifiedValue {
diff["option."+k] = userSpecifiedValue
for userOptName, userSpecifiedValue := range userSpecifiedOptions {
var found bool
var effectiveValue any
var effectOptName string
// If the option is not found, check if the user specified the option without the prefix
// i.e. if user specified `multiLine` for JSON, then backend returns `spark.sql.dataSourceOptions.multiLine`
for _, prefix := range optionPrefixes {
effectOptName = prefix + userOptName
if v, ok := effectiveProperties[effectOptName]; ok {
found = true
effectiveValue = v
break
}
}
if !found || effectiveValue != userSpecifiedValue {
diff[effectOptName] = userSpecifiedValue
}
}
if len(diff) > 0 {
Expand Down
7 changes: 5 additions & 2 deletions catalog/resource_sql_table_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1855,15 +1855,18 @@ func TestResourceSqlTable_Diff_ExistingResource(t *testing.T) {
}
options = {
"myopt" = "myval"
"multiLine" = "true"
}`,
map[string]string{
"properties.%": "1",
"properties.myprop": "myval",
"options.%": "1",
"options.%": "2",
"options.myopt": "myval",
"effective_properties.%": "2",
"options.multiLine": "true",
"effective_properties.%": "3",
"effective_properties.myprop": "myval",
"effective_properties.option.myopt": "myval",
"effective_properties.spark.sql.dataSourceOptions.multiLine": "true",
},
nil,
},
Expand Down
2 changes: 1 addition & 1 deletion clusters/clusters_api.go
Original file line number Diff line number Diff line change
Expand Up @@ -447,7 +447,7 @@ func (cluster Cluster) Validate() error {
if profile == "singleNode" && strings.HasPrefix(master, "local") && resourceClass == "SingleNode" {
return nil
}
return fmt.Errorf("NumWorkers could be 0 only for SingleNode clusters. See https://docs.databricks.com/clusters/single-node.html for more details")
return errors.New(numWorkerErr)
}

// TODO: Remove this once all the resources using clusters are migrated to Go SDK.
Expand Down
Loading

0 comments on commit a689c02

Please sign in to comment.