From a41921c31ae1dbd48efb685b19d5554ed911cfab Mon Sep 17 00:00:00 2001 From: Koen van Zuijlen <8818390+kvanzuijlen@users.noreply.github.com> Date: Tue, 19 Sep 2023 20:46:46 +0200 Subject: [PATCH 01/36] Added member attribute to compute default sa datasource (#8767) --- .../data_source_google_compute_default_service_account.go | 7 +++++++ .../docs/d/compute_default_service_account.html.markdown | 2 ++ 2 files changed, 9 insertions(+) diff --git a/mmv1/third_party/terraform/services/compute/data_source_google_compute_default_service_account.go b/mmv1/third_party/terraform/services/compute/data_source_google_compute_default_service_account.go index 59264ef77c8b..7789eb53fe50 100644 --- a/mmv1/third_party/terraform/services/compute/data_source_google_compute_default_service_account.go +++ b/mmv1/third_party/terraform/services/compute/data_source_google_compute_default_service_account.go @@ -33,6 +33,10 @@ func DataSourceGoogleComputeDefaultServiceAccount() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "member": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -80,6 +84,9 @@ func dataSourceGoogleComputeDefaultServiceAccountRead(d *schema.ResourceData, me if err := d.Set("display_name", sa.DisplayName); err != nil { return fmt.Errorf("Error setting display_name: %s", err) } + if err := d.Set("member", "serviceAccount:"+sa.Email); err != nil { + return fmt.Errorf("Error setting member: %s", err) + } return nil } diff --git a/mmv1/third_party/terraform/website/docs/d/compute_default_service_account.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_default_service_account.html.markdown index 3edee46bdea2..655e1964dcdb 100644 --- a/mmv1/third_party/terraform/website/docs/d/compute_default_service_account.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/compute_default_service_account.html.markdown @@ -37,3 +37,5 @@ The following attributes are exported: * `name` - The fully-qualified name of the service account. * `display_name` - The display name for the service account. + +* `member` - The Identity of the service account in the form `serviceAccount:{email}`. This value is often used to refer to the service account in order to grant IAM permissions. 
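The new `member` output removes the need to assemble the `serviceAccount:{email}` string by hand when granting IAM roles. A minimal usage sketch follows; the project ID and role are illustrative placeholders, not part of the patch:

```hcl
data "google_compute_default_service_account" "default" {
}

# Grant the default compute service account a role using the new member output,
# instead of interpolating "serviceAccount:${data...email}" manually.
resource "google_project_iam_member" "default_sa_log_writer" {
  project = "my-project-id" # hypothetical project ID
  role    = "roles/logging.logWriter"
  member  = data.google_compute_default_service_account.default.member
}
```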
From b162a69839ae1e4d970ffd5e03977485d8f9ed4a Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Tue, 19 Sep 2023 20:11:36 +0100 Subject: [PATCH 02/36] Fix provider configuration code to handle Unknown values correctly (#8943) * Uncomment test cases for unknown values * Update test case names * Update unknown value test for `project`, make it pass * Update unknown value test for `access_token`, make `access_token` and `credentials` tests pass * Update unknown value tests for `region` and `zone`, make those tests pass * Update unknown value tests for `user_project_override`, make that test pass * Update unknown value test for `impersonate_service_account`, make that test pass * Update unknown value tests for `request_reason` and `request_timeout`, make those tests pass * Make unknown batching.send_after and batching.enable_batching values be set to same defaults as if they were null, update test * Update code to handle when the whole batching block is Unknown * Update the test function for `batching` unit tests to navigate how `GetBatchingConfig` is used by the code * Update code to handle null/unknown Scopes and ImpersonateServiceAccountDelegates values * Improve `impersonate_service_account_delegates` tests for unknown values * Add missing null test case for `batching` field * Add non-VCR acceptance test to assert handling of unknown values in provider config Only testing `credentials` currently. --- .../fwtransport/framework_config.go.erb | 51 +-- .../fwtransport/framework_config_test.go.erb | 350 ++++++++++-------- .../terraform/provider/provider_test.go.erb | 152 +++++++- 3 files changed, 378 insertions(+), 175 deletions(-) diff --git a/mmv1/third_party/terraform/fwtransport/framework_config.go.erb b/mmv1/third_party/terraform/fwtransport/framework_config.go.erb index 6c7d1ad64dc4..8a1bc8c7b6ba 100644 --- a/mmv1/third_party/terraform/fwtransport/framework_config.go.erb +++ b/mmv1/third_party/terraform/fwtransport/framework_config.go.erb @@ -188,7 +188,7 @@ func (p *FrameworkProviderConfig) HandleZeroValues(ctx context.Context, data *fw // HandleDefaults will handle all the defaults necessary in the provider func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmodels.ProviderModel, diags *diag.Diagnostics) { - if data.AccessToken.IsNull() && data.Credentials.IsNull() { + if (data.AccessToken.IsNull() || data.AccessToken.IsUnknown()) && (data.Credentials.IsNull() || data.Credentials.IsUnknown()) { credentials := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_CREDENTIALS", "GOOGLE_CLOUD_KEYFILE_JSON", @@ -208,11 +208,11 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo } } - if data.ImpersonateServiceAccount.IsNull() && os.Getenv("GOOGLE_IMPERSONATE_SERVICE_ACCOUNT") != "" { + if (data.ImpersonateServiceAccount.IsNull() || data.ImpersonateServiceAccount.IsUnknown()) && os.Getenv("GOOGLE_IMPERSONATE_SERVICE_ACCOUNT") != "" { data.ImpersonateServiceAccount = types.StringValue(os.Getenv("GOOGLE_IMPERSONATE_SERVICE_ACCOUNT")) } - if data.Project.IsNull() { + if data.Project.IsNull() || data.Project.IsUnknown() { project := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_PROJECT", "GOOGLE_CLOUD_PROJECT", @@ -228,7 +228,7 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.BillingProject = types.StringValue(os.Getenv("GOOGLE_BILLING_PROJECT")) } - if data.Region.IsNull() { + if data.Region.IsNull() || data.Region.IsUnknown() { region := 
transport_tpg.MultiEnvDefault([]string{ "GOOGLE_REGION", "GCLOUD_REGION", @@ -240,7 +240,7 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo } } - if data.Zone.IsNull() { + if data.Zone.IsNull() || data.Zone.IsUnknown() { zone := transport_tpg.MultiEnvDefault([]string{ "GOOGLE_ZONE", "GCLOUD_ZONE", @@ -261,7 +261,7 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo } } - if !data.Batching.IsNull() { + if !data.Batching.IsNull() && !data.Batching.IsUnknown() { var pbConfigs []fwmodels.ProviderBatching d := data.Batching.ElementsAs(ctx, &pbConfigs, true) diags.Append(d...) @@ -269,18 +269,18 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo return } - if pbConfigs[0].SendAfter.IsNull() { + if pbConfigs[0].SendAfter.IsNull() || pbConfigs[0].SendAfter.IsUnknown() { pbConfigs[0].SendAfter = types.StringValue("10s") } - if pbConfigs[0].EnableBatching.IsNull() { + if pbConfigs[0].EnableBatching.IsNull() || pbConfigs[0].EnableBatching.IsUnknown() { pbConfigs[0].EnableBatching = types.BoolValue(true) } data.Batching, d = types.ListValueFrom(ctx, types.ObjectType{}.WithAttributeTypes(fwmodels.ProviderBatchingAttributes), pbConfigs) } - if data.UserProjectOverride.IsNull() && os.Getenv("USER_PROJECT_OVERRIDE") != "" { + if (data.UserProjectOverride.IsNull() || data.UserProjectOverride.IsUnknown()) && os.Getenv("USER_PROJECT_OVERRIDE") != "" { override, err := strconv.ParseBool(os.Getenv("USER_PROJECT_OVERRIDE")) if err != nil { diags.AddError( @@ -289,11 +289,11 @@ func (p *FrameworkProviderConfig) HandleDefaults(ctx context.Context, data *fwmo data.UserProjectOverride = types.BoolValue(override) } - if data.RequestReason.IsNull() && os.Getenv("CLOUDSDK_CORE_REQUEST_REASON") != "" { + if (data.RequestReason.IsNull() || data.RequestReason.IsUnknown()) && os.Getenv("CLOUDSDK_CORE_REQUEST_REASON") != "" { data.RequestReason = types.StringValue(os.Getenv("CLOUDSDK_CORE_REQUEST_REASON")) } - if data.RequestTimeout.IsNull() { + if data.RequestTimeout.IsNull() || data.RequestTimeout.IsUnknown() { data.RequestTimeout = types.StringValue("120s") } @@ -595,7 +595,7 @@ func (p *FrameworkProviderConfig) logGoogleIdentities(ctx context.Context, data // a separate diagnostics here var d diag.Diagnostics - if data.ImpersonateServiceAccount.IsNull() { + if data.ImpersonateServiceAccount.IsNull() || data.ImpersonateServiceAccount.IsUnknown() { tokenSource := GetTokenSource(ctx, data, true, diags) if diags.HasError() { @@ -655,19 +655,23 @@ func GetCredentials(ctx context.Context, data fwmodels.ProviderModel, initialCre var clientScopes []string var delegates []string - d := data.Scopes.ElementsAs(ctx, &clientScopes, false) - diags.Append(d...) - if diags.HasError() { - return googleoauth.Credentials{} + if !data.Scopes.IsNull() && !data.Scopes.IsUnknown() { + d := data.Scopes.ElementsAs(ctx, &clientScopes, false) + diags.Append(d...) + if diags.HasError() { + return googleoauth.Credentials{} + } } - d = data.ImpersonateServiceAccountDelegates.ElementsAs(ctx, &delegates, false) - diags.Append(d...) - if diags.HasError() { - return googleoauth.Credentials{} + if !data.ImpersonateServiceAccountDelegates.IsNull() && !data.ImpersonateServiceAccountDelegates.IsUnknown() { + d := data.ImpersonateServiceAccountDelegates.ElementsAs(ctx, &delegates, false) + diags.Append(d...) 
+ if diags.HasError() { + return googleoauth.Credentials{} + } } - if !data.AccessToken.IsNull() { + if !data.AccessToken.IsNull() && !data.AccessToken.IsUnknown() { contents, _, err := verify.PathOrContents(data.AccessToken.ValueString()) if err != nil { diags.AddError("error loading access token", err.Error()) @@ -692,7 +696,7 @@ func GetCredentials(ctx context.Context, data fwmodels.ProviderModel, initialCre } } - if !data.Credentials.IsNull() { + if !data.Credentials.IsNull() && !data.Credentials.IsUnknown() { contents, _, err := verify.PathOrContents(data.Credentials.ValueString()) if err != nil { diags.AddError(fmt.Sprintf("error loading credentials: %s", err), err.Error()) @@ -751,7 +755,8 @@ func GetBatchingConfig(ctx context.Context, data types.List, diags *diag.Diagnos EnableBatching: true, } - if data.IsNull() { + // Handle if entire batching block is null/unknown + if data.IsNull() || data.IsUnknown() { return bc } diff --git a/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb b/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb index 5569da181fc8..37c11069bd51 100644 --- a/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb +++ b/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb @@ -120,14 +120,16 @@ func TestFrameworkProvider_LoadAndValidateFramework_project(t *testing.T) { ExpectedConfigStructValue: types.StringValue("project-from-GOOGLE_PROJECT"), }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when project is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // ConfigValues: fwmodels.ProviderModel{ - // Project: types.StringUnknown(), - // }, - // ExpectedDataModelValue: types.StringNull(), - // ExpectedConfigStructValue: types.StringNull(), - // }, + "when project is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_PROJECT": "project-from-GOOGLE_PROJECT", + }, + ExpectedDataModelValue: types.StringValue("project-from-GOOGLE_PROJECT"), + ExpectedConfigStructValue: types.StringValue("project-from-GOOGLE_PROJECT"), + }, } for tn, tc := range cases { @@ -333,62 +335,61 @@ func TestFrameworkProvider_LoadAndValidateFramework_credentials(t *testing.T) { } } -// TODO(SarahFrench) make this test pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 -// func TestFrameworkProvider_LoadAndValidateFramework_credentials_unknown(t *testing.T) { -// // This test case is kept separate from other credentials tests, as it requires comparing -// // error messages returned by two different error states: -// // - When credentials = Null -// // - When credentials = Unknown - -// t.Run("when project is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)", func(t *testing.T) { - -// // Arrange -// acctest.UnsetTestProviderConfigEnvs(t) - -// ctx := context.Background() -// tfVersion := "foobar" -// providerversion := "999" - -// impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list - -// // Null data and error collection -// diagsNull := diag.Diagnostics{} -// dataNull := fwmodels.ProviderModel{ -// Credentials: types.StringNull(), -// } -// dataNull.ImpersonateServiceAccountDelegates = 
impersonateServiceAccountDelegates - -// // Unknown data and error collection -// diagsUnknown := diag.Diagnostics{} -// dataUnknown := fwmodels.ProviderModel{ -// Credentials: types.StringUnknown(), -// } -// dataUnknown.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates - -// pNull := fwtransport.FrameworkProviderConfig{} -// pUnknown := fwtransport.FrameworkProviderConfig{} - -// // Act -// pNull.LoadAndValidateFramework(ctx, &dataNull, tfVersion, &diagsNull, providerversion) -// pUnknown.LoadAndValidateFramework(ctx, &dataUnknown, tfVersion, &diagsUnknown, providerversion) - -// // Assert -// if !diagsNull.HasError() { -// t.Fatalf("expect errors when credentials is null, but [%d] errors occurred", diagsNull.ErrorsCount()) -// } -// if !diagsUnknown.HasError() { -// t.Fatalf("expect errors when credentials is unknown, but [%d] errors occurred", diagsUnknown.ErrorsCount()) -// } - -// errNull := diagsNull.Errors() -// errUnknown := diagsUnknown.Errors() -// for i := 0; i < len(errNull); i++ { -// if errNull[i] != errUnknown[i] { -// t.Fatalf("expect errors to be the same for null and unknown credentials values, instead got \nnull=`%s` \nunknown=%s", errNull[i], errUnknown[i]) -// } -// } -// }) -// } +func TestFrameworkProvider_LoadAndValidateFramework_credentials_unknown(t *testing.T) { + // This test case is kept separate from other credentials tests, as it requires comparing + // error messages returned by two different error states: + // - When credentials = Null + // - When credentials = Unknown + + t.Run("the same error is returned whether credentials is set as a null or unknown value (and access_token isn't set)", func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + + // Null data and error collection + diagsNull := diag.Diagnostics{} + dataNull := fwmodels.ProviderModel{ + Credentials: types.StringNull(), + } + dataNull.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + // Unknown data and error collection + diagsUnknown := diag.Diagnostics{} + dataUnknown := fwmodels.ProviderModel{ + Credentials: types.StringUnknown(), + } + dataUnknown.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + pNull := fwtransport.FrameworkProviderConfig{} + pUnknown := fwtransport.FrameworkProviderConfig{} + + // Act + pNull.LoadAndValidateFramework(ctx, &dataNull, tfVersion, &diagsNull, providerversion) + pUnknown.LoadAndValidateFramework(ctx, &dataUnknown, tfVersion, &diagsUnknown, providerversion) + + // Assert + if !diagsNull.HasError() { + t.Fatalf("expect errors when credentials is null, but [%d] errors occurred", diagsNull.ErrorsCount()) + } + if !diagsUnknown.HasError() { + t.Fatalf("expect errors when credentials is unknown, but [%d] errors occurred", diagsUnknown.ErrorsCount()) + } + + errNull := diagsNull.Errors() + errUnknown := diagsUnknown.Errors() + for i := 0; i < len(errNull); i++ { + if errNull[i] != errUnknown[i] { + t.Fatalf("expect errors to be the same for null and unknown credentials values, instead got \nnull=`%s` \nunknown=%s", errNull[i], errUnknown[i]) + } + } + }) +} func TestFrameworkProvider_LoadAndValidateFramework_billingProject(t *testing.T) { @@ -565,14 +566,16 @@ func TestFrameworkProvider_LoadAndValidateFramework_region(t *testing.T) { ExpectedConfigStructValue: 
types.StringValue("region-from-env"), }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when region is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // ConfigValues: fwmodels.ProviderModel{ - // Region: types.StringUnknown(), - // }, - // ExpectedDataModelValue: types.StringNull(), - // ExpectedConfigStructValue: types.StringNull(), - // }, + "when region is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + Region: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_REGION": "region-from-env", + }, + ExpectedDataModelValue: types.StringValue("region-from-env"), + ExpectedConfigStructValue: types.StringValue("region-from-env"), + }, } for tn, tc := range cases { @@ -713,14 +716,16 @@ func TestFrameworkProvider_LoadAndValidateFramework_zone(t *testing.T) { ExpectedConfigStructValue: types.StringValue("zone-from-env"), }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when zone is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // ConfigValues: fwmodels.ProviderModel{ - // Zone: types.StringUnknown(), - // }, - // ExpectedDataModelValue: types.StringNull(), - // ExpectedConfigStructValue: types.StringNull(), - // }, + "when zone is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + Zone: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "zone-from-env", + }, + ExpectedDataModelValue: types.StringValue("zone-from-env"), + ExpectedConfigStructValue: types.StringValue("zone-from-env"), + }, } for tn, tc := range cases { @@ -827,13 +832,15 @@ func TestFrameworkProvider_LoadAndValidateFramework_accessToken(t *testing.T) { ExpectedDataModelValue: types.StringValue("value-from-GOOGLE_OAUTH_ACCESS_TOKEN"), }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when access_token is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // ConfigValues: fwmodels.ProviderModel{ - // AccessToken: types.StringUnknown(), - // }, - // ExpectedDataModelValue: types.StringNull(), - // }, + "when access_token is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + AccessToken: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN": "value-from-GOOGLE_OAUTH_ACCESS_TOKEN", + }, + ExpectedDataModelValue: types.StringValue("value-from-GOOGLE_OAUTH_ACCESS_TOKEN"), + }, } for tn, tc := range cases { @@ -955,14 +962,16 @@ func TestFrameworkProvider_LoadAndValidateFramework_userProjectOverride(t *testi ExpectedConfigStructValue: types.BoolNull(), }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when user_project_override is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // ConfigValues: fwmodels.ProviderModel{ - // UserProjectOverride: types.BoolUnknown(), - // }, - 
// ExpectedDataModelValue: types.BoolNull(), - // ExpectedConfigStructValue: types.BoolNull(), - // }, + "when user_project_override is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + UserProjectOverride: types.BoolUnknown(), + }, + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "true", + }, + ExpectedDataModelValue: types.BoolValue(true), + ExpectedConfigStructValue: types.BoolValue(true), + }, } for tn, tc := range cases { @@ -1065,13 +1074,15 @@ func TestFrameworkProvider_LoadAndValidateFramework_impersonateServiceAccount(t ExpectedDataModelValue: types.StringValue("value-from-env@example.com"), }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when impersonate_service_account is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // ConfigValues: fwmodels.ProviderModel{ - // ImpersonateServiceAccount: types.StringUnknown(), - // }, - // ExpectedDataModelValue: types.StringNull(), - // }, + "when impersonate_service_account is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + ImpersonateServiceAccount: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT": "value-from-env@example.com", + }, + ExpectedDataModelValue: types.StringValue("value-from-env@example.com"), + }, } for tn, tc := range cases { @@ -1129,9 +1140,11 @@ func TestFrameworkProvider_LoadAndValidateFramework_impersonateServiceAccountDel SetAsUnknown bool ImpersonateServiceAccountDelegatesValue []string EnvVariables map[string]string - ExpectedDataModelValue []string - // ExpectedConfigStructValue not used here, as impersonate_service_account_delegates info isn't stored in the config struct - ExpectError bool + + ExpectedNull bool + ExpectedUnknown bool + ExpectedDataModelValue []string + ExpectError bool }{ "impersonate_service_account_delegates value can be set in the provider schema": { ImpersonateServiceAccountDelegatesValue: []string{ @@ -1146,7 +1159,7 @@ func TestFrameworkProvider_LoadAndValidateFramework_impersonateServiceAccountDel // Note: no environment variables can be used for impersonate_service_account_delegates "when no impersonate_service_account_delegates value is provided via config, the field remains unset without error": { SetAsNull: true, // not setting impersonate_service_account_delegates - ExpectedDataModelValue: nil, + ExpectedNull: true, }, // Handling empty values in config "when impersonate_service_account_delegates is set as an empty array the field is treated as if it's unset, without error": { @@ -1154,11 +1167,10 @@ func TestFrameworkProvider_LoadAndValidateFramework_impersonateServiceAccountDel ExpectedDataModelValue: nil, }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when impersonate_service_account_delegates is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // SetAsUnknown: true, - // // Currently this causes an error at google/fwtransport/framework_config.go:1518 - // }, + "when impersonate_service_account_delegates is an unknown value, the provider treats it as if it's unset, without error": { + SetAsUnknown: true, + ExpectedUnknown: true, 
+ }, } for tn, tc := range cases { @@ -1204,7 +1216,16 @@ func TestFrameworkProvider_LoadAndValidateFramework_impersonateServiceAccountDel t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) } // Checking mutation of the data model - expected, _ := types.ListValueFrom(ctx, types.StringType, tc.ExpectedDataModelValue) + var expected attr.Value + if !tc.ExpectedNull && !tc.ExpectedUnknown { + expected, _ = types.ListValueFrom(ctx, types.StringType, tc.ExpectedDataModelValue) + } + if tc.ExpectedNull { + expected = types.ListNull(types.StringType) + } + if tc.ExpectedUnknown { + expected = types.ListUnknown(types.StringType) + } if !data.ImpersonateServiceAccountDelegates.Equal(expected) { t.Fatalf("want impersonate_service_account in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", expected, data.ImpersonateServiceAccountDelegates.String()) } @@ -1246,7 +1267,7 @@ func TestFrameworkProvider_LoadAndValidateFramework_scopes(t *testing.T) { ExpectedConfigStructValue: transport_tpg.DefaultClientScopes, }, // Handling unknown values - "when scopes is an unknown value, the provider treats it as if it's unset and a default value is used without errors (align to SDK behaviour)": { + "when scopes is an unknown value, the provider treats it as if it's unset and a default value is used without errors": { SetAsUnknown: true, ExpectedDataModelValue: transport_tpg.DefaultClientScopes, ExpectedConfigStructValue: transport_tpg.DefaultClientScopes, @@ -1366,13 +1387,15 @@ func TestFrameworkProvider_LoadAndValidateFramework_requestReason(t *testing.T) ExpectedDataModelValue: types.StringNull(), }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when request_timeout is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // ConfigValues: fwmodels.ProviderModel{ - // RequestReason: types.StringUnknown(), - // }, - // ExpectedDataModelValue: types.StringNull(), - // }, + "when request_reason is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + RequestReason: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "CLOUDSDK_CORE_REQUEST_REASON": "foo", + }, + ExpectedDataModelValue: types.StringValue("foo"), + }, } for tn, tc := range cases { @@ -1460,13 +1483,12 @@ func TestFrameworkProvider_LoadAndValidateFramework_requestTimeout(t *testing.T) ExpectedDataModelValue: types.StringValue("120s"), }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when request_timeout is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // ConfigValues: fwmodels.ProviderModel{ - // RequestTimeout: types.StringUnknown(), - // }, - // ExpectedDataModelValue: types.StringNull(), - // }, + "when request_timeout is an unknown value, the provider treats it as if it's unset and uses the default value 120s": { + ConfigValues: fwmodels.ProviderModel{ + RequestTimeout: types.StringUnknown(), + }, + ExpectedDataModelValue: types.StringValue("120s"), + }, } for tn, tc := range cases { @@ -1558,6 +1580,11 @@ func TestFrameworkProvider_LoadAndValidateFramework_batching(t *testing.T) { ExpectEnableBatchingValue: types.BoolValue(true), ExpectSendAfterValue: types.StringValue("45s"), }, + "when the whole 
batching block is a null value, the provider provides default values for send_after and enable_batching": { + SetBatchingAsNull: true, + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("3s"), + }, // Handling empty strings in config "when batching is configured with send_after as an empty string, send_after will be set to a default value": { EnableBatchingValue: types.BoolValue(true), @@ -1566,24 +1593,23 @@ func TestFrameworkProvider_LoadAndValidateFramework_batching(t *testing.T) { ExpectSendAfterValue: types.StringValue("10s"), // When batching block is present but has missing arguments inside, default is 10s }, // Handling unknown values - // TODO(SarahFrench) make these tests pass to address: https://github.com/hashicorp/terraform-provider-google/issues/14444 - // "when batching is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // SetBatchingAsUnknown: true, - // ExpectEnableBatchingValue: types.BoolValue(true), - // ExpectSendAfterValue: types.StringValue("10s"), - // }, - // "when batching is configured with send_after as an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // EnableBatchingValue: types.BoolValue(true), - // SendAfterValue: types.StringUnknown(), - // ExpectEnableBatchingValue: types.BoolValue(true), - // ExpectSendAfterValue: types.StringValue("10s"), - // }, - // "when batching is configured with enable_batching as an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { - // EnableBatchingValue: types.BoolNull(), - // SendAfterValue: types.StringValue("45s"), - // ExpectEnableBatchingValue: types.BoolValue(true), - // ExpectSendAfterValue: types.StringValue("45s"), - // }, + "when batching is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { + SetBatchingAsUnknown: true, + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("3s"), + }, + "when batching is configured with send_after as an unknown value, send_after will be set to a default value": { + EnableBatchingValue: types.BoolValue(true), + SendAfterValue: types.StringUnknown(), + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("10s"), + }, + "when batching is configured with enable_batching as an unknown value, enable_batching will be set to a default value": { + EnableBatchingValue: types.BoolUnknown(), + SendAfterValue: types.StringValue("45s"), + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("45s"), + }, // Error states "if batching is configured with send_after as an invalid value, there's an error": { SendAfterValue: types.StringValue("invalid value"), @@ -1658,14 +1684,36 @@ func TestFrameworkProvider_LoadAndValidateFramework_batching(t *testing.T) { if !data.Batching.IsUnknown() && tc.ExpectBatchingUnknown { t.Fatalf("want batching in the `fwmodels.ProviderModel` struct to be unknown, but got the value `%s`", data.Batching.String()) } - var pbConfigs []fwmodels.ProviderBatching - _ = data.Batching.ElementsAs(ctx, &pbConfigs, true) - if !pbConfigs[0].EnableBatching.Equal(tc.ExpectEnableBatchingValue) { - t.Fatalf("want batching.enable_batching in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectEnableBatchingValue.String(), data.Batching.String()) + + // The code doesn't mutate values in the fwmodels.ProviderModel struct if the whole batching block is 
null/unknown, + // so run these checks below only if we're not setting the whole batching block is null/unknown + if !tc.SetBatchingAsNull && !tc.SetBatchingAsUnknown { + var pbConfigs []fwmodels.ProviderBatching + _ = data.Batching.ElementsAs(ctx, &pbConfigs, true) + if !pbConfigs[0].EnableBatching.Equal(tc.ExpectEnableBatchingValue) { + t.Fatalf("want batching.enable_batching in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectEnableBatchingValue.String(), pbConfigs[0].EnableBatching.String()) + } + if !pbConfigs[0].SendAfter.Equal(tc.ExpectSendAfterValue) { + t.Fatalf("want batching.send_after in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectSendAfterValue.String(), pbConfigs[0].SendAfter.String()) + } + } + + // Check how the batching block's values are used to configure other parts of the `FrameworkProviderConfig` struct + // - RequestBatcherServiceUsage + // - RequestBatcherIam + if p.RequestBatcherServiceUsage.BatchingConfig.EnableBatching != tc.ExpectEnableBatchingValue.ValueBool() { + t.Fatalf("want batching.enable_batching to be `%s`, but got the value `%v`", tc.ExpectEnableBatchingValue.String(), p.RequestBatcherServiceUsage.BatchingConfig.EnableBatching) } - if !pbConfigs[0].SendAfter.Equal(tc.ExpectSendAfterValue) { - t.Fatalf("want batching.send_after in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectSendAfterValue.String(), data.Batching.String()) + if !types.StringValue(p.RequestBatcherServiceUsage.BatchingConfig.SendAfter.String()).Equal(tc.ExpectSendAfterValue) { + t.Fatalf("want batching.send_after to be `%s`, but got the value `%s`", tc.ExpectSendAfterValue.String(), p.RequestBatcherServiceUsage.BatchingConfig.SendAfter.String()) + } + if p.RequestBatcherIam.BatchingConfig.EnableBatching != tc.ExpectEnableBatchingValue.ValueBool() { + t.Fatalf("want batching.enable_batching to be `%s`, but got the value `%v`", tc.ExpectEnableBatchingValue.String(), p.RequestBatcherIam.BatchingConfig.EnableBatching) + } + if !types.StringValue(p.RequestBatcherIam.BatchingConfig.SendAfter.String()).Equal(tc.ExpectSendAfterValue) { + t.Fatalf("want batching.send_after to be `%s`, but got the value `%s`", tc.ExpectSendAfterValue.String(), p.RequestBatcherIam.BatchingConfig.SendAfter.String()) } }) } } + diff --git a/mmv1/third_party/terraform/provider/provider_test.go.erb b/mmv1/third_party/terraform/provider/provider_test.go.erb index a1676aa0621b..ad0744de45ca 100644 --- a/mmv1/third_party/terraform/provider/provider_test.go.erb +++ b/mmv1/third_party/terraform/provider/provider_test.go.erb @@ -35,7 +35,6 @@ func TestProvider_noDuplicatesInResourceMap(t *testing.T) { } } - func TestProvider_noDuplicatesInDatasourceMap(t *testing.T) { _, err := provider.DatasourceMapWithErrors() if err != nil { @@ -224,6 +223,77 @@ func TestAccProviderCredentialsEmptyString(t *testing.T) { }) } +func TestAccProviderCredentialsUnknownValue(t *testing.T) { + // Test is not parallel because ENVs are set. 
+ // Need to skip VCR as this test downloads providers from the Terraform Registry + acctest.SkipIfVcr(t) + + creds := envvar.GetTestCredsFromEnv() + t.Setenv("GOOGLE_CREDENTIALS", creds) // Needs to be set for test to run, but config overrides this ENV + + project := envvar.GetTestProjectFromEnv() + t.Setenv("GOOGLE_PROJECT", project) + + org := envvar.GetTestOrgFromEnv(t) + t.Setenv("GOOGLE_ORG", org) + + billing := envvar.GetTestBillingAccountFromEnv(t) + t.Setenv("GOOGLE_BILLING_ACCOUNT", billing) + + pid := "tf-test-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + // No TestDestroy since that's not really the point of this test + Steps: []resource.TestStep{ + { + // Unknown creds handled ok with v4.59.0 + Config: testAccProviderCredentials_useUnknownCredentials(creds, org, billing, pid), + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.59.0", + Source: "hashicorp/google", + }, + "google-beta": { + VersionConstraint: "4.59.0", + Source: "hashicorp/google-beta", + }, + }, + }, + { + // Same config results in an error with v4.60.3 + Config: testAccProviderCredentials_useUnknownCredentials(creds, org, billing, pid), + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.60.2", + Source: "hashicorp/google", + }, + "google-beta": { + VersionConstraint: "4.60.2", + Source: "hashicorp/google-beta", + }, + }, + ExpectError: regexp.MustCompile(`unexpected end of JSON input`), + }, + // TODO(SarahFrench) Uncomment this once this PR is merged: https://github.com/GoogleCloudPlatform/magic-modules/pull/8943 + // { + // // Unknown creds should be handled ok again following a fix released in v4.84.0 + // Config: testAccProviderCredentials_useUnknownCredentials(creds, org, billing, pid), + // ExternalProviders: map[string]resource.ExternalProvider{ + // "google": { + // VersionConstraint: "~>4.84", + // Source: "hashicorp/google", + // }, + // "google-beta": { + // VersionConstraint: "~>4.84", + // Source: "hashicorp/google-beta", + // }, + // }, + // }, + }, + }) +} + func testAccProviderBasePath_setBasePath(endpoint, name string) string { return fmt.Sprintf(` provider "google" { @@ -391,3 +461,83 @@ resource "google_compute_address" "default" { name = "%s" }`, name) } + +func testAccProviderCredentials_useUnknownCredentials(credentials, org, billing, pid string) string { + return fmt.Sprintf(` +provider "google" { + alias = "unknown_credentials_ga" + credentials = "%s" +} + +provider "google-beta" { + alias = "unknown_credentials_beta" + credentials = base64decode(google_service_account_key.terraform_service_account.private_key) +} + +resource "google_service_account" "terraform_service_account" { + provider = google.unknown_credentials_ga + + account_id = "%s" + display_name = "Terraform FireBase Service Account" + project = google_project.this.project_id +} + +resource "google_service_account_key" "terraform_service_account" { + provider = google.unknown_credentials_ga + + service_account_id = google_service_account.terraform_service_account.name +} + +resource "google_project_iam_member" "terraform_service_account" { + provider = google.unknown_credentials_ga + + role = "roles/editor" + member = "serviceAccount:${google_service_account.terraform_service_account.email}" + project = google_project.this.project_id +} + +resource "google_project_service" "activate-firebase" { + provider = google.unknown_credentials_ga + + project = 
google_project.this.project_id + service = "firebase.googleapis.com" + + timeouts { + create = "30m" + update = "40m" + } + disable_dependent_services = true +} + +resource "google_project" "this" { + provider = google.unknown_credentials_ga + + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" + + auto_create_network = false + + labels = { + "firebase" = "enabled" + } + + lifecycle { + ignore_changes = [ + labels + ] + } +} + +resource "google_firebase_project" "this" { + provider = "google-beta.unknown_credentials_beta" + + project = google_project.this.project_id + + depends_on = [ + google_project_iam_member.terraform_service_account, + google_project_service.activate-firebase + ] +}`, credentials, pid, pid, pid, org, billing) +} From f26f4e758422a5c97f839212a87acf3436cc9476 Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Tue, 19 Sep 2023 19:22:55 +0000 Subject: [PATCH 03/36] Add support for config functions returning binary expressions (#8977) --- tools/missing-test-detector/reader.go | 147 ++++++++++-------- .../testdata/service/covered_resource_test.go | 4 +- 2 files changed, 87 insertions(+), 64 deletions(-) diff --git a/tools/missing-test-detector/reader.go b/tools/missing-test-detector/reader.go index c6c384f9c1e4..3ac95d54092d 100644 --- a/tools/missing-test-detector/reader.go +++ b/tools/missing-test-detector/reader.go @@ -224,7 +224,11 @@ func readStepsCompLit(stepsCompLit *ast.CompositeLit, funcDecls map[string]*ast. test.Steps = append(test.Steps, step) } else if ident, ok := keyValueExpr.Value.(*ast.Ident); ok { if configVar, ok := varDecls[ident.Name]; ok { - step, err := readConfigBasicLit(configVar) + configStr, err := strconv.Unquote(configVar.Value) + if err != nil { + errs = append(errs, err) + } + step, err := readConfigStr(configStr) if err != nil { errs = append(errs, err) } @@ -257,90 +261,109 @@ func readConfigFunc(configFunc *ast.FuncDecl) (Step, error) { for _, stmt := range configFunc.Body.List { if returnStmt, ok := stmt.(*ast.ReturnStmt); ok { for _, result := range returnStmt.Results { - if basicLit, ok := result.(*ast.BasicLit); ok && basicLit.Kind == token.STRING { - return readConfigBasicLit(basicLit) + configStr, err := readConfigFuncResult(result) + if err != nil { + return nil, err } - if callExpr, ok := result.(*ast.CallExpr); ok { - return readConfigFuncCallExpr(callExpr) + if configStr != "" { + return readConfigStr(configStr) } } - return nil, fmt.Errorf("failed to find a call expression in results %v", returnStmt.Results) + return nil, fmt.Errorf("failed to find a config string in results %v", returnStmt.Results) } } return nil, fmt.Errorf("failed to find a return statement in %v", configFunc.Body.List) } +// Read the return result of a config func and return the config string. +func readConfigFuncResult(result ast.Expr) (string, error) { + if basicLit, ok := result.(*ast.BasicLit); ok && basicLit.Kind == token.STRING { + return strconv.Unquote(basicLit.Value) + } else if callExpr, ok := result.(*ast.CallExpr); ok { + return readConfigFuncCallExpr(callExpr) + } else if binaryExpr, ok := result.(*ast.BinaryExpr); ok { + xConfigStr, err := readConfigFuncResult(binaryExpr.X) + if err != nil { + return "", err + } + yConfigStr, err := readConfigFuncResult(binaryExpr.Y) + if err != nil { + return "", err + } + return xConfigStr + yConfigStr, nil + } + return "", fmt.Errorf("unknown config func result %v (%T)", result, result) +} + // Read the call expression in the config function that returns the config string. 
// The call expression can contain a nested call expression. -func readConfigFuncCallExpr(configFuncCallExpr *ast.CallExpr) (Step, error) { +// Return the config string. +func readConfigFuncCallExpr(configFuncCallExpr *ast.CallExpr) (string, error) { if len(configFuncCallExpr.Args) == 0 { - return nil, fmt.Errorf("no arguments found for call expression %v", configFuncCallExpr) + return "", fmt.Errorf("no arguments found for call expression %v", configFuncCallExpr) } if basicLit, ok := configFuncCallExpr.Args[0].(*ast.BasicLit); ok && basicLit.Kind == token.STRING { - return readConfigBasicLit(basicLit) + return strconv.Unquote(basicLit.Value) } else if nestedCallExpr, ok := configFuncCallExpr.Args[0].(*ast.CallExpr); ok { return readConfigFuncCallExpr(nestedCallExpr) } - return nil, fmt.Errorf("no string literal found in arguments to call expression %v", configFuncCallExpr) + return "", fmt.Errorf("no string literal found in arguments to call expression %v", configFuncCallExpr) } -func readConfigBasicLit(configBasicLit *ast.BasicLit) (Step, error) { - if configStr, err := strconv.Unquote(configBasicLit.Value); err != nil { - return nil, err - } else { - // Remove template variables because they interfere with hcl parsing. - pattern := regexp.MustCompile("%{[^{}]*}") - // Replace with a value that can be parsed outside quotation marks. - configStr = pattern.ReplaceAllString(configStr, "true") - parser := hclparse.NewParser() - file, diagnostics := parser.ParseHCL([]byte(configStr), "config.hcl") - if diagnostics.HasErrors() { - return nil, fmt.Errorf("errors parsing hcl: %v", diagnostics.Errs()) - } - content, diagnostics := file.Body.Content(&hcl.BodySchema{ - Blocks: []hcl.BlockHeaderSchema{ - { - Type: "resource", - LabelNames: []string{"type", "name"}, - }, - { - Type: "data", - LabelNames: []string{"type", "name"}, - }, - { - Type: "output", - LabelNames: []string{"name"}, - }, - { - Type: "locals", - }, +// Read the config string and return a test step. +func readConfigStr(configStr string) (Step, error) { + // Remove template variables because they interfere with hcl parsing. + pattern := regexp.MustCompile("%{[^{}]*}") + // Replace with a value that can be parsed outside quotation marks. + configStr = pattern.ReplaceAllString(configStr, "true") + parser := hclparse.NewParser() + file, diagnostics := parser.ParseHCL([]byte(configStr), "config.hcl") + if diagnostics.HasErrors() { + return nil, fmt.Errorf("errors parsing hcl: %v", diagnostics.Errs()) + } + content, diagnostics := file.Body.Content(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "locals", }, - }) - if diagnostics.HasErrors() { - return nil, fmt.Errorf("errors getting hcl body content: %v", diagnostics.Errs()) + }, + }) + if diagnostics.HasErrors() { + return nil, fmt.Errorf("errors getting hcl body content: %v", diagnostics.Errs()) + } + m := make(map[string]Resources) + errs := make([]error, 0) + for _, block := range content.Blocks { + if len(block.Labels) != 2 { + continue } - m := make(map[string]Resources) - errs := make([]error, 0) - for _, block := range content.Blocks { - if len(block.Labels) != 2 { - continue - } - if _, ok := m[block.Labels[0]]; !ok { - // Create an empty map for this resource type. - m[block.Labels[0]] = make(Resources) - } - // Use the resource name as a key. 
- resourceConfig, err := readHCLBlockBody(block.Body, file.Bytes) - if err != nil { - errs = append(errs, err) - } - m[block.Labels[0]][block.Labels[1]] = resourceConfig + if _, ok := m[block.Labels[0]]; !ok { + // Create an empty map for this resource type. + m[block.Labels[0]] = make(Resources) } - if len(errs) > 0 { - return m, fmt.Errorf("errors reading hcl blocks: %v", errs) + // Use the resource name as a key. + resourceConfig, err := readHCLBlockBody(block.Body, file.Bytes) + if err != nil { + errs = append(errs, err) } - return m, nil + m[block.Labels[0]][block.Labels[1]] = resourceConfig } + if len(errs) > 0 { + return m, fmt.Errorf("errors reading hcl blocks: %v", errs) + } + return m, nil } func readHCLBlockBody(body hcl.Body, fileBytes []byte) (Resource, error) { diff --git a/tools/missing-test-detector/testdata/service/covered_resource_test.go b/tools/missing-test-detector/testdata/service/covered_resource_test.go index 115a05c96ff4..0cd9ad44c442 100644 --- a/tools/missing-test-detector/testdata/service/covered_resource_test.go +++ b/tools/missing-test-detector/testdata/service/covered_resource_test.go @@ -29,10 +29,10 @@ resource "covered_resource" "resource" { field_five { field_six = "value-three" } - } + }`)) + acctest.Nprintf(` field_seven = %{bool} } -`, context)) +`, context) } func testAccCoveredResource_update() string { From 31326fdde912e46d4a4eec30006197edf4331b00 Mon Sep 17 00:00:00 2001 From: ron-gal <125445217+ron-gal@users.noreply.github.com> Date: Tue, 19 Sep 2023 16:07:26 -0400 Subject: [PATCH 04/36] Replace InstanceInfo call with Instances call for regional reliability (#8971) * Replace InstanceInfo call with Instances call for regional reliability * Handle unavailable error when no overlap * Handle unavailable error when no overlap * Add tests * Add tests * Add tests * Add tests --- .../bigtable/resource_bigtable_instance.go | 55 +++++++++++-- ...esource_bigtable_instance_internal_test.go | 82 +++++++++++++++++++ 2 files changed, 128 insertions(+), 9 deletions(-) diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go index b3c867fa8df6..1b0b8d208c30 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go @@ -258,13 +258,9 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutRead)) defer cancel() - instance, err := c.InstanceInfo(ctxWithTimeout, instanceName) - if err != nil { - if tpgresource.IsNotFoundGrpcError(err) { - log.Printf("[WARN] Removing %s because it's gone", instanceName) - d.SetId("") - return nil - } + instancesResponse, err := c.Instances(ctxWithTimeout) + instance, stop, err := getInstanceFromResponse(instancesResponse, instanceName, err, d) + if stop { return err } @@ -272,11 +268,16 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error setting project: %s", err) } - clusters, err := c.Clusters(ctxWithTimeout, instance.Name) + clusters, err := c.Clusters(ctxWithTimeout, instanceName) if err != nil { partiallyUnavailableErr, ok := err.(bigtable.ErrPartiallyUnavailable) - if !ok { + // Clusters() fails with 404 if instance does not exist. 
+ if tpgresource.IsNotFoundGrpcError(err) { + log.Printf("[WARN] Removing %s because it's gone", instanceName) + d.SetId("") + return nil + } return fmt.Errorf("Error retrieving instance clusters. %s", err) } @@ -430,6 +431,42 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} { return cluster } +func getInstanceFromResponse(instances []*bigtable.InstanceInfo, instanceName string, err error, d *schema.ResourceData) (*bigtable.InstanceInfo, bool, error) { + // Fail on any error other than ParrtiallyUnavailable. + isPartiallyUnavailableError := false + if err != nil { + _, isPartiallyUnavailableError = err.(bigtable.ErrPartiallyUnavailable) + + if !isPartiallyUnavailableError { + return nil, true, fmt.Errorf("Error retrieving instance. %s", err) + } + } + + // Get instance from response. + var instanceInfo *bigtable.InstanceInfo + for _, instance := range instances { + if instance.Name == instanceName { + instanceInfo = instance + } + } + + // If instance found, it either wasn't affected by the outage, or there is no outage. + if instanceInfo != nil { + return instanceInfo, false, nil + } + + // If instance wasn't found and error is PartiallyUnavailable, + // continue to clusters call that will reveal overlap between instance regions and unavailable regions. + if isPartiallyUnavailableError { + return nil, false, nil + } + + // If instance wasn't found and error is not PartiallyUnavailable, instance doesn't exist. + log.Printf("[WARN] Removing %s because it's gone", instanceName) + d.SetId("") + return nil, true, nil +} + func getUnavailableClusterZones(clusters []interface{}, unavailableZones []string) []string { var zones []string diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_internal_test.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_internal_test.go index de25b1b51ed2..6fcc7ab1f6c1 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_internal_test.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_internal_test.go @@ -1,8 +1,13 @@ package bigtable import ( + "fmt" "reflect" + "strings" "testing" + + "cloud.google.com/go/bigtable" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func TestGetUnavailableClusterZones(t *testing.T) { @@ -48,3 +53,80 @@ func TestGetUnavailableClusterZones(t *testing.T) { } } } + +func TestGetInstanceFromResponse(t *testing.T) { + instanceName := "test-instance" + originalId := "original_value" + cases := map[string]struct { + instanceNames []string + listInstancesError error + + wantError string + wantInstanceName string + wantStop bool + wantId string + }{ + "not found": { + instanceNames: []string{"wrong", "also_wrong"}, + listInstancesError: nil, + + wantError: "", + wantStop: true, + wantInstanceName: "", + wantId: "", + }, + "found": { + instanceNames: []string{"wrong", "also_wrong", instanceName}, + listInstancesError: nil, + + wantError: "", + wantStop: false, + wantInstanceName: instanceName, + wantId: originalId, + }, + "error": { + instanceNames: nil, + listInstancesError: fmt.Errorf("some error"), + + wantError: "Error retrieving instance.", + wantStop: true, + wantInstanceName: "", + wantId: originalId, + }, + "unavailble error": { + instanceNames: []string{"wrong", "also_wrong"}, + listInstancesError: bigtable.ErrPartiallyUnavailable{[]string{"some", "location"}}, + + wantError: "", + wantStop: false, + wantInstanceName: "", + wantId: originalId, + }} + for tn, tc := range 
cases { + instancesResponse := []*bigtable.InstanceInfo{} + for _, existingInstance := range tc.instanceNames { + instancesResponse = append(instancesResponse, &bigtable.InstanceInfo{Name: existingInstance}) + } + d := &schema.ResourceData{} + d.SetId(originalId) + gotInstance, gotStop, gotErr := getInstanceFromResponse(instancesResponse, instanceName, tc.listInstancesError, d) + + if gotStop != tc.wantStop { + t.Errorf("bad stop: %s, got %v, want %v", tn, gotStop, tc.wantStop) + } + if (gotErr != nil && tc.wantError == "") || + (gotErr == nil && tc.wantError != "") || + (gotErr != nil && !strings.Contains(gotErr.Error(), tc.wantError)) { + t.Errorf("bad error: %s, got %q, want %q", tn, gotErr, tc.wantError) + } + if (gotInstance == nil && tc.wantInstanceName != "") || + (gotInstance != nil && tc.wantInstanceName == "") || + (gotInstance != nil && gotInstance.Name != tc.wantInstanceName) { + t.Errorf("bad instance: %s, got %v, want %q", tn, gotInstance, tc.wantInstanceName) + } + gotId := d.Id() + if gotId != tc.wantId { + t.Errorf("bad ID: %s, got %v, want %q", tn, gotId, tc.wantId) + } + } +} From 950e370d2fe74dcd302b43ee4f406b087f415ed4 Mon Sep 17 00:00:00 2001 From: Aliaksei Burau Date: Tue, 19 Sep 2023 23:23:56 +0200 Subject: [PATCH 05/36] allow multi-setting for classifications and excludes (#8988) --- mmv1/products/osconfig/PatchDeployment.yaml | 13 ++++++++++--- .../examples/os_config_patch_deployment_full.tf.erb | 3 ++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/mmv1/products/osconfig/PatchDeployment.yaml b/mmv1/products/osconfig/PatchDeployment.yaml index b2c994585e21..10485867be82 100644 --- a/mmv1/products/osconfig/PatchDeployment.yaml +++ b/mmv1/products/osconfig/PatchDeployment.yaml @@ -396,10 +396,12 @@ properties: properties: - !ruby/object:Api::Type::Array name: 'classifications' - exactly_one_of: + at_least_one_of: - patch_config.0.windows_update.0.classifications - patch_config.0.windows_update.0.excludes - patch_config.0.windows_update.0.exclusive_patches + conflicts: + - patch_config.0.windows_update.0.exclusive_patches description: | Only apply updates of these windows update classifications. If empty, all updates are applied. item_type: !ruby/object:Api::Type::Enum @@ -417,19 +419,24 @@ properties: - :UPDATE - !ruby/object:Api::Type::Array name: 'excludes' - exactly_one_of: + at_least_one_of: - patch_config.0.windows_update.0.classifications - patch_config.0.windows_update.0.excludes - patch_config.0.windows_update.0.exclusive_patches + conflicts: + - patch_config.0.windows_update.0.exclusive_patches description: | List of KBs to exclude from update. item_type: Api::Type::String - !ruby/object:Api::Type::Array name: 'exclusivePatches' - exactly_one_of: + at_least_one_of: - patch_config.0.windows_update.0.classifications - patch_config.0.windows_update.0.excludes - patch_config.0.windows_update.0.exclusive_patches + conflicts: + - patch_config.0.windows_update.0.classifications + - patch_config.0.windows_update.0.excludes description: | An exclusive list of kbs to be updated. These are the only patches that will be updated. This field must not be used with other patch configurations. 
diff --git a/mmv1/templates/terraform/examples/os_config_patch_deployment_full.tf.erb b/mmv1/templates/terraform/examples/os_config_patch_deployment_full.tf.erb index d95d70b62e0c..161ab079be8a 100644 --- a/mmv1/templates/terraform/examples/os_config_patch_deployment_full.tf.erb +++ b/mmv1/templates/terraform/examples/os_config_patch_deployment_full.tf.erb @@ -16,7 +16,7 @@ resource "google_os_config_patch_deployment" "<%= ctx[:primary_resource_id] %>" patch_config { mig_instances_allowed = true - + reboot_config = "ALWAYS" apt { @@ -40,6 +40,7 @@ resource "google_os_config_patch_deployment" "<%= ctx[:primary_resource_id] %>" windows_update { classifications = ["CRITICAL", "SECURITY", "UPDATE"] + excludes = ["5012170"] } pre_step { From e9e03ecc418949ce7d5ac8f0de90f66bb911f162 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Tue, 19 Sep 2023 14:58:07 -0700 Subject: [PATCH 06/36] Added support for project-level custom modules (#8993) * Added support for project-level custom modules Related to b/296259216 * gofmt * Added client-side validation of display_name * Fixed expressions in update test * Added mutex --- .../securitycenter/ProjectCustomModule.yaml | 197 ++++++++++++++++++ .../scc_project_custom_module_basic.tf.erb | 17 ++ .../scc_project_custom_module_full.tf.erb | 31 +++ ...resource_scc_project_custom_module_test.go | 77 +++++++ 4 files changed, 322 insertions(+) create mode 100644 mmv1/products/securitycenter/ProjectCustomModule.yaml create mode 100644 mmv1/templates/terraform/examples/scc_project_custom_module_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/scc_project_custom_module_full.tf.erb create mode 100644 mmv1/third_party/terraform/services/securitycenter/resource_scc_project_custom_module_test.go diff --git a/mmv1/products/securitycenter/ProjectCustomModule.yaml b/mmv1/products/securitycenter/ProjectCustomModule.yaml new file mode 100644 index 000000000000..c723c8c710a5 --- /dev/null +++ b/mmv1/products/securitycenter/ProjectCustomModule.yaml @@ -0,0 +1,197 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'ProjectCustomModule' +description: | + Represents an instance of a Security Health Analytics custom module, including + its full module name, display name, enablement state, and last updated time. + You can create a custom module at the organization, folder, or project level. + Custom modules that you create at the organization or folder level are inherited + by the child folders and projects. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Overview of custom modules for Security Health Analytics': 'https://cloud.google.com/security-command-center/docs/custom-modules-sha-overview' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v1/projects.securityHealthAnalyticsSettings.customModules' +base_url: 'projects/{{project}}/securityHealthAnalyticsSettings/customModules' +self_link: 'projects/{{project}}/securityHealthAnalyticsSettings/customModules/{{name}}' +mutex: 'projects/{{project}}/securityHealthAnalyticsSettings/customModules' +update_verb: :PATCH +update_mask: true +examples: + - !ruby/object:Provider::Terraform::Examples + name: "scc_project_custom_module_basic" + primary_resource_id: "example" + - !ruby/object:Provider::Terraform::Examples + name: "scc_project_custom_module_full" + primary_resource_id: "example" + +properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb + description: | + The resource name of the custom module. Its format is "projects/{project}/securityHealthAnalyticsSettings/customModules/{customModule}". + The id {customModule} is server-generated and is not user settable. It will be a numeric id containing 1-20 digits. + - !ruby/object:Api::Type::String + name: 'displayName' + immutable: true + required: true + # API error for invalid display names is just "INVALID_ARGUMENT" with no details + validation: !ruby/object:Provider::Terraform::Validation + function: 'verify.ValidateRegexp(`^[a-z][\w_]{0,127}$`)' + description: | + The display name of the Security Health Analytics custom module. This + display name becomes the finding category for all findings that are + returned by this custom module. The display name must be between 1 and + 128 characters, start with a lowercase letter, and contain alphanumeric + characters or underscores only. + - !ruby/object:Api::Type::Enum + name: 'enablementState' + required: true + description: | + The enablement state of the custom module. + values: + - :ENABLED + - :DISABLED + - !ruby/object:Api::Type::String + name: 'updateTime' + output: true + description: | + The time at which the custom module was last updated. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::String + name: 'lastEditor' + output: true + description: | + The editor that last updated the custom module. + - !ruby/object:Api::Type::String + name: 'ancestorModule' + output: true + description: | + If empty, indicates that the custom module was created in the organization,folder, + or project in which you are viewing the custom module. Otherwise, ancestor_module + specifies the organization or folder from which the custom module is inherited. + - !ruby/object:Api::Type::NestedObject + name: 'customConfig' + required: true + description: | + The user specified custom configuration for the module. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'predicate' + required: true + description: | + The CEL expression to evaluate to produce findings. When the expression evaluates + to true against a resource, a finding is generated. + properties: + - !ruby/object:Api::Type::String + name: 'expression' + required: true + description: | + Textual representation of an expression in Common Expression Language syntax. 
+ - !ruby/object:Api::Type::String + name: 'title' + description: | + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'customOutput' + description: | + Custom output properties. + properties: + - !ruby/object:Api::Type::Array + name: 'properties' + description: | + A list of custom output properties to add to the finding. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the property for the custom output. + - !ruby/object:Api::Type::NestedObject + name: 'valueExpression' + description: | + The CEL expression for the custom output. A resource property can be specified + to return the value of the property or a text string enclosed in quotation marks. + properties: + - !ruby/object:Api::Type::String + name: 'expression' + required: true + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'resourceSelector' + required: true + description: | + The resource types that the custom module operates on. Each custom module + can specify up to 5 resource types. + properties: + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + required: true + description: | + The resource types to run the detector on. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'severity' + required: true + description: | + The severity to assign to findings generated by the module. + values: + - :CRITICAL + - :HIGH + - :MEDIUM + - :LOW + - !ruby/object:Api::Type::String + name: 'description' + description: | + Text that describes the vulnerability or misconfiguration that the custom + module detects. This explanation is returned with each finding instance to + help investigators understand the detected issue. The text must be enclosed in quotation marks. + - !ruby/object:Api::Type::String + name: 'recommendation' + required: true + description: | + An explanation of the recommended steps that security teams can take to resolve + the detected issue. This explanation is returned with each finding generated by + this module in the nextSteps property of the finding JSON. 
diff --git a/mmv1/templates/terraform/examples/scc_project_custom_module_basic.tf.erb b/mmv1/templates/terraform/examples/scc_project_custom_module_basic.tf.erb new file mode 100644 index 000000000000..f52bff0780da --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_project_custom_module_basic.tf.erb @@ -0,0 +1,17 @@ +resource "google_scc_project_custom_module" "<%= ctx[:primary_resource_id] %>" { + display_name = "basic_custom_module" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." + severity = "MEDIUM" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/scc_project_custom_module_full.tf.erb b/mmv1/templates/terraform/examples/scc_project_custom_module_full.tf.erb new file mode 100644 index 000000000000..2fa6803b27e7 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_project_custom_module_full.tf.erb @@ -0,0 +1,31 @@ +resource "google_scc_project_custom_module" "<%= ctx[:primary_resource_id] %>" { + display_name = "full_custom_module" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_custom_module_test.go b/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_custom_module_test.go new file mode 100644 index 000000000000..37e73a4e6cce --- /dev/null +++ b/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_custom_module_test.go @@ -0,0 +1,77 @@ +package securitycenter_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccSecurityCenterProjectCustomModule_sccProjectCustomModuleUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecurityCenterProjectCustomModuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleFullExample(context), + }, + { + ResourceName: "google_scc_project_custom_module.example", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleUpdate(context), + }, + { + ResourceName: "google_scc_project_custom_module.example", + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func testAccSecurityCenterProjectCustomModule_sccProjectCustomModuleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_project_custom_module" "example" { + display_name = "full_custom_module" + enablement_state = "DISABLED" + custom_config { + predicate { + expression = "resource.name == \"updated-name\"" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + custom_output { + properties { + name = "violation" + value_expression { + expression = "resource.name" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + } + } + resource_selector { + resource_types = [ + "compute.googleapis.com/Instance", + ] + } + severity = "CRITICAL" + description = "Updated description of the custom module" + recommendation = "Updated steps to resolve violation" + } +} +`, context) +} From fa76354435caf347e88edec2688e36b258812084 Mon Sep 17 00:00:00 2001 From: Hamzawy63 <43001514+Hamzawy63@users.noreply.github.com> Date: Wed, 20 Sep 2023 00:13:44 +0200 Subject: [PATCH 07/36] Add support for certificate map datasource (#8972) Co-authored-by: Hamza Hassan --- .../terraform/provider/provider.go.erb | 1 + ...gle_certificate_manager_certificate_map.go | 44 +++++++ ...ertificate_manager_certificate_map_test.go | 109 ++++++++++++++++++ ...cate_manager_certificate_map.html.markdown | 30 +++++ 4 files changed, 184 insertions(+) create mode 100644 mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificate_map.go create mode 100644 mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificate_map_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/certificate_manager_certificate_map.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider.go.erb b/mmv1/third_party/terraform/provider/provider.go.erb index 500d9cdded3c..bffaf4b4764c 100644 --- a/mmv1/third_party/terraform/provider/provider.go.erb +++ b/mmv1/third_party/terraform/provider/provider.go.erb @@ -213,6 +213,7 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_beyondcorp_app_gateway": beyondcorp.DataSourceGoogleBeyondcorpAppGateway(), "google_billing_account": billing.DataSourceGoogleBillingAccount(), "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), + "google_certificate_manager_certificate_map": certificatemanager.DataSourceGoogleCertificateManagerCertificateMap(), "google_cloudbuild_trigger": cloudbuild.DataSourceGoogleCloudBuildTrigger(), "google_cloudfunctions_function": cloudfunctions.DataSourceGoogleCloudFunctionsFunction(), "google_cloudfunctions2_function": cloudfunctions2.DataSourceGoogleCloudFunctions2Function(), diff --git a/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificate_map.go b/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificate_map.go new file mode 100644 index 000000000000..0c87129f0d06 --- /dev/null +++ b/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificate_map.go @@ -0,0 +1,44 @@ +package certificatemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleCertificateManagerCertificateMap() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCertificateManagerCertificateMap().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleCertificateManagerCertificateMapRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleCertificateManagerCertificateMapRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + id := fmt.Sprintf("projects/%s/locations/global/certificateMaps/%s", project, name) + d.SetId(id) + err = resourceCertificateManagerCertificateMapRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git a/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificate_map_test.go b/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificate_map_test.go new file mode 100644 index 000000000000..7ca8c9c1f20d --- /dev/null +++ b/mmv1/third_party/terraform/services/certificatemanager/data_source_google_certificate_manager_certificate_map_test.go @@ -0,0 +1,109 @@ +package certificatemanager_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccDataSourceGoogleCertificateManagerCertificateMap_basic(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + + description := "My acceptance data source test certificate map" + name := fmt.Sprintf("tf-test-certificate-map-%d", acctest.RandInt(t)) + id := fmt.Sprintf("projects/%s/locations/global/certificateMaps/%s", project, name) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleCertificateManagerCertificateMap_basic(name, description), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.google_certificate_manager_certificate_map.cert_map_data", "id", id), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificate_map.cert_map_data", "description", description), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificate_map.cert_map_data", "name", name), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleCertificateManagerCertificateMap_basic(certificateMapName, certificateMapDescription string) string { + return fmt.Sprintf(` +resource "google_certificate_manager_certificate_map" "cert_map" { + name = "%s" + description = "%s" + labels = { + "terraform" : true, + "acc-test" : true, + } +} +data "google_certificate_manager_certificate_map" "cert_map_data" { + name = google_certificate_manager_certificate_map.cert_map.name +} +`, certificateMapName, certificateMapDescription) +} + +func TestAccDataSourceGoogleCertificateManagerCertificateMap_certificateMapEntryUsingMapDatasource(t 
*testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + + certName := fmt.Sprintf("tf-test-certificate-%d", acctest.RandInt(t)) + mapEntryName := fmt.Sprintf("tf-test-certificate-map-entry-%d", acctest.RandInt(t)) + mapName := fmt.Sprintf("tf-test-certificate-map-%d", acctest.RandInt(t)) + id := fmt.Sprintf("projects/%s/locations/global/certificateMaps/%s", project, mapName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleCertificateManagerCertificateMap_certificateMapEntryUsingMapDatasource(mapName, mapEntryName, certName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.google_certificate_manager_certificate_map.cert_map_data", "id", id), + resource.TestCheckResourceAttr("data.google_certificate_manager_certificate_map.cert_map_data", "name", mapName), + resource.TestCheckResourceAttr("google_certificate_manager_certificate_map_entry.cert_map_entry", "map", mapName), // check that the certificate map entry is referencing the data source + + ), + }, + }, + }) +} + +func testAccDataSourceGoogleCertificateManagerCertificateMap_certificateMapEntryUsingMapDatasource(certificateMapName, certificateMapEntryName, certificateName string) string { + return fmt.Sprintf(` +resource "google_certificate_manager_certificate_map" "cert_map" { + name = "%s" + description = "certificate map example created for testing data sources in TF" + labels = { + "terraform" : true, + "acc-test" : true, + } +} +data "google_certificate_manager_certificate_map" "cert_map_data" { + name = google_certificate_manager_certificate_map.cert_map.name +} +resource "google_certificate_manager_certificate" "certificate" { + name = "%s" + description = "Global cert" + self_managed { + pem_certificate = file("test-fixtures/cert.pem") + pem_private_key = file("test-fixtures/private-key.pem") + } +} +resource "google_certificate_manager_certificate_map_entry" "cert_map_entry" { + name = "%s" + description = "certificate map entry that reference a data source of certificate map and a self managed certificate" + map = data.google_certificate_manager_certificate_map.cert_map_data.name + certificates = [google_certificate_manager_certificate.certificate.id] + matcher = "PRIMARY" +} +`, certificateMapName, certificateName, certificateMapEntryName) +} diff --git a/mmv1/third_party/terraform/website/docs/d/certificate_manager_certificate_map.html.markdown b/mmv1/third_party/terraform/website/docs/d/certificate_manager_certificate_map.html.markdown new file mode 100644 index 000000000000..20e2e4fe5b0c --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/certificate_manager_certificate_map.html.markdown @@ -0,0 +1,30 @@ +--- +subcategory: "Certificate manager" +description: |- + Contains the data that describes a Certificate Map +--- +# google_certificate_manager_certificate_map + +Get info about a Google Certificate Manager Certificate Map resource. + +## Example Usage + +```tf +data "google_certificate_manager_certificate_map" "default" { + name = "cert-map" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the certificate map. + +- - - +* `project` - (Optional) The ID of the project in which the resource belongs. If it + is not provided, the provider project is used. 
+ +## Attributes Reference + +See [google_certificate_manager_certificate_map](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/certificate_manager_certificate_map) resource for details of the available attributes. From 86e74c75a61140817b17b38cdc40bbcc9dc6f69b Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Tue, 19 Sep 2023 15:43:39 -0700 Subject: [PATCH 08/36] made `sign_in` in `google_identity_platform_config` O+C (#9001) --- mmv1/products/identityplatform/Config.yaml | 2 +- .../examples/identity_platform_config_minimal.tf.erb | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/mmv1/products/identityplatform/Config.yaml b/mmv1/products/identityplatform/Config.yaml index 55a2c57347b0..19de848c6ca6 100644 --- a/mmv1/products/identityplatform/Config.yaml +++ b/mmv1/products/identityplatform/Config.yaml @@ -59,7 +59,6 @@ examples: org_id: :ORG_ID billing_acct: :BILLING_ACCT - skip_vcr: true skip_docs: true custom_code: !ruby/object:Provider::Terraform::CustomCode custom_create: 'templates/terraform/custom_create/identity_platform_config.go' @@ -77,6 +76,7 @@ properties: name: 'signIn' description: | Configuration related to local sign in methods. + default_from_api: true properties: - !ruby/object:Api::Type::NestedObject name: email diff --git a/mmv1/templates/terraform/examples/identity_platform_config_minimal.tf.erb b/mmv1/templates/terraform/examples/identity_platform_config_minimal.tf.erb index 4a9299dec8c8..c026f04822fc 100644 --- a/mmv1/templates/terraform/examples/identity_platform_config_minimal.tf.erb +++ b/mmv1/templates/terraform/examples/identity_platform_config_minimal.tf.erb @@ -16,4 +16,8 @@ resource "google_project_service" "identitytoolkit" { resource "google_identity_platform_config" "default" { project = google_project.default.project_id + + depends_on = [ + google_project_service.identitytoolkit + ] } From b4a858eb2c9d4e6e3a86e23df62b364376544309 Mon Sep 17 00:00:00 2001 From: Kamal Aboul-Hosn Date: Wed, 20 Sep 2023 11:49:48 -0400 Subject: [PATCH 09/36] Remove wait in Pub/Sub Schema tests as server-side changes have been made so that this is not necessary (#9020) --- .../pubsub/resource_pubsub_schema_test.go | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_schema_test.go b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_schema_test.go index abd7ad5a212e..68b193d71afd 100644 --- a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_schema_test.go +++ b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_schema_test.go @@ -48,17 +48,6 @@ func testAccPubsubSchema_basic(schema string) string { type = "PROTOCOL_BUFFER" definition = "syntax = \"proto3\";\nmessage Results {\nstring message_request = 1;\nstring message_response = 2;\n}" } - - # Need to introduce delay for updates in order for tests to complete - # successfully due to caching effects. - resource "time_sleep" "wait_121_seconds" { - create_duration = "121s" - lifecycle { - replace_triggered_by = [ - google_pubsub_schema.foo - ] - } - } `, schema) } @@ -69,16 +58,5 @@ func testAccPubsubSchema_updated(schema string) string { type = "PROTOCOL_BUFFER" definition = "syntax = \"proto3\";\nmessage Results {\nstring message_request = 1;\nstring message_response = 2;\nstring timestamp_request = 3;\n}" } - - # Need to introduce delay for updates in order for tests to complete - # successfully due to caching effects. 
- resource "time_sleep" "wait_121_seconds" { - create_duration = "121s" - lifecycle { - replace_triggered_by = [ - google_pubsub_schema.foo - ] - } - } `, schema) } From 3fae246afecdd559491c433333e9d07ae894e7d4 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Wed, 20 Sep 2023 10:13:29 -0700 Subject: [PATCH 10/36] Added melinath to vacation list (#9021) * Update membership.go * Update membership.go --- .ci/containers/membership-checker/membership.go | 1 + .ci/magician/github/membership.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.ci/containers/membership-checker/membership.go b/.ci/containers/membership-checker/membership.go index 1dcd332721f8..848408bbfb8d 100644 --- a/.ci/containers/membership-checker/membership.go +++ b/.ci/containers/membership-checker/membership.go @@ -30,6 +30,7 @@ var ( // This is for reviewers who are "on vacation": will not receive new review assignments but will still receive re-requests for assigned PRs. onVacationReviewers = []string{ + "melinath", "roaks3", } ) diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index 6ed6a6da7395..296da0b65a7a 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -31,7 +31,7 @@ var ( // This is for reviewers who are "on vacation": will not receive new review assignments but will still receive re-requests for assigned PRs. onVacationReviewers = []string{ - "slevenick", + "melinath", "roaks3", } ) From f15c1d30bd90b6348f91a6f1dcb09f030a028097 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ayberk=20Y=C4=B1lmaz?= <309940+ayberk@users.noreply.github.com> Date: Wed, 20 Sep 2023 12:38:10 -0700 Subject: [PATCH 11/36] Add a note to prefer compute_region_instance_template. (#9025) --- .../website/docs/d/compute_instance_template.html.markdown | 2 ++ .../website/docs/r/compute_instance_template.html.markdown | 2 ++ 2 files changed, 4 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/d/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_instance_template.html.markdown index 2af65eaae691..e771c3be87c5 100644 --- a/mmv1/third_party/terraform/website/docs/d/compute_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/compute_instance_template.html.markdown @@ -6,6 +6,8 @@ description: |- # google\_compute\_instance\_template +-> **Note**: Global instance templates can be used in any region. To lower the impact of outages outside your region and gain data residency within your region, use [google_compute_region_instance_template](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_region_instance_template). + Get information about a VM instance template resource within GCE. For more information see [the official documentation](https://cloud.google.com/compute/docs/instance-templates) and diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown index 2d08f340fb6f..c04c64137d4d 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown @@ -6,6 +6,8 @@ description: |- # google\_compute\_instance\_template +-> **Note**: Global instance templates can be used in any region. 
To lower the impact of outages outside your region and gain data residency within your region, use [google_compute_region_instance_template](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_region_instance_template). + Manages a VM instance template resource within GCE. For more information see [the official documentation](https://cloud.google.com/compute/docs/instance-templates) and From fc55ed521aacfa04c7f56167d4251a4eca16f7fc Mon Sep 17 00:00:00 2001 From: bobyu-google Date: Wed, 20 Sep 2023 13:41:40 -0700 Subject: [PATCH 12/36] Update version_5_upgrade.html.markdown for google_compute_service_attachment (#9029) --- .../website/docs/guides/version_5_upgrade.html.markdown | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index d3fb8647f90c..3845211cd374 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -525,3 +525,9 @@ resource "google_secret_manager_secret" "my-secret" { ### `google_identity_platform_project_default_config` has been removed from the provider Use the `google_identity_platform_config` resource instead. It contains a more comprehensive list of fields, and was created before `google_identity_platform_project_default_config` was added. + +## Resource: `google_compute_service_attachment` + +### `reconcile_connections` now defaults from API + +`reconcile_connections` previously defaults to true. Now it will default from the API. From a12e81cea487686855f430a7874180872fa187f5 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Wed, 20 Sep 2023 16:08:28 -0500 Subject: [PATCH 13/36] SecretManager Secret: Prevent recreation for "automatic" to "auto" (#9030) --- mmv1/products/secretmanager/Secret.yaml | 8 ++++-- .../constants/secret_manager_secret.go | 25 +++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 mmv1/templates/terraform/constants/secret_manager_secret.go diff --git a/mmv1/products/secretmanager/Secret.yaml b/mmv1/products/secretmanager/Secret.yaml index c2e8a0832e77..1fe39265855f 100644 --- a/mmv1/products/secretmanager/Secret.yaml +++ b/mmv1/products/secretmanager/Secret.yaml @@ -49,6 +49,8 @@ examples: import_format: ['projects/{{project}}/secrets/{{secret_id}}'] custom_code: !ruby/object:Provider::Terraform::CustomCode pre_update: templates/terraform/pre_update/secret_manager_secret.go.erb + constants: templates/terraform/constants/secret_manager_secret.go +custom_diff: ['secretManagerSecretAutoCustomizeDiff'] parameters: - !ruby/object:Api::Type::String name: secretId @@ -125,7 +127,8 @@ properties: properties: - !ruby/object:Api::Type::Boolean name: automatic - immutable: true + # Immutability is handled by the custom diff function until "automatic" is removed + # immutable: true exactly_one_of: - replication.0.automatic - replication.0.user_managed @@ -137,7 +140,8 @@ properties: - !ruby/object:Api::Type::NestedObject name: auto api_name: automatic - immutable: true + # Immutability is handled by the custom diff function until "automatic" is removed + # immutable: true exactly_one_of: - replication.0.automatic - replication.0.user_managed diff --git a/mmv1/templates/terraform/constants/secret_manager_secret.go b/mmv1/templates/terraform/constants/secret_manager_secret.go new file mode 100644 index 
000000000000..13db855f5e7e --- /dev/null +++ b/mmv1/templates/terraform/constants/secret_manager_secret.go @@ -0,0 +1,25 @@ +// Prevent ForceNew when upgrading replication.automatic -> replication.auto +func secretManagerSecretAutoCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + oAutomatic, nAutomatic := diff.GetChange("replication.0.automatic") + _, nAuto := diff.GetChange("replication.0.auto") + autoLen := len(nAuto.([]interface{})) + + // Do not ForceNew if we are removing "automatic" while adding "auto" + if oAutomatic == true && nAutomatic == false && autoLen > 0 { + return nil + } + + if diff.HasChange("replication.0.automatic") { + if err := diff.ForceNew("replication.0.automatic"); err != nil { + return err + } + } + + if diff.HasChange("replication.0.auto") { + if err := diff.ForceNew("replication.0.auto"); err != nil { + return err + } + } + + return nil +} From 53d05243b90a8232084b118637910afce90f6e0c Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Wed, 20 Sep 2023 15:59:28 -0700 Subject: [PATCH 14/36] Initial split of diff computation from breaking change computation (#8930) * Initial split of diff computation from breaking change computation * Made a breaking change * go mod tidy * Fixed import path * Corrected function diff detection * Revert "Made a breaking change" This reverts commit 0d4fd2b0a4a72da2f2dd83a4764b4d2ba886ab45. * Reverted detection of new required field addition b/300515447 * Factored out key union logic --- tools/diff-processor/cmd/breaking_changes.go | 37 +- .../cmd/breaking_changes_test.go | 106 ++ tools/diff-processor/cmd/root.go | 8 +- tools/diff-processor/diff/comparison.go | 134 --- tools/diff-processor/diff/comparison_test.go | 344 ------ tools/diff-processor/diff/diff.go | 222 ++++ tools/diff-processor/diff/diff_test.go | 1072 +++++++++++++++++ tools/diff-processor/go.mod | 14 +- tools/diff-processor/go.sum | 7 +- .../diff-processor/rules/breaking_changes.go | 52 + .../rules/breaking_changes_test.go | 344 ++++++ tools/diff-processor/rules/rules_field.go | 28 + .../diff-processor/rules/rules_field_test.go | 116 ++ .../rules/rules_resource_inventory.go | 24 +- .../rules/rules_resource_inventory_test.go | 123 +- .../rules/rules_resource_schema.go | 21 +- .../rules/rules_resource_schema_test.go | 98 +- 17 files changed, 2068 insertions(+), 682 deletions(-) create mode 100644 tools/diff-processor/cmd/breaking_changes_test.go delete mode 100644 tools/diff-processor/diff/comparison.go delete mode 100644 tools/diff-processor/diff/comparison_test.go create mode 100644 tools/diff-processor/diff/diff.go create mode 100644 tools/diff-processor/diff/diff_test.go create mode 100644 tools/diff-processor/rules/breaking_changes.go create mode 100644 tools/diff-processor/rules/breaking_changes_test.go diff --git a/tools/diff-processor/cmd/breaking_changes.go b/tools/diff-processor/cmd/breaking_changes.go index 7d9957234449..1544f6529a00 100644 --- a/tools/diff-processor/cmd/breaking_changes.go +++ b/tools/diff-processor/cmd/breaking_changes.go @@ -1,18 +1,33 @@ package cmd + import ( - "fmt" + newProvider "google/provider/new/google/provider" + oldProvider "google/provider/old/google/provider" + + "io" + "os" "sort" - "github.com/spf13/cobra" "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/rules" + "github.com/spf13/cobra" ) + const breakingChangesDesc = `Check for breaking changes between the new / old Terraform provider 
versions.` + type breakingChangesOptions struct { - rootOptions *rootOptions + rootOptions *rootOptions + computeSchemaDiff func() diff.SchemaDiff + stdout io.Writer } + func newBreakingChangesCmd(rootOptions *rootOptions) *cobra.Command { o := &breakingChangesOptions{ - rootOptions: rootOptions, + rootOptions: rootOptions, + computeSchemaDiff: func() diff.SchemaDiff { + return diff.ComputeSchemaDiff(oldProvider.ResourceMap(), newProvider.ResourceMap()) + }, + stdout: os.Stdout, } cmd := &cobra.Command{ Use: "breaking-changes", @@ -25,10 +40,14 @@ func newBreakingChangesCmd(rootOptions *rootOptions) *cobra.Command { return cmd } func (o *breakingChangesOptions) run() error { - breakages := diff.Compare() - sort.Strings(breakages) - for _, breakage := range breakages { - fmt.Println(breakage) + schemaDiff := o.computeSchemaDiff() + breakingChanges := rules.ComputeBreakingChanges(schemaDiff) + sort.Strings(breakingChanges) + for _, breakingChange := range breakingChanges { + _, err := o.stdout.Write([]byte(breakingChange + "\n")) + if err != nil { + return err + } } return nil -} \ No newline at end of file +} diff --git a/tools/diff-processor/cmd/breaking_changes_test.go b/tools/diff-processor/cmd/breaking_changes_test.go new file mode 100644 index 000000000000..f4e25966b4b8 --- /dev/null +++ b/tools/diff-processor/cmd/breaking_changes_test.go @@ -0,0 +1,106 @@ +package cmd + +import ( + "bytes" + "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "strings" + "testing" +) + +func TestBreakingChangesCmd(t *testing.T) { + cases := map[string]struct { + oldResourceMap map[string]*schema.Resource + newResourceMap map[string]*schema.Resource + expectedViolations int + }{ + "no breaking changes": { + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 0, + }, + "resource missing": { + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep"}, + "field-b": {Description: "beep"}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{}, + expectedViolations: 1, + }, + "field missing, resource missing, and optional to required": { + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + "google-y": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Required: true}, + }, + }, + }, + expectedViolations: 3, + }, + } + + for tn, tc := range cases { + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + + var buf bytes.Buffer + o := breakingChangesOptions{ + computeSchemaDiff: func() diff.SchemaDiff { + return diff.ComputeSchemaDiff(tc.oldResourceMap, tc.newResourceMap) + }, + stdout: &buf, + } + + err := o.run() + if err != nil { + t.Errorf("Error running command: %s", err) + } + + out := 
make([]byte, buf.Len()) + buf.Read(out) + + lines := strings.Split(string(out), "\n") + nonemptyLines := []string{} + for _, line := range lines { + if line != "" { + nonemptyLines = append(nonemptyLines, line) + } + } + if len(nonemptyLines) != tc.expectedViolations { + t.Errorf("Unexpected number of violations. Want %d, got %d. Output: %s", tc.expectedViolations, len(nonemptyLines), out) + } + }) + } +} diff --git a/tools/diff-processor/cmd/root.go b/tools/diff-processor/cmd/root.go index c749e0e31b52..ffa8c6dc71b1 100644 --- a/tools/diff-processor/cmd/root.go +++ b/tools/diff-processor/cmd/root.go @@ -1,10 +1,13 @@ package cmd + import ( "fmt" - "os" "github.com/spf13/cobra" + "os" ) + const rootCmdDesc = "Utilities for interacting with diffs between Terraform schema versions." + type rootOptions struct { } @@ -20,6 +23,7 @@ func newRootCmd() (*cobra.Command, *rootOptions, error) { cmd.AddCommand(newBreakingChangesCmd(o)) return cmd, o, nil } + // Execute is the entry-point for all commands. // This lets us keep all new command functions private. func Execute() { @@ -35,4 +39,4 @@ func Execute() { fmt.Println(err.Error()) os.Exit(1) } -} \ No newline at end of file +} diff --git a/tools/diff-processor/diff/comparison.go b/tools/diff-processor/diff/comparison.go deleted file mode 100644 index 26ad10122f38..000000000000 --- a/tools/diff-processor/diff/comparison.go +++ /dev/null @@ -1,134 +0,0 @@ -package diff - -import ( - newProvider "google/provider/new/google/provider" - oldProvider "google/provider/old/google/provider" - "strings" - - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/rules" - "github.com/golang/glog" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func Compare() []string { - resourceMapOld := oldProvider.ResourceMap() - resourceMapNew := newProvider.ResourceMap() - - return compareResourceMaps(resourceMapOld, resourceMapNew) -} - -func compareResourceMaps(old, new map[string]*schema.Resource) []string { - messages := []string{} - - for _, rule := range rules.ResourceInventoryRules { - violatingResources := rule.IsRuleBreak(old, new) - if len(violatingResources) > 0 { - for _, resourceName := range violatingResources { - newMessage := rule.Message(resourceName) - messages = append(messages, newMessage) - } - } - - } - - for resourceName, resource := range new { - oldResource, ok := old[resourceName] - if ok { - newMessages := compareResourceSchema(resourceName, oldResource.Schema, resource.Schema) - messages = append(messages, newMessages...) - } - } - - return messages -} - -func compareResourceSchema(resourceName string, old, new map[string]*schema.Schema) []string { - messages := []string{} - oldCompressed := flattenSchema(old) - newCompressed := flattenSchema(new) - - for _, rule := range rules.ResourceSchemaRules { - violatingFields := rule.IsRuleBreak(oldCompressed, newCompressed) - if len(violatingFields) > 0 { - for _, fieldName := range violatingFields { - newMessage := rule.Message(resourceName, fieldName) - messages = append(messages, newMessage) - } - } - } - - for fieldName, field := range newCompressed { - oldField, ok := oldCompressed[fieldName] - if ok { - newMessages := compareField(resourceName, fieldName, oldField, field) - messages = append(messages, newMessages...) 
- } - } - - return messages -} - -func compareField(resourceName, fieldName string, old, new *schema.Schema) []string { - messages := []string{} - fieldRules := rules.FieldRules - - for _, rule := range fieldRules { - breakageMessage := rule.IsRuleBreak( - old, - new, - rules.MessageContext{ - Resource: resourceName, - Field: fieldName, - }, - ) - if breakageMessage != "" { - messages = append(messages, breakageMessage) - } - } - return messages -} - -func flattenSchema(schemaObj map[string]*schema.Schema) map[string]*schema.Schema { - return flattenSchemaRecursive(nil, schemaObj) -} - -func flattenSchemaRecursive(parentLineage []string, schemaObj map[string]*schema.Schema) map[string]*schema.Schema { - compressed := make(map[string]*schema.Schema) - - // prepare prefix to bring nested entries up - parentPrefix := strings.Join(parentLineage, ".") - if len(parentPrefix) > 0 { - parentPrefix += "." - } - - // add entry to output and call - // flattenSchemaRecursive for any children - for fieldName, field := range schemaObj { - compressed[parentPrefix+fieldName] = field - casted, typeConverted := field.Elem.(*schema.Resource) - if field.Elem != nil && typeConverted { - newLineage := append([]string{}, parentLineage...) - newLineage = append(newLineage, fieldName) - compressedChild := flattenSchemaRecursive(newLineage, casted.Schema) - compressed = mergeSchemaMaps(compressed, compressedChild) - } - } - - return compressed -} - -func mergeSchemaMaps(map1, map2 map[string]*schema.Schema) map[string]*schema.Schema { - merged := make(map[string]*schema.Schema) - for key, value := range map1 { - merged[key] = value - } - - for key, value := range map2 { - if _, alreadyExists := merged[key]; alreadyExists { - glog.Errorf("error when trying to merge maps key " + key + " was found in both maps.. 
please ensure the children you are merging up have the prefix on the key names.") - } - merged[key] = value - } - - return merged -} diff --git a/tools/diff-processor/diff/comparison_test.go b/tools/diff-processor/diff/comparison_test.go deleted file mode 100644 index 5554148b07ed..000000000000 --- a/tools/diff-processor/diff/comparison_test.go +++ /dev/null @@ -1,344 +0,0 @@ -package diff - -import ( - "strings" - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func TestComparisonEngine(t *testing.T) { - for _, tc := range comparisonEngineTestCases { - tc.check(t) - } -} - -type comparisonEngineTestCase struct { - name string - oldResourceMap map[string]*schema.Resource - newResourceMap map[string]*schema.Resource - expectedViolations int -} - -var comparisonEngineTestCases = []comparisonEngineTestCase{ - { - name: "control", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 0, - }, - { - name: "adding resources", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - "google-y": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 0, - }, - { - name: "adding fields", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - "field-c": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 0, - }, - { - name: "resource missing", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep"}, - "field-b": {Description: "beep"}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{}, - expectedViolations: 1, - }, - { - name: "field missing", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 1, - }, - { - name: "optional field to required", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ 
- "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Required: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 1, - }, - { - name: "field missing and optional to required", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Required: true}, - }, - }, - }, - expectedViolations: 2, - }, - { - name: "field missing, resource missing, and optional to required", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - "google-y": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Required: true}, - }, - }, - }, - expectedViolations: 3, - }, - { - name: "removing a subfield", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": { - Description: "beep", - Optional: true, - Type: schema.TypeList, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sub-field-1": {Description: "beep", Optional: true}, - "sub-field-2": {Description: "beep", Optional: true}, - }, - }, - }, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": { - Description: "beep", - Optional: true, - Type: schema.TypeList, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sub-field-1": {Description: "beep", Optional: true}, - }, - }, - }, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 1, - }, - { - name: "subfield max shrinking", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": { - Description: "beep", - Optional: true, - Type: schema.TypeList, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sub-field-1": {Description: "beep", Optional: true, MaxItems: 100}, - }, - }, - }, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": { - Description: "beep", - Optional: true, - Type: schema.TypeList, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sub-field-1": {Description: "beep", Optional: true, MaxItems: 25}, - }, - }, - }, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 1, - }, - { - name: "subfield max shrinking", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": { - Description: "beep", - Optional: true, - Type: schema.TypeList, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sub-field-1": {Description: "beep", Optional: true, MaxItems: 100}, - }, - }, - }, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: 
map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": { - Description: "beep", - Optional: true, - Type: schema.TypeList, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sub-field-1": {Description: "beep", Optional: true, MaxItems: 25}, - }, - }, - }, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 1, - }, - { - name: "min growing", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": { - Description: "beep", - Optional: true, - MinItems: 1, - }, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": { - Description: "beep", - Optional: true, - MinItems: 4, - }, - }, - }, - }, - expectedViolations: 1, - }, -} - -func (tc *comparisonEngineTestCase) check(t *testing.T) { - violations := compareResourceMaps(tc.oldResourceMap, tc.newResourceMap) - for _, v := range violations { - if strings.Contains(v, "{{") || strings.Contains(v, "}}") { - t.Errorf("Test `%s` failed: found unreplaced characters in string - %s", tc.name, v) - } - } - if tc.expectedViolations != len(violations) { - t.Errorf("Test `%s` failed: expected %d violations, got %d", tc.name, tc.expectedViolations, len(violations)) - } -} diff --git a/tools/diff-processor/diff/diff.go b/tools/diff-processor/diff/diff.go new file mode 100644 index 000000000000..51b3de6002dd --- /dev/null +++ b/tools/diff-processor/diff/diff.go @@ -0,0 +1,222 @@ +package diff + +import ( + "reflect" + + "golang.org/x/exp/maps" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// SchemaDiff is a nested map with field names as keys and Field objects +// as bottom-level values. +// Fields are assumed not to be covered until detected in a test. +type SchemaDiff map[string]ResourceDiff + +type ResourceDiff struct { + ResourceConfig ResourceConfigDiff + Fields map[string]FieldDiff +} + +type ResourceConfigDiff struct { + Old *schema.Resource + New *schema.Resource +} + +type FieldDiff struct { + Old *schema.Schema + New *schema.Schema +} + +func ComputeSchemaDiff(oldResourceMap, newResourceMap map[string]*schema.Resource) SchemaDiff { + schemaDiff := make(SchemaDiff) + for resource, _ := range union(maps.Keys(oldResourceMap), maps.Keys(newResourceMap)) { + // Compute diff between old and new resources and fields. + // TODO: add support for computing diff between resource configs, not just whether the + // resource was added/removed. 
b/300114839 + resourceDiff := ResourceDiff{} + var flattenedOldSchema map[string]*schema.Schema + if oldResource, ok := oldResourceMap[resource]; ok { + flattenedOldSchema = flattenSchema("", oldResource.Schema) + resourceDiff.ResourceConfig.Old = &schema.Resource{} + } + + var flattenedNewSchema map[string]*schema.Schema + if newResource, ok := newResourceMap[resource]; ok { + flattenedNewSchema = flattenSchema("", newResource.Schema) + resourceDiff.ResourceConfig.New = &schema.Resource{} + } + + resourceDiff.Fields = make(map[string]FieldDiff) + for key, _ := range union(maps.Keys(flattenedOldSchema), maps.Keys(flattenedNewSchema)) { + oldField := flattenedOldSchema[key] + newField := flattenedNewSchema[key] + if fieldChanged(oldField, newField) { + resourceDiff.Fields[key] = FieldDiff{ + Old: oldField, + New: newField, + } + } + } + if len(resourceDiff.Fields) > 0 || !cmp.Equal(resourceDiff.ResourceConfig.Old, resourceDiff.ResourceConfig.New) { + schemaDiff[resource] = resourceDiff + } + } + return schemaDiff +} + +func union(keys1, keys2 []string) map[string]struct{} { + allKeys := make(map[string]struct{}) + for _, key := range keys1 { + allKeys[key] = struct{}{} + } + for _, key := range keys2 { + allKeys[key] = struct{}{} + } + return allKeys +} + +func flattenSchema(parentKey string, schemaObj map[string]*schema.Schema) map[string]*schema.Schema { + flattened := make(map[string]*schema.Schema) + + if parentKey != "" { + parentKey += "." + } + + for fieldName, field := range schemaObj { + key := parentKey + fieldName + flattened[key] = field + childResource, hasNestedFields := field.Elem.(*schema.Resource) + if field.Elem != nil && hasNestedFields { + for childKey, childField := range flattenSchema(key, childResource.Schema) { + flattened[childKey] = childField + } + } + } + + return flattened +} + +func fieldChanged(oldField, newField *schema.Schema) bool { + // If either field is nil, it is changed; if both are nil (which should never happen) it's not + if oldField == nil && newField == nil { + return false + } + if oldField == nil || newField == nil { + return true + } + // Check if any basic Schema struct fields have changed. 
+ // https://github.com/hashicorp/terraform-plugin-sdk/blob/v2.24.0/helper/schema/schema.go#L44 + if oldField.Type != newField.Type { + return true + } + if oldField.ConfigMode != newField.ConfigMode { + return true + } + if oldField.Required != newField.Required { + return true + } + if oldField.Optional != newField.Optional { + return true + } + if oldField.Computed != newField.Computed { + return true + } + if oldField.ForceNew != newField.ForceNew { + return true + } + if oldField.DiffSuppressOnRefresh != newField.DiffSuppressOnRefresh { + return true + } + if oldField.Default != newField.Default { + return true + } + if oldField.Description != newField.Description { + return true + } + if oldField.InputDefault != newField.InputDefault { + return true + } + if oldField.MaxItems != newField.MaxItems { + return true + } + if oldField.MinItems != newField.MinItems { + return true + } + if oldField.Deprecated != newField.Deprecated { + return true + } + if oldField.Sensitive != newField.Sensitive { + return true + } + + // Compare slices + less := func(a, b string) bool { return a < b } + + if (len(oldField.ConflictsWith) > 0 || len(newField.ConflictsWith) > 0) && !cmp.Equal(oldField.ConflictsWith, newField.ConflictsWith, cmpopts.SortSlices(less)) { + return true + } + + if (len(oldField.ExactlyOneOf) > 0 || len(newField.ExactlyOneOf) > 0) && !cmp.Equal(oldField.ExactlyOneOf, newField.ExactlyOneOf, cmpopts.SortSlices(less)) { + return true + } + + if (len(oldField.AtLeastOneOf) > 0 || len(newField.AtLeastOneOf) > 0) && !cmp.Equal(oldField.AtLeastOneOf, newField.AtLeastOneOf, cmpopts.SortSlices(less)) { + return true + } + + if (len(oldField.RequiredWith) > 0 || len(newField.RequiredWith) > 0) && !cmp.Equal(oldField.RequiredWith, newField.RequiredWith, cmpopts.SortSlices(less)) { + return true + } + + // Check if Elem changed (unless old and new both represent nested fields) + if (oldField.Elem == nil || newField.Elem == nil) && !(oldField.Elem == nil && newField.Elem == nil) { + return true + } + _, oldHasChildren := oldField.Elem.(*schema.Resource) + _, newHasChildren := newField.Elem.(*schema.Resource) + if !oldHasChildren && !newHasChildren { + if !reflect.DeepEqual(oldField.Elem, newField.Elem) { + return true + } + } else if (oldHasChildren || newHasChildren) && !(oldHasChildren && newHasChildren) { + return true + } + + // Check if any Schema struct fields that are functions have changed + if funcChanged(oldField.DiffSuppressFunc, newField.DiffSuppressFunc) { + return true + } + if funcChanged(oldField.DefaultFunc, newField.DefaultFunc) { + return true + } + if funcChanged(oldField.StateFunc, newField.StateFunc) { + return true + } + if funcChanged(oldField.Set, newField.Set) { + return true + } + if funcChanged(oldField.ValidateFunc, newField.ValidateFunc) { + return true + } + if funcChanged(oldField.ValidateDiagFunc, newField.ValidateDiagFunc) { + return true + } + + return false +} + +func funcChanged(oldFunc, newFunc interface{}) bool { + // If it changed to/from nil, it changed + oldFuncIsNil := reflect.ValueOf(oldFunc).IsNil() + newFuncIsNil := reflect.ValueOf(newFunc).IsNil() + if (oldFuncIsNil && !newFuncIsNil) || (!oldFuncIsNil && newFuncIsNil) { + return true + } + + // If a func is set before and after we don't currently have a way to reliably + // determine whether the function changed, so we assume that it has not changed. 
+ // b/300157205 + return false +} diff --git a/tools/diff-processor/diff/diff_test.go b/tools/diff-processor/diff/diff_test.go new file mode 100644 index 000000000000..2d5896228dd5 --- /dev/null +++ b/tools/diff-processor/diff/diff_test.go @@ -0,0 +1,1072 @@ +package diff + +import ( + "fmt" + "testing" + + newProvider "google/provider/new/google/provider" + newTpgresource "google/provider/new/google/tpgresource" + oldTpgresource "google/provider/new/google/tpgresource" + newVerify "google/provider/new/google/verify" + oldProvider "google/provider/old/google/provider" + oldVerify "google/provider/old/google/verify" + + "github.com/google/go-cmp/cmp" + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" +) + +func TestNewProviderOldProviderChanges(t *testing.T) { + changes := ComputeSchemaDiff(oldProvider.ResourceMap(), newProvider.ResourceMap()) + + t.Logf("Changes between old and new providers: %s", spew.Sdump(changes)) +} + +func TestFlattenSchema(t *testing.T) { + cases := map[string]struct { + resourceSchema map[string]*schema.Schema + expectFlattened map[string]*schema.Schema + }{ + "primitive fields": { + resourceSchema: map[string]*schema.Schema{ + "bool": { + Type: schema.TypeBool, + }, + "int": { + Type: schema.TypeInt, + }, + "float": { + Type: schema.TypeFloat, + }, + "string": { + Type: schema.TypeString, + }, + }, + expectFlattened: map[string]*schema.Schema{ + "bool": { + Type: schema.TypeBool, + }, + "int": { + Type: schema.TypeInt, + }, + "float": { + Type: schema.TypeFloat, + }, + "string": { + Type: schema.TypeString, + }, + }, + }, + "map field": { + resourceSchema: map[string]*schema.Schema{ + "map": { + Type: schema.TypeMap, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + expectFlattened: map[string]*schema.Schema{ + "map": { + Type: schema.TypeMap, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + "simple list field": { + resourceSchema: map[string]*schema.Schema{ + "list": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + expectFlattened: map[string]*schema.Schema{ + "list": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + "simple set field": { + resourceSchema: map[string]*schema.Schema{ + "set": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + expectFlattened: map[string]*schema.Schema{ + "set": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + "nested list field": { + resourceSchema: map[string]*schema.Schema{ + "list": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nested_string": { + Type: schema.TypeString, + }, + }, + }, + }, + }, + expectFlattened: map[string]*schema.Schema{ + "list": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nested_string": { + Type: schema.TypeString, + }, + }, + }, + }, + "list.nested_string": { + Type: schema.TypeString, + }, + }, + }, + "nested set field": { + resourceSchema: map[string]*schema.Schema{ + "set": { + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nested_string": { + Type: schema.TypeString, + }, + }, + }, + }, + }, + expectFlattened: map[string]*schema.Schema{ + "set": { + Type: schema.TypeSet, + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + "nested_string": { + Type: schema.TypeString, + }, + }, + }, + }, + "set.nested_string": { + Type: schema.TypeString, + }, + }, + }, + } + + for tn, tc := range cases { + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + flattened := flattenSchema("", tc.resourceSchema) + assert.Equal(t, tc.expectFlattened, flattened) + }) + } +} + +func testDefaultFunc1() (interface{}, error) { + return "default1", nil +} +func testDefaultFunc2() (interface{}, error) { + return "default2", nil +} +func testStateFunc1(interface{}) string { + return "state1" +} +func testStateFunc2(interface{}) string { + return "state2" +} +func testValidateDiagFunc1(v interface{}, p cty.Path) diag.Diagnostics { + return diag.Diagnostics{} +} +func testValidateDiagFunc2(v interface{}, p cty.Path) diag.Diagnostics { + return diag.Diagnostics{} +} + +func TestFieldChanged(t *testing.T) { + cases := map[string]struct { + oldField *schema.Schema + newField *schema.Schema + expectChanged bool + }{ + "both nil": { + oldField: nil, + newField: nil, + expectChanged: false, + }, + "old nil": { + oldField: nil, + newField: &schema.Schema{ + Type: schema.TypeString, + }, + expectChanged: true, + }, + "new nil": { + oldField: &schema.Schema{ + Type: schema.TypeString, + }, + newField: nil, + expectChanged: true, + }, + "not changed": { + oldField: &schema.Schema{}, + newField: &schema.Schema{}, + expectChanged: false, + }, + "Type changed": { + oldField: &schema.Schema{ + Type: schema.TypeString, + }, + newField: &schema.Schema{ + Type: schema.TypeInt, + }, + expectChanged: true, + }, + "ConfigMode changed": { + oldField: &schema.Schema{ + ConfigMode: schema.SchemaConfigModeAttr, + }, + newField: &schema.Schema{ + ConfigMode: schema.SchemaConfigModeBlock, + }, + expectChanged: true, + }, + "Required changed": { + oldField: &schema.Schema{ + Required: false, + }, + newField: &schema.Schema{ + Required: true, + }, + expectChanged: true, + }, + "Optional changed": { + oldField: &schema.Schema{ + Optional: false, + }, + newField: &schema.Schema{ + Optional: true, + }, + expectChanged: true, + }, + "Computed changed": { + oldField: &schema.Schema{ + Computed: false, + }, + newField: &schema.Schema{ + Computed: true, + }, + expectChanged: true, + }, + "ForceNew changed": { + oldField: &schema.Schema{ + ForceNew: false, + }, + newField: &schema.Schema{ + ForceNew: true, + }, + expectChanged: true, + }, + "DiffSuppressOnRefresh changed": { + oldField: &schema.Schema{ + DiffSuppressOnRefresh: false, + }, + newField: &schema.Schema{ + DiffSuppressOnRefresh: true, + }, + expectChanged: true, + }, + "Default changed": { + oldField: &schema.Schema{ + Default: 10, + }, + newField: &schema.Schema{ + Default: 20, + }, + expectChanged: true, + }, + "Description changed": { + oldField: &schema.Schema{ + Description: "Hello", + }, + newField: &schema.Schema{ + Description: "Goodbye", + }, + expectChanged: true, + }, + "InputDefault changed": { + oldField: &schema.Schema{ + InputDefault: "Hello", + }, + newField: &schema.Schema{ + InputDefault: "Goodbye", + }, + expectChanged: true, + }, + "MaxItems changed": { + oldField: &schema.Schema{ + MaxItems: 10, + }, + newField: &schema.Schema{ + MaxItems: 20, + }, + expectChanged: true, + }, + "MinItems changed": { + oldField: &schema.Schema{ + MinItems: 10, + }, + newField: &schema.Schema{ + MinItems: 20, + }, + expectChanged: true, + }, + "Deprecated changed": { + oldField: &schema.Schema{ + Deprecated: "Hello", + }, + newField: &schema.Schema{ + Deprecated: 
"Goodbye", + }, + expectChanged: true, + }, + "Sensitive changed": { + oldField: &schema.Schema{ + Sensitive: false, + }, + newField: &schema.Schema{ + Sensitive: true, + }, + expectChanged: true, + }, + "ConflictsWith reordered": { + oldField: &schema.Schema{ + ConflictsWith: []string{"field_one", "field_two"}, + }, + newField: &schema.Schema{ + ConflictsWith: []string{"field_two", "field_one"}, + }, + expectChanged: false, + }, + "ConflictsWith changed": { + oldField: &schema.Schema{ + ConflictsWith: []string{"field_one", "field_two"}, + }, + newField: &schema.Schema{ + ConflictsWith: []string{"field_two", "field_three"}, + }, + expectChanged: true, + }, + "ExactlyOneOf reordered": { + oldField: &schema.Schema{ + ExactlyOneOf: []string{"field_one", "field_two"}, + }, + newField: &schema.Schema{ + ExactlyOneOf: []string{"field_two", "field_one"}, + }, + expectChanged: false, + }, + "ExactlyOneOf changed": { + oldField: &schema.Schema{ + ExactlyOneOf: []string{"field_one", "field_two"}, + }, + newField: &schema.Schema{ + ExactlyOneOf: []string{"field_two", "field_three"}, + }, + expectChanged: true, + }, + "AtLeastOneOf reordered": { + oldField: &schema.Schema{ + AtLeastOneOf: []string{"field_one", "field_two"}, + }, + newField: &schema.Schema{ + AtLeastOneOf: []string{"field_two", "field_one"}, + }, + expectChanged: false, + }, + "AtLeastOneOf changed": { + oldField: &schema.Schema{ + AtLeastOneOf: []string{"field_one", "field_two"}, + }, + newField: &schema.Schema{ + AtLeastOneOf: []string{"field_two", "field_three"}, + }, + expectChanged: true, + }, + "RequiredWith reordered": { + oldField: &schema.Schema{ + RequiredWith: []string{"field_one", "field_two"}, + }, + newField: &schema.Schema{ + RequiredWith: []string{"field_two", "field_one"}, + }, + expectChanged: false, + }, + "RequiredWith changed": { + oldField: &schema.Schema{ + RequiredWith: []string{"field_one", "field_two"}, + }, + newField: &schema.Schema{ + RequiredWith: []string{"field_two", "field_three"}, + }, + expectChanged: true, + }, + "simple Elem unset -> set": { + oldField: &schema.Schema{}, + newField: &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + expectChanged: true, + }, + "simple Elem set -> unset": { + oldField: &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + newField: &schema.Schema{}, + expectChanged: true, + }, + "simple Elem unchanged": { + oldField: &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeString}, + }, + newField: &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeString}, + }, + expectChanged: false, + }, + "simple Elem changed": { + oldField: &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeString}, + }, + newField: &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + expectChanged: true, + }, + "nested Elem unset -> set": { + oldField: &schema.Schema{}, + newField: &schema.Schema{ + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "foobar": { + Type: schema.TypeInt, + }, + }, + }, + }, + expectChanged: true, + }, + "nested Elem set -> unset": { + oldField: &schema.Schema{ + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "foobar": { + Type: schema.TypeInt, + }, + }, + }, + }, + newField: &schema.Schema{}, + expectChanged: true, + }, + "nested Elem unchanged": { + oldField: &schema.Schema{ + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "foobar": { + Type: schema.TypeInt, + }, + }, + }, + }, + newField: &schema.Schema{ + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "foobar": { + Type: schema.TypeInt, + }, + }, + }, + }, + expectChanged: false, + }, + "nested Elem changing is ignored": { + oldField: &schema.Schema{ + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "foobar": { + Type: schema.TypeInt, + }, + }, + }, + }, + newField: &schema.Schema{ + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "barbaz": { + Type: schema.TypeString, + }, + }, + }, + }, + expectChanged: false, + }, + "Elem simple -> nested": { + oldField: &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeString}, + }, + newField: &schema.Schema{ + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "foobar": { + Type: schema.TypeInt, + }, + }, + }, + }, + expectChanged: true, + }, + "Elem nested -> simple": { + oldField: &schema.Schema{ + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "foobar": { + Type: schema.TypeInt, + }, + }, + }, + }, + newField: &schema.Schema{ + Elem: &schema.Schema{Type: schema.TypeString}, + }, + expectChanged: true, + }, + + "DiffSuppressFunc added": { + oldField: &schema.Schema{}, + newField: &schema.Schema{ + DiffSuppressFunc: newTpgresource.CaseDiffSuppress, + }, + expectChanged: true, + }, + "DiffSuppressFunc removed": { + oldField: &schema.Schema{ + DiffSuppressFunc: oldTpgresource.CaseDiffSuppress, + }, + newField: &schema.Schema{}, + expectChanged: true, + }, + "DiffSuppressFunc remains set": { + oldField: &schema.Schema{ + DiffSuppressFunc: oldTpgresource.CaseDiffSuppress, + }, + newField: &schema.Schema{ + DiffSuppressFunc: newTpgresource.CaseDiffSuppress, + }, + expectChanged: false, + }, + + "DefaultFunc added": { + oldField: &schema.Schema{}, + newField: &schema.Schema{ + DefaultFunc: testDefaultFunc1, + }, + expectChanged: true, + }, + "DefaultFunc removed": { + oldField: &schema.Schema{ + DefaultFunc: testDefaultFunc1, + }, + newField: &schema.Schema{}, + expectChanged: true, + }, + "DefaultFunc remains set": { + oldField: &schema.Schema{ + DefaultFunc: testDefaultFunc1, + }, + newField: &schema.Schema{ + DefaultFunc: testDefaultFunc2, + }, + expectChanged: false, + }, + + "StateFunc added": { + oldField: &schema.Schema{}, + newField: &schema.Schema{ + StateFunc: testStateFunc1, + }, + expectChanged: true, + }, + "StateFunc removed": { + oldField: &schema.Schema{ + StateFunc: testStateFunc1, + }, + newField: &schema.Schema{}, + expectChanged: true, + }, + "StateFunc remains set": { + oldField: &schema.Schema{ + StateFunc: testStateFunc1, + }, + newField: &schema.Schema{ + StateFunc: testStateFunc2, + }, + expectChanged: false, + }, + + "Set added": { + oldField: &schema.Schema{}, + newField: &schema.Schema{ + Set: newTpgresource.SelfLinkRelativePathHash, + }, + expectChanged: true, + }, + "Set removed": { + oldField: &schema.Schema{ + Set: oldTpgresource.SelfLinkRelativePathHash, + }, + newField: &schema.Schema{}, + expectChanged: true, + }, + "Set remains set": { + oldField: &schema.Schema{ + Set: oldTpgresource.SelfLinkRelativePathHash, + }, + newField: &schema.Schema{ + Set: newTpgresource.SelfLinkRelativePathHash, + }, + expectChanged: false, + }, + + "ValidateFunc added": { + oldField: &schema.Schema{}, + newField: &schema.Schema{ + ValidateFunc: newVerify.ValidateBase64String, + }, + expectChanged: true, + }, + "ValidateFunc removed": { + oldField: &schema.Schema{ + ValidateFunc: oldVerify.ValidateBase64String, + }, + newField: &schema.Schema{}, + expectChanged: true, + }, + "ValidateFunc remains set": { + oldField: &schema.Schema{ + ValidateFunc: 
oldVerify.ValidateBase64String, + }, + newField: &schema.Schema{ + ValidateFunc: newVerify.ValidateBase64String, + }, + expectChanged: false, + }, + + "ValidateDiagFunc added": { + oldField: &schema.Schema{}, + newField: &schema.Schema{ + ValidateDiagFunc: testValidateDiagFunc1, + }, + expectChanged: true, + }, + "ValidateDiagFunc removed": { + oldField: &schema.Schema{ + ValidateDiagFunc: testValidateDiagFunc1, + }, + newField: &schema.Schema{}, + expectChanged: true, + }, + "ValidateDiagFunc remains set": { + oldField: &schema.Schema{ + ValidateDiagFunc: testValidateDiagFunc1, + }, + newField: &schema.Schema{ + ValidateDiagFunc: testValidateDiagFunc2, + }, + expectChanged: false, + }, + } + + for tn, tc := range cases { + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + changed := fieldChanged(tc.oldField, tc.newField) + assert.Equal( + t, + tc.expectChanged, + changed, + fmt.Sprintf( + "want %t; got %t.\nOld field: %s\nNew field: %s\n", + tc.expectChanged, + changed, + spew.Sdump(tc.oldField), + spew.Sdump(tc.newField), + ), + ) + }) + } +} + +func TestComputeSchemaDiff(t *testing.T) { + cases := map[string]struct { + oldResourceMap map[string]*schema.Resource + newResourceMap map[string]*schema.Resource + expectedSchemaDiff SchemaDiff + }{ + "empty-maps": { + oldResourceMap: map[string]*schema.Resource{}, + newResourceMap: map[string]*schema.Resource{}, + expectedSchemaDiff: SchemaDiff{}, + }, + "empty-resources": { + oldResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": {}, + "google_service_one_resource_two": {}, + }, + newResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": {}, + "google_service_one_resource_two": {}, + }, + expectedSchemaDiff: SchemaDiff{}, + }, + "unchanged-nested-field": { + oldResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + expectedSchemaDiff: SchemaDiff{}, + }, + "new-nested-field": { + oldResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + "google_service_one_resource_two": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: 
schema.TypeString, + }, + }, + }, + }, + }, + }, + "google_service_one_resource_two": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + "field_four": { + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + }, + expectedSchemaDiff: SchemaDiff{ + "google_service_one_resource_two": ResourceDiff{ + ResourceConfig: ResourceConfigDiff{ + Old: &schema.Resource{}, + New: &schema.Resource{}, + }, + Fields: map[string]FieldDiff{ + "field_two.field_four": FieldDiff{ + Old: nil, + New: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + "new-field-in-two-resources": { + oldResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + "google_service_one_resource_two": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + "field_four": { + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + "google_service_one_resource_two": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + "field_two": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_three": { + Type: schema.TypeString, + }, + "field_four": { + Type: schema.TypeInt, + }, + }, + }, + }, + }, + }, + }, + expectedSchemaDiff: SchemaDiff{ + "google_service_one_resource_one": ResourceDiff{ + ResourceConfig: ResourceConfigDiff{ + Old: &schema.Resource{}, + New: &schema.Resource{}, + }, + Fields: map[string]FieldDiff{ + "field_two.field_four": FieldDiff{ + Old: nil, + New: &schema.Schema{Type: schema.TypeInt}, + }, + }, + }, + "google_service_one_resource_two": ResourceDiff{ + ResourceConfig: ResourceConfigDiff{ + Old: &schema.Resource{}, + New: &schema.Resource{}, + }, + Fields: map[string]FieldDiff{ + "field_two.field_four": FieldDiff{ + Old: nil, + New: &schema.Schema{Type: schema.TypeInt}, + }, + }, + }, + }, + }, + "deleted-field": { + oldResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{}, + }, + }, + expectedSchemaDiff: SchemaDiff{ + "google_service_one_resource_one": ResourceDiff{ + ResourceConfig: ResourceConfigDiff{ + Old: &schema.Resource{}, + New: &schema.Resource{}, + }, + Fields: map[string]FieldDiff{ + "field_one": FieldDiff{ + Old: &schema.Schema{Type: schema.TypeString}, + New: nil, + }, + }, + }, + }, + }, + "deleted-resource": { + oldResourceMap: 
map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + }, + }, + }, + expectedSchemaDiff: SchemaDiff{ + "google_service_one_resource_one": ResourceDiff{ + ResourceConfig: ResourceConfigDiff{ + Old: &schema.Resource{}, + New: nil, + }, + Fields: map[string]FieldDiff{ + "field_one": FieldDiff{ + Old: &schema.Schema{Type: schema.TypeString}, + New: nil, + }, + }, + }, + }, + }, + "new-resource": { + newResourceMap: map[string]*schema.Resource{ + "google_service_one_resource_one": { + Schema: map[string]*schema.Schema{ + "field_one": { + Type: schema.TypeString, + }, + }, + }, + }, + expectedSchemaDiff: SchemaDiff{ + "google_service_one_resource_one": ResourceDiff{ + ResourceConfig: ResourceConfigDiff{ + Old: nil, + New: &schema.Resource{}, + }, + Fields: map[string]FieldDiff{ + "field_one": FieldDiff{ + Old: nil, + New: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + } + for tn, tc := range cases { + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + schemaDiff := ComputeSchemaDiff(tc.oldResourceMap, tc.newResourceMap) + if diff := cmp.Diff(tc.expectedSchemaDiff, schemaDiff); diff != "" { + t.Errorf("schema diff not equal (-want, +got):\n%s", diff) + } + }) + } +} diff --git a/tools/diff-processor/go.mod b/tools/diff-processor/go.mod index 92d513b193cd..88bcd44634ca 100644 --- a/tools/diff-processor/go.mod +++ b/tools/diff-processor/go.mod @@ -9,9 +9,13 @@ replace google/provider/new => ./new replace github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor => ./ require ( - github.com/golang/glog v1.1.0 + github.com/davecgh/go-spew v1.1.1 + github.com/google/go-cmp v0.5.9 + github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.0 github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.3 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 google/provider/new v0.0.0-00010101000000-000000000000 google/provider/old v0.0.0-00010101000000-000000000000 ) @@ -33,15 +37,14 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f // indirect github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect github.com/fatih/color v1.13.0 // indirect github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect + github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 // indirect github.com/google/s2a-go v0.1.4 // indirect github.com/google/uuid v1.3.0 // indirect @@ -51,7 +54,6 @@ require ( github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect github.com/hashicorp/go-hclog v1.2.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.4.8 // indirect @@ -82,6 +84,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 
// indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/oklog/run v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect @@ -93,7 +96,7 @@ require ( golang.org/x/net v0.12.0 // indirect golang.org/x/oauth2 v0.10.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.10.0 // indirect + golang.org/x/sys v0.12.0 // indirect golang.org/x/text v0.11.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/api v0.135.0 // indirect @@ -103,4 +106,5 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e // indirect google.golang.org/grpc v1.57.0 // indirect google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/diff-processor/go.sum b/tools/diff-processor/go.sum index 8e301bf72f63..42705ad96cf3 100644 --- a/tools/diff-processor/go.sum +++ b/tools/diff-processor/go.sum @@ -280,6 +280,7 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -317,6 +318,8 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -383,8 +386,8 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/tools/diff-processor/rules/breaking_changes.go b/tools/diff-processor/rules/breaking_changes.go new file mode 100644 index 000000000000..2adaf4192720 --- /dev/null +++ b/tools/diff-processor/rules/breaking_changes.go @@ -0,0 +1,52 @@ +package rules + +import ( + "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" +) + +func ComputeBreakingChanges(schemaDiff diff.SchemaDiff) []string { + messages := []string{} + for resource, resourceDiff := range schemaDiff { + for _, rule := range ResourceInventoryRules { + if rule.isRuleBreak(resourceDiff.ResourceConfig.Old, resourceDiff.ResourceConfig.New) { + messages = append(messages, rule.Message(resource)) + } + } + + // If the resource was added or removed, don't check field schema diffs. + if resourceDiff.ResourceConfig.Old == nil || resourceDiff.ResourceConfig.New == nil { + continue + } + + // TODO: Move field removal to field_rules and merge resource schema / resource inventory rules + // b/300124253 + for _, rule := range ResourceSchemaRules { + violatingFields := rule.IsRuleBreak(resourceDiff) + if len(violatingFields) > 0 { + for _, field := range violatingFields { + newMessage := rule.Message(resource, field) + messages = append(messages, newMessage) + } + } + } + + for field, fieldDiff := range resourceDiff.Fields { + for _, rule := range FieldRules { + // TODO: refactor rules to use interface-based implementation that separates checking whether + // a rule broke from composing a message for a rule break. 
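The TODO above describes splitting the check for a rule break from the composition of its message. A minimal sketch of what that interface could look like, with illustrative names only (an assumption about the eventual refactor, not part of this change):

package rules

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// FieldRuleCheck separates detecting a breaking field change from describing it.
type FieldRuleCheck interface {
	// Breaks reports whether changing a field from old to new violates the rule.
	Breaks(old, new *schema.Schema) bool
	// Describe renders the violation message for the given resource and field.
	Describe(resource, field string) string
}

With that split, the loop below could append rule.Describe(resource, field) only when rule.Breaks(...) returns true, instead of treating an empty message string as "no break".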
+ breakageMessage := rule.IsRuleBreak( + fieldDiff.Old, + fieldDiff.New, + MessageContext{ + Resource: resource, + Field: field, + }, + ) + if breakageMessage != "" { + messages = append(messages, breakageMessage) + } + } + } + } + return messages +} diff --git a/tools/diff-processor/rules/breaking_changes_test.go b/tools/diff-processor/rules/breaking_changes_test.go new file mode 100644 index 000000000000..5c11cf63287e --- /dev/null +++ b/tools/diff-processor/rules/breaking_changes_test.go @@ -0,0 +1,344 @@ +package rules + +import ( + "strings" + "testing" + + "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func TestComputeBreakingChanges(t *testing.T) { + cases := []struct { + name string + oldResourceMap map[string]*schema.Resource + newResourceMap map[string]*schema.Resource + expectedViolations int + }{ + { + name: "control", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 0, + }, + { + name: "adding resources", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + "google-y": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 0, + }, + { + name: "adding fields", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + "field-c": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 0, + }, + { + name: "resource missing", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep"}, + "field-b": {Description: "beep"}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{}, + expectedViolations: 1, + }, + { + name: "field missing", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 1, + }, + { + name: "optional field to required", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: 
true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Required: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 1, + }, + { + name: "field missing and optional to required", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Required: true}, + }, + }, + }, + expectedViolations: 2, + }, + { + name: "field missing, resource missing, and optional to required", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + "field-b": {Description: "beep", Optional: true}, + }, + }, + "google-y": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": {Description: "beep", Required: true}, + }, + }, + }, + expectedViolations: 3, + }, + { + name: "removing a subfield", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": { + Description: "beep", + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sub-field-1": {Description: "beep", Optional: true}, + "sub-field-2": {Description: "beep", Optional: true}, + }, + }, + }, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": { + Description: "beep", + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sub-field-1": {Description: "beep", Optional: true}, + }, + }, + }, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 1, + }, + { + name: "subfield max shrinking", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": { + Description: "beep", + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sub-field-1": {Description: "beep", Optional: true, MaxItems: 100}, + }, + }, + }, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": { + Description: "beep", + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sub-field-1": {Description: "beep", Optional: true, MaxItems: 25}, + }, + }, + }, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 1, + }, + { + name: "subfield max shrinking", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": { + Description: "beep", + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sub-field-1": {Description: "beep", Optional: true, MaxItems: 100}, + }, + }, + }, + "field-b": 
{Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": { + Description: "beep", + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sub-field-1": {Description: "beep", Optional: true, MaxItems: 25}, + }, + }, + }, + "field-b": {Description: "beep", Optional: true}, + }, + }, + }, + expectedViolations: 1, + }, + { + name: "min growing", + oldResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": { + Description: "beep", + Optional: true, + MinItems: 1, + }, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google-x": { + Schema: map[string]*schema.Schema{ + "field-a": { + Description: "beep", + Optional: true, + MinItems: 4, + }, + }, + }, + }, + expectedViolations: 1, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + schemaDiff := diff.ComputeSchemaDiff(tc.oldResourceMap, tc.newResourceMap) + violations := ComputeBreakingChanges(schemaDiff) + for _, v := range violations { + if strings.Contains(v, "{{") || strings.Contains(v, "}}") { + t.Errorf("Test `%s` failed: found unreplaced characters in string - %s", tc.name, v) + } + } + if tc.expectedViolations != len(violations) { + t.Errorf("Test `%s` failed: expected %d violations, got %d", tc.name, tc.expectedViolations, len(violations)) + } + }) + } +} diff --git a/tools/diff-processor/rules/rules_field.go b/tools/diff-processor/rules/rules_field.go index be79462b10e4..1b5645571731 100644 --- a/tools/diff-processor/rules/rules_field.go +++ b/tools/diff-processor/rules/rules_field.go @@ -45,6 +45,10 @@ var fieldRule_ChangingType = FieldRule{ } func fieldRule_ChangingType_func(old, new *schema.Schema, mc MessageContext) string { + // Type change doesn't matter for added / removed fields + if old == nil || new == nil { + return "" + } message := mc.message if old.Type != new.Type { oldType := getValueType(old.Type) @@ -76,6 +80,10 @@ var fieldRule_BecomingRequired = FieldRule{ } func fieldRule_BecomingRequired_func(old, new *schema.Schema, mc MessageContext) string { + // Ignore for added / removed fields + if old == nil || new == nil { + return "" + } message := mc.message if !old.Required && new.Required { return populateMessageContext(message, mc) @@ -93,6 +101,10 @@ var fieldRule_BecomingComputedOnly = FieldRule{ } func fieldRule_BecomingComputedOnly_func(old, new *schema.Schema, mc MessageContext) string { + // ignore for added / removed fields + if old == nil || new == nil { + return "" + } message := mc.message // if the field is computed only already // this rule doesn't apply @@ -115,6 +127,10 @@ var fieldRule_OptionalComputedToOptional = FieldRule{ } func fieldRule_OptionalComputedToOptional_func(old, new *schema.Schema, mc MessageContext) string { + // ignore for added / removed fields + if old == nil || new == nil { + return "" + } message := mc.message if (old.Computed && old.Optional) && (new.Optional && !new.Computed) { return populateMessageContext(message, mc) @@ -131,6 +147,10 @@ var fieldRule_DefaultModification = FieldRule{ } func fieldRule_DefaultModification_func(old, new *schema.Schema, mc MessageContext) string { + // ignore for added / removed fields + if old == nil || new == nil { + return "" + } message := mc.message if old.Default != new.Default { oldDefault := fmt.Sprintf("%v", old.Default) @@ -152,6 +172,10 @@ var 
fieldRule_GrowingMin = FieldRule{ } func fieldRule_GrowingMin_func(old, new *schema.Schema, mc MessageContext) string { + // ignore for added / removed fields + if old == nil || new == nil { + return "" + } message := mc.message if old.MinItems < new.MinItems { oldMin := fmt.Sprint(old.MinItems) @@ -172,6 +196,10 @@ var fieldRule_ShrinkingMax = FieldRule{ } func fieldRule_ShrinkingMax_func(old, new *schema.Schema, mc MessageContext) string { + // ignore for added / removed fields + if old == nil || new == nil { + return "" + } message := mc.message if old.MaxItems > new.MaxItems { oldMax := fmt.Sprint(old.MinItems) diff --git a/tools/diff-processor/rules/rules_field_test.go b/tools/diff-processor/rules/rules_field_test.go index 1b485d62c195..e19fd13cbc4f 100644 --- a/tools/diff-processor/rules/rules_field_test.go +++ b/tools/diff-processor/rules/rules_field_test.go @@ -71,6 +71,25 @@ var fieldRule_BecomingRequiredTestCases = []fieldTestCase{ }, expectedViolation: true, }, + { + // TODO: detect this as violation b/300515447 + name: "field added as required", + oldField: nil, + newField: &schema.Schema{ + Description: "beep", + Required: true, + }, + expectedViolation: false, + }, + { + name: "field removed", + oldField: &schema.Schema{ + Description: "beep", + Optional: true, + }, + newField: nil, + expectedViolation: false, + }, } // !! min max ? @@ -93,6 +112,22 @@ var fieldRule_ChangingTypeTestCases = []fieldTestCase{ }, expectedViolation: false, }, + { + name: "field added", + oldField: nil, + newField: &schema.Schema{ + Type: schema.TypeBool, + }, + expectedViolation: false, + }, + { + name: "field removed", + oldField: &schema.Schema{ + Type: schema.TypeBool, + }, + newField: nil, + expectedViolation: false, + }, { name: "field sub-element type control", oldField: &schema.Schema{ @@ -222,6 +257,22 @@ var fieldRule_DefaultModificationTestCases = []fieldTestCase{ }, expectedViolation: true, }, + { + name: "field added", + oldField: nil, + newField: &schema.Schema{ + Default: "same", + }, + expectedViolation: false, + }, + { + name: "field removed", + oldField: &schema.Schema{ + Default: "same", + }, + newField: nil, + expectedViolation: false, + }, } func TestFieldRule_BecomingComputedOnly(t *testing.T) { @@ -294,6 +345,22 @@ var fieldRule_BecomingComputedOnlyTestCases = []fieldTestCase{ }, expectedViolation: true, }, + { + name: "added computed field", + oldField: nil, + newField: &schema.Schema{ + Computed: true, + }, + expectedViolation: false, + }, + { + name: "removed computed field", + oldField: &schema.Schema{ + Computed: true, + }, + newField: nil, + expectedViolation: false, + }, } func TestFieldRule_OptionalComputedToOptional(t *testing.T) { @@ -336,6 +403,23 @@ var fieldRule_OptionalComputedToOptionalTestCases = []fieldTestCase{ }, expectedViolation: true, }, + { + name: "field added", + oldField: nil, + newField: &schema.Schema{ + Optional: true, + }, + expectedViolation: false, + }, + { + name: "field removed", + oldField: &schema.Schema{ + Optional: true, + Computed: true, + }, + newField: nil, + expectedViolation: false, + }, } func TestFieldRule_GrowingMin(t *testing.T) { @@ -381,6 +465,22 @@ var fieldRule_GrowingMinTestCases = []fieldTestCase{ }, expectedViolation: true, }, + { + name: "field added", + oldField: nil, + newField: &schema.Schema{ + MaxItems: 1, + }, + expectedViolation: false, + }, + { + name: "field removed", + oldField: &schema.Schema{ + MinItems: 1, + }, + newField: nil, + expectedViolation: false, + }, } func TestFieldRule_ShrinkingMax(t *testing.T) { 
@@ -426,6 +526,22 @@ var fieldRule_ShrinkingMaxTestCases = []fieldTestCase{ }, expectedViolation: true, }, + { + name: "field added", + oldField: nil, + newField: &schema.Schema{ + MaxItems: 2, + }, + expectedViolation: false, + }, + { + name: "field removed", + oldField: &schema.Schema{ + MinItems: 2, + }, + newField: nil, + expectedViolation: false, + }, } func (tc *fieldTestCase) check(rule FieldRule, t *testing.T) { diff --git a/tools/diff-processor/rules/rules_resource_inventory.go b/tools/diff-processor/rules/rules_resource_inventory.go index 8e1d6083f50c..f3664625725e 100644 --- a/tools/diff-processor/rules/rules_resource_inventory.go +++ b/tools/diff-processor/rules/rules_resource_inventory.go @@ -15,7 +15,7 @@ type ResourceInventoryRule struct { definition string message string identifier string - isRuleBreak func(old, new map[string]*schema.Resource) []string + isRuleBreak func(old, new *schema.Resource) bool } // ResourceInventoryRules is a list of ResourceInventoryRule @@ -27,18 +27,11 @@ var resourceInventoryRule_RemovingAResource = ResourceInventoryRule{ definition: "In terraform resources should be retained whenever possible. A removable of an resource will result in a configuration breakage wherever a dependency on that resource exists. Renaming or Removing a resources are functionally equivalent in terms of configuration breakages.", message: "Resource {{resource}} was either removed or renamed", identifier: "resource-map-resource-removal-or-rename", - isRuleBreak: resourceInventoryRule_RemovingAField_func, + isRuleBreak: resourceInventoryRule_RemovingAResource_func, } -func resourceInventoryRule_RemovingAField_func(old, new map[string]*schema.Resource) []string { - keysNotPresent := []string{} - for key := range old { - _, exists := new[key] - if !exists { - keysNotPresent = append(keysNotPresent, key) - } - } - return keysNotPresent +func resourceInventoryRule_RemovingAResource_func(old, new *schema.Resource) bool { + return new == nil && old != nil } func resourceInventoryRulesToRuleArray(rms []ResourceInventoryRule) []Rule { @@ -73,15 +66,6 @@ func (rm ResourceInventoryRule) Message(resource string) string { return msg + documentationReference(rm.identifier) } -// IsRuleBreak - compares resource entries and returns -// a list of resources violating the rule -func (rm ResourceInventoryRule) IsRuleBreak(old, new map[string]*schema.Resource) []string { - if rm.isRuleBreak == nil { - return []string{} - } - return rm.isRuleBreak(old, new) -} - // Undetectable - informs if there are functions in place // to detect this rule. 
func (rm ResourceInventoryRule) Undetectable() bool { diff --git a/tools/diff-processor/rules/rules_resource_inventory_test.go b/tools/diff-processor/rules/rules_resource_inventory_test.go index d8a33167f552..2bc309bfdda4 100644 --- a/tools/diff-processor/rules/rules_resource_inventory_test.go +++ b/tools/diff-processor/rules/rules_resource_inventory_test.go @@ -7,10 +7,10 @@ import ( ) type resourceInventoryTestCase struct { - name string - oldResourceMap map[string]*schema.Resource - newResourceMap map[string]*schema.Resource - expectedViolations int + name string + old *schema.Resource + new *schema.Resource + expected bool } func TestResourceInventoryRule_RemovingAResource(t *testing.T) { @@ -21,113 +21,28 @@ func TestResourceInventoryRule_RemovingAResource(t *testing.T) { var resourceInventoryRule_RemovingAResourceTestCases = []resourceInventoryTestCase{ { - name: "control", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 0, + name: "control", + old: &schema.Resource{}, + new: &schema.Resource{}, + expected: false, }, { - name: "adding a resource", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - "google-y": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 0, + name: "resource added", + old: nil, + new: &schema.Resource{}, + expected: false, }, { - name: "resource missing", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep"}, - "field-b": {Description: "beep"}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{}, - expectedViolations: 1, - }, - { - name: "resource renamed", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-y": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - expectedViolations: 1, - }, - { - name: "resource renamed and another removed", - oldResourceMap: map[string]*schema.Resource{ - "google-x": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - "google-z": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - }, - }, - }, - newResourceMap: map[string]*schema.Resource{ - "google-y": { - Schema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - }, - }, - 
expectedViolations: 2, + name: "resource removed", + old: &schema.Resource{}, + new: nil, + expected: true, }, } func (tc *resourceInventoryTestCase) check(rule ResourceInventoryRule, t *testing.T) { - violations := rule.isRuleBreak(tc.oldResourceMap, tc.newResourceMap) - if tc.expectedViolations != len(violations) { - t.Errorf("Test `%s` failed: expected %d violations, got %d", tc.name, tc.expectedViolations, len(violations)) + got := rule.isRuleBreak(tc.old, tc.new) + if tc.expected != got { + t.Errorf("Test `%s` failed: want %t, got %t", tc.name, tc.expected, got) } } diff --git a/tools/diff-processor/rules/rules_resource_schema.go b/tools/diff-processor/rules/rules_resource_schema.go index 3929238afc94..5cf6ca11915a 100644 --- a/tools/diff-processor/rules/rules_resource_schema.go +++ b/tools/diff-processor/rules/rules_resource_schema.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" ) // ResourceSchemaRule provides structure for @@ -14,7 +14,7 @@ type ResourceSchemaRule struct { definition string message string identifier string - isRuleBreak func(old, new map[string]*schema.Schema) []string + isRuleBreak func(resourceDiff diff.ResourceDiff) []string } // ResourceSchemaRules is a list of ResourceInventoryRule @@ -41,15 +41,14 @@ var resourceSchemaRule_RemovingAField = ResourceSchemaRule{ isRuleBreak: resourceSchemaRule_RemovingAField_func, } -func resourceSchemaRule_RemovingAField_func(old, new map[string]*schema.Schema) []string { - keysNotPresent := []string{} - for key := range old { - _, exists := new[key] - if !exists { - keysNotPresent = append(keysNotPresent, key) +func resourceSchemaRule_RemovingAField_func(resourceDiff diff.ResourceDiff) []string { + fieldsRemoved := []string{} + for field, fieldDiff := range resourceDiff.Fields { + if fieldDiff.Old != nil && fieldDiff.New == nil { + fieldsRemoved = append(fieldsRemoved, field) } } - return keysNotPresent + return fieldsRemoved } func resourceSchemaRulesToRuleArray(rss []ResourceSchemaRule) []Rule { @@ -88,11 +87,11 @@ func (rs ResourceSchemaRule) Message(resource, field string) string { // IsRuleBreak - compares the field entries and returns // a list of fields violating the rule -func (rs ResourceSchemaRule) IsRuleBreak(old, new map[string]*schema.Schema) []string { +func (rs ResourceSchemaRule) IsRuleBreak(resourceDiff diff.ResourceDiff) []string { if rs.isRuleBreak == nil { return []string{} } - return rs.isRuleBreak(old, new) + return rs.isRuleBreak(resourceDiff) } // Undetectable - informs if there are functions in place diff --git a/tools/diff-processor/rules/rules_resource_schema_test.go b/tools/diff-processor/rules/rules_resource_schema_test.go index 01d40676649d..ea92c9740c6c 100644 --- a/tools/diff-processor/rules/rules_resource_schema_test.go +++ b/tools/diff-processor/rules/rules_resource_schema_test.go @@ -3,6 +3,9 @@ package rules import ( "testing" + "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -13,77 +16,70 @@ func TestResourceSchemaRule_RemovingAField(t *testing.T) { } type resourceSchemaTestCase struct { - name string - oldResourceSchema map[string]*schema.Schema - newResourceSchema map[string]*schema.Schema - expectedViolations int + name string + resourceDiff diff.ResourceDiff + expectedFields []string } var 
resourceSchemaRule_RemovingAField_TestCases = []resourceSchemaTestCase{ { name: "control", - oldResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, + resourceDiff: diff.ResourceDiff{ + Fields: map[string]diff.FieldDiff{ + "field-a": diff.FieldDiff{ + Old: &schema.Schema{Description: "beep", Optional: true}, + New: &schema.Schema{Description: "beep", Optional: true}, + }, + }, }, - newResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - expectedViolations: 0, + expectedFields: []string{}, }, { name: "adding a field", - oldResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - newResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - "field-c": {Description: "beep", Optional: true}, - }, - expectedViolations: 0, - }, - { - name: "renaming a field", - oldResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - newResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-d": {Description: "beep", Optional: true}, + resourceDiff: diff.ResourceDiff{ + Fields: map[string]diff.FieldDiff{ + "field-a": diff.FieldDiff{ + Old: nil, + New: &schema.Schema{Description: "beep", Optional: true}, + }, + }, }, - expectedViolations: 1, + expectedFields: []string{}, }, { name: "removing a field", - oldResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, + resourceDiff: diff.ResourceDiff{ + Fields: map[string]diff.FieldDiff{ + "field-a": diff.FieldDiff{ + Old: &schema.Schema{Description: "beep", Optional: true}, + New: nil, + }, + }, }, - newResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - }, - expectedViolations: 1, + expectedFields: []string{"field-a"}, }, { - name: "renaming a field and removing a field", - oldResourceSchema: map[string]*schema.Schema{ - "field-a": {Description: "beep", Optional: true}, - "field-b": {Description: "beep", Optional: true}, - }, - newResourceSchema: map[string]*schema.Schema{ - "field-z": {Description: "beep", Optional: true}, + name: "removing multiple fields", + resourceDiff: diff.ResourceDiff{ + Fields: map[string]diff.FieldDiff{ + "field-a": diff.FieldDiff{ + Old: &schema.Schema{Description: "beep", Optional: true}, + New: nil, + }, + "field-b": diff.FieldDiff{ + Old: &schema.Schema{Description: "beep", Optional: true}, + New: nil, + }, + }, }, - expectedViolations: 2, + expectedFields: []string{"field-a", "field-b"}, }, } func (tc *resourceSchemaTestCase) check(rule ResourceSchemaRule, t *testing.T) { - violations := rule.isRuleBreak(tc.oldResourceSchema, tc.newResourceSchema) - if tc.expectedViolations != len(violations) { - t.Errorf("Test `%s` failed: expected %d violations, got %d", tc.name, tc.expectedViolations, len(violations)) + fields := rule.IsRuleBreak(tc.resourceDiff) + less := func(a, b string) bool { return a < b } + if !cmp.Equal(fields, tc.expectedFields, cmpopts.SortSlices(less)) { + t.Errorf("Test `%s` failed: wanted %v , got %v", tc.name, tc.expectedFields, fields) } } From 
e79b03b91b5efce386e872b833c2fd9f860d6ff9 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 21 Sep 2023 00:30:05 +0100 Subject: [PATCH 15/36] Comment out PF provider config test that's affected by ADCs in our CI (#9034) --- .../fwtransport/framework_config_test.go.erb | 112 +++++++++--------- 1 file changed, 57 insertions(+), 55 deletions(-) diff --git a/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb b/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb index 37c11069bd51..e7b8fdf8ae02 100644 --- a/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb +++ b/mmv1/third_party/terraform/fwtransport/framework_config_test.go.erb @@ -335,61 +335,63 @@ func TestFrameworkProvider_LoadAndValidateFramework_credentials(t *testing.T) { } } -func TestFrameworkProvider_LoadAndValidateFramework_credentials_unknown(t *testing.T) { - // This test case is kept separate from other credentials tests, as it requires comparing - // error messages returned by two different error states: - // - When credentials = Null - // - When credentials = Unknown - - t.Run("the same error is returned whether credentials is set as a null or unknown value (and access_token isn't set)", func(t *testing.T) { - - // Arrange - acctest.UnsetTestProviderConfigEnvs(t) - - ctx := context.Background() - tfVersion := "foobar" - providerversion := "999" - - impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list - - // Null data and error collection - diagsNull := diag.Diagnostics{} - dataNull := fwmodels.ProviderModel{ - Credentials: types.StringNull(), - } - dataNull.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates - - // Unknown data and error collection - diagsUnknown := diag.Diagnostics{} - dataUnknown := fwmodels.ProviderModel{ - Credentials: types.StringUnknown(), - } - dataUnknown.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates - - pNull := fwtransport.FrameworkProviderConfig{} - pUnknown := fwtransport.FrameworkProviderConfig{} - - // Act - pNull.LoadAndValidateFramework(ctx, &dataNull, tfVersion, &diagsNull, providerversion) - pUnknown.LoadAndValidateFramework(ctx, &dataUnknown, tfVersion, &diagsUnknown, providerversion) - - // Assert - if !diagsNull.HasError() { - t.Fatalf("expect errors when credentials is null, but [%d] errors occurred", diagsNull.ErrorsCount()) - } - if !diagsUnknown.HasError() { - t.Fatalf("expect errors when credentials is unknown, but [%d] errors occurred", diagsUnknown.ErrorsCount()) - } - - errNull := diagsNull.Errors() - errUnknown := diagsUnknown.Errors() - for i := 0; i < len(errNull); i++ { - if errNull[i] != errUnknown[i] { - t.Fatalf("expect errors to be the same for null and unknown credentials values, instead got \nnull=`%s` \nunknown=%s", errNull[i], errUnknown[i]) - } - } - }) -} +// NOTE: these tests can't run in Cloud Build due to ADC locating credentials despite `GOOGLE_APPLICATION_CREDENTIALS` being unset +// See https://cloud.google.com/docs/authentication/application-default-credentials#search_order +// Also, when running these tests locally you need to run `gcloud auth application-default revoke` to ensure your machine isn't supplying ADCs +// func TestFrameworkProvider_LoadAndValidateFramework_credentials_unknown(t *testing.T) { +// // This test case is kept separate from other credentials tests, as it requires comparing +// // error messages returned by two different error states: +// // 
- When credentials = Null +// // - When credentials = Unknown + +// t.Run("the same error is returned whether credentials is set as a null or unknown value (and access_token isn't set)", func(t *testing.T) { +// // Arrange +// acctest.UnsetTestProviderConfigEnvs(t) + +// ctx := context.Background() +// tfVersion := "foobar" +// providerversion := "999" + +// impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + +// // Null data and error collection +// diagsNull := diag.Diagnostics{} +// dataNull := fwmodels.ProviderModel{ +// Credentials: types.StringNull(), +// } +// dataNull.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + +// // Unknown data and error collection +// diagsUnknown := diag.Diagnostics{} +// dataUnknown := fwmodels.ProviderModel{ +// Credentials: types.StringUnknown(), +// } +// dataUnknown.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + +// pNull := fwtransport.FrameworkProviderConfig{} +// pUnknown := fwtransport.FrameworkProviderConfig{} + +// // Act +// pNull.LoadAndValidateFramework(ctx, &dataNull, tfVersion, &diagsNull, providerversion) +// pUnknown.LoadAndValidateFramework(ctx, &dataUnknown, tfVersion, &diagsUnknown, providerversion) + +// // Assert +// if !diagsNull.HasError() { +// t.Fatalf("expect errors when credentials is null, but [%d] errors occurred", diagsNull.ErrorsCount()) +// } +// if !diagsUnknown.HasError() { +// t.Fatalf("expect errors when credentials is unknown, but [%d] errors occurred", diagsUnknown.ErrorsCount()) +// } + +// errNull := diagsNull.Errors() +// errUnknown := diagsUnknown.Errors() +// for i := 0; i < len(errNull); i++ { +// if errNull[i] != errUnknown[i] { +// t.Fatalf("expect errors to be the same for null and unknown credentials values, instead got \nnull=`%s` \nunknown=%s", errNull[i], errUnknown[i]) +// } +// } +// }) +// } func TestFrameworkProvider_LoadAndValidateFramework_billingProject(t *testing.T) { From 1f8ac462d7b2ed2c2f843f3f36d4558b87afa30a Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 21 Sep 2023 17:09:17 +0100 Subject: [PATCH 16/36] Allow NotificationChannel import process to set project from the URI (#9022) --- mmv1/products/monitoring/NotificationChannel.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/products/monitoring/NotificationChannel.yaml b/mmv1/products/monitoring/NotificationChannel.yaml index fe021aa1ff00..7c67e49d806d 100644 --- a/mmv1/products/monitoring/NotificationChannel.yaml +++ b/mmv1/products/monitoring/NotificationChannel.yaml @@ -72,7 +72,7 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/monitoring_notification_channel.go.erb decoder: templates/terraform/decoders/monitoring_notification_channel.go.erb constants: templates/terraform/constants/monitoring_notification_channel.go.erb - custom_import: templates/terraform/custom_import/self_link_as_name.erb + custom_import: templates/terraform/custom_import/self_link_as_name_set_project.go.erb post_create: templates/terraform/post_create/set_computed_name.erb custom_diff: [ 'sensitiveLabelCustomizeDiff', From 8be1e0ab2a75090ac06ed371f225b98084a50175 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 21 Sep 2023 17:32:23 +0100 Subject: [PATCH 17/36] Upgrade guide - Add guidance for navigating region/location logic changes (#9008) * Add upgrade guidance for navigating 
region/location logic changes Co-authored-by: BBBmau * Move `Resource annotations` section to be nested under `Provider-level Labels Rework` again --------- Co-authored-by: BBBmau --- .../guides/version_5_upgrade.html.markdown | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index 3845211cd374..d850b89fc40f 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -118,6 +118,27 @@ The new annotations model is similar to the new labels model and will be applied There are now two annotation-related fields with the new model, the `annotations` and the output-only `effective_annotations` fields. +### Changes to how default `location`, `region` and `zone` values are obtained for resources + +Currently, when configuring resources that require a `location`, `region` or `zone` field you have the choice of specifying it in the resource block or allowing default values to be used. Default [region](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#region) or [zone](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#zone) values can be configured in the provider block or by providing values through environment variables. + +Changes in 5.0.0 make the way the provider handles `location`/`region`/`zone` values more consistent: + +* Resources that have a `location` field will now use the default `region` value preferentially over the default `zone` value set on the provider. This is only relevant to resources where `location` is not provided in the resource block directly. +* Previously, default `region` and `zone` values set as URIs were incompatible with resources that have `location` or `region` arguments. In 5.0.0+ those values will now be valid and won't result in errors during plan/apply stages. + + +#### When you may need to take action + +There is only one change that we anticipate can lead to unexpected diffs in Terraform plans after upgrading to 5.0.0, which is: + +> Resources that have a `location` field will now use the default `region` value preferentially over the default `zone` value set on the provider. This is only relevant to resources where `location` is not provided in the resource block directly. + +Users will need to check for unexpected `location` changes for resources. If an unexpected change is seen, the solution is to explicitly set the `location` value in that resource's configuration block to match the desired value. + +This will only affect users whose configuration contains resource blocks that have missing `location` values and whose [default zone](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#zone) value belongs to a region that's different than the [default region](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#region) value. For example, if you set `us-central1-a` as the default zone and `us-central2` as the default region on the provider you may see plans that contain unexpected diffs to move resources from `us-central1` to `us-central2`. 
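As a sketch of the mitigation described above, explicitly pinning `location` on the affected resource removes any dependence on the provider-level defaults. The resource type and names below are hypothetical; any resource with a `location` argument follows the same pattern:

```tf
provider "google" {
  project = "my-project"
  region  = "us-central2"   # default region
  zone    = "us-central1-a" # default zone belongs to a different region
}

resource "google_artifact_registry_repository" "example" {
  repository_id = "example-repo"
  format        = "DOCKER"
  # Pin the location explicitly so the 5.0.0 preference for the default
  # region over the default zone cannot introduce an unexpected diff.
  location = "us-central1"
}
```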
+ + ### Provider default values shown at plan-time `project`, `region`, and `zone` fields will now display their values during plan-time instead of the placeholder `(known after apply)` value normally displayed for fields without fixed Terraform default values. These values will be taken from either the Terraform resource config file, provider config, or local environment variables, depending on which variables are supplied by the user, matching the existing per-resource functionality for what default values are used in execution of a Terraform plan. From c68c0c04d9f9a67c12618605258827f4a2e34b64 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Thu, 21 Sep 2023 12:00:08 -0500 Subject: [PATCH 18/36] Upgrade guide - container nodepool (#8969) --- .../docs/guides/version_5_upgrade.html.markdown | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index d850b89fc40f..e4e4695748ed 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -292,12 +292,14 @@ resource "google_firebaserules_ruleset" "firestore" { These two unsupported fields were introduced incorrectly. They are now removed. + ## Resource: `google_cloud_run_v2_service` ### `liveness_probe.tcp_socket` is now removed This unsupported field was introduced incorrectly. It is now removed. + ## Resource: `google_container_cluster` ### Clusters created in error states are now tainted rather than deleted @@ -318,6 +320,18 @@ cluster and/or manually resolve the issues and untaint their failed clusters. Previously `network_policy.provider` defaulted to "PROVIDER_UNSPECIFIED". It no longer has a default value. + +## Resource: `google_container_node_pool` + +### `logging_variant` no longer has a provider default value + +Previously `logging_variant` defaulted to "DEFAULT". It no longer has a default value. + +### `management.auto_repair` and `management.auto_upgrade` now default to true + +Previously both fields defaulted to false. They now default to true. 
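For illustration, a node pool that deliberately keeps the pre-5.0.0 behaviour can set both fields explicitly; the cluster reference and names here are hypothetical:

```tf
resource "google_container_node_pool" "example" {
  name       = "example-pool"
  cluster    = google_container_cluster.primary.id
  node_count = 1

  management {
    # 5.0.0 defaults both fields to true; set them to false explicitly
    # only if the previous defaults are the behaviour you actually want.
    auto_repair  = false
    auto_upgrade = false
  }
}
```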
+ + ## Resource: `google_dataplex_datascan` ### `dataQualityResult` and `dataProfileResult` output fields are now removed From a05ae4ff7a4d1bc54fcdf3d228855df91a25b272 Mon Sep 17 00:00:00 2001 From: Max Portocarrero CI&T <105444618+maxi-cit@users.noreply.github.com> Date: Thu, 21 Sep 2023 12:43:53 -0500 Subject: [PATCH 19/36] Add network attachment to gce instance (#8829) * first draft for new Network Attachment resource * updated NetworkAttachment specification file * added usage example for Network Attachment with GCE Instance * verified attributes and descriptions * enhanced network attachment examples * adding network attachment field to compute instance network interface * added network attachment specification to yaml file * added integration tests * fixing minor typo * fixed typo * fixed typo * fixed tf config * fixed typo * fixed network attachment simple test * updated instance resource documentation * removed bad logic network attachment validation * fixed tests and field specification for network attachment --- mmv1/products/compute/Instance.yaml | 8 + .../compute/compute_instance_helpers.go.erb | 9 - .../compute/resource_compute_instance.go.erb | 13 +- .../resource_compute_instance_test.go.erb | 214 ++++++++++++++++++ .../docs/r/compute_instance.html.markdown | 19 +- 5 files changed, 245 insertions(+), 18 deletions(-) diff --git a/mmv1/products/compute/Instance.yaml b/mmv1/products/compute/Instance.yaml index 4d8111c04eae..c8de201d2765 100644 --- a/mmv1/products/compute/Instance.yaml +++ b/mmv1/products/compute/Instance.yaml @@ -497,6 +497,14 @@ properties: should be specified. # networkInterfaces.kind is not necessary for convergence. custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' + - !ruby/object:Api::Type::ResourceRef + name: 'networkAttachment' + resource: 'networkAttachment' + min_version: beta + imports: 'selfLink' + description: | + The URL of the network attachment that this interface should connect to in the following format: + projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. - !ruby/object:Api::Type::NestedObject name: 'scheduling' description: Sets the scheduling options for this instance. 
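For context, a minimal sketch of the user-facing configuration this new field enables (google-beta provider; the attachment URL and names are hypothetical):

```tf
resource "google_compute_instance" "example" {
  provider     = google-beta
  name         = "example-vm"
  machine_type = "e2-medium"
  zone         = "us-central1-a"

  boot_disk {
    initialize_params {
      image = "debian-cloud/debian-11"
    }
  }

  network_interface {
    # Attach this NIC to an existing network attachment instead of
    # pointing it at a network/subnetwork directly.
    network_attachment = "projects/1234567890/regions/us-central1/networkAttachments/example-attachment"
  }
}
```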
diff --git a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb index 655c4341db21..4f577b14cc48 100644 --- a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb +++ b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb @@ -440,15 +440,6 @@ func expandNetworkInterfaces(d tpgresource.TerraformResourceData, config *transp return nil, fmt.Errorf("exactly one of network, subnetwork, or network_attachment must be provided") } - - if networkAttachment != "" { - if network != "" { - return nil, fmt.Errorf("Cannot have a network provided with networkAttachment given that networkAttachment is associated with a network already") - } - if subnetwork != "" { - return nil, fmt.Errorf("Cannot have a subnetwork provided with networkAttachment given that networkAttachment is associated with a subnetwork already") - } - } <% else -%> network := data["network"].(string) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 9ade334d7099..799176f39943 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -305,6 +305,17 @@ func ResourceComputeInstance() *schema.Resource { Description: `The name or self_link of the subnetwork attached to this interface.`, }, + <% if version == "beta" -%> + "network_attachment": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}.`, + }, + <% end %> + "subnetwork_project": { Type: schema.TypeString, Optional: true, @@ -1509,7 +1520,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } <% unless version == 'ga' -%> // Add extra check on Scheduling to prevent STOP instance setting MaxRunDuration. - // When Instance being stopped, GCE will wipe out the MaxRunDuration field. + // When Instance being stopped, GCE will wipe out the MaxRunDuration field. // And Terraform has no visiblity on this field after then. 
Given the infrastructure // constraint, MaxRunDuration will only be supported with instance has // DELETE InstanceTerminationAction diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index bdd25e99ac0e..fa9c520d4629 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -2626,6 +2626,70 @@ func testAccCheckComputeInstanceUpdateMachineType(t *testing.T, n string) resour } } +<% if version == "beta"%> +func TestAccComputeInstance_NetworkAttachment(t *testing.T) { + t.Parallel() + suffix := fmt.Sprintf("%s", acctest.RandString(t, 10)) + var instance compute.Instance + + testNetworkAttachmentName := fmt.Sprintf("tf-test-network-attachment-%s", suffix) + + // Need to have the full network attachment name in the format project/{project_id}/regions/{region_id}/networkAttachments/{testNetworkAttachmentName} + fullFormNetworkAttachmentName := fmt.Sprintf("projects/%s/regions/%s/networkAttachments/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), testNetworkAttachmentName) + + context := map[string]interface{}{ + "suffix": (acctest.RandString(t, 10)), + "network_attachment_name": testNetworkAttachmentName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkAttachment(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkAttachment(&instance, fmt.Sprintf("https://www.googleapis.com/compute/beta/%s", fullFormNetworkAttachmentName)), + ), + }, + }, + }) +} + +func TestAccComputeInstance_NetworkAttachmentUpdate(t *testing.T) { + t.Parallel() + suffix := acctest.RandString(t, 10) + envRegion := envvar.GetTestRegionFromEnv() + instanceName := fmt.Sprintf("tf-test-compute-instance-%s", suffix) + + networkAttachmentSelflink1 := "google_compute_network_attachment.test_network_attachment_1.self_link" + networkAttachmentSelflink2 := "google_compute_network_attachment.test_network_attachment_2.self_link" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkAttachmentUpdate(networkAttachmentSelflink1, envRegion, suffix), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_networkAttachmentUpdate(networkAttachmentSelflink2, envRegion, suffix), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_networkAttachmentUpdate(networkAttachmentSelflink1, envRegion, suffix), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} +<% end %> + func testAccCheckComputeInstanceDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) 
@@ -3204,6 +3268,19 @@ func testAccCheckComputeInstanceHasMinCpuPlatform(instance *compute.Instance, mi } } +<% if version == "beta"%> +func testAccCheckComputeInstanceHasNetworkAttachment(instance *compute.Instance, networkAttachmentName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instance.NetworkInterfaces { + if networkInterface.NetworkAttachment != "" && networkInterface.NetworkAttachment == networkAttachmentName { + return nil + } + } + return fmt.Errorf("Network Attachment %s, was not found in the instance template", networkAttachmentName) + } +} +<% end %> + func testAccCheckComputeInstanceHasMachineType(instance *compute.Instance, machineType string) resource.TestCheckFunc { return func(s *terraform.State) error { instanceMachineType := tpgresource.GetResourceNameFromSelfLink(instance.MachineType) @@ -6925,3 +7002,140 @@ resource "google_compute_disk" "debian" { } `, instance, diskName, suffix, suffix, suffix) } + +<% if version =="beta"%> +func testAccComputeInstance_networkAttachment(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "test-network"{ + name = "tf-test-network-%{suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test-subnetwork" { + name = "tf-test-compute-subnet-%{suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.test-network.id +} + +resource "google_compute_network_attachment" "test_network_attachment" { + name = "%{network_attachment_name}" + region = "us-central1" + description = "network attachment description" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.test-subnetwork.self_link + ] +} + +resource "google_compute_instance" "foobar" { + name = "tf-test-instance-%{suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + network = "default" + } + + network_interface{ + network_attachment = google_compute_network_attachment.test_network_attachment.self_link + } + + metadata = { + foo = "bar" + } +} +`, context) +} + +func testAccComputeInstance_networkAttachmentUpdate(networkAttachment, region, suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "consumer_vpc_1" { + name = "tf-test-consumer-net-1-%s" + auto_create_subnetworks = false +} + +resource "google_compute_network" "consumer_vpc_2" { + name = "tf-test-consumer-net-2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "consumer_subnet_1" { + name = "tf-test-consumer-subnet-1-%s" + ip_cidr_range = "10.0.0.0/16" + region = "%s" + network = google_compute_network.consumer_vpc_1.id +} + +resource "google_compute_subnetwork" "consumer_subnet_2" { + name = "tf-test-consumer-subnet-2-%s" + ip_cidr_range = "10.3.0.0/16" + region = "%s" + network = google_compute_network.consumer_vpc_2.id +} + +resource "google_compute_network_attachment" "test_network_attachment_1" { + name = "tf-test-network-attachment-1-%s" + region = "%s" + description = "network attachment 1 description" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.consumer_subnet_1.self_link + ] +} + +resource 
"google_compute_network_attachment" "test_network_attachment_2" { + name = "tf-test-network-attachment-2-%s" + region = "%s" + description = "network attachment 2 description" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.consumer_subnet_2.self_link + ] +} + +resource "google_compute_instance" "foobar" { + name = "tf-test-compute-instance-%s" + machine_type = "e2-medium" + zone = "%s-a" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + network_interface{ + network_attachment = %s + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, region, suffix, region, suffix, region, suffix, region, suffix, region, networkAttachment) +} +<% end %> diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index aa1c929d2db8..bbb897168f8d 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -236,7 +236,7 @@ is desired, you will need to modify your state file manually using For instance, the image `centos-6-v20180104` includes its family name `centos-6`. These images can be referred by family name here. -* `labels` - (Optional) A set of key/value label pairs assigned to the disk. This +* `labels` - (Optional) A set of key/value label pairs assigned to the disk. This field is only applicable for persistent disks. * `resource_manager_tags` - (Optional) A tag is a key-value pair that can be attached to a Google Cloud resource. You can use tags to conditionally allow or deny policies based on whether a resource has a specific tag. This value is not returned by the API. In Terraform, this value cannot be updated and changing it will recreate the resource. @@ -285,6 +285,7 @@ is desired, you will need to modify your state file manually using network is in auto subnet mode, specifying the subnetwork is optional. If the network is in custom subnet mode, specifying the subnetwork is required. + * `subnetwork_project` - (Optional) The project in which the subnetwork belongs. If the `subnetwork` is a self_link, this field is ignored in favor of the project defined in the subnetwork self_link. If the `subnetwork` is a name and this @@ -306,6 +307,8 @@ is desired, you will need to modify your state file manually using * `nic_type` - (Optional) The type of vNIC to be used on this interface. Possible values: GVNIC, VIRTIO_NET. +* `network_attachment` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The URL of the network attachment that this interface should connect to in the following format: `projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}`. + * `stack_type` - (Optional) The stack type for this network interface to identify whether the IPv6 feature is enabled or not. Values are IPV4_IPV6 or IPV4_ONLY. If not specified, IPV4_ONLY will be used. * `ipv6_access_config` - (Optional) An array of IPv6 access configurations for this interface. @@ -331,14 +334,14 @@ specified, then this instance will have no external IPv6 Internet access. 
Struct The `ipv6_access_config` block supports: -* `external_ipv6` - (Optional) The first IPv6 address of the external IPv6 range associated - with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. - To use a static external IP address, it must be unused and in the same region as the instance's zone. +* `external_ipv6` - (Optional) The first IPv6 address of the external IPv6 range associated + with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. + To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork. * `external_ipv6_prefix_length` - (Optional) The prefix length of the external IPv6 range. -* `name` - (Optional) The name of this access configuration. In ipv6AccessConfigs, the recommended name +* `name` - (Optional) The name of this access configuration. In ipv6AccessConfigs, the recommended name is "External IPv6". * `network_tier` - (Optional) The service-level to be provided for IPv6 traffic when the @@ -390,12 +393,12 @@ specified, then this instance will have no external IPv6 Internet access. Struct * `min_node_cpus` - (Optional) The minimum number of virtual CPUs this instance will consume when running on a sole-tenant node. -* `provisioning_model` - (Optional) Describe the type of preemptible VM. This field accepts the value `STANDARD` or `SPOT`. If the value is `STANDARD`, there will be no discount. If this is set to `SPOT`, +* `provisioning_model` - (Optional) Describe the type of preemptible VM. This field accepts the value `STANDARD` or `SPOT`. If the value is `STANDARD`, there will be no discount. If this is set to `SPOT`, `preemptible` should be `true` and `automatic_restart` should be `false`. For more info about `SPOT`, read [here](https://cloud.google.com/compute/docs/instances/spot) - -* `instance_termination_action` - (Optional) Describe the type of termination action for VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) + +* `instance_termination_action` - (Optional) Describe the type of termination action for VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) * `max_run_duration` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Only support `DELETE` `instance_termination_action` at this point. Structure is [documented below](#nested_max_run_duration). 
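A brief illustrative sketch of a Spot VM `scheduling` block that satisfies the constraints described above (resource name and values are hypothetical):

```tf
resource "google_compute_instance" "spot_example" {
  name         = "spot-example"
  machine_type = "e2-medium"
  zone         = "us-central1-a"

  boot_disk {
    initialize_params {
      image = "debian-cloud/debian-11"
    }
  }

  network_interface {
    network = "default"
  }

  scheduling {
    provisioning_model          = "SPOT"
    preemptible                 = true  # must be true when provisioning_model is SPOT
    automatic_restart           = false # must be false when provisioning_model is SPOT
    instance_termination_action = "DELETE"
  }
}
```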
The `max_run_duration` block supports: From bc48ba2b4f137cec27b6889b79c77569dd27e768 Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Thu, 21 Sep 2023 17:44:15 +0000 Subject: [PATCH 20/36] Migrate tpg-test to go (#8992) --- .ci/gcb-generate-diffs-new.yml | 18 ++++---- .ci/magician/cmd/mock_github_test.go | 5 +++ .ci/magician/cmd/test_tpg.go | 61 ++++++++++++++++++++++++++++ .ci/magician/cmd/test_tpg_test.go | 17 ++++++++ .ci/magician/github/init.go | 1 + .ci/magician/github/set.go | 20 +++++++++ .ci/magician/go.mod | 6 +-- .ci/magician/go.sum | 12 +++--- .ci/scripts/go-plus/magician/exec.sh | 2 + 9 files changed, 122 insertions(+), 20 deletions(-) create mode 100644 .ci/magician/cmd/test_tpg.go create mode 100644 .ci/magician/cmd/test_tpg_test.go diff --git a/.ci/gcb-generate-diffs-new.yml b/.ci/gcb-generate-diffs-new.yml index 1a69cfd12b6e..978870ceccff 100644 --- a/.ci/gcb-generate-diffs-new.yml +++ b/.ci/gcb-generate-diffs-new.yml @@ -240,29 +240,25 @@ steps: - "19" # Build step - terraform-google-conversion - - name: 'gcr.io/graphite-docker-images/bash-plus' + - name: 'gcr.io/graphite-docker-images/go-plus' id: tpgb-test - entrypoint: '/workspace/.ci/scripts/bash-plus/terraform-tester/test_terraform.sh' + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' secretEnv: ["GITHUB_TOKEN"] waitFor: ["tpgb-head", "tpgb-base"] + args: + - 'test-tpg' env: - VERSION=beta - COMMIT_SHA=$COMMIT_SHA - PR_NUMBER=$_PR_NUMBER - - name: 'gcr.io/graphite-docker-images/bash-plus' + - name: 'gcr.io/graphite-docker-images/go-plus' id: tpg-test - entrypoint: '/workspace/.ci/scripts/bash-plus/terraform-tester/test_terraform.sh' + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' secretEnv: ["GITHUB_TOKEN"] waitFor: ["tpg-head", "tpg-base"] args: - - 'ga' # remove after 07/2023 - - $_PR_NUMBER # remove after 07/2023 - - $COMMIT_SHA # remove after 07/2023 - - $BUILD_ID # remove after 07/2023 - - $PROJECT_ID # remove after 07/2023 - - GoogleCloudPlatform/magic-modules # remove after 07/2023 - - "21" # remove after 07/2023 + - 'test-tpg' env: - VERSION=ga - COMMIT_SHA=$COMMIT_SHA diff --git a/.ci/magician/cmd/mock_github_test.go b/.ci/magician/cmd/mock_github_test.go index 4f747b6645ce..8c85b5d37587 100644 --- a/.ci/magician/cmd/mock_github_test.go +++ b/.ci/magician/cmd/mock_github_test.go @@ -54,3 +54,8 @@ func (m *mockGithub) PostBuildStatus(prNumber string, title string, state string m.calledMethods["PostBuildStatus"] = true return nil } + +func (m *mockGithub) CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error { + m.calledMethods["CreateWorkflowDispatchEvent"] = true + return nil +} diff --git a/.ci/magician/cmd/test_tpg.go b/.ci/magician/cmd/test_tpg.go new file mode 100644 index 000000000000..60eadb949cfd --- /dev/null +++ b/.ci/magician/cmd/test_tpg.go @@ -0,0 +1,61 @@ +package cmd + +import ( + "fmt" + "magician/github" + "os" + + "github.com/spf13/cobra" +) + +type ttGithub interface { + CreateWorkflowDispatchEvent(string, map[string]any) error +} + +var testTPGCmd = &cobra.Command{ + Use: "test-tpg", + Short: "Run provider unit tests via workflow dispatch", + Long: `This command runs provider unit tests via workflow dispatch + + The following PR details are expected as environment variables: + 1. VERSION (beta or ga) + 2. COMMIT_SHA + 3. 
PR_NUMBER + `, + Run: func(cmd *cobra.Command, args []string) { + version := os.Getenv("VERSION") + commit := os.Getenv("COMMIT_SHA") + pr := os.Getenv("PR_NUMBER") + + gh := github.NewGithubService() + + execTestTPG(version, commit, pr, gh) + }, +} + +func execTestTPG(version, commit, pr string, gh ttGithub) { + var repo string + if version == "ga" { + repo = "terraform-provider-google" + } else if version == "beta" { + repo = "terraform-provider-google-beta" + } else { + fmt.Println("invalid version specified") + os.Exit(1) + } + + inputs := map[string]any{ + "owner": "modular-magician", + "repo": repo, + "branch": "auto-pr-" + pr, + "sha": commit, + } + + if err := gh.CreateWorkflowDispatchEvent("test-tpg.yml", inputs); err != nil { + fmt.Printf("Error creating workflow dispatch event: %v\n", err) + } +} + +func init() { + rootCmd.AddCommand(testTPGCmd) +} diff --git a/.ci/magician/cmd/test_tpg_test.go b/.ci/magician/cmd/test_tpg_test.go new file mode 100644 index 000000000000..243fd5820ea0 --- /dev/null +++ b/.ci/magician/cmd/test_tpg_test.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "testing" +) + +func TestExecTestTPG(t *testing.T) { + gh := &mockGithub{ + calledMethods: make(map[string]bool), + } + + execTestTPG("beta", "sha1", "pr1", gh) + + if !gh.calledMethods["CreateWorkflowDispatchEvent"] { + t.Fatal("workflow dispatch event not created") + } +} diff --git a/.ci/magician/github/init.go b/.ci/magician/github/init.go index 66847f0fc628..6a4b7a468552 100644 --- a/.ci/magician/github/init.go +++ b/.ci/magician/github/init.go @@ -20,6 +20,7 @@ type GithubService interface { RequestPullRequestReviewer(prNumber, assignee string) error AddLabel(prNumber, label string) error RemoveLabel(prNumber, label string) error + CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error } func NewGithubService() GithubService { diff --git a/.ci/magician/github/set.go b/.ci/magician/github/set.go index a7893b3cf83b..9238ce94da25 100644 --- a/.ci/magician/github/set.go +++ b/.ci/magician/github/set.go @@ -94,3 +94,23 @@ func (gh *github) RemoveLabel(prNumber, label string) error { return nil } + +func (gh *github) CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error { + url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/actions/workflows/%s/dispatches", workflowFileName) + resp, err := utils.RequestCall(url, "POST", gh.token, nil, map[string]any{ + "ref": "main", + "inputs": inputs, + }) + + if resp != 200 { + return fmt.Errorf("server returned %d creating workflow dispatch event", resp) + } + + if err != nil { + return fmt.Errorf("failed to create workflow dispatch event: %s", err) + } + + fmt.Printf("Successfully created workflow dispatch event for %s with inputs %v", workflowFileName, inputs) + + return nil +} diff --git a/.ci/magician/go.mod b/.ci/magician/go.mod index 864ebd1e2c08..3d70795804fd 100644 --- a/.ci/magician/go.mod +++ b/.ci/magician/go.mod @@ -19,10 +19,10 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/googleapis/gax-go/v2 v2.7.0 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/net v0.8.0 // indirect + golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.6.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488 
// indirect google.golang.org/grpc v1.53.0 // indirect diff --git a/.ci/magician/go.sum b/.ci/magician/go.sum index 1fa5659b8306..813e641e6f7b 100644 --- a/.ci/magician/go.sum +++ b/.ci/magician/go.sum @@ -80,8 +80,8 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= @@ -92,13 +92,13 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= diff --git a/.ci/scripts/go-plus/magician/exec.sh b/.ci/scripts/go-plus/magician/exec.sh index 32abb017defa..11f636d23791 100755 --- a/.ci/scripts/go-plus/magician/exec.sh +++ b/.ci/scripts/go-plus/magician/exec.sh @@ -14,5 +14,7 @@ GO_PROGRAM="$DIR/../../../magician/" pushd $GO_PROGRAM +set -x # Pass all arguments to the child command go run . 
"$@" +set +x From e5f527e912c77554aae130cdaac25675a62ae486 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Thu, 21 Sep 2023 11:20:23 -0700 Subject: [PATCH 21/36] added `google_service_networking_connection` deletion to 5.0 guide (#9009) * Add 5.0 guide to delete google_service_networking_connection * Address comment --- .../website/docs/guides/version_5_upgrade.html.markdown | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index e4e4695748ed..6cad0bd57bbf 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -525,6 +525,10 @@ If you were relying on accessing an individual flag by index (for example, `goog `google_service_networking_connection` now uses the Create endpoint instead of the Patch endpoint during the creation step. Previously, Patch was used as a workaround for an issue that has since been resolved. +### "terraform destroy" now fully deletes the resource instead of abandoning + +`google_service_networking_connection` now uses API `deleteConnection` method instead of `removePeering` method during the deletion step. Previously, `removePeering` method was used because `deleteConnection` method was unavailable. In some cases a private connection cannot be deleted immediately after the resource using that connection is deleted, and users may have to delete the private connection after a waiting period. + ## Resource: `google_secret_manager_secret` ### `replication.automatic` is now removed From 8eabaaf75599ffada0fe4b6e297f2c6bb8e64450 Mon Sep 17 00:00:00 2001 From: Sheneska Williams <74882676+sheneska@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:19:56 -0400 Subject: [PATCH 22/36] Upload META.d folder (#9002) * Upload META.d folder * Update common~copy.yaml --- mmv1/provider/terraform/common~copy.yaml | 6 ++++++ mmv1/third_party/terraform/META.d/_summary.yaml | 12 ++++++++++++ mmv1/third_party/terraform/META.d/links.yaml | 7 +++++++ 3 files changed, 25 insertions(+) create mode 100644 mmv1/third_party/terraform/META.d/_summary.yaml create mode 100644 mmv1/third_party/terraform/META.d/links.yaml diff --git a/mmv1/provider/terraform/common~copy.yaml b/mmv1/provider/terraform/common~copy.yaml index 4e34aa349fec..b8ae9786abc2 100644 --- a/mmv1/provider/terraform/common~copy.yaml +++ b/mmv1/provider/terraform/common~copy.yaml @@ -137,6 +137,12 @@ -%> '<%= fname -%>': '<%= file_path -%>' <% end -%> +<% + Dir["third_party/terraform/META.d/*.yaml"].each do |file_path| + fname = file_path.delete_prefix("third_party/terraform/") +-%> +'<%= fname -%>': '<%= file_path -%>' +<% end -%> '.teamcity/.gitignore': 'third_party/terraform/.teamcity/.gitignore' '.teamcity/Makefile': 'third_party/terraform/.teamcity/Makefile' 'version/version.go': 'third_party/terraform/version/version.go' diff --git a/mmv1/third_party/terraform/META.d/_summary.yaml b/mmv1/third_party/terraform/META.d/_summary.yaml new file mode 100644 index 000000000000..c3dc9c1febb3 --- /dev/null +++ b/mmv1/third_party/terraform/META.d/_summary.yaml @@ -0,0 +1,12 @@ +--- + +schema: 1.1 + +partition: tf-ecosystem + +summary: + owner: team-tf-hybrid-cloud + description: | + The Terraform Google provider is a plugin that allows Terraform to manage resources on Google Cloud Platform. 
+ + visibility: external \ No newline at end of file diff --git a/mmv1/third_party/terraform/META.d/links.yaml b/mmv1/third_party/terraform/META.d/links.yaml new file mode 100644 index 000000000000..b15cd0fc2485 --- /dev/null +++ b/mmv1/third_party/terraform/META.d/links.yaml @@ -0,0 +1,7 @@ +runbooks: [] +#- name: +# link: + +other_links: [] +#- name: +# link: \ No newline at end of file From 8712f21ba54dd6ece13ab7b2ace4fc477d3c6f36 Mon Sep 17 00:00:00 2001 From: Scott Suarez Date: Thu, 21 Sep 2023 14:28:40 -0700 Subject: [PATCH 23/36] Don't merge for FEATURE BRANCH commits (#9041) --- .ci/gcb-community-checker.yml | 25 +++++++++++----------- .ci/gcb-contributor-membership-checker.yml | 25 +++++++++++----------- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/.ci/gcb-community-checker.yml b/.ci/gcb-community-checker.yml index b7032b5c6148..08712995646b 100644 --- a/.ci/gcb-community-checker.yml +++ b/.ci/gcb-community-checker.yml @@ -22,13 +22,6 @@ steps: - user.name - "Modular Magician Diff Process" - # Fetch main (only if it's not already present) - - name: "gcr.io/cloud-builders/git" - args: - - fetch - - origin - - main - # Display commit log for clarity - name: "gcr.io/cloud-builders/git" args: @@ -36,18 +29,24 @@ steps: - "--oneline" - "-n 10" - # Find common ancestor commit and apply diff for the .ci folder + # Find common ancestor commit and apply diff for the .ci folder. - name: "gcr.io/cloud-builders/git" id: findMergeBase entrypoint: "bash" args: - "-c" - | - base_commit=$(git merge-base origin/main HEAD) - echo "Common ancestor commit: $base_commit" - git diff $base_commit origin/main -- .ci/ - git diff $base_commit origin/main -- .ci/ > /workspace/ci.diff - git apply /workspace/ci.diff --allow-empty + git fetch origin main + if [ "$_BASE_BRANCH" != "main" ]; then + echo "Checking out .ci/ folder from main" + git checkout origin/main -- .ci/ + else + base_commit=$(git merge-base origin/main HEAD) + echo "Common ancestor commit: $base_commit" + git diff $base_commit upstream/main -- .ci/ + git diff $base_commit upstream/main -- .ci/ > /workspace/ci.diff + git apply ./ci.diff --allow-empty + fi - name: 'gcr.io/graphite-docker-images/go-plus' entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' diff --git a/.ci/gcb-contributor-membership-checker.yml b/.ci/gcb-contributor-membership-checker.yml index d44ea5a4b4a2..3164a75a5cf8 100644 --- a/.ci/gcb-contributor-membership-checker.yml +++ b/.ci/gcb-contributor-membership-checker.yml @@ -22,13 +22,6 @@ steps: - user.name - "Modular Magician Diff Process" - # Fetch main (only if it's not already present) - - name: "gcr.io/cloud-builders/git" - args: - - fetch - - origin - - main - # Display commit log for clarity - name: "gcr.io/cloud-builders/git" args: @@ -36,18 +29,24 @@ steps: - "--oneline" - "-n 10" - # Find common ancestor commit and apply diff for the .ci folder + # Find common ancestor commit and apply diff for the .ci folder. 
- name: "gcr.io/cloud-builders/git" id: findMergeBase entrypoint: "bash" args: - "-c" - | - base_commit=$(git merge-base origin/main HEAD) - echo "Common ancestor commit: $base_commit" - git diff $base_commit origin/main -- .ci/ - git diff $base_commit origin/main -- .ci/ > /workspace/ci.diff - git apply /workspace/ci.diff --allow-empty + git fetch origin main + if [ "$_BASE_BRANCH" != "main" ]; then + echo "Checking out .ci/ folder from main" + git checkout origin/main -- .ci/ + else + base_commit=$(git merge-base origin/main HEAD) + echo "Common ancestor commit: $base_commit" + git diff $base_commit upstream/main -- .ci/ + git diff $base_commit upstream/main -- .ci/ > /workspace/ci.diff + git apply ./ci.diff --allow-empty + fi - name: "gcr.io/graphite-docker-images/go-plus" entrypoint: "/workspace/.ci/scripts/go-plus/magician/exec.sh" From 7e712d2925612a218c0fc6d9c28eeda0d2ad0583 Mon Sep 17 00:00:00 2001 From: Scott Suarez Date: Thu, 21 Sep 2023 14:41:54 -0700 Subject: [PATCH 24/36] change contributor checker merge to upstream from origin (#9042) --- .ci/gcb-community-checker.yml | 4 ++-- .ci/gcb-contributor-membership-checker.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.ci/gcb-community-checker.yml b/.ci/gcb-community-checker.yml index 08712995646b..9aba1d2d9478 100644 --- a/.ci/gcb-community-checker.yml +++ b/.ci/gcb-community-checker.yml @@ -43,8 +43,8 @@ steps: else base_commit=$(git merge-base origin/main HEAD) echo "Common ancestor commit: $base_commit" - git diff $base_commit upstream/main -- .ci/ - git diff $base_commit upstream/main -- .ci/ > /workspace/ci.diff + git diff $base_commit origin/main -- .ci/ + git diff $base_commit origin/main -- .ci/ > /workspace/ci.diff git apply ./ci.diff --allow-empty fi diff --git a/.ci/gcb-contributor-membership-checker.yml b/.ci/gcb-contributor-membership-checker.yml index 3164a75a5cf8..713918f91e35 100644 --- a/.ci/gcb-contributor-membership-checker.yml +++ b/.ci/gcb-contributor-membership-checker.yml @@ -43,8 +43,8 @@ steps: else base_commit=$(git merge-base origin/main HEAD) echo "Common ancestor commit: $base_commit" - git diff $base_commit upstream/main -- .ci/ - git diff $base_commit upstream/main -- .ci/ > /workspace/ci.diff + git diff $base_commit origin/main -- .ci/ + git diff $base_commit origin/main -- .ci/ > /workspace/ci.diff git apply ./ci.diff --allow-empty fi From 52daa08ad93417d5f8c63de79e920e0ec9b9d55c Mon Sep 17 00:00:00 2001 From: ron-gal <125445217+ron-gal@users.noreply.github.com> Date: Thu, 21 Sep 2023 20:42:26 -0400 Subject: [PATCH 25/36] Skip duplication validation for empty cluster IDs (#9018) --- .../terraform/services/bigtable/resource_bigtable_instance.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go index 1b0b8d208c30..2cc076b7bd6e 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go @@ -543,6 +543,10 @@ func resourceBigtableInstanceUniqueClusterID(_ context.Context, diff *schema.Res for i := 0; i < newCount.(int); i++ { _, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) clusterID := newId.(string) + // In case clusterID is empty, it is probably computed and this validation will be wrong. 
+ if clusterID == "" { + continue + } if clusters[clusterID] { return fmt.Errorf("duplicated cluster_id: %q", clusterID) } From 91afc186bbac6c9930e90790f4bc8c60cf6bea9c Mon Sep 17 00:00:00 2001 From: Tsubasa Nagasawa Date: Fri, 22 Sep 2023 10:21:55 +0900 Subject: [PATCH 26/36] fix(container_node_pool): panic interface conversion on `linux_node_config.sysctls` (#8981) Co-authored-by: Shuya Ma <87669292+shuyama1@users.noreply.github.com> --- .../services/container/node_config.go.erb | 3 + .../resource_container_node_pool_test.go.erb | 55 +++++++++++++------ 2 files changed, 40 insertions(+), 18 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb index 8db029040fad..e11ebef03c11 100644 --- a/mmv1/third_party/terraform/services/container/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -1008,6 +1008,9 @@ func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig { if len(ls) == 0 { return nil } + if ls[0] == nil { + return &container.LinuxNodeConfig{} + } cfg := ls[0].(map[string]interface{}) sysCfgRaw, ok := cfg["sysctls"] if !ok { diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index 0fb6f5ede53a..d80c9c89ece3 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -465,8 +465,17 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ + // Create a node pool with empty `linux_node_config.sysctls`. { - Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, 10000, 12800, "1000 20000 100000", 1), + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, ""), + }, + { + ResourceName: "google_container_node_pool.with_linux_node_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 100000"), }, { ResourceName: "google_container_node_pool.with_linux_node_config", @@ -475,7 +484,7 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { }, // Perform an update. 
{ - Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, 10000, 12800, "1000 20000 200000", 1), + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 200000"), }, { ResourceName: "google_container_node_pool.with_linux_node_config", @@ -2619,7 +2628,30 @@ resource "google_container_node_pool" "with_kubelet_config" { `, cluster, np, policy, quota, period, podPidsLimit) } -func testAccContainerNodePool_withLinuxNodeConfig(cluster, np string, maxBacklog, soMaxConn int, tcpMem string, twReuse int) string { +func testAccContainerNodePool_withLinuxNodeConfig(cluster, np string, tcpMem string) string { + linuxNodeConfig := ` + linux_node_config { + sysctls = {} + } +` + if len(tcpMem) != 0 { + linuxNodeConfig = fmt.Sprintf(` + linux_node_config { + sysctls = { + "net.core.netdev_max_backlog" = "10000" + "net.core.rmem_max" = 10000 + "net.core.wmem_default" = 10000 + "net.core.wmem_max" = 20000 + "net.core.optmem_max" = 10000 + "net.core.somaxconn" = 12800 + "net.ipv4.tcp_rmem" = "%s" + "net.ipv4.tcp_wmem" = "%s" + "net.ipv4.tcp_tw_reuse" = 1 + } + } +`, tcpMem, tcpMem) + } + return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -2639,29 +2671,16 @@ resource "google_container_node_pool" "with_linux_node_config" { initial_node_count = 1 node_config { image_type = "COS_CONTAINERD" - linux_node_config { - sysctls = { - "net.core.netdev_max_backlog" = "%d" - "net.core.rmem_max" = 10000 - "net.core.wmem_default" = 10000 - "net.core.wmem_max" = 20000 - "net.core.optmem_max" = 10000 - "net.core.somaxconn" = %d - "net.ipv4.tcp_rmem" = "%s" - "net.ipv4.tcp_wmem" = "%s" - "net.ipv4.tcp_tw_reuse" = %d - } - } + %s oauth_scopes = [ "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring", ] } } -`, cluster, np, maxBacklog, soMaxConn, tcpMem, tcpMem, twReuse) +`, cluster, np, linuxNodeConfig) } - func testAccContainerNodePool_withNetworkConfig(cluster, np, network string) string { return fmt.Sprintf(` resource "google_compute_network" "container_network" { From 1e00951c9eb4c43306dc8571f464887f6a4e3b18 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 22 Sep 2023 16:58:01 +0100 Subject: [PATCH 27/36] Upgrade guide: Add guidance for removing empty strings from `provider` blocks (#9048) * Add guidance for removing empty strings from `provider` blocks * Update code block to use tf formatting, instead of diff --- .../docs/guides/version_5_upgrade.html.markdown | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index 6cad0bd57bbf..e538e65eddaa 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -118,6 +118,21 @@ The new annotations model is similar to the new labels model and will be applied There are now two annotation-related fields with the new model, the `annotations` and the output-only `effective_annotations` fields. +### Updates to how empty strings are handled in the `provider` block + +In 5.0.0+ any empty strings set in the `provider` block will be used and not ignored. Previously any empty strings used as arguments in the `provider` block were ignored and did not contribute to configuration of the provider. 
+ +Users should remove empty string arguments to avoid errors during plan/apply stages. + +```tf +provider "google" { + credentials = "" # this line should be removed + project = "my-project" + region = "us-central1" + zone = "" # this line should be removed +} +``` + ### Changes to how default `location`, `region` and `zone` values are obtained for resources Currently, when configuring resources that require a `location`, `region` or `zone` field you have the choice of specifying it in the resource block or allowing default values to be used. Default [region](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#region) or [zone](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#zone) values can be configured in the provider block or by providing values through environment variables. From 2d8774d41c9f4521cd5fad83d8f94bf2541372c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felipe=20Gon=C3=A7alves=20de=20Castro?= Date: Fri, 22 Sep 2023 13:28:30 -0300 Subject: [PATCH 28/36] adding security policy field to instance (#8878) * adding security policy field to networkInterfaceAccessConfig * adding security policy to networkInterface instead of networkInterfaceAccessConfig * finishing solution 1 and adding integration tests and doc * cleanups for solution 1 * wrapping update security policy for beta * replacing the networks in tests * fixing code review by implementing the solution two * replacing networks in tests * fixing nic read for instance_template resource * adding checking for access config security policy while flattening to prevent the instance template to break * fixing error while creating instance with empty security_policy * changing region for tests which use network_edge_security_service * comment all failing tests but one to test if it runs alone * making the tests running serially * fixing the tests to be called only by the serial one * fixing code review comments * fixing missing compute_image data from merge * fixing code review --- .../compute/compute_instance_helpers.go.erb | 19 + ..._instance_network_interface_helpers.go.erb | 79 ++ .../compute/resource_compute_instance.go.erb | 68 + .../resource_compute_instance_test.go.erb | 1180 ++++++++++++++++- .../docs/r/compute_instance.html.markdown | 1 + 5 files changed, 1346 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb index 4f577b14cc48..8357d00c507f 100644 --- a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb +++ b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb @@ -305,6 +305,11 @@ func flattenAccessConfigs(accessConfigs []*compute.AccessConfig) ([]map[string]i if natIP == "" { natIP = ac.NatIP } + <% unless version == 'ga' -%> + if ac.SecurityPolicy != "" { + flattened[i]["security_policy"] = ac.SecurityPolicy + } + <% end -%> } return flattened, natIP } @@ -319,6 +324,11 @@ func flattenIpv6AccessConfigs(ipv6AccessConfigs []*compute.AccessConfig) []map[s flattened[i]["external_ipv6"] = ac.ExternalIpv6 flattened[i]["external_ipv6_prefix_length"] = strconv.FormatInt(ac.ExternalIpv6PrefixLength, 10) flattened[i]["name"] = ac.Name + <% unless version == 'ga' -%> + if ac.SecurityPolicy != "" { + flattened[i]["security_policy"] = ac.SecurityPolicy + } + <% end -%> } return flattened } @@ -370,6 +380,15 @@ func flattenNetworkInterfaces(d 
*schema.ResourceData, config *transport_tpg.Conf
 		flattened[i]["network_attachment"] = networkAttachment
 		}
 	<% end -%>
+
+	<% unless version == 'ga' -%>
+		// the security_policy for a network_interface is found in one of its accessConfigs.
+		if len(iface.AccessConfigs) > 0 && iface.AccessConfigs[0].SecurityPolicy != "" {
+			flattened[i]["security_policy"] = iface.AccessConfigs[0].SecurityPolicy
+		} else if len(iface.Ipv6AccessConfigs) > 0 && iface.Ipv6AccessConfigs[0].SecurityPolicy != "" {
+			flattened[i]["security_policy"] = iface.Ipv6AccessConfigs[0].SecurityPolicy
+		}
+	<% end -%>
 	}
 	return flattened, region, internalIP, externalIP, nil
 }
diff --git a/mmv1/third_party/terraform/services/compute/compute_instance_network_interface_helpers.go.erb b/mmv1/third_party/terraform/services/compute/compute_instance_network_interface_helpers.go.erb
index b80b47ee7818..2eb3607d7297 100644
--- a/mmv1/third_party/terraform/services/compute/compute_instance_network_interface_helpers.go.erb
+++ b/mmv1/third_party/terraform/services/compute/compute_instance_network_interface_helpers.go.erb
@@ -7,6 +7,10 @@ import (
 	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"
 	"github.com/hashicorp/errwrap"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+<% unless version == 'ga' -%>
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
+<% end -%>
+
 <% if version == "ga" -%>
 	"google.golang.org/api/compute/v1"
 <% else -%>
@@ -85,3 +89,78 @@ func computeInstanceCreateUpdateWhileStoppedCall(d *schema.ResourceData, config
 		return nil
 	}
 }
+
+<% unless version == 'ga' -%>
+func computeInstanceAddSecurityPolicy(d *schema.ResourceData, config *transport_tpg.Config, securityPolicyWithNics map[string][]string, project, zone, userAgent, instanceName string) error {
+	for sp, nics := range securityPolicyWithNics {
+		req := &compute.InstancesSetSecurityPolicyRequest{
+			NetworkInterfaces: nics,
+			SecurityPolicy: sp,
+		}
+		op, err := config.NewComputeClient(userAgent).Instances.SetSecurityPolicy(project, zone, instanceName, req).Do()
+		if err != nil {
+			return fmt.Errorf("Error adding security policy: %s", err)
+		}
+		opErr := ComputeOperationWaitTime(config, op, project, "security_policy to add", userAgent, d.Timeout(schema.TimeoutUpdate))
+		if opErr != nil {
+			return opErr
+		}
+	}
+
+	return nil
+}
+
+func computeInstanceMapSecurityPoliciesCreate(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string][]string, error) {
+	securityPolicies := make(map[string][]string)
+	configs := d.Get("network_interface").([]interface{})
+	for i, raw := range configs {
+		data := raw.(map[string]interface{})
+		secPolicy := data["security_policy"].(string)
+		err := validateSecurityPolicy(data)
+		if err != nil {
+			return securityPolicies, err
+		}
+
+		if secPolicy != "" {
+			// Network interfaces use the nicN naming format, which is only known after the instance is created.
+			nicName := fmt.Sprintf("nic%d", i)
+			securityPolicies[secPolicy] = append(securityPolicies[secPolicy], nicName)
+		}
+	}
+
+	return securityPolicies, nil
+}
+
+func computeInstanceMapSecurityPoliciesUpdate(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string][]string, error) {
+	securityPolicies := make(map[string][]string)
+	configs := d.Get("network_interface").([]interface{})
+	for i, raw := range configs {
+		data := raw.(map[string]interface{})
+		secPolicy := data["security_policy"].(string)
+		err := validateSecurityPolicy(data)
+		if err != nil {
+			return securityPolicies, err
+		}
+
+		// Network interfaces use the nicN naming format, which is only known after the instance is created.
+		nicName := fmt.Sprintf("nic%d", i)
+		// To clean up the security policy from the interface we should send something like this to the API: {"":[nic0, nic1]}
+		securityPolicies[secPolicy] = append(securityPolicies[secPolicy], nicName)
+	}
+
+	return securityPolicies, nil
+}
+
+func validateSecurityPolicy(rawNetworkInterface map[string]interface{}) error {
+	accessConfigs := expandAccessConfigs(rawNetworkInterface["access_config"].([]interface{}))
+	ipv6AccessConfigs := expandIpv6AccessConfigs(rawNetworkInterface["ipv6_access_config"].([]interface{}))
+	secPolicy := rawNetworkInterface["security_policy"].(string)
+
+	if secPolicy != "" && len(accessConfigs) == 0 && len(ipv6AccessConfigs) == 0 {
+		return fmt.Errorf("Error setting security policy to the instance since at least one access config must exist")
+	}
+
+	return nil
+}
+
+<% end -%>
\ No newline at end of file
diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb
index 799176f39943..5cfc9ac2e394 100644
--- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb
+++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb
@@ -367,6 +367,13 @@ func ResourceComputeInstance() *schema.Resource {
 							Optional: true,
 							Description: `The DNS domain name for the public PTR record.`,
 						},
+						<% unless version == 'ga' -%>
+						"security_policy": {
+							Type: schema.TypeString,
+							Computed: true,
+							Description: `A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.`,
+						},
+						<% end -%>
 					},
 				},
 			},
@@ -444,6 +451,13 @@ func ResourceComputeInstance() *schema.Resource {
 							ForceNew: true,
 							Description: `The name of this access configuration. In ipv6AccessConfigs, the recommended name is External IPv6.`,
 						},
+						<% unless version == 'ga' -%>
+						"security_policy": {
+							Type: schema.TypeString,
+							Computed: true,
+							Description: `A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.`,
+						},
+						<% end -%>
 					},
 				},
 			},
@@ -469,6 +483,14 @@ func ResourceComputeInstance() *schema.Resource {
 							ForceNew: true,
 							Description: `The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.`,
 						},
+
+						<% unless version == 'ga' -%>
+						"security_policy": {
+							Type: schema.TypeString,
+							Optional: true,
+							Description: `A full or partial URL to a security policy to add to this instance.
If this field is set to an empty string it will remove the associated security policy.`, + }, + <% end -%> }, }, }, @@ -1300,6 +1322,13 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err return err } + <% unless version == 'ga' -%> + securityPolicies, err := computeInstanceMapSecurityPoliciesCreate(d, config) + if err != nil { + return err + } + <% end -%> + log.Printf("[INFO] Requesting instance creation") op, err := config.NewComputeClient(userAgent).Instances.Insert(project, zone.Name, instance).Do() if err != nil { @@ -1317,6 +1346,13 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err return waitErr } + <% unless version == 'ga' -%> + err = computeInstanceAddSecurityPolicy(d, config, securityPolicies, project, z, userAgent, instance.Name) + if err != nil { + return fmt.Errorf("Error creating instance while setting the security policies: %s", err) + } + <% end -%> + err = waitUntilInstanceHasDesiredStatus(config, d) if err != nil { return fmt.Errorf("Error waiting for status: %s", err) @@ -1759,6 +1795,30 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) } + <% unless version == 'ga' -%> + updateSecurityPolicy := false + for i := 0; i < len(instance.NetworkInterfaces); i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + // check if sec policy has been changed + // check if access config has been changed because it may be deleted and needs to be re-created. + if d.HasChange(prefix+".security_policy") || d.HasChange(prefix+".access_config") || d.HasChange(prefix+".ipv6_access_config") { + if instance.Status != "RUNNING" { + return fmt.Errorf("Error to update security policy because the current instance status must be \"RUNNING\". The security policy or some access config may have changed which requires the security policy to be re-applied") + } + updateSecurityPolicy = true + } + } + + securityPolicies := make(map[string][]string) + if updateSecurityPolicy { + // map the security policies to call SetSecurityPolicy because the next section of the code removes and re-creates the access_config which ends up removing the security_policy. + securityPolicies, err = computeInstanceMapSecurityPoliciesUpdate(d, config) + if err != nil { + return err + } + } + <% end -%> + var updatesToNIWhileStopped []func(inst *compute.Instance) error for i := 0; i < len(networkInterfaces); i++ { prefix := fmt.Sprintf("network_interface.%d", i) @@ -2300,6 +2360,14 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err } } + <% unless version == 'ga' -%> + // The access config must be updated only if the machine is still RUNNING and after each access_config for each interface has been re-created. 
+	err = computeInstanceAddSecurityPolicy(d, config, securityPolicies, project, zone, userAgent, instance.Name)
+	if err != nil {
+		return fmt.Errorf("Error updating instance while setting the security policies: %s", err)
+	}
+	<% end -%>
+
 	// We made it, disable partial mode
 	d.Partial(false)
diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb
index fa9c520d4629..c7c28b68bcf7 100644
--- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb
+++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb
@@ -2587,6 +2587,341 @@ func TestAccComputeInstance_regionBootDisk(t *testing.T) {
 	})
 }
 
+<% unless version == 'ga' -%>
+// The tests related to security_policy use the network_edge_security_service resource,
+// of which only one can exist per region. Because of that, all the following tests must run serially.
+func TestAccComputeInstanceNetworkInterfaceWithSecurityPolicy(t *testing.T) {
+	testCases := map[string]func(t *testing.T){
+		"two_access_config": testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigs,
+		"two_nics_access_config_with_empty_nil_security_policy": testAccComputeInstance_nic_securityPolicyCreateWithEmptyAndNullSecurityPolicies,
+		"two_nics_two_access_configs_update_one_policy": testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateOnlyOnePolicy,
+		"two_access_config_update_policy_with_stopped_machine": testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsUpdateSecurityPoliciesWithStoppedMachine,
+		"two_nics_two_access_configs_update_remove_access_config": testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateRemoveAccessConfig,
+		"two_nics_two_access_configs_update_two_policies": testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateSwapPolicies,
+		"access_config_update_access_config": testAccComputeInstance_nic_securityPolicyCreateWithAccessConfigUpdateAccessConfig,
+		"with_no_access_config": testAccComputeInstance_nic_securityPolicyCreateWithoutAccessConfig,
+	}
+
+	for name, tc := range testCases {
+		// shadow the tc variable into scope so that when
+		// the loop continues, if t.Run hasn't executed tc(t)
+		// yet, we don't have a race condition
+		// see https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			tc(t)
+		})
+	}
+}
+
+func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigs(t *testing.T) {
+	var instance compute.Instance
+	var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10))
+	var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10))
+	var suffix = acctest.RandString(t, 10)
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck: func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccComputeInstance_nic_securityPolicyCreateWithOneNicAndTwoAccessConfigs(suffix, policyName, instanceName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeInstanceExists(
+						t, "google_compute_instance.foobar", &instance),
+					testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName),
+				),
+			},
+			{
+				ResourceName: "google_compute_instance.foobar",
+				ImportState:
true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithEmptyAndNullSecurityPolicies(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndAccessConfigsWithEmptyAndNullSecurityPolicies(suffix, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasNoSecurityPolicy(&instance), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateOnlyOnePolicy(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var policyName2 = fmt.Sprintf("tf-test-policy2-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance2.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName2), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsUpdateSecurityPoliciesWithStoppedMachine(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsWithTwoSecurityPoliciesAndStatus(suffix, policyName, instanceName, "\"\"", "RUNNING"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsWithTwoSecurityPoliciesAndStatus(suffix, policyName, instanceName, "\"\"", "TERMINATED"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsWithTwoSecurityPoliciesAndStatus(suffix, policyName, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "TERMINATED"), + ExpectError: regexp.MustCompile("Error to update security policy because the current instance status must be \"RUNNING\". The security policy or some access config may have changed which requires the security policy to be re-applied"), + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateRemoveAccessConfig(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var policyName2 = fmt.Sprintf("tf-test-policy2-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPoliciesRemoveAccessConfig(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateSwapPolicies(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = 
fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var policyName2 = fmt.Sprintf("tf-test-policy2-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "\"\"", "\"\""), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance2.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName2), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance2.self_link", "google_compute_region_security_policy.policyforinstance.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName2), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "\"\"", "\"\""), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasNoSecurityPolicy(&instance), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithAccessConfigUpdateAccessConfig(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithOneNicAndTwoAccessConfigs(suffix, policyName, instanceName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsUpdateAccessConfig(suffix, policyName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsRemoveAccessConfig(suffix, policyName, instanceName), + ExpectError: regexp.MustCompile("Error setting security policy to the instance since at least one access config must exist"), + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithoutAccessConfig(t *testing.T) { + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsRemoveAccessConfig(suffix, policyName, instanceName), + ExpectError: regexp.MustCompile("Error setting security policy to the instance since at least one access config must exist"), + }, + }, + }) +} + +<% end -%> + func testAccCheckComputeInstanceUpdateMachineType(t *testing.T, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -3405,7 +3740,50 @@ func testAccCheckComputeInstanceHasStatus(instance *compute.Instance, status str } } -func testAccComputeInstance_basic(instance string) string { +<% unless version == 'ga' -%> +func testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(instance *compute.Instance, securityPolicy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instance.NetworkInterfaces { + for _, accessConfig := range networkInterface.AccessConfigs { + if strings.Contains(accessConfig.SecurityPolicy, securityPolicy) { + return nil + } + } + + for _, accessConfigIpv6 := range networkInterface.Ipv6AccessConfigs { + if strings.Contains(accessConfigIpv6.SecurityPolicy, securityPolicy) { + return nil + } + } + } + + return fmt.Errorf("Security Policy with name %s not present", securityPolicy) + } +} + +func testAccCheckComputeInstanceNicAccessConfigHasNoSecurityPolicy(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instance.NetworkInterfaces { + for _, accessConfig := range networkInterface.AccessConfigs { + if accessConfig.SecurityPolicy != "" { + return fmt.Errorf("Security Policy with name %s is present", accessConfig.SecurityPolicy) + } + + } + + for _, accessConfigIpv6 := range networkInterface.Ipv6AccessConfigs { + if accessConfigIpv6.SecurityPolicy != "" { + return fmt.Errorf("Security Policy with name %s is present", 
accessConfigIpv6.SecurityPolicy) + } + } + } + + return nil + } +} +<% end -%> + +func testAccComputeInstance_basic(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { family = "debian-11" @@ -7003,6 +7381,805 @@ resource "google_compute_disk" "debian" { `, instance, diskName, suffix, suffix, suffix) } +<% unless version == 'ga' -%> +func testAccComputeInstance_nic_securityPolicyCreateWithOneNicAndTwoAccessConfigs(suffix, policy, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + access_config { + network_tier = "STANDARD" + } + security_policy = google_compute_region_security_policy.policyforinstance.self_link + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, policy, instance) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsWithTwoSecurityPoliciesAndStatus(suffix, policy, instance, policyToSetOne, desiredStatus string) string { + return fmt.Sprintf(` +data "google_compute_image" 
"my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet.self_link + access_config { + network_tier = "STANDARD" + } + security_policy = %s + } + + metadata = { + foo = "bar" + } + + desired_status = "%s" +} +`, suffix, suffix, suffix, suffix, policy, instance, policyToSetOne, desiredStatus) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policy, policy2, instance, policyToSetOne, policyToSetTwo string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = 
google_compute_network.net.id +} + +resource "google_compute_address" "normal-address" { + region = "europe-west1" + name = "tf-test-addr-normal-%s" +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_network" "net2" { + name = "tf-test-network2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet2" { + region = "europe-west1" + name = "tf-test-subnet2-%s" + ip_cidr_range = "192.170.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net2.id +} + +resource "google_compute_subnetwork" "subnet-ipv62" { + region = "europe-west1" + name = "tf-test-subnet-ip62-%s" + ip_cidr_range = "10.10.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net2.id +} + +resource "google_compute_address" "normal-address2" { + region = "europe-west1" + name = "tf-test-addr-normal2-%s" +} + +resource "google_compute_address" "ipv6-address2" { + region = "europe-west1" + name = "tf-test-addr-ipv62-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv62.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_region_security_policy" "policyforinstance2" { + region = "europe-west1" + name = "%s" + description = "region security policy 2 to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + access_config { + network_tier = "PREMIUM" + nat_ip = google_compute_address.normal-address.address + } + security_policy = %s + } + + network_interface { + network = google_compute_network.net2.self_link + subnetwork = google_compute_subnetwork.subnet-ipv62.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address2.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + access_config { + network_tier = "PREMIUM" + nat_ip = google_compute_address.normal-address2.address + } + security_policy = %s + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, policy, policy2, instance, policyToSetOne, policyToSetTwo) +} + +func 
testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPoliciesRemoveAccessConfig(suffix, policy, policy2, instance, policyToSetOne, policyToSetTwo string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "normal-address" { + region = "europe-west1" + name = "tf-test-addr-normal-%s" +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_network" "net2" { + name = "tf-test-network2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet2" { + region = "europe-west1" + name = "tf-test-subnet2-%s" + ip_cidr_range = "192.170.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net2.id +} + +resource "google_compute_subnetwork" "subnet-ipv62" { + region = "europe-west1" + name = "tf-test-subnet-ip62-%s" + ip_cidr_range = "10.10.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net2.id +} + +resource "google_compute_address" "normal-address2" { + region = "europe-west1" + name = "tf-test-addr-normal2-%s" +} + +resource "google_compute_address" "ipv6-address2" { + region = "europe-west1" + name = "tf-test-addr-ipv62-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv62.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_region_security_policy" "policyforinstance2" { + region = "europe-west1" + name = "%s" + description = "region security policy 2 to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = 
[google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + # access config removed + security_policy = %s + } + + network_interface { + network = google_compute_network.net2.self_link + subnetwork = google_compute_subnetwork.subnet-ipv62.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address2.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + # access config removed + security_policy = %s + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, policy, policy2, instance, policyToSetOne, policyToSetTwo) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndAccessConfigsWithEmptyAndNullSecurityPolicies(suffix, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_network" "net2" { + name = "tf-test-network2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet2" { + region = "europe-west1" + name = "tf-test-subnet2-%s" + ip_cidr_range = "192.170.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net2.id +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet.self_link + access_config { + network_tier = "STANDARD" + } + security_policy = "" + } + + network_interface { + network = google_compute_network.net2.self_link + subnetwork = google_compute_subnetwork.subnet2.self_link + access_config { + network_tier = "STANDARD" + } + security_policy = null + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, instance) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsUpdateAccessConfig(suffix, policy, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy 
for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "normal-address" { + region = "europe-west1" + name = "tf-test-addr-normal-%s" +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + access_config { + network_tier = "PREMIUM" + nat_ip = google_compute_address.normal-address.address + } + security_policy = google_compute_region_security_policy.policyforinstance.self_link + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, suffix, policy, instance) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsRemoveAccessConfig(suffix, policy, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + 
+resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "normal-address" { + region = "europe-west1" + name = "tf-test-addr-normal-%s" +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + # remove all access config + security_policy = google_compute_region_security_policy.policyforinstance.self_link + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, suffix, policy, instance) +} + +<% end -%> + <% if version =="beta"%> func testAccComputeInstance_networkAttachment(context map[string]interface{}) string { return acctest.Nprintf(` @@ -7139,3 +8316,4 @@ resource "google_compute_instance" "foobar" { `, suffix, suffix, suffix, region, suffix, region, suffix, region, suffix, region, suffix, region, networkAttachment) } <% end %> + diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index bbb897168f8d..00e16e341146 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -317,6 +317,7 @@ specified, then this instance will have no external IPv6 Internet access. Struct * `queue_count` - (Optional) The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified. +* `security_policy` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy. 
The `access_config` block supports: From f80a4a67c6e91fc0dcdf84523394fa677c50aa9b Mon Sep 17 00:00:00 2001 From: abheda-crest <105624942+abheda-crest@users.noreply.github.com> Date: Fri, 22 Sep 2023 22:57:19 +0530 Subject: [PATCH 29/36] Added support for in-place update for `rotation.rotation_period` field in `google_secret_manager_secret` (#8983) --- mmv1/products/secretmanager/Secret.yaml | 1 - ...resource_secret_manager_secret_test.go.erb | 167 ++++++++++++++++++ 2 files changed, 167 insertions(+), 1 deletion(-) diff --git a/mmv1/products/secretmanager/Secret.yaml b/mmv1/products/secretmanager/Secret.yaml index 1fe39265855f..cd6ba56029b8 100644 --- a/mmv1/products/secretmanager/Secret.yaml +++ b/mmv1/products/secretmanager/Secret.yaml @@ -237,7 +237,6 @@ properties: - rotation.0.rotation_period - !ruby/object:Api::Type::String name: rotationPeriod - immutable: true description: | The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years). If rotationPeriod is set, `next_rotation_time` must be set. `next_rotation_time` will be advanced by this period when the service automatically sends rotation notifications. diff --git a/mmv1/third_party/terraform/services/secretmanager/resource_secret_manager_secret_test.go.erb b/mmv1/third_party/terraform/services/secretmanager/resource_secret_manager_secret_test.go.erb index e5c61c864c25..b672a3d607cc 100644 --- a/mmv1/third_party/terraform/services/secretmanager/resource_secret_manager_secret_test.go.erb +++ b/mmv1/third_party/terraform/services/secretmanager/resource_secret_manager_secret_test.go.erb @@ -283,6 +283,59 @@ func TestAccSecretManagerSecret_automaticCmekUpdate(t *testing.T) { }) } +func TestAccSecretManagerSecret_rotationPeriodUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "timestamp": "2122-11-26T19:58:16Z", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecret_withoutRotationPeriod(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_rotationPeriodBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_rotationPeriodUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_withoutRotationPeriod(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + }, + }) +} + func testAccSecretManagerSecret_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_secret_manager_secret" "secret-basic" { @@ -833,3 +886,117 @@ resource "google_secret_manager_secret" "secret-basic" { } `, context) } + +func testAccSecretManagerSecret_withoutRotationPeriod(context map[string]interface{}) string { + return acctest.Nprintf(` +data 
"google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} +`, context) +} + +func testAccSecretManagerSecret_rotationPeriodBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + topics { + name = google_pubsub_topic.topic.id + } + + rotation { + rotation_period = "3600s" + next_rotation_time = "%{timestamp}" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} +`, context) +} + +func testAccSecretManagerSecret_rotationPeriodUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + topics { + name = google_pubsub_topic.topic.id + } + + rotation { + rotation_period = "3700s" + next_rotation_time = "%{timestamp}" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} +`, context) +} From 91f64d05fa85807f0801c1c512d4ea889da28e70 Mon Sep 17 00:00:00 2001 From: efe Date: Fri, 22 Sep 2023 13:52:10 -0500 Subject: [PATCH 30/36] Update version_5_upgrade.html.markdown (#9052) --- .../website/docs/guides/version_5_upgrade.html.markdown | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index e538e65eddaa..0611e6edd1dc 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -585,3 +585,9 @@ Use the `google_identity_platform_config` resource instead. It contains a more c ### `reconcile_connections` now defaults from API `reconcile_connections` previously defaults to true. Now it will default from the API. 
+ +## Resource: `google_looker_instance` + +### `LOOKER_MODELER` has been removed as a platform edition. + +Looker Modeler edition is deprecated as a platform edition. From 442e837b2822bc731ddb76acad2a6b18fb120e10 Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Fri, 22 Sep 2023 12:53:03 -0700 Subject: [PATCH 31/36] Upgrade guide: Rework taint model in GKE (#9010) --- .../guides/version_5_upgrade.html.markdown | 88 ++++++++++++++++++- 1 file changed, 87 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index 0611e6edd1dc..f6ee289852e6 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -326,6 +326,50 @@ deleted. This behavior was changed to allow users to collect internal logs from the cluster and/or manually resolve the issues and untaint their failed clusters. +### `node_config.0.taint` and `node_pool.0.node_config.0.taint` field change + +The `taint` field has been changed to manage a subset of the taint keys on a node pool +and the `effective_taints` output field has been added to record the complete set of +taints applied to the node pool by GKE. + +Previously, the field was authoritative and would require every taint on the node pool +to be recorded, causing friction when users used GPUs or configured sandbox settings, +actions which added taints. After this change, only "Terraform-managed" taints will be +managed by the `taint` field. Other taints, including new taints injected by the +server, will not have drift detected. + +Currently, the set of managed taints and their values are immutable in Terraform, and +any changes will cause a recreate to be planned. However, taints can be unmanaged by +simultaneously removing the taint entry from GKE and your Terraform configuration at +the same time. + +The set of taints Terraform manages (and their values) will be determined based on +how the cluster or node pool resource was added to your Terraform state file: + +* If you created the cluster or node pool with Terraform with Google provider 5.0.0 +or later, the set of taints specified during resource creation will be managed. +* If you imported the cluster or node pool with Google provider 5.0.0 or later, no +taints will be managed by Terraform +* If you upgraded from an earlier version, the complete set of taint values applied to the +node pool at the time of your last refresh will be managed by Terraform + +Most existing configurations will not be affected with this change as they already specify +the whole set of managed taints, or are already ignoring changes with `lifecycle.ignore_changes`, +preventing a diff. + +A limited number of users may see a diff if they are using the `google-beta` provider +and have specified a `sandbox_config` value. If that's the case, you can safely add the +proposed value to configuration (below) or apply `lifecycle.ignore_changes` to the field to resolve. + + +```diff ++ taint { ++ key = "sandbox.gke.io/runtime" ++ value = "gvisor" ++ effect = "NO_SCHEDULE" ++ } +``` + ### `enable_binary_authorization` is now removed `enable_binary_authorization` has been removed in favor of `binary_authorization.enabled`. @@ -335,7 +379,6 @@ cluster and/or manually resolve the issues and untaint their failed clusters. Previously `network_policy.provider` defaulted to "PROVIDER_UNSPECIFIED". 
It no longer has a default value. - ## Resource: `google_container_node_pool` ### `logging_variant` no longer has a provider default value @@ -346,6 +389,49 @@ Previously `logging_variant` defaulted to "DEFAULT". It no longer has a default Previously both fields defaulted to false. They now default to true. +### `node_config.0.taint` field change + +The `taint` field has been changed to manage a subset of the taint keys on a node pool +and the `effective_taints` output field has been added to record the complete set of +taints applied to the node pool by GKE. + +Previously, the field was authoritative and would require every taint on the node pool +to be recorded, causing friction when users used GPUs or configured sandbox settings, +actions which added taints. After this change, only "Terraform-managed" taints will be +managed by the `taint` field. Other taints, including new taints injected by the +server, will not have drift detected. + +Currently, the set of managed taints and their values are immutable in Terraform, and +any changes will cause a recreate to be planned. However, taints can be unmanaged by +simultaneously removing the taint entry from GKE and your Terraform configuration at +the same time. + +The set of taints Terraform manages (and their values) will be determined based on +how the cluster or node pool resource was added to your Terraform state file: + +* If you created the cluster or node pool with Terraform with Google provider 5.0.0 +or later, the set of taints specified during resource creation will be managed. +* If you imported the cluster or node pool with Google provider 5.0.0 or later, no +taints will be managed by Terraform +* If you upgraded from an earlier version, the complete set of taint values applied to the +node pool at the time of your last refresh will be managed by Terraform + +Most existing configurations will not be affected with this change as they already specify +the whole set of managed taints, or are already ignoring changes with `lifecycle.ignore_changes`, +preventing a diff. + +A limited number of users may see a diff if they are using the `google-beta` provider +and have specified a `sandbox_config` value. If that's the case, you can safely add the +proposed value to configuration (below) or apply `lifecycle.ignore_changes` to the field to resolve. 
+ + +```diff ++ taint { ++ key = "sandbox.gke.io/runtime" ++ value = "gvisor" ++ effect = "NO_SCHEDULE" ++ } +``` ## Resource: `google_dataplex_datascan` From cd61cc28222cf033e44362d82e8129fe4b66b1e7 Mon Sep 17 00:00:00 2001 From: ron-gal <125445217+ron-gal@users.noreply.github.com> Date: Fri, 22 Sep 2023 15:57:10 -0400 Subject: [PATCH 32/36] Avoid recreate instance when cluster not ready (#9023) --- .../bigtable/resource_bigtable_instance.go | 27 +- ...esource_bigtable_instance_internal_test.go | 243 +++++++++++++++++- .../docs/r/bigtable_instance.html.markdown | 1 + 3 files changed, 263 insertions(+), 8 deletions(-) diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go index 2cc076b7bd6e..d428c709a8a7 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go @@ -127,6 +127,11 @@ func ResourceBigtableInstance() *schema.Resource { }, }, }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the cluster`, + }, }, }, }, @@ -418,6 +423,7 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo) map[string]interface{} { "cluster_id": c.Name, "storage_type": storageType, "kms_key_name": c.KMSKeyName, + "state": c.State, } if c.AutoscalingConfig != nil { cluster["autoscaling_config"] = make([]map[string]interface{}, 1) @@ -563,7 +569,14 @@ func resourceBigtableInstanceUniqueClusterID(_ context.Context, diff *schema.Res // This doesn't use the standard unordered list utility (https://github.com/GoogleCloudPlatform/magic-modules/blob/main/templates/terraform/unordered_list_customize_diff.erb) // because some fields can't be modified using the API and we recreate the instance // when they're changed. -func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { +func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error { + // separate func to allow unit testing + return resourceBigtableInstanceClusterReorderTypeListFunc(diff, func(orderedClusters []interface{}) error { + return diff.SetNew("cluster", orderedClusters) + }) + +} +func resourceBigtableInstanceClusterReorderTypeListFunc(diff tpgresource.TerraformResourceDiff, setNew func([]interface{}) error) error { oldCount, newCount := diff.GetChange("cluster.#") // Simulate Required:true, MinItems:1 for "cluster". 
This doesn't work @@ -592,7 +605,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch for i := 0; i < newCount.(int); i++ { _, newId := diff.GetChange(fmt.Sprintf("cluster.%d.cluster_id", i)) _, c := diff.GetChange(fmt.Sprintf("cluster.%d", i)) - clusters[newId.(string)] = c + typedCluster := c.(map[string]interface{}) + typedCluster["state"] = "READY" + clusters[newId.(string)] = typedCluster } // create a list of clusters using the old order when possible to minimise @@ -628,9 +643,8 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch } } - err := diff.SetNew("cluster", orderedClusters) - if err != nil { - return fmt.Errorf("Error setting cluster diff: %s", err) + if err := setNew(orderedClusters); err != nil { + return err } // Clusters can't have their zone, storage_type or kms_key_name updated, @@ -656,8 +670,9 @@ func resourceBigtableInstanceClusterReorderTypeList(_ context.Context, diff *sch } } + currentState, _ := diff.GetChange(fmt.Sprintf("cluster.%d.state", i)) oST, nST := diff.GetChange(fmt.Sprintf("cluster.%d.storage_type", i)) - if oST != nST { + if oST != nST && currentState.(string) != "CREATING" { err := diff.ForceNew(fmt.Sprintf("cluster.%d.storage_type", i)) if err != nil { return fmt.Errorf("Error setting cluster diff: %s", err) diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_internal_test.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_internal_test.go index 6fcc7ab1f6c1..acc71d2397bb 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_internal_test.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_internal_test.go @@ -8,9 +8,10 @@ import ( "cloud.google.com/go/bigtable" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" ) -func TestGetUnavailableClusterZones(t *testing.T) { +func TestUnitBigtable_getUnavailableClusterZones(t *testing.T) { cases := map[string]struct { clusterZones []string unavailableZones []string @@ -54,7 +55,7 @@ func TestGetUnavailableClusterZones(t *testing.T) { } } -func TestGetInstanceFromResponse(t *testing.T) { +func TestUnitBigtable_getInstanceFromResponse(t *testing.T) { instanceName := "test-instance" originalId := "original_value" cases := map[string]struct { @@ -130,3 +131,241 @@ func TestGetInstanceFromResponse(t *testing.T) { } } } + +func TestUnitBigtable_flattenBigtableCluster(t *testing.T) { + cases := map[string]struct { + clusterInfo *bigtable.ClusterInfo + want map[string]interface{} + }{ + "SSD auto scaling": { + clusterInfo: &bigtable.ClusterInfo{ + StorageType: bigtable.SSD, + Zone: "zone1", + ServeNodes: 5, + Name: "ssd-cluster", + KMSKeyName: "KMS", + State: "CREATING", + AutoscalingConfig: &bigtable.AutoscalingConfig{ + MinNodes: 3, + MaxNodes: 7, + CPUTargetPercent: 50, + StorageUtilizationPerNode: 60, + }, + }, + want: map[string]interface{}{ + "zone": "zone1", + "num_nodes": 5, + "cluster_id": "ssd-cluster", + "storage_type": "SSD", + "kms_key_name": "KMS", + "state": "CREATING", + "autoscaling_config": []map[string]interface{}{ + map[string]interface{}{ + "min_nodes": 3, + "max_nodes": 7, + "cpu_target": 50, + "storage_target": 60, + }, + }, + }, + }, + "HDD manual scaling": { + clusterInfo: &bigtable.ClusterInfo{ + StorageType: bigtable.HDD, + Zone: "zone2", + ServeNodes: 7, + Name: "hdd-cluster", + KMSKeyName: "KMS", + State: "READY", + }, + want: 
map[string]interface{}{ + "zone": "zone2", + "num_nodes": 7, + "cluster_id": "hdd-cluster", + "storage_type": "HDD", + "kms_key_name": "KMS", + "state": "READY", + }, + }, + } + + for tn, tc := range cases { + if got := flattenBigtableCluster(tc.clusterInfo); !reflect.DeepEqual(got, tc.want) { + t.Errorf("bad: %s, got %q, want %q", tn, got, tc.want) + } + } +} + +func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc_error(t *testing.T) { + d := &tpgresource.ResourceDiffMock{ + After: map[string]interface{}{ + "cluster.#": 0, + }, + } + if err := resourceBigtableInstanceClusterReorderTypeListFunc(d, nil); err == nil { + t.Errorf("expected error, got success") + } +} + +func TestUnitBigtable_resourceBigtableInstanceClusterReorderTypeListFunc(t *testing.T) { + cases := map[string]struct { + before map[string]interface{} + after map[string]interface{} + wantClusterOrder []string + wantForceNew bool + }{ + "create": { + before: map[string]interface{}{ + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + }, + }, + wantClusterOrder: []string{}, + wantForceNew: false, + }, + "no force new change": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 4, + "cluster.0.cluster_id": "some-id-a", + "cluster.1.cluster_id": "some-id-b", + "cluster.2.cluster_id": "some-id-c", + "cluster.3.cluster_id": "some-id-e", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 3, + "cluster.0.cluster_id": "some-id-c", + "cluster.1.cluster_id": "some-id-a", + "cluster.2.cluster_id": "some-id-d", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-c", + }, + "cluster.1": map[string]interface{}{ + "cluster_id": "some-id-a", + }, + "cluster.2": map[string]interface{}{ + "cluster_id": "some-id-d", + }, + }, + wantClusterOrder: []string{"some-id-a", "some-id-d", "some-id-c"}, + wantForceNew: false, + }, + "force new - zone change": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.zone": "zone-a", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.zone": "zone-b", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + "zone": "zone-b", + }, + }, + wantClusterOrder: []string{"some-id-a"}, + wantForceNew: true, + }, + "force new - kms_key_name change": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.kms_key_name": "key-a", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.kms_key_name": "key-b", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + "kms_key_name": "key-b", + }, + }, + wantClusterOrder: []string{"some-id-a"}, + wantForceNew: true, + }, + "force new - storage_type change": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.storage_type": "HDD", + "cluster.0.state": "READY", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.storage_type": "SSD", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + "storage_type": "SSD", + }, + }, + 
wantClusterOrder: []string{"some-id-a"}, + wantForceNew: true, + }, + "skip force new - storage_type change for CREATING cluster": { + before: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.storage_type": "SSD", + "cluster.0.state": "CREATING", + }, + after: map[string]interface{}{ + "name": "some-name", + "cluster.#": 1, + "cluster.0.cluster_id": "some-id-a", + "cluster.0.storage_type": "HDD", + "cluster.0": map[string]interface{}{ + "cluster_id": "some-id-a", + "storage_type": "HDD", + }, + }, + wantClusterOrder: []string{"some-id-a"}, + wantForceNew: false, + }, + } + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + d := &tpgresource.ResourceDiffMock{ + Before: tc.before, + After: tc.after, + } + var clusters []interface{} + err := resourceBigtableInstanceClusterReorderTypeListFunc(d, func(gotClusters []interface{}) error { + clusters = gotClusters + return nil + }) + if err != nil { + t.Fatalf("bad: %s, error: %v", tn, err) + } + if d.IsForceNew != tc.wantForceNew { + t.Errorf("bad: %s, got %v, want %v", tn, d.IsForceNew, tc.wantForceNew) + } + gotClusterOrder := []string{} + for _, cluster := range clusters { + clusterResource := cluster.(map[string]interface{}) + gotClusterOrder = append(gotClusterOrder, clusterResource["cluster_id"].(string)) + } + if !reflect.DeepEqual(gotClusterOrder, tc.wantClusterOrder) { + t.Errorf("bad: %s, got %q, want %q", tn, gotClusterOrder, tc.wantClusterOrder) + } + }) + } +} diff --git a/mmv1/third_party/terraform/website/docs/r/bigtable_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/bigtable_instance.html.markdown index 1dbe10bfed87..a6f1d14cb34f 100644 --- a/mmv1/third_party/terraform/website/docs/r/bigtable_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/bigtable_instance.html.markdown @@ -142,6 +142,7 @@ If no value is set, Cloud Bigtable automatically allocates nodes based on your d In addition to the arguments listed above, the following computed attributes are exported: * `id` - an identifier for the resource with format `projects/{{project}}/instances/{{name}}` +* `cluster.0.state` - describes the current state of the cluster. ## Timeouts From e29884cc24e622233fad8dc8fd6953387aaa7ca8 Mon Sep 17 00:00:00 2001 From: Siddhartha Bagaria Date: Fri, 22 Sep 2023 13:48:47 -0700 Subject: [PATCH 33/36] Add accelerators to workstation update masks (#8979) * Add accelerators to workstation update masks This was missed in #8490, and so in-place updates to accelerators are silently dropped. Verified the issue by updating my workstation config with accelerators but the actual config did not change. On inspecting the debug logs, I noticed the update mask was wrong. Near the end of the log, the following warning message was present. ``` 2023-09-16T04:38:23.768Z [WARN] Provider "provider[\"registry.terraform.io/hashicorp/google-beta\"]" produced an unexpected new value for google_workstations_workstation_config.sidb, but we are tolerating it because it is using the legacy plugin SDK. 
The following problems may be the cause of any confusing errors from downstream operations: - .etag: was cty.StringVal("...."), but now cty.StringVal("...") - .host[0].gce_instance[0].accelerators: block count changed from 1 to 0 ``` * Fix test failure --- mmv1/products/workstations/WorkstationConfig.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/mmv1/products/workstations/WorkstationConfig.yaml b/mmv1/products/workstations/WorkstationConfig.yaml index 4a64f9f44852..2bf15ad17af4 100644 --- a/mmv1/products/workstations/WorkstationConfig.yaml +++ b/mmv1/products/workstations/WorkstationConfig.yaml @@ -198,6 +198,7 @@ properties: - 'host.gceInstance.shieldedInstanceConfig.enableVtpm' - 'host.gceInstance.shieldedInstanceConfig.enableIntegrityMonitoring' - 'host.gceInstance.confidentialInstanceConfig.enableConfidentialCompute' + - 'host.gceInstance.accelerators' properties: - !ruby/object:Api::Type::NestedObject name: 'gceInstance' From 2aaa5ac47521cc49fba2f19d2596512da0fb0e46 Mon Sep 17 00:00:00 2001 From: joelkattapuram <46967875+joelkattapuram@users.noreply.github.com> Date: Fri, 22 Sep 2023 13:49:49 -0700 Subject: [PATCH 34/36] Add support for replica zones, service account scopes, and auditd logging in workstations configs (#9028) --- .../workstations/WorkstationConfig.yaml | 20 +++++ .../examples/workstation_config_basic.tf.erb | 2 + .../workstation_config_service_account.tf.erb | 55 ++++++++++++ ...orkstations_workstation_config_test.go.erb | 85 ++++++++++++++++++- 4 files changed, 161 insertions(+), 1 deletion(-) create mode 100644 mmv1/templates/terraform/examples/workstation_config_service_account.tf.erb diff --git a/mmv1/products/workstations/WorkstationConfig.yaml b/mmv1/products/workstations/WorkstationConfig.yaml index 2bf15ad17af4..d41c39ffd811 100644 --- a/mmv1/products/workstations/WorkstationConfig.yaml +++ b/mmv1/products/workstations/WorkstationConfig.yaml @@ -183,6 +183,19 @@ properties: description: | How long to wait before automatically stopping a workstation after it was started. A value of 0 indicates that workstations using this configuration should never time out from running duration. Must be greater than 0 and less than 24 hours if `encryption_key` is set. Defaults to 12 hours. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + - !ruby/object:Api::Type::Array + name: 'replicaZones' + item_type: Api::Type::String + immutable: true + description: | + Specifies the zones used to replicate the VM and disk resources within the region. If set, exactly two zones within the workstation cluster's region must be specified—for example, `['us-central1-a', 'us-central1-f']`. + If this field is empty, two default zones within the region are used. Immutable after the workstation configuration is created. + default_from_api: true + - !ruby/object:Api::Type::Boolean + name: 'enableAuditAgent' + description: | + Whether to enable Linux `auditd` logging on the workstation. When enabled, a service account must also be specified that has `logging.buckets.write` permission on the project. Operating system audit logging is distinct from Cloud Audit Logs. 
+ ignore_read: true - !ruby/object:Api::Type::NestedObject name: 'host' description: | @@ -192,6 +205,7 @@ properties: - 'host.gceInstance.machineType' - 'host.gceInstance.poolSize' - 'host.gceInstance.tags' + - 'host.gceInstance.serviceAccountScopes' - 'host.gceInstance.disablePublicIpAddresses' - 'host.gceInstance.enableNestedVirtualization' - 'host.gceInstance.shieldedInstanceConfig.enableSecureBoot' @@ -217,6 +231,12 @@ properties: description: |- Email address of the service account that will be used on VM instances used to support this config. This service account must have permission to pull the specified container image. If not set, VMs will run without a service account, in which case the image must be publicly accessible. default_from_api: true + - !ruby/object:Api::Type::Array + name: 'serviceAccountScopes' + item_type: Api::Type::String + description: |- + Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. + default_from_api: true - !ruby/object:Api::Type::Integer name: 'poolSize' description: |- diff --git a/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb b/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb index 314df7dd12f2..2e154577d02f 100644 --- a/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb +++ b/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb @@ -37,6 +37,8 @@ resource "google_workstations_workstation_config" "<%= ctx[:primary_resource_id] idle_timeout = "600s" running_timeout = "21600s" + replica_zones = ["us-central1-a", "us-central1-b"] + host { gce_instance { machine_type = "e2-standard-4" diff --git a/mmv1/templates/terraform/examples/workstation_config_service_account.tf.erb b/mmv1/templates/terraform/examples/workstation_config_service_account.tf.erb new file mode 100644 index 000000000000..18e1b07cbd34 --- /dev/null +++ b/mmv1/templates/terraform/examples/workstation_config_service_account.tf.erb @@ -0,0 +1,55 @@ +resource "google_compute_network" "default" { + provider = google-beta + name = "<%= ctx[:vars]['workstation_cluster_name'] %>" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "<%= ctx[:vars]['workstation_cluster_name'] %>" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + workstation_cluster_id = "<%= ctx[:vars]['workstation_cluster_name'] %>" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + "label" = "key" + } + + annotations = { + label-one = "value-one" + } +} + +resource "google_service_account" "default" { + provider = google-beta + + account_id = "<%= ctx[:vars]['account_id'] %>" + display_name = "Service Account" +} + +resource "google_workstations_workstation_config" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + workstation_config_id = "<%= ctx[:vars]['workstation_config_name'] %>" + workstation_cluster_id = google_workstations_workstation_cluster.<%= ctx[:primary_resource_id] %>.workstation_cluster_id + location = "us-central1" + + enable_audit_agent = true + + host { + gce_instance { + machine_type = "e2-standard-4" + boot_disk_size_gb = 35 + 
disable_public_ip_addresses = true + service_account = google_service_account.default.email + service_account_scopes = ["https://www.googleapis.com/auth/cloud-platform"] + } + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb index ebdc135e6095..f1272a8f7560 100644 --- a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb +++ b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb @@ -223,6 +223,81 @@ resource "google_workstations_workstation_config" "default" { `, context) } + +func TestAccWorkstationsWorkstationConfig_serviceAccount(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_serviceAccount(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_serviceAccount(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name + } + + resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + } + + resource "google_service_account" "default" { + provider = google-beta + + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Service Account" + } + + resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + enable_audit_agent = true + + host { + gce_instance { + service_account = google_service_account.default.email + service_account_scopes = ["https://www.googleapis.com/auth/cloud-platform"] + } + } + } +`, context) +} + func TestAccWorkstationsWorkstationConfig_update(t *testing.T) { t.Parallel() @@ -387,6 +462,13 @@ resource "google_workstations_workstation_cluster" "default" { location = "us-central1" } +resource "google_service_account" "default" { + provider = google-beta + + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Service Account" +} + resource "google_workstations_workstation_config" "default" { provider = google-beta workstation_config_id = "tf-test-workstation-config%{random_suffix}" @@ -399,6 +481,7 @@ resource 
"google_workstations_workstation_config" "default" { boot_disk_size_gb = 35 pool_size = 0 + service_account = google_service_account.default.email disable_public_ip_addresses = false shielded_instance_config { @@ -780,4 +863,4 @@ resource "google_workstations_workstation_config" "default" { } `, context) } -<% end -%> +<% end -%> \ No newline at end of file From d1dc8393ee573ba35279c82ba0ea16a2f5819800 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Fri, 22 Sep 2023 15:58:26 -0500 Subject: [PATCH 35/36] Remove upgrade guide entry for google_identity_platform_project_default_config (#9057) --- .../website/docs/guides/version_5_upgrade.html.markdown | 6 ------ 1 file changed, 6 deletions(-) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index f6ee289852e6..f32b4d3dc595 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -660,12 +660,6 @@ resource "google_secret_manager_secret" "my-secret" { } ``` -## Resource: `google_identity_platform_project_default_config` - -### `google_identity_platform_project_default_config` has been removed from the provider - -Use the `google_identity_platform_config` resource instead. It contains a more comprehensive list of fields, and was created before `google_identity_platform_project_default_config` was added. - ## Resource: `google_compute_service_attachment` ### `reconcile_connections` now defaults from API From aef0da09e868754d947f4c4c932af5eae7ef8e5c Mon Sep 17 00:00:00 2001 From: Ishant Bhaskar <68506858+ibhaskar2@users.noreply.github.com> Date: Mon, 25 Sep 2023 20:55:52 +0530 Subject: [PATCH 36/36] Add preferred_zone in clone context of sql database instance resource (#8990) --- .../services/sql/resource_sql_database_instance.go.erb | 6 ++++++ .../services/sql/resource_sql_database_instance_test.go | 1 + .../website/docs/r/sql_database_instance.html.markdown | 2 ++ 3 files changed, 9 insertions(+) diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb index 28004b4c275d..254161f9e9ee 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb @@ -946,6 +946,11 @@ is set to true. Defaults to ZONAL.`, DiffSuppressFunc: tpgresource.TimestampDiffSuppress(time.RFC3339Nano), Description: `The timestamp of the point in time that should be restored.`, }, + "preferred_zone": { + Type: schema.TypeString, + Optional: true, + Description: `(Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. 
If no zone is specified, clone to the same zone as the source instance.`, + }, "database_names": { Type: schema.TypeList, Optional: true, @@ -1317,6 +1322,7 @@ func expandCloneContext(configured []interface{}) (*sqladmin.CloneContext, strin return &sqladmin.CloneContext{ PointInTime: _cloneConfiguration["point_in_time"].(string), + PreferredZone: _cloneConfiguration["preferred_zone"].(string), DatabaseNames: databaseNames, AllocatedIpRange: _cloneConfiguration["allocated_ip_range"].(string), }, _cloneConfiguration["source_instance_name"].(string) diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go index 326f858fcb2a..f20ece069f2f 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go @@ -3727,6 +3727,7 @@ resource "google_sql_database_instance" "instance" { clone { source_instance_name = data.google_sql_backup_run.backup.instance point_in_time = data.google_sql_backup_run.backup.start_time + preferred_zone = "us-central1-b" } deletion_protection = false diff --git a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown index 7cf741ed9260..8e58b8741b03 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown @@ -486,6 +486,8 @@ The optional `clone` block supports: A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". +* `preferred_zone` - (Optional) (Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. If no zone is specified, clone to the same zone as the source instance. [clone-unavailable-instance](https://cloud.google.com/sql/docs/postgres/clone-instance#clone-unavailable-instance) + * `database_names` - (Optional) (SQL Server only, use with `point_in_time`) Clone only the specified databases from the source instance. Clone all databases if empty. * `allocated_ip_range` - (Optional) The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?.
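
For example, a minimal clone configuration that uses `preferred_zone` might look like the following sketch. This is an illustration only: the resource names, timestamp, and zone are placeholder values, and it assumes an existing PostgreSQL source instance (`google_sql_database_instance.source`) managed elsewhere in the configuration.

```hcl
resource "google_sql_database_instance" "clone" {
  name             = "postgres-pitr-clone" # placeholder name
  region           = "us-central1"
  database_version = "POSTGRES_14"

  clone {
    # Name of the existing instance to clone from (assumed to exist).
    source_instance_name = google_sql_database_instance.source.name

    # Must be a valid recovery point for the source instance.
    point_in_time = "2023-09-25T10:00:00Z"

    # Zone within the clone's region to place the cloned instance in.
    preferred_zone = "us-central1-b"
  }

  deletion_protection = false
}
```

The `clone` block is only consumed when the cloned instance is created; settings not specified here are copied from the source instance.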