diff --git a/.ci/containers/go-plus/Dockerfile b/.ci/containers/go-plus/Dockerfile index 6a6d48d9038f..c7b8641eb297 100644 --- a/.ci/containers/go-plus/Dockerfile +++ b/.ci/containers/go-plus/Dockerfile @@ -1,5 +1,25 @@ -from golang:1.19-bullseye as resource +# Stage 1: Download go module cache for builds +FROM golang:1.19-bullseye AS builder +ENV GOCACHE=/go/cache + +RUN apt-get update && apt-get install -y unzip +WORKDIR /app1 +# Add the source code and build +ADD "https://github.com/GoogleCloudPlatform/magic-modules/archive/refs/heads/main.zip" source.zip +RUN unzip source.zip && rm source.zip +WORKDIR /app1/magic-modules-main/.ci/magician +# Build the binary (we won't use it in the final image, but it's cached) +RUN go build -o /dev/null . + +# Stage 2: Creating the final image +FROM golang:1.19-bullseye SHELL ["/bin/bash", "-c"] +ENV GOCACHE=/go/cache + +# Copy Go dependencies and Go build cache +COPY --from=builder /go/pkg/mod /go/pkg/mod +COPY --from=builder /go/cache /go/cache + # Set up Github SSH cloning. RUN ssh-keyscan github.com >> /known_hosts RUN echo "UserKnownHostsFile /known_hosts" >> /etc/ssh/ssh_config diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index 80b96c3873b5..98af81827746 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -27,10 +27,14 @@ var ( } // This is for new team members who are onboarding - trustedContributors = []string{} + trustedContributors = []string{ + "BBBmau", + } // This is for reviewers who are "on vacation": will not receive new review assignments but will still receive re-requests for assigned PRs. 
- onVacationReviewers = []string{} + onVacationReviewers = []string{ + "hao-nan-li", + } ) type UserType int64 diff --git a/.ci/scripts/go-plus/magician/exec.sh b/.ci/scripts/go-plus/magician/exec.sh index 11f636d23791..acd9714f09bd 100755 --- a/.ci/scripts/go-plus/magician/exec.sh +++ b/.ci/scripts/go-plus/magician/exec.sh @@ -1,20 +1,28 @@ #!/bin/bash -# Check if there's at least one argument -if [ "$#" -eq 0 ]; then - echo "No arguments provided" - exit 1 -fi - # Get the directory of the current script DIR="$(dirname $(realpath $0))" -# Construct the path to the Go program -GO_PROGRAM="$DIR/../../../magician/" +# Construct the path to the Go program directory and binary +GO_PROGRAM_DIR="$DIR/../../../magician/" +GO_BINARY="$GO_PROGRAM_DIR/magician_binary" -pushd $GO_PROGRAM +pushd $GO_PROGRAM_DIR set -x -# Pass all arguments to the child command -go run . "$@" +# Check if the binary exists +if [ ! -f "$GO_BINARY" ]; then + # If it doesn't exist, compile the binary + echo "Building the magician binary at $GO_BINARY" + go build -o "$GO_BINARY" +fi + +# If there are no arguments only compile the binary +if [ "$#" -eq 0 ]; then + echo "No arguments provided" + exit 0 +fi + +# Run the binary and pass all arguments +$GO_BINARY "$@" set +x diff --git a/.github/workflows/test-tgc.yml b/.github/workflows/test-tgc.yml index a22bc69203dc..7d33f3cbc60c 100644 --- a/.github/workflows/test-tgc.yml +++ b/.github/workflows/test-tgc.yml @@ -25,8 +25,6 @@ on: sha: description: "The commit SHA in magic-modules repository where the status result will be posted" required: true - caller_id: - description: "Identity of the workflow dispatch caller" concurrency: group: test-tgc-${{ github.event.inputs.owner }}-${{ github.event.inputs.repo }}-${{ github.event.inputs.branch }} @@ -42,12 +40,19 @@ jobs: with: repository: ${{ github.event.inputs.owner }}/${{ github.event.inputs.repo }} ref: ${{ github.event.inputs.branch }} - path: tgc fetch-depth: 2 + - name: Cache Go modules and build 
cache + uses: actions/cache@v3 + with: + path: | + ~/go/pkg/mod + key: ${{ runner.os }}-test-${{ github.event.inputs.repo }}-${{ hashFiles('go.sum') }} + restore-keys: | + ${{ runner.os }}-test-${{ github.event.inputs.repo }}-${{ hashFiles('go.sum') }} + ${{ runner.os }}-test-${{ github.event.inputs.repo }}- - name: Check for Code Changes id: pull_request run: | - cd tgc gofiles=$(git diff --name-only HEAD~1 | { grep -e "\.go$" -e "go.mod$" -e "go.sum$" || test $? = 1; }) if [ -z "$gofiles" ]; then echo "has_changes=false" >> $GITHUB_OUTPUT @@ -80,14 +85,12 @@ jobs: - name: Build Terraform Google Conversion if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd tgc go mod edit -replace github.com/hashicorp/terraform-provider-google-beta=github.com/${{ github.event.inputs.owner }}/terraform-provider-google-beta@${{ github.event.inputs.branch }} go mod tidy make build - name: Run Unit Tests if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd tgc make test - name: Post Result Status to Pull Request if: ${{ !cancelled() }} diff --git a/.github/workflows/test-tpg.yml b/.github/workflows/test-tpg.yml index 6d7e544aecd5..3b0c3f2fa822 100644 --- a/.github/workflows/test-tpg.yml +++ b/.github/workflows/test-tpg.yml @@ -25,8 +25,6 @@ on: sha: description: "The commit SHA in magic-modules repository where the status result will be posted" required: true - caller_id: - description: "Identity of the workflow dispatch caller" concurrency: group: test-tpg-${{ github.event.inputs.owner }}-${{ github.event.inputs.repo }}-${{ github.event.inputs.branch }} @@ -42,12 +40,20 @@ jobs: with: repository: ${{ github.event.inputs.owner }}/${{ github.event.inputs.repo }} ref: ${{ github.event.inputs.branch }} - path: provider fetch-depth: 2 + - name: Cache Go modules and build cache + uses: actions/cache@v3 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: ${{ runner.os }}-test-${{ github.event.inputs.repo 
}}-${{hashFiles('go.sum','google-beta/transport/**','google-beta/tpgresource/**','google-beta/acctest/**','google-beta/envvar/**','google-beta/sweeper/**','google-beta/verify/**') }} + restore-keys: | + ${{ runner.os }}-test-${{ github.event.inputs.repo }}-${{ hashFiles('go.sum') }} + ${{ runner.os }}-test-${{ github.event.inputs.repo }}- - name: Check for Code Changes id: pull_request run: | - cd provider gofiles=$(git diff --name-only HEAD~1 | { grep -e "\.go$" -e "go.mod$" -e "go.sum$" || test $? = 1; }) if [ -z "$gofiles" ]; then echo "has_changes=false" >> $GITHUB_OUTPUT @@ -80,22 +86,18 @@ jobs: - name: Build Provider if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd provider go build - name: Run Unit Tests if: ${{ !failure() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd provider - make testnolint + make testnolint TESTARGS="-p 4" - name: Lint Check if: ${{ !cancelled() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd provider make lint - name: Documentation Check if: ${{ !cancelled() && steps.pull_request.outputs.has_changes == 'true' }} run: | - cd provider make docscheck - name: Post Result Status to Pull Request if: ${{ !cancelled() }} diff --git a/docs/content/contribute/create-pr.md b/docs/content/contribute/create-pr.md index 14fb2644ef5f..5530537fa4e5 100644 --- a/docs/content/contribute/create-pr.md +++ b/docs/content/contribute/create-pr.md @@ -18,14 +18,16 @@ weight: 10 1. A reviewer will automatically be assigned to your PR. 1. Creating a new pull request or pushing a new commit automatically triggers our CI pipelines and workflows. After CI starts, downstream diff generation takes about 10 minutes; VCR tests can take up to 2 hours. If you are a community contributor, some tests will only run after approval from a reviewer. 
+ - While convenient, relying on CI to test iterative changes to PRs often adds extreme latency to reviews if there are errors in test configurations or at runtime. We **strongly** recommend you [test your changes locally before pushing]({{< ref "/develop/run-tests" >}}) even after the initial change. 1. If your assigned reviewer does not respond to changes on a pull request within two US business days, ping them on the pull request. {{< hint info >}} **TIP:** Speeding up review: +1. [Test your changes locally before pushing]({{< ref "/develop/run-tests" >}}) to iterate faster. + - You can push them and test in parallel as well. New CI runs will preempt old ones where possible. 1. Resolve failed [status checks](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/about-status-checks) quickly - - [Run provider tests locally]({{< ref "/develop/run-tests" >}}) to iterate faster - - Ask your reviewer for help if you get stuck. -1. [Self-review your PR]({{< ref "/contribute/review-pr" >}}) or ask someone you know to review + - Directly ask your reviewer for help if you don't know how to proceed. If there are failed checks they may only check in if there's no progress after a couple days. +1. 
[Self-review your PR]({{< ref "/contribute/review-pr" >}}) or ask someone else familiar with Terraform to review {{< /hint >}} diff --git a/mmv1/products/alloydb/Cluster.yaml b/mmv1/products/alloydb/Cluster.yaml index 618ac5f3dcc9..63c7a537893f 100644 --- a/mmv1/products/alloydb/Cluster.yaml +++ b/mmv1/products/alloydb/Cluster.yaml @@ -99,6 +99,8 @@ examples: skip_docs: true custom_code: !ruby/object:Provider::Terraform::CustomCode pre_create: templates/terraform/pre_create/alloydb_cluster.go.erb + pre_update: templates/terraform/pre_update/alloydb_cluster.go.erb + pre_delete: templates/terraform/pre_delete/alloydb_cluster.go.erb parameters: - !ruby/object:Api::Type::String name: 'clusterId' @@ -477,7 +479,6 @@ properties: - :PRIMARY - :SECONDARY default_value: :PRIMARY - immutable: true description: | The type of cluster. If not set, defaults to PRIMARY. - !ruby/object:Api::Type::NestedObject @@ -487,8 +488,18 @@ properties: properties: - !ruby/object:Api::Type::String name: "primaryClusterName" - immutable: true required: true description: | Name of the primary cluster must be in the format 'projects/{project}/locations/{location}/clusters/{cluster_id}' +virtual_fields: + - !ruby/object:Api::Type::Enum + name: 'deletion_policy' + description: | + Policy to determine if the cluster should be deleted forcefully. + Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. + Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. 
+ values: + - :DEFAULT + - :FORCE + default_value: :DEFAULT diff --git a/mmv1/products/alloydb/Instance.yaml b/mmv1/products/alloydb/Instance.yaml index cc8b23b35a27..9f71e922bb9e 100644 --- a/mmv1/products/alloydb/Instance.yaml +++ b/mmv1/products/alloydb/Instance.yaml @@ -13,6 +13,11 @@ --- !ruby/object:Api::Resource name: 'Instance' +docs: !ruby/object:Provider::Terraform::Docs + warning: | + Deleting an instance with instanceType = SECONDARY does not delete the secondary instance, and abandons it instead. + Use deletion_policy = "FORCE" in the associated secondary cluster and delete the cluster forcefully to delete the secondary cluster as well its associated secondary instance. + Users can undo the delete secondary instance action by importing the deleted secondary instance by calling terraform import. self_link: '{{cluster}}/instances/{{instance_id}}' base_url: '{{cluster}}/instances?instanceId={{instance_id}}' update_verb: :PATCH @@ -28,9 +33,9 @@ async: !ruby/object:Api::OpAsync base_url: '{{op_id}}' wait_ms: 1000 timeouts: !ruby/object:Api::Timeouts - insert_minutes: 40 - update_minutes: 40 - delete_minutes: 40 + insert_minutes: 120 + update_minutes: 120 + delete_minutes: 120 result: !ruby/object:Api::OpAsync::Result path: 'response' status: !ruby/object:Api::OpAsync::Status @@ -49,6 +54,8 @@ skip_sweeper: true autogen_async: true custom_code: !ruby/object:Provider::Terraform::CustomCode custom_import: templates/terraform/custom_import/alloydb_instance.go.erb + pre_create: templates/terraform/pre_create/alloydb_instance.go.erb + pre_delete: templates/terraform/pre_delete/alloydb_instance.go.erb examples: - !ruby/object:Provider::Terraform::Examples name: 'alloydb_instance_basic' @@ -61,6 +68,19 @@ examples: - 'reconciling' - 'update_time' skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_secondary_instance_basic' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + 
alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + alloydb_secondary_instance_name: 'alloydb-secondary-instance' + network_name: 'alloydb-secondary-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true - !ruby/object:Provider::Terraform::Examples name: 'alloydb_instance_basic_test' primary_resource_id: 'default' @@ -74,6 +94,21 @@ examples: - 'reconciling' - 'update_time' skip_docs: true + - !ruby/object:Provider::Terraform::Examples + name: 'alloydb_secondary_instance_basic_test' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + alloydb_secondary_instance_name: 'alloydb-secondary-instance' + network_name: 'alloydb-secondary-network' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_docs: true parameters: - !ruby/object:Api::Type::ResourceRef name: 'cluster' @@ -167,10 +202,17 @@ properties: required: true immutable: true description: | - The type of the instance. If the instance type is READ_POOL, provide the associated PRIMARY instance in the `depends_on` meta-data attribute. + The type of the instance. + If the instance type is READ_POOL, provide the associated PRIMARY/SECONDARY instance in the `depends_on` meta-data attribute. + If the instance type is SECONDARY, point to the cluster_type of the associated secondary cluster instead of mentioning SECONDARY. + Example: {instance_type = google_alloydb_cluster..cluster_type} instead of {instance_type = SECONDARY} + If the instance type is SECONDARY, the terraform delete instance operation does not delete the secondary instance but abandons it instead. 
+ Use deletion_policy = "FORCE" in the associated secondary cluster and delete the cluster forcefully to delete the secondary cluster as well its associated secondary instance. + Users can undo the delete secondary instance action by importing the deleted secondary instance by calling terraform import. values: - :PRIMARY - :READ_POOL + - :SECONDARY - !ruby/object:Api::Type::String name: 'ipAddress' output: true diff --git a/mmv1/products/apigee/Organization.yaml b/mmv1/products/apigee/Organization.yaml index e6c0d808b8ad..912d95511a39 100644 --- a/mmv1/products/apigee/Organization.yaml +++ b/mmv1/products/apigee/Organization.yaml @@ -60,6 +60,8 @@ examples: test_env_vars: org_id: :ORG_ID billing_account: :BILLING_ACCT + ignore_read_extra: + - properties skip_docs: true # Resource creation race @@ -76,6 +78,8 @@ examples: test_env_vars: org_id: :ORG_ID billing_account: :BILLING_ACCT + ignore_read_extra: + - properties skip_docs: true # Resource creation race diff --git a/mmv1/products/bigtable/AppProfile.yaml b/mmv1/products/bigtable/AppProfile.yaml index 4100ae507fa8..33b2881ad74b 100644 --- a/mmv1/products/bigtable/AppProfile.yaml +++ b/mmv1/products/bigtable/AppProfile.yaml @@ -43,6 +43,8 @@ examples: deletion_protection: 'false' ignore_read_extra: - 'ignore_warnings' + # bigtable instance does not use the shared HTTP client, this test creates an instance + skip_vcr: true - !ruby/object:Provider::Terraform::Examples name: 'bigtable_app_profile_singlecluster' primary_resource_id: 'ap' @@ -56,6 +58,8 @@ examples: deletion_protection: 'false' ignore_read_extra: - 'ignore_warnings' + # bigtable instance does not use the shared HTTP client, this test creates an instance + skip_vcr: true - !ruby/object:Provider::Terraform::Examples name: 'bigtable_app_profile_multicluster' primary_resource_id: 'ap' @@ -69,6 +73,8 @@ examples: deletion_protection: 'false' ignore_read_extra: - 'ignore_warnings' + # bigtable instance does not use the shared HTTP client, this test creates 
an instance + skip_vcr: true custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/bigtable_app_profile.go.erb extra_schema_entry: templates/terraform/extra_schema_entry/bigtable_app_profile.go.erb diff --git a/mmv1/products/compute/ForwardingRule.yaml b/mmv1/products/compute/ForwardingRule.yaml index 72f8c6292a2c..d0258cfdf70a 100644 --- a/mmv1/products/compute/ForwardingRule.yaml +++ b/mmv1/products/compute/ForwardingRule.yaml @@ -402,56 +402,58 @@ properties: - !ruby/object:Api::Type::String name: 'portRange' description: | - This field can only be used: - - * If `IPProtocol` is one of TCP, UDP, or SCTP. - * By backend service-based network load balancers, target pool-based - network load balancers, internal proxy load balancers, external proxy load - balancers, Traffic Director, external protocol forwarding, and Classic VPN. - Some products have restrictions on what ports can be used. See + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `portRange` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: external passthrough + Network Load Balancers, internal and external proxy Network Load + Balancers, internal and external Application Load Balancers, external + protocol forwarding, and Classic VPN. + * Some products have restrictions on what ports can be used. See [port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) for details. - - Only packets addressed to ports in the specified range will be forwarded to - the backends configured with this forwarding rule. - - The `ports` and `port_range` fields are mutually exclusive. 
- For external forwarding rules, two or more forwarding rules cannot use the - same `[IPAddress, IPProtocol]` pair, and cannot have - overlapping `portRange`s. + same `[IPAddress, IPProtocol]` pair, and cannot have overlapping + `portRange`s. For internal forwarding rules within the same VPC network, two or more - forwarding rules cannot use the same `[IPAddress, IPProtocol]` - pair, and cannot have overlapping `portRange`s. + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair, and + cannot have overlapping `portRange`s. + + @pattern: \d+(?:-\d+)? diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' default_from_api: true - !ruby/object:Api::Type::Array name: 'ports' max_size: 5 description: | - This field can only be used: - - * If `IPProtocol` is one of TCP, UDP, or SCTP. - * By internal TCP/UDP load balancers, backend service-based network load - balancers, internal protocol forwarding and when protocol is not L3_DEFAULT. - - - You can specify a list of up to five ports by number, separated by commas. - The ports can be contiguous or discontiguous. Only packets addressed to - these ports will be forwarded to the backends configured with this - forwarding rule. + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `ports` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: internal passthrough + Network Load Balancers, backend service-based external passthrough Network + Load Balancers, and internal protocol forwarding. + * You can specify a list of up to five ports by number, separated by + commas. The ports can be contiguous or discontiguous. 
For external forwarding rules, two or more forwarding rules cannot use the - same `[IPAddress, IPProtocol]` pair, and cannot share any values - defined in `ports`. + same `[IPAddress, IPProtocol]` pair if they share at least one port + number. For internal forwarding rules within the same VPC network, two or more - forwarding rules cannot use the same `[IPAddress, IPProtocol]` - pair, and cannot share any values defined in `ports`. + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair if + they share at least one port number. - The `ports` and `port_range` fields are mutually exclusive. + @pattern: \d+(?:-\d+)? is_set: true custom_expand: 'templates/terraform/custom_expand/set_to_list.erb' item_type: Api::Type::String @@ -523,21 +525,21 @@ properties: - !ruby/object:Api::Type::Boolean name: 'allPorts' description: | - This field can only be used: - * If `IPProtocol` is one of TCP, UDP, or SCTP. - * By internal TCP/UDP load balancers, backend service-based network load - balancers, and internal and external protocol forwarding. - - This option should be set to TRUE when the Forwarding Rule - IPProtocol is set to L3_DEFAULT. - - Set this field to true to allow packets addressed to any port or packets + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `allPorts` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, SCTP, or + L3_DEFAULT. + * It's applicable only to the following products: internal passthrough + Network Load Balancers, backend service-based external passthrough Network + Load Balancers, and internal and external protocol forwarding. 
+ * Set this field to true to allow packets addressed to any port or packets lacking destination port information (for example, UDP fragments after the first fragment) to be forwarded to the backends configured with this - forwarding rule. - - The `ports`, `port_range`, and - `allPorts` fields are mutually exclusive. + forwarding rule. The L3_DEFAULT protocol requires `allPorts` be set to + true. - !ruby/object:Api::Type::Enum name: 'networkTier' description: | diff --git a/mmv1/products/compute/GlobalForwardingRule.yaml b/mmv1/products/compute/GlobalForwardingRule.yaml index 780dc6e8a562..f3b30c449c44 100644 --- a/mmv1/products/compute/GlobalForwardingRule.yaml +++ b/mmv1/products/compute/GlobalForwardingRule.yaml @@ -423,24 +423,26 @@ properties: - !ruby/object:Api::Type::String name: 'portRange' description: | - This field can only be used: - - * If `IPProtocol` is one of TCP, UDP, or SCTP. - * By backend service-based network load balancers, target pool-based - network load balancers, internal proxy load balancers, external proxy load - balancers, Traffic Director, external protocol forwarding, and Classic VPN. - Some products have restrictions on what ports can be used. See + The `portRange` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: external passthrough + Network Load Balancers, internal and external proxy Network Load + Balancers, internal and external Application Load Balancers, external + protocol forwarding, and Classic VPN. + * Some products have restrictions on what ports can be used. See [port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) for details. + For external forwarding rules, two or more forwarding rules cannot use the + same `[IPAddress, IPProtocol]` pair, and cannot have overlapping + `portRange`s. 
+ + For internal forwarding rules within the same VPC network, two or more + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair, and + cannot have overlapping `portRange`s. - * TargetHttpProxy: 80, 8080 - * TargetHttpsProxy: 443 - * TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, - 1883, 5222 - * TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, - 1883, 5222 - * TargetVpnGateway: 500, 4500 + @pattern: \d+(?:-\d+)? diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' # This is a multi-resource resource reference (TargetHttp(s)Proxy, # TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, diff --git a/mmv1/products/firestore/Field.yaml b/mmv1/products/firestore/Field.yaml index 92a71cc0b203..bd67a668a598 100644 --- a/mmv1/products/firestore/Field.yaml +++ b/mmv1/products/firestore/Field.yaml @@ -20,6 +20,8 @@ immutable: false update_verb: :PATCH update_mask: true create_verb: :PATCH +error_retry_predicates: + ["transport_tpg.FirestoreField409RetryUnderlyingDataChanged"] description: | Represents a single field in the database. Fields are grouped by their "Collection Group", which represent all collections diff --git a/mmv1/products/monitoring/AlertPolicy.yaml b/mmv1/products/monitoring/AlertPolicy.yaml index 1a8021a6b5a6..92aa4f110ad8 100644 --- a/mmv1/products/monitoring/AlertPolicy.yaml +++ b/mmv1/products/monitoring/AlertPolicy.yaml @@ -971,6 +971,7 @@ properties: at_least_one_of: - documentation.0.content - documentation.0.mime_type + - documentation.0.subject description: | The text of the documentation, interpreted according to mimeType. The content may not exceed 8,192 Unicode characters and may not @@ -981,7 +982,19 @@ properties: at_least_one_of: - documentation.0.content - documentation.0.mime_type + - documentation.0.subject default_value: text/markdown description: | The format of the content field. Presently, only the value "text/markdown" is supported. 
+ - !ruby/object:Api::Type::String + name: subject + at_least_one_of: + - documentation.0.content + - documentation.0.mime_type + - documentation.0.subject + description: | + The subject line of the notification. The subject line may not + exceed 10,240 bytes. In notifications generated by this policy the contents + of the subject line after variable expansion will be truncated to 255 bytes + or shorter at the latest UTF-8 character boundary. diff --git a/mmv1/products/monitoring/UptimeCheckConfig.yaml b/mmv1/products/monitoring/UptimeCheckConfig.yaml index 3af431687199..1de7bdc41934 100644 --- a/mmv1/products/monitoring/UptimeCheckConfig.yaml +++ b/mmv1/products/monitoring/UptimeCheckConfig.yaml @@ -172,6 +172,15 @@ properties: - :STATIC_IP_CHECKERS - :VPC_CHECKERS default_from_api: true + - !ruby/object:Api::Type::KeyValuePairs + name: userLabels + description: + User-supplied key/value data to be used for organizing and + identifying the `UptimeCheckConfig` objects. + The field can contain up to 64 entries. Each key and value is limited to + 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + values can contain only lowercase letters, numerals, underscores, and + dashes. Keys must begin with a letter. - !ruby/object:Api::Type::NestedObject name: httpCheck description: Contains information needed to make an HTTP or HTTPS check. @@ -193,6 +202,16 @@ properties: values: - :TYPE_UNSPECIFIED - :URL_ENCODED + - :USER_PROVIDED + - !ruby/object:Api::Type::String + name: customContentType + description: + A user provided content type header to use for the check. The invalid + configurations outlined in the `content_type` field apply to + custom_content_type`, as well as the following + 1. `content_type` is `URL_ENCODED` and `custom_content_type` is set. + 2. `content_type` is `USER_PROVIDED` and `custom_content_type` is not + set. 
- !ruby/object:Api::Type::NestedObject name: authInfo at_least_one_of: @@ -330,6 +349,16 @@ properties: - :STATUS_CLASS_4XX - :STATUS_CLASS_5XX - :STATUS_CLASS_ANY + - !ruby/object:Api::Type::NestedObject + name: pingConfig + description: + Contains information needed to add pings to an HTTP check. + properties: + - !ruby/object:Api::Type::Integer + name: pingsCount + required: true + description: + Number of ICMP pings. A maximum of 3 ICMP pings is currently supported. - !ruby/object:Api::Type::NestedObject name: tcpCheck description: Contains information needed to make a TCP check. @@ -341,6 +370,16 @@ properties: The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) to construct the full URL. + - !ruby/object:Api::Type::NestedObject + name: pingConfig + description: + Contains information needed to add pings to a TCP check. + properties: + - !ruby/object:Api::Type::Integer + name: pingsCount + required: true + description: + Number of ICMP pings. A maximum of 3 ICMP pings is currently supported. 
- !ruby/object:Api::Type::NestedObject name: resourceGroup immutable: true diff --git a/mmv1/products/orgpolicy/CustomConstraint.yaml b/mmv1/products/orgpolicy/CustomConstraint.yaml index 60635242fd0f..e305db146275 100644 --- a/mmv1/products/orgpolicy/CustomConstraint.yaml +++ b/mmv1/products/orgpolicy/CustomConstraint.yaml @@ -31,11 +31,19 @@ examples: - !ruby/object:Provider::Terraform::Examples name: 'org_policy_custom_constraint_basic' primary_resource_id: 'constraint' + vars: + policy_name: 'custom.disableGkeAutoUpgrade' + test_vars_overrides: + policy_name: '"custom.tfTestDisableGkeAutoUpgrade" + acctest.RandString(t, 10)' test_env_vars: org_id: :ORG_ID - !ruby/object:Provider::Terraform::Examples name: 'org_policy_custom_constraint_full' primary_resource_id: 'constraint' + vars: + policy_name: 'custom.disableGkeAutoUpgrade' + test_vars_overrides: + policy_name: '"custom.tfTestDisableGkeAutoUpgrade" + acctest.RandString(t, 10)' test_env_vars: org_id: :ORG_TARGET parameters: diff --git a/mmv1/products/redis/Cluster.yaml b/mmv1/products/redis/Cluster.yaml index 0241b174a145..08751531ba2f 100644 --- a/mmv1/products/redis/Cluster.yaml +++ b/mmv1/products/redis/Cluster.yaml @@ -57,6 +57,11 @@ examples: policy_name: "mypolicy" subnet_name: "mysubnet" network_name: "mynetwork" + prevent_destroy: 'true' + test_vars_overrides: + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' properties: - !ruby/object:Api::Type::Time name: createTime diff --git a/mmv1/products/redis/Instance.yaml b/mmv1/products/redis/Instance.yaml index 316af5aae8de..14c7b8e5ec85 100644 --- a/mmv1/products/redis/Instance.yaml +++ b/mmv1/products/redis/Instance.yaml @@ -44,22 +44,35 @@ examples: primary_resource_id: 'cache' vars: instance_name: 'memory-cache' + prevent_destroy: 'true' + test_vars_overrides: + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' - !ruby/object:Provider::Terraform::Examples name: 'redis_instance_full' 
primary_resource_id: 'cache' vars: instance_name: 'ha-memory-cache' network_name: 'redis-test-network' + prevent_destroy: 'true' test_vars_overrides: network_name: 'acctest.BootstrapSharedTestNetwork(t, "redis-full")' + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' - !ruby/object:Provider::Terraform::Examples name: 'redis_instance_full_with_persistence_config' primary_resource_id: 'cache-persis' vars: instance_name: 'ha-memory-cache-persis' network_name: 'redis-test-network' + prevent_destroy: 'true' test_vars_overrides: network_name: 'acctest.BootstrapSharedTestNetwork(t, "redis-full-persis")' + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' - !ruby/object:Provider::Terraform::Examples name: 'redis_instance_private_service' primary_resource_id: 'cache' @@ -67,6 +80,11 @@ examples: instance_name: 'private-cache' address_name: 'address' network_name: 'redis-test-network' + prevent_destroy: 'true' + test_vars_overrides: + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' skip_test: true - !ruby/object:Provider::Terraform::Examples name: 'redis_instance_private_service_test' @@ -74,8 +92,12 @@ examples: vars: instance_name: 'private-cache' network_name: 'redis-test-network' + prevent_destroy: 'true' test_vars_overrides: network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-network-1")' + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' skip_docs: true - !ruby/object:Provider::Terraform::Examples name: 'redis_instance_mrr' @@ -83,8 +105,12 @@ examples: vars: instance_name: 'mrr-memory-cache' network_name: 'redis-test-network' + prevent_destroy: 'true' test_vars_overrides: network_name: 'acctest.BootstrapSharedTestNetwork(t, "redis-mrr")' + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' - !ruby/object:Provider::Terraform::Examples name: 'redis_instance_cmek' primary_resource_id: 'cache' @@ -92,8 +118,12 @@ examples: 
vars: instance_name: 'cmek-memory-cache' network_name: 'redis-test-network' + prevent_destroy: 'true' test_vars_overrides: network_name: 'acctest.BootstrapSharedTestNetwork(t, "redis-cmek")' + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' parameters: # TODO: resourceref? - !ruby/object:Api::Type::String diff --git a/mmv1/products/spanner/Instance.yaml b/mmv1/products/spanner/Instance.yaml index 6903c10a4416..9c0c38c266c3 100644 --- a/mmv1/products/spanner/Instance.yaml +++ b/mmv1/products/spanner/Instance.yaml @@ -156,3 +156,79 @@ properties: values: - :READY - :CREATING + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + description: | + The autoscaling configuration. Autoscaling is enabled if this field is set. + When autoscaling is enabled, num_nodes and processing_units are treated as, + OUTPUT_ONLY fields and reflect the current compute capacity allocated to + the instance. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingLimits' + description: | + Defines scale in controls to reduce the risk of response latency + and outages due to abrupt scale-in events + properties: + - !ruby/object:Api::Type::NestedObject + name: 'minLimit' + description: | + Specifies the minimum compute capacity for the instance. + properties: + - !ruby/object:Api::Type::Integer + name: 'minNodes' + exactly_one_of: + - min_nodes + - min_processing_units + description: | + Specifies minimum number of processing units allocated to the instance. + If set, this number should be greater than or equal to 1. + - !ruby/object:Api::Type::Integer + name: 'minProcessingUnits' + exactly_one_of: + - min_nodes + - min_processing_units + description: | + Specifies minimum number of processing units allocated to the instance. + If set, this number should be multiples of 1000. + - !ruby/object:Api::Type::NestedObject + name: 'maxLimit' + description: | + Specifies the maximum compute capacity for the instance. 
+ The maximum compute capacity should be less than or equal to 10X the minimum compute capacity. + properties: + - !ruby/object:Api::Type::Integer + name: 'maxNodes' + exactly_one_of: + - max_nodes + - max_processing_units + description: | + Specifies maximum number of nodes allocated to the instance. + If set, this number should be greater than or equal to min_nodes. + - !ruby/object:Api::Type::Integer + name: 'maxProcessingUnits' + exactly_one_of: + - max_nodes + - max_processing_units + description: | + Specifies maximum number of processing units allocated to the instance. + If set, this number should be multiples of 1000 and be greater than or equal to + min_processing_units. + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingTargets' + description: | + Defines scale in controls to reduce the risk of response latency + and outages due to abrupt scale-in events + properties: + - !ruby/object:Api::Type::Integer + name: 'highPriorityCpuUtilizationPercent' + description: | + Specifies the target high priority cpu utilization percentage that the autoscaler + should be trying to achieve for the instance. + This number is on a scale from 0 (no utilization) to 100 (full utilization).. + - !ruby/object:Api::Type::Integer + name: 'storageUtilizationPercent' + description: | + Specifies the target storage utilization percentage that the autoscaler + should be trying to achieve for the instance. + This number is on a scale from 0 (no utilization) to 100 (full utilization). 
diff --git a/mmv1/products/tpuv2/Vm.yaml b/mmv1/products/tpuv2/Vm.yaml index ac58ca1a0cda..49363f49e6f2 100644 --- a/mmv1/products/tpuv2/Vm.yaml +++ b/mmv1/products/tpuv2/Vm.yaml @@ -59,6 +59,7 @@ examples: name: 'tpu_v2_vm_full' min_version: 'beta' primary_resource_id: 'tpu' + pull_external: true vars: vm_name: 'test-tpu' network_name: 'tpu-net' diff --git a/mmv1/products/workflows/Workflow.yaml b/mmv1/products/workflows/Workflow.yaml index 3c7a4baad876..e6f5600c6f68 100644 --- a/mmv1/products/workflows/Workflow.yaml +++ b/mmv1/products/workflows/Workflow.yaml @@ -41,6 +41,14 @@ examples: name: 'workflow' account_id: 'my-account' skip_import_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'workflow_beta' + primary_resource_id: 'example_beta' + vars: + name: 'workflow_beta' + account_id: 'my-account' + skip_import_test: true + min_version: 'beta' custom_code: !ruby/object:Provider::Terraform::CustomCode extra_schema_entry: templates/terraform/extra_schema_entry/workflow.erb encoder: templates/terraform/encoders/workflow.go.erb @@ -108,3 +116,8 @@ properties: The KMS key used to encrypt workflow and execution data. Format: projects/{project}/locations/{location}/keyRings/{keyRing}/cryptoKeys/{cryptoKey} + - !ruby/object:Api::Type::KeyValuePairs + name: 'userEnvVars' + min_version: beta + description: | + User-defined environment variables associated with this workflow revision. This map has a maximum length of 20. Each string can take up to 40KiB. Keys cannot be empty strings and cannot start with “GOOGLE” or “WORKFLOWS". 
diff --git a/mmv1/templates/terraform/examples/alloydb_secondary_instance_basic.tf.erb b/mmv1/templates/terraform/examples/alloydb_secondary_instance_basic.tf.erb new file mode 100644 index 000000000000..bd4c628c017e --- /dev/null +++ b/mmv1/templates/terraform/examples/alloydb_secondary_instance_basic.tf.erb @@ -0,0 +1,75 @@ +resource "google_alloydb_cluster" "primary" { + cluster_id = "<%= ctx[:vars]['alloydb_primary_cluster_name'] %>" + location = "us-central1" + network = google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "<%= ctx[:vars]['alloydb_primary_instance_name'] %>" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } + + depends_on = [google_service_networking_connection.vpc_connection] +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "<%= ctx[:vars]['alloydb_secondary_cluster_name'] %>" + location = "us-east1" + network = google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + # Need lifecycle.ignore_changes because instance_type is an immutable field. + # And when promoting cluster from SECONDARY to PRIMARY, the instance_type of the associated secondary instance also changes and becomes PRIMARY. 
+ # And we do not want terraform to destroy and create the instance because the field is immutable + lifecycle { + ignore_changes = [instance_type] + } + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "<%= ctx[:primary_resource_id] %>" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "<%= ctx[:vars]['alloydb_secondary_instance_name'] %>" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + depends_on = [google_service_networking_connection.vpc_connection] +} + +data "google_project" "project" {} + +resource "google_compute_network" "default" { + name = "<%= ctx[:vars]['network_name'] %>" +} + +resource "google_compute_global_address" "private_ip_alloc" { + name = "<%= ctx[:vars]['alloydb_secondary_instance_name'] %>" + address_type = "INTERNAL" + purpose = "VPC_PEERING" + prefix_length = 16 + network = google_compute_network.default.id +} + +resource "google_service_networking_connection" "vpc_connection" { + network = google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} diff --git a/mmv1/templates/terraform/examples/alloydb_secondary_instance_basic_test.tf.erb b/mmv1/templates/terraform/examples/alloydb_secondary_instance_basic_test.tf.erb new file mode 100644 index 000000000000..c6cfdce0f8d4 --- /dev/null +++ b/mmv1/templates/terraform/examples/alloydb_secondary_instance_basic_test.tf.erb @@ -0,0 +1,50 @@ +resource "google_alloydb_cluster" "primary" { + cluster_id = "<%= ctx[:vars]['alloydb_primary_cluster_name'] %>" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "<%= ctx[:vars]['alloydb_primary_instance_name'] %>" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + 
+resource "google_alloydb_cluster" "secondary" { + cluster_id = "<%= ctx[:vars]['alloydb_secondary_cluster_name'] %>" + location = "us-east1" + network = data.google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "<%= ctx[:primary_resource_id] %>" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "<%= ctx[:vars]['alloydb_secondary_instance_name'] %>" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "<%= ctx[:vars]['network_name'] %>" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/org_policy_custom_constraint_basic.tf.erb b/mmv1/templates/terraform/examples/org_policy_custom_constraint_basic.tf.erb index 32b23fc4b45f..61a4805230b8 100644 --- a/mmv1/templates/terraform/examples/org_policy_custom_constraint_basic.tf.erb +++ b/mmv1/templates/terraform/examples/org_policy_custom_constraint_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_org_policy_custom_constraint" "<%= ctx[:primary_resource_id] %>" { - name = "custom.disableGkeAutoUpgrade" + name = "<%= ctx[:vars]['policy_name'] %>" parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" action_type = "ALLOW" diff --git a/mmv1/templates/terraform/examples/org_policy_custom_constraint_full.tf.erb b/mmv1/templates/terraform/examples/org_policy_custom_constraint_full.tf.erb index a75a21e907de..cf08b9504acd 100644 --- a/mmv1/templates/terraform/examples/org_policy_custom_constraint_full.tf.erb +++ b/mmv1/templates/terraform/examples/org_policy_custom_constraint_full.tf.erb @@ -1,6 +1,6 @@ resource "google_org_policy_custom_constraint" "<%= 
ctx[:primary_resource_id] %>" { - name = "custom.disableGkeAutoUpgrade" + name = "<%= ctx[:vars]['policy_name'] %>" parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" display_name = "Disable GKE auto upgrade" description = "Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced." diff --git a/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb b/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb index 7abb93e5b601..a2ef9d488324 100644 --- a/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb +++ b/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb @@ -11,6 +11,10 @@ resource "google_redis_cluster" "<%= ctx[:primary_resource_id] %>" { depends_on = [ google_network_connectivity_service_connection_policy.default ] + + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } } resource "google_network_connectivity_service_connection_policy" "default" { diff --git a/mmv1/templates/terraform/examples/redis_instance_basic.tf.erb b/mmv1/templates/terraform/examples/redis_instance_basic.tf.erb index ac403764b5ca..5cf774559a06 100644 --- a/mmv1/templates/terraform/examples/redis_instance_basic.tf.erb +++ b/mmv1/templates/terraform/examples/redis_instance_basic.tf.erb @@ -1,4 +1,8 @@ resource "google_redis_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]["instance_name"] %>" memory_size_gb = 1 + + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } } diff --git a/mmv1/templates/terraform/examples/redis_instance_cmek.tf.erb b/mmv1/templates/terraform/examples/redis_instance_cmek.tf.erb index acb9ba91a95e..c40840101d3f 100644 --- a/mmv1/templates/terraform/examples/redis_instance_cmek.tf.erb +++ b/mmv1/templates/terraform/examples/redis_instance_cmek.tf.erb @@ -17,6 +17,10 @@ resource "google_redis_instance" "<%= ctx[:primary_resource_id] %>" { other_key = "other_val" } customer_managed_key = 
google_kms_crypto_key.redis_key.id + + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } } resource "google_kms_key_ring" "redis_keyring" { diff --git a/mmv1/templates/terraform/examples/redis_instance_full.tf.erb b/mmv1/templates/terraform/examples/redis_instance_full.tf.erb index e737c4ec6111..2bcac657ff0c 100644 --- a/mmv1/templates/terraform/examples/redis_instance_full.tf.erb +++ b/mmv1/templates/terraform/examples/redis_instance_full.tf.erb @@ -28,6 +28,10 @@ resource "google_redis_instance" "<%= ctx[:primary_resource_id] %>" { } } } + + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } } // This example assumes this network already exists. diff --git a/mmv1/templates/terraform/examples/redis_instance_full_with_persistence_config.tf.erb b/mmv1/templates/terraform/examples/redis_instance_full_with_persistence_config.tf.erb index c0d7774feaff..4f8bbf2654c5 100644 --- a/mmv1/templates/terraform/examples/redis_instance_full_with_persistence_config.tf.erb +++ b/mmv1/templates/terraform/examples/redis_instance_full_with_persistence_config.tf.erb @@ -9,4 +9,8 @@ resource "google_redis_instance" "<%= ctx[:primary_resource_id] %>" { persistence_mode = "RDB" rdb_snapshot_period = "TWELVE_HOURS" } + + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } } diff --git a/mmv1/templates/terraform/examples/redis_instance_mrr.tf.erb b/mmv1/templates/terraform/examples/redis_instance_mrr.tf.erb index b0ab91f58bf2..2b038101dedf 100644 --- a/mmv1/templates/terraform/examples/redis_instance_mrr.tf.erb +++ b/mmv1/templates/terraform/examples/redis_instance_mrr.tf.erb @@ -18,6 +18,10 @@ resource "google_redis_instance" "<%= ctx[:primary_resource_id] %>" { my_key = "my_val" other_key = "other_val" } + + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } } // This example assumes this network already exists. 
diff --git a/mmv1/templates/terraform/examples/redis_instance_private_service.tf.erb b/mmv1/templates/terraform/examples/redis_instance_private_service.tf.erb index 90e0e4ff5698..9eedaf4a9fb3 100644 --- a/mmv1/templates/terraform/examples/redis_instance_private_service.tf.erb +++ b/mmv1/templates/terraform/examples/redis_instance_private_service.tf.erb @@ -40,4 +40,7 @@ resource "google_redis_instance" "<%= ctx[:primary_resource_id] %>" { depends_on = [google_service_networking_connection.private_service_connection] -} \ No newline at end of file + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } +} diff --git a/mmv1/templates/terraform/examples/redis_instance_private_service_test.tf.erb b/mmv1/templates/terraform/examples/redis_instance_private_service_test.tf.erb index afd73fa81f04..0b69e81cf45b 100644 --- a/mmv1/templates/terraform/examples/redis_instance_private_service_test.tf.erb +++ b/mmv1/templates/terraform/examples/redis_instance_private_service_test.tf.erb @@ -23,4 +23,8 @@ resource "google_redis_instance" "<%= ctx[:primary_resource_id] %>" { redis_version = "REDIS_4_0" display_name = "Terraform Test Instance" -} \ No newline at end of file + + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } +} diff --git a/mmv1/templates/terraform/examples/tpu_v2_vm_full.tf.erb b/mmv1/templates/terraform/examples/tpu_v2_vm_full.tf.erb index 38c2f5f288d5..2e5fe2a82689 100644 --- a/mmv1/templates/terraform/examples/tpu_v2_vm_full.tf.erb +++ b/mmv1/templates/terraform/examples/tpu_v2_vm_full.tf.erb @@ -58,6 +58,8 @@ resource "google_tpu_v2_vm" "<%= ctx[:primary_resource_id] %>" { } tags = ["foo"] + + depends_on = [time_sleep.wait_60_seconds] } resource "google_compute_subnetwork" "subnet" { @@ -92,3 +94,10 @@ resource "google_compute_disk" "disk" { type = "pd-ssd" zone = "us-central1-c" } + +# Wait after service account creation to limit eventual consistency errors. 
+resource "time_sleep" "wait_60_seconds" { + depends_on = [google_service_account.sa] + + create_duration = "60s" +} diff --git a/mmv1/templates/terraform/examples/uptime_check_config_http.tf.erb b/mmv1/templates/terraform/examples/uptime_check_config_http.tf.erb index a20f502162e6..3d13d3ab99f5 100644 --- a/mmv1/templates/terraform/examples/uptime_check_config_http.tf.erb +++ b/mmv1/templates/terraform/examples/uptime_check_config_http.tf.erb @@ -1,13 +1,20 @@ resource "google_monitoring_uptime_check_config" "<%= ctx[:primary_resource_id] %>" { display_name = "<%= ctx[:vars]["display_name"] %>" timeout = "60s" + user_labels = { + example-key = "example-value" + } http_check { path = "some-path" port = "8010" request_method = "POST" - content_type = "URL_ENCODED" + content_type = "USER_PROVIDED" + custom_content_type = "application/json" body = "Zm9vJTI1M0RiYXI=" + ping_config { + pings_count = 1 + } } monitored_resource { diff --git a/mmv1/templates/terraform/examples/uptime_check_tcp.tf.erb b/mmv1/templates/terraform/examples/uptime_check_tcp.tf.erb index c86059090301..46e4e78374b1 100644 --- a/mmv1/templates/terraform/examples/uptime_check_tcp.tf.erb +++ b/mmv1/templates/terraform/examples/uptime_check_tcp.tf.erb @@ -4,6 +4,9 @@ resource "google_monitoring_uptime_check_config" "<%= ctx[:primary_resource_id] tcp_check { port = 888 + ping_config { + pings_count = 2 + } } resource_group { diff --git a/mmv1/templates/terraform/examples/workflow_beta.tf.erb b/mmv1/templates/terraform/examples/workflow_beta.tf.erb new file mode 100644 index 000000000000..76a1fdcc3da9 --- /dev/null +++ b/mmv1/templates/terraform/examples/workflow_beta.tf.erb @@ -0,0 +1,47 @@ +resource "google_service_account" "test_account" { + provider = google-beta + account_id = "<%= ctx[:vars]['account_id'] %>" + display_name = "Test Service Account" +} + +resource "google_workflows_workflow" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + name = "<%= ctx[:vars]['name'] %>" + region 
= "us-central1" + description = "Magic" + service_account = google_service_account.test_account.id + labels = { + env = "test" + } + user_env_vars = { + foo = "BAR" + } + source_contents = <<-EOF + # This is a sample workflow. You can replace it with your source code. + # + # This workflow does the following: + # - reads current time and date information from an external API and stores + # the response in currentTime variable + # - retrieves a list of Wikipedia articles related to the day of the week + # from currentTime + # - returns the list of articles as an output of the workflow + # + # Note: In Terraform you need to escape the $$ or it will cause errors. + + - getCurrentTime: + call: http.get + args: + url: https://timeapi.io/api/Time/current/zone?timeZone=Europe/Amsterdam + result: currentTime + - readWikipedia: + call: http.get + args: + url: https://en.wikipedia.org/w/api.php + query: + action: opensearch + search: $${currentTime.body.dayOfWeek} + result: wikiResult + - returnOutput: + return: $${wikiResult.body[1]} +EOF +} diff --git a/mmv1/templates/terraform/pre_create/alloydb_instance.go.erb b/mmv1/templates/terraform/pre_create/alloydb_instance.go.erb new file mode 100644 index 000000000000..ba3d97920cd4 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/alloydb_instance.go.erb @@ -0,0 +1,5 @@ +// Read the config and call createsecondary api if instance_type is SECONDARY + +if instanceType := d.Get("instance_type"); instanceType == "SECONDARY" { + url = strings.Replace(url, "instances?instanceId", "instances:createsecondary?instanceId", 1) +} diff --git a/mmv1/templates/terraform/pre_delete/alloydb_cluster.go.erb b/mmv1/templates/terraform/pre_delete/alloydb_cluster.go.erb new file mode 100644 index 000000000000..743d7e857912 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/alloydb_cluster.go.erb @@ -0,0 +1,4 @@ +// Forcefully delete the secondary cluster and the dependent instances because deletion of secondary instance is not supported. 
+if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "FORCE" { + url = url + "?force=true" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/alloydb_instance.go.erb b/mmv1/templates/terraform/pre_delete/alloydb_instance.go.erb new file mode 100644 index 000000000000..68d0ca605bc4 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/alloydb_instance.go.erb @@ -0,0 +1,18 @@ +// Read the config and avoid calling the delete API if the instance_type is SECONDARY and instead return nil +// Returning nil is equivalent of returning a success message to the users +// This is done because deletion of secondary instance is not supported +// Instead users should be deleting the secondary cluster which will forcefully delete the associated secondary instance +// A warning message prompts the user to delete the associated secondary cluster. +// Users can always undo the delete secondary instance action by importing the deleted secondary instance by calling terraform import + +var instanceType interface{} +instanceTypeProp, err := expandAlloydbInstanceInstanceType(d.Get("instance_type"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("instance_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceTypeProp)) && (ok || !reflect.DeepEqual(v, instanceTypeProp)) { + instanceType = instanceTypeProp +} +if instanceType != nil && instanceType == "SECONDARY" { + log.Printf("[WARNING] This operation didn't delete the Secondary Instance %q. 
Please delete the associated Secondary Cluster as well to delete the entire cluster and the secondary instance.\n", d.Id()) + return nil +} diff --git a/mmv1/templates/terraform/pre_update/alloydb_cluster.go.erb b/mmv1/templates/terraform/pre_update/alloydb_cluster.go.erb new file mode 100644 index 000000000000..0c893e92f2cd --- /dev/null +++ b/mmv1/templates/terraform/pre_update/alloydb_cluster.go.erb @@ -0,0 +1,72 @@ +// Restrict modification of cluster_type from PRIMARY to SECONDARY as it is an invalid operation +if d.HasChange("cluster_type") && d.Get("cluster_type") == "SECONDARY" { + return fmt.Errorf("Can not convert a primary cluster to a secondary cluster.") +} + +// Restrict setting secondary_config if cluster_type is PRIMARY +if d.Get("cluster_type") == "PRIMARY" && !tpgresource.IsEmptyValue(reflect.ValueOf(d.Get("secondary_config"))) { + return fmt.Errorf("Can not set secondary config for primary cluster.") +} + +// Implementation for cluster promotion +if d.HasChange("cluster_type") && d.Get("cluster_type") == "PRIMARY" { + + if !d.HasChange("secondary_config") || !tpgresource.IsEmptyValue(reflect.ValueOf(d.Get("secondary_config"))) { + return fmt.Errorf("Remove the secondary_config field to promote the cluster to primary cluster.") + } + + // If necassary precondition checks for cluster promotion is fine ONLY then + // Promote cluster as a separate implementation within the update logic + + promoteUrl := strings.Split(url, "?updateMask=")[0] + ":promote" + emptyObj := make(map[string]interface{}) + + // Remove promote changes from obj and updateMask + delete(obj, "clusterType") + delete(obj, "secondaryConfig") + + index := 0 + for _, label := range updateMask { + if label != "clusterType" && label != "secondaryConfig" { + updateMask[index] = label + index++ + } + } + updateMask = updateMask[:index] + + // Update url with the new updateMask + url := strings.Split(url, "?updateMask=")[0] + url, err = transport_tpg.AddQueryParams(url, 
map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: promoteUrl, + UserAgent: userAgent, + Body: emptyObj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error promoting Cluster: %s", err) + } + + err = AlloydbOperationWaitTime( + config, res, project, "Promoting Cluster", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + return fmt.Errorf("Error waiting to promote Cluster: %s", err) + } + + log.Printf("[DEBUG] Finished promoting Cluster %q: %#v", d.Id(), res) + +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index 1f6a8af6f956..11ab70d515df 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ b/mmv1/third_party/terraform/go.mod.erb @@ -4,7 +4,7 @@ go 1.19 require ( cloud.google.com/go/bigtable v1.19.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.52.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.55.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 @@ -24,20 +24,20 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/hashstructure v1.1.0 github.com/sirupsen/logrus v1.8.1 - golang.org/x/net v0.16.0 + golang.org/x/net v0.17.0 golang.org/x/oauth2 v0.13.0 - google.golang.org/api v0.143.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 - google.golang.org/grpc v1.57.0 + google.golang.org/api v0.148.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a + google.golang.org/grpc v1.58.3 
google.golang.org/protobuf v1.31.0 ) require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.110.7 // indirect + cloud.google.com/go v0.110.8 // indirect cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.1 // indirect + cloud.google.com/go/iam v1.1.2 // indirect cloud.google.com/go/longrunning v0.5.1 // indirect github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect @@ -46,14 +46,14 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect - github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f // indirect - github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect + github.com/envoyproxy/go-control-plane v0.11.1 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/fatih/color v1.13.0 // indirect github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.3.1 // indirect @@ -86,12 +86,12 @@ require ( github.com/zclconf/go-cty v1.11.0 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.14.0 // indirect - golang.org/x/sync v0.3.0 // indirect + golang.org/x/sync v0.4.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - 
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect + google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/mmv1/third_party/terraform/go.sum b/mmv1/third_party/terraform/go.sum index 647a2b8726e0..40a46b2fcc1c 100644 --- a/mmv1/third_party/terraform/go.sum +++ b/mmv1/third_party/terraform/go.sum @@ -2,21 +2,23 @@ bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0 bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= -cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go/bigtable v1.19.0 h1:wiq9LT0kukfInzvy1joMDijCw/OD1UChpSbORXYn0LI= cloud.google.com/go/bigtable v1.19.0/go.mod h1:xl5kPa8PTkJjdBxg6qdGH88464nNqmbISHSRU+D2yFE= cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= -cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam 
v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.52.0 h1:KswxXF4E5iWv2ggktqv265zOvwmXA3mgma3UQfYA4tU= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.52.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.55.0 h1:MTP0IDIztk36l8ubHkEcL6lWMG8Enqu9AP3E4MoBFg0= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.55.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= @@ -61,11 +63,11 @@ github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= 
+github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -120,8 +122,9 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 h1:5/4TSDzpDnHQ8rKEEQBjRlYx77mHOvXu08oGchxej7o= github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932/go.mod h1:cC6EdPbj/17GFCPDK39NRarlMI+kt+O60S12cNB5J9Y= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= @@ -313,8 +316,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY 
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= @@ -325,8 +328,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -371,8 +374,8 
@@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA= -google.golang.org/api v0.143.0/go.mod h1:FoX9DO9hT7DLNn97OuoZAGSDuNAXdJRuGK98rSUgurk= +google.golang.org/api v0.148.0 h1:HBq4TZlN4/1pNcu0geJZ/Q50vIwIXT532UIMYoo0vOs= +google.golang.org/api v0.148.0/go.mod h1:8/TBgwaKjfqTdacOJrOv2+2Q6fBDU1uHKK06oGSkxzU= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -382,20 +385,20 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= -google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI= -google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/mmv1/third_party/terraform/provider/provider.go.erb b/mmv1/third_party/terraform/provider/provider.go.erb index 59ef633e6c3c..a05c3f5dc892 100644 --- a/mmv1/third_party/terraform/provider/provider.go.erb +++ b/mmv1/third_party/terraform/provider/provider.go.erb @@ -225,10 +225,14 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_alloydb_supported_database_flags": alloydb.DataSourceAlloydbSupportedDatabaseFlags(), "google_artifact_registry_repository": artifactregistry.DataSourceArtifactRegistryRepository(), "google_app_engine_default_service_account": appengine.DataSourceGoogleAppEngineDefaultServiceAccount(), + <% unless version == 'ga' -%> + "google_backup_dr_management_server": backupdr.DataSourceGoogleCloudBackupDRService(), + <% end -%> "google_beyondcorp_app_connection": beyondcorp.DataSourceGoogleBeyondcorpAppConnection(), "google_beyondcorp_app_connector": beyondcorp.DataSourceGoogleBeyondcorpAppConnector(), "google_beyondcorp_app_gateway": beyondcorp.DataSourceGoogleBeyondcorpAppGateway(), "google_billing_account": billing.DataSourceGoogleBillingAccount(), + "google_bigquery_dataset": bigquery.DataSourceGoogleBigqueryDataset(), "google_bigquery_default_service_account": bigquery.DataSourceGoogleBigqueryDefaultServiceAccount(), "google_certificate_manager_certificate_map": certificatemanager.DataSourceGoogleCertificateManagerCertificateMap(), "google_cloudbuild_trigger": cloudbuild.DataSourceGoogleCloudBuildTrigger(), diff --git a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_cluster_test.go b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_cluster_test.go index 041607c29316..55e01d0a2c54 100644 --- a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_cluster_test.go +++ 
b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_cluster_test.go @@ -702,3 +702,1042 @@ data "google_compute_global_address" "private_ip_alloc" { } `, context) } + +// This test passes if secondary cluster can be promoted +func TestAccAlloydbCluster_secondaryClusterPromote(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "secondary_cluster_location": "us-east1", + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryClusterWithInstance(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromote(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbInstance_secondaryClusterWithInstance(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = 
google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = data.google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +func testAccAlloydbCluster_secondaryClusterPromote(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = data.google_compute_network.default.id + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = false + } +} + +resource "google_alloydb_instance" 
"secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if secondary cluster can be promoted and updated simultaneously +func TestAccAlloydbCluster_secondaryClusterPromoteAndSimultaneousUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "secondary_cluster_location": "us-east1", + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryClusterWithInstance(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromoteAndSimultaneousUpdate(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + }, + }) +} + +func 
testAccAlloydbCluster_secondaryClusterPromoteAndSimultaneousUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = data.google_compute_network.default.id + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = true + } + + labels = { + foo = "bar" + } +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if secondary cluster can be promoted and the original primary can be deleted after promotion +func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteOriginalPrimary(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "secondary_cluster_location": "us-east1", + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryClusterWithInstance(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromote(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromoteAndDeleteOriginalPrimary(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbCluster_secondaryClusterPromoteAndDeleteOriginalPrimary(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = data.google_compute_network.default.id + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = false + } +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } 
+ + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if the promoted secondary cluster can be updated +func TestAccAlloydbCluster_secondaryClusterPromoteAndUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "secondary_cluster_location": "us-east1", + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryClusterWithInstance(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromote(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromoteAndUpdate(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", 
"terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbCluster_secondaryClusterPromoteAndUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = data.google_compute_network.default.id + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = false + } + + labels = { + foo = "bar" + } + +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if secondary cluster can be promoted with networkConfig and a specified allocated IP range +func TestAccAlloydbCluster_secondaryClusterPromoteWithNetworkConfigAndAllocatedIPRange(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + "address_name": acctest.BootstrapSharedTestGlobalAddress(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryClusterWithInstanceAndNetworkConfigAndAllocatedIPRange(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromoteWithNetworkConfigAndAllocatedIPRange(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbInstance_secondaryClusterWithInstanceAndNetworkConfigAndAllocatedIPRange(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network_config { + network = "projects/${data.google_project.project.number}/global/networks/${data.google_compute_network.default.name}" + allocated_ip_range = data.google_compute_global_address.private_ip_alloc.name + } +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-south1" + network_config { + network = 
"projects/${data.google_project.project.number}/global/networks/${data.google_compute_network.default.name}" + allocated_ip_range = data.google_compute_global_address.private_ip_alloc.name + } + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} + +data "google_compute_global_address" "private_ip_alloc" { + name = "%{address_name}" +} +`, context) +} + +func testAccAlloydbCluster_secondaryClusterPromoteWithNetworkConfigAndAllocatedIPRange(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network_config { + network = "projects/${data.google_project.project.number}/global/networks/${data.google_compute_network.default.name}" + allocated_ip_range = data.google_compute_global_address.private_ip_alloc.name + } +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-south1" + network_config { + network = 
"projects/${data.google_project.project.number}/global/networks/${data.google_compute_network.default.name}" + allocated_ip_range = data.google_compute_global_address.private_ip_alloc.name + } + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = false + } +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} + +data "google_compute_global_address" "private_ip_alloc" { + name = "%{address_name}" +} +`, context) +} + +// This test passes if automated backup policy and inital user can be added and deleted from the promoted secondary cluster +func TestAccAlloydbCluster_secondaryClusterPromoteAndAddAndDeleteAutomatedBackupPolicyAndInitialUser(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "secondary_cluster_location": "us-south1", + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + "hour": 23, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryClusterWithInstance(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + 
Config: testAccAlloydbCluster_secondaryClusterPromote(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromoteAndAddAutomatedBackupPolicyAndInitialUser(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromote(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbCluster_secondaryClusterPromoteAndAddAutomatedBackupPolicyAndInitialUser(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = 
data.google_compute_network.default.id + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = false + } + + initial_user { + user = "tf-test-alloydb-secondary-cluster%{random_suffix}" + password = "tf-test-alloydb-secondary-cluster%{random_suffix}" + } + + automated_backup_policy { + location = "%{secondary_cluster_location}" + backup_window = "1800s" + enabled = true + + weekly_schedule { + days_of_week = ["MONDAY"] + + start_times { + hours = %{hour} + minutes = 0 + seconds = 0 + nanos = 0 + } + } + + quantity_based_retention { + count = 1 + } + + labels = { + test = "tf-test-alloydb-secondary-cluster%{random_suffix}" + } + } +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if time based retention policy can be added and deleted from the promoted secondary cluster +func TestAccAlloydbCluster_secondaryClusterPromoteAndDeleteTimeBasedRetentionPolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "secondary_cluster_location": "us-south1", + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryClusterWithInstance(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, 
+ ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromote(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromoteWithTimeBasedRetentionPolicy(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromoteWithoutTimeBasedRetentionPolicy(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbCluster_secondaryClusterPromoteWithTimeBasedRetentionPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = 
"PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = data.google_compute_network.default.id + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = false + } + + initial_user { + user = "tf-test-alloydb-secondary-cluster%{random_suffix}" + password = "tf-test-alloydb-secondary-cluster%{random_suffix}" + } + + automated_backup_policy { + location = "%{secondary_cluster_location}" + backup_window = "1800s" + enabled = true + + weekly_schedule { + days_of_week = ["MONDAY"] + + start_times { + hours = 23 + minutes = 0 + seconds = 0 + nanos = 0 + } + } + time_based_retention { + retention_period = "4.5s" + } + } + lifecycle { + ignore_changes = [ + automated_backup_policy[0].time_based_retention + ] + } +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +func testAccAlloydbCluster_secondaryClusterPromoteWithoutTimeBasedRetentionPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + 
cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = data.google_compute_network.default.id + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = false + } + + initial_user { + user = "tf-test-alloydb-secondary-cluster%{random_suffix}" + password = "tf-test-alloydb-secondary-cluster%{random_suffix}" + } + + automated_backup_policy { + location = "%{secondary_cluster_location}" + backup_window = "1800s" + enabled = true + + weekly_schedule { + days_of_week = ["MONDAY"] + + start_times { + hours = 23 + minutes = 0 + seconds = 0 + nanos = 0 + } + } + } + lifecycle { + ignore_changes = [ + automated_backup_policy[0].time_based_retention + ] + } +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if continuous backup config can be enabled in the promoted secondary cluster +func TestAccAlloydbCluster_secondaryClusterPromoteAndAddContinuousBackupConfig(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "secondary_cluster_location": "us-south1", + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryClusterWithInstance(context), + }, + { + 
ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromote(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + { + Config: testAccAlloydbCluster_secondaryClusterPromoteAndAddContinuousBackupConfig(context), + }, + { + ResourceName: "google_alloydb_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "deletion_policy", "labels", "annotations", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbCluster_secondaryClusterPromoteAndAddContinuousBackupConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "%{secondary_cluster_location}" + network = data.google_compute_network.default.id + cluster_type = "PRIMARY" + + continuous_backup_config { + enabled = true + 
recovery_window_days = 14 + } + +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + lifecycle { + ignore_changes = [instance_type] + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_instance_test.go b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_instance_test.go new file mode 100644 index 000000000000..6cb158429e11 --- /dev/null +++ b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_secondary_instance_test.go @@ -0,0 +1,627 @@ +package alloydb_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +// This test passes if secondary instance's machine config can be updated +func TestAccAlloydbInstance_secondaryInstanceUpdateMachineConfig(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryInstanceInitial(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + { + Config: 
testAccAlloydbInstance_secondaryInstanceUpdateMachineConfig(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbInstance_secondaryInstanceInitial(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-east1" + network = data.google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +func testAccAlloydbInstance_secondaryInstanceUpdateMachineConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = 
"us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-east1" + network = data.google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 4 + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if we are able to create a secondary instance with an associated read-pool instance +func TestAccAlloydbInstance_secondaryInstanceWithReadPoolInstance(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryInstanceWithReadPoolInstance(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbInstance_secondaryInstanceWithReadPoolInstance(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-west1" + network = data.google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_instance" "read_pool" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-read-instance%{random_suffix}-read" + instance_type = "READ_POOL" + read_pool_config { + node_count = 4 + } + depends_on = [google_alloydb_instance.secondary] +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if we are able to create a secondary instance by specifying network_config.network and 
network_config.allocated_ip_range +func TestAccAlloydbCluster_secondaryInstanceWithNetworkConfigAndAllocatedIPRange(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + "address_name": acctest.BootstrapSharedTestGlobalAddress(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbCluster_secondaryInstanceWithNetworkConfigAndAllocatedIPRange(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbCluster_secondaryInstanceWithNetworkConfigAndAllocatedIPRange(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network_config { + network = data.google_compute_network.default.id + allocated_ip_range = data.google_compute_global_address.private_ip_alloc.name + } +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-west1" + network_config { + network = data.google_compute_network.default.id + allocated_ip_range = 
data.google_compute_global_address.private_ip_alloc.name + } + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} + +data "google_compute_global_address" "private_ip_alloc" { + name = "%{address_name}" +} +`, context) +} + +// This test passes if secondary instance's database flag config can be updated +func TestAccAlloydbInstance_secondaryInstanceUpdateDatabaseFlag(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryInstanceInitial(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + { + Config: testAccAlloydbInstance_secondaryInstanceUpdateDatabaseFlag(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", 
"instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbInstance_secondaryInstanceUpdateDatabaseFlag(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-east1" + network = data.google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + database_flags = { + "alloydb.enable_auto_explain" = "true" + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if secondary instance's query insight config can be updated +func TestAccAlloydbInstance_secondaryInstanceUpdateQueryInsightConfig(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryInstanceInitial(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + { + Config: testAccAlloydbInstance_secondaryInstanceUpdateQueryInsightConfig(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbInstance_secondaryInstanceUpdateQueryInsightConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource "google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-east1" + network = data.google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + 
instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + query_insights_config { + query_plans_per_minute = 10 + query_string_length = 2048 + record_application_tags = true + record_client_address = true + } +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} + +// This test passes if we are able to create a secondary instance with maximum fields +func TestAccAlloydbInstance_secondaryInstanceMaximumFields(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_secondaryInstanceMaximumFields(context), + }, + { + ResourceName: "google_alloydb_instance.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "instance_id", "reconciling", "update_time", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccAlloydbInstance_secondaryInstanceMaximumFields(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "primary" { + cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}" + location = "us-central1" + network = data.google_compute_network.default.id +} + +resource "google_alloydb_instance" "primary" { + cluster = google_alloydb_cluster.primary.name + instance_id = "tf-test-alloydb-primary-instance%{random_suffix}" + instance_type = "PRIMARY" + + machine_config { + cpu_count = 2 + } +} + +resource 
"google_alloydb_cluster" "secondary" { + cluster_id = "tf-test-alloydb-secondary-cluster%{random_suffix}" + location = "us-west1" + network = data.google_compute_network.default.id + cluster_type = "SECONDARY" + + continuous_backup_config { + enabled = false + } + + secondary_config { + primary_cluster_name = google_alloydb_cluster.primary.name + } + + deletion_policy = "FORCE" + + depends_on = [google_alloydb_instance.primary] +} + +resource "google_alloydb_instance" "secondary" { + cluster = google_alloydb_cluster.secondary.name + instance_id = "tf-test-alloydb-secondary-instance%{random_suffix}" + instance_type = google_alloydb_cluster.secondary.cluster_type + + machine_config { + cpu_count = 2 + } + + labels = { + test_label = "test-alloydb-label" + } + + query_insights_config { + query_plans_per_minute = 10 + query_string_length = 2048 + record_application_tags = true + record_client_address = true + } + + availability_type = "REGIONAL" +} + +data "google_project" "project" {} + +data "google_compute_network" "default" { + name = "%{network_name}" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_management_server.go.erb b/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_management_server.go.erb new file mode 100644 index 000000000000..e3652da8b873 --- /dev/null +++ b/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_management_server.go.erb @@ -0,0 +1,105 @@ +<% autogen_exception -%> +package backupdr +<% unless version == 'ga' -%> + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "strings" +) + +func DataSourceGoogleCloudBackupDRService() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceBackupDRManagementServer().Schema) + 
tpgresource.AddRequiredFieldsToSchema(dsSchema, "location") + + return &schema.Resource{ + Read: dataSourceGoogleCloudBackupDRServiceRead, + Schema: dsSchema, + } +} +func flattenBackupDRManagementServerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBackupDRManagementServerResourceResp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) map[string]interface{} { + if v == nil { + fmt.Printf("Interface is nil: %s", v) + } + fmt.Printf("Interface is : %s", v) + l := v.([]interface{}) + for _, raw := range l { + // Management server is a singleton resource. It is only present in one location per project. Hence returning only resource present. + return flattenBackupDRManagementServerResource(raw, d, config) + } + return nil +} +func flattenBackupDRManagementServerResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) map[string]interface{} { + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = flattenBackupDRManagementServerType(original["type"], d, config) + transformed["networks"] = flattenBackupDRManagementServerNetworks(original["networks"], d, config) + transformed["oauth2ClientId"] = flattenBackupDRManagementServerOauth2ClientId(original["oauth2ClientId"], d, config) + transformed["managementUri"] = flattenBackupDRManagementServerManagementUri(original["managementUri"], d, config) + transformed["name"] = flattenBackupDRManagementServerName(original["name"], d, config) + return transformed +} + +func dataSourceGoogleCloudBackupDRServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + billingProject := project + 
url, err := tpgresource.ReplaceVars(d, config, "{{BackupDRBasePath}}projects/{{project}}/locations/{{location}}/managementServers") + if err != nil { + return err + } + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + resourceResponse := flattenBackupDRManagementServerResourceResp(res["managementServers"], d, config) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + + if err := d.Set("type", resourceResponse["type"]); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + if err := d.Set("networks", resourceResponse["networks"]); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + if err := d.Set("oauth2_client_id", resourceResponse["oauth2ClientId"]); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + if err := d.Set("management_uri", resourceResponse["managementUri"]); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + + id := fmt.Sprintf("%s", resourceResponse["name"]) + d.SetId(id) + name := id[strings.LastIndex(id, "/")+1:] + d.Set("name", name) + return nil +} +<% end -%> diff --git a/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_management_server_test.go.erb b/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_management_server_test.go.erb new file mode 100644 index 000000000000..23fcd639a022 --- /dev/null +++ b/mmv1/third_party/terraform/services/backupdr/data_source_backup_dr_management_server_test.go.erb @@ -0,0 +1,56 @@ +<% autogen_exception -%> +package backupdr_test +<% unless version == 'ga' -%> + +import ( + "testing" + 
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceGoogleBackupDRManagementServer_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "backupdr-managementserver-basic"), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleBackupDRManagementServer_basic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_backup_dr_management_server.foo", "google_backup_dr_management_server.foo"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleBackupDRManagementServer_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_network" "default" { + name = "%{network_name}" +} + +resource "google_backup_dr_management_server" "foo" { + location = "us-central1" + name = "tf-test-management-server%{random_suffix}" + type = "BACKUP_RESTORE" + networks { + network = data.google_compute_network.default.id + peering_mode = "PRIVATE_SERVICE_ACCESS" + } +} + +data "google_backup_dr_management_server" "foo" { + location = "us-central1" + depends_on = [ google_backup_dr_management_server.foo ] +} +`, context) +} +<% end -%> \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_dataset.go b/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_dataset.go new file mode 100644 index 000000000000..c2089834a1b8 --- /dev/null +++ b/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_dataset.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigquery + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleBigqueryDataset() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceBigQueryDataset().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "dataset_id") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleBigqueryDatasetRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleBigqueryDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + dataset_id := d.Get("dataset_id").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project: %s", err) + } + + id := fmt.Sprintf("projects/%s/datasets/%s", project, dataset_id) + d.SetId(id) + err = resourceBigQueryDatasetRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_dataset_test.go b/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_dataset_test.go new file mode 100644 index 000000000000..a734217b9024 --- /dev/null +++ b/mmv1/third_party/terraform/services/bigquery/data_source_google_bigquery_dataset_test.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package bigquery_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceGoogleBigqueryDataset_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleBigqueryDataset_basic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_bigquery_dataset.bar", "google_bigquery_dataset.foo"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleBigqueryDataset_basic(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_bigquery_dataset" "foo" { + dataset_id = "tf_test_ds_%{random_suffix}" + friendly_name = "testing" + description = "This is a test description" + location = "US" + default_table_expiration_ms = 3600000 + } + + data "google_bigquery_dataset" "bar" { + dataset_id = google_bigquery_dataset.foo.dataset_id + } +`, context) +} diff --git a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table_test.go b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table_test.go index e96befc44df4..e30a5e05d378 100644 --- a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table_test.go +++ b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table_test.go @@ -1057,6 +1057,8 @@ func TestAccBigQueryExternalDataTable_CSV_WithSchema_UpdateAllowQuotedNewlines(t } func TestAccBigQueryDataTable_bigtable(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance + 
acctest.SkipIfVcr(t) t.Parallel() context := map[string]interface{}{ diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy_test.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy_test.go index 7974b0c82a8a..28a834acd86a 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy_test.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy_test.go @@ -166,6 +166,7 @@ func TestAccBigtableGCPolicy_multiplePolicies(t *testing.T) { } func TestAccBigtableGCPolicy_gcRulesPolicy(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance acctest.SkipIfVcr(t) t.Parallel() diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb index d20b70d5b532..59c9f1798148 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb @@ -699,6 +699,7 @@ be from 0 to 999,999,999 inclusive.`, "maintenance_interval" : { Type: schema.TypeString, Optional: true, + ForceNew: true, Description: `Specifies the frequency of planned maintenance events. 
The accepted values are: PERIODIC`, }, <% end -%> diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index e41d6e123624..6d747946cef6 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -6026,9 +6026,12 @@ resource "google_compute_image" "foobar" { guest_os_features { type = "VIRTIO_SCSI_MULTIQUEUE" } - guest_os_features { + guest_os_features { type = "UEFI_COMPATIBLE" - } + } + guest_os_features { + type = "SEV_CAPABLE" + } } resource "google_compute_instance" "foobar" { diff --git a/mmv1/third_party/terraform/services/container/data_source_google_container_cluster_test.go b/mmv1/third_party/terraform/services/container/data_source_google_container_cluster_test.go index 81fa64c14c9f..5389d6f15bb0 100644 --- a/mmv1/third_party/terraform/services/container/data_source_google_container_cluster_test.go +++ b/mmv1/third_party/terraform/services/container/data_source_google_container_cluster_test.go @@ -11,12 +11,15 @@ import ( func TestAccContainerClusterDatasource_zonal(t *testing.T) { t.Parallel() + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccContainerClusterDatasource_zonal(acctest.RandString(t, 10)), + Config: testAccContainerClusterDatasource_zonal(acctest.RandString(t, 10), networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( "data.google_container_cluster.kubes", @@ -38,12 +41,15 @@ func TestAccContainerClusterDatasource_zonal(t 
*testing.T) { func TestAccContainerClusterDatasource_regional(t *testing.T) { t.Parallel() + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), Steps: []resource.TestStep{ { - Config: testAccContainerClusterDatasource_regional(acctest.RandString(t, 10)), + Config: testAccContainerClusterDatasource_regional(acctest.RandString(t, 10), networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( "data.google_container_cluster.kubes", @@ -62,34 +68,40 @@ func TestAccContainerClusterDatasource_regional(t *testing.T) { }) } -func testAccContainerClusterDatasource_zonal(suffix string) string { +func testAccContainerClusterDatasource_zonal(suffix, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "kubes" { name = "tf-test-cluster-%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" + } data "google_container_cluster" "kubes" { name = google_container_cluster.kubes.name location = google_container_cluster.kubes.location } -`, suffix) +`, suffix, networkName, subnetworkName) } -func testAccContainerClusterDatasource_regional(suffix string) string { +func testAccContainerClusterDatasource_regional(suffix, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "kubes" { name = "tf-test-cluster-%s" location = "us-central1" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" + } data "google_container_cluster" "kubes" { name = google_container_cluster.kubes.name location = google_container_cluster.kubes.location } -`, suffix) +`, suffix, networkName, subnetworkName) } diff 
--git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index bf3663095dc6..aac2bfc21ce3 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -17,6 +17,8 @@ func TestAccContainerNodePool_basic(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -24,7 +26,7 @@ func TestAccContainerNodePool_basic(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_basic(cluster, np), + Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -40,6 +42,8 @@ func TestAccContainerNodePool_basicWithClusterId(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -47,7 +51,7 @@ func TestAccContainerNodePool_basicWithClusterId(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_basicWithClusterId(cluster, np), + Config: 
testAccContainerNodePool_basicWithClusterId(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -113,6 +117,8 @@ func TestAccContainerNodePool_namePrefix(t *testing.T) { t.Parallel() cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -120,7 +126,7 @@ func TestAccContainerNodePool_namePrefix(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_namePrefix(cluster, "tf-np-"), + Config: testAccContainerNodePool_namePrefix(cluster, "tf-np-", networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -138,6 +144,8 @@ func TestAccContainerNodePool_noName(t *testing.T) { t.Parallel() cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -145,7 +153,7 @@ func TestAccContainerNodePool_noName(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_noName(cluster), + Config: testAccContainerNodePool_noName(cluster, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -161,6 +169,8 @@ func TestAccContainerNodePool_withLoggingVariantUpdates(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := 
acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -168,7 +178,7 @@ func TestAccContainerNodePool_withLoggingVariantUpdates(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "DEFAULT"), + Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "DEFAULT", networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.with_logging_variant", @@ -176,7 +186,7 @@ func TestAccContainerNodePool_withLoggingVariantUpdates(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "MAX_THROUGHPUT"), + Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "MAX_THROUGHPUT", networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.with_logging_variant", @@ -184,7 +194,7 @@ func TestAccContainerNodePool_withLoggingVariantUpdates(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "DEFAULT"), + Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "DEFAULT", networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.with_logging_variant", @@ -200,6 +210,8 @@ func TestAccContainerNodePool_withNodeConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -207,7 +219,7 @@ func TestAccContainerNodePool_withNodeConfig(t 
*testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_withNodeConfig(cluster, nodePool), + Config: testAccContainerNodePool_withNodeConfig(cluster, nodePool, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np_with_node_config", @@ -218,7 +230,7 @@ func TestAccContainerNodePool_withNodeConfig(t *testing.T) { ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"}, }, resource.TestStep{ - Config: testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool), + Config: testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np_with_node_config", @@ -237,6 +249,8 @@ func TestAccContainerNodePool_withTaintsUpdate(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -244,7 +258,7 @@ func TestAccContainerNodePool_withTaintsUpdate(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_basic(cluster, nodePool), + Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -252,7 +266,7 @@ func TestAccContainerNodePool_withTaintsUpdate(t *testing.T) { ImportStateVerify: true, }, resource.TestStep{ - Config: testAccContainerNodePool_withTaintsUpdate(cluster, nodePool), + Config: testAccContainerNodePool_withTaintsUpdate(cluster, nodePool, networkName, 
subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -271,6 +285,8 @@ func TestAccContainerNodePool_withReservationAffinity(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -278,7 +294,7 @@ func TestAccContainerNodePool_withReservationAffinity(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withReservationAffinity(cluster, np), + Config: testAccContainerNodePool_withReservationAffinity(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", "node_config.0.reservation_affinity.#", "1"), @@ -301,6 +317,8 @@ func TestAccContainerNodePool_withReservationAffinitySpecific(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) reservation := fmt.Sprintf("tf-test-reservation-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -308,7 +326,7 @@ func TestAccContainerNodePool_withReservationAffinitySpecific(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withReservationAffinitySpecific(cluster, reservation, np), + Config: testAccContainerNodePool_withReservationAffinitySpecific(cluster, reservation, np, 
networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", "node_config.0.reservation_affinity.#", "1"), @@ -337,6 +355,8 @@ func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) { pid := envvar.GetTestProjectFromEnv() cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -344,7 +364,7 @@ func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withWorkloadMetadataConfig(cluster, np), + Config: testAccContainerNodePool_withWorkloadMetadataConfig(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_workload_metadata_config", "node_config.0.workload_metadata_config.0.mode", "GCE_METADATA"), @@ -356,7 +376,7 @@ func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withWorkloadMetadataConfig_gkeMetadata(pid, cluster, np), + Config: testAccContainerNodePool_withWorkloadMetadataConfig_gkeMetadata(pid, cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_workload_metadata_config", "node_config.0.workload_metadata_config.0.mode", "GKE_METADATA"), @@ -377,6 +397,8 @@ func TestAccContainerNodePool_withSandboxConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", 
acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -384,7 +406,7 @@ func TestAccContainerNodePool_withSandboxConfig(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withSandboxConfig(cluster, np), + Config: testAccContainerNodePool_withSandboxConfig(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_sandbox_config", "node_config.0.sandbox_config.0.sandbox_type", "gvisor"), @@ -406,6 +428,8 @@ func TestAccContainerNodePool_withKubeletConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -413,7 +437,7 @@ func TestAccContainerNodePool_withKubeletConfig(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "static", "100us", true, 2048), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "static", "100us", networkName, subnetworkName, true, 2048), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", "node_config.0.kubelet_config.0.cpu_cfs_quota", "true"), @@ -427,7 +451,7 @@ func TestAccContainerNodePool_withKubeletConfig(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "", "", false, 
1024), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "", "", networkName, subnetworkName, false, 1024), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", "node_config.0.kubelet_config.0.cpu_cfs_quota", "false"), @@ -449,6 +473,8 @@ func TestAccContainerNodePool_withInvalidKubeletCpuManagerPolicy(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -456,7 +482,7 @@ func TestAccContainerNodePool_withInvalidKubeletCpuManagerPolicy(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "dontexist", "100us", true, 1024), + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "dontexist", "100us", networkName, subnetworkName, true, 1024), ExpectError: regexp.MustCompile(`.*to be one of \[static none \].*`), }, }, @@ -468,6 +494,8 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -476,7 +504,7 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { Steps: []resource.TestStep{ // Create a node pool with empty `linux_node_config.sysctls`. 
{ - Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, ""), + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "", networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.with_linux_node_config", @@ -484,7 +512,7 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 100000"), + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 100000", networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.with_linux_node_config", @@ -493,7 +521,7 @@ func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { }, // Perform an update. { - Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 200000"), + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 200000", networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.with_linux_node_config", @@ -509,6 +537,8 @@ func TestAccContainerNodePool_withCgroupMode(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -516,7 +546,7 @@ func TestAccContainerNodePool_withCgroupMode(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withCgroupMode(cluster, np, "CGROUP_MODE_V2"), + Config: testAccContainerNodePool_withCgroupMode(cluster, np, "CGROUP_MODE_V2", networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -525,7 +555,7 @@ func TestAccContainerNodePool_withCgroupMode(t *testing.T) { }, // Perform 
an update. { - Config: testAccContainerNodePool_withCgroupMode(cluster, np, "CGROUP_MODE_UNSPECIFIED"), + Config: testAccContainerNodePool_withCgroupMode(cluster, np, "CGROUP_MODE_UNSPECIFIED", networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -701,6 +731,8 @@ func TestAccContainerNodePool_withBootDiskKmsKey(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { t.Fatal("Stopping the test because a role was added to the policy.") @@ -712,7 +744,7 @@ func TestAccContainerNodePool_withBootDiskKmsKey(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withBootDiskKmsKey(cluster, np), + Config: testAccContainerNodePool_withBootDiskKmsKey(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.with_boot_disk_kms_key", @@ -730,6 +762,8 @@ func TestAccContainerNodePool_withUpgradeSettings(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -737,7 +771,7 @@ func TestAccContainerNodePool_withUpgradeSettings(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, 2, 3, "SURGE", "", 0, 0.0, ""), + Config: 
testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 2, 3, "SURGE", "", 0, 0.0, ""), }, { ResourceName: "google_container_node_pool.with_upgrade_settings", @@ -745,7 +779,7 @@ func TestAccContainerNodePool_withUpgradeSettings(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, 2, 1, "SURGE", "", 0, 0.0, ""), + Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 2, 1, "SURGE", "", 0, 0.0, ""), }, { ResourceName: "google_container_node_pool.with_upgrade_settings", @@ -753,7 +787,7 @@ func TestAccContainerNodePool_withUpgradeSettings(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, 1, 1, "SURGE", "", 0, 0.0, ""), + Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 1, 1, "SURGE", "", 0, 0.0, ""), }, { ResourceName: "google_container_node_pool.with_upgrade_settings", @@ -761,7 +795,7 @@ func TestAccContainerNodePool_withUpgradeSettings(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, 0, 0, "BLUE_GREEN", "100s", 1, 0.0, "0s"), + Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 0, 0, "BLUE_GREEN", "100s", 1, 0.0, "0s"), }, { ResourceName: "google_container_node_pool.with_upgrade_settings", @@ -769,7 +803,7 @@ func TestAccContainerNodePool_withUpgradeSettings(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, 0, 0, "BLUE_GREEN", "100s", 0, 0.5, "1s"), + Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 0, 0, "BLUE_GREEN", "100s", 0, 0.5, "1s"), }, { ResourceName: "google_container_node_pool.with_upgrade_settings", @@ -785,6 +819,8 @@ func TestAccContainerNodePool_withGPU(t *testing.T) { cluster := 
fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -792,7 +828,7 @@ func TestAccContainerNodePool_withGPU(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_withGPU(cluster, np), + Config: testAccContainerNodePool_withGPU(cluster, np, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np_with_gpu", @@ -808,6 +844,9 @@ func TestAccContainerNodePool_withManagement(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + management := ` management { auto_repair = "false" @@ -820,7 +859,7 @@ func TestAccContainerNodePool_withManagement(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_withManagement(cluster, nodePool, ""), + Config: testAccContainerNodePool_withManagement(cluster, nodePool, "", networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_container_node_pool.np_with_management", "management.#", "1"), @@ -836,7 +875,7 @@ func TestAccContainerNodePool_withManagement(t *testing.T) { ImportStateVerify: true, }, resource.TestStep{ - Config: testAccContainerNodePool_withManagement(cluster, nodePool, management), + Config: testAccContainerNodePool_withManagement(cluster, nodePool, management, networkName, 
subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_container_node_pool.np_with_management", "management.#", "1"), @@ -860,6 +899,8 @@ func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -867,7 +908,7 @@ func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_withNodeConfigScopeAlias(cluster, np), + Config: testAccContainerNodePool_withNodeConfigScopeAlias(cluster, np, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np_with_node_config_scope_alias", @@ -884,6 +925,8 @@ func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -891,7 +934,7 @@ func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_regionalAutoscaling(cluster, np), + Config: testAccContainerNodePool_regionalAutoscaling(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( 
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "1"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "3"), @@ -903,7 +946,7 @@ func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) { ImportStateVerify: true, }, resource.TestStep{ - Config: testAccContainerNodePool_updateAutoscaling(cluster, np), + Config: testAccContainerNodePool_updateAutoscaling(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "0"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "5"), @@ -915,7 +958,7 @@ func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) { ImportStateVerify: true, }, resource.TestStep{ - Config: testAccContainerNodePool_basic(cluster, np), + Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), @@ -939,6 +982,8 @@ func TestAccContainerNodePool_totalSize(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -946,7 +991,7 @@ func TestAccContainerNodePool_totalSize(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_totalSize(cluster, np), + Config: testAccContainerNodePool_totalSize(cluster, 
np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_min_node_count", "4"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_max_node_count", "12"), @@ -959,7 +1004,7 @@ func TestAccContainerNodePool_totalSize(t *testing.T) { ImportStateVerify: true, }, resource.TestStep{ - Config: testAccContainerNodePool_updateTotalSize(cluster, np), + Config: testAccContainerNodePool_updateTotalSize(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_min_node_count", "2"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_max_node_count", "22"), @@ -972,7 +1017,7 @@ func TestAccContainerNodePool_totalSize(t *testing.T) { ImportStateVerify: true, }, resource.TestStep{ - Config: testAccContainerNodePool_basicTotalSize(cluster, np), + Config: testAccContainerNodePool_basicTotalSize(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), @@ -995,6 +1040,8 @@ func TestAccContainerNodePool_autoscaling(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1002,7 +1049,7 @@ func TestAccContainerNodePool_autoscaling(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: 
testAccContainerNodePool_autoscaling(cluster, np), + Config: testAccContainerNodePool_autoscaling(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "1"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "3"), @@ -1014,7 +1061,7 @@ func TestAccContainerNodePool_autoscaling(t *testing.T) { ImportStateVerify: true, }, resource.TestStep{ - Config: testAccContainerNodePool_updateAutoscaling(cluster, np), + Config: testAccContainerNodePool_updateAutoscaling(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "0"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "5"), @@ -1026,7 +1073,7 @@ func TestAccContainerNodePool_autoscaling(t *testing.T) { ImportStateVerify: true, }, resource.TestStep{ - Config: testAccContainerNodePool_basic(cluster, np), + Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), @@ -1049,6 +1096,8 @@ func TestAccContainerNodePool_resize(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1056,7 +1105,7 @@ func TestAccContainerNodePool_resize(t *testing.T) { CheckDestroy: 
testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_additionalZones(cluster, np), + Config: testAccContainerNodePool_additionalZones(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "node_count", "2"), ), @@ -1067,7 +1116,7 @@ func TestAccContainerNodePool_resize(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_resize(cluster, np), + Config: testAccContainerNodePool_resize(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "node_count", "3"), ), @@ -1086,6 +1135,8 @@ func TestAccContainerNodePool_version(t *testing.T) { t.Parallel() cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1093,7 +1144,7 @@ func TestAccContainerNodePool_version(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_version(cluster, np), + Config: testAccContainerNodePool_version(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1101,7 +1152,7 @@ func TestAccContainerNodePool_version(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_updateVersion(cluster, np), + Config: testAccContainerNodePool_updateVersion(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1109,7 +1160,7 @@ func TestAccContainerNodePool_version(t *testing.T) { ImportStateVerify: true, }, { - Config: 
testAccContainerNodePool_version(cluster, np), + Config: testAccContainerNodePool_version(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1125,6 +1176,8 @@ func TestAccContainerNodePool_regionalClusters(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1132,7 +1185,7 @@ func TestAccContainerNodePool_regionalClusters(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_regionalClusters(cluster, np), + Config: testAccContainerNodePool_regionalClusters(cluster, np, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -1148,6 +1201,8 @@ func TestAccContainerNodePool_012_ConfigModeAttr(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1155,7 +1210,7 @@ func TestAccContainerNodePool_012_ConfigModeAttr(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_012_ConfigModeAttr1(cluster, np), + Config: testAccContainerNodePool_012_ConfigModeAttr1(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1163,7 +1218,7 @@ func TestAccContainerNodePool_012_ConfigModeAttr(t 
*testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_012_ConfigModeAttr2(cluster, np), + Config: testAccContainerNodePool_012_ConfigModeAttr2(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1179,6 +1234,8 @@ func TestAccContainerNodePool_EmptyGuestAccelerator(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1187,7 +1244,7 @@ func TestAccContainerNodePool_EmptyGuestAccelerator(t *testing.T) { Steps: []resource.TestStep{ { // Test alternative way to specify an empty node pool - Config: testAccContainerNodePool_EmptyGuestAccelerator(cluster, np), + Config: testAccContainerNodePool_EmptyGuestAccelerator(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1196,7 +1253,7 @@ func TestAccContainerNodePool_EmptyGuestAccelerator(t *testing.T) { }, { // Test alternative way to specify an empty node pool - Config: testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np, 1), + Config: testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np, networkName, subnetworkName, 1), }, { ResourceName: "google_container_node_pool.np", @@ -1205,13 +1262,13 @@ func TestAccContainerNodePool_EmptyGuestAccelerator(t *testing.T) { }, { // Assert that changes in count from 1 result in a diff - Config: testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np, 2), + Config: testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np, networkName, subnetworkName, 2), ExpectNonEmptyPlan: true, PlanOnly: true, }, { // Assert that adding another accelerator block will also result in a diff - Config: 
testAccContainerNodePool_PartialEmptyGuestAccelerator2(cluster, np), + Config: testAccContainerNodePool_PartialEmptyGuestAccelerator2(cluster, np, networkName, subnetworkName), ExpectNonEmptyPlan: true, PlanOnly: true, }, @@ -1224,6 +1281,8 @@ func TestAccContainerNodePool_shieldedInstanceConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1231,7 +1290,7 @@ func TestAccContainerNodePool_shieldedInstanceConfig(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_shieldedInstanceConfig(cluster, np), + Config: testAccContainerNodePool_shieldedInstanceConfig(cluster, np, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -1249,6 +1308,8 @@ func TestAccContainerNodePool_concurrent(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np1 := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) np2 := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1256,7 +1317,7 @@ func TestAccContainerNodePool_concurrent(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_concurrentCreate(cluster, np1, np2), + Config: testAccContainerNodePool_concurrentCreate(cluster, np1, np2, networkName, subnetworkName), }, { 
ResourceName: "google_container_node_pool.np1", @@ -1269,7 +1330,7 @@ func TestAccContainerNodePool_concurrent(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_concurrentUpdate(cluster, np1, np2), + Config: testAccContainerNodePool_concurrentUpdate(cluster, np1, np2, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np1", @@ -1286,26 +1347,28 @@ func TestAccContainerNodePool_concurrent(t *testing.T) { } func TestAccContainerNodePool_withSoleTenantConfig(t *testing.T) { - t.Parallel() + t.Parallel() - cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) - np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccContainerNodePool_withSoleTenantConfig(cluster, np), - }, - { - ResourceName: "google_container_node_pool.with_sole_tenant_config", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withSoleTenantConfig(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_sole_tenant_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) } @@ -1316,6 +1379,8 @@ func 
TestAccContainerNodePool_ephemeralStorageConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1323,7 +1388,7 @@ func TestAccContainerNodePool_ephemeralStorageConfig(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_ephemeralStorageConfig(cluster, np), + Config: testAccContainerNodePool_ephemeralStorageConfig(cluster, np, networkName, subnetworkName), }, resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -1334,13 +1399,15 @@ func TestAccContainerNodePool_ephemeralStorageConfig(t *testing.T) { }) } -func testAccContainerNodePool_ephemeralStorageConfig(cluster, np string) string { +func testAccContainerNodePool_ephemeralStorageConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -1356,7 +1423,7 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } <% end %> @@ -1365,6 +1432,8 @@ func TestAccContainerNodePool_ephemeralStorageLocalSsdConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: 
func() { acctest.AccTestPreCheck(t) }, @@ -1372,7 +1441,7 @@ func TestAccContainerNodePool_ephemeralStorageLocalSsdConfig(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_ephemeralStorageLocalSsdConfig(cluster, np), + Config: testAccContainerNodePool_ephemeralStorageLocalSsdConfig(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1383,7 +1452,7 @@ func TestAccContainerNodePool_ephemeralStorageLocalSsdConfig(t *testing.T) { }) } -func testAccContainerNodePool_ephemeralStorageLocalSsdConfig(cluster, np string) string { +func testAccContainerNodePool_ephemeralStorageLocalSsdConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -1397,6 +1466,8 @@ resource "google_container_cluster" "cluster" { min_master_version = data.google_container_engine_versions.central1a.latest_master_version initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -1412,7 +1483,7 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } func TestAccContainerNodePool_localNvmeSsdBlockConfig(t *testing.T) { @@ -1420,6 +1491,8 @@ func TestAccContainerNodePool_localNvmeSsdBlockConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1427,7 +1500,7 @@ func TestAccContainerNodePool_localNvmeSsdBlockConfig(t *testing.T) { CheckDestroy: 
testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_localNvmeSsdBlockConfig(cluster, np), + Config: testAccContainerNodePool_localNvmeSsdBlockConfig(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1438,7 +1511,7 @@ func TestAccContainerNodePool_localNvmeSsdBlockConfig(t *testing.T) { }) } -func testAccContainerNodePool_localNvmeSsdBlockConfig(cluster, np string) string { +func testAccContainerNodePool_localNvmeSsdBlockConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -1452,6 +1525,8 @@ resource "google_container_cluster" "cluster" { min_master_version = data.google_container_engine_versions.central1a.latest_master_version initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -1467,7 +1542,7 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } func TestAccContainerNodePool_gcfsConfig(t *testing.T) { @@ -1475,6 +1550,8 @@ func TestAccContainerNodePool_gcfsConfig(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1482,7 +1559,7 @@ func TestAccContainerNodePool_gcfsConfig(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccContainerNodePool_gcfsConfig(cluster, np), + Config: testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName), }, 
resource.TestStep{ ResourceName: "google_container_node_pool.np", @@ -1493,13 +1570,15 @@ func TestAccContainerNodePool_gcfsConfig(t *testing.T) { }) } -func testAccContainerNodePool_gcfsConfig(cluster, np string) string { +func testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -1516,7 +1595,7 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } func TestAccContainerNodePool_gvnic(t *testing.T) { @@ -1524,6 +1603,8 @@ func TestAccContainerNodePool_gvnic(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1531,7 +1612,7 @@ func TestAccContainerNodePool_gvnic(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_gvnic(cluster, np), + Config: testAccContainerNodePool_gvnic(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -1542,13 +1623,15 @@ func TestAccContainerNodePool_gvnic(t *testing.T) { }) } -func testAccContainerNodePool_gvnic(cluster, np string) string { +func testAccContainerNodePool_gvnic(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } 
resource "google_container_node_pool" "np" { @@ -1565,7 +1648,7 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } func TestAccContainerNodePool_fastSocket(t *testing.T) { @@ -1573,6 +1656,8 @@ func TestAccContainerNodePool_fastSocket(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1580,7 +1665,7 @@ func TestAccContainerNodePool_fastSocket(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_fastSocket(cluster, np, true), + Config: testAccContainerNodePool_fastSocket(cluster, np, networkName, subnetworkName, true), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.fast_socket.0.enabled", "true"), @@ -1592,7 +1677,7 @@ func TestAccContainerNodePool_fastSocket(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_fastSocket(cluster, np, false), + Config: testAccContainerNodePool_fastSocket(cluster, np, networkName, subnetworkName, false), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.fast_socket.0.enabled", "false"), @@ -1607,7 +1692,7 @@ func TestAccContainerNodePool_fastSocket(t *testing.T) { }) } -func testAccContainerNodePool_fastSocket(cluster, np string, enabled bool) string { +func testAccContainerNodePool_fastSocket(cluster, np, networkName, subnetworkName string, enabled bool) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" @@ -1615,6 +1700,8 @@ resource 
"google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = "1.25" deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -1638,7 +1725,7 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, np, enabled) +`, cluster, networkName, subnetworkName, np, enabled) } func TestAccContainerNodePool_compactPlacement(t *testing.T) { @@ -1646,6 +1733,8 @@ func TestAccContainerNodePool_compactPlacement(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1653,7 +1742,7 @@ func TestAccContainerNodePool_compactPlacement(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_compactPlacement(cluster, np, "COMPACT"), + Config: testAccContainerNodePool_compactPlacement(cluster, np, "COMPACT", networkName, subnetworkName), }, { ResourceName: "google_container_cluster.cluster", @@ -1665,13 +1754,15 @@ func TestAccContainerNodePool_compactPlacement(t *testing.T) { }) } -func testAccContainerNodePool_compactPlacement(cluster, np, placementType string) string { +func testAccContainerNodePool_compactPlacement(cluster, np, placementType, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -1687,7 +1778,7 @@ resource "google_container_node_pool" "np" { type = "%s" } } -`, cluster, np, placementType) +`, cluster, networkName, subnetworkName, 
np, placementType) } func TestAccContainerNodePool_customPlacementPolicy(t *testing.T) { @@ -1696,6 +1787,8 @@ func TestAccContainerNodePool_customPlacementPolicy(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) policy := fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1703,7 +1796,7 @@ func TestAccContainerNodePool_customPlacementPolicy(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_customPlacementPolicy(cluster, np, policy), + Config: testAccContainerNodePool_customPlacementPolicy(cluster, np, policy, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.machine_type", "c2-standard-4"), resource.TestCheckResourceAttr("google_container_node_pool.np", "placement_policy.0.policy_name", policy), @@ -1719,13 +1812,15 @@ func TestAccContainerNodePool_customPlacementPolicy(t *testing.T) { }) } -func testAccContainerNodePool_customPlacementPolicy(cluster, np, policyName string) string { +func testAccContainerNodePool_customPlacementPolicy(cluster, np, policyName, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_compute_resource_policy" "policy" { @@ -1751,7 +1846,7 @@ resource "google_container_node_pool" "np" { policy_name = google_compute_resource_policy.policy.name } } -`, cluster, policyName, np) +`, cluster, networkName, 
subnetworkName, policyName, np) } func TestAccContainerNodePool_threadsPerCore(t *testing.T) { @@ -1759,6 +1854,8 @@ func TestAccContainerNodePool_threadsPerCore(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -1766,7 +1863,7 @@ func TestAccContainerNodePool_threadsPerCore(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_threadsPerCore(cluster, np, 1), + Config: testAccContainerNodePool_threadsPerCore(cluster, np, networkName, subnetworkName, 1), }, { ResourceName: "google_container_cluster.cluster", @@ -1778,13 +1875,15 @@ func TestAccContainerNodePool_threadsPerCore(t *testing.T) { }) } -func testAccContainerNodePool_threadsPerCore(cluster, np string, threadsPerCore int) string { +func testAccContainerNodePool_threadsPerCore(cluster, np, networkName, subnetworkName string, threadsPerCore int) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" node_config { machine_type = "c2-standard-4" @@ -1807,7 +1906,7 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, threadsPerCore, np, threadsPerCore) +`, cluster, networkName, subnetworkName, threadsPerCore, np, threadsPerCore) } func testAccCheckContainerNodePoolDestroyProducer(t *testing.T) func(s *terraform.State) error { @@ -1846,7 +1945,7 @@ func testAccCheckContainerNodePoolDestroyProducer(t *testing.T) func(s *terrafor } } -func testAccContainerNodePool_basic(cluster, np string) string { +func 
testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` provider "google" { alias = "user-project-override" @@ -1858,6 +1957,8 @@ resource "google_container_cluster" "cluster" { location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -1867,16 +1968,18 @@ resource "google_container_node_pool" "np" { cluster = google_container_cluster.cluster.name initial_node_count = 2 } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_withLoggingVariant(cluster, np, loggingVariant string) string { +func testAccContainerNodePool_withLoggingVariant(cluster, np, loggingVariant, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_logging_variant" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_logging_variant" { @@ -1888,10 +1991,10 @@ resource "google_container_node_pool" "with_logging_variant" { logging_variant = "%s" } } -`, cluster, np, loggingVariant) +`, cluster, networkName, subnetworkName, np, loggingVariant) } -func testAccContainerNodePool_basicWithClusterId(cluster, np string) string { +func testAccContainerNodePool_basicWithClusterId(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` provider "google" { alias = "user-project-override" @@ -1903,6 +2006,8 @@ resource "google_container_cluster" "cluster" { location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -1911,7 +2016,7 @@ resource "google_container_node_pool" "np" { cluster = google_container_cluster.cluster.id initial_node_count = 2 } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } 
func testAccContainerNodePool_nodeLocations(cluster, np, network string) string { @@ -2031,13 +2136,15 @@ resource "google_container_node_pool" "np" { `, network, cluster, np) } -func testAccContainerNodePool_regionalClusters(cluster, np string) string { +func testAccContainerNodePool_regionalClusters(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2046,16 +2153,18 @@ resource "google_container_node_pool" "np" { location = "us-central1" initial_node_count = 2 } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_namePrefix(cluster, np string) string { +func testAccContainerNodePool_namePrefix(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2064,16 +2173,18 @@ resource "google_container_node_pool" "np" { cluster = google_container_cluster.cluster.name initial_node_count = 2 } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_noName(cluster string) string { +func testAccContainerNodePool_noName(cluster, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2081,16 +2192,18 @@ resource "google_container_node_pool" "np" { cluster = google_container_cluster.cluster.name initial_node_count = 2 } -`, cluster) +`, cluster, networkName, 
subnetworkName) } -func testAccContainerNodePool_regionalAutoscaling(cluster, np string) string { +func testAccContainerNodePool_regionalAutoscaling(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2103,10 +2216,10 @@ resource "google_container_node_pool" "np" { max_node_count = 3 } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_totalSize(cluster, np string) string { +func testAccContainerNodePool_totalSize(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" @@ -2114,6 +2227,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 3 min_master_version = "1.27" deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2127,10 +2242,10 @@ resource "google_container_node_pool" "np" { location_policy = "BALANCED" } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_updateTotalSize(cluster, np string) string { +func testAccContainerNodePool_updateTotalSize(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" @@ -2138,6 +2253,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 3 min_master_version = "1.27" deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2151,11 +2268,11 @@ resource "google_container_node_pool" "np" { location_policy = "ANY" } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_basicTotalSize(cluster, np string) string { +func 
testAccContainerNodePool_basicTotalSize(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` provider "google" { alias = "user-project-override" @@ -2168,6 +2285,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 3 min_master_version = "1.27" deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2177,16 +2296,18 @@ resource "google_container_node_pool" "np" { cluster = google_container_cluster.cluster.name initial_node_count = 2 } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_autoscaling(cluster, np string) string { +func testAccContainerNodePool_autoscaling(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2199,16 +2320,18 @@ resource "google_container_node_pool" "np" { max_node_count = 3 } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_updateAutoscaling(cluster, np string) string { +func testAccContainerNodePool_updateAutoscaling(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2221,10 +2344,10 @@ resource "google_container_node_pool" "np" { max_node_count = 5 } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_additionalZones(cluster, nodePool string) string { +func testAccContainerNodePool_additionalZones(cluster, nodePool, networkName, subnetworkName string) string { return fmt.Sprintf(` resource 
"google_container_cluster" "cluster" { name = "%s" @@ -2236,6 +2359,8 @@ resource "google_container_cluster" "cluster" { "us-central1-c", ] deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2244,10 +2369,10 @@ resource "google_container_node_pool" "np" { cluster = google_container_cluster.cluster.name node_count = 2 } -`, cluster, nodePool) +`, cluster, networkName, subnetworkName, nodePool) } -func testAccContainerNodePool_resize(cluster, nodePool string) string { +func testAccContainerNodePool_resize(cluster, nodePool, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" @@ -2259,6 +2384,8 @@ resource "google_container_cluster" "cluster" { "us-central1-c", ] deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2267,10 +2394,10 @@ resource "google_container_node_pool" "np" { cluster = google_container_cluster.cluster.name node_count = 3 } -`, cluster, nodePool) +`, cluster, networkName, subnetworkName, nodePool) } -func testAccContainerNodePool_withManagement(cluster, nodePool, management string) string { +func testAccContainerNodePool_withManagement(cluster, nodePool, management, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" @@ -2280,6 +2407,8 @@ resource "google_container_cluster" "cluster" { channel = "UNSPECIFIED" } deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np_with_management" { @@ -2296,16 +2425,18 @@ resource "google_container_node_pool" "np_with_management" { oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"] } } -`, cluster, nodePool, management) +`, cluster, networkName, subnetworkName, nodePool, management) } -func testAccContainerNodePool_withNodeConfig(cluster, nodePool string) string { 
+func testAccContainerNodePool_withNodeConfig(cluster, nodePool, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np_with_node_config" { @@ -2351,16 +2482,18 @@ resource "google_container_node_pool" "np_with_node_config" { } } } -`, cluster, nodePool) +`, cluster, networkName, subnetworkName, nodePool) } -func testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool string) string { +func testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np_with_node_config" { @@ -2408,10 +2541,10 @@ resource "google_container_node_pool" "np_with_node_config" { } } } -`, cluster, nodePool) +`, cluster, networkName, subnetworkName, nodePool) } -func testAccContainerNodePool_withTaintsUpdate(cluster, np string) string { +func testAccContainerNodePool_withTaintsUpdate(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` provider "google" { alias = "user-project-override" @@ -2423,6 +2556,8 @@ resource "google_container_cluster" "cluster" { location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2442,10 +2577,10 @@ resource "google_container_node_pool" "np" { } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_withReservationAffinity(cluster, np string) string { +func testAccContainerNodePool_withReservationAffinity(cluster, np, networkName, subnetworkName string) string { return 
fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -2457,6 +2592,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_reservation_affinity" { @@ -2475,10 +2612,10 @@ resource "google_container_node_pool" "with_reservation_affinity" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_withReservationAffinitySpecific(cluster, reservation, np string) string { +func testAccContainerNodePool_withReservationAffinitySpecific(cluster, reservation, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -2490,6 +2627,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_compute_reservation" "gce_reservation" { @@ -2526,11 +2665,11 @@ resource "google_container_node_pool" "with_reservation_affinity" { } } } -`, cluster, reservation, np) +`, cluster, networkName, subnetworkName, reservation, np) } -func testAccContainerNodePool_withWorkloadMetadataConfig(cluster, np string) string { +func testAccContainerNodePool_withWorkloadMetadataConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -2542,6 +2681,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource 
"google_container_node_pool" "with_workload_metadata_config" { @@ -2561,10 +2702,10 @@ resource "google_container_node_pool" "with_workload_metadata_config" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_withWorkloadMetadataConfig_gkeMetadata(projectID, cluster, np string) string { +func testAccContainerNodePool_withWorkloadMetadataConfig_gkeMetadata(projectID, cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_project" "project" { project_id = "%s" @@ -2584,6 +2725,8 @@ resource "google_container_cluster" "cluster" { workload_pool = "${data.google_project.project.project_id}.svc.id.goog" } deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_workload_metadata_config" { @@ -2602,11 +2745,11 @@ resource "google_container_node_pool" "with_workload_metadata_config" { } } } -`, projectID, cluster, np) +`, projectID, cluster, networkName, subnetworkName, np) } <% unless version.nil? 
|| version == 'ga' -%> -func testAccContainerNodePool_withSandboxConfig(cluster, np string) string { +func testAccContainerNodePool_withSandboxConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -2618,6 +2761,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_sandbox_config" { @@ -2643,11 +2788,11 @@ resource "google_container_node_pool" "with_sandbox_config" { ] } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } <% end -%> -func testAccContainerNodePool_withKubeletConfig(cluster, np, policy, period string, quota bool, podPidsLimit int) string { +func testAccContainerNodePool_withKubeletConfig(cluster, np, policy, period, networkName, subnetworkName string, quota bool, podPidsLimit int) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -2659,6 +2804,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } # cpu_manager_policy & cpu_cfs_quota_period cannot be blank if cpu_cfs_quota is set to true @@ -2683,10 +2830,10 @@ resource "google_container_node_pool" "with_kubelet_config" { logging_variant = "DEFAULT" } } -`, cluster, np, policy, quota, period, podPidsLimit) +`, cluster, networkName, subnetworkName, np, policy, quota, period, podPidsLimit) } -func testAccContainerNodePool_withLinuxNodeConfig(cluster, np string, tcpMem string) string { +func testAccContainerNodePool_withLinuxNodeConfig(cluster, np, tcpMem, networkName, subnetworkName string) string { linuxNodeConfig 
:= ` linux_node_config { sysctls = {} @@ -2721,6 +2868,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_linux_node_config" { @@ -2737,10 +2886,10 @@ resource "google_container_node_pool" "with_linux_node_config" { ] } } -`, cluster, np, linuxNodeConfig) +`, cluster, networkName, subnetworkName, np, linuxNodeConfig) } -func testAccContainerNodePool_withCgroupMode(cluster, np string, mode string) string { +func testAccContainerNodePool_withCgroupMode(cluster, np, mode, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -2752,6 +2901,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -2770,7 +2921,7 @@ resource "google_container_node_pool" "np" { ] } } -`, cluster, np, mode) +`, cluster, networkName, subnetworkName, np, mode) } func testAccContainerNodePool_withNetworkConfig(cluster, np, network string) string { @@ -3002,7 +3153,7 @@ resource "google_container_node_pool" "with_multi_nic" { <% end -%> <% unless version.nil? 
|| version == 'ga' -%> -func testAccContainerNodePool_withBootDiskKmsKey(cluster, np string) string { +func testAccContainerNodePool_withBootDiskKmsKey(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -3025,6 +3176,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_boot_disk_kms_key" { @@ -3042,7 +3195,7 @@ resource "google_container_node_pool" "with_boot_disk_kms_key" { ] } } -`, cluster, cluster, cluster, np) +`, cluster, cluster, cluster, networkName, subnetworkName, np) } <% end -%> @@ -3071,7 +3224,7 @@ upgrade_settings { `, maxSurge, maxUnavailable, strategy) } -func testAccContainerNodePool_withUpgradeSettings(clusterName string, nodePoolName string, maxSurge int, maxUnavailable int, strategy string, nodePoolSoakDuration string, batchNodeCount int, batchPercentage float64, batchSoakDuration string) string { +func testAccContainerNodePool_withUpgradeSettings(clusterName, nodePoolName, networkName, subnetworkName string, maxSurge int, maxUnavailable int, strategy string, nodePoolSoakDuration string, batchNodeCount int, batchPercentage float64, batchSoakDuration string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1" { location = "us-central1" @@ -3083,6 +3236,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = "${data.google_container_engine_versions.central1.latest_master_version}" deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_upgrade_settings" { @@ -3092,10 +3247,10 @@ resource "google_container_node_pool" "with_upgrade_settings" { initial_node_count = 1 %s } -`, clusterName, 
nodePoolName, makeUpgradeSettings(maxSurge, maxUnavailable, strategy, nodePoolSoakDuration, batchNodeCount, batchPercentage, batchSoakDuration)) +`, clusterName, networkName, subnetworkName, nodePoolName, makeUpgradeSettings(maxSurge, maxUnavailable, strategy, nodePoolSoakDuration, batchNodeCount, batchPercentage, batchSoakDuration)) } -func testAccContainerNodePool_withGPU(cluster, np string) string { +func testAccContainerNodePool_withGPU(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1c" { location = "us-central1-c" @@ -3107,6 +3262,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1c.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np_with_gpu" { @@ -3147,16 +3304,18 @@ resource "google_container_node_pool" "np_with_gpu" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_withNodeConfigScopeAlias(cluster, np string) string { +func testAccContainerNodePool_withNodeConfigScopeAlias(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np_with_node_config_scope_alias" { @@ -3170,10 +3329,10 @@ resource "google_container_node_pool" "np_with_node_config_scope_alias" { oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"] } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_version(cluster, np string) string { +func testAccContainerNodePool_version(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" 
"central1a" { location = "us-central1-a" @@ -3185,6 +3344,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -3195,10 +3356,10 @@ resource "google_container_node_pool" "np" { version = data.google_container_engine_versions.central1a.valid_node_versions[1] } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_updateVersion(cluster, np string) string { +func testAccContainerNodePool_updateVersion(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -3210,6 +3371,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -3220,16 +3383,18 @@ resource "google_container_node_pool" "np" { version = data.google_container_engine_versions.central1a.valid_node_versions[0] } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_012_ConfigModeAttr1(cluster, np string) string { +func testAccContainerNodePool_012_ConfigModeAttr1(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-f" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -3246,16 +3411,18 @@ resource "google_container_node_pool" "np" { machine_type = "n1-highmem-4" } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func 
testAccContainerNodePool_012_ConfigModeAttr2(cluster, np string) string { +func testAccContainerNodePool_012_ConfigModeAttr2(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-f" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -3268,16 +3435,18 @@ resource "google_container_node_pool" "np" { guest_accelerator = [] } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_EmptyGuestAccelerator(cluster, np string) string { +func testAccContainerNodePool_EmptyGuestAccelerator(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-f" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -3294,16 +3463,18 @@ resource "google_container_node_pool" "np" { machine_type = "n1-highmem-4" } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np string, count int) string { +func testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np, networkName, subnetworkName string, count int) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-f" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -3325,16 +3496,18 @@ resource "google_container_node_pool" "np" { machine_type = "n1-highmem-4" } } -`, cluster, np, count) +`, cluster, networkName, subnetworkName, np, count) } -func testAccContainerNodePool_PartialEmptyGuestAccelerator2(cluster, np string) string { +func 
testAccContainerNodePool_PartialEmptyGuestAccelerator2(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-f" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -3361,16 +3534,18 @@ resource "google_container_node_pool" "np" { machine_type = "n1-highmem-4" } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_shieldedInstanceConfig(cluster, np string) string { +func testAccContainerNodePool_shieldedInstanceConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { @@ -3385,16 +3560,18 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } -func testAccContainerNodePool_concurrentCreate(cluster, np1, np2 string) string { +func testAccContainerNodePool_concurrentCreate(cluster, np1, np2, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np1" { @@ -3410,16 +3587,18 @@ resource "google_container_node_pool" "np2" { cluster = google_container_cluster.cluster.name initial_node_count = 2 } -`, cluster, np1, np2) +`, cluster, networkName, subnetworkName, np1, np2) } -func testAccContainerNodePool_concurrentUpdate(cluster, np1, np2 string) string { +func testAccContainerNodePool_concurrentUpdate(cluster, np1, np2, networkName, subnetworkName string) string { return fmt.Sprintf(` resource 
"google_container_cluster" "cluster" { name = "%s" location = "us-central1-a" initial_node_count = 3 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np1" { @@ -3437,10 +3616,10 @@ resource "google_container_node_pool" "np2" { initial_node_count = 2 version = "1.27.3-gke.1700" } -`, cluster, np1, np2) +`, cluster, networkName, subnetworkName, np1, np2) } -func testAccContainerNodePool_withSoleTenantConfig(cluster, np string) string { +func testAccContainerNodePool_withSoleTenantConfig(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" @@ -3465,6 +3644,8 @@ resource "google_container_cluster" "cluster" { initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_sole_tenant_config" { @@ -3487,7 +3668,7 @@ resource "google_container_node_pool" "with_sole_tenant_config" { ] } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } func TestAccContainerNodePool_withConfidentialNodes(t *testing.T) { @@ -3495,6 +3676,8 @@ func TestAccContainerNodePool_withConfidentialNodes(t *testing.T) { clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -3502,7 +3685,7 @@ func TestAccContainerNodePool_withConfidentialNodes(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withConfidentialNodes(clusterName, np), + Config: 
testAccContainerNodePool_withConfidentialNodes(clusterName, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -3510,7 +3693,7 @@ func TestAccContainerNodePool_withConfidentialNodes(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_disableConfidentialNodes(clusterName, np), + Config: testAccContainerNodePool_disableConfidentialNodes(clusterName, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -3518,7 +3701,7 @@ func TestAccContainerNodePool_withConfidentialNodes(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccContainerNodePool_withConfidentialNodes(clusterName, np), + Config: testAccContainerNodePool_withConfidentialNodes(clusterName, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.np", @@ -3529,11 +3712,11 @@ func TestAccContainerNodePool_withConfidentialNodes(t *testing.T) { }) } -func testAccContainerNodePool_withConfidentialNodes(clusterName string, np string) string { +func testAccContainerNodePool_withConfidentialNodes(clusterName, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - location = "asia-east1-c" + location = "us-central1-a" initial_node_count = 1 node_config { confidential_nodes { @@ -3542,11 +3725,13 @@ resource "google_container_cluster" "cluster" { machine_type = "n2-standard-2" } deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { name = "%s" - location = "asia-east1-c" + location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 1 node_config { @@ -3556,14 +3741,14 @@ resource "google_container_node_pool" "np" { } } } -`, clusterName, np) +`, clusterName, networkName, subnetworkName, np) } -func testAccContainerNodePool_disableConfidentialNodes(clusterName string, np string) string { +func 
testAccContainerNodePool_disableConfidentialNodes(clusterName, np, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - location = "asia-east1-c" + location = "us-central1-a" initial_node_count = 1 node_config { confidential_nodes { @@ -3572,11 +3757,13 @@ resource "google_container_cluster" "cluster" { machine_type = "n2-standard-2" } deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "np" { name = "%s" - location = "asia-east1-c" + location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 1 node_config { @@ -3586,7 +3773,7 @@ resource "google_container_node_pool" "np" { } } } -`, clusterName, np) +`, clusterName, networkName, subnetworkName, np) } func TestAccContainerNodePool_tpuTopology(t *testing.T) { @@ -3596,6 +3783,8 @@ func TestAccContainerNodePool_tpuTopology(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np1 := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) np2 := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -3603,7 +3792,7 @@ func TestAccContainerNodePool_tpuTopology(t *testing.T) { CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_tpuTopology(cluster, np1, np2, "2x2x2"), + Config: testAccContainerNodePool_tpuTopology(cluster, np1, np2, "2x2x2", networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.regular_pool", "node_config.0.machine_type", "n1-standard-4"), resource.TestCheckResourceAttr("google_container_node_pool.with_tpu_topology", 
"node_config.0.machine_type", "ct4p-hightpu-4t"), @@ -3620,30 +3809,31 @@ func TestAccContainerNodePool_tpuTopology(t *testing.T) { }) } -func testAccContainerNodePool_tpuTopology(cluster, np1, np2, tpuTopology string) string { +func testAccContainerNodePool_tpuTopology(cluster, np1, np2, tpuTopology, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "cluster" { name = "%s" - location = "us-central2-b" + location = "us-central1-a" initial_node_count = 1 deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "regular_pool" { - name = "%s" - location = "us-central2-b" - cluster = google_container_cluster.cluster.name - initial_node_count = 1 - - node_config { - machine_type = "n1-standard-4" + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 - } + node_config { + machine_type = "n1-standard-4" } +} resource "google_container_node_pool" "with_tpu_topology" { name = "%s" - location = "us-central2-b" + location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 2 @@ -3652,11 +3842,11 @@ resource "google_container_node_pool" "with_tpu_topology" { } placement_policy { - type = "COMPACT" - tpu_topology = "%s" + type = "COMPACT" + tpu_topology = "%s" } } -`, cluster, np1, np2, tpuTopology) +`, cluster, networkName, subnetworkName, np1, np2, tpuTopology) } <% unless version == 'ga' -%> @@ -3722,6 +3912,8 @@ func TestAccContainerNodePool_withConfidentialBootDisk(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) if acctest.BootstrapPSARole(t, "service-", "compute-system", 
"roles/cloudkms.cryptoKeyEncrypterDecrypter") { t.Fatal("Stopping the test because a role was added to the policy.") @@ -3733,7 +3925,7 @@ func TestAccContainerNodePool_withConfidentialBootDisk(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withConfidentialBootDisk(cluster, np, kms.CryptoKey.Name), + Config: testAccContainerNodePool_withConfidentialBootDisk(cluster, np, kms.CryptoKey.Name, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.with_confidential_boot_disk", @@ -3744,38 +3936,40 @@ func TestAccContainerNodePool_withConfidentialBootDisk(t *testing.T) { }) } -func testAccContainerNodePool_withConfidentialBootDisk(cluster, np string, kmsKeyName string) string { +func testAccContainerNodePool_withConfidentialBootDisk(cluster, np string, kmsKeyName, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" } resource "google_container_cluster" "cluster" { - name = "%s" - location = "us-central1-a" - initial_node_count = 1 - min_master_version = data.google_container_engine_versions.central1a.latest_master_version - deletion_protection = false + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "with_confidential_boot_disk" { - name = "%s" - location = "us-central1-a" - cluster = google_container_cluster.cluster.name - -node_config { - image_type = "COS_CONTAINERD" - boot_disk_kms_key = "%s" - oauth_scopes = [ + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + + node_config { + image_type = "COS_CONTAINERD" + boot_disk_kms_key = "%s" + oauth_scopes = [ 
"https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring", ] - enable_confidential_storage = true - machine_type = "n2-standard-2" - disk_type = "hyperdisk-balanced" + enable_confidential_storage = true + machine_type = "n2-standard-2" + disk_type = "hyperdisk-balanced" } } -`, cluster, np, kmsKeyName) +`, cluster, networkName, subnetworkName, np, kmsKeyName) } func TestAccContainerNodePool_withoutConfidentialBootDisk(t *testing.T) { @@ -3783,6 +3977,8 @@ func TestAccContainerNodePool_withoutConfidentialBootDisk(t *testing.T) { cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -3790,7 +3986,7 @@ func TestAccContainerNodePool_withoutConfidentialBootDisk(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withoutConfidentialBootDisk(cluster, np), + Config: testAccContainerNodePool_withoutConfidentialBootDisk(cluster, np, networkName, subnetworkName), }, { ResourceName: "google_container_node_pool.without_confidential_boot_disk", @@ -3801,36 +3997,38 @@ func TestAccContainerNodePool_withoutConfidentialBootDisk(t *testing.T) { }) } -func testAccContainerNodePool_withoutConfidentialBootDisk(cluster, np string) string { +func testAccContainerNodePool_withoutConfidentialBootDisk(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" } resource "google_container_cluster" "cluster" { - name = "%s" - location = "us-central1-a" - initial_node_count = 1 - min_master_version = 
data.google_container_engine_versions.central1a.latest_master_version - deletion_protection = false + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" } resource "google_container_node_pool" "without_confidential_boot_disk" { - name = "%s" - location = "us-central1-a" - cluster = google_container_cluster.cluster.name - - node_config { - image_type = "COS_CONTAINERD" - oauth_scopes = [ + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + + node_config { + image_type = "COS_CONTAINERD" + oauth_scopes = [ "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring", ] - enable_confidential_storage = false - machine_type = "n2-standard-2" - disk_type = "pd-balanced" + enable_confidential_storage = false + machine_type = "n2-standard-2" + disk_type = "pd-balanced" } } -`, cluster, np) +`, cluster, networkName, subnetworkName, np) } <% end -%> \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go index 441851b44d3a..210e77b9b791 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go @@ -1139,6 +1139,74 @@ func ResourceDataprocCluster() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Description: `List of preemptible instance names which have been assigned to the cluster.`, }, + "instance_flexibility_policy": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.`, + AtLeastOneOf: []string{ + "cluster_config.0.preemptible_worker_config.0.num_instances", + 
"cluster_config.0.preemptible_worker_config.0.preemptibility", + "cluster_config.0.preemptible_worker_config.0.disk_config", + "cluster_config.0.preemptible_worker_config.0.instance_flexibility_policy", + }, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_selection_list": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + AtLeastOneOf: []string{ + "cluster_config.0.preemptible_worker_config.0.instance_flexibility_policy.0.instance_selection_list", + }, + Description: `List of instance selection options that the group will use when creating new VMs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_types": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Full machine-type names, e.g. "n1-standard-16".`, + }, + "rank": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Description: `Preference of this instance selection. Lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.`, + }, + }, + }, + }, + "instance_selection_results": { + Type: schema.TypeList, + Computed: true, + Description: `A list of instance selection results in the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": { + Type: schema.TypeString, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Full machine-type names, e.g. 
"n1-standard-16".`, + }, + "vm_count": { + Type: schema.TypeInt, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Description: `Number of VM provisioned with the machine_type.`, + }, + }, + }, + }, + }, + }, + }, }, }, }, @@ -2095,12 +2163,46 @@ func expandPreemptibleInstanceGroupConfig(cfg map[string]interface{}) *dataproc. } } } + + if ifpc, ok := cfg["instance_flexibility_policy"]; ok { + ifps := ifpc.([]interface{}) + if len(ifps) > 0 { + flexibilityPolicy := ifps[0].(map[string]interface{}) + icg.InstanceFlexibilityPolicy = &dataproc.InstanceFlexibilityPolicy{} + if v, ok := flexibilityPolicy["instance_selection_list"]; ok { + icg.InstanceFlexibilityPolicy.InstanceSelectionList = expandInstanceSelectionList(v) + } + } + + } if p, ok := cfg["preemptibility"]; ok { icg.Preemptibility = p.(string) } return icg } +func expandInstanceSelectionList(v interface{}) []*dataproc.InstanceSelection { + instanceSelectionList := v.([]interface{}) + + instanceSelections := []*dataproc.InstanceSelection{} + for _, v1 := range instanceSelectionList { + instanceSelectionItem := v1.(map[string]interface{}) + machineTypes := []string{} + for _, machineType := range instanceSelectionItem["machine_types"].([]interface{}) { + machineTypes = append(machineTypes, machineType.(string)) + } + instanceSelection := &dataproc.InstanceSelection{ + MachineTypes: machineTypes, + } + if x, ok := instanceSelectionItem["rank"]; ok { + instanceSelection.Rank = int64(x.(int)) + } + instanceSelections = append(instanceSelections, instanceSelection) + } + + return instanceSelections +} + func expandMasterInstanceGroupConfig(cfg map[string]interface{}) *dataproc.InstanceGroupConfig { icg := &dataproc.InstanceGroupConfig{} @@ -2752,6 +2854,7 @@ func flattenPreemptibleInstanceGroupConfig(d *schema.ResourceData, icg *dataproc } disk := map[string]interface{}{} + instanceFlexibilityPolicy := map[string]interface{}{} data := map[string]interface{}{} if icg != nil { @@ -2763,12 +2866,45 @@ 
func flattenPreemptibleInstanceGroupConfig(d *schema.ResourceData, icg *dataproc disk["num_local_ssds"] = icg.DiskConfig.NumLocalSsds disk["boot_disk_type"] = icg.DiskConfig.BootDiskType } + if icg.InstanceFlexibilityPolicy != nil { + instanceFlexibilityPolicy["instance_selection_list"] = flattenInstanceSelectionList(icg.InstanceFlexibilityPolicy.InstanceSelectionList) + instanceFlexibilityPolicy["instance_selection_results"] = flattenInstanceSelectionResults(icg.InstanceFlexibilityPolicy.InstanceSelectionResults) + } } data["disk_config"] = []map[string]interface{}{disk} + data["instance_flexibility_policy"] = []map[string]interface{}{instanceFlexibilityPolicy} return []map[string]interface{}{data} } +func flattenInstanceSelectionList(is []*dataproc.InstanceSelection) []map[string]interface{} { + + instanceSelections := []map[string]interface{}{} + for _, v := range is { + instanceSelection := map[string]interface{}{} + if len(v.MachineTypes) > 0 { + instanceSelection["machine_types"] = v.MachineTypes + } + instanceSelection["rank"] = v.Rank + instanceSelections = append(instanceSelections, instanceSelection) + } + return instanceSelections + +} + +func flattenInstanceSelectionResults(isr []*dataproc.InstanceSelectionResult) []map[string]interface{} { + + instanceSelectionResults := []map[string]interface{}{} + for _, v := range isr { + instanceSelection := map[string]interface{}{} + instanceSelection["machine_type"] = v.MachineType + instanceSelection["vm_count"] = v.VmCount + instanceSelectionResults = append(instanceSelectionResults, instanceSelection) + } + return instanceSelectionResults + +} + func flattenMasterInstanceGroupConfig(d *schema.ResourceData, icg *dataproc.InstanceGroupConfig) []map[string]interface{} { disk := map[string]interface{}{} data := map[string]interface{}{} diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_migrate.go b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_migrate.go 
index a18dd3a19a24..768d639642b1 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_migrate.go +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_migrate.go @@ -961,6 +961,74 @@ func resourceDataprocClusterResourceV0() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Description: `List of preemptible instance names which have been assigned to the cluster.`, }, + "instance_flexibility_policy": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.`, + AtLeastOneOf: []string{ + "cluster_config.0.preemptible_worker_config.0.num_instances", + "cluster_config.0.preemptible_worker_config.0.preemptibility", + "cluster_config.0.preemptible_worker_config.0.disk_config", + "cluster_config.0.preemptible_worker_config.0.instance_flexibility_policy", + }, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_selection_list": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + AtLeastOneOf: []string{ + "cluster_config.0.preemptible_worker_config.0.instance_flexibility_policy.0.instance_selection_list", + }, + Description: `List of instance selection options that the group will use when creating new VMs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_types": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Full machine-type names, e.g. "n1-standard-16".`, + }, + "rank": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Description: `Preference of this instance selection. Lower number means higher preference. 
Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference.`, + }, + }, + }, + }, + "instance_selection_results": { + Type: schema.TypeList, + Computed: true, + Description: `A list of instance selection results in the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": { + Type: schema.TypeString, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Full machine-type names, e.g. "n1-standard-16".`, + }, + "vm_count": { + Type: schema.TypeInt, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Description: `Number of VMs provisioned with the machine_type.`, + }, + }, + }, + }, + }, + }, + }, }, }, }, diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_test.go.erb b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_test.go.erb index a747f05ca68b..c1e050b94491 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_test.go.erb @@ -105,7 +105,6 @@ func TestAccDataprocCluster_basic(t *testing.T) { }) } -<% if version == "ga" -%> func TestAccDataprocVirtualCluster_basic(t *testing.T) { t.Parallel() @@ -113,9 +112,8 @@ func TestAccDataprocVirtualCluster_basic(t *testing.T) { rnd := acctest.RandString(t, 10) pid := envvar.GetTestProjectFromEnv() version := "3.1-dataproc-7" - networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") - subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) - acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) 
acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -123,7 +121,7 @@ func TestAccDataprocVirtualCluster_basic(t *testing.T) { CheckDestroy: testAccCheckDataprocClusterDestroy(t), Steps: []resource.TestStep{ { - Config: testAccDataprocVirtualCluster_basic(pid, rnd, subnetworkName), + Config: testAccDataprocVirtualCluster_basic(pid, rnd, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.virtual_cluster", &cluster), @@ -145,7 +143,6 @@ func TestAccDataprocVirtualCluster_basic(t *testing.T) { }, }) } -<% end -%> func TestAccDataprocCluster_withAccelerators(t *testing.T) { t.Parallel() @@ -494,6 +491,29 @@ func TestAccDataprocCluster_spotSecondary(t *testing.T) { }) } +func TestAccDataprocCluster_spotWithInstanceFlexibilityPolicy(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_spotWithInstanceFlexibilityPolicy(rnd), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.spot_with_instance_flexibility_policy", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.spot_with_instance_flexibility_policy", "cluster_config.0.preemptible_worker_config.0.preemptibility", "SPOT"), + resource.TestCheckResourceAttr("google_dataproc_cluster.spot_with_instance_flexibility_policy", "cluster_config.0.preemptible_worker_config.0.instance_flexibility_policy.0.instance_selection_list.0.machine_types.0", "n2d-standard-2"), + resource.TestCheckResourceAttr("google_dataproc_cluster.spot_with_instance_flexibility_policy", 
"cluster_config.0.preemptible_worker_config.0.instance_flexibility_policy.0.instance_selection_list.0.rank", "3"), + ), + }, + }, + }) +} + func TestAccDataprocCluster_withStagingBucket(t *testing.T) { t.Parallel() @@ -1257,8 +1277,7 @@ resource "google_dataproc_cluster" "basic" { `, rnd) } -<% if version == "ga" -%> -func testAccDataprocVirtualCluster_basic(projectID, rnd, subnetworkName string) string { +func testAccDataprocVirtualCluster_basic(projectID, rnd, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_project" "project" { project_id = "%s" @@ -1267,11 +1286,8 @@ data "google_project" "project" { resource "google_container_cluster" "primary" { name = "tf-test-gke-%s" location = "us-central1-a" - cluster_config { - gce_cluster_config { - subnetwork = "%s" - } - } + network = "%s" + subnetwork = "%s" initial_node_count = 1 @@ -1320,9 +1336,8 @@ resource "google_dataproc_cluster" "virtual_cluster" { } } } -`, projectID, rnd, subnetworkName, projectID, rnd, rnd, rnd, rnd, rnd, rnd) +`, projectID, rnd, networkName, subnetworkName, projectID, rnd, rnd, rnd, rnd, rnd, rnd) } -<% end -%> func testAccCheckDataprocGkeClusterNodePoolsHaveRoles(cluster *dataproc.Cluster, roles ...string) func(s *terraform.State) error { return func(s *terraform.State) error { @@ -1825,6 +1840,47 @@ resource "google_dataproc_cluster" "spot_secondary" { `, rnd, subnetworkName) } +func testAccDataprocCluster_spotWithInstanceFlexibilityPolicy(rnd string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "spot_with_instance_flexibility_policy" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + master_config { + num_instances = "1" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + worker_config { + num_instances = "2" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + preemptible_worker_config { + num_instances = "3" + preemptibility = "SPOT" + disk_config 
{ + boot_disk_size_gb = 35 + } + instance_flexibility_policy { + instance_selection_list { + machine_types = ["n2d-standard-2"] + rank = 3 + } + } + } + } +} + `, rnd) +} + func testAccDataprocCluster_withStagingBucketOnly(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { diff --git a/mmv1/third_party/terraform/services/monitoring/resource_monitoring_alert_policy_test.go b/mmv1/third_party/terraform/services/monitoring/resource_monitoring_alert_policy_test.go index fa773fb57d52..857d40c00b24 100644 --- a/mmv1/third_party/terraform/services/monitoring/resource_monitoring_alert_policy_test.go +++ b/mmv1/third_party/terraform/services/monitoring/resource_monitoring_alert_policy_test.go @@ -322,6 +322,7 @@ resource "google_monitoring_alert_policy" "full" { documentation { content = "test content" mime_type = "text/markdown" + subject = "test subject" } } `, alertName, conditionName1, conditionName2) @@ -350,6 +351,7 @@ resource "google_monitoring_alert_policy" "mql" { documentation { content = "test content" mime_type = "text/markdown" + subject = "test subject" } } `, alertName, conditionName) @@ -383,6 +385,7 @@ resource "google_monitoring_alert_policy" "log" { documentation { content = "test content" mime_type = "text/markdown" + subject = "test subject" } } `, alertName, conditionName) @@ -441,6 +444,7 @@ resource "google_monitoring_alert_policy" "promql" { documentation { content = "test content" mime_type = "text/markdown" + subject = "test subject" } } `, alertName, conditionName) diff --git a/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_test.go b/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_test.go index 9d753eaf7e79..fe3ae91f99a3 100644 --- a/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_test.go +++ b/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_test.go @@ -142,6 +142,58 @@ func TestAccSpannerInstance_virtualUpdate(t 
*testing.T) { }) } +func TestAccSpannerInstance_basicWithAutoscalingUsingNodeConfig(t *testing.T) { + // Randomness + acctest.SkipIfVcr(t) + t.Parallel() + + displayName := fmt.Sprintf("spanner-test-%s-dname", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSpannerInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerInstance_basicWithAutoscalerConfigUsingNodeAsConfigs(displayName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_instance.basic", "state"), + ), + }, + { + ResourceName: "google_spanner_instance.basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSpannerInstance_basicWithAutoscalingUsingProcessingUnitConfig(t *testing.T) { + // Randomness + acctest.SkipIfVcr(t) + t.Parallel() + + displayName := fmt.Sprintf("spanner-test-%s-dname", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSpannerInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerInstance_basicWithAutoscalerConfigUsingProcessingUnitsAsConfigs(displayName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_instance.basic", "state"), + ), + }, + { + ResourceName: "google_spanner_instance.basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccSpannerInstance_basic(name string) string { return fmt.Sprintf(` resource "google_spanner_instance" "basic" { @@ -203,3 +255,53 @@ resource "google_spanner_instance" "basic" { } `, name, name, virtual) } + +func testAccSpannerInstance_basicWithAutoscalerConfigUsingNodeAsConfigs(name string) string { + return fmt.Sprintf(` 
+resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-dname" + num_nodes = 1 + autoscaling_config { + autoscaling_limits { + max_limit { + max_nodes = 2 + } + min_limit { + min_nodes = 1 + } + } + autoscaling_targets { + high_priority_cpu_utilization_percent = 65 + storage_utilization_percent = 95 + } + } +} +`, name, name) +} + +func testAccSpannerInstance_basicWithAutoscalerConfigUsingProcessingUnitsAsConfigs(name string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-dname" + num_nodes = 1 + autoscaling_config { + autoscaling_limits { + max_limit { + max_processing_units = 2000 + } + min_limit { + min_processing_units = 1000 + } + } + autoscaling_targets { + high_priority_cpu_utilization_percent = 65 + storage_utilization_percent = 95 + } + } +} +`, name, name) +} diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb index 93702b000d0e..34fbac4bb30b 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb @@ -80,6 +80,7 @@ var ( "settings.0.ip_configuration.0.allocated_ip_range", "settings.0.ip_configuration.0.enable_private_path_for_google_cloud_services", "settings.0.ip_configuration.0.psc_config", + "settings.0.ip_configuration.0.ssl_mode", } maintenanceWindowKeys = []string{ @@ -436,6 +437,7 @@ is set to true. Defaults to ZONAL.`, Type: schema.TypeBool, Optional: true, AtLeastOneOf: ipConfigurationKeys, + Description: `Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode.`, }, "private_network": { Type: schema.TypeString, @@ -480,6 +482,14 @@ is set to true. 
Defaults to ZONAL.`, }, }, }, + "ssl_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"ALLOW_UNENCRYPTED_AND_ENCRYPTED", "ENCRYPTED_ONLY", "TRUSTED_CLIENT_CERTIFICATE_REQUIRED"}, false), + Description: `Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcement options compared to require_ssl. To change this field, also set the corresponding value in require_ssl.`, + AtLeastOneOf: ipConfigurationKeys, + }, }, }, }, @@ -1382,6 +1392,7 @@ func expandIpConfiguration(configured []interface{}, databaseVersion string) *sq EnablePrivatePathForGoogleCloudServices: _ipConfiguration["enable_private_path_for_google_cloud_services"].(bool), ForceSendFields: forceSendFields, PscConfig: expandPscConfig(_ipConfiguration["psc_config"].(*schema.Set).List()), + SslMode: _ipConfiguration["ssl_mode"].(string), } } @@ -2189,6 +2200,7 @@ func flattenIpConfiguration(ipConfiguration *sqladmin.IpConfiguration) interface "allocated_ip_range": ipConfiguration.AllocatedIpRange, "require_ssl": ipConfiguration.RequireSsl, "enable_private_path_for_google_cloud_services": ipConfiguration.EnablePrivatePathForGoogleCloudServices, + "ssl_mode": ipConfiguration.SslMode, } if ipConfiguration.AuthorizedNetworks != nil { @@ -2487,4 +2499,4 @@ func validatePromoteConfigurations(masterInstanceName cty.Value, replicaConfigur return fmt.Errorf("Replica promote configuration check failed. 
Please remove replica_configuration and try again.") } return nil -} \ No newline at end of file +} diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go index 24fc65dd6b4b..6c571f05b173 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance_test.go @@ -1573,6 +1573,38 @@ func TestAccSQLDatabaseInstance_sqlMysqlDataCacheConfig(t *testing.T) { }) } +func TestAccSQLDatabaseInstance_sqlPostgresDataCacheConfig(t *testing.T) { + t.Parallel() + enterprisePlusInstanceName := "tf-test-enterprise-plus" + acctest.RandString(t, 10) + enterprisePlusTier := "db-perf-optimized-N-2" + enterpriseInstanceName := "tf-test-enterprise-" + acctest.RandString(t, 10) + enterpriseTier := "db-custom-2-13312" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleSqlDatabaseInstance_sqlPostgresDataCacheConfig(enterprisePlusInstanceName, enterprisePlusTier, "ENTERPRISE_PLUS"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("google_sql_database_instance.instance", "settings.0.data_cache_config.0.data_cache_enabled", "true"), + ), + }, + { + ResourceName: "google_sql_database_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testGoogleSqlDatabaseInstance_sqlPostgresDataCacheConfig(enterpriseInstanceName, enterpriseTier, "ENTERPRISE"), + ExpectError: regexp.MustCompile( + fmt.Sprintf("Error, failed to create instance %s: googleapi: Error 400: Invalid request: Only ENTERPRISE PLUS edition supports data cache.., invalid", 
enterpriseInstanceName)), + }, + }, + }) +} + func TestAccSqlDatabaseInstance_SqlServerAuditConfig(t *testing.T) { // Service Networking acctest.SkipIfVcr(t) @@ -2114,6 +2146,76 @@ func TestAccSqlDatabaseInstance_ReplicaPromoteSkippedWithNoMasterInstanceNameAnd }) } +func TestAccSqlDatabaseInstance_updateSslOptionsForPostgreSQL(t *testing.T) { + t.Parallel() + + databaseName := "tf-test-" + acctest.RandString(t, 10) + databaseVersion := "POSTGRES_14" + resourceName := "google_sql_database_instance.instance" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleSqlDatabaseInstance_setSslOptionsForPostgreSQL(databaseName, databaseVersion, false, "ALLOW_UNENCRYPTED_AND_ENCRYPTED"), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testGoogleSqlDatabaseInstance_setSslOptionsForPostgreSQL(databaseName, databaseVersion, false, "ENCRYPTED_ONLY"), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testGoogleSqlDatabaseInstance_setSslOptionsForPostgreSQL(databaseName, databaseVersion, true, "TRUSTED_CLIENT_CERTIFICATE_REQUIRED"), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testGoogleSqlDatabaseInstance_setSslOptionsForPostgreSQL(databaseName, databaseVersion, false, "ALLOW_UNENCRYPTED_AND_ENCRYPTED"), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func 
testGoogleSqlDatabaseInstance_setSslOptionsForPostgreSQL(databaseName string, databaseVersion string, requireSsl bool, sslMode string) string { + return fmt.Sprintf(` +resource "google_sql_database_instance" "instance" { + name = "%s" + region = "us-central1" + database_version = "%s" + deletion_protection = false + settings { + tier = "db-f1-micro" + ip_configuration { + ipv4_enabled = true + require_ssl = %t + ssl_mode = "%s" + } + } +}`, databaseName, databaseVersion, requireSsl, sslMode) +} + func testAccSqlDatabaseInstance_sqlMysqlInstancePvpExample(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_sql_database_instance" "mysql_pvp_instance_name" { @@ -2293,6 +2395,24 @@ resource "google_sql_database_instance" "instance" { }`, instanceName) } +func testGoogleSqlDatabaseInstance_sqlPostgresDataCacheConfig(instanceName, tier, edition string) string { + return fmt.Sprintf(` + +resource "google_sql_database_instance" "instance" { + name = "%s" + region = "us-east1" + database_version = "POSTGRES_14" + deletion_protection = false + settings { + tier = "%s" + edition = "%s" + data_cache_config { + data_cache_enabled = true + } + } +}`, instanceName, tier, edition) +} + func testGoogleSqlDatabaseInstance_SqlServerAuditConfig(databaseName, rootPassword, bucketName, uploadInterval, retentionInterval string) string { return fmt.Sprintf(` resource "google_storage_bucket" "gs-bucket" { diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb index 63e53fc334e9..7703e96c17f2 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb @@ -256,6 +256,13 @@ func ResourceStorageBucket() *schema.Resource { Description: `The bucket's Lifecycle Rules configuration.`, }, + "enable_object_retention": { + Type: schema.TypeBool, + 
Optional: true, + ForceNew: true, + Description: `Enables each object in the bucket to have its own retention policy, which prevents deletion until stored for a specific length of time.`, + }, + "versioning": { Type: schema.TypeList, Optional: true, @@ -594,7 +601,11 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error err = transport_tpg.Retry(transport_tpg.RetryOptions{ RetryFunc: func() error { - res, err = config.NewStorageClient(userAgent).Buckets.Insert(project, sb).Do() + insertCall := config.NewStorageClient(userAgent).Buckets.Insert(project, sb) + if d.Get("enable_object_retention").(bool) { + insertCall.EnableObjectRetention(true) + } + res, err = insertCall.Do() return err }, }) @@ -1121,6 +1132,16 @@ func flattenBucketRetentionPolicy(bucketRetentionPolicy *storage.BucketRetention return bucketRetentionPolicies } +func flattenBucketObjectRetention(bucketObjectRetention *storage.BucketObjectRetention) bool { + if bucketObjectRetention == nil { + return false + } + if bucketObjectRetention.Mode == "Enabled" { + return true + } + return false +} + func expandBucketVersioning(configured interface{}) *storage.BucketVersioning { versionings := configured.([]interface{}) if len(versionings) == 0 { @@ -1620,6 +1641,9 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res if err := d.Set("logging", flattenBucketLogging(res.Logging)); err != nil { return fmt.Errorf("Error setting logging: %s", err) } + if err := d.Set("enable_object_retention", flattenBucketObjectRetention(res.ObjectRetention)); err != nil { + return fmt.Errorf("Error setting object retention: %s", err) + } if err := d.Set("versioning", flattenBucketVersioning(res.Versioning)); err != nil { return fmt.Errorf("Error setting versioning: %s", err) } diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go index 
4ec802ea0e5e..926e16c160bd 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go @@ -207,10 +207,33 @@ func ResourceStorageBucketObject() *schema.Resource { }, }, + "retention": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"event_based_hold"}, + Description: `Object level retention configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retain_until_time": { + Type: schema.TypeString, + Required: true, + Description: `Time in RFC 3339 (e.g. 2030-01-01T02:03:04Z) until which object retention protects this object.`, + }, + "mode": { + Type: schema.TypeString, + Required: true, + Description: `The object retention mode. Supported values include: "Unlocked", "Locked".`, + }, + }, + }, + }, + "event_based_hold": { - Type: schema.TypeBool, - Optional: true, - Description: `Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any).`, + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"retention"}, + Description: `Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). 
After being released (set to false), such objects will be subject to bucket-level retention (if any).`, }, "temporary_hold": { @@ -312,6 +335,10 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) object.KmsKeyName = v.(string) } + if v, ok := d.GetOk("retention"); ok { + object.Retention = expandObjectRetention(v) + } + if v, ok := d.GetOk("event_based_hold"); ok { object.EventBasedHold = v.(bool) } @@ -357,6 +384,16 @@ func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error retrieving object during update %s: %s", name, err) } + hasRetentionChanges := d.HasChange("retention") + if hasRetentionChanges { + if v, ok := d.GetOk("retention"); ok { + res.Retention = expandObjectRetention(v) + } else { + res.Retention = nil + res.NullFields = append(res.NullFields, "Retention") + } + } + if d.HasChange("event_based_hold") { v := d.Get("event_based_hold") res.EventBasedHold = v.(bool) @@ -368,6 +405,9 @@ func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{}) } updateCall := objectsService.Update(bucket, name, res) + if hasRetentionChanges { + updateCall.OverrideUnlockedRetention(true) + } _, err = updateCall.Do() if err != nil { @@ -443,6 +483,9 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e if err := d.Set("media_link", res.MediaLink); err != nil { return fmt.Errorf("Error setting media_link: %s", err) } + if err := d.Set("retention", flattenObjectRetention(res.Retention)); err != nil { + return fmt.Errorf("Error setting retention: %s", err) + } if err := d.Set("event_based_hold", res.EventBasedHold); err != nil { return fmt.Errorf("Error setting event_based_hold: %s", err) } @@ -513,3 +556,34 @@ func expandCustomerEncryption(input []interface{}) map[string]string { } return expanded } + +func expandObjectRetention(configured interface{}) *storage.ObjectRetention { + retentions := configured.([]interface{}) + if len(retentions) 
== 0 { + return nil + } + retention := retentions[0].(map[string]interface{}) + + objectRetention := &storage.ObjectRetention{ + RetainUntilTime: retention["retain_until_time"].(string), + Mode: retention["mode"].(string), + } + + return objectRetention +} + +func flattenObjectRetention(objectRetention *storage.ObjectRetention) []map[string]interface{} { + retentions := make([]map[string]interface{}, 0, 1) + + if objectRetention == nil { + return retentions + } + + retention := map[string]interface{}{ + "mode": objectRetention.Mode, + "retain_until_time": objectRetention.RetainUntilTime, + } + + retentions = append(retentions, retention) + return retentions +} diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_test.go b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_test.go index cb6a59d0b8a3..54d0dbd78dfc 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_test.go +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_test.go @@ -415,6 +415,48 @@ func TestAccStorageObject_holds(t *testing.T) { }) } +func TestAccStorageObject_retention(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + data := []byte(content) + h := md5.New() + if _, err := h.Write(data); err != nil { + t.Errorf("error calculating md5: %v", err) + } + dataMd5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + testFile := getNewTmpTestFile(t, "tf-test") + if err := ioutil.WriteFile(testFile.Name(), data, 0644); err != nil { + t.Errorf("error writing file: %v", err) + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageObjectDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleStorageBucketsObjectRetention(bucketName, "2040-01-01T02:03:04.000Z"), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckGoogleStorageObject(t, bucketName, objectName, dataMd5), + ), + }, + { + Config: testGoogleStorageBucketsObjectRetention(bucketName, "2040-01-02T02:03:04.000Z"), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(t, bucketName, objectName, dataMd5), + ), + }, + { + Config: testGoogleStorageBucketsObjectRetentionDisabled(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(t, bucketName, objectName, dataMd5), + ), + }, + }, + }) +} + func testAccCheckGoogleStorageObject(t *testing.T, bucket, object, md5 string) resource.TestCheckFunc { return testAccCheckGoogleStorageObjectWithEncryption(t, bucket, object, md5, "") } @@ -646,6 +688,44 @@ resource "google_storage_bucket_object" "object" { `, bucketName, objectName, content, customerEncryptionKey) } +func testGoogleStorageBucketsObjectRetention(bucketName string, retainUntilTime string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + enable_object_retention = true +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = google_storage_bucket.bucket.name + content = "%s" + retention { + mode = "Unlocked" + retain_until_time = "%s" + } +} +`, bucketName, objectName, content, retainUntilTime) +} + +func testGoogleStorageBucketsObjectRetentionDisabled(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + enable_object_retention = true +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = google_storage_bucket.bucket.name + content = "%s" +} +`, bucketName, objectName, content) +} + func testGoogleStorageBucketsObjectHolds(bucketName string, eventBasedHold bool, temporaryHold bool) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { diff --git 
a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb index f3bf7f4f8e25..9cbd43fe8aa7 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb @@ -691,6 +691,47 @@ func TestAccStorageBucket_forceDestroyObjectDeleteError(t *testing.T) { }) } +func TestAccStorageBucket_enable_object_retention(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_enable_object_retention(bucketName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_enable_object_retention(bucketName, "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func TestAccStorageBucket_versioning(t *testing.T) { t.Parallel() @@ -698,9 +739,9 @@ func TestAccStorageBucket_versioning(t *testing.T) { bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccStorageBucketDestroyProducer(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccStorageBucket_versioning(bucketName, "true"), @@ -719,7 +760,7 @@ func TestAccStorageBucket_versioning(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"force_destroy"}, }, - { + { Config: testAccStorageBucket_versioning_empty(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( @@ -736,7 +777,7 @@ func TestAccStorageBucket_versioning(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"force_destroy"}, }, - { + { Config: testAccStorageBucket_versioning(bucketName, "false"), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( @@ -753,7 +794,7 @@ func TestAccStorageBucket_versioning(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"force_destroy"}, }, - { + { Config: testAccStorageBucket_versioning_empty(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckStorageBucketExists( @@ -779,9 +820,9 @@ func TestAccStorageBucket_logging(t *testing.T) { bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccStorageBucketDestroyProducer(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccStorageBucket_logging(bucketName, "log-bucket"), @@ -840,9 +881,9 @@ func TestAccStorageBucket_cors(t *testing.T) { bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccStorageBucketDestroyProducer(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), Steps: []resource.TestStep{ { Config: testGoogleStorageBucketsCors(bucketName), @@ -872,9 +913,9 @@ func TestAccStorageBucket_defaultEventBasedHold(t *testing.T) { bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, + PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccStorageBucketDestroyProducer(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccStorageBucket_defaultEventBasedHold(bucketName), @@ -1495,6 +1536,17 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } +func testAccStorageBucket_enable_object_retention(bucketName string, enabled string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = "true" + enable_object_retention = "%s" +} +`, bucketName, enabled) +} + func testAccStorageBucket_versioning(bucketName, enabled string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { diff --git a/mmv1/third_party/terraform/services/workflows/resource_workflows_workflow_test.go b/mmv1/third_party/terraform/services/workflows/resource_workflows_workflow_test.go.erb similarity index 62% rename from mmv1/third_party/terraform/services/workflows/resource_workflows_workflow_test.go rename to mmv1/third_party/terraform/services/workflows/resource_workflows_workflow_test.go.erb index 98eca1b42649..47330729fb86 100644 --- a/mmv1/third_party/terraform/services/workflows/resource_workflows_workflow_test.go +++ b/mmv1/third_party/terraform/services/workflows/resource_workflows_workflow_test.go.erb @@ -1,3 +1,4 @@ +<% autogen_exception -%> package 
workflows_test import ( @@ -211,3 +212,133 @@ EOF } `, workflowName, kmsKeyName) } + +<% unless version == 'ga' -%> +func TestAccWorkflowsWorkflowBeta_update(t *testing.T) { + // custom test to test updating + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkflowsWorkflowDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkflowsWorkflowBeta_full(context), + }, + { + Config: testAccWorkflowsWorkflowBeta_update(context), + }, + }, + }) +} + + +func testAccWorkflowsWorkflowBeta_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "test_account" { + provider = google-beta + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" +} + +resource "google_workflows_workflow" "example_beta" { + provider = google-beta + name = "tf_test_workflow_beta%{random_suffix}" + region = "us-central1" + description = "Magic" + service_account = google_service_account.test_account.id + labels = { + env = "test" + } + user_env_vars = { + foo = "BAR" + } + source_contents = <<-EOF + # This is a sample workflow. You can replace it with your source code. + # + # This workflow does the following: + # - reads current time and date information from an external API and stores + # the response in currentTime variable + # - retrieves a list of Wikipedia articles related to the day of the week + # from currentTime + # - returns the list of articles as an output of the workflow + # + # Note: In Terraform you need to escape the $$ or it will cause errors. 
+ + - getCurrentTime: + call: http.get + args: + url: https://timeapi.io/api/Time/current/zone?timeZone=Europe/Amsterdam + result: currentTime + - readWikipedia: + call: http.get + args: + url: https://en.wikipedia.org/w/api.php + query: + action: opensearch + search: $${currentTime.body.dayOfWeek} + result: wikiResult + - returnOutput: + return: $${wikiResult.body[1]} +EOF +} +`, context) +} + +func testAccWorkflowsWorkflowBeta_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "test_account" { + provider = google-beta + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" +} + +resource "google_workflows_workflow" "example_beta" { + provider = google-beta + name = "tf_test_workflow_beta%{random_suffix}" + region = "us-central1" + description = "Magic" + service_account = google_service_account.test_account.id + labels = { + env = "dev" + } + user_env_vars = { + bar = "FOO" + } + source_contents = <<-EOF + # This is a sample workflow. You can replace it with your source code. + # + # This workflow does the following: + # - reads current time and date information from an external API and stores + # the response in currentTime variable + # - retrieves a list of Wikipedia articles related to the day of the week + # from currentTime + # - returns the list of articles as an output of the workflow + # + # Note: In Terraform you need to escape the $$ or it will cause errors. 
+ + - getCurrentTime: + call: http.get + args: + url: https://timeapi.io/api/Time/current/zone?timeZone=Europe/Amsterdam + result: currentTime + - readWikipedia: + call: http.get + args: + url: https://en.wikipedia.org/w/api.php + query: + action: opensearch + search: $${currentTime.body.dayOfWeek} + result: wikiResult + - returnOutput: + return: $${wikiResult.body[1]} +EOF +} +`, context) +} +<% end -%> diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates.go b/mmv1/third_party/terraform/transport/error_retry_predicates.go index c1674aa80fd7..d7c21e6fef94 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates.go @@ -320,6 +320,16 @@ func DatastoreIndex409Contention(err error) (bool, string) { return false, "" } +// relevant for firestore in datastore mode +func FirestoreField409RetryUnderlyingDataChanged(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 409 && strings.Contains(gerr.Body, "Please retry, underlying data changed") { + return true, "underlying data changed - retrying" + } + } + return false, "" +} + func IapClient409Operation(err error) (bool, string) { if gerr, ok := err.(*googleapi.Error); ok { if gerr.Code == 409 && strings.Contains(strings.ToLower(gerr.Body), "operation was aborted") { diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates_test.go b/mmv1/third_party/terraform/transport/error_retry_predicates_test.go index 8288bfc1e19d..a5f9b61e044a 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates_test.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates_test.go @@ -170,3 +170,14 @@ func TestIsSwgAutogenRouterRetryableError_notReady(t *testing.T) { t.Errorf("Error not detected as retryable") } } + +func TestFirestoreField409_retryUnderlyingDataChanged(t *testing.T) { + err := googleapi.Error{ + Code: 409, + Body: "Please retry, underlying data 
changed", + } + isRetryable, _ := FirestoreField409RetryUnderlyingDataChanged(&err) + if !isRetryable { + t.Errorf("Error not detected as retryable") + } +} diff --git a/mmv1/third_party/terraform/website/docs/d/backup_dr_management_server.html.markdown b/mmv1/third_party/terraform/website/docs/d/backup_dr_management_server.html.markdown new file mode 100644 index 000000000000..72c481acbacf --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/backup_dr_management_server.html.markdown @@ -0,0 +1,32 @@ +--- +subcategory: "BackupDR Management Server" +description: |- + Get information about a Backup DR Management server. +--- + +# google\_backup\_dr\_management\_server + +Get information about a Google Backup DR Management server. + +~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. +See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. + +## Example Usage + +```hcl +data "google_backup_dr_management_server" "my-backup-dr-management-server" { + location = "us-central1" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `location` - (Required) The location in which the management server resource belongs. + +- - - + +## Attributes Reference + +See [google_backupdr_management_server](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/backup_dr_management_server) resource for details of the available attributes. diff --git a/mmv1/third_party/terraform/website/docs/d/bigquery_dataset.html.markdown b/mmv1/third_party/terraform/website/docs/d/bigquery_dataset.html.markdown new file mode 100644 index 000000000000..93df0f62a202 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/bigquery_dataset.html.markdown @@ -0,0 +1,33 @@ +--- +subcategory: "BigQuery" +description: |- + A datasource to retrieve information about a BigQuery dataset.
+--- + +# `google_bigquery_dataset` + +Get information about a BigQuery dataset. For more information see +the [official documentation](https://cloud.google.com/bigquery/docs) +and [API](https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets). + +## Example Usage + +```hcl +data "google_bigquery_dataset" "dataset" { + dataset_id = "my-bq-dataset" + project = "my-project" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `dataset_id` - (Required) The dataset ID. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + +## Attributes Reference + +See [google_bigquery_dataset](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/bigquery_dataset) resource for details of the available attributes. diff --git a/mmv1/third_party/terraform/website/docs/d/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_instance_template.html.markdown index 3142f0af8932..7249ae2c5301 100644 --- a/mmv1/third_party/terraform/website/docs/d/compute_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/compute_instance_template.html.markdown @@ -139,6 +139,11 @@ The following arguments are supported: * `disk_name` - Name of the disk. When not provided, this defaults to the name of the instance. +* `provisioned_iops` - Indicates how many IOPS to provision for the disk. This + sets the number of I/O operations per second that the disk can handle. + Values must be between 10,000 and 120,000. For more details, see the + [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk). + * `source_image` - The image from which to initialize this disk. 
This can be one of: the image's `self_link`, `projects/{project}/global/images/{image}`, diff --git a/mmv1/third_party/terraform/website/docs/d/compute_region_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_region_instance_template.html.markdown index 1f2f628e23d1..9910a0372830 100644 --- a/mmv1/third_party/terraform/website/docs/d/compute_region_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/compute_region_instance_template.html.markdown @@ -125,6 +125,11 @@ The following arguments are supported: * `disk_name` - Name of the disk. When not provided, this defaults to the name of the instance. +* `provisioned_iops` - Indicates how many IOPS to provision for the disk. This + sets the number of I/O operations per second that the disk can handle. + Values must be between 10,000 and 120,000. For more details, see the + [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk). + * `source_image` - The image from which to initialize this disk. This can be one of: the image's `self_link`, `projects/{project}/global/images/{image}`, diff --git a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown index b2815d168db2..2fb87c496536 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_5_upgrade.html.markdown @@ -723,6 +723,7 @@ For more information on configuring Firebase resources with Terraform, see [Get If you have existing resources created using `google_firebase_project_location`: 1. Remove the `google_firebase_project_location` block 1. Add blocks according to "New config" in this section for any of the following that you need: `google_app_engine_application`, `google_firebase_storage_bucket`, and/or `google_firestore_database`. +1. 
Run `terraform state rm` for your existing `google_firebase_project_location` resource 1. Import the existing resources corresponding to the blocks added in the previous step: `terraform import google_app_engine_application.default ` `terraform import google_firebase_storage_bucket.default-bucket /.appspot.com` diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown index 1fd6b3d43242..e67b1b98c4fd 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown @@ -387,6 +387,11 @@ The following arguments are supported: * `disk_name` - (Optional) Name of the disk. When not provided, this defaults to the name of the instance. +* `provisioned_iops` - (Optional) Indicates how many IOPS to provision for the disk. This + sets the number of I/O operations per second that the disk can handle. + Values must be between 10,000 and 120,000. For more details, see the + [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk). + * `source_image` - (Optional) The image from which to initialize this disk. This can be one of: the image's `self_link`, `projects/{project}/global/images/{image}`, diff --git a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown index 1d17b8c7de60..d0a097aef4bf 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown @@ -396,6 +396,11 @@ The following arguments are supported: * `disk_name` - (Optional) Name of the disk. When not provided, this defaults to the name of the instance. 
+* `provisioned_iops` - (Optional) Indicates how many IOPS to provision for the disk. This + sets the number of I/O operations per second that the disk can handle. + Values must be between 10,000 and 120,000. For more details, see the + [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk). + * `source_image` - (Optional) The image from which to initialize this disk. This can be one of: the image's `self_link`, `projects/{project}/global/images/{image}`, diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 80c715b45e85..153694241a8c 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -986,6 +986,10 @@ sole_tenant_config { * `count` (Required) - The number of the guest accelerator cards exposed to this instance. * `gpu_driver_installation_config` (Optional) - Configuration for auto installation of GPU driver. Structure is [documented below](#nested_gpu_driver_installation_config). + +* `gpu_partition_size` (Optional) - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig [user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). + +* `gpu_sharing_config` (Optional) - Configuration for GPU sharing. Structure is [documented below](#nested_gpu_sharing_config). The `gpu_driver_installation_config` block supports: @@ -996,10 +1000,6 @@ sole_tenant_config { * `"DEFAULT"`: "Default" GPU driver in COS and Ubuntu. * `"LATEST"`: "Latest" GPU driver in COS. -* `gpu_partition_size` (Optional) - Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig [user guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning). - -* `gpu_sharing_config` (Optional) - Configuration for GPU sharing. 
Structure is [documented below](#nested_gpu_sharing_config). - The `gpu_sharing_config` block supports: * `gpu_sharing_strategy` (Required) - The type of GPU sharing strategy to enable on the GPU node. diff --git a/mmv1/third_party/terraform/website/docs/r/dataproc_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/dataproc_cluster.html.markdown index e55b431059e3..32762397ff06 100644 --- a/mmv1/third_party/terraform/website/docs/r/dataproc_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/dataproc_cluster.html.markdown @@ -14,7 +14,7 @@ Manages a Cloud Dataproc cluster resource within GCP. !> **Warning:** Due to limitations of the API, all arguments except -`labels`,`cluster_config.worker_config.num_instances` and `cluster_config.preemptible_worker_config.num_instances` are non-updatable. Changing others will cause recreation of the +`labels`,`cluster_config.worker_config.num_instances` and `cluster_config.preemptible_worker_config.num_instances` are non-updatable. Changing `cluster_config.worker_config.min_num_instances` will be ignored. Changing others will cause recreation of the whole cluster! ## Example Usage - Basic @@ -608,6 +608,16 @@ cluster_config { boot_disk_size_gb = 30 num_local_ssds = 1 } + instance_flexibility_policy { + instance_selection_list { + machine_types = ["n2-standard-2","n1-standard-2"] + rank = 1 + } + instance_selection_list { + machine_types = ["n2d-standard-2"] + rank = 3 + } + } } } ``` @@ -638,6 +648,13 @@ will be set for you based on whatever was set for the `worker_config.machine_typ * `num_local_ssds` - (Optional) The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0. +* `instance_flexibility_policy` (Optional) Instance flexibility Policy allowing a mixture of VM shapes and provisioning models. + + * `instance_selection_list` - (Optional) List of instance selection options that the group will use when creating new VMs. 
+ * `machine_types` - (Optional) Full machine-type names, e.g. `"n1-standard-16"`. + + * `rank` - (Optional) Preference of this instance selection. A lower number means higher preference. Dataproc will first try to create a VM based on the machine-type with priority rank and fallback to next rank based on availability. Machine types and instance selections with the same priority have the same preference. + - - - The `cluster_config.software_config` block supports: diff --git a/mmv1/third_party/terraform/website/docs/r/os_config_os_policy_assignment.html.markdown b/mmv1/third_party/terraform/website/docs/r/os_config_os_policy_assignment.html.markdown index 0bbc7a73ba46..229383c34847 100644 --- a/mmv1/third_party/terraform/website/docs/r/os_config_os_policy_assignment.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/os_config_os_policy_assignment.html.markdown @@ -20,12 +20,6 @@ To get more information about OSPolicyAssignment, see: * How-to Guides * [Official Documentation](https://cloud.google.com/compute/docs/os-configuration-management/create-os-policy-assignment) - - ## Example Usage - Os Config Os Policy Assignment Basic ```hcl diff --git a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown index 3f98aeac6abe..67b1c0a27da3 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown @@ -317,8 +317,7 @@ The optional `settings.active_directory_config` subblock supports: The optional `settings.data_cache_config` subblock supports: -* `data_cache_enabled` - (Optional) Whether data cache is enabled for the instance. Defaults to `false` - Can only be used with MYSQL. +* `data_cache_enabled` - (Optional) Whether data cache is enabled for the instance. Defaults to `false`. Can be used with MYSQL and PostgreSQL only. 
The optional `settings.deny_maintenance_period` subblock supports: @@ -372,7 +371,12 @@ Specifying a network enables private IP. At least `ipv4_enabled` must be enabled or a `private_network` must be configured. This setting can be updated, but it cannot be removed after it is set. -* `require_ssl` - (Optional) Whether SSL connections over IP are enforced or not. +* `require_ssl` - (Optional) Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in `ssl_mode`. + +* `ssl_mode` - (Optional) Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcement options compared to `require_ssl`. To change this field, also set the corresponding value in `require_ssl`. + * For PostgreSQL instances, the value pairs are listed in the [API reference doc](https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/instances#ipconfiguration) for `ssl_mode` field. + * For MySQL instances, use the same value pairs as the PostgreSQL instances. + * For SQL Server instances, set it to `ALLOW_UNENCRYPTED_AND_ENCRYPTED` when `require_ssl=false` and `ENCRYPTED_ONLY` otherwise. * `allocated_ip_range` - (Optional) The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://datatracker.ietf.org/doc/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?. 
diff --git a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown index 005f1d76328a..0c2123fa25bf 100644 --- a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown @@ -120,6 +120,9 @@ The following arguments are supported: * `encryption` - (Optional) The bucket's encryption configuration. Structure is [documented below](#nested_encryption). +* `enable_object_retention` - (Optional, Default: false) Enables [object retention](https://cloud.google.com/storage/docs/object-lock) on a storage bucket. + + * `requester_pays` - (Optional, Default: false) Enables [Requester Pays](https://cloud.google.com/storage/docs/requester-pays) on a storage bucket. * `uniform_bucket_level_access` - (Optional, Default: false) Enables [Uniform bucket-level access](https://cloud.google.com/storage/docs/uniform-bucket-level-access) access to a bucket. diff --git a/mmv1/third_party/terraform/website/docs/r/storage_bucket_object.html.markdown b/mmv1/third_party/terraform/website/docs/r/storage_bucket_object.html.markdown index 57995a5d90b1..4832d1bdcf12 100644 --- a/mmv1/third_party/terraform/website/docs/r/storage_bucket_object.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/storage_bucket_object.html.markdown @@ -67,7 +67,9 @@ One of the following is required: * `content_type` - (Optional) [Content-Type](https://tools.ietf.org/html/rfc7231#section-3.1.1.5) of the object data. Defaults to "application/octet-stream" or "text/plain; charset=utf-8". * `customer_encryption` - (Optional) Enables object encryption with Customer-Supplied Encryption Key (CSEK). [Google [documentation about](#nested_customer_encryption) CSEK.](https://cloud.google.com/storage/docs/encryption/customer-supplied-keys) - Structure is documented below. + Structure is [documented below](#nested_customer_encryption). 
+ +* `retention` - (Optional) The [object retention](https://cloud.google.com/storage/docs/object-lock) settings for the object. The retention settings allow an object to be retained until a provided date. Structure is [documented below](#nested_retention). * `event_based_hold` - (Optional) Whether an object is under [event-based hold](https://cloud.google.com/storage/docs/object-holds#hold-types). Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any). @@ -89,6 +91,14 @@ One of the following is required: * `encryption_key` - (Required) Base64 encoded Customer-Supplied Encryption Key. +The `retention` block supports: + +* `mode` - (Required) The retention policy mode. Either `Locked` or `Unlocked`. + +* `retain_until_time` - (Required) The time to retain the object until in RFC 3339 format, for example 2012-11-15T16:19:00.094Z. 
+ + + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are diff --git a/tools/diff-processor/README.md b/tools/diff-processor/README.md index 09f7676be083..7cd410dd854a 100644 --- a/tools/diff-processor/README.md +++ b/tools/diff-processor/README.md @@ -11,8 +11,12 @@ make clone OWNER_REPO=modular-magician/terraform-provider-google # build based on old / new dirs make build OLD_REF=branch_or_commit NEW_REF=branch_or_commit -# Run the binary -bin/diff-processor +# Run breaking change detection on the difference between OLD_REF and NEW_REF +bin/diff-processor breaking-changes + +# Add labels to a PR based on the resources changed between OLD_REF and NEW_REF +# The token used must have write access to issues +GITHUB_TOKEN=github_token bin/diff-processor add-labels PR_ID [--dry-run] ``` ## Test diff --git a/tools/diff-processor/cmd/add_labels.go b/tools/diff-processor/cmd/add_labels.go new file mode 100644 index 000000000000..9fa068a29594 --- /dev/null +++ b/tools/diff-processor/cmd/add_labels.go @@ -0,0 +1,103 @@ +package cmd + +import ( + newProvider "google/provider/new/google/provider" + oldProvider "google/provider/old/google/provider" + + "fmt" + "strconv" + "strings" + + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/labels" + "github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler/labeler" + "github.com/spf13/cobra" + "golang.org/x/exp/maps" +) + +const addLabelsDesc = `Add labels to a PR based on changed resources.` + +type addLabelsOptions struct { + rootOptions *rootOptions + computeSchemaDiff func() diff.SchemaDiff + enrolledTeamsYaml []byte + getIssue func(repository string, id uint64) (labeler.Issue, error) + updateIssues func(repository string, issueUpdates []labeler.IssueUpdate, dryRun bool) + dryRun bool +} + +func newAddLabelsCmd(rootOptions *rootOptions) *cobra.Command { + o := &addLabelsOptions{ 
+ rootOptions: rootOptions, + computeSchemaDiff: func() diff.SchemaDiff { + return diff.ComputeSchemaDiff(oldProvider.ResourceMap(), newProvider.ResourceMap()) + }, + enrolledTeamsYaml: labeler.EnrolledTeamsYaml, + getIssue: labels.GetIssue, + updateIssues: labeler.UpdateIssues, + } + cmd := &cobra.Command{ + Use: "add-labels PR_ID [--dry-run]", + Short: addLabelsDesc, + Long: addLabelsDesc, + Args: cobra.ExactArgs(1), + RunE: func(c *cobra.Command, args []string) error { + return o.run(args) + }, + } + cmd.Flags().BoolVar(&o.dryRun, "dry-run", false, "Do a dry run without updating labels") + return cmd +} +func (o *addLabelsOptions) run(args []string) error { + prId, err := strconv.ParseUint(args[0], 10, 0) + if err != nil { + return fmt.Errorf("PR_ID must be an unsigned integer: %w", err) + } + + repository := "GoogleCloudPlatform/magic-modules" + issue, err := o.getIssue(repository, prId) + + if err != nil { + return fmt.Errorf("Error retrieving PR data: %w", err) + } + + hasServiceLabels := false + oldLabels := make(map[string]struct{}, len(issue.Labels)) + for _, label := range issue.Labels { + oldLabels[label.Name] = struct{}{} + if strings.HasPrefix(label.Name, "services/") { + hasServiceLabels = true + } + } + if hasServiceLabels { + return nil + } + + schemaDiff := o.computeSchemaDiff() + affectedResources := maps.Keys(schemaDiff) + regexpLabels, err := labeler.BuildRegexLabels(o.enrolledTeamsYaml) + if err != nil { + return fmt.Errorf("Error building regex labels: %w", err) + } + + newLabels := make(map[string]struct{}, len(oldLabels)) + for label, _ := range oldLabels { + newLabels[label] = struct{}{} + } + for _, label := range labeler.ComputeLabels(affectedResources, regexpLabels) { + newLabels[label] = struct{}{} + } + + // Only update the issue if new labels should be added + if len(newLabels) != len(oldLabels) { + issueUpdate := labeler.IssueUpdate{ + Number: prId, + Labels: maps.Keys(newLabels), + OldLabels: maps.Keys(oldLabels), + } + + 
o.updateIssues(repository, []labeler.IssueUpdate{issueUpdate}, o.dryRun) + } + + return nil +} diff --git a/tools/diff-processor/cmd/add_labels_test.go b/tools/diff-processor/cmd/add_labels_test.go new file mode 100644 index 000000000000..62906187dd27 --- /dev/null +++ b/tools/diff-processor/cmd/add_labels_test.go @@ -0,0 +1,294 @@ +package cmd + +import ( + _ "embed" + "errors" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler/labeler" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "testing" +) + +var enrolledTeamsYaml = []byte(` +services/google-x: + resources: + - google_x_resource`) + +func TestAddLabelsCmdRun(t *testing.T) { + cases := map[string]struct { + args []string + oldResourceMap map[string]*schema.Resource + newResourceMap map[string]*schema.Resource + githubIssue *labeler.Issue + updateErrors bool + expectedLabels []string + expectError bool + }{ + "empty resource map": { + args: []string{"12345"}, + oldResourceMap: map[string]*schema.Resource{}, + newResourceMap: map[string]*schema.Resource{}, + githubIssue: &labeler.Issue{ + Number: 12345, + Body: "Unused", + Labels: []labeler.Label{}, + PullRequest: map[string]any{}, + }, + expectedLabels: nil, + }, + "resource changed that doesn't match mapping": { + args: []string{"12345"}, + oldResourceMap: map[string]*schema.Resource{ + "google_y_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_y_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Required: true}, + }, + }, + }, + githubIssue: &labeler.Issue{ + Number: 12345, + Body: "Unused", + Labels: []labeler.Label{}, + PullRequest: map[string]any{}, + }, + 
expectedLabels: nil, + }, + "resource matches mapping but isn't changed": { + args: []string{"12345"}, + oldResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + githubIssue: &labeler.Issue{ + Number: 12345, + Body: "Unused", + Labels: []labeler.Label{}, + PullRequest: map[string]any{}, + }, + expectedLabels: nil, + }, + "resource changed that matches mapping": { + args: []string{"12345"}, + oldResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Required: true}, + }, + }, + }, + githubIssue: &labeler.Issue{ + Number: 12345, + Body: "Unused", + Labels: []labeler.Label{}, + PullRequest: map[string]any{}, + }, + expectedLabels: []string{"services/google-x"}, + }, + "service labels are deduped": { + args: []string{"12345"}, + oldResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + "google_x_resource2": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", 
Required: true}, + }, + }, + "google_x_resource2": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Required: true}, + }, + }, + }, + githubIssue: &labeler.Issue{ + Number: 12345, + Body: "Unused", + Labels: []labeler.Label{}, + PullRequest: map[string]any{}, + }, + expectedLabels: []string{"services/google-x"}, + }, + "existing labels are preserved": { + args: []string{"12345"}, + oldResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Required: true}, + }, + }, + }, + githubIssue: &labeler.Issue{ + Number: 12345, + Body: "Unused", + Labels: []labeler.Label{{Name: "override-breaking-change"}}, + PullRequest: map[string]any{}, + }, + expectedLabels: []string{"override-breaking-change", "services/google-x"}, + }, + "existing service label prevents new service labels": { + args: []string{"12345"}, + oldResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Required: true}, + }, + }, + }, + githubIssue: &labeler.Issue{ + Number: 12345, + Body: "Unused", + Labels: []labeler.Label{{Name: "services/google-z"}}, + PullRequest: map[string]any{}, + }, + // nil indicates that the issue won't be updated at all (preserving existing labels) + expectedLabels: nil, + }, + "error fetching issue": { + args: []string{"12345"}, + oldResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + 
"field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Required: true}, + }, + }, + }, + githubIssue: nil, + expectError: true, + }, + "error parsing PR id": { + args: []string{"foobar"}, + oldResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Optional: true}, + "field_b": {Description: "beep", Optional: true}, + }, + }, + }, + newResourceMap: map[string]*schema.Resource{ + "google_x_resource": { + Schema: map[string]*schema.Schema{ + "field_a": {Description: "beep", Required: true}, + }, + }, + }, + githubIssue: &labeler.Issue{ + Number: 12345, + Body: "Unused", + Labels: []labeler.Label{{Name: "services/google-z"}}, + PullRequest: map[string]any{}, + }, + expectError: true, + }, + } + + for tn, tc := range cases { + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + + var gotLabels []string + o := addLabelsOptions{ + computeSchemaDiff: func() diff.SchemaDiff { + return diff.ComputeSchemaDiff(tc.oldResourceMap, tc.newResourceMap) + }, + enrolledTeamsYaml: enrolledTeamsYaml, + getIssue: func(repository string, id uint64) (labeler.Issue, error) { + if tc.githubIssue != nil { + return *tc.githubIssue, nil + } + var issue labeler.Issue + return issue, errors.New("Error getting issue") + }, + updateIssues: func(repository string, issueUpdates []labeler.IssueUpdate, dryRun bool) { + gotLabels = issueUpdates[0].Labels + }, + } + + err := o.run([]string{"1"}) + if err != nil { + if tc.expectError { + return + } + t.Errorf("Error running command: %s", err) + } + + if tc.expectedLabels == nil { + if gotLabels != nil { + t.Errorf("Expected updateIssues to not run. 
Got %v as new labels", gotLabels) + } + } + + less := func(a, b string) bool { return a < b } + if (len(tc.expectedLabels) > 0 || len(gotLabels) > 0) && !cmp.Equal(tc.expectedLabels, gotLabels, cmpopts.SortSlices(less)) { + t.Errorf("Unexpected final labels. Want %v, got %v", tc.expectedLabels, gotLabels) + } + }) + } +} diff --git a/tools/diff-processor/cmd/breaking_changes.go b/tools/diff-processor/cmd/breaking_changes.go index 1544f6529a00..9ac78af0c94c 100644 --- a/tools/diff-processor/cmd/breaking_changes.go +++ b/tools/diff-processor/cmd/breaking_changes.go @@ -8,8 +8,8 @@ import ( "os" "sort" - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/rules" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/rules" "github.com/spf13/cobra" ) diff --git a/tools/diff-processor/cmd/breaking_changes_test.go b/tools/diff-processor/cmd/breaking_changes_test.go index f4e25966b4b8..735dc593c623 100644 --- a/tools/diff-processor/cmd/breaking_changes_test.go +++ b/tools/diff-processor/cmd/breaking_changes_test.go @@ -2,7 +2,7 @@ package cmd import ( "bytes" - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "strings" "testing" diff --git a/tools/diff-processor/cmd/root.go b/tools/diff-processor/cmd/root.go index ffa8c6dc71b1..1a3674921745 100644 --- a/tools/diff-processor/cmd/root.go +++ b/tools/diff-processor/cmd/root.go @@ -21,6 +21,7 @@ func newRootCmd() (*cobra.Command, *rootOptions, error) { SilenceErrors: true, } cmd.AddCommand(newBreakingChangesCmd(o)) + cmd.AddCommand(newAddLabelsCmd(o)) return cmd, o, nil } diff --git a/tools/diff-processor/go.mod b/tools/diff-processor/go.mod index 88bcd44634ca..880f54da41c9 100644 --- 
a/tools/diff-processor/go.mod +++ b/tools/diff-processor/go.mod @@ -1,4 +1,4 @@ -module github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor +module github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor go 1.19 @@ -6,9 +6,12 @@ replace google/provider/old => ./old replace google/provider/new => ./new -replace github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor => ./ +replace github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor => ./ + +replace github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler => ../issue-labeler require ( + github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler v0.0.0-00010101000000-000000000000 github.com/davecgh/go-spew v1.1.1 github.com/google/go-cmp v0.5.9 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 @@ -42,7 +45,7 @@ require ( github.com/fatih/color v1.13.0 // indirect github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.1.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 // indirect @@ -106,5 +109,6 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e // indirect google.golang.org/grpc v1.57.0 // indirect google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/diff-processor/go.sum b/tools/diff-processor/go.sum index 42705ad96cf3..8aea913951ea 100644 --- a/tools/diff-processor/go.sum +++ b/tools/diff-processor/go.sum @@ -98,8 +98,8 @@ github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf 
v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= +github.com/golang/glog v1.1.1/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -469,6 +469,7 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tools/diff-processor/labels/get_issue.go b/tools/diff-processor/labels/get_issue.go new file mode 100644 index 000000000000..fe9b5a235d8a --- /dev/null +++ b/tools/diff-processor/labels/get_issue.go @@ -0,0 +1,46 @@ +package labels + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + + labeler "github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler/labeler" +) + +func GetIssue(repository string, id uint64) (labeler.Issue, error) { + var 
issue labeler.Issue + client := &http.Client{} + url := fmt.Sprintf("https://api.github.com/repos/%s/issues/%d", repository, id) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return issue, fmt.Errorf("Error creating request: %w", err) + } + req.Header.Add("Accept", "application/vnd.github+json") + req.Header.Add("Authorization", "Bearer "+os.Getenv("GITHUB_TOKEN")) + req.Header.Add("X-GitHub-Api-Version", "2022-11-28") + resp, err := client.Do(req) + if err != nil { + return issue, fmt.Errorf("Error getting issue: %w", err) + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return issue, fmt.Errorf("Error reading response body: %w", err) + } + + err = json.Unmarshal(body, &issue) + if err != nil { + var errorResponse labeler.ErrorResponse + err = json.Unmarshal(body, &errorResponse) + if err != nil { + return issue, fmt.Errorf("Error unmarshalling response body: %w", err) + } + return issue, fmt.Errorf("Error from API: %s", errorResponse.Message) + } + + return issue, nil +} diff --git a/tools/diff-processor/main.go b/tools/diff-processor/main.go index 2795fad34354..f7693547c404 100644 --- a/tools/diff-processor/main.go +++ b/tools/diff-processor/main.go @@ -1,8 +1,9 @@ - package main + import ( - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/cmd" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/cmd" ) + func main() { cmd.Execute() -} \ No newline at end of file +} diff --git a/tools/diff-processor/rules/breaking_changes.go b/tools/diff-processor/rules/breaking_changes.go index 2adaf4192720..4a3b27b365da 100644 --- a/tools/diff-processor/rules/breaking_changes.go +++ b/tools/diff-processor/rules/breaking_changes.go @@ -1,7 +1,7 @@ package rules import ( - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" ) func ComputeBreakingChanges(schemaDiff diff.SchemaDiff) []string { 
diff --git a/tools/diff-processor/rules/breaking_changes_test.go b/tools/diff-processor/rules/breaking_changes_test.go index 5c11cf63287e..9c63981c3391 100644 --- a/tools/diff-processor/rules/breaking_changes_test.go +++ b/tools/diff-processor/rules/breaking_changes_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) diff --git a/tools/diff-processor/rules/rules_resource_schema.go b/tools/diff-processor/rules/rules_resource_schema.go index 5cf6ca11915a..68ce9ea43650 100644 --- a/tools/diff-processor/rules/rules_resource_schema.go +++ b/tools/diff-processor/rules/rules_resource_schema.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" ) // ResourceSchemaRule provides structure for diff --git a/tools/diff-processor/rules/rules_resource_schema_test.go b/tools/diff-processor/rules/rules_resource_schema_test.go index ea92c9740c6c..da8b488bf69e 100644 --- a/tools/diff-processor/rules/rules_resource_schema_test.go +++ b/tools/diff-processor/rules/rules_resource_schema_test.go @@ -3,7 +3,7 @@ package rules import ( "testing" - "github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/diff" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/tools/diff-processor/rules/utility.go b/tools/diff-processor/rules/utility.go index 43437a6e579c..78e82b63fcb8 100644 --- a/tools/diff-processor/rules/utility.go +++ b/tools/diff-processor/rules/utility.go @@ -3,7 +3,7 @@ package rules import ( "fmt" - 
"github.com/GoogleCloudPlatform/magic-modules/.ci/diff-processor/constants" + "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/constants" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) diff --git a/tools/issue-labeler/go.mod b/tools/issue-labeler/go.mod index 4a4ba3345cd4..d0e17c929f42 100644 --- a/tools/issue-labeler/go.mod +++ b/tools/issue-labeler/go.mod @@ -1,4 +1,4 @@ -module github.com/GoogleCloudPlatform/magic-modules/issue-labeler/tools/issue-labeler +module github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler go 1.19 diff --git a/tools/issue-labeler/backfill.go b/tools/issue-labeler/labeler/backfill.go similarity index 82% rename from tools/issue-labeler/backfill.go rename to tools/issue-labeler/labeler/backfill.go index e5cb8f4b728a..dd19ebc00091 100644 --- a/tools/issue-labeler/backfill.go +++ b/tools/issue-labeler/labeler/backfill.go @@ -1,4 +1,4 @@ -package main +package labeler import ( "bytes" @@ -17,7 +17,7 @@ type ErrorResponse struct { } type Issue struct { - Number int + Number uint64 Body string Labels []Label PullRequest map[string]any `json:"pull_request"` @@ -28,7 +28,7 @@ type Label struct { } type IssueUpdate struct { - Number int + Number uint64 Labels []string OldLabels []string } @@ -37,13 +37,13 @@ type IssueUpdateBody struct { Labels []string `json:"labels"` } -func getIssues(since string) []Issue { +func GetIssues(repository, since string) []Issue { client := &http.Client{} done := false page := 1 var issues []Issue for !done { - url := fmt.Sprintf("https://api.github.com/repos/hashicorp/terraform-provider-google/issues?since=%s&per_page=100&page=%d", since, page) + url := fmt.Sprintf("https://api.github.com/repos/%s/issues?since=%s&per_page=100&page=%d", repository, since, page) req, err := http.NewRequest("GET", url, nil) if err != nil { glog.Exitf("Error creating request: %v", err) @@ -78,7 +78,7 @@ func getIssues(since string) []Issue { return issues } -func computeIssueUpdates(issues 
[]Issue, regexpLabels []regexpLabel) []IssueUpdate { +func ComputeIssueUpdates(issues []Issue, regexpLabels []RegexpLabel) []IssueUpdate { var issueUpdates []IssueUpdate for _, issue := range issues { @@ -103,8 +103,8 @@ func computeIssueUpdates(issues []Issue, regexpLabels []regexpLabel) []IssueUpda issueUpdate.OldLabels = append(issueUpdate.OldLabels, label) } - affectedResources := extractAffectedResources(issue.Body) - for _, needed := range computeLabels(affectedResources, regexpLabels) { + affectedResources := ExtractAffectedResources(issue.Body) + for _, needed := range ComputeLabels(affectedResources, regexpLabels) { desired[needed] = struct{}{} } @@ -126,10 +126,10 @@ func computeIssueUpdates(issues []Issue, regexpLabels []regexpLabel) []IssueUpda return issueUpdates } -func updateIssues(issueUpdates []IssueUpdate, dryRun bool) { +func UpdateIssues(repository string, issueUpdates []IssueUpdate, dryRun bool) { client := &http.Client{} for _, issueUpdate := range issueUpdates { - url := fmt.Sprintf("https://api.github.com/repos/hashicorp/terraform-provider-google/issues/%d", issueUpdate.Number) + url := fmt.Sprintf("https://api.github.com/repos/%s/issues/%d", repository, issueUpdate.Number) updateBody := IssueUpdateBody{Labels: issueUpdate.Labels} body, err := json.Marshal(updateBody) if err != nil { @@ -146,7 +146,7 @@ func updateIssues(issueUpdates []IssueUpdate, dryRun bool) { } fmt.Printf("Existing labels: %v\n", issueUpdate.OldLabels) fmt.Printf("New labels: %v\n", issueUpdate.Labels) - fmt.Printf("%s %s (https://github.com/hashicorp/terraform-provider-google/issues/%d)\n", req.Method, req.URL, issueUpdate.Number) + fmt.Printf("%s %s (https://github.com/%s/issues/%d)\n", req.Method, req.URL, repository, issueUpdate.Number) b, err := json.MarshalIndent(updateBody, "", " ") if err != nil { glog.Errorf("Error marshalling json: %v", err) @@ -167,8 +167,9 @@ func updateIssues(issueUpdates []IssueUpdate, dryRun bool) { var errResp ErrorResponse 
json.Unmarshal(body, &errResp) if errResp.Message != "" { - glog.Infof("API error: %s", errResp.Message) + glog.Errorf("API error: %s", errResp.Message) } + } } } diff --git a/tools/issue-labeler/backfill_test.go b/tools/issue-labeler/labeler/backfill_test.go similarity index 96% rename from tools/issue-labeler/backfill_test.go rename to tools/issue-labeler/labeler/backfill_test.go index 41c00f2304cf..c58e7f14b8e6 100644 --- a/tools/issue-labeler/backfill_test.go +++ b/tools/issue-labeler/labeler/backfill_test.go @@ -1,4 +1,4 @@ -package main +package labeler import ( "fmt" @@ -20,7 +20,7 @@ func testIssueBodyWithResources(resources []string) string { } func TestComputeIssueUpdates(t *testing.T) { - defaultRegexpLabels := []regexpLabel{ + defaultRegexpLabels := []RegexpLabel{ { Regexp: regexp.MustCompile("google_service1_.*"), Label: "service/service1", @@ -36,7 +36,7 @@ func TestComputeIssueUpdates(t *testing.T) { } cases := map[string]struct { issues []Issue - regexpLabels []regexpLabel + regexpLabels []RegexpLabel expectedIssueUpdates []IssueUpdate }{ "no issues -> no updates": { @@ -160,7 +160,7 @@ func TestComputeIssueUpdates(t *testing.T) { tc := tc t.Run(tn, func(t *testing.T) { t.Parallel() - issueUpdates := computeIssueUpdates(tc.issues, tc.regexpLabels) + issueUpdates := ComputeIssueUpdates(tc.issues, tc.regexpLabels) // reflect.DeepEqual treats nil & empty slices as not equal so ignore diffs if both slices are empty. 
if (len(issueUpdates) > 0 || len(tc.expectedIssueUpdates) > 0) && !reflect.DeepEqual(issueUpdates, tc.expectedIssueUpdates) { t.Errorf("Expected %v, got %v", tc.expectedIssueUpdates, issueUpdates) diff --git a/tools/issue-labeler/enrolled_teams.yml b/tools/issue-labeler/labeler/enrolled_teams.yml similarity index 100% rename from tools/issue-labeler/enrolled_teams.yml rename to tools/issue-labeler/labeler/enrolled_teams.yml diff --git a/tools/issue-labeler/labels.go b/tools/issue-labeler/labeler/labels.go similarity index 83% rename from tools/issue-labeler/labels.go rename to tools/issue-labeler/labeler/labels.go index 588588f65120..697bdb6f132f 100644 --- a/tools/issue-labeler/labels.go +++ b/tools/issue-labeler/labeler/labels.go @@ -1,4 +1,4 @@ -package main +package labeler import ( "fmt" @@ -17,7 +17,7 @@ var resourceRegexp = regexp.MustCompile(`google_[\w*.]+`) var ( //go:embed enrolled_teams.yml - enrolledTeamsYaml []byte + EnrolledTeamsYaml []byte ) type labelData struct { @@ -25,14 +25,14 @@ type labelData struct { Resources []string `yaml:"resources"` } -type regexpLabel struct { +type RegexpLabel struct { Regexp *regexp.Regexp Label string } -func buildRegexLabels(teamsYaml []byte) ([]regexpLabel, error) { +func BuildRegexLabels(teamsYaml []byte) ([]RegexpLabel, error) { enrolledTeams := make(map[string]labelData) - regexpLabels := []regexpLabel{} + regexpLabels := []RegexpLabel{} if err := yaml.Unmarshal(teamsYaml, &enrolledTeams); err != nil { return regexpLabels, fmt.Errorf("Error unmarshalling enrolled teams yaml: %w", err) } @@ -40,7 +40,7 @@ func buildRegexLabels(teamsYaml []byte) ([]regexpLabel, error) { for label, data := range enrolledTeams { for _, resource := range data.Resources { exactResource := fmt.Sprintf("^%s$", resource) - regexpLabels = append(regexpLabels, regexpLabel{ + regexpLabels = append(regexpLabels, RegexpLabel{ Regexp: regexp.MustCompile(exactResource), Label: label, }) @@ -54,7 +54,7 @@ func buildRegexLabels(teamsYaml []byte) 
([]regexpLabel, error) { return regexpLabels, nil } -func extractAffectedResources(body string) []string { +func ExtractAffectedResources(body string) []string { section := sectionRegexp.FindString(body) section = commentRegexp.ReplaceAllString(section, "") if section != "" { @@ -64,7 +64,7 @@ func extractAffectedResources(body string) []string { return []string{} } -func computeLabels(resources []string, regexpLabels []regexpLabel) []string { +func ComputeLabels(resources []string, regexpLabels []RegexpLabel) []string { labelSet := make(map[string]struct{}) for _, resource := range resources { for _, rl := range regexpLabels { diff --git a/tools/issue-labeler/labels_test.go b/tools/issue-labeler/labeler/labels_test.go similarity index 96% rename from tools/issue-labeler/labels_test.go rename to tools/issue-labeler/labeler/labels_test.go index 1112cd9565e2..7161969dd8e9 100644 --- a/tools/issue-labeler/labels_test.go +++ b/tools/issue-labeler/labeler/labels_test.go @@ -1,4 +1,4 @@ -package main +package labeler import ( "reflect" @@ -35,7 +35,7 @@ func TestExtractAffectedResources(t *testing.T) { tc := tc t.Run(tn, func(t *testing.T) { t.Parallel() - resources := extractAffectedResources(tc.body) + resources := ExtractAffectedResources(tc.body) if !slices.Equal(resources, tc.expectedResources) { t.Errorf("Expected %v, got %v", tc.expectedResources, resources) } @@ -45,7 +45,7 @@ func TestExtractAffectedResources(t *testing.T) { func TestEnrolledTeamsData(t *testing.T) { // Smoke test to make sure enrolled teams data can be converted to a regex -> label map - _, err := buildRegexLabels(enrolledTeamsYaml) + _, err := BuildRegexLabels(EnrolledTeamsYaml) if err != nil { t.Logf("Error converting enrolled_teams.yml to regexpLabels: %s", err) t.FailNow() @@ -55,11 +55,11 @@ func TestEnrolledTeamsData(t *testing.T) { func TestBuildRegexLabels(t *testing.T) { cases := map[string]struct { yaml []byte - expectedRegexpLabels []regexpLabel + expectedRegexpLabels []RegexpLabel 
}{ "empty yaml": { yaml: []byte{}, - expectedRegexpLabels: []regexpLabel{}, + expectedRegexpLabels: []RegexpLabel{}, }, "labels with resources": { yaml: []byte(` @@ -70,7 +70,7 @@ service/service2: resources: - google_service2_resource1 - google_service2_resource2`), - expectedRegexpLabels: []regexpLabel{ + expectedRegexpLabels: []RegexpLabel{ { Regexp: regexp.MustCompile("^google_service1_.*$"), Label: "service/service1", @@ -91,7 +91,7 @@ service/service1: team: service1-team resources: - google_service1_resource1`), - expectedRegexpLabels: []regexpLabel{ + expectedRegexpLabels: []RegexpLabel{ { Regexp: regexp.MustCompile("^google_service1_resource1$"), Label: "service/service1", @@ -104,7 +104,7 @@ service/service1: tc := tc t.Run(tn, func(t *testing.T) { t.Parallel() - regexpLabels, err := buildRegexLabels(tc.yaml) + regexpLabels, err := BuildRegexLabels(tc.yaml) if err != nil { t.Logf("Unable to read enrolled teams: %s", err) t.FailNow() @@ -117,7 +117,7 @@ service/service1: } func TestComputeLabels(t *testing.T) { - defaultRegexpLabels := []regexpLabel{ + defaultRegexpLabels := []RegexpLabel{ { Regexp: regexp.MustCompile("^google_service1_.*$"), Label: "service/service1", @@ -141,7 +141,7 @@ func TestComputeLabels(t *testing.T) { } cases := map[string]struct { resources []string - regexpLabels []regexpLabel + regexpLabels []RegexpLabel expectedLabels []string }{ "empty resources -> empty labels": { @@ -156,7 +156,7 @@ func TestComputeLabels(t *testing.T) { }, "empty regexpLabels -> empty labels": { resources: []string{"google_service1_resource1"}, - regexpLabels: []regexpLabel{}, + regexpLabels: []RegexpLabel{}, expectedLabels: []string{}, }, "single matched resource": { @@ -195,7 +195,7 @@ func TestComputeLabels(t *testing.T) { tc := tc t.Run(tn, func(t *testing.T) { t.Parallel() - labels := computeLabels(tc.resources, tc.regexpLabels) + labels := ComputeLabels(tc.resources, tc.regexpLabels) if !slices.Equal(labels, tc.expectedLabels) { t.Errorf("want %v; 
got %v", tc.expectedLabels, labels) } diff --git a/tools/issue-labeler/main.go b/tools/issue-labeler/main.go index 1239337f5a5e..d2f69f0bd63c 100644 --- a/tools/issue-labeler/main.go +++ b/tools/issue-labeler/main.go @@ -7,6 +7,7 @@ import ( "sort" "strings" + "github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler/labeler" "github.com/golang/glog" ) @@ -16,15 +17,15 @@ var flagDryRun = flag.Bool("backfill-dry-run", false, "when combined with backfi func main() { flag.Parse() - regexpLabels, err := buildRegexLabels(enrolledTeamsYaml) + regexpLabels, err := labeler.BuildRegexLabels(labeler.EnrolledTeamsYaml) if err != nil { glog.Exitf("Error building regex labels: %v", err) } if *flagBackfillDate == "" { issueBody := os.Getenv("ISSUE_BODY") - affectedResources := extractAffectedResources(issueBody) - labels := computeLabels(affectedResources, regexpLabels) + affectedResources := labeler.ExtractAffectedResources(issueBody) + labels := labeler.ComputeLabels(affectedResources, regexpLabels) if len(labels) > 0 { labels = append(labels, "forward/review") @@ -32,8 +33,9 @@ func main() { fmt.Println(`["` + strings.Join(labels, `", "`) + `"]`) } } else { - issues := getIssues(*flagBackfillDate) - issueUpdates := computeIssueUpdates(issues, regexpLabels) - updateIssues(issueUpdates, *flagDryRun) + repository := "hashicorp/terraform-provider-google" + issues := labeler.GetIssues(repository, *flagBackfillDate) + issueUpdates := labeler.ComputeIssueUpdates(issues, regexpLabels) + labeler.UpdateIssues(repository, issueUpdates, *flagDryRun) } } diff --git a/tpgtools/api/assuredworkloads/samples/basic.workload.json b/tpgtools/api/assuredworkloads/samples/basic.workload.json index 331cfbf585b9..5b280bd44047 100755 --- a/tpgtools/api/assuredworkloads/samples/basic.workload.json +++ b/tpgtools/api/assuredworkloads/samples/basic.workload.json @@ -1,20 +1,22 @@ { "organization": "{{org_id}}", "location": "{{region}}", - "displayName": "Workload Example", + "displayName": 
"{{display}}", "complianceRegime": "FEDRAMP_MODERATE", "billingAccount": "billingAccounts/{{billing_account}}", "labels": { "label-one": "value-one" }, "provisionedResourcesParent": "folders/519620126891", + "violationNotificationsEnabled": true, "kmsSettings": { "nextRotationTime": "9999-10-02T15:01:23Z", "rotationPeriod": "10368000s" }, "resourceSettings": [ { - "resourceType": "CONSUMER_PROJECT" + "resourceType": "CONSUMER_FOLDER", + "displayName": "folder-display-name" }, { "resourceType": "ENCRYPTION_KEYS_PROJECT" @@ -24,4 +26,4 @@ "resourceType": "KEYRING" } ] -} +} \ No newline at end of file diff --git a/tpgtools/api/assuredworkloads/samples/sovereign_controls.workload.json b/tpgtools/api/assuredworkloads/samples/sovereign_controls.workload.json new file mode 100644 index 000000000000..f75313bc7777 --- /dev/null +++ b/tpgtools/api/assuredworkloads/samples/sovereign_controls.workload.json @@ -0,0 +1,27 @@ +{ + "organization": "{{org_id}}", + "location": "europe-west9", + "displayName": "{{display}}", + "complianceRegime": "EU_REGIONS_AND_SUPPORT", + "billingAccount": "billingAccounts/{{billing_account}}", + "labels": { + "label-one": "value-one" + }, + "enableSovereignControls": true, + "kmsSettings": { + "nextRotationTime": "9999-10-02T15:01:23Z", + "rotationPeriod": "10368000s" + }, + "resourceSettings": [ + { + "resourceType": "CONSUMER_FOLDER" + }, + { + "resourceType": "ENCRYPTION_KEYS_PROJECT" + }, + { + "resourceId": "{{ring}}", + "resourceType": "KEYRING" + } + ] +} \ No newline at end of file diff --git a/tpgtools/api/assuredworkloads/samples/sovereign_controls_workload.yaml b/tpgtools/api/assuredworkloads/samples/sovereign_controls_workload.yaml new file mode 100644 index 000000000000..bcb6e5fbd7ea --- /dev/null +++ b/tpgtools/api/assuredworkloads/samples/sovereign_controls_workload.yaml @@ -0,0 +1,31 @@ +# Copyright 2023 Google LLC. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name: sovereign_controls_workload +description: A Sovereign Controls test of the assuredworkloads api +type: workload +versions: +- beta +resource: samples/sovereign_controls.workload.json +updates: +- resource: samples/update_sovereign_controls.workload.json + dependencies: [] +variables: +- name: billing_account + type: billing_account +- name: display + type: resource_name +- name: org_id + type: org_id +- name: ring + type: resource_name \ No newline at end of file diff --git a/tpgtools/api/assuredworkloads/samples/update_sovereign_controls.workload.json b/tpgtools/api/assuredworkloads/samples/update_sovereign_controls.workload.json new file mode 100644 index 000000000000..99fa43989bb9 --- /dev/null +++ b/tpgtools/api/assuredworkloads/samples/update_sovereign_controls.workload.json @@ -0,0 +1,11 @@ +{ + "name": "{{ref:__state__:name}}", + "organization": "{{org_id}}", + "location": "europe-west9", + "displayName": "updated-example", + "billingAccount": "billingAccounts/{{billing_account}}", + "complianceRegime": "EU_REGIONS_AND_SUPPORT", + "labels": { + "label-two": "value-two-eu-regions-and-support" + } +} \ No newline at end of file diff --git a/tpgtools/api/orgpolicy/samples/organization_dry_run.policy.json b/tpgtools/api/orgpolicy/samples/organization_dry_run.policy.json new file mode 100644 index 000000000000..ef93418792e8 --- /dev/null +++ b/tpgtools/api/orgpolicy/samples/organization_dry_run.policy.json @@ 
-0,0 +1,13 @@ +{ + "name": "organizations/{{org_id}}/policies/gcp.resourceLocations", + "parent": "organizations/{{org_id}}", + "dryRunSpec": { + "rules": [ + { + "denyAll": true + } + ], + "reset": true, + "inheritFromParent": false + } +} \ No newline at end of file diff --git a/tpgtools/api/orgpolicy/samples/organization_dry_run_policy.yaml b/tpgtools/api/orgpolicy/samples/organization_dry_run_policy.yaml new file mode 100644 index 000000000000..809c10dde32e --- /dev/null +++ b/tpgtools/api/orgpolicy/samples/organization_dry_run_policy.yaml @@ -0,0 +1,26 @@ +# Copyright 2023 Google LLC. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+name: organization_dry_run_policy +description: A test of a dry run policy for an organization +type: policy +versions: +- ga +- beta +resource: samples/organization_dry_run.policy.json +updates: +- resource: samples/update_organization_dry_run.policy.json + dependencies: [] +variables: +- name: org_id + type: org_id \ No newline at end of file diff --git a/tpgtools/api/orgpolicy/samples/update_organization_dry_run.policy.json b/tpgtools/api/orgpolicy/samples/update_organization_dry_run.policy.json new file mode 100644 index 000000000000..fa4c032116be --- /dev/null +++ b/tpgtools/api/orgpolicy/samples/update_organization_dry_run.policy.json @@ -0,0 +1,14 @@ +{ + "name": "organizations/{{org_id}}/policies/gcp.resourceLocations", + "parent": "organizations/{{org_id}}", + "dryRunSpec": { + "rules": [ + { + "allowAll": true, + "enforce": true + } + ], + "reset": false, + "inheritFromParent": true + } +} \ No newline at end of file diff --git a/tpgtools/go.mod b/tpgtools/go.mod index 07832d3e80b4..5340271e9f1f 100644 --- a/tpgtools/go.mod +++ b/tpgtools/go.mod @@ -4,7 +4,7 @@ go 1.19 require ( bitbucket.org/creachadair/stringset v0.0.11 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.52.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.55.0 github.com/golang/glog v1.1.2 github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/hcl v1.0.0 diff --git a/tpgtools/go.sum b/tpgtools/go.sum index 981174d17fd5..bab15242f741 100644 --- a/tpgtools/go.sum +++ b/tpgtools/go.sum @@ -47,6 +47,8 @@ github.com/GoogleCloudPlatform/declarative-resource-client-library v1.51.0 h1:Yh github.com/GoogleCloudPlatform/declarative-resource-client-library v1.51.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.52.0 h1:KswxXF4E5iWv2ggktqv265zOvwmXA3mgma3UQfYA4tU= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.52.0/go.mod 
h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.55.0 h1:MTP0IDIztk36l8ubHkEcL6lWMG8Enqu9AP3E4MoBFg0= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.55.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= diff --git a/tpgtools/overrides/assuredworkloads/beta/workload.yaml b/tpgtools/overrides/assuredworkloads/beta/workload.yaml index e69de29bb2d1..b75a7c4a3f6b 100644 --- a/tpgtools/overrides/assuredworkloads/beta/workload.yaml +++ b/tpgtools/overrides/assuredworkloads/beta/workload.yaml @@ -0,0 +1,12 @@ +- type: CUSTOM_SCHEMA_VALUES + field: enable_sovereign_controls + details: + required: false + optional: true + computed: true +- type: CUSTOM_SCHEMA_VALUES + field: violation_notifications_enabled + details: + required: false + optional: true + computed: true \ No newline at end of file diff --git a/tpgtools/overrides/assuredworkloads/samples/workload/basic.tf.tmpl b/tpgtools/overrides/assuredworkloads/samples/workload/basic.tf.tmpl index fd154c19057f..8d303ef61437 100644 --- a/tpgtools/overrides/assuredworkloads/samples/workload/basic.tf.tmpl +++ b/tpgtools/overrides/assuredworkloads/samples/workload/basic.tf.tmpl @@ -8,6 +8,11 @@ resource "google_assured_workloads_workload" "primary" { provisioned_resources_parent = google_folder.folder1.name organization = "{{org_id}}" location = "us-central1" + resource_settings { + resource_type = "CONSUMER_FOLDER" + display_name = "folder-display-name" + } + violation_notifications_enabled = true } resource "google_folder" "folder1" { diff --git a/tpgtools/overrides/assuredworkloads/samples/workload/basic_update.tf.tmpl 
b/tpgtools/overrides/assuredworkloads/samples/workload/basic_update.tf.tmpl index 9da19474c62f..4d5fb120eb93 100644 --- a/tpgtools/overrides/assuredworkloads/samples/workload/basic_update.tf.tmpl +++ b/tpgtools/overrides/assuredworkloads/samples/workload/basic_update.tf.tmpl @@ -8,6 +8,11 @@ resource "google_assured_workloads_workload" "primary" { provisioned_resources_parent = google_folder.folder1.name organization = "{{org_id}}" location = "us-central1" + resource_settings { + resource_type = "CONSUMER_FOLDER" + display_name = "folder-display-name" + } + violation_notifications_enabled = true } resource "google_folder" "folder1" { diff --git a/tpgtools/overrides/containeraws/samples/cluster/basic.tf.tmpl b/tpgtools/overrides/containeraws/samples/cluster/basic.tf.tmpl index ffa75537965e..cba4d5fefbb9 100644 --- a/tpgtools/overrides/containeraws/samples/cluster/basic.tf.tmpl +++ b/tpgtools/overrides/containeraws/samples/cluster/basic.tf.tmpl @@ -8,6 +8,9 @@ resource "google_container_aws_cluster" "primary" { admin_users { username = "{{test_service_account}}" } + admin_groups { + group = "group@domain.com" + } } aws_region = "{{aws_region}}" diff --git a/tpgtools/overrides/containeraws/samples/cluster/basic_update.tf.tmpl b/tpgtools/overrides/containeraws/samples/cluster/basic_update.tf.tmpl index c20cc9b2345d..5273978a0121 100644 --- a/tpgtools/overrides/containeraws/samples/cluster/basic_update.tf.tmpl +++ b/tpgtools/overrides/containeraws/samples/cluster/basic_update.tf.tmpl @@ -8,6 +8,9 @@ resource "google_container_aws_cluster" "primary" { admin_users { username = "{{test_service_account}}" } + admin_groups { + group = "group@domain.com" + } } aws_region = "{{aws_region}}" diff --git a/tpgtools/overrides/containerazure/samples/cluster/basic.tf.tmpl b/tpgtools/overrides/containerazure/samples/cluster/basic.tf.tmpl index dd043e5fe5ce..712364a3b4b1 100644 --- a/tpgtools/overrides/containerazure/samples/cluster/basic.tf.tmpl +++ 
b/tpgtools/overrides/containerazure/samples/cluster/basic.tf.tmpl @@ -8,6 +8,9 @@ resource "google_container_azure_cluster" "primary" { admin_users { username = "mmv2@google.com" } + admin_groups { + group = "group@domain.com" + } } azure_region = "westus2" diff --git a/tpgtools/overrides/containerazure/samples/cluster/basic_update.tf.tmpl b/tpgtools/overrides/containerazure/samples/cluster/basic_update.tf.tmpl index 0c2632431f82..a010a7fa4126 100644 --- a/tpgtools/overrides/containerazure/samples/cluster/basic_update.tf.tmpl +++ b/tpgtools/overrides/containerazure/samples/cluster/basic_update.tf.tmpl @@ -8,6 +8,9 @@ resource "google_container_azure_cluster" "primary" { admin_users { username = "mmv2@google.com" } + admin_groups { + group = "group@domain.com" + } } azure_region = "westus2" diff --git a/tpgtools/overrides/orgpolicy/beta/policy.yaml b/tpgtools/overrides/orgpolicy/beta/policy.yaml index c27653ff9807..e31fa2fc2c0a 100644 --- a/tpgtools/overrides/orgpolicy/beta/policy.yaml +++ b/tpgtools/overrides/orgpolicy/beta/policy.yaml @@ -7,3 +7,5 @@ field: spec.rules.deny_all - type: ENUM_BOOL field: spec.rules.enforce +- type: EXCLUDE + field: dry_run_spec diff --git a/tpgtools/overrides/orgpolicy/policy.yaml b/tpgtools/overrides/orgpolicy/policy.yaml index c27653ff9807..08ff9e6b371f 100644 --- a/tpgtools/overrides/orgpolicy/policy.yaml +++ b/tpgtools/overrides/orgpolicy/policy.yaml @@ -7,3 +7,9 @@ field: spec.rules.deny_all - type: ENUM_BOOL field: spec.rules.enforce +- type: ENUM_BOOL + field: dry_run_spec.rules.allow_all +- type: ENUM_BOOL + field: dry_run_spec.rules.deny_all +- type: ENUM_BOOL + field: dry_run_spec.rules.enforce \ No newline at end of file diff --git a/tpgtools/overrides/orgpolicy/samples/policy/meta.yaml b/tpgtools/overrides/orgpolicy/samples/policy/meta.yaml index aecd139b9102..fe632768fe17 100644 --- a/tpgtools/overrides/orgpolicy/samples/policy/meta.yaml +++ b/tpgtools/overrides/orgpolicy/samples/policy/meta.yaml @@ -1,3 +1,8 @@ 
ignore_read: - name - "spec.0.rules.0.condition.0.expression" +# The feature for this sample is not ready +test_hide: +- organization_dry_run_policy.yaml +doc_hide: +- organization_dry_run_policy.yaml