diff --git a/.changelog/8855.txt b/.changelog/8855.txt new file mode 100644 index 0000000000..8ec013c069 --- /dev/null +++ b/.changelog/8855.txt @@ -0,0 +1,3 @@ +```release-note:none + +``` diff --git a/.teamcity/components/build_config_package.kt b/.teamcity/components/build_config_package.kt index d46cf0c6e5..e900280073 100644 --- a/.teamcity/components/build_config_package.kt +++ b/.teamcity/components/build_config_package.kt @@ -50,6 +50,7 @@ class packageDetails(packageName: String, displayName: String, providerName: Str // TODO(SarahFrench) Split TerraformAcceptanceTestParameters function into 2: one that's used for all tests/sweeper commands, and one that's specific to sweepers // We shouldn't be adding sweeper-specific parameters to non-sweeper builds TerraformAcceptanceTestParameters(parallelism, testPrefix, testTimeout, sweeperRegions, sweeperRun) + TerraformLoggingParameters() TerraformAcceptanceTestsFlag() TerraformCoreBinaryTesting() TerraformShouldPanicForSchemaErrors() @@ -57,6 +58,8 @@ class packageDetails(packageName: String, displayName: String, providerName: Str WorkingDirectory(path) } + artifactRules = "%teamcity.build.checkoutDir%/debug*.txt" + failureConditions { errorMessage = true executionTimeoutMin = buildTimeout diff --git a/.teamcity/components/generated/build_components.kt b/.teamcity/components/generated/build_components.kt index 5924646f9f..c74795aa6c 100644 --- a/.teamcity/components/generated/build_components.kt +++ b/.teamcity/components/generated/build_components.kt @@ -136,6 +136,17 @@ fun ParametrizedWithType.TerraformAcceptanceTestParameters(parallelism : Int, pr text("SWEEP_RUN", sweepRun) } +fun ParametrizedWithType.TerraformLoggingParameters() { + // Set logging levels to match old projects + text("env.TF_LOG", "DEBUG") + text("env.TF_LOG_CORE", "WARN") + text("env.TF_LOG_SDK_FRAMEWORK", "INFO") + + // Set where logs are sent + text("PROVIDER_NAME", providerName) + text("env.TF_LOG_PATH_MASK", 
"%system.teamcity.build.checkoutDir%/debug-%PROVIDER_NAME%-%env.BUILD_NUMBER%-%s.txt") +} + fun ParametrizedWithType.ReadOnlySettings() { hiddenVariable("teamcity.ui.settings.readOnly", "true", "Requires build configurations be edited via Kotlin") } diff --git a/.teamcity/components/sweepers.kt b/.teamcity/components/sweepers.kt index 9614f12b17..6698a5fb35 100644 --- a/.teamcity/components/sweepers.kt +++ b/.teamcity/components/sweepers.kt @@ -50,6 +50,7 @@ class sweeperDetails() { params { ConfigureGoogleSpecificTestParameters(environmentVariables) TerraformAcceptanceTestParameters(parallelism, testPrefix, testTimeout, sweeperRegions, sweeperRun) + TerraformLoggingParameters() TerraformAcceptanceTestsFlag() TerraformCoreBinaryTesting() TerraformShouldPanicForSchemaErrors() @@ -57,6 +58,8 @@ class sweeperDetails() { WorkingDirectory(path) } + artifactRules = "%teamcity.build.checkoutDir%/debug*.txt" + failureConditions { errorMessage = true executionTimeoutMin = buildTimeout diff --git a/go.mod b/go.mod index dec47b9ea4..2f69b05ee9 100644 --- a/go.mod +++ b/go.mod @@ -23,20 +23,20 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/hashstructure v1.1.0 github.com/sirupsen/logrus v1.8.1 - golang.org/x/net v0.12.0 - golang.org/x/oauth2 v0.10.0 - google.golang.org/api v0.135.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e + golang.org/x/net v0.14.0 + golang.org/x/oauth2 v0.11.0 + google.golang.org/api v0.138.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 google.golang.org/grpc v1.57.0 google.golang.org/protobuf v1.31.0 ) require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.110.4 // indirect - cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go v0.110.6 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.0 // indirect + 
cloud.google.com/go/iam v1.1.1 // indirect cloud.google.com/go/longrunning v0.5.1 // indirect github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect @@ -54,7 +54,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 // indirect - github.com/google/s2a-go v0.1.4 // indirect + github.com/google/s2a-go v0.1.5 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect @@ -84,13 +84,13 @@ require ( github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.11.0 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.11.0 // indirect + golang.org/x/crypto v0.12.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 240e5e8f09..2052032410 100644 --- a/go.sum +++ b/go.sum @@ -2,16 +2,16 @@ bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0 bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= -cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/bigtable v1.19.0 h1:wiq9LT0kukfInzvy1joMDijCw/OD1UChpSbORXYn0LI= cloud.google.com/go/bigtable v1.19.0/go.mod h1:xl5kPa8PTkJjdBxg6qdGH88464nNqmbISHSRU+D2yFE= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= -cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -135,8 +135,8 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 h1:5/4TSDzpDnHQ8rKEEQBjRlYx77mHOvXu08oGchxej7o= 
github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932/go.mod h1:cC6EdPbj/17GFCPDK39NRarlMI+kt+O60S12cNB5J9Y= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= +github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -306,8 +306,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -335,13 +335,13 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= 
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -375,11 +375,11 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= 
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -387,8 +387,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -404,8 +404,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.135.0 h1:6Vgfj6uPMXcyy66waYWBwmkeNB+9GmUlJDOzkukPQYQ= -google.golang.org/api v0.135.0/go.mod h1:Bp77uRFgwsSKI0BWH573F5Q6wSlznwI2NFayLOp/7mQ= +google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0= +google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -416,12 +416,12 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230726155614-23370e0ffb3e h1:S83+ibolgyZ0bqz7KEsUOPErxcv4VzlszxY+31OfB/E= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/google-beta/acctest/framework_test_utils.go b/google-beta/acctest/framework_test_utils.go index f557853858..2e83d86126 100644 --- a/google-beta/acctest/framework_test_utils.go +++ b/google-beta/acctest/framework_test_utils.go @@ -6,18 +6,12 @@ import ( "context" "fmt" "log" - "reflect" - "regexp" - "strings" "testing" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - - "github.com/hashicorp/terraform-provider-google-beta/google-beta/fwtransport" - "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" ) func GetFwTestProvider(t *testing.T) *frameworkTestProvider { @@ -81,48 +75,3 @@ func testStringValue(sPtr *string) string { return *sPtr } - -// This function isn't a test of transport.go; instead, it is used as an alternative -// 
to ReplaceVars inside tests. -func ReplaceVarsForFrameworkTest(prov *fwtransport.FrameworkProviderConfig, rs *terraform.ResourceState, linkTmpl string) (string, error) { - re := regexp.MustCompile("{{([[:word:]]+)}}") - var project, region, zone string - - if strings.Contains(linkTmpl, "{{project}}") { - project = rs.Primary.Attributes["project"] - } - - if strings.Contains(linkTmpl, "{{region}}") { - region = tpgresource.GetResourceNameFromSelfLink(rs.Primary.Attributes["region"]) - } - - if strings.Contains(linkTmpl, "{{zone}}") { - zone = tpgresource.GetResourceNameFromSelfLink(rs.Primary.Attributes["zone"]) - } - - replaceFunc := func(s string) string { - m := re.FindStringSubmatch(s)[1] - if m == "project" { - return project - } - if m == "region" { - return region - } - if m == "zone" { - return zone - } - - if v, ok := rs.Primary.Attributes[m]; ok { - return v - } - - // Attempt to draw values from the provider - if f := reflect.Indirect(reflect.ValueOf(prov)).FieldByName(m); f.IsValid() { - return f.String() - } - - return "" - } - - return re.ReplaceAllStringFunc(linkTmpl, replaceFunc), nil -} diff --git a/google-beta/acctest/test_utils.go b/google-beta/acctest/test_utils.go index 7ea1fb7b54..02784529f9 100644 --- a/google-beta/acctest/test_utils.go +++ b/google-beta/acctest/test_utils.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" ) func CheckDataSourceStateMatchesResourceState(dataSourceName, resourceName string) func(*terraform.State) error { @@ -202,3 +203,56 @@ func CreateZIPArchiveForCloudFunctionSource(t *testing.T, sourcePath string) str } return tmpfile.Name() } + +// providerConfigEnvNames returns a list of all the environment variables that could be set by a user to configure the provider +func 
providerConfigEnvNames() []string { + + envs := []string{} + + // Use existing collections of ENV names + envVarsSets := [][]string{ + envvar.CredsEnvVars, // credentials field + envvar.ProjectEnvVars, // project field + envvar.RegionEnvVars, //region field + envvar.ZoneEnvVars, // zone field + } + for _, set := range envVarsSets { + envs = append(envs, set...) + } + + // Add remaining ENVs + envs = append(envs, "GOOGLE_OAUTH_ACCESS_TOKEN") // access_token field + envs = append(envs, "GOOGLE_BILLING_PROJECT") // billing_project field + envs = append(envs, "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT") // impersonate_service_account field + envs = append(envs, "USER_PROJECT_OVERRIDE") // user_project_override field + envs = append(envs, "CLOUDSDK_CORE_REQUEST_REASON") // request_reason field + + return envs +} + +// UnsetProviderConfigEnvs unsets any ENVs in the test environment that +// configure the provider. +// The testing package will restore the original values after the test +func UnsetTestProviderConfigEnvs(t *testing.T) { + envs := providerConfigEnvNames() + if len(envs) > 0 { + for _, k := range envs { + t.Setenv(k, "") + } + } +} + +func SetupTestEnvs(t *testing.T, envValues map[string]string) { + // Set ENVs + if len(envValues) > 0 { + for k, v := range envValues { + t.Setenv(k, v) + } + } +} + +// Returns a fake credentials JSON string with the client_email set to a test-specific value +func GenerateFakeCredentialsJson(testId string) string { + json := fmt.Sprintf(`{"private_key_id": "foo","private_key": "bar","client_email": "%s@example.com","client_id": "id@foo.com","type": "service_account"}`, testId) + return json +} diff --git a/google-beta/fwprovider/framework_provider_test.go b/google-beta/fwprovider/framework_provider_test.go index 69eb9fe863..f55237707a 100644 --- a/google-beta/fwprovider/framework_provider_test.go +++ b/google-beta/fwprovider/framework_provider_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" 
"github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/fwresource" "github.com/hashicorp/terraform-provider-google-beta/google-beta/fwtransport" "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" @@ -223,7 +224,7 @@ func testAccCheckDNSManagedZoneDestroyProducerFramework(t *testing.T) func(s *te p := acctest.GetFwTestProvider(t) - url, err := acctest.ReplaceVarsForFrameworkTest(&p.FrameworkProvider.FrameworkProviderConfig, rs, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") + url, err := fwresource.ReplaceVarsForFrameworkTest(&p.FrameworkProvider.FrameworkProviderConfig, rs, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") if err != nil { return err } diff --git a/google-beta/fwresource/field_helpers.go b/google-beta/fwresource/field_helpers.go index b7d420f963..2d8476eac5 100644 --- a/google-beta/fwresource/field_helpers.go +++ b/google-beta/fwresource/field_helpers.go @@ -4,10 +4,14 @@ package fwresource import ( "fmt" + "reflect" "regexp" + "strings" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/fwtransport" "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" ) @@ -64,3 +68,48 @@ func ParseProjectFieldValueFramework(resourceType, fieldValue, projectSchemaFiel ResourceType: resourceType, } } + +// This function isn't a test of transport.go; instead, it is used as an alternative +// to ReplaceVars inside tests. 
+func ReplaceVarsForFrameworkTest(prov *fwtransport.FrameworkProviderConfig, rs *terraform.ResourceState, linkTmpl string) (string, error) { + re := regexp.MustCompile("{{([[:word:]]+)}}") + var project, region, zone string + + if strings.Contains(linkTmpl, "{{project}}") { + project = rs.Primary.Attributes["project"] + } + + if strings.Contains(linkTmpl, "{{region}}") { + region = tpgresource.GetResourceNameFromSelfLink(rs.Primary.Attributes["region"]) + } + + if strings.Contains(linkTmpl, "{{zone}}") { + zone = tpgresource.GetResourceNameFromSelfLink(rs.Primary.Attributes["zone"]) + } + + replaceFunc := func(s string) string { + m := re.FindStringSubmatch(s)[1] + if m == "project" { + return project + } + if m == "region" { + return region + } + if m == "zone" { + return zone + } + + if v, ok := rs.Primary.Attributes[m]; ok { + return v + } + + // Attempt to draw values from the provider + if f := reflect.Indirect(reflect.ValueOf(prov)).FieldByName(m); f.IsValid() { + return f.String() + } + + return "" + } + + return re.ReplaceAllStringFunc(linkTmpl, replaceFunc), nil +} diff --git a/google-beta/provider/provider.go b/google-beta/provider/provider.go index f8e90c57d0..173ce97c81 100644 --- a/google-beta/provider/provider.go +++ b/google-beta/provider/provider.go @@ -1028,6 +1028,7 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { "google_gke_backup_backup_plan_iam_policy": tpgiamresource.DataSourceIamPolicy(gkebackup.GKEBackupBackupPlanIamSchema, gkebackup.GKEBackupBackupPlanIamUpdaterProducer), "google_gke_hub_membership_iam_policy": tpgiamresource.DataSourceIamPolicy(gkehub.GKEHubMembershipIamSchema, gkehub.GKEHubMembershipIamUpdaterProducer), "google_gke_hub_feature_iam_policy": tpgiamresource.DataSourceIamPolicy(gkehub2.GKEHub2FeatureIamSchema, gkehub2.GKEHub2FeatureIamUpdaterProducer), + "google_gke_hub_scope_iam_policy": tpgiamresource.DataSourceIamPolicy(gkehub2.GKEHub2ScopeIamSchema, gkehub2.GKEHub2ScopeIamUpdaterProducer), 
"google_healthcare_consent_store_iam_policy": tpgiamresource.DataSourceIamPolicy(healthcare.HealthcareConsentStoreIamSchema, healthcare.HealthcareConsentStoreIamUpdaterProducer), "google_iap_app_engine_service_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapAppEngineServiceIamSchema, iap.IapAppEngineServiceIamUpdaterProducer), "google_iap_app_engine_version_iam_policy": tpgiamresource.DataSourceIamPolicy(iap.IapAppEngineVersionIamSchema, iap.IapAppEngineVersionIamUpdaterProducer), @@ -1085,9 +1086,9 @@ func DatasourceMapWithErrors() (map[string]*schema.Resource, error) { }) } -// Generated resources: 364 -// Generated IAM resources: 231 -// Total generated resources: 595 +// Generated resources: 370 +// Generated IAM resources: 234 +// Total generated resources: 604 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -1165,6 +1166,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_beyondcorp_app_connector": beyondcorp.ResourceBeyondcorpAppConnector(), "google_beyondcorp_app_gateway": beyondcorp.ResourceBeyondcorpAppGateway(), "google_biglake_catalog": biglake.ResourceBiglakeCatalog(), + "google_biglake_database": biglake.ResourceBiglakeDatabase(), "google_bigquery_dataset": bigquery.ResourceBigQueryDataset(), "google_bigquery_dataset_access": bigquery.ResourceBigQueryDatasetAccess(), "google_bigquery_job": bigquery.ResourceBigQueryJob(), @@ -1284,6 +1286,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_compute_machine_image_iam_policy": tpgiamresource.ResourceIamPolicy(compute.ComputeMachineImageIamSchema, compute.ComputeMachineImageIamUpdaterProducer, compute.ComputeMachineImageIdParseFunc), "google_compute_managed_ssl_certificate": compute.ResourceComputeManagedSslCertificate(), "google_compute_network": compute.ResourceComputeNetwork(), + "google_compute_network_attachment": compute.ResourceComputeNetworkAttachment(), 
"google_compute_network_edge_security_service": compute.ResourceComputeNetworkEdgeSecurityService(), "google_compute_network_endpoint": compute.ResourceComputeNetworkEndpoint(), "google_compute_network_endpoint_group": compute.ResourceComputeNetworkEndpointGroup(), @@ -1470,7 +1473,14 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_gke_hub_feature_iam_binding": tpgiamresource.ResourceIamBinding(gkehub2.GKEHub2FeatureIamSchema, gkehub2.GKEHub2FeatureIamUpdaterProducer, gkehub2.GKEHub2FeatureIdParseFunc), "google_gke_hub_feature_iam_member": tpgiamresource.ResourceIamMember(gkehub2.GKEHub2FeatureIamSchema, gkehub2.GKEHub2FeatureIamUpdaterProducer, gkehub2.GKEHub2FeatureIdParseFunc), "google_gke_hub_feature_iam_policy": tpgiamresource.ResourceIamPolicy(gkehub2.GKEHub2FeatureIamSchema, gkehub2.GKEHub2FeatureIamUpdaterProducer, gkehub2.GKEHub2FeatureIdParseFunc), + "google_gke_hub_membership_binding": gkehub2.ResourceGKEHub2MembershipBinding(), "google_gke_hub_membership_rbac_role_binding": gkehub2.ResourceGKEHub2MembershipRBACRoleBinding(), + "google_gke_hub_namespace": gkehub2.ResourceGKEHub2Namespace(), + "google_gke_hub_scope": gkehub2.ResourceGKEHub2Scope(), + "google_gke_hub_scope_iam_binding": tpgiamresource.ResourceIamBinding(gkehub2.GKEHub2ScopeIamSchema, gkehub2.GKEHub2ScopeIamUpdaterProducer, gkehub2.GKEHub2ScopeIdParseFunc), + "google_gke_hub_scope_iam_member": tpgiamresource.ResourceIamMember(gkehub2.GKEHub2ScopeIamSchema, gkehub2.GKEHub2ScopeIamUpdaterProducer, gkehub2.GKEHub2ScopeIdParseFunc), + "google_gke_hub_scope_iam_policy": tpgiamresource.ResourceIamPolicy(gkehub2.GKEHub2ScopeIamSchema, gkehub2.GKEHub2ScopeIamUpdaterProducer, gkehub2.GKEHub2ScopeIdParseFunc), + "google_gke_hub_scope_rbac_role_binding": gkehub2.ResourceGKEHub2ScopeRBACRoleBinding(), "google_gkeonprem_bare_metal_admin_cluster": gkeonprem.ResourceGkeonpremBareMetalAdminCluster(), "google_gkeonprem_bare_metal_cluster": 
gkeonprem.ResourceGkeonpremBareMetalCluster(), "google_gkeonprem_bare_metal_node_pool": gkeonprem.ResourceGkeonpremBareMetalNodePool(), diff --git a/google-beta/provider/provider_internal_test.go b/google-beta/provider/provider_internal_test.go index fb77ee5a64..99cf803a13 100644 --- a/google-beta/provider/provider_internal_test.go +++ b/google-beta/provider/provider_internal_test.go @@ -5,11 +5,10 @@ package provider_test import ( "context" "errors" - "fmt" "os" "testing" - "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" "github.com/hashicorp/terraform-provider-google-beta/google-beta/provider" "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" @@ -85,59 +84,6 @@ func TestProvider_ValidateCredentials(t *testing.T) { } } -// ProviderConfigEnvNames returns a list of all the environment variables that could be set by a user to configure the provider -func ProviderConfigEnvNames() []string { - - envs := []string{} - - // Use existing collections of ENV names - envVarsSets := [][]string{ - envvar.CredsEnvVars, // credentials field - envvar.ProjectEnvVars, // project field - envvar.RegionEnvVars, //region field - envvar.ZoneEnvVars, // zone field - } - for _, set := range envVarsSets { - envs = append(envs, set...) - } - - // Add remaining ENVs - envs = append(envs, "GOOGLE_OAUTH_ACCESS_TOKEN") // access_token field - envs = append(envs, "GOOGLE_BILLING_PROJECT") // billing_project field - envs = append(envs, "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT") // impersonate_service_account field - envs = append(envs, "USER_PROJECT_OVERRIDE") // user_project_override field - envs = append(envs, "CLOUDSDK_CORE_REQUEST_REASON") // request_reason field - - return envs -} - -// unsetProviderConfigEnvs unsets any ENVs in the test environment that -// configure the provider. 
-// The testing package will restore the original values after the test -func unsetTestProviderConfigEnvs(t *testing.T) { - envs := ProviderConfigEnvNames() - if len(envs) > 0 { - for _, k := range envs { - t.Setenv(k, "") - } - } -} - -func setupTestEnvs(t *testing.T, envValues map[string]string) { - // Set ENVs - if len(envValues) > 0 { - for k, v := range envValues { - t.Setenv(k, v) - } - } -} - -// Returns a fake credentials JSON string with the client_email set to a test-specific value -func generateFakeCredentialsJson(testId string) string { - json := fmt.Sprintf(`{"private_key_id": "foo","private_key": "bar","client_email": "%s@example.com","client_id": "id@foo.com","type": "service_account"}`, testId) - return json -} - func TestProvider_ProviderConfigure_credentials(t *testing.T) { const pathToMissingFile string = "./this/path/doesnt/exist.json" // Doesn't exist @@ -168,46 +114,46 @@ func TestProvider_ProviderConfigure_credentials(t *testing.T) { }, "credentials set in the config are not overridden by environment variables": { ConfigValues: map[string]interface{}{ - "credentials": generateFakeCredentialsJson("test"), + "credentials": acctest.GenerateFakeCredentialsJson("test"), }, EnvVariables: map[string]string{ - "GOOGLE_CREDENTIALS": generateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - "GOOGLE_CLOUD_KEYFILE_JSON": generateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": generateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": generateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), + "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), + "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), + "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), }, - ExpectedSchemaValue: 
generateFakeCredentialsJson("test"), - ExpectedConfigValue: generateFakeCredentialsJson("test"), + ExpectedSchemaValue: acctest.GenerateFakeCredentialsJson("test"), + ExpectedConfigValue: acctest.GenerateFakeCredentialsJson("test"), }, "when credentials is unset in the config, environment variables are used: GOOGLE_CREDENTIALS used first": { EnvVariables: map[string]string{ - "GOOGLE_CREDENTIALS": generateFakeCredentialsJson("GOOGLE_CREDENTIALS"), - "GOOGLE_CLOUD_KEYFILE_JSON": generateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": generateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": generateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), + "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), + "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), + "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), }, ExpectedSchemaValue: "", - ExpectedConfigValue: generateFakeCredentialsJson("GOOGLE_CREDENTIALS"), + ExpectedConfigValue: acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), }, "when credentials is unset in the config, environment variables are used: GOOGLE_CLOUD_KEYFILE_JSON used second": { EnvVariables: map[string]string{ // GOOGLE_CREDENTIALS not set - "GOOGLE_CLOUD_KEYFILE_JSON": generateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), - "GCLOUD_KEYFILE_JSON": generateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": generateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), + "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), + "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + "GOOGLE_APPLICATION_CREDENTIALS": 
acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), }, ExpectedSchemaValue: "", - ExpectedConfigValue: generateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), + ExpectedConfigValue: acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), }, "when credentials is unset in the config, environment variables are used: GCLOUD_KEYFILE_JSON used third": { EnvVariables: map[string]string{ // GOOGLE_CREDENTIALS not set // GOOGLE_CLOUD_KEYFILE_JSON not set - "GCLOUD_KEYFILE_JSON": generateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), - "GOOGLE_APPLICATION_CREDENTIALS": generateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), + "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), }, ExpectedSchemaValue: "", - ExpectedConfigValue: generateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + ExpectedConfigValue: acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), }, "when credentials is unset in the config (and access_token unset), GOOGLE_APPLICATION_CREDENTIALS is used for auth but not to set values in the config": { EnvVariables: map[string]string{ @@ -253,8 +199,8 @@ func TestProvider_ProviderConfigure_credentials(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -373,8 +319,8 @@ func TestProvider_ProviderConfigure_accessToken(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ 
-475,8 +421,8 @@ func TestProvider_ProviderConfigure_impersonateServiceAccount(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -571,8 +517,8 @@ func TestProvider_ProviderConfigure_impersonateServiceAccountDelegates(t *testin // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -735,8 +681,8 @@ func TestProvider_ProviderConfigure_project(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -846,8 +792,8 @@ func TestProvider_ProviderConfigure_billingProject(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -971,8 +917,8 @@ func TestProvider_ProviderConfigure_region(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -1124,8 +1070,8 @@ func 
TestProvider_ProviderConfigure_zone(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -1252,8 +1198,8 @@ func TestProvider_ProviderConfigure_userProjectOverride(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -1354,8 +1300,8 @@ func TestProvider_ProviderConfigure_scopes(t *testing.T) { // Arrange ctx := context.Background() - unsetTestProviderConfigEnvs(t) - setupTestEnvs(t, tc.EnvVariables) + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) p := provider.Provider() d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) @@ -1418,3 +1364,330 @@ func TestProvider_ProviderConfigure_scopes(t *testing.T) { }) } } + +func TestProvider_ProviderConfigure_requestTimeout(t *testing.T) { + cases := map[string]struct { + ConfigValues map[string]interface{} + ExpectedValue string + ExpectedSchemaValue string + ExpectError bool + ExpectFieldUnset bool + }{ + "if a valid request_timeout is configured in the provider, no error will occur": { + ConfigValues: map[string]interface{}{ + "request_timeout": "10s", + "credentials": transport_tpg.TestFakeCredentialsPath, + }, + ExpectedValue: "10s", + ExpectedSchemaValue: "10s", + }, + "if an invalid request_timeout is configured in the provider, an error will occur": { + ConfigValues: map[string]interface{}{ + "request_timeout": "timeout", + "credentials": transport_tpg.TestFakeCredentialsPath, + }, + ExpectedValue: "timeout", + 
ExpectedSchemaValue: "timeout", + ExpectError: true, + ExpectFieldUnset: false, + }, + // it's default value is set when RequestTimeout value is 0. + // This can be seen in this part of the config code where the default value is set to 120s + // https://github.com/hashicorp/terraform-provider-google/blob/09cb850ee64bcd78e4457df70905530c1ed75f19/google/transport/config.go#L1228-L1233 + "when config is unset, the value will be 0s in order to set the default value": { + ConfigValues: map[string]interface{}{ + "credentials": transport_tpg.TestFakeCredentialsPath, + }, + ExpectedValue: "0s", + ExpectFieldUnset: true, + }, + "when value is empty, the value will be 0s in order to set the default value": { + ConfigValues: map[string]interface{}{ + "request_timeout": "", + "credentials": transport_tpg.TestFakeCredentialsPath, + }, + ExpectedValue: "0s", + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + ctx := context.Background() + acctest.UnsetTestProviderConfigEnvs(t) + p := provider.Provider() + d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) + + // Act + c, diags := provider.ProviderConfigure(ctx, d, p) + + // Assert + if diags.HasError() && !tc.ExpectError { + t.Fatalf("unexpected error(s): %#v", diags) + } + if !diags.HasError() && tc.ExpectError { + t.Fatal("expected error(s) but got none") + } + if diags.HasError() && tc.ExpectError { + v, ok := d.GetOk("request_timeout") + if ok { + val := v.(string) + if val != tc.ExpectedSchemaValue { + t.Fatalf("expected request_timeout value set in provider data to be %s, got %s", tc.ExpectedSchemaValue, val) + } + if tc.ExpectFieldUnset { + t.Fatalf("expected request_timeout value to not be set in provider data, got %s", val) + } + } + // Return early in tests where errors expected + return + } + + v := d.Get("request_timeout") // checks for an empty or "0" string in order to set the default value + val := v.(string) + config := 
c.(*transport_tpg.Config) // Should be non-nil value, as test cases reaching this point experienced no errors + + if val != tc.ExpectedSchemaValue { + t.Fatalf("expected request_timeout value set in provider data to be %s, got %s", tc.ExpectedSchemaValue, val) + } + if config.RequestTimeout.String() != tc.ExpectedValue { + t.Fatalf("expected request_timeout value in provider struct to be %s, got %v", tc.ExpectedValue, config.RequestTimeout.String()) + } + }) + } +} + +func TestProvider_ProviderConfigure_requestReason(t *testing.T) { + + cases := map[string]struct { + ConfigValues map[string]interface{} + EnvVariables map[string]string + ExpectError bool + ExpectFieldUnset bool + ExpectedSchemaValue string + ExpectedConfigValue string + }{ + "when request_reason is unset in the config, environment variable CLOUDSDK_CORE_REQUEST_REASON is used": { + ConfigValues: map[string]interface{}{ + // request_reason unset + "credentials": transport_tpg.TestFakeCredentialsPath, + }, + EnvVariables: map[string]string{ + "CLOUDSDK_CORE_REQUEST_REASON": "test", + }, + ExpectedSchemaValue: "test", + ExpectedConfigValue: "test", + }, + "request_reason set in the config is not overridden by environment variables": { + ConfigValues: map[string]interface{}{ + "request_reason": "request test", + "credentials": transport_tpg.TestFakeCredentialsPath, + }, + EnvVariables: map[string]string{ + "CLOUDSDK_CORE_REQUEST_REASON": "test", + }, + ExpectedSchemaValue: "request test", + ExpectedConfigValue: "request test", + }, + "when no request_reason is provided via config or environment variables, the field remains unset without error": { + ConfigValues: map[string]interface{}{ + // request_reason unset + "credentials": transport_tpg.TestFakeCredentialsPath, + }, + ExpectedConfigValue: "", + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + ctx := context.Background() + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + p 
:= provider.Provider() + d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) + + // Act + c, diags := provider.ProviderConfigure(ctx, d, p) + + // Assert + if diags.HasError() && !tc.ExpectError { + t.Fatalf("unexpected error(s): %#v", diags) + } + if !diags.HasError() && tc.ExpectError { + t.Fatal("expected error(s) but got none") + } + if diags.HasError() && tc.ExpectError { + v, ok := d.GetOk("request_reason") + if ok { + val := v.(string) + if val != tc.ExpectedSchemaValue { + t.Fatalf("expected request_reason value set in provider data to be %s, got %s", tc.ExpectedSchemaValue, val) + } + if tc.ExpectFieldUnset { + t.Fatalf("expected request_reason value to not be set in provider data, got %s", val) + } + } + // Return early in tests where errors expected + return + } + + v := d.Get("request_reason") + val := v.(string) + config := c.(*transport_tpg.Config) // Should be non-nil value, as test cases reaching this point experienced no errors + + if v != tc.ExpectedSchemaValue { + t.Fatalf("expected request_reason value set in provider data to be %s, got %s", tc.ExpectedSchemaValue, val) + } + if config.RequestReason != tc.ExpectedConfigValue { + t.Fatalf("expected request_reason value in provider struct to be %s, got %s", tc.ExpectedConfigValue, config.Credentials) + } + }) + } +} + +func TestProvider_ProviderConfigure_batching(t *testing.T) { + //var batch []interface{} + cases := map[string]struct { + ConfigValues map[string]interface{} + EnvVariables map[string]string + ExpectError bool + ExpectFieldUnset bool + ExpectedEnableBatchingValue bool + ExpectedSendAfterValue string + }{ + "if batch is an empty block, it will set the default values": { + ConfigValues: map[string]interface{}{ + "credentials": transport_tpg.TestFakeCredentialsPath, + }, + // Although at the schema level it's shown that by default it's set to false, the actual default value + // is true and can be seen in the `ExpanderProviderBatchingConfig` struct + // 
https://github.com/GoogleCloudPlatform/magic-modules/blob/8cd4a506f0ac4db7b07a8cce914449d34df6f20b/mmv1/third_party/terraform/transport/config.go.erb#L504-L508 + ExpectedEnableBatchingValue: false, + ExpectedSendAfterValue: "", // uses "" value to be able to set the default value of 30s + ExpectFieldUnset: true, + }, + "if batch is configured with both enable_batching and send_after": { + ConfigValues: map[string]interface{}{ + "credentials": transport_tpg.TestFakeCredentialsPath, + "batching": []interface{}{ + map[string]interface{}{ + "enable_batching": true, + "send_after": "10s", + }, + }, + }, + ExpectedEnableBatchingValue: true, + ExpectedSendAfterValue: "10s", + }, + "if batch is configured with only enable_batching": { + ConfigValues: map[string]interface{}{ + "credentials": transport_tpg.TestFakeCredentialsPath, + "batching": []interface{}{ + map[string]interface{}{ + "enable_batching": true, + }, + }, + }, + ExpectedEnableBatchingValue: true, + ExpectedSendAfterValue: "", + }, + "if batch is configured with only send_after": { + ConfigValues: map[string]interface{}{ + "credentials": transport_tpg.TestFakeCredentialsPath, + "batching": []interface{}{ + map[string]interface{}{ + "send_after": "10s", + }, + }, + }, + ExpectedEnableBatchingValue: false, + ExpectedSendAfterValue: "10s", + }, + "if batch is configured with invalid value for send_after": { + ConfigValues: map[string]interface{}{ + "credentials": transport_tpg.TestFakeCredentialsPath, + "batching": []interface{}{ + map[string]interface{}{ + "send_after": "invalid value", + }, + }, + }, + ExpectedSendAfterValue: "invalid value", + ExpectError: true, + }, + "if batch is configured with value without seconds (s) for send_after": { + ConfigValues: map[string]interface{}{ + "credentials": transport_tpg.TestFakeCredentialsPath, + "batching": []interface{}{ + map[string]interface{}{ + "send_after": "10", + }, + }, + }, + ExpectedSendAfterValue: "10", + ExpectError: true, + }, + } + + for tn, tc := range 
cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + ctx := context.Background() + acctest.UnsetTestProviderConfigEnvs(t) + p := provider.Provider() + d := tpgresource.SetupTestResourceDataFromConfigMap(t, p.Schema, tc.ConfigValues) + + // Act + _, diags := provider.ProviderConfigure(ctx, d, p) + + // Assert + if diags.HasError() && !tc.ExpectError { + t.Fatalf("unexpected error(s): %#v", diags) + } + if !diags.HasError() && tc.ExpectError { + t.Fatal("expected error(s) but got none") + } + if diags.HasError() && tc.ExpectError { + v, ok := d.GetOk("batching.0.enable_batching") + val := v.(bool) + if ok { + if val != tc.ExpectedEnableBatchingValue { + t.Fatalf("expected request_timeout value set in provider data to be %v, got %v", tc.ExpectedEnableBatchingValue, val) + } + if tc.ExpectFieldUnset { + t.Fatalf("expected request_timeout value to not be set in provider data, got %v", val) + } + } + + v, ok = d.GetOk("batching.0.send_after") + if ok { + val := v.(string) + if val != tc.ExpectedSendAfterValue { + t.Fatalf("expected request_timeout value set in provider data to be %v, got %v", tc.ExpectedSendAfterValue, val) + } + if tc.ExpectFieldUnset { + t.Fatalf("expected request_timeout value to not be set in provider data, got %s", val) + } + } + // Return early in tests where errors expected + return + } + + v := d.Get("batching.0.enable_batching") + enableBatching := v.(bool) + if enableBatching != tc.ExpectedEnableBatchingValue { + t.Fatalf("expected enable_batching value set in provider data to be %v, got %v", tc.ExpectedEnableBatchingValue, enableBatching) + } + + v = d.Get("batching.0.send_after") // checks for an empty string in order to set the default value + sendAfter := v.(string) + if sendAfter != tc.ExpectedSendAfterValue { + t.Fatalf("expected send_after value set in provider data to be %s, got %s", tc.ExpectedSendAfterValue, sendAfter) + } + }) + } +} diff --git a/google-beta/services/appengine/resource_app_engine_standard_app_version_test.go 
b/google-beta/services/appengine/resource_app_engine_standard_app_version_test.go index 9279bf72b3..6b089508e0 100644 --- a/google-beta/services/appengine/resource_app_engine_standard_app_version_test.go +++ b/google-beta/services/appengine/resource_app_engine_standard_app_version_test.go @@ -177,7 +177,7 @@ resource "google_vpc_access_connector" "bar" { project = google_project.my_project.project_id name = "bar" region = "us-central1" - ip_cidr_range = "10.8.0.0/28" + ip_cidr_range = "10.8.0.16/28" network = "default" } diff --git a/google-beta/services/biglake/resource_biglake_database.go b/google-beta/services/biglake/resource_biglake_database.go new file mode 100644 index 0000000000..de6d533847 --- /dev/null +++ b/google-beta/services/biglake/resource_biglake_database.go @@ -0,0 +1,451 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package biglake + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func ResourceBiglakeDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceBiglakeDatabaseCreate, + Read: resourceBiglakeDatabaseRead, + Update: resourceBiglakeDatabaseUpdate, + Delete: resourceBiglakeDatabaseDelete, + + Importer: &schema.ResourceImporter{ + State: resourceBiglakeDatabaseImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "catalog": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The parent catalog.`, + }, + "hive_options": { + Type: schema.TypeList, + Required: true, + Description: `Options of a Hive database.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location_uri": { + Type: schema.TypeString, + Optional: true, + Description: `Cloud Storage folder URI where the database data is stored, starting with "gs://".`, + }, + "parameters": { + Type: schema.TypeMap, + Optional: true, + Description: `Stores user supplied Hive database parameters. An object containing a +list of"key": value pairs. 
+Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the database.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + Description: `The database type.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The creation time of the database. A timestamp in RFC3339 +UTC "Zulu" format, with nanosecond resolution and up to nine fractional +digits. Examples: "2014-10-02T15:01:23Z" and +"2014-10-02T15:01:23.045123456Z".`, + }, + "delete_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The deletion time of the database. Only set after the +database is deleted. A timestamp in RFC3339 UTC "Zulu" format, with +nanosecond resolution and up to nine fractional digits. Examples: +"2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "expire_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The time when this database is considered expired. Only set +after the database is deleted. A timestamp in RFC3339 UTC "Zulu" format, +with nanosecond resolution and up to nine fractional digits. Examples: +"2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The last modification time of the database. A timestamp in +RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine +fractional digits. 
Examples: "2014-10-02T15:01:23Z" and +"2014-10-02T15:01:23.045123456Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceBiglakeDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + typeProp, err := expandBiglakeDatabaseType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + hiveOptionsProp, err := expandBiglakeDatabaseHiveOptions(d.Get("hive_options"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hive_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(hiveOptionsProp)) && (ok || !reflect.DeepEqual(v, hiveOptionsProp)) { + obj["hiveOptions"] = hiveOptionsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BiglakeBasePath}}{{catalog}}/databases?databaseId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Database: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Database: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{catalog}}/databases/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) + + return 
resourceBiglakeDatabaseRead(d, meta) +} + +func resourceBiglakeDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BiglakeBasePath}}{{catalog}}/databases/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BiglakeDatabase %q", d.Id())) + } + + if err := d.Set("create_time", flattenBiglakeDatabaseCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("update_time", flattenBiglakeDatabaseUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("delete_time", flattenBiglakeDatabaseDeleteTime(res["deleteTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("expire_time", flattenBiglakeDatabaseExpireTime(res["expireTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("type", flattenBiglakeDatabaseType(res["type"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + if err := d.Set("hive_options", flattenBiglakeDatabaseHiveOptions(res["hiveOptions"], d, config)); err != nil { + return fmt.Errorf("Error reading Database: %s", err) + } + + return nil +} + +func resourceBiglakeDatabaseUpdate(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + obj := make(map[string]interface{}) + typeProp, err := expandBiglakeDatabaseType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + hiveOptionsProp, err := expandBiglakeDatabaseHiveOptions(d.Get("hive_options"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("hive_options"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, hiveOptionsProp)) { + obj["hiveOptions"] = hiveOptionsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{BiglakeBasePath}}{{catalog}}/databases/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Database %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("type") { + updateMask = append(updateMask, "type") + } + + if d.HasChange("hive_options") { + updateMask = append(updateMask, "hiveOptions") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating Database %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished 
updating Database %q: %#v", d.Id(), res) + } + + return resourceBiglakeDatabaseRead(d, meta) +} + +func resourceBiglakeDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + url, err := tpgresource.ReplaceVars(d, config, "{{BiglakeBasePath}}{{catalog}}/databases/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Database %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Database") + } + + log.Printf("[DEBUG] Finished deleting Database %q: %#v", d.Id(), res) + return nil +} + +func resourceBiglakeDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "(?P.+)/databases/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "{{catalog}}/databases/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenBiglakeDatabaseCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBiglakeDatabaseUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v 
+} + +func flattenBiglakeDatabaseDeleteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBiglakeDatabaseExpireTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBiglakeDatabaseType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBiglakeDatabaseHiveOptions(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["location_uri"] = + flattenBiglakeDatabaseHiveOptionsLocationUri(original["locationUri"], d, config) + transformed["parameters"] = + flattenBiglakeDatabaseHiveOptionsParameters(original["parameters"], d, config) + return []interface{}{transformed} +} +func flattenBiglakeDatabaseHiveOptionsLocationUri(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBiglakeDatabaseHiveOptionsParameters(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandBiglakeDatabaseType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBiglakeDatabaseHiveOptions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLocationUri, err := expandBiglakeDatabaseHiveOptionsLocationUri(original["location_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocationUri); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["locationUri"] = transformedLocationUri + } + + transformedParameters, err := expandBiglakeDatabaseHiveOptionsParameters(original["parameters"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedParameters); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["parameters"] = transformedParameters + } + + return transformed, nil +} + +func expandBiglakeDatabaseHiveOptionsLocationUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandBiglakeDatabaseHiveOptionsParameters(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/google-beta/services/biglake/resource_biglake_database_generated_test.go b/google-beta/services/biglake/resource_biglake_database_generated_test.go new file mode 100644 index 0000000000..7f5626d5a1 --- /dev/null +++ b/google-beta/services/biglake/resource_biglake_database_generated_test.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package biglake_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func TestAccBiglakeDatabase_biglakeDatabaseExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBiglakeDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBiglakeDatabase_biglakeDatabaseExample(context), + }, + { + ResourceName: "google_biglake_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"catalog", "name"}, + }, + }, + }) +} + +func testAccBiglakeDatabase_biglakeDatabaseExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_biglake_catalog" "catalog" { + name = "tf_test_my_catalog%{random_suffix}" + location = "US" +} + +resource "google_storage_bucket" "bucket" { + name = "tf_test_my_bucket%{random_suffix}" + location = "US" + force_destroy = true + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "metadata_folder" { + name = "metadata/" + content = " " + bucket = google_storage_bucket.bucket.name +} + +resource "google_biglake_database" "database" { + name = "tf_test_my_database%{random_suffix}" + catalog = google_biglake_catalog.catalog.id + type = "HIVE" + hive_options { + location_uri = 
"gs://${google_storage_bucket.bucket.name}/${google_storage_bucket_object.metadata_folder.name}" + parameters = { + "owner": "John Doe" + } + } +} +`, context) +} + +func testAccCheckBiglakeDatabaseDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_biglake_database" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{BiglakeBasePath}}{{catalog}}/databases/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("BiglakeDatabase still exists at %s", url) + } + } + + return nil + } +} diff --git a/google-beta/services/biglake/resource_biglake_database_sweeper.go b/google-beta/services/biglake/resource_biglake_database_sweeper.go new file mode 100644 index 0000000000..90a2bb2181 --- /dev/null +++ b/google-beta/services/biglake/resource_biglake_database_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package biglake + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/sweeper" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func init() { + sweeper.AddTestSweepers("BiglakeDatabase", testSweepBiglakeDatabase) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepBiglakeDatabase(region string) error { + resourceName := "BiglakeDatabase" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://biglake.googleapis.com/v1/{{catalog}}/databases", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["databases"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://biglake.googleapis.com/v1/{{catalog}}/databases/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google-beta/services/biglake/resource_biglake_database_test.go b/google-beta/services/biglake/resource_biglake_database_test.go new file mode 100644 index 0000000000..c1ccf6e047 --- /dev/null +++ 
b/google-beta/services/biglake/resource_biglake_database_test.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package biglake_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" +) + +func TestAccBiglakeDatabase_biglakeDatabase_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBiglakeDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBiglakeDatabase_biglakeDatabaseExample(context), + }, + { + ResourceName: "google_biglake_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "catalog"}, + }, + { + Config: testAccBiglakeDatabase_biglakeDatabase_update(context), + }, + { + ResourceName: "google_biglake_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "catalog"}, + }, + }, + }) +} + +func testAccBiglakeDatabase_biglakeDatabase_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_biglake_catalog" "catalog" { + name = "tf_test_my_catalog%{random_suffix}" + # Hard code to avoid invalid random id suffix + location = "US" +} +resource "google_storage_bucket" "bucket" { + name = "tf_test_my_bucket%{random_suffix}" + location = "US" + force_destroy = true + uniform_bucket_level_access = true +} +resource "google_storage_bucket_object" "metadata_folder" { + name = "metadata/" + content = " " + bucket = google_storage_bucket.bucket.name +} +resource "google_biglake_database" "database" { + name = "tf_test_my_database%{random_suffix}" + catalog = 
google_biglake_catalog.catalog.id + type = "HIVE" + hive_options { + location_uri = "gs://${google_storage_bucket.bucket.name}/${google_storage_bucket_object.metadata_folder.name}/metadata/metadata" + parameters = { + "owner": "Jane Doe" + "tool" = "screwdriver" + } + } +} +`, context) +} diff --git a/google-beta/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go b/google-beta/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go index 7cb6f2b28d..7912d5b732 100644 --- a/google-beta/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go +++ b/google-beta/services/bigquerydatatransfer/resource_bigquery_data_transfer_config.go @@ -44,8 +44,8 @@ func sensitiveParamCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v return nil } -// This customizeDiff is to use ForceNew for params fields data_path_template and -// destination_table_name_template only if the value of "data_source_id" is "google_cloud_storage". +// This customizeDiff is to use ForceNew for params fields data_path_template or data_path and +// destination_table_name_template only if the value of "data_source_id" is "google_cloud_storage" or "amazon_s3". 
func ParamsCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { old, new := diff.GetChange("params") dsId := diff.Get("data_source_id").(string) @@ -53,7 +53,8 @@ func ParamsCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { newParams := new.(map[string]interface{}) var err error - if dsId == "google_cloud_storage" { + switch dsId { + case "google_cloud_storage": if oldParams["data_path_template"] != nil && newParams["data_path_template"] != nil && oldParams["data_path_template"].(string) != newParams["data_path_template"].(string) { err = diff.ForceNew("params") if err != nil { @@ -69,11 +70,25 @@ func ParamsCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { } return nil } - } + case "amazon_s3": + if oldParams["data_path"] != nil && newParams["data_path"] != nil && oldParams["data_path"].(string) != newParams["data_path"].(string) { + err = diff.ForceNew("params") + if err != nil { + return fmt.Errorf("ForceNew failed for params, old - %v and new - %v", oldParams, newParams) + } + return nil + } + if oldParams["destination_table_name_template"] != nil && newParams["destination_table_name_template"] != nil && oldParams["destination_table_name_template"].(string) != newParams["destination_table_name_template"].(string) { + err = diff.ForceNew("params") + if err != nil { + return fmt.Errorf("ForceNew failed for params, old - %v and new - %v", oldParams, newParams) + } + return nil + } + } return nil } - func paramsCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { return ParamsCustomizeDiffFunc(diff) } diff --git a/google-beta/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_test.go b/google-beta/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_test.go index 1fb169ec48..2c2c644f33 100644 --- a/google-beta/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_test.go +++ 
b/google-beta/services/bigquerydatatransfer/resource_bigquery_data_transfer_config_test.go @@ -16,7 +16,7 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" ) -func TestBigqueryDataTransferConfig_resourceBigqueryDTCParamsCustomDiffFuncForceNew(t *testing.T) { +func TestBigqueryDataTransferConfig_resourceBigqueryDTCParamsCustomDiffFuncForceNewWhenGoogleCloudStorage(t *testing.T) { t.Parallel() cases := map[string]struct { @@ -154,6 +154,144 @@ func TestBigqueryDataTransferConfig_resourceBigqueryDTCParamsCustomDiffFuncForce } } +func TestBigqueryDataTransferConfig_resourceBigqueryDTCParamsCustomDiffFuncForceNewWhenAmazonS3(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + before map[string]interface{} + after map[string]interface{} + forcenew bool + }{ + "changing_data_path": { + before: map[string]interface{}{ + "data_source_id": "amazon_s3", + "params": map[string]interface{}{ + "data_path": "s3://s3-bucket-temp/*.json", + "destination_table_name_template": "table-old", + "file_format": "JSON", + "max_bad_records": 10, + "write_disposition": "WRITE_APPEND", + }, + }, + after: map[string]interface{}{ + "data_source_id": "amazon_s3", + "params": map[string]interface{}{ + "data_path": "s3://s3-bucket-temp-new/*.json", + "destination_table_name_template": "table-old", + "file_format": "JSON", + "max_bad_records": 10, + "write_disposition": "WRITE_APPEND", + }, + }, + forcenew: true, + }, + "changing_destination_table_name_template": { + before: map[string]interface{}{ + "data_source_id": "amazon_s3", + "params": map[string]interface{}{ + "data_path": "s3://s3-bucket-temp/*.json", + "destination_table_name_template": "table-old", + "file_format": "JSON", + "max_bad_records": 10, + "write_disposition": "WRITE_APPEND", + }, + }, + after: map[string]interface{}{ + "data_source_id": "amazon_s3", + "params": map[string]interface{}{ + "data_path": "s3://s3-bucket-temp/*.json", + 
"destination_table_name_template": "table-new", + "file_format": "JSON", + "max_bad_records": 10, + "write_disposition": "WRITE_APPEND", + }, + }, + forcenew: true, + }, + "changing_non_force_new_fields": { + before: map[string]interface{}{ + "data_source_id": "amazon_s3", + "params": map[string]interface{}{ + "data_path": "s3://s3-bucket-temp/*.json", + "destination_table_name_template": "table-old", + "file_format": "JSON", + "max_bad_records": 10, + "write_disposition": "WRITE_APPEND", + }, + }, + after: map[string]interface{}{ + "data_source_id": "amazon_s3", + "params": map[string]interface{}{ + "data_path": "s3://s3-bucket-temp/*.json", + "destination_table_name_template": "table-old", + "file_format": "JSON", + "max_bad_records": 1000, + "write_disposition": "APPEND", + }, + }, + forcenew: false, + }, + "changing_destination_table_name_template_for_different_data_source_id": { + before: map[string]interface{}{ + "data_source_id": "scheduled_query", + "params": map[string]interface{}{ + "destination_table_name_template": "table-old", + "query": "SELECT 1 AS a", + "write_disposition": "WRITE_APPEND", + }, + }, + after: map[string]interface{}{ + "data_source_id": "scheduled_query", + "params": map[string]interface{}{ + "destination_table_name_template": "table-new", + "query": "SELECT 1 AS a", + "write_disposition": "WRITE_APPEND", + }, + }, + forcenew: false, + }, + "changing_data_path_template_for_different_data_source_id": { + before: map[string]interface{}{ + "data_source_id": "scheduled_query", + "params": map[string]interface{}{ + "data_path": "s3://s3-bucket-temp/*.json", + "query": "SELECT 1 AS a", + "write_disposition": "WRITE_APPEND", + }, + }, + after: map[string]interface{}{ + "data_source_id": "scheduled_query", + "params": map[string]interface{}{ + "data_path": "s3://s3-bucket-temp-new/*.json", + "query": "SELECT 1 AS a", + "write_disposition": "WRITE_APPEND", + }, + }, + forcenew: false, + }, + } + + for tn, tc := range cases { + d := 
&tpgresource.ResourceDiffMock{ + Before: map[string]interface{}{ + "params": tc.before["params"], + "data_source_id": tc.before["data_source_id"], + }, + After: map[string]interface{}{ + "params": tc.after["params"], + "data_source_id": tc.after["data_source_id"], + }, + } + err := bigquerydatatransfer.ParamsCustomizeDiffFunc(d) + if err != nil { + t.Errorf("failed, expected no error but received - %s for the condition %s", err, tn) + } + if d.IsForceNew != tc.forcenew { + t.Errorf("ForceNew not setup correctly for the condition-'%s', expected:%v; actual:%v", tn, tc.forcenew, d.IsForceNew) + } + } +} + // The service account TF uses needs the permission granted in the configs // but it will get deleted by parallel tests, so they need to be run serially. func TestAccBigqueryDataTransferConfig(t *testing.T) { diff --git a/google-beta/services/compute/resource_compute_forwarding_rule.go b/google-beta/services/compute/resource_compute_forwarding_rule.go index 3e72f53b0a..4e740cb99f 100644 --- a/google-beta/services/compute/resource_compute_forwarding_rule.go +++ b/google-beta/services/compute/resource_compute_forwarding_rule.go @@ -178,7 +178,6 @@ internal load balancer.`, "allow_psc_global_access": { Type: schema.TypeBool, Optional: true, - ForceNew: true, Description: `This is used in PSC consumer ForwardingRule to control whether the PSC endpoint can be accessed from another region.`, }, "backend_service": { @@ -1050,6 +1049,72 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ return err } } + if d.HasChange("allow_psc_global_access") { + obj := make(map[string]interface{}) + + getUrl, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + getRes, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeForwardingRule %q", d.Id())) + } + + obj["fingerprint"] = getRes["fingerprint"] + + allowPscGlobalAccessProp, err := expandComputeForwardingRuleAllowPscGlobalAccess(d.Get("allow_psc_global_access"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("allow_psc_global_access"); ok || !reflect.DeepEqual(v, allowPscGlobalAccessProp) { + obj["allowPscGlobalAccess"] = allowPscGlobalAccessProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") + if err != nil { + return err + } + url = strings.ReplaceAll(url, "projects/projects/", "projects/") + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error updating ForwardingRule %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ForwardingRule %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating ForwardingRule", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } d.Partial(false) diff --git a/google-beta/services/compute/resource_compute_forwarding_rule_test.go b/google-beta/services/compute/resource_compute_forwarding_rule_test.go index d284bd9c0b..340458ad64 100644 --- a/google-beta/services/compute/resource_compute_forwarding_rule_test.go +++ 
b/google-beta/services/compute/resource_compute_forwarding_rule_test.go @@ -183,7 +183,15 @@ func TestAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(t *testing.T ImportStateVerify: true, }, { - Config: testAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(context), + Config: testAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(context, true), + }, + { + ResourceName: "google_compute_forwarding_rule.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(context, false), }, { ResourceName: "google_compute_forwarding_rule.default", @@ -540,7 +548,15 @@ resource "google_service_directory_service" "examplesvc" { `, poolName, ruleName, svcDirNamespace, serviceName) } -func testAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(context map[string]interface{}) string { +func testAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(context map[string]interface{}, preventDestroy bool) string { + context["lifecycle_block"] = "" + if preventDestroy { + context["lifecycle_block"] = ` + lifecycle { + prevent_destroy = true + }` + } + return acctest.Nprintf(` // Forwarding rule for VPC private service connect resource "google_compute_forwarding_rule" "default" { @@ -551,6 +567,7 @@ resource "google_compute_forwarding_rule" "default" { network = google_compute_network.consumer_net.name ip_address = google_compute_address.consumer_address.id allow_psc_global_access = false + %{lifecycle_block} } // Consumer service endpoint diff --git a/google-beta/services/compute/resource_compute_instance_group_manager.go b/google-beta/services/compute/resource_compute_instance_group_manager.go index 0ed7011855..30a786fe08 100644 --- a/google-beta/services/compute/resource_compute_instance_group_manager.go +++ b/google-beta/services/compute/resource_compute_instance_group_manager.go @@ -1346,28 +1346,32 @@ func expandAllInstancesConfig(old []interface{}, new 
[]interface{}) *compute.Ins var properties *compute.InstancePropertiesPatch for _, raw := range new { properties = &compute.InstancePropertiesPatch{} - data := raw.(map[string]interface{}) - properties.Metadata = tpgresource.ConvertStringMap(data["metadata"].(map[string]interface{})) - if len(properties.Metadata) == 0 { - properties.NullFields = append(properties.NullFields, "Metadata") - } - properties.Labels = tpgresource.ConvertStringMap(data["labels"].(map[string]interface{})) - if len(properties.Labels) == 0 { - properties.NullFields = append(properties.NullFields, "Labels") + if raw != nil { + data := raw.(map[string]interface{}) + properties.Metadata = tpgresource.ConvertStringMap(data["metadata"].(map[string]interface{})) + if len(properties.Metadata) == 0 { + properties.NullFields = append(properties.NullFields, "Metadata") + } + properties.Labels = tpgresource.ConvertStringMap(data["labels"].(map[string]interface{})) + if len(properties.Labels) == 0 { + properties.NullFields = append(properties.NullFields, "Labels") + } } } if properties != nil { for _, raw := range old { - data := raw.(map[string]interface{}) - for k := range data["metadata"].(map[string]interface{}) { - if _, exist := properties.Metadata[k]; !exist { - properties.NullFields = append(properties.NullFields, fmt.Sprintf("Metadata.%s", k)) + if raw != nil { + data := raw.(map[string]interface{}) + for k := range data["metadata"].(map[string]interface{}) { + if _, exist := properties.Metadata[k]; !exist { + properties.NullFields = append(properties.NullFields, fmt.Sprintf("Metadata.%s", k)) + } } - } - for k := range data["labels"].(map[string]interface{}) { - if _, exist := properties.Labels[k]; !exist { - properties.NullFields = append(properties.NullFields, fmt.Sprintf("Labels.%s", k)) + for k := range data["labels"].(map[string]interface{}) { + if _, exist := properties.Labels[k]; !exist { + properties.NullFields = append(properties.NullFields, fmt.Sprintf("Labels.%s", k)) + } } } } diff 
--git a/google-beta/services/compute/resource_compute_network_attachment.go b/google-beta/services/compute/resource_compute_network_attachment.go new file mode 100644 index 0000000000..580c50e17f --- /dev/null +++ b/google-beta/services/compute/resource_compute_network_attachment.go @@ -0,0 +1,615 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/verify" +) + +func ResourceComputeNetworkAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkAttachmentCreate, + Read: resourceComputeNetworkAttachmentRead, + Delete: resourceComputeNetworkAttachmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkAttachmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "connection_preference": { + Type: schema.TypeString, + 
Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ACCEPT_AUTOMATIC", "ACCEPT_MANUAL", "INVALID"}), + Description: `The connection preference of service attachment. The value can be set to ACCEPT_AUTOMATIC. An ACCEPT_AUTOMATIC service attachment is one that always accepts the connection from consumer forwarding rules. Possible values: ["ACCEPT_AUTOMATIC", "ACCEPT_MANUAL", "INVALID"]`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.`, + }, + "subnetworks": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional description of this resource. 
Provide this property when you create the resource.`, + }, + "producer_accept_lists": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Projects that are allowed to connect to this network attachment. The project can be specified using its id or number.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "producer_reject_lists": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "connection_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `An array of connections for all the producers connected to this network attachment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.`, + }, + "project_id_or_num": { + Type: schema.TypeString, + Computed: true, + Description: `The project id or number of the interface to which the IP was assigned.`, + }, + "secondary_ip_cidr_ranges": { + Type: schema.TypeString, + Computed: true, + Description: `Alias IP ranges from the same subnetwork.`, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: `The status of a connected endpoint to this network attachment.`, + }, + "subnetwork": { + Type: schema.TypeString, + Computed: true, + Description: `The subnetwork used to assign the IP to the producer instance network interface.`, + }, + }, + }, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `Fingerprint of this resource. 
A hash of the contents stored in this object. This +field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.`, + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier for the resource type. The server generates this identifier.`, + }, + "kind": { + Type: schema.TypeString, + Computed: true, + Description: `Type of the resource.`, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. +Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL for the resource.`, + }, + "self_link_with_id": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL for this resource's resource id.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNetworkAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + descriptionProp, err := expandComputeNetworkAttachmentDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + connectionPreferenceProp, err := expandComputeNetworkAttachmentConnectionPreference(d.Get("connection_preference"), d, config) + if err 
!= nil { + return err + } else if v, ok := d.GetOkExists("connection_preference"); !tpgresource.IsEmptyValue(reflect.ValueOf(connectionPreferenceProp)) && (ok || !reflect.DeepEqual(v, connectionPreferenceProp)) { + obj["connectionPreference"] = connectionPreferenceProp + } + subnetworksProp, err := expandComputeNetworkAttachmentSubnetworks(d.Get("subnetworks"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("subnetworks"); !tpgresource.IsEmptyValue(reflect.ValueOf(subnetworksProp)) && (ok || !reflect.DeepEqual(v, subnetworksProp)) { + obj["subnetworks"] = subnetworksProp + } + producerRejectListsProp, err := expandComputeNetworkAttachmentProducerRejectLists(d.Get("producer_reject_lists"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("producer_reject_lists"); !tpgresource.IsEmptyValue(reflect.ValueOf(producerRejectListsProp)) && (ok || !reflect.DeepEqual(v, producerRejectListsProp)) { + obj["producerRejectLists"] = producerRejectListsProp + } + producerAcceptListsProp, err := expandComputeNetworkAttachmentProducerAcceptLists(d.Get("producer_accept_lists"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("producer_accept_lists"); !tpgresource.IsEmptyValue(reflect.ValueOf(producerAcceptListsProp)) && (ok || !reflect.DeepEqual(v, producerAcceptListsProp)) { + obj["producerAcceptLists"] = producerAcceptListsProp + } + fingerprintProp, err := expandComputeNetworkAttachmentFingerprint(d.Get("fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("fingerprint"); !tpgresource.IsEmptyValue(reflect.ValueOf(fingerprintProp)) && (ok || !reflect.DeepEqual(v, fingerprintProp)) { + obj["fingerprint"] = fingerprintProp + } + nameProp, err := expandComputeNetworkAttachmentName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || 
!reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + regionProp, err := expandComputeNetworkAttachmentRegion(d.Get("region"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkAttachments") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new NetworkAttachment: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating NetworkAttachment: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating NetworkAttachment", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create NetworkAttachment: %s", err) + } + + log.Printf("[DEBUG] Finished creating NetworkAttachment %q: %#v", d.Id(), res) + + return resourceComputeNetworkAttachmentRead(d, meta) +} + +func 
resourceComputeNetworkAttachmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkAttachment: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeNetworkAttachment %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + + if err := d.Set("kind", flattenComputeNetworkAttachmentKind(res["kind"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("id", flattenComputeNetworkAttachmentId(res["id"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("creation_timestamp", flattenComputeNetworkAttachmentCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("description", flattenComputeNetworkAttachmentDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("self_link", 
flattenComputeNetworkAttachmentSelfLink(res["selfLink"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("self_link_with_id", flattenComputeNetworkAttachmentSelfLinkWithId(res["selfLinkWithId"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("connection_preference", flattenComputeNetworkAttachmentConnectionPreference(res["connectionPreference"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("connection_endpoints", flattenComputeNetworkAttachmentConnectionEndpoints(res["connectionEndpoints"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("subnetworks", flattenComputeNetworkAttachmentSubnetworks(res["subnetworks"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("producer_reject_lists", flattenComputeNetworkAttachmentProducerRejectLists(res["producerRejectLists"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("producer_accept_lists", flattenComputeNetworkAttachmentProducerAcceptLists(res["producerAcceptLists"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("fingerprint", flattenComputeNetworkAttachmentFingerprint(res["fingerprint"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("network", flattenComputeNetworkAttachmentNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("name", flattenComputeNetworkAttachmentName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + if err := d.Set("region", 
flattenComputeNetworkAttachmentRegion(res["region"], d, config)); err != nil { + return fmt.Errorf("Error reading NetworkAttachment: %s", err) + } + + return nil +} + +func resourceComputeNetworkAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for NetworkAttachment: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting NetworkAttachment %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "NetworkAttachment") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting NetworkAttachment", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting NetworkAttachment %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeNetworkAttachmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/networkAttachments/(?P<name>[^/]+)", + "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", + 
"(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeNetworkAttachmentKind(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentCreationTimestamp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentSelfLink(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentSelfLinkWithId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionPreference(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpoints(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "status": flattenComputeNetworkAttachmentConnectionEndpointsStatus(original["status"], d, 
config), + "project_id_or_num": flattenComputeNetworkAttachmentConnectionEndpointsProjectIdOrNum(original["projectIdOrNum"], d, config), + "subnetwork": flattenComputeNetworkAttachmentConnectionEndpointsSubnetwork(original["subnetwork"], d, config), + "ip_address": flattenComputeNetworkAttachmentConnectionEndpointsIpAddress(original["ipAddress"], d, config), + "secondary_ip_cidr_ranges": flattenComputeNetworkAttachmentConnectionEndpointsSecondaryIpCidrRanges(original["secondaryIpCidrRanges"], d, config), + }) + } + return transformed +} +func flattenComputeNetworkAttachmentConnectionEndpointsStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpointsProjectIdOrNum(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpointsSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpointsIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentConnectionEndpointsSecondaryIpCidrRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentSubnetworks(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertAndMapStringArr(v.([]interface{}), tpgresource.ConvertSelfLinkToV1) +} + +func flattenComputeNetworkAttachmentProducerRejectLists(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentProducerAcceptLists(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenComputeNetworkAttachmentFingerprint(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenComputeNetworkAttachmentRegion(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func expandComputeNetworkAttachmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentConnectionPreference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentSubnetworks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + return nil, fmt.Errorf("Invalid value for subnetworks: nil") + } + f, err := tpgresource.ParseRegionalFieldValue("subnetworks", raw.(string), "project", "region", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for subnetworks: %s", err) + } + req = append(req, f.RelativeLink()) + } + return req, nil +} + +func expandComputeNetworkAttachmentProducerRejectLists(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentProducerAcceptLists(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandComputeNetworkAttachmentFingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeNetworkAttachmentRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseGlobalFieldValue("regions", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for region: %s", err) + } + return f.RelativeLink(), nil +} diff --git a/google-beta/services/compute/resource_compute_network_attachment_generated_test.go b/google-beta/services/compute/resource_compute_network_attachment_generated_test.go new file mode 100644 index 0000000000..28a77e007d --- /dev/null +++ b/google-beta/services/compute/resource_compute_network_attachment_generated_test.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package compute_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func TestAccComputeNetworkAttachment_networkAttachmentBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeNetworkAttachmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkAttachment_networkAttachmentBasicExample(context), + }, + { + ResourceName: "google_compute_network_attachment.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} + +func testAccComputeNetworkAttachment_networkAttachmentBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_attachment" "default" { + provider = google-beta + name = "tf-test-basic-network-attachment%{random_suffix}" + region = "us-central1" + description = "basic network attachment description" + connection_preference = "ACCEPT_MANUAL" + + subnetworks = [ + google_compute_subnetwork.default.self_link + ] + + producer_accept_lists = [ + google_project.accepted_producer_project.project_id + ] + + 
producer_reject_lists = [ + google_project.rejected_producer_project.project_id + ] +} + +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-basic-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-basic-subnetwork%{random_suffix}" + region = "us-central1" + + network = google_compute_network.default.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_project" "rejected_producer_project" { + provider = google-beta + project_id = "prj-rejected%{random_suffix}" + name = "prj-rejected%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_project" "accepted_producer_project" { + provider = google-beta + project_id = "prj-accepted%{random_suffix}" + name = "prj-accepted%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} +`, context) +} + +func testAccCheckComputeNetworkAttachmentDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_network_attachment" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("ComputeNetworkAttachment still exists at %s", url) + } + } + + return nil + } +} diff --git 
a/google-beta/services/compute/resource_compute_network_attachment_sweeper.go b/google-beta/services/compute/resource_compute_network_attachment_sweeper.go new file mode 100644 index 0000000000..daecc96f33 --- /dev/null +++ b/google-beta/services/compute/resource_compute_network_attachment_sweeper.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/sweeper" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeNetworkAttachment", testSweepComputeNetworkAttachment) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeNetworkAttachment(region string) error { + resourceName := "ComputeNetworkAttachment" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + 
return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/networkAttachments", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["items"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google-beta/services/compute/resource_compute_region_instance_template_test.go b/google-beta/services/compute/resource_compute_region_instance_template_test.go index 5ad56d363b..8bc2f7401e 100644 --- a/google-beta/services/compute/resource_compute_region_instance_template_test.go +++ b/google-beta/services/compute/resource_compute_region_instance_template_test.go @@ -3165,7 +3165,7 @@ resource "google_compute_region_instance_template" "foobar" { network = "default" } - scheduling { + scheduling { local_ssd_recovery_timeout { nanos = 0 seconds = 3600 diff --git 
a/google-beta/services/compute/resource_compute_service_attachment.go b/google-beta/services/compute/resource_compute_service_attachment.go index 7cfbef6199..fac9f1d144 100644 --- a/google-beta/services/compute/resource_compute_service_attachment.go +++ b/google-beta/services/compute/resource_compute_service_attachment.go @@ -62,7 +62,6 @@ values include "ACCEPT_AUTOMATIC", "ACCEPT_MANUAL".`, "enable_proxy_protocol": { Type: schema.TypeBool, Required: true, - ForceNew: true, Description: `If true, enable the proxy protocol which is for supplying client TCP/IP address data in TCP connections that traverse proxies on their way to destination servers.`, @@ -145,7 +144,6 @@ supported is 1.`, "reconcile_connections": { Type: schema.TypeBool, Optional: true, - ForceNew: true, Description: `This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . 
@@ -470,6 +468,12 @@ func resourceComputeServiceAttachmentUpdate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("nat_subnets"); ok || !reflect.DeepEqual(v, natSubnetsProp) { obj["natSubnets"] = natSubnetsProp } + enableProxyProtocolProp, err := expandComputeServiceAttachmentEnableProxyProtocol(d.Get("enable_proxy_protocol"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_proxy_protocol"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enableProxyProtocolProp)) { + obj["enableProxyProtocol"] = enableProxyProtocolProp + } consumerRejectListsProp, err := expandComputeServiceAttachmentConsumerRejectLists(d.Get("consumer_reject_lists"), d, config) if err != nil { return err @@ -482,6 +486,12 @@ func resourceComputeServiceAttachmentUpdate(d *schema.ResourceData, meta interfa } else if v, ok := d.GetOkExists("consumer_accept_lists"); ok || !reflect.DeepEqual(v, consumerAcceptListsProp) { obj["consumerAcceptLists"] = consumerAcceptListsProp } + reconcileConnectionsProp, err := expandComputeServiceAttachmentReconcileConnections(d.Get("reconcile_connections"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("reconcile_connections"); ok || !reflect.DeepEqual(v, reconcileConnectionsProp) { + obj["reconcileConnections"] = reconcileConnectionsProp + } obj, err = resourceComputeServiceAttachmentUpdateEncoder(d, meta, obj) if err != nil { diff --git a/google-beta/services/compute/resource_compute_service_attachment_test.go b/google-beta/services/compute/resource_compute_service_attachment_test.go index 45b9395150..fc73996f23 100644 --- a/google-beta/services/compute/resource_compute_service_attachment_test.go +++ b/google-beta/services/compute/resource_compute_service_attachment_test.go @@ -31,7 +31,16 @@ func TestAccComputeServiceAttachment_serviceAttachmentBasicExampleUpdate(t *test ImportStateVerifyIgnore: []string{"target_service", "region"}, }, { - Config: 
testAccComputeServiceAttachment_serviceAttachmentBasicExampleUpdate(context), + Config: testAccComputeServiceAttachment_serviceAttachmentBasicExampleUpdate(context, true), + }, + { + ResourceName: "google_compute_service_attachment.psc_ilb_service_attachment", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"target_service", "region"}, + }, + { + Config: testAccComputeServiceAttachment_serviceAttachmentBasicExampleUpdate(context, false), }, { ResourceName: "google_compute_service_attachment.psc_ilb_service_attachment", @@ -50,7 +59,7 @@ resource "google_compute_service_attachment" "psc_ilb_service_attachment" { region = "us-west2" description = "A service attachment configured with Terraform" - enable_proxy_protocol = true + enable_proxy_protocol = false connection_preference = "ACCEPT_AUTOMATIC" nat_subnets = [google_compute_subnetwork.psc_ilb_nat.id] target_service = google_compute_forwarding_rule.psc_ilb_target_service.id @@ -126,7 +135,15 @@ resource "google_compute_subnetwork" "psc_ilb_nat" { `, context) } -func testAccComputeServiceAttachment_serviceAttachmentBasicExampleUpdate(context map[string]interface{}) string { +func testAccComputeServiceAttachment_serviceAttachmentBasicExampleUpdate(context map[string]interface{}, preventDestroy bool) string { + context["lifecycle_block"] = "" + if preventDestroy { + context["lifecycle_block"] = ` + lifecycle { + prevent_destroy = true + }` + } + return acctest.Nprintf(` resource "google_compute_service_attachment" "psc_ilb_service_attachment" { name = "tf-test-my-psc-ilb%{random_suffix}" @@ -143,7 +160,8 @@ resource "google_compute_service_attachment" "psc_ilb_service_attachment" { project_id_or_num = "658859330310" connection_limit = 4 } - + reconcile_connections = false + %{lifecycle_block} } resource "google_compute_address" "psc_ilb_consumer_address" { diff --git a/google-beta/services/container/node_config.go b/google-beta/services/container/node_config.go index 
bfb2a48cc4..3de90b9937 100644 --- a/google-beta/services/container/node_config.go +++ b/google-beta/services/container/node_config.go @@ -611,6 +611,24 @@ func schemaNodeConfig() *schema.Schema { }, }, }, + "confidential_nodes": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether Confidential Nodes feature is enabled for all nodes in this pool.`, + }, + }, + }, + }, }, }, } @@ -885,6 +903,11 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { if v, ok := nodeConfig["host_maintenance_policy"]; ok { nc.HostMaintenancePolicy = expandHostMaintenancePolicy(v) } + + if v, ok := nodeConfig["confidential_nodes"]; ok { + nc.ConfidentialNodes = expandConfidentialNodes(v) + } + return nc } @@ -1000,6 +1023,17 @@ func expandHostMaintenancePolicy(v interface{}) *container.HostMaintenancePolicy return mPolicy } +func expandConfidentialNodes(configured interface{}) *container.ConfidentialNodes { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + config := l[0].(map[string]interface{}) + return &container.ConfidentialNodes{ + Enabled: config["enabled"].(bool), + } +} + func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} { result := make([]map[string]interface{}, 0, 1) @@ -1049,6 +1083,7 @@ func flattenNodeConfig(c *container.NodeConfig) []map[string]interface{} { "workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig), "sandbox_config": flattenSandboxConfig(c.SandboxConfig), "host_maintenance_policy": 
flattenHostMaintenancePolicy(c.HostMaintenancePolicy), + "confidential_nodes": flattenConfidentialNodes(c.ConfidentialNodes), "boot_disk_kms_key": c.BootDiskKmsKey, "kubelet_config": flattenKubeletConfig(c.KubeletConfig), "linux_node_config": flattenLinuxNodeConfig(c.LinuxNodeConfig), @@ -1352,6 +1387,16 @@ func flattenLinuxNodeConfig(c *container.LinuxNodeConfig) []map[string]interface return result } +func flattenConfidentialNodes(c *container.ConfidentialNodes) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + }) + } + return result +} + func flattenSoleTenantConfig(c *container.SoleTenantConfig) []map[string]interface{} { result := []map[string]interface{}{} if c == nil { diff --git a/google-beta/services/container/resource_container_cluster.go b/google-beta/services/container/resource_container_cluster.go index a125aee6c1..6db953db66 100644 --- a/google-beta/services/container/resource_container_cluster.go +++ b/google-beta/services/container/resource_container_cluster.go @@ -1135,10 +1135,9 @@ func ResourceContainerCluster() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.`, + Description: `GKE components exposing metrics. 
Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET and WORKLOADS.`, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS"}, false), + Type: schema.TypeString, }, }, "managed_prometheus": { @@ -4766,17 +4765,6 @@ func expandBinaryAuthorization(configured interface{}, legacy_enabled bool) *con } } -func expandConfidentialNodes(configured interface{}) *container.ConfidentialNodes { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - config := l[0].(map[string]interface{}) - return &container.ConfidentialNodes{ - Enabled: config["enabled"].(bool), - } -} - func expandMasterAuth(configured interface{}) *container.MasterAuth { l := configured.([]interface{}) if len(l) == 0 || l[0] == nil { @@ -5299,16 +5287,6 @@ func flattenBinaryAuthorization(c *container.BinaryAuthorization) []map[string]i return result } -func flattenConfidentialNodes(c *container.ConfidentialNodes) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "enabled": c.Enabled, - }) - } - return result -} - func flattenNetworkPolicy(c *container.NetworkPolicy) []map[string]interface{} { result := []map[string]interface{}{} if c != nil { diff --git a/google-beta/services/container/resource_container_node_pool_test.go b/google-beta/services/container/resource_container_node_pool_test.go index 62219ad805..05f86ce8b0 100644 --- a/google-beta/services/container/resource_container_node_pool_test.go +++ b/google-beta/services/container/resource_container_node_pool_test.go @@ -3260,6 +3260,103 @@ resource "google_container_node_pool" "with_sole_tenant_config" { `, cluster, np) } +func TestAccContainerNodePool_withConfidentialNodes(t *testing.T) { + t.Parallel() + + clusterName := 
fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withConfidentialNodes(clusterName, np), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_disableConfidentialNodes(clusterName, np), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withConfidentialNodes(clusterName, np), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_withConfidentialNodes(clusterName string, np string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "asia-east1-c" + initial_node_count = 1 + node_config { + confidential_nodes { + enabled = false + } + machine_type = "n2-standard-2" + } +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "asia-east1-c" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "n2d-standard-2" // can't be e2 because Confidential Nodes require AMD CPUs + confidential_nodes { + enabled = true + } + } +} +`, clusterName, np) +} + +func testAccContainerNodePool_disableConfidentialNodes(clusterName string, np string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "asia-east1-c" + initial_node_count = 1 + node_config { + confidential_nodes { + enabled = false + } + machine_type = "n2-standard-2" + } +} 
+ +resource "google_container_node_pool" "np" { + name = "%s" + location = "asia-east1-c" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "n2d-standard-2" // can't be e2 because Confidential Nodes require AMD CPUs + confidential_nodes { + enabled = false + } + } +} +`, clusterName, np) +} + func TestAccContainerNodePool_tpuTopology(t *testing.T) { t.Parallel() acctest.SkipIfVcr(t) diff --git a/google-beta/services/containeraws/resource_container_aws_cluster_sweeper.go b/google-beta/services/containeraws/resource_container_aws_cluster_sweeper.go deleted file mode 100644 index 5971416659..0000000000 --- a/google-beta/services/containeraws/resource_container_aws_cluster_sweeper.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. 
If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package containeraws - -import ( - "context" - "log" - "testing" - - containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws/beta" - "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" - "github.com/hashicorp/terraform-provider-google-beta/google-beta/sweeper" - transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" -) - -func init() { - sweeper.AddTestSweepers("ContainerAwsCluster", testSweepContainerAwsCluster) -} - -func testSweepContainerAwsCluster(region string) error { - log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAwsCluster") - - config, err := sweeper.SharedConfigForRegion(region) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) - return err - } - - err = config.LoadAndValidate(context.Background()) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) - return err - } - - t := &testing.T{} - billingId := envvar.GetTestBillingAccountFromEnv(t) - - // Setup variables to be used for Delete arguments. 
- d := map[string]string{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - } - - client := transport_tpg.NewDCLContainerAwsClient(config, config.UserAgent, "", 0) - err = client.DeleteAllCluster(context.Background(), d["project"], d["location"], isDeletableContainerAwsCluster) - if err != nil { - return err - } - return nil -} - -func isDeletableContainerAwsCluster(r *containeraws.Cluster) bool { - return sweeper.IsSweepableTestResource(*r.Name) -} diff --git a/google-beta/services/containerazure/resource_container_azure_client_sweeper.go b/google-beta/services/containerazure/resource_container_azure_client_sweeper.go deleted file mode 100644 index 1ef2cf5c0b..0000000000 --- a/google-beta/services/containerazure/resource_container_azure_client_sweeper.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. 
If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package containerazure - -import ( - "context" - "log" - "testing" - - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" - "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" - "github.com/hashicorp/terraform-provider-google-beta/google-beta/sweeper" - transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" -) - -func init() { - sweeper.AddTestSweepers("ContainerAzureClient", testSweepContainerAzureClient) -} - -func testSweepContainerAzureClient(region string) error { - log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAzureClient") - - config, err := sweeper.SharedConfigForRegion(region) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) - return err - } - - err = config.LoadAndValidate(context.Background()) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) - return err - } - - t := &testing.T{} - billingId := envvar.GetTestBillingAccountFromEnv(t) - - // Setup variables to be used for Delete arguments. 
- d := map[string]string{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - } - - client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, "", 0) - err = client.DeleteAllClient(context.Background(), d["project"], d["location"], isDeletableContainerAzureClient) - if err != nil { - return err - } - return nil -} - -func isDeletableContainerAzureClient(r *containerazure.AzureClient) bool { - return sweeper.IsSweepableTestResource(*r.Name) -} diff --git a/google-beta/services/containerazure/resource_container_azure_cluster_sweeper.go b/google-beta/services/containerazure/resource_container_azure_cluster_sweeper.go deleted file mode 100644 index 0058eca20a..0000000000 --- a/google-beta/services/containerazure/resource_container_azure_cluster_sweeper.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// ---------------------------------------------------------------------------- -// -// *** AUTO GENERATED CODE *** Type: DCL *** -// -// ---------------------------------------------------------------------------- -// -// This file is managed by Magic Modules (https://github.com/GoogleCloudPlatform/magic-modules) -// and is based on the DCL (https://github.com/GoogleCloudPlatform/declarative-resource-client-library). -// Changes will need to be made to the DCL or Magic Modules instead of here. -// -// We are not currently able to accept contributions to this file. 
If changes -// are required, please file an issue at https://github.com/hashicorp/terraform-provider-google/issues/new/choose -// -// ---------------------------------------------------------------------------- - -package containerazure - -import ( - "context" - "log" - "testing" - - containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure/beta" - "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" - "github.com/hashicorp/terraform-provider-google-beta/google-beta/sweeper" - transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" -) - -func init() { - sweeper.AddTestSweepers("ContainerAzureCluster", testSweepContainerAzureCluster) -} - -func testSweepContainerAzureCluster(region string) error { - log.Print("[INFO][SWEEPER_LOG] Starting sweeper for ContainerAzureCluster") - - config, err := sweeper.SharedConfigForRegion(region) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) - return err - } - - err = config.LoadAndValidate(context.Background()) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) - return err - } - - t := &testing.T{} - billingId := envvar.GetTestBillingAccountFromEnv(t) - - // Setup variables to be used for Delete arguments. 
- d := map[string]string{ - "project": config.Project, - "region": region, - "location": region, - "zone": "-", - "billing_account": billingId, - } - - client := transport_tpg.NewDCLContainerAzureClient(config, config.UserAgent, "", 0) - err = client.DeleteAllCluster(context.Background(), d["project"], d["location"], isDeletableContainerAzureCluster) - if err != nil { - return err - } - return nil -} - -func isDeletableContainerAzureCluster(r *containerazure.Cluster) bool { - return sweeper.IsSweepableTestResource(*r.Name) -} diff --git a/google-beta/services/dataform/resource_dataform_repository_release_config_generated_test.go b/google-beta/services/dataform/resource_dataform_repository_release_config_generated_test.go index 1d923f1a5f..9770ad7156 100644 --- a/google-beta/services/dataform/resource_dataform_repository_release_config_generated_test.go +++ b/google-beta/services/dataform/resource_dataform_repository_release_config_generated_test.go @@ -64,7 +64,7 @@ resource "google_sourcerepo_repository" "git_repository" { resource "google_secret_manager_secret" "secret" { provider = google-beta - secret_id = "secret" + secret_id = "tf_test_my_secret%{random_suffix}" replication { automatic = true diff --git a/google-beta/services/dataform/resource_dataform_repository_workflow_config_generated_test.go b/google-beta/services/dataform/resource_dataform_repository_workflow_config_generated_test.go index e7a759da93..d8d89e5564 100644 --- a/google-beta/services/dataform/resource_dataform_repository_workflow_config_generated_test.go +++ b/google-beta/services/dataform/resource_dataform_repository_workflow_config_generated_test.go @@ -64,7 +64,7 @@ resource "google_sourcerepo_repository" "git_repository" { resource "google_secret_manager_secret" "secret" { provider = google-beta - secret_id = "secret" + secret_id = "tf_test_my_secret%{random_suffix}" replication { automatic = true diff --git a/google-beta/services/dataplex/iam_dataplex_datascan_generated_test.go 
b/google-beta/services/dataplex/iam_dataplex_datascan_generated_test.go index d996d20623..cbd7f5a5ff 100644 --- a/google-beta/services/dataplex/iam_dataplex_datascan_generated_test.go +++ b/google-beta/services/dataplex/iam_dataplex_datascan_generated_test.go @@ -45,7 +45,7 @@ func TestAccDataplexDatascanIamBindingGenerated(t *testing.T) { }, { ResourceName: "google_dataplex_datascan_iam_binding.foo", - ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-datascan%s", context["random_suffix"])), + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-dataprofile-basic%s", context["random_suffix"])), ImportState: true, ImportStateVerify: true, }, @@ -55,7 +55,7 @@ func TestAccDataplexDatascanIamBindingGenerated(t *testing.T) { }, { ResourceName: "google_dataplex_datascan_iam_binding.foo", - ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-datascan%s", context["random_suffix"])), + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-dataprofile-basic%s", context["random_suffix"])), ImportState: true, ImportStateVerify: true, }, @@ -82,7 +82,7 @@ func TestAccDataplexDatascanIamMemberGenerated(t *testing.T) { }, { ResourceName: "google_dataplex_datascan_iam_member.foo", - ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s roles/viewer user:admin@hashicorptest.com", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-datascan%s", context["random_suffix"])), + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s roles/viewer user:admin@hashicorptest.com", 
envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-dataprofile-basic%s", context["random_suffix"])), ImportState: true, ImportStateVerify: true, }, @@ -109,7 +109,7 @@ func TestAccDataplexDatascanIamPolicyGenerated(t *testing.T) { }, { ResourceName: "google_dataplex_datascan_iam_policy.foo", - ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-datascan%s", context["random_suffix"])), + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-dataprofile-basic%s", context["random_suffix"])), ImportState: true, ImportStateVerify: true, }, @@ -118,7 +118,7 @@ func TestAccDataplexDatascanIamPolicyGenerated(t *testing.T) { }, { ResourceName: "google_dataplex_datascan_iam_policy.foo", - ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-datascan%s", context["random_suffix"])), + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/dataScans/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-dataprofile-basic%s", context["random_suffix"])), ImportState: true, ImportStateVerify: true, }, @@ -130,7 +130,7 @@ func testAccDataplexDatascanIamMember_basicGenerated(context map[string]interfac return acctest.Nprintf(` resource "google_dataplex_datascan" "basic_profile" { location = "us-central1" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataprofile-basic%{random_suffix}" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" @@ -161,7 +161,7 @@ func testAccDataplexDatascanIamPolicy_basicGenerated(context map[string]interfac return acctest.Nprintf(` resource "google_dataplex_datascan" "basic_profile" { location = "us-central1" - 
data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataprofile-basic%{random_suffix}" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" @@ -207,7 +207,7 @@ func testAccDataplexDatascanIamPolicy_emptyBinding(context map[string]interface{ return acctest.Nprintf(` resource "google_dataplex_datascan" "basic_profile" { location = "us-central1" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataprofile-basic%{random_suffix}" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" @@ -240,7 +240,7 @@ func testAccDataplexDatascanIamBinding_basicGenerated(context map[string]interfa return acctest.Nprintf(` resource "google_dataplex_datascan" "basic_profile" { location = "us-central1" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataprofile-basic%{random_suffix}" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" @@ -271,7 +271,7 @@ func testAccDataplexDatascanIamBinding_updateGenerated(context map[string]interf return acctest.Nprintf(` resource "google_dataplex_datascan" "basic_profile" { location = "us-central1" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataprofile-basic%{random_suffix}" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" diff --git a/google-beta/services/dataplex/resource_dataplex_datascan_generated_test.go b/google-beta/services/dataplex/resource_dataplex_datascan_generated_test.go index 389a73c725..2e0398c2f9 100644 --- a/google-beta/services/dataplex/resource_dataplex_datascan_generated_test.go +++ b/google-beta/services/dataplex/resource_dataplex_datascan_generated_test.go @@ -61,7 +61,7 @@ func testAccDataplexDatascan_dataplexDatascanBasicProfileExample(context map[str return acctest.Nprintf(` 
resource "google_dataplex_datascan" "basic_profile" { location = "us-central1" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataprofile-basic%{random_suffix}" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" @@ -111,7 +111,7 @@ func testAccDataplexDatascan_dataplexDatascanFullProfileExample(context map[stri resource "google_dataplex_datascan" "full_profile" { location = "us-central1" display_name = "Full Datascan Profile" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataprofile-full%{random_suffix}" description = "Example resource - Full Datascan Profile" labels = { author = "billing" @@ -138,9 +138,26 @@ resource "google_dataplex_datascan" "full_profile" { exclude_fields { field_names = ["property_type"] } + post_scan_actions { + bigquery_export { + results_table = "//bigquery.googleapis.com/projects/%{project_name}/datasets/tf_test_dataplex_dataset%{random_suffix}/tables/profile_export" + } + } } project = "%{project_name}" + + depends_on = [ + google_bigquery_dataset.source + ] +} + +resource "google_bigquery_dataset" "source" { + dataset_id = "tf_test_dataplex_dataset%{random_suffix}" + friendly_name = "test" + description = "This is a test description" + location = "US" + delete_contents_on_destroy = true } `, context) } @@ -175,7 +192,7 @@ func testAccDataplexDatascan_dataplexDatascanBasicQualityExample(context map[str return acctest.Nprintf(` resource "google_dataplex_datascan" "basic_quality" { location = "us-central1" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataquality-basic%{random_suffix}" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" @@ -234,7 +251,7 @@ func testAccDataplexDatascan_dataplexDatascanFullQualityExample(context map[stri resource "google_dataplex_datascan" "full_quality" { location = "us-central1" display_name = 
"Full Datascan Quality" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "tf-test-dataquality-full%{random_suffix}" description = "Example resource - Full Datascan Quality" labels = { author = "billing" diff --git a/google-beta/services/dialogflowcx/resource_dialogflow_cx_flow.go b/google-beta/services/dialogflowcx/resource_dialogflow_cx_flow.go index d789ca2056..02d8d31d85 100644 --- a/google-beta/services/dialogflowcx/resource_dialogflow_cx_flow.go +++ b/google-beta/services/dialogflowcx/resource_dialogflow_cx_flow.go @@ -18,6 +18,7 @@ package dialogflowcx import ( + "encoding/json" "fmt" "log" "reflect" @@ -26,6 +27,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" @@ -96,12 +98,143 @@ Format: projects//locations//agents//flows//locations//agents//flows//locations//agents//flows//locations//agents//flows/Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = </locations//agents//flows//locations//agents//flows//locations//agents//flows/ for MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "conditional_cases": { + Type: schema.TypeList, + Optional: true, + Description: `Conditional cases for this fulfillment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cases": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. 
+See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema.`, + }, + }, + }, + }, "messages": { Type: schema.TypeList, Optional: true, Description: `The list of rich message responses to present to the user.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "channel": { + Type: schema.TypeString, + Optional: true, + Description: `The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.`, + }, + "conversation_success": { + Type: schema.TypeList, + Optional: true, + Description: `Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. +Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. +You may set this, for example: +* In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. +* In a webhook response when you determine that you handled the customer issue.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `Custom metadata. Dialogflow doesn't impose any structure on this.`, + }, + }, + }, + }, + "live_agent_handoff": { + Type: schema.TypeList, + Optional: true, + Description: `Indicates that the conversation should be handed off to a live agent. +Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. 
What else to do with this signal is up to you and your handoff procedures. +You may set this, for example: +* In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. +* In a webhook response when you determine that the customer issue can only be handled by a human.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `Custom metadata. Dialogflow doesn't impose any structure on this.`, + }, + }, + }, + }, + "output_audio_text": { + Type: schema.TypeList, + Optional: true, + Description: `A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssml": { + Type: schema.TypeString, + Optional: true, + Description: `The SSML text to be synthesized. 
For more information, see SSML.`, + }, + "text": { + Type: schema.TypeString, + Optional: true, + Description: `The raw text to be synthesized.`, + }, + "allow_playback_interruption": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, + }, + }, + }, + }, + "payload": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `A custom, platform-specific payload.`, + }, + "play_audio": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies an audio clip to be played by the client as part of the response.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audio_uri": { + Type: schema.TypeString, + Required: true, + Description: `URI of the audio clip. Dialogflow does not impose any validation on this value. 
It is specific to the client that reads it.`, + }, + "allow_playback_interruption": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, + }, + }, + }, + }, + "telephony_transfer_call": { + Type: schema.TypeList, + Optional: true, + Description: `Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "phone_number": { + Type: schema.TypeString, + Required: true, + Description: `Transfer the call to a phone number in E.164 format.`, + }, + }, + }, + }, "text": { Type: schema.TypeList, Optional: true, @@ -276,6 +720,27 @@ Format: projects/-/locations/-/agents/-/entityTypes/ for Optional: true, Description: `Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.`, }, + "set_parameter_actions": { + Type: schema.TypeList, + Optional: true, + Description: `Set parameter values before executing the webhook.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "parameter": { + Type: schema.TypeString, + Optional: true, + Description: `Display name of the parameter.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `The new JSON-encoded value of the parameter. 
A null value clears the parameter.`, + }, + }, + }, + }, "tag": { Type: schema.TypeString, Optional: true, @@ -289,31 +754,281 @@ Format: projects/-/locations/-/agents/-/entityTypes/ for }, }, }, - }, - }, - }, - "is_list": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter represents a list of values.`, - }, - "redact": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter content should be redacted in log. -If redaction is enabled, the parameter content will be replaced by parameter name during logging. Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled.`, - }, - "required": { - Type: schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter is required. Optional parameters will not trigger prompts; however, they are filled if the user specifies them. -Required parameters must be filled before form filling concludes.`, - }, - }, - }, - }, - }, - }, + "reprompt_event_handlers": { + Type: schema.TypeList, + Optional: true, + Description: `The handlers for parameter-level events, used to provide reprompt for the parameter or transition to a different page/flow. The supported events are: +* sys.no-match-, where N can be from 1 to 6 +* sys.no-match-default +* sys.no-input-, where N can be from 1 to 6 +* sys.no-input-default +* sys.invalid-parameter +[initialPromptFulfillment][initialPromptFulfillment] provides the first prompt for the parameter. +If the user's response does not fill the parameter, a no-match/no-input event will be triggered, and the fulfillment associated with the sys.no-match-1/sys.no-input-1 handler (if defined) will be called to provide a prompt. The sys.no-match-2/sys.no-input-2 handler (if defined) will respond to the next no-match/no-input event, and so on. 
+A sys.no-match-default or sys.no-input-default handler will be used to handle all following no-match/no-input events after all numbered no-match/no-input handlers for the parameter are consumed. +A sys.invalid-parameter handler can be defined to handle the case where the parameter values have been invalidated by webhook. For example, if the user's response fill the parameter, however the parameter was invalidated by webhook, the fulfillment associated with the sys.invalid-parameter handler (if defined) will be called to provide a prompt. +If the event handler for the corresponding event can't be found on the parameter, initialPromptFulfillment will be re-prompted.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "event": { + Type: schema.TypeString, + Optional: true, + Description: `The name of the event to handle.`, + }, + "target_flow": { + Type: schema.TypeString, + Optional: true, + Description: `The target flow to transition to. +Format: projects//locations//agents//flows/.`, + }, + "target_page": { + Type: schema.TypeString, + Optional: true, + Description: `The target page to transition to. +Format: projects//locations//agents//flows//pages/.`, + }, + "trigger_fulfillment": { + Type: schema.TypeList, + Optional: true, + Description: `The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. 
It is invalid to specify such fulfillment for a handler handling webhooks.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "conditional_cases": { + Type: schema.TypeList, + Optional: true, + Description: `Conditional cases for this fulfillment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cases": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. +See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema.`, + }, + }, + }, + }, + "messages": { + Type: schema.TypeList, + Optional: true, + Description: `The list of rich message responses to present to the user.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "channel": { + Type: schema.TypeString, + Optional: true, + Description: `The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned.`, + }, + "conversation_success": { + Type: schema.TypeList, + Optional: true, + Description: `Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. +Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. +You may set this, for example: +* In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. 
+* In a webhook response when you determine that you handled the customer issue.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `Custom metadata. Dialogflow doesn't impose any structure on this.`, + }, + }, + }, + }, + "live_agent_handoff": { + Type: schema.TypeList, + Optional: true, + Description: `Indicates that the conversation should be handed off to a live agent. +Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. +You may set this, for example: +* In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. +* In a webhook response when you determine that the customer issue can only be handled by a human.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `Custom metadata. Dialogflow doesn't impose any structure on this.`, + }, + }, + }, + }, + "output_audio_text": { + Type: schema.TypeList, + Optional: true, + Description: `A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ssml": { + Type: schema.TypeString, + Optional: true, + Description: `The SSML text to be synthesized. 
For more information, see SSML.`, + }, + "text": { + Type: schema.TypeString, + Optional: true, + Description: `The raw text to be synthesized.`, + }, + "allow_playback_interruption": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, + }, + }, + }, + }, + "payload": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `A custom, platform-specific payload.`, + }, + "play_audio": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies an audio clip to be played by the client as part of the response.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audio_uri": { + Type: schema.TypeString, + Required: true, + Description: `URI of the audio clip. Dialogflow does not impose any validation on this value. 
It is specific to the client that reads it.`, + }, + "allow_playback_interruption": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, + }, + }, + }, + }, + "telephony_transfer_call": { + Type: schema.TypeList, + Optional: true, + Description: `Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "phone_number": { + Type: schema.TypeString, + Required: true, + Description: `Transfer the call to a phone number in E.164 format.`, + }, + }, + }, + }, + "text": { + Type: schema.TypeList, + Optional: true, + Description: `The text response message.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "text": { + Type: schema.TypeList, + Optional: true, + Description: `A collection of text responses.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "allow_playback_interruption": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, + }, + }, + }, + }, + }, + }, + }, + "return_partial_responses": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. 
You may only want to apply it to fulfillments that have slow webhooks.`, + }, + "set_parameter_actions": { + Type: schema.TypeList, + Optional: true, + Description: `Set parameter values before executing the webhook.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "parameter": { + Type: schema.TypeString, + Optional: true, + Description: `Display name of the parameter.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }, + Description: `The new JSON-encoded value of the parameter. A null value clears the parameter.`, + }, + }, + }, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + Description: `The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified.`, + }, + "webhook": { + Type: schema.TypeString, + Optional: true, + Description: `The webhook to call. Format: projects//locations//agents//webhooks/.`, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of this event handler.`, + }, + }, + }, + }, + }, + }, + }, + "is_list": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether the parameter represents a list of values.`, + }, + "redact": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether the parameter content should be redacted in log. +If redaction is enabled, the parameter content will be replaced by parameter name during logging. Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled.`, + }, + "required": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether the parameter is required. Optional parameters will not trigger prompts; however, they are filled if the user specifies them. 
+Required parameters must be filled before form filling concludes.`, + }, + }, + }, + }, + }, + }, }, "language_code": { Type: schema.TypeString, @@ -391,16 +1106,147 @@ Format: projects//locations//agents//flows//locations//agents//flows/Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <[^/]+)/locations/global/scopes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("scope_id").(string)) + if err != nil { + return nil, err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHub2ScopeIamUpdater{ + project: values["project"], + scopeId: values["scope_id"], + d: d, + Config: config, + } + + if err := d.Set("project", u.project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("scope_id", u.GetResourceId()); err != nil { + return nil, fmt.Errorf("Error setting scope_id: %s", err) + } + + return u, nil +} + +func GKEHub2ScopeIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + 
+ project, _ := tpgresource.GetProject(d, config) + if project != "" { + values["project"] = project + } + + m, err := tpgresource.GetImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/global/scopes/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &GKEHub2ScopeIamUpdater{ + project: values["project"], + scopeId: values["scope_id"], + d: d, + Config: config, + } + if err := d.Set("scope_id", u.GetResourceId()); err != nil { + return fmt.Errorf("Error setting scope_id: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *GKEHub2ScopeIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyScopeUrl("getIamPolicy") + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(u.d, u.Config) + if err != nil { + return nil, err + } + var obj map[string]interface{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *GKEHub2ScopeIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + obj["policy"] = json + + url, err := u.qualifyScopeUrl("setIamPolicy") + if err != nil { + return err + } + project, err := 
tpgresource.GetProject(u.d, u.Config) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *GKEHub2ScopeIamUpdater) qualifyScopeUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{GKEHub2BasePath}}%s:%s", fmt.Sprintf("projects/%s/locations/global/scopes/%s", u.project, u.scopeId), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *GKEHub2ScopeIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/locations/global/scopes/%s", u.project, u.scopeId) +} + +func (u *GKEHub2ScopeIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-gkehub2-scope-%s", u.GetResourceId()) +} + +func (u *GKEHub2ScopeIamUpdater) DescribeResource() string { + return fmt.Sprintf("gkehub2 scope %q", u.GetResourceId()) +} diff --git a/google-beta/services/gkehub2/iam_gke_hub_scope_generated_test.go b/google-beta/services/gkehub2/iam_gke_hub_scope_generated_test.go new file mode 100644 index 0000000000..b9b780403d --- /dev/null +++ b/google-beta/services/gkehub2/iam_gke_hub_scope_generated_test.go @@ -0,0 +1,218 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub2_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" +) + +func TestAccGKEHub2ScopeIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + "project": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2ScopeIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_gke_hub_scope_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/global/scopes/%s roles/viewer", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-scope%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccGKEHub2ScopeIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_gke_hub_scope_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/global/scopes/%s roles/viewer", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-scope%s", context["random_suffix"])), + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGKEHub2ScopeIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + "project": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccGKEHub2ScopeIamMember_basicGenerated(context), + }, + { + ResourceName: "google_gke_hub_scope_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/global/scopes/%s roles/viewer user:admin@hashicorptest.com", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-scope%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGKEHub2ScopeIamPolicyGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + "project": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2ScopeIamPolicy_basicGenerated(context), + Check: resource.TestCheckResourceAttrSet("data.google_gke_hub_scope_iam_policy.foo", "policy_data"), + }, + { + ResourceName: "google_gke_hub_scope_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/global/scopes/%s", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-scope%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHub2ScopeIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_gke_hub_scope_iam_policy.foo", + ImportStateId: 
fmt.Sprintf("projects/%s/locations/global/scopes/%s", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-scope%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHub2ScopeIamMember_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "scope" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_scope_iam_member" "foo" { + project = google_gke_hub_scope.scope.project + scope_id = google_gke_hub_scope.scope.scope_id + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccGKEHub2ScopeIamPolicy_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "scope" { + scope_id = "tf-test-scope%{random_suffix}" +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_gke_hub_scope_iam_policy" "foo" { + project = google_gke_hub_scope.scope.project + scope_id = google_gke_hub_scope.scope.scope_id + policy_data = data.google_iam_policy.foo.policy_data +} + +data "google_gke_hub_scope_iam_policy" "foo" { + project = google_gke_hub_scope.scope.project + scope_id = google_gke_hub_scope.scope.scope_id + depends_on = [ + google_gke_hub_scope_iam_policy.foo + ] +} +`, context) +} + +func testAccGKEHub2ScopeIamPolicy_emptyBinding(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "scope" { + scope_id = "tf-test-scope%{random_suffix}" +} + +data "google_iam_policy" "foo" { +} + +resource "google_gke_hub_scope_iam_policy" "foo" { + project = google_gke_hub_scope.scope.project + scope_id = google_gke_hub_scope.scope.scope_id + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccGKEHub2ScopeIamBinding_basicGenerated(context map[string]interface{}) string { + return 
acctest.Nprintf(` +resource "google_gke_hub_scope" "scope" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_scope_iam_binding" "foo" { + project = google_gke_hub_scope.scope.project + scope_id = google_gke_hub_scope.scope.scope_id + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccGKEHub2ScopeIamBinding_updateGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "scope" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_scope_iam_binding" "foo" { + project = google_gke_hub_scope.scope.project + scope_id = google_gke_hub_scope.scope.scope_id + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_membership_binding.go b/google-beta/services/gkehub2/resource_gke_hub_membership_binding.go new file mode 100644 index 0000000000..47162df5b8 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_membership_binding.go @@ -0,0 +1,471 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkehub2 + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func ResourceGKEHub2MembershipBinding() *schema.Resource { + return &schema.Resource{ + Create: resourceGKEHub2MembershipBindingCreate, + Read: resourceGKEHub2MembershipBindingRead, + Update: resourceGKEHub2MembershipBindingUpdate, + Delete: resourceGKEHub2MembershipBindingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGKEHub2MembershipBindingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Location of the membership`, + }, + "membership_binding_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The client-provided identifier of the membership binding.`, + }, + "membership_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Id of the membership`, + }, + "scope": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `A Workspace resource name in the format +'projects/*/locations/*/scopes/*'.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the MembershipBinding was created in UTC.`, + }, + "delete_time": { + Type: schema.TypeString, + 
Computed: true, + Description: `Time the MembershipBinding was deleted in UTC.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name for the membershipbinding itself`, + }, + "state": { + Type: schema.TypeList, + Computed: true, + Description: `State of the membership binding resource.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: `Code describes the state of a MembershipBinding resource.`, + }, + }, + }, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Google-generated UUID for this resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the MembershipBinding was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGKEHub2MembershipBindingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + scopeProp, err := expandGKEHub2MembershipBindingScope(d.Get("scope"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scope"); !tpgresource.IsEmptyValue(reflect.ValueOf(scopeProp)) && (ok || !reflect.DeepEqual(v, scopeProp)) { + obj["scope"] = scopeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/?membership_binding_id={{membership_binding_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new MembershipBinding: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MembershipBinding: %s", err) + 
} + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating MembershipBinding: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GKEHub2OperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating MembershipBinding", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create MembershipBinding: %s", err) + } + + if err := d.Set("name", flattenGKEHub2MembershipBindingName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating MembershipBinding %q: %#v", d.Id(), res) + + return resourceGKEHub2MembershipBindingRead(d, meta) +} + +func resourceGKEHub2MembershipBindingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MembershipBinding: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GKEHub2MembershipBinding %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading MembershipBinding: %s", err) + } + + if err := d.Set("name", flattenGKEHub2MembershipBindingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading MembershipBinding: %s", err) + } + if err := d.Set("uid", flattenGKEHub2MembershipBindingUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading MembershipBinding: %s", err) + } + if 
err := d.Set("scope", flattenGKEHub2MembershipBindingScope(res["scope"], d, config)); err != nil { + return fmt.Errorf("Error reading MembershipBinding: %s", err) + } + if err := d.Set("create_time", flattenGKEHub2MembershipBindingCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading MembershipBinding: %s", err) + } + if err := d.Set("update_time", flattenGKEHub2MembershipBindingUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading MembershipBinding: %s", err) + } + if err := d.Set("delete_time", flattenGKEHub2MembershipBindingDeleteTime(res["deleteTime"], d, config)); err != nil { + return fmt.Errorf("Error reading MembershipBinding: %s", err) + } + if err := d.Set("state", flattenGKEHub2MembershipBindingState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading MembershipBinding: %s", err) + } + + return nil +} + +func resourceGKEHub2MembershipBindingUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MembershipBinding: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + scopeProp, err := expandGKEHub2MembershipBindingScope(d.Get("scope"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scope"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, scopeProp)) { + obj["scope"] = scopeProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating MembershipBinding %q: %#v", d.Id(), obj) + 
updateMask := []string{} + + if d.HasChange("scope") { + updateMask = append(updateMask, "scope") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating MembershipBinding %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating MembershipBinding %q: %#v", d.Id(), res) + } + + err = GKEHub2OperationWaitTime( + config, res, project, "Updating MembershipBinding", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGKEHub2MembershipBindingRead(d, meta) +} + +func resourceGKEHub2MembershipBindingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for MembershipBinding: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting MembershipBinding %q", d.Id()) + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "MembershipBinding") + } + + err = GKEHub2OperationWaitTime( + config, res, project, "Deleting MembershipBinding", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting MembershipBinding %q: %#v", d.Id(), res) + return nil +} + +func resourceGKEHub2MembershipBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/memberships/(?P[^/]+)/bindings/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGKEHub2MembershipBindingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2MembershipBindingUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2MembershipBindingScope(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenGKEHub2MembershipBindingCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2MembershipBindingUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2MembershipBindingDeleteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2MembershipBindingState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenGKEHub2MembershipBindingStateCode(original["code"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2MembershipBindingStateCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGKEHub2MembershipBindingScope(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_membership_binding_generated_test.go b/google-beta/services/gkehub2/resource_gke_hub_membership_binding_generated_test.go new file mode 100644 index 0000000000..543e1e3fe8 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_membership_binding_generated_test.go @@ -0,0 +1,134 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub2_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func TestAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "location": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHub2MembershipBindingDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample(context), + }, + { + ResourceName: "google_gke_hub_membership_binding.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"membership_binding_id", "scope", "membership_id", "location"}, + }, + }, + }) +} + +func 
testAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "basiccluster%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "example" { + membership_id = "tf-test-membership%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } + + depends_on = [google_container_cluster.primary] +} + +resource "google_gke_hub_scope" "example" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_membership_binding" "example" { + membership_binding_id = "tf-test-membership-binding%{random_suffix}" + scope = google_gke_hub_scope.example.name + membership_id = "tf-test-membership%{random_suffix}" + location = "global" + depends_on = [ + google_gke_hub_membership.example, + google_gke_hub_scope.example + ] +} +`, context) +} + +func testAccCheckGKEHub2MembershipBindingDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_gke_hub_membership_binding" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{GKEHub2BasePath}}projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("GKEHub2MembershipBinding still exists at %s", url) 
+ } + } + + return nil + } +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_membership_binding_test.go b/google-beta/services/gkehub2/resource_gke_hub_membership_binding_test.go new file mode 100644 index 0000000000..2a1d54eb57 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_membership_binding_test.go @@ -0,0 +1,119 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package gkehub2_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" +) + +func TestAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "location": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample_basic(context), + }, + { + ResourceName: "google_gke_hub_membership_binding.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"membership_binding_id", "scope", "membership_id", "location"}, + }, + { + Config: testAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample_update(context), + }, + { + ResourceName: "google_gke_hub_membership_binding.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"membership_binding_id", "scope", "membership_id", "location"}, + }, + }, + }) +} + +func testAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource 
"google_container_cluster" "primary" { + name = "basiccluster%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "example" { + membership_id = "tf-test-membership%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } + + depends_on = [google_container_cluster.primary] +} + +resource "google_gke_hub_scope" "example" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_membership_binding" "example" { + membership_binding_id = "tf-test-membership-binding%{random_suffix}" + scope = google_gke_hub_scope.example.name + membership_id = "tf-test-membership%{random_suffix}" + location = "global" + depends_on = [ + google_gke_hub_membership.example, + google_gke_hub_scope.example + ] +} +`, context) +} + +func testAccGKEHub2MembershipBinding_gkehubMembershipBindingBasicExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "basiccluster%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "example" { + membership_id = "tf-test-membership%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } + + depends_on = [google_container_cluster.primary] +} + +resource "google_gke_hub_scope" "example2" { + scope_id = "tf-test-scope2%{random_suffix}" +} + +resource "google_gke_hub_membership_binding" "example" { + membership_binding_id = "tf-test-membership-binding%{random_suffix}" + scope = google_gke_hub_scope.example2.name + membership_id = "tf-test-membership%{random_suffix}" + location = "global" + depends_on = [ + google_gke_hub_membership.example, + google_gke_hub_scope.example2 + ] +} +`, context) +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_namespace.go 
b/google-beta/services/gkehub2/resource_gke_hub_namespace.go new file mode 100644 index 0000000000..47b629b469 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_namespace.go @@ -0,0 +1,389 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub2 + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func ResourceGKEHub2Namespace() *schema.Resource { + return &schema.Resource{ + Create: resourceGKEHub2NamespaceCreate, + Read: resourceGKEHub2NamespaceRead, + Delete: resourceGKEHub2NamespaceDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGKEHub2NamespaceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "scope": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.ProjectNumberDiffSuppress, + Description: `The name of the Scope instance.`, + }, + "scope_namespace_id": { + Type: schema.TypeString, + Required: true, + 
ForceNew: true, + Description: `The client-provided identifier of the namespace.`, + }, + "scope_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Id of the scope`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Namespace was created in UTC.`, + }, + "delete_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Namespace was deleted in UTC.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name for the namespace`, + }, + "state": { + Type: schema.TypeList, + Computed: true, + Description: `State of the namespace resource.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: `Code describes the state of a Namespace resource.`, + }, + }, + }, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Google-generated UUID for this resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Namespace was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGKEHub2NamespaceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + scopeProp, err := expandGKEHub2NamespaceScope(d.Get("scope"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("scope"); !tpgresource.IsEmptyValue(reflect.ValueOf(scopeProp)) && (ok || !reflect.DeepEqual(v, scopeProp)) { + obj["scope"] = scopeProp + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/?scope_namespace_id={{scope_namespace_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Namespace: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Namespace: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Namespace: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/{{scope_namespace_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GKEHub2OperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Namespace", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Namespace: %s", err) + } + + if err := d.Set("name", flattenGKEHub2NamespaceName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/{{scope_namespace_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Namespace %q: %#v", d.Id(), res) + + return resourceGKEHub2NamespaceRead(d, meta) +} + +func resourceGKEHub2NamespaceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/{{scope_namespace_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Namespace: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GKEHub2Namespace %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Namespace: %s", err) + } + + if err := d.Set("name", flattenGKEHub2NamespaceName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Namespace: %s", err) + } + if err := d.Set("uid", flattenGKEHub2NamespaceUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Namespace: %s", err) + } + if err := d.Set("create_time", flattenGKEHub2NamespaceCreateTime(res["createTime"], d, config)); err != nil { + 
return fmt.Errorf("Error reading Namespace: %s", err) + } + if err := d.Set("update_time", flattenGKEHub2NamespaceUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Namespace: %s", err) + } + if err := d.Set("delete_time", flattenGKEHub2NamespaceDeleteTime(res["deleteTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Namespace: %s", err) + } + if err := d.Set("scope", flattenGKEHub2NamespaceScope(res["scope"], d, config)); err != nil { + return fmt.Errorf("Error reading Namespace: %s", err) + } + if err := d.Set("state", flattenGKEHub2NamespaceState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Namespace: %s", err) + } + + return nil +} + +func resourceGKEHub2NamespaceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Namespace: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/{{scope_namespace_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Namespace %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Namespace") + } + + err = GKEHub2OperationWaitTime( + config, 
res, project, "Deleting Namespace", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Namespace %q: %#v", d.Id(), res) + return nil +} + +func resourceGKEHub2NamespaceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/scopes/(?P[^/]+)/namespaces/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/{{scope_namespace_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGKEHub2NamespaceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2NamespaceUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2NamespaceCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2NamespaceUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2NamespaceDeleteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2NamespaceScope(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenGKEHub2NamespaceState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenGKEHub2NamespaceStateCode(original["code"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2NamespaceStateCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGKEHub2NamespaceScope(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_namespace_generated_test.go b/google-beta/services/gkehub2/resource_gke_hub_namespace_generated_test.go new file mode 100644 index 0000000000..3f60bdbb49 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_namespace_generated_test.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkehub2_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func TestAccGKEHub2Namespace_gkehubNamespaceBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHub2NamespaceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2Namespace_gkehubNamespaceBasicExample(context), + }, + { + ResourceName: "google_gke_hub_namespace.namespace", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"scope_namespace_id", "scope", "scope_id", "scope"}, + }, + }, + }) +} + +func testAccGKEHub2Namespace_gkehubNamespaceBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "namespace" { + scope_id = "tf-test-scope%{random_suffix}" +} + + +resource "google_gke_hub_namespace" "namespace" { + scope_namespace_id = "tf-test-namespace%{random_suffix}" + scope_id = "tf-test-scope%{random_suffix}" + scope = "${google_gke_hub_scope.namespace.name}" + depends_on = [google_gke_hub_scope.namespace] +} +`, context) +} + +func testAccCheckGKEHub2NamespaceDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s 
*terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_gke_hub_namespace" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/{{scope_namespace_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("GKEHub2Namespace still exists at %s", url) + } + } + + return nil + } +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_scope.go b/google-beta/services/gkehub2/resource_gke_hub_scope.go new file mode 100644 index 0000000000..b103c83ff8 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_scope.go @@ -0,0 +1,355 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package gkehub2 + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func ResourceGKEHub2Scope() *schema.Resource { + return &schema.Resource{ + Create: resourceGKEHub2ScopeCreate, + Read: resourceGKEHub2ScopeRead, + Delete: resourceGKEHub2ScopeDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGKEHub2ScopeImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "scope_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The client-provided identifier of the scope.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Scope was created in UTC.`, + }, + "delete_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the Scope was deleted in UTC.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The unique identifier of the scope`, + }, + "state": { + Type: schema.TypeList, + Computed: true, + Description: `State of the scope resource.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: `Code describes the state of a Scope resource.`, + }, + }, + }, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Google-generated UUID for this resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time 
the Scope was updated in UTC.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGKEHub2ScopeCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes?scopeId={{scope_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Scope: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Scope: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating Scope: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GKEHub2OperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Scope", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Scope: %s", err) + } 
+ + if err := d.Set("name", flattenGKEHub2ScopeName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. + id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Scope %q: %#v", d.Id(), res) + + return resourceGKEHub2ScopeRead(d, meta) +} + +func resourceGKEHub2ScopeRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Scope: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GKEHub2Scope %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Scope: %s", err) + } + + if err := d.Set("name", flattenGKEHub2ScopeName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Scope: %s", err) + } + if err := d.Set("uid", flattenGKEHub2ScopeUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Scope: %s", err) + } + if err := d.Set("create_time", 
flattenGKEHub2ScopeCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Scope: %s", err) + } + if err := d.Set("update_time", flattenGKEHub2ScopeUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Scope: %s", err) + } + if err := d.Set("delete_time", flattenGKEHub2ScopeDeleteTime(res["deleteTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Scope: %s", err) + } + if err := d.Set("state", flattenGKEHub2ScopeState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Scope: %s", err) + } + + return nil +} + +func resourceGKEHub2ScopeDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Scope: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Scope %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "Scope") + } + + err = GKEHub2OperationWaitTime( + config, res, project, "Deleting Scope", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting 
Scope %q: %#v", d.Id(), res) + return nil +} + +func resourceGKEHub2ScopeImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/scopes/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGKEHub2ScopeName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeDeleteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeState(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenGKEHub2ScopeStateCode(original["code"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2ScopeStateCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_scope_generated_test.go 
b/google-beta/services/gkehub2/resource_gke_hub_scope_generated_test.go new file mode 100644 index 0000000000..d1173093a9 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_scope_generated_test.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub2_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func TestAccGKEHub2Scope_gkehubScopeBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHub2ScopeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2Scope_gkehubScopeBasicExample(context), + }, + { + ResourceName: "google_gke_hub_scope.scope", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"scope_id"}, + }, + }, + }) +} + +func testAccGKEHub2Scope_gkehubScopeBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "scope" { + scope_id = "tf-test-scope%{random_suffix}" +} +`, context) +} + +func testAccCheckGKEHub2ScopeDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_gke_hub_scope" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("GKEHub2Scope still exists at %s", url) + } + } + + return nil + } +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding.go b/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding.go new file mode 100644 index 0000000000..530577b126 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding.go @@ -0,0 +1,615 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub2 + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/verify" +) + +func ResourceGKEHub2ScopeRBACRoleBinding() *schema.Resource { + return &schema.Resource{ + Create: resourceGKEHub2ScopeRBACRoleBindingCreate, + Read: resourceGKEHub2ScopeRBACRoleBindingRead, + Update: resourceGKEHub2ScopeRBACRoleBindingUpdate, + Delete: resourceGKEHub2ScopeRBACRoleBindingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGKEHub2ScopeRBACRoleBindingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeList, + Required: true, + Description: `Role to bind to the principal.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predefined_role": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"UNKNOWN", "ADMIN", "EDIT", "VIEW", ""}), + Description: `PredefinedRole is an ENUM representation of the default Kubernetes Roles Possible values: ["UNKNOWN", "ADMIN", "EDIT", "VIEW"]`, + }, + }, + }, + }, + "scope_rbac_role_binding_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The client-provided identifier of the 
RBAC Role Binding.`, + }, + "scope_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Id of the scope`, + }, + "group": { + Type: schema.TypeString, + Optional: true, + Description: `Principal that is be authorized in the cluster (at least of one the oneof +is required). Updating one will unset the other automatically. +group is the group, as seen by the kubernetes cluster.`, + ExactlyOneOf: []string{"user", "group"}, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels for this ScopeRBACRoleBinding.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "user": { + Type: schema.TypeString, + Optional: true, + Description: `Principal that is be authorized in the cluster (at least of one the oneof +is required). Updating one will unset the other automatically. +user is the name of the user as seen by the kubernetes cluster, example +"alice" or "alice@domain.tld"`, + ExactlyOneOf: []string{"user", "group"}, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the RBAC Role Binding was created in UTC.`, + }, + "delete_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the RBAC Role Binding was deleted in UTC.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name for the RBAC Role Binding`, + }, + "state": { + Type: schema.TypeList, + Computed: true, + Description: `State of the RBAC Role Binding resource.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "code": { + Type: schema.TypeString, + Computed: true, + Description: `Code describes the state of a RBAC Role Binding resource.`, + }, + }, + }, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Google-generated UUID for this resource.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Time the RBAC Role Binding was updated in UTC.`, + }, + "project": { + Type: 
schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceGKEHub2ScopeRBACRoleBindingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + userProp, err := expandGKEHub2ScopeRBACRoleBindingUser(d.Get("user"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user"); !tpgresource.IsEmptyValue(reflect.ValueOf(userProp)) && (ok || !reflect.DeepEqual(v, userProp)) { + obj["user"] = userProp + } + groupProp, err := expandGKEHub2ScopeRBACRoleBindingGroup(d.Get("group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("group"); !tpgresource.IsEmptyValue(reflect.ValueOf(groupProp)) && (ok || !reflect.DeepEqual(v, groupProp)) { + obj["group"] = groupProp + } + roleProp, err := expandGKEHub2ScopeRBACRoleBindingRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(roleProp)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + labelsProp, err := expandGKEHub2ScopeRBACRoleBindingLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/?rbacrolebinding_id={{scope_rbac_role_binding_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new ScopeRBACRoleBinding: %#v", obj) + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error 
fetching project for ScopeRBACRoleBinding: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating ScopeRBACRoleBinding: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = GKEHub2OperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating ScopeRBACRoleBinding", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create ScopeRBACRoleBinding: %s", err) + } + + if err := d.Set("name", flattenGKEHub2ScopeRBACRoleBindingName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating ScopeRBACRoleBinding %q: %#v", d.Id(), res) + + return resourceGKEHub2ScopeRBACRoleBindingRead(d, meta) +} + +func resourceGKEHub2ScopeRBACRoleBindingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ScopeRBACRoleBinding: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("GKEHub2ScopeRBACRoleBinding %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + + if err := d.Set("name", flattenGKEHub2ScopeRBACRoleBindingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("uid", flattenGKEHub2ScopeRBACRoleBindingUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading 
ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("create_time", flattenGKEHub2ScopeRBACRoleBindingCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("update_time", flattenGKEHub2ScopeRBACRoleBindingUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("delete_time", flattenGKEHub2ScopeRBACRoleBindingDeleteTime(res["deleteTime"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("state", flattenGKEHub2ScopeRBACRoleBindingState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("user", flattenGKEHub2ScopeRBACRoleBindingUser(res["user"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("group", flattenGKEHub2ScopeRBACRoleBindingGroup(res["group"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("role", flattenGKEHub2ScopeRBACRoleBindingRole(res["role"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + if err := d.Set("labels", flattenGKEHub2ScopeRBACRoleBindingLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading ScopeRBACRoleBinding: %s", err) + } + + return nil +} + +func resourceGKEHub2ScopeRBACRoleBindingUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ScopeRBACRoleBinding: %s", err) + } + billingProject = project + + obj := 
make(map[string]interface{}) + userProp, err := expandGKEHub2ScopeRBACRoleBindingUser(d.Get("user"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("user"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userProp)) { + obj["user"] = userProp + } + groupProp, err := expandGKEHub2ScopeRBACRoleBindingGroup(d.Get("group"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("group"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, groupProp)) { + obj["group"] = groupProp + } + roleProp, err := expandGKEHub2ScopeRBACRoleBindingRole(d.Get("role"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("role"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, roleProp)) { + obj["role"] = roleProp + } + labelsProp, err := expandGKEHub2ScopeRBACRoleBindingLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating ScopeRBACRoleBinding %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("user") { + updateMask = append(updateMask, "user") + } + + if d.HasChange("group") { + updateMask = append(updateMask, "group") + } + + if d.HasChange("role") { + updateMask = append(updateMask, "role") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + 
if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating ScopeRBACRoleBinding %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating ScopeRBACRoleBinding %q: %#v", d.Id(), res) + } + + err = GKEHub2OperationWaitTime( + config, res, project, "Updating ScopeRBACRoleBinding", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceGKEHub2ScopeRBACRoleBindingRead(d, meta) +} + +func resourceGKEHub2ScopeRBACRoleBindingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for ScopeRBACRoleBinding: %s", err) + } + billingProject = project + + url, err := tpgresource.ReplaceVars(d, config, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting ScopeRBACRoleBinding %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + 
Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "ScopeRBACRoleBinding") + } + + err = GKEHub2OperationWaitTime( + config, res, project, "Deleting ScopeRBACRoleBinding", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting ScopeRBACRoleBinding %q: %#v", d.Id(), res) + return nil +} + +func resourceGKEHub2ScopeRBACRoleBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/locations/global/scopes/(?P[^/]+)/rbacrolebindings/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenGKEHub2ScopeRBACRoleBindingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingUid(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingDeleteTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingState(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["code"] = + flattenGKEHub2ScopeRBACRoleBindingStateCode(original["code"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2ScopeRBACRoleBindingStateCode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingUser(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingGroup(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["predefined_role"] = + flattenGKEHub2ScopeRBACRoleBindingRolePredefinedRole(original["predefinedRole"], d, config) + return []interface{}{transformed} +} +func flattenGKEHub2ScopeRBACRoleBindingRolePredefinedRole(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenGKEHub2ScopeRBACRoleBindingLabels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandGKEHub2ScopeRBACRoleBindingUser(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEHub2ScopeRBACRoleBindingGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEHub2ScopeRBACRoleBindingRole(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPredefinedRole, err := expandGKEHub2ScopeRBACRoleBindingRolePredefinedRole(original["predefined_role"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPredefinedRole); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["predefinedRole"] = transformedPredefinedRole + } + + return transformed, nil +} + +func expandGKEHub2ScopeRBACRoleBindingRolePredefinedRole(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandGKEHub2ScopeRBACRoleBindingLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding_generated_test.go b/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding_generated_test.go new file mode 100644 index 0000000000..0ff188e6bb --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding_generated_test.go @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package gkehub2_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +func TestAccGKEHub2ScopeRBACRoleBinding_gkehubScopeRbacRoleBindingBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHub2ScopeRBACRoleBindingDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2ScopeRBACRoleBinding_gkehubScopeRbacRoleBindingBasicExample(context), + }, + { + ResourceName: "google_gke_hub_scope_rbac_role_binding.scoperbacrolebinding", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"scope_rbac_role_binding_id", "scope_id"}, + }, + }, + }) +} + +func testAccGKEHub2ScopeRBACRoleBinding_gkehubScopeRbacRoleBindingBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "scoperbacrolebinding" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_scope_rbac_role_binding" "scoperbacrolebinding" { + scope_rbac_role_binding_id = "tf-test-scope-rbac-role-binding%{random_suffix}" + scope_id = "tf-test-scope%{random_suffix}" + 
user = "test-email@gmail.com" + role { + predefined_role = "ADMIN" + } + labels = { + key = "value" + } + depends_on = [google_gke_hub_scope.scoperbacrolebinding] +} +`, context) +} + +func testAccCheckGKEHub2ScopeRBACRoleBindingDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_gke_hub_scope_rbac_role_binding" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{GKEHub2BasePath}}projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("GKEHub2ScopeRBACRoleBinding still exists at %s", url) + } + } + + return nil + } +} diff --git a/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding_test.go b/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding_test.go new file mode 100644 index 0000000000..e792848879 --- /dev/null +++ b/google-beta/services/gkehub2/resource_gke_hub_scope_rbac_role_binding_test.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package gkehub2_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/envvar" +) + +func TestAccGKEHub2ScopeRBACRoleBinding_gkehubScopeRbacRoleBindingBasicExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2ScopeRBACRoleBinding_gkehubScopeRbacRoleBindingBasicExample_basic(context), + }, + { + ResourceName: "google_gke_hub_scope_rbac_role_binding.scoperbacrolebinding", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"scope_rbac_role_binding_id", "scope_id"}, + }, + { + Config: testAccGKEHub2ScopeRBACRoleBinding_gkehubScopeRbacRoleBindingBasicExample_update(context), + }, + { + ResourceName: "google_gke_hub_scope_rbac_role_binding.scoperbacrolebinding", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"scope_rbac_role_binding_id", "scope_id"}, + }, + }, + }) +} + +func testAccGKEHub2ScopeRBACRoleBinding_gkehubScopeRbacRoleBindingBasicExample_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "scoperbacrolebinding" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_scope_rbac_role_binding" "scoperbacrolebinding" { + scope_rbac_role_binding_id = "tf-test-scope-rbac-role-binding%{random_suffix}" + scope_id = "tf-test-scope%{random_suffix}" + user = "test-email@gmail.com" + role { + predefined_role = "ADMIN" + } + labels = { + key = "value" + } + depends_on = 
[google_gke_hub_scope.scoperbacrolebinding] +} +`, context) +} + +func testAccGKEHub2ScopeRBACRoleBinding_gkehubScopeRbacRoleBindingBasicExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_scope" "scoperbacrolebinding" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_scope_rbac_role_binding" "scoperbacrolebinding" { + scope_rbac_role_binding_id = "tf-test-scope-rbac-role-binding%{random_suffix}" + scope_id = "tf-test-scope%{random_suffix}" + group = "test-email2@gmail.com" + role { + predefined_role = "VIEW" + } + labels = { + key = "updated_value" + } + depends_on = [google_gke_hub_scope.scoperbacrolebinding] +} +`, context) +} diff --git a/google-beta/services/logging/resource_logging_project_sink.go b/google-beta/services/logging/resource_logging_project_sink.go index 498534a127..a86d484296 100644 --- a/google-beta/services/logging/resource_logging_project_sink.go +++ b/google-beta/services/logging/resource_logging_project_sink.go @@ -41,7 +41,6 @@ func ResourceLoggingProjectSink() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, - ForceNew: true, Description: `Whether or not to create a unique identity associated with this sink. If false (the legacy behavior), then the writer_identity used is serviceAccount:cloud-logs@system.gserviceaccount.com. If true, then a unique service account is created and used for this sink. 
If you wish to publish logs across projects, you must set unique_writer_identity to true.`, } return schm diff --git a/google-beta/services/logging/resource_logging_project_sink_test.go b/google-beta/services/logging/resource_logging_project_sink_test.go index 2cf7b66aa6..541ebcff3a 100644 --- a/google-beta/services/logging/resource_logging_project_sink_test.go +++ b/google-beta/services/logging/resource_logging_project_sink_test.go @@ -313,6 +313,8 @@ resource "google_logging_project_sink" "described" { destination = "storage.googleapis.com/${google_storage_bucket.gcs-bucket.name}" filter = "logName=\"projects/%s/logs/compute.googleapis.com%%2Factivity_log\" AND severity>=ERROR" description = "this is a description for a project level logging sink" + + unique_writer_identity = false } resource "google_storage_bucket" "gcs-bucket" { @@ -330,6 +332,8 @@ resource "google_logging_project_sink" "described" { destination = "storage.googleapis.com/${google_storage_bucket.gcs-bucket.name}" filter = "logName=\"projects/%s/logs/compute.googleapis.com%%2Factivity_log\" AND severity>=ERROR" description = "description updated" + + unique_writer_identity = true } resource "google_storage_bucket" "gcs-bucket" { @@ -463,6 +467,8 @@ resource "google_logging_project_sink" "bigquery" { name = "%s" destination = "bigquery.googleapis.com/projects/%s/datasets/${google_bigquery_dataset.bq_dataset.dataset_id}" filter = "logName=\"projects/%s/logs/compute.googleapis.com%%2Factivity_log\" AND severity>=WARNING" + + unique_writer_identity = true } resource "google_bigquery_dataset" "bq_dataset" { diff --git a/google-beta/services/networkconnectivity/resource_network_connectivity_service_connection_policy.go b/google-beta/services/networkconnectivity/resource_network_connectivity_service_connection_policy.go index 7480d0d4eb..f6080f25b9 100644 --- a/google-beta/services/networkconnectivity/resource_network_connectivity_service_connection_policy.go +++ 
b/google-beta/services/networkconnectivity/resource_network_connectivity_service_connection_policy.go @@ -166,6 +166,14 @@ It is provided by the Service Producer. Google services have a prefix of gcp. Fo Optional: true, Description: `A developer-facing error message.`, }, + "details": { + Type: schema.TypeList, + Computed: true, + Description: `A list of messages that carry the error details.`, + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, }, }, }, @@ -693,6 +701,8 @@ func flattenNetworkConnectivityServiceConnectionPolicyPscConnectionsError(v inte flattenNetworkConnectivityServiceConnectionPolicyPscConnectionsErrorMessage(original["message"], d, config) transformed["code"] = flattenNetworkConnectivityServiceConnectionPolicyPscConnectionsErrorCode(original["code"], d, config) + transformed["details"] = + flattenNetworkConnectivityServiceConnectionPolicyPscConnectionsErrorDetails(original["details"], d, config) return []interface{}{transformed} } func flattenNetworkConnectivityServiceConnectionPolicyPscConnectionsErrorMessage(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { @@ -716,6 +726,10 @@ func flattenNetworkConnectivityServiceConnectionPolicyPscConnectionsErrorCode(v return v // let terraform core handle it otherwise } +func flattenNetworkConnectivityServiceConnectionPolicyPscConnectionsErrorDetails(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + func flattenNetworkConnectivityServiceConnectionPolicyPscConnectionsGceOperation(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v } diff --git a/google-beta/services/secretmanager/resource_secret_manager_secret.go b/google-beta/services/secretmanager/resource_secret_manager_secret.go index adeceea20d..2ec1cfaa42 100644 --- a/google-beta/services/secretmanager/resource_secret_manager_secret.go +++ b/google-beta/services/secretmanager/resource_secret_manager_secret.go @@ -94,7 +94,6 
@@ after the Secret has been created.`, "customer_managed_encryption": { Type: schema.TypeList, Optional: true, - ForceNew: true, Description: `Customer Managed Encryption for the secret.`, MaxItems: 1, Elem: &schema.Resource{ @@ -102,7 +101,6 @@ after the Secret has been created.`, "kms_key_name": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: `Describes the Cloud KMS encryption key that will be used to protect destination secret.`, }, }, @@ -515,6 +513,24 @@ func resourceSecretManagerSecretUpdate(d *schema.ResourceData, meta interface{}) if err != nil { return err } + replicationProp, err := expandSecretManagerSecretReplication(d.Get("replication"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("replication"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, replicationProp)) { + obj["replication"] = replicationProp + } + + if d.HasChange("replication") { + updateMask = append(updateMask, "replication") + } + + // Refreshing updateMask after adding extra schema entries + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + log.Printf("[DEBUG] Update URL %q: %v", d.Id(), url) // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { diff --git a/google-beta/services/secretmanager/resource_secret_manager_secret_test.go b/google-beta/services/secretmanager/resource_secret_manager_secret_test.go index 38b63e3fad..51c762c752 100644 --- a/google-beta/services/secretmanager/resource_secret_manager_secret_test.go +++ b/google-beta/services/secretmanager/resource_secret_manager_secret_test.go @@ -160,6 +160,64 @@ func TestAccSecretManagerSecret_versionAliasesUpdate(t *testing.T) { }) } +func TestAccSecretManagerSecret_userManagedCmekUpdate(t *testing.T) { + t.Parallel() + + kmscentral := 
acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-central1", "tf-secret-manager-managed-central-key1") + kmseast := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-east1", "tf-secret-manager-managed-east-key1") + kmscentralother := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-central1", "tf-secret-manager-managed-central-key2") + context := map[string]interface{}{ + "pid": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "kms_key_name_central": kmscentral.CryptoKey.Name, + "kms_key_name_east": kmseast.CryptoKey.Name, + "kms_key_name_central_other": kmscentralother.CryptoKey.Name, + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretMangerSecret_userManagedCmekBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretMangerSecret_userManagedCmekUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretMangerSecret_userManagedCmekUpdate2(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretMangerSecret_userManagedCmekBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + }, + }) +} + func testAccSecretManagerSecret_basic(context 
map[string]interface{}) string { return acctest.Nprintf(` resource "google_secret_manager_secret" "secret-basic" { @@ -427,3 +485,150 @@ resource "google_secret_manager_secret_version" "secret-version-4" { } `, context) } + +func testAccSecretMangerSecret_userManagedCmekBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-1" { + crypto_key_id = "%{kms_key_name_central}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-2" { + crypto_key_id = "%{kms_key_name_central_other}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-east-binding" { + crypto_key_id = "%{kms_key_name_east}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } + depends_on = [ + google_kms_crypto_key_iam_member.kms-central-binding-1, + google_kms_crypto_key_iam_member.kms-central-binding-2, + google_kms_crypto_key_iam_member.kms-east-binding, + ] +} +`, context) +} + +func testAccSecretMangerSecret_userManagedCmekUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" 
"kms-central-binding-1" { + crypto_key_id = "%{kms_key_name_central}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-2" { + crypto_key_id = "%{kms_key_name_central_other}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-east-binding" { + crypto_key_id = "%{kms_key_name_east}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + user_managed { + replicas { + location = "us-central1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_central}" + } + } + replicas { + location = "us-east1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_east}" + } + } + } + } + depends_on = [ + google_kms_crypto_key_iam_member.kms-central-binding-1, + google_kms_crypto_key_iam_member.kms-central-binding-2, + google_kms_crypto_key_iam_member.kms-east-binding, + ] +} +`, context) +} + +func testAccSecretMangerSecret_userManagedCmekUpdate2(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-1" { + crypto_key_id = "%{kms_key_name_central}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" 
"kms-central-binding-2" { + crypto_key_id = "%{kms_key_name_central_other}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-east-binding" { + crypto_key_id = "%{kms_key_name_east}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + user_managed { + replicas { + location = "us-central1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_central_other}" + } + } + replicas { + location = "us-east1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_east}" + } + } + } + } + depends_on = [ + google_kms_crypto_key_iam_member.kms-central-binding-1, + google_kms_crypto_key_iam_member.kms-central-binding-2, + google_kms_crypto_key_iam_member.kms-east-binding, + ] +} +`, context) +} diff --git a/google-beta/services/servicedirectory/resource_service_directory_namespace.go b/google-beta/services/servicedirectory/resource_service_directory_namespace.go index c29e3d45f6..e8b00ad2d1 100644 --- a/google-beta/services/servicedirectory/resource_service_directory_namespace.go +++ b/google-beta/services/servicedirectory/resource_service_directory_namespace.go @@ -57,6 +57,7 @@ func ResourceServiceDirectoryNamespace() *schema.Resource { "location": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: `The location for the Namespace. 
A full list of valid locations can be found by running 'gcloud beta service-directory locations list'.`, diff --git a/google-beta/services/vpcaccess/data_source_vpc_access_connector_test.go b/google-beta/services/vpcaccess/data_source_vpc_access_connector_test.go index 4a98a4c25a..b90785a247 100644 --- a/google-beta/services/vpcaccess/data_source_vpc_access_connector_test.go +++ b/google-beta/services/vpcaccess/data_source_vpc_access_connector_test.go @@ -39,7 +39,7 @@ func testAccVPCAccessConnectorDatasourceConfig(suffix string) string { return fmt.Sprintf(` resource "google_vpc_access_connector" "connector" { name = "vpc-con-test-%s" - ip_cidr_range = "10.8.0.0/28" + ip_cidr_range = "10.8.0.32/28" network = "default" region = "us-central1" } diff --git a/google-beta/tpgresource/common_operation.go b/google-beta/tpgresource/common_operation.go index 44eebe4501..c034dd2877 100644 --- a/google-beta/tpgresource/common_operation.go +++ b/google-beta/tpgresource/common_operation.go @@ -140,10 +140,7 @@ func CommonRefreshFunc(w Waiter) resource.StateRefreshFunc { func OperationWait(w Waiter, activity string, timeout time.Duration, pollInterval time.Duration) error { if OperationDone(w) { - if w.Error() != nil { - return w.Error() - } - return nil + return w.Error() } c := &resource.StateChangeConf{ @@ -163,11 +160,8 @@ func OperationWait(w Waiter, activity string, timeout time.Duration, pollInterva if err != nil { return err } - if w.Error() != nil { - return w.Error() - } - return nil + return w.Error() } // The cloud resource manager API operation is an example of one of many diff --git a/google-beta/transport/config_test.go b/google-beta/transport/config_test.go index 84ba47d61f..6d3dfc7bac 100644 --- a/google-beta/transport/config_test.go +++ b/google-beta/transport/config_test.go @@ -19,7 +19,7 @@ import ( const testOauthScope = "https://www.googleapis.com/auth/compute" -func TestHandleSDKDefaults_RequestReason(t *testing.T) { +func 
TestHandleSDKDefaults_BillingProject(t *testing.T) { cases := map[string]struct { ConfigValue string EnvVariables map[string]string @@ -27,22 +27,22 @@ func TestHandleSDKDefaults_RequestReason(t *testing.T) { ValueNotProvided bool ExpectError bool }{ - "request_reason value set in the provider config is not overridden by ENVs": { - ConfigValue: "request-reason-from-config", + "billing project value set in the provider config is not overridden by ENVs": { + ConfigValue: "my-billing-project-from-config", EnvVariables: map[string]string{ - "CLOUDSDK_CORE_REQUEST_REASON": "request-reason-from-env", + "GOOGLE_BILLING_PROJECT": "my-billing-project-from-env", }, - ExpectedValue: "request-reason-from-config", + ExpectedValue: "my-billing-project-from-config", }, - "request_reason can be set by environment variable, when no value supplied via the config": { + "billing project can be set by environment variable, when no value supplied via the config": { EnvVariables: map[string]string{ - "CLOUDSDK_CORE_REQUEST_REASON": "request-reason-from-env", + "GOOGLE_BILLING_PROJECT": "my-billing-project-from-env", }, - ExpectedValue: "request-reason-from-env", + ExpectedValue: "my-billing-project-from-env", }, "when no values are provided via config or environment variables, the field remains unset without error": { EnvVariables: map[string]string{ - "CLOUDSDK_CORE_REQUEST_REASON": "", // CLOUDSDK_CORE_REQUEST_REASON unset + "GOOGLE_BILLING_PROJECT": "", // GOOGLE_BILLING_PROJECT unset }, ValueNotProvided: true, }, @@ -58,7 +58,7 @@ func TestHandleSDKDefaults_RequestReason(t *testing.T) { // Set config value(s) if tc.ConfigValue != "" { - d.Set("request_reason", tc.ConfigValue) + d.Set("billing_project", tc.ConfigValue) } // Set ENVs @@ -80,12 +80,326 @@ func TestHandleSDKDefaults_RequestReason(t *testing.T) { } // Assert - v, ok := d.GetOk("request_reason") + v, ok := d.GetOk("billing_project") if !ok && !tc.ValueNotProvided { - t.Fatal("expected request_reason to be set in the 
provider data") + t.Fatal("expected billing_project to be set in the provider data") } if ok && tc.ValueNotProvided { - t.Fatal("expected request_reason to not be set in the provider data") + t.Fatal("expected billing_project to not be set in the provider data") + } + + if v != tc.ExpectedValue { + t.Fatalf("unexpected value: wanted %v, got, %v", tc.ExpectedValue, v) + } + }) + } +} + +func TestHandleSDKDefaults_Region(t *testing.T) { + cases := map[string]struct { + ConfigValue string + EnvVariables map[string]string + ExpectedValue string + ValueNotProvided bool + ExpectError bool + }{ + "region value set in the provider config is not overridden by ENVs": { + ConfigValue: "region-from-config", + EnvVariables: map[string]string{ + "GOOGLE_REGION": "region-from-env", + "GCLOUD_REGION": "", // GCLOUD_REGION unset + "CLOUDSDK_COMPUTE_REGION": "", // CLOUDSDK_COMPUTE_REGION unset + }, + ExpectedValue: "region-from-config", + }, + "region can be set by environment variable, when no value supplied via the config": { + EnvVariables: map[string]string{ + "GOOGLE_REGION": "region-from-env", + "GCLOUD_REGION": "", // GCLOUD_REGION unset + "CLOUDSDK_COMPUTE_REGION": "", // CLOUDSDK_COMPUTE_REGION unset + }, + ExpectedValue: "region-from-env", + }, + "when multiple region environment variables are provided, `GOOGLE_REGION` is used first": { + EnvVariables: map[string]string{ + "GOOGLE_REGION": "project-from-GOOGLE_REGION", + "GCLOUD_REGION": "project-from-GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION": "project-from-CLOUDSDK_COMPUTE_REGION", + }, + ExpectedValue: "project-from-GOOGLE_REGION", + }, + "when multiple region environment variables are provided, `GCLOUD_REGION` is used second": { + EnvVariables: map[string]string{ + "GOOGLE_REGION": "", // GOOGLE_REGION unset + "GCLOUD_REGION": "project-from-GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION": "project-from-CLOUDSDK_COMPUTE_REGION", + }, + ExpectedValue: "project-from-GCLOUD_REGION", + }, + "when multiple region environment 
variables are provided, `CLOUDSDK_COMPUTE_REGION` is the last-used ENV": { + EnvVariables: map[string]string{ + "GOOGLE_REGION": "", // GOOGLE_REGION unset + "GCLOUD_REGION": "", // GCLOUD_REGION unset + "CLOUDSDK_COMPUTE_REGION": "project-from-CLOUDSDK_COMPUTE_REGION", + }, + ExpectedValue: "project-from-CLOUDSDK_COMPUTE_REGION", + }, + "when no values are provided via config or environment variables, the field remains unset without error": { + EnvVariables: map[string]string{ + "GOOGLE_REGION": "", // GOOGLE_REGION unset + "GCLOUD_REGION": "", // GCLOUD_REGION unset + "CLOUDSDK_COMPUTE_REGION": "", // CLOUDSDK_COMPUTE_REGION unset + }, + ValueNotProvided: true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + // Create empty schema.ResourceData using the SDK Provider schema + emptyConfigMap := map[string]interface{}{} + d := schema.TestResourceDataRaw(t, provider.Provider().Schema, emptyConfigMap) + + // Set config value(s) + if tc.ConfigValue != "" { + d.Set("region", tc.ConfigValue) + } + + // Set ENVs + if len(tc.EnvVariables) > 0 { + for k, v := range tc.EnvVariables { + t.Setenv(k, v) + } + } + + // Act + err := transport_tpg.HandleSDKDefaults(d) + + // Assert + if err != nil { + if !tc.ExpectError { + t.Fatalf("error: %v", err) + } + return + } + + // Assert + v, ok := d.GetOk("region") + if !ok && !tc.ValueNotProvided { + t.Fatal("expected region to be set in the provider data") + } + if ok && tc.ValueNotProvided { + t.Fatal("expected region to not be set in the provider data") + } + + if v != tc.ExpectedValue { + t.Fatalf("unexpected value: wanted %v, got, %v", tc.ExpectedValue, v) + } + }) + } +} + +func TestHandleSDKDefaults_Zone(t *testing.T) { + cases := map[string]struct { + ConfigValue string + EnvVariables map[string]string + ExpectedValue string + ValueNotProvided bool + ExpectError bool + }{ + "region value set in the provider config is not overridden by ENVs": { + ConfigValue: "zone-from-config", + 
EnvVariables: map[string]string{ + "GOOGLE_ZONE": "zone-from-env", + "GCLOUD_ZONE": "", // GCLOUD_ZONE unset + "CLOUDSDK_COMPUTE_ZONE": "", // CLOUDSDK_COMPUTE_ZONE unset + }, + ExpectedValue: "zone-from-config", + }, + "zone can be set by environment variable, when no value supplied via the config": { + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "zone-from-env", + "GCLOUD_ZONE": "", // GCLOUD_ZONE unset + "CLOUDSDK_COMPUTE_ZONE": "", // CLOUDSDK_COMPUTE_ZONE unset + }, + ExpectedValue: "zone-from-env", + }, + "when multiple zone environment variables are provided, `GOOGLE_ZONE` is used first": { + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "zone-from-GOOGLE_ZONE", + "GCLOUD_ZONE": "zone-from-GCLOUD_ZONE", + "CLOUDSDK_COMPUTE_ZONE": "zone-from-CLOUDSDK_COMPUTE_ZONE", + }, + ExpectedValue: "zone-from-GOOGLE_ZONE", + }, + "when multiple zone environment variables are provided, `GCLOUD_ZONE` is used second": { + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "", // GOOGLE_ZONE unset + "GCLOUD_ZONE": "zone-from-GCLOUD_ZONE", + "CLOUDSDK_COMPUTE_ZONE": "zone-from-CLOUDSDK_COMPUTE_ZONE", + }, + ExpectedValue: "zone-from-GCLOUD_ZONE", + }, + "when multiple zone environment variables are provided, `CLOUDSDK_COMPUTE_ZONE` is the last-used ENV": { + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "", // GOOGLE_ZONE unset + "GCLOUD_ZONE": "", // GCLOUD_ZONE unset + "CLOUDSDK_COMPUTE_ZONE": "zone-from-CLOUDSDK_COMPUTE_ZONE", + }, + ExpectedValue: "zone-from-CLOUDSDK_COMPUTE_ZONE", + }, + "when no values are provided via config or environment variables, the field remains unset without error": { + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "", // GOOGLE_ZONE unset + "GCLOUD_ZONE": "", // GCLOUD_ZONE unset + "CLOUDSDK_COMPUTE_ZONE": "", // CLOUDSDK_COMPUTE_ZONE unset + }, + ValueNotProvided: true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + // Create empty schema.ResourceData using the SDK Provider schema + 
emptyConfigMap := map[string]interface{}{} + d := schema.TestResourceDataRaw(t, provider.Provider().Schema, emptyConfigMap) + + // Set config value(s) + if tc.ConfigValue != "" { + d.Set("zone", tc.ConfigValue) + } + + // Set ENVs + if len(tc.EnvVariables) > 0 { + for k, v := range tc.EnvVariables { + t.Setenv(k, v) + } + } + + // Act + err := transport_tpg.HandleSDKDefaults(d) + + // Assert + if err != nil { + if !tc.ExpectError { + t.Fatalf("error: %v", err) + } + return + } + + // Assert + v, ok := d.GetOk("zone") + if !ok && !tc.ValueNotProvided { + t.Fatal("expected zone to be set in the provider data") + } + if ok && tc.ValueNotProvided { + t.Fatal("expected zone to not be set in the provider data") + } + + if v != tc.ExpectedValue { + t.Fatalf("unexpected value: wanted %v, got, %v", tc.ExpectedValue, v) + } + }) + } +} + +// The `user_project_override` field is an odd one out, as other provider schema fields tend to be strings +// and `user_project_override` is a boolean +func TestHandleSDKDefaults_UserProjectOverride(t *testing.T) { + cases := map[string]struct { + SetViaConfig bool // Awkward, but necessary as zero value of ConfigValue could be intended + ConfigValue bool + ValueNotProvided bool + EnvVariables map[string]string + ExpectedValue bool + ExpectError bool + }{ + "user_project_override value set in the provider schema is not overridden by ENVs": { + SetViaConfig: true, + ConfigValue: false, + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "true", + }, + ExpectedValue: false, + }, + "user_project_override can be set by environment variable: true": { + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "true", + }, + ExpectedValue: true, + }, + "user_project_override can be set by environment variable: false": { + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "false", + }, + ExpectedValue: false, + }, + "user_project_override can be set by environment variable: 1": { + EnvVariables: map[string]string{ + 
"USER_PROJECT_OVERRIDE": "1", + }, + ExpectedValue: true, + }, + "user_project_override can be set by environment variable: 0": { + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "0", + }, + ExpectedValue: false, + }, + "error returned due to non-boolean environment variables": { + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "I'm not a boolean", + }, + ExpectError: true, + }, + "when no values are provided via config or environment variables, the field remains unset without error": { + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "", // USER_PROJECT_OVERRIDE unset + }, + ValueNotProvided: true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + // Arrange + // Create empty schema.ResourceData using the SDK Provider schema + emptyConfigMap := map[string]interface{}{} + d := schema.TestResourceDataRaw(t, provider.Provider().Schema, emptyConfigMap) + + // Set config value(s) + if tc.SetViaConfig { + d.Set("user_project_override", tc.ConfigValue) + } + + // Set ENVs + if len(tc.EnvVariables) > 0 { + for k, v := range tc.EnvVariables { + t.Setenv(k, v) + } + } + + // Act + err := transport_tpg.HandleSDKDefaults(d) + + // Assert + if err != nil { + if !tc.ExpectError { + t.Fatalf("error: %v", err) + } + return + } + + v, ok := d.GetOkExists("user_project_override") + if !ok && !tc.ValueNotProvided { + t.Fatal("expected user_project_override to be set in the provider data") + } + if ok && tc.ValueNotProvided { + t.Fatal("expected user_project_override to not be set in the provider data") } if v != tc.ExpectedValue { diff --git a/website/docs/d/compute_region_instance_template.html.markdown b/website/docs/d/compute_region_instance_template.html.markdown index 27c7af98f3..1f2f628e23 100644 --- a/website/docs/d/compute_region_instance_template.html.markdown +++ b/website/docs/d/compute_region_instance_template.html.markdown @@ -11,9 +11,6 @@ Get information about a VM instance template resource within GCE. 
For more infor and [API](https://cloud.google.com/compute/docs/reference/rest/v1/regionInstanceTemplates). -~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. -See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. - ## Example Usage ```hcl diff --git a/website/docs/d/gke_hub_scope_iam_policy.html.markdown b/website/docs/d/gke_hub_scope_iam_policy.html.markdown new file mode 100644 index 0000000000..5f92475790 --- /dev/null +++ b/website/docs/d/gke_hub_scope_iam_policy.html.markdown @@ -0,0 +1,50 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "GKEHub" +description: |- + A datasource to retrieve the IAM policy state for GKEHub Scope +--- + + +# `google_gke_hub_scope_iam_policy` +Retrieves the current IAM policy data for scope + + + +## example + +```hcl +data "google_gke_hub_scope_iam_policy" "policy" { + project = google_gke_hub_scope.scope.project + scope_id = google_gke_hub_scope.scope.scope_id +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. 
+ +## Attributes Reference + +The attributes are exported: + +* `etag` - (Computed) The etag of the IAM policy. + +* `policy_data` - (Required only by `google_gke_hub_scope_iam_policy`) The policy data generated by + a `google_iam_policy` data source. diff --git a/website/docs/guides/version_5_upgrade.html.markdown b/website/docs/guides/version_5_upgrade.html.markdown index 7ed72ff693..f2b1c9e8f9 100644 --- a/website/docs/guides/version_5_upgrade.html.markdown +++ b/website/docs/guides/version_5_upgrade.html.markdown @@ -325,3 +325,48 @@ This change involved the following resources: `google_game_services_game_server_ Previously, `database_flags` was a list, making it order-dependent. It is now a set. If you were relying on accessing an individual flag by index (for example, `google_sql_database_instance.instance.settings.0.database_flags.0.name`), then that will now need to by hash (for example, `google_sql_database_instance.instance.settings.0.database_flags..name`). + +## Resource: `google_compute_security_policy` + +### `rule.rate_limit_options.enforce_on_key` no longer has a default value + +Previously, the default value for `rule.rate_limit_options.enforce_on_key` was "ALL"; now this field no longer has a default value. + +## Resource: `google_logging_project_sink` + +### `unique_writer_identity` now defaults to `TRUE` + +Previously, the default value of `unique_writer_identity` was `FALSE`. Now it will be `TRUE`. + +This will change the behavior for new sinks created using the default value. Previously, all sinks created using the default value had a `writer_identity` of `serviceAccount:cloud-logs@system.gserviceaccount.com`. Now sinks created using the default value will have a `writer_identity` that differs depending on the parent resource, for example: `serviceAccount:service-@gcp-sa-logging.iam.gserviceaccount.com` for a project-level sink. 
+ +IAM permissions that were manually configured for `cloud-logs@system.gserviceaccount.com` and `iam_bindings` that are hard-coded to use `cloud-logs@system.gserviceaccount.com` will not properly apply permissions to the `writer_identity` of new sinks created using the default value. **If a sink is missing the proper permissions it will be successfully created but it will fail to export log data.** + +Currently there are only two types of log sinks that populate `writer_identity` and can be created with `unique_writer_identity = false`. Only these types of sinks may be affected: +* Sinks with a Cloud Pub/Sub topic `destination` for which the topic is in the same project as the sink. +* Sinks for a BigQuery dataset `destination` for which the dataset is in the same project as the sink. + +To ensure that proper permissions are in place for new sinks created using the default value, check that the related `iam_bindings` are configured and reference the sink's `writer_identity` property. + +Here is an example of proper `iam_bindings`: + +```hcl +resource "google_logging_project_sink" "gcs-bucket-sink" { + name = "my-gcs-bucket-sink" + description = "Routes all admin activity logs to a GCS bucket" + destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + filter = "log_id(\"cloudaudit.googleapis.com/activity\")" + # `unique_writer_identity is explicitly set to true here, but will now default to 'true'. + unique_writer_identity = true +} + +# We must grant proper permissions for the log sink to access the GCS bucket. 
+resource "google_project_iam_binding" "gcs-bucket-writer" { + project = "your-project-id" + role = "roles/storage.objectCreator" + + members = [ + google_logging_project_sink.gcs-bucket-sink.writer_identity, + ] +} +``` diff --git a/website/docs/r/biglake_database.html.markdown b/website/docs/r/biglake_database.html.markdown new file mode 100644 index 0000000000..fb4f575e01 --- /dev/null +++ b/website/docs/r/biglake_database.html.markdown @@ -0,0 +1,157 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Biglake" +description: |- + Databases are containers of tables. +--- + +# google\_biglake\_database + +Databases are containers of tables. 
+ + +To get more information about Database, see: + +* [API documentation](https://cloud.google.com/bigquery/docs/reference/biglake/rest/v1/projects.locations.catalogs.databases) +* How-to Guides + * [Manage open source metadata with BigLake Metastore](https://cloud.google.com/bigquery/docs/manage-open-source-metadata#create_databases) + + +## Example Usage - Biglake Database + + +```hcl +resource "google_biglake_catalog" "catalog" { + name = "my_catalog" + location = "US" +} + +resource "google_storage_bucket" "bucket" { + name = "my_bucket" + location = "US" + force_destroy = true + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "metadata_folder" { + name = "metadata/" + content = " " + bucket = google_storage_bucket.bucket.name +} + +resource "google_biglake_database" "database" { + name = "my_database" + catalog = google_biglake_catalog.catalog.id + type = "HIVE" + hive_options { + location_uri = "gs://${google_storage_bucket.bucket.name}/${google_storage_bucket_object.metadata_folder.name}" + parameters = { + "owner": "John Doe" + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `type` - + (Required) + The database type. + +* `hive_options` - + (Required) + Options of a Hive database. + Structure is [documented below](#nested_hive_options). + +* `catalog` - + (Required) + The parent catalog. + +* `name` - + (Required) + The name of the database. + + +The `hive_options` block supports: + +* `location_uri` - + (Optional) + Cloud Storage folder URI where the database data is stored, starting with "gs://". + +* `parameters` - + (Optional) + Stores user supplied Hive database parameters. An object containing a + list of"key": value pairs. + Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
+ +- - - + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `{{catalog}}/databases/{{name}}` + +* `create_time` - + Output only. The creation time of the database. A timestamp in RFC3339 + UTC "Zulu" format, with nanosecond resolution and up to nine fractional + digits. Examples: "2014-10-02T15:01:23Z" and + "2014-10-02T15:01:23.045123456Z". + +* `update_time` - + Output only. The last modification time of the database. A timestamp in + RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine + fractional digits. Examples: "2014-10-02T15:01:23Z" and + "2014-10-02T15:01:23.045123456Z". + +* `delete_time` - + Output only. The deletion time of the database. Only set after the + database is deleted. A timestamp in RFC3339 UTC "Zulu" format, with + nanosecond resolution and up to nine fractional digits. Examples: + "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +* `expire_time` - + Output only. The time when this database is considered expired. Only set + after the database is deleted. A timestamp in RFC3339 UTC "Zulu" format, + with nanosecond resolution and up to nine fractional digits. Examples: + "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + + +Database can be imported using any of these accepted formats: + +``` +$ terraform import google_biglake_database.default {{catalog}}/databases/{{name}} +``` diff --git a/website/docs/r/compute_network_attachment.html.markdown b/website/docs/r/compute_network_attachment.html.markdown new file mode 100644 index 0000000000..38e6e7e811 --- /dev/null +++ b/website/docs/r/compute_network_attachment.html.markdown @@ -0,0 +1,209 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Compute Engine" +description: |- + A network attachment is a resource that lets a producer Virtual Private Cloud (VPC) network initiate connections to a consumer VPC network through a Private Service Connect interface. +--- + +# google\_compute\_network\_attachment + +A network attachment is a resource that lets a producer Virtual Private Cloud (VPC) network initiate connections to a consumer VPC network through a Private Service Connect interface. + +~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. +See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
+ +To get more information about NetworkAttachment, see: + +* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/networkAttachments) +* How-to Guides + * [Official Documentation](https://cloud.google.com/vpc/docs/about-network-attachments) + +## Example Usage - Network Attachment Basic + + +```hcl +resource "google_compute_network_attachment" "default" { + provider = google-beta + name = "basic-network-attachment" + region = "us-central1" + description = "basic network attachment description" + connection_preference = "ACCEPT_MANUAL" + + subnetworks = [ + google_compute_subnetwork.default.self_link + ] + + producer_accept_lists = [ + google_project.accepted_producer_project.project_id + ] + + producer_reject_lists = [ + google_project.rejected_producer_project.project_id + ] +} + +resource "google_compute_network" "default" { + provider = google-beta + name = "basic-network" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "basic-subnetwork" + region = "us-central1" + + network = google_compute_network.default.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_project" "rejected_producer_project" { + provider = google-beta + project_id = "prj-rejected%{random_suffix}" + name = "prj-rejected%{random_suffix}" + org_id = "123456789" + billing_account = "000000-0000000-0000000-000000" +} + +resource "google_project" "accepted_producer_project" { + provider = google-beta + project_id = "prj-accepted%{random_suffix}" + name = "prj-accepted%{random_suffix}" + org_id = "123456789" + billing_account = "000000-0000000-0000000-000000" +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `connection_preference` - + (Required) + The connection preference of service attachment. The value can be set to ACCEPT_AUTOMATIC. An ACCEPT_AUTOMATIC service attachment is one that always accepts the connection from consumer forwarding rules. 
+ Possible values are: `ACCEPT_AUTOMATIC`, `ACCEPT_MANUAL`, `INVALID`. + +* `subnetworks` - + (Required) + An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment. + +* `name` - + (Required) + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + +* `region` - + (Required) + URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + + +- - - + + +* `description` - + (Optional) + An optional description of this resource. Provide this property when you create the resource. + +* `producer_reject_lists` - + (Optional) + Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number. + +* `producer_accept_lists` - + (Optional) + Projects that are allowed to connect to this network attachment. The project can be specified using its id or number. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/regions/{{region}}/networkAttachments/{{name}}` + +* `kind` - + Type of the resource. + +* `id` - + The unique identifier for the resource type. The server generates this identifier. 
+ +* `creation_timestamp` - + Creation timestamp in RFC3339 text format. + +* `self_link` - + Server-defined URL for the resource. + +* `self_link_with_id` - + Server-defined URL for this resource's resource id. + +* `connection_endpoints` - + An array of connections for all the producers connected to this network attachment. + Structure is [documented below](#nested_connection_endpoints). + +* `fingerprint` - + Fingerprint of this resource. A hash of the contents stored in this object. This + field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch. + +* `network` - + The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. + Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks. + + +The `connection_endpoints` block contains: + +* `status` - + (Output) + The status of a connected endpoint to this network attachment. + +* `project_id_or_num` - + (Output) + The project id or number of the interface to which the IP was assigned. + +* `subnetwork` - + (Output) + The subnetwork used to assign the IP to the producer instance network interface. + +* `ip_address` - + (Output) + The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless. + +* `secondary_ip_cidr_ranges` - + (Output) + Alias IP ranges from the same subnetwork. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + + +NetworkAttachment can be imported using any of these accepted formats: + +``` +$ terraform import google_compute_network_attachment.default projects/{{project}}/regions/{{region}}/networkAttachments/{{name}} +$ terraform import google_compute_network_attachment.default {{project}}/{{region}}/{{name}} +$ terraform import google_compute_network_attachment.default {{region}}/{{name}} +$ terraform import google_compute_network_attachment.default {{name}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/compute_region_instance_template.html.markdown b/website/docs/r/compute_region_instance_template.html.markdown index bb971e854f..4e1c3a2983 100644 --- a/website/docs/r/compute_region_instance_template.html.markdown +++ b/website/docs/r/compute_region_instance_template.html.markdown @@ -11,9 +11,6 @@ Manages a VM instance template resource within GCE. For more information see and [API](https://cloud.google.com/compute/docs/reference/rest/v1/regionInstanceTemplates). -~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. -See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. - ## Example Usage ```hcl diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown index fed3c3e58d..daec72bb76 100644 --- a/website/docs/r/container_cluster.html.markdown +++ b/website/docs/r/container_cluster.html.markdown @@ -594,7 +594,7 @@ This block also contains several computed attributes, documented below. The `monitoring_config` block supports: -* `enable_components` - (Optional) The GKE components exposing metrics. 
Supported values include: `SYSTEM_COMPONENTS`, `APISERVER`, `CONTROLLER_MANAGER`, and `SCHEDULER`. In beta provider, `WORKLOADS` is supported on top of those 4 values. (`WORKLOADS` is deprecated and removed in GKE 1.24.) +* `enable_components` - (Optional) The GKE components exposing metrics. Supported values include: `SYSTEM_COMPONENTS`, `APISERVER`, `SCHEDULER`, `CONTROLLER_MANAGER`, `STORAGE`, `HPA`, `POD`, `DAEMONSET`, `DEPLOYMENT` and `STATEFULSET`. In beta provider, `WORKLOADS` is supported on top of those 10 values. (`WORKLOADS` is deprecated and removed in GKE 1.24.) * `managed_prometheus` - (Optional) Configuration for Managed Service for Prometheus. Structure is [documented below](#nested_managed_prometheus). @@ -606,7 +606,7 @@ This block also contains several computed attributes, documented below. The `advanced_datapath_observability_config` block supports: -* `enabled_metrics` - (Required) Whether or not the advanced datapath metrics are enabled. +* `enable_metrics` - (Required) Whether or not to enable advanced datapath metrics. * `relay_mode` - (Optional) Mode used to make Relay available. The `maintenance_policy` block supports: @@ -1067,7 +1067,8 @@ notification_config { The `confidential_nodes` block supports: -* `enabled` (Required) - Enable Confidential Nodes for this cluster. +* `enabled` (Required) - Enable Confidential GKE Nodes for this cluster, to + enforce encryption of data in-use. The `pod_security_policy_config` block supports: diff --git a/website/docs/r/container_node_pool.html.markdown b/website/docs/r/container_node_pool.html.markdown index 4a8f19b26c..3f08b2c1dd 100644 --- a/website/docs/r/container_node_pool.html.markdown +++ b/website/docs/r/container_node_pool.html.markdown @@ -111,6 +111,8 @@ resource "google_container_cluster" "primary" { * `autoscaling` - (Optional) Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage. 
Structure is [documented below](#nested_autoscaling). +* `confidential_nodes` - (Optional) Configuration for Confidential Nodes feature. Structure is [documented below](#nested_confidential_nodes). + * `initial_node_count` - (Optional) The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource. WARNING: Resizing your node pool manually @@ -192,6 +194,11 @@ cluster. * "ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduce preemption risk for Spot VMs. + The `confidential_nodes` block supports: + +* `enabled` (Required) - Enable Confidential GKE Nodes for this cluster, to + enforce encryption of data in-use. + The `management` block supports: * `auto_repair` - (Optional) Whether the nodes will be automatically repaired. diff --git a/website/docs/r/dataflow_flex_template_job.html.markdown b/website/docs/r/dataflow_flex_template_job.html.markdown index 5e81a137b3..4064c91346 100644 --- a/website/docs/r/dataflow_flex_template_job.html.markdown +++ b/website/docs/r/dataflow_flex_template_job.html.markdown @@ -11,6 +11,9 @@ job on Dataflow, which is an implementation of Apache Beam running on Google Compute Engine. For more information see the official documentation for [Beam](https://beam.apache.org) and [Dataflow](https://cloud.google.com/dataflow/). +~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. +See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
+ ## Example Usage ```hcl diff --git a/website/docs/r/dataform_repository_release_config.html.markdown b/website/docs/r/dataform_repository_release_config.html.markdown index 1c9ea7b194..28d773bffa 100644 --- a/website/docs/r/dataform_repository_release_config.html.markdown +++ b/website/docs/r/dataform_repository_release_config.html.markdown @@ -46,7 +46,7 @@ resource "google_sourcerepo_repository" "git_repository" { resource "google_secret_manager_secret" "secret" { provider = google-beta - secret_id = "secret" + secret_id = "my_secret" replication { automatic = true diff --git a/website/docs/r/dataform_repository_workflow_config.html.markdown b/website/docs/r/dataform_repository_workflow_config.html.markdown index b442f0029c..013a74af0e 100644 --- a/website/docs/r/dataform_repository_workflow_config.html.markdown +++ b/website/docs/r/dataform_repository_workflow_config.html.markdown @@ -46,7 +46,7 @@ resource "google_sourcerepo_repository" "git_repository" { resource "google_secret_manager_secret" "secret" { provider = google-beta - secret_id = "secret" + secret_id = "my_secret" replication { automatic = true diff --git a/website/docs/r/dataplex_datascan.html.markdown b/website/docs/r/dataplex_datascan.html.markdown index 75c98e42e0..a415eaab1e 100644 --- a/website/docs/r/dataplex_datascan.html.markdown +++ b/website/docs/r/dataplex_datascan.html.markdown @@ -34,7 +34,7 @@ To get more information about Datascan, see: ```hcl resource "google_dataplex_datascan" "basic_profile" { location = "us-central1" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "dataprofile-basic" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" @@ -58,7 +58,7 @@ data_profile_spec {} resource "google_dataplex_datascan" "full_profile" { location = "us-central1" display_name = "Full Datascan Profile" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "dataprofile-full" description = "Example 
resource - Full Datascan Profile" labels = { author = "billing" @@ -85,9 +85,26 @@ resource "google_dataplex_datascan" "full_profile" { exclude_fields { field_names = ["property_type"] } + post_scan_actions { + bigquery_export { + results_table = "//bigquery.googleapis.com/projects/my-project-name/datasets/dataplex_dataset/tables/profile_export" + } + } } project = "my-project-name" + + depends_on = [ + google_bigquery_dataset.source + ] +} + +resource "google_bigquery_dataset" "source" { + dataset_id = "dataplex_dataset" + friendly_name = "test" + description = "This is a test description" + location = "US" + delete_contents_on_destroy = true } ``` ## Example Usage - Dataplex Datascan Basic Quality @@ -96,7 +113,7 @@ resource "google_dataplex_datascan" "full_profile" { ```hcl resource "google_dataplex_datascan" "basic_quality" { location = "us-central1" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "dataquality-basic" data { resource = "//bigquery.googleapis.com/projects/bigquery-public-data/datasets/samples/tables/shakespeare" @@ -129,7 +146,7 @@ resource "google_dataplex_datascan" "basic_quality" { resource "google_dataplex_datascan" "full_quality" { location = "us-central1" display_name = "Full Datascan Quality" - data_scan_id = "tf-test-datascan%{random_suffix}" + data_scan_id = "dataquality-full" description = "Example resource - Full Datascan Quality" labels = { author = "billing" diff --git a/website/docs/r/dialogflow_cx_flow.html.markdown b/website/docs/r/dialogflow_cx_flow.html.markdown index 8080ad8742..8fbe23eec9 100644 --- a/website/docs/r/dialogflow_cx_flow.html.markdown +++ b/website/docs/r/dialogflow_cx_flow.html.markdown @@ -38,18 +38,18 @@ To get more information about Flow, see: ```hcl resource "google_dialogflow_cx_agent" "agent" { - display_name = "dialogflowcx-agent" - location = "global" - default_language_code = "en" - supported_language_codes = ["fr","de","es"] - time_zone = "America/New_York" - description = "Example 
description." - avatar_uri = "https://cloud.google.com/_static/images/cloud/icons/favicons/onecloud/super_cloud.png" + display_name = "dialogflowcx-agent" + location = "global" + default_language_code = "en" + supported_language_codes = ["fr", "de", "es"] + time_zone = "America/New_York" + description = "Example description." + avatar_uri = "https://cloud.google.com/_static/images/cloud/icons/favicons/onecloud/super_cloud.png" enable_stackdriver_logging = true enable_spell_correction = true - speech_to_text_settings { - enable_speech_adaptation = true - } + speech_to_text_settings { + enable_speech_adaptation = true + } } @@ -59,45 +59,248 @@ resource "google_dialogflow_cx_flow" "basic_flow" { description = "Test Flow" nlu_settings { - classification_threshold = 0.3 - model_type = "MODEL_TYPE_STANDARD" - } + classification_threshold = 0.3 + model_type = "MODEL_TYPE_STANDARD" + } event_handlers { - event = "custom-event" - trigger_fulfillment { - return_partial_responses = false - messages { - text { - text = ["I didn't get that. Can you say it again?"] - } - } - } - } - - event_handlers { - event = "sys.no-match-default" - trigger_fulfillment { - return_partial_responses = false - messages { - text { - text = ["Sorry, could you say that again?"] - } - } - } - } - - event_handlers { - event = "sys.no-input-default" - trigger_fulfillment { - return_partial_responses = false - messages { - text { - text = ["One more time?"] - } - } - } - } + event = "custom-event" + trigger_fulfillment { + return_partial_responses = false + messages { + text { + text = ["I didn't get that. 
Can you say it again?"] + } + } + } + } + + event_handlers { + event = "sys.no-match-default" + trigger_fulfillment { + return_partial_responses = false + messages { + text { + text = ["Sorry, could you say that again?"] + } + } + } + } + + event_handlers { + event = "sys.no-input-default" + trigger_fulfillment { + return_partial_responses = false + messages { + text { + text = ["One more time?"] + } + } + } + } + + event_handlers { + event = "another-event" + trigger_fulfillment { + return_partial_responses = true + messages { + channel = "some-channel" + text { + text = ["Some text"] + } + } + messages { + payload = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <The `messages` block supports: +* `channel` - + (Optional) + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + * `text` - (Optional) The text response message. Structure is [documented below](#nested_text). +* `payload` - + (Optional) + A custom, platform-specific payload. + +* `conversation_success` - + (Optional) + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + Structure is [documented below](#nested_conversation_success). 
+ +* `output_audio_text` - + (Optional) + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + Structure is [documented below](#nested_output_audio_text). + +* `live_agent_handoff` - + (Optional) + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + Structure is [documented below](#nested_live_agent_handoff). + +* `play_audio` - + (Optional) + Specifies an audio clip to be played by the client as part of the response. + Structure is [documented below](#nested_play_audio). + +* `telephony_transfer_call` - + (Optional) + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. + Structure is [documented below](#nested_telephony_transfer_call). + The `text` block supports: @@ -234,6 +488,65 @@ The following arguments are supported: (Output) Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. +The `conversation_success` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `output_audio_text` block supports: + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +* `text` - + (Optional) + The raw text to be synthesized. 
+ +* `ssml` - + (Optional) + The SSML text to be synthesized. For more information, see SSML. + +The `live_agent_handoff` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `play_audio` block supports: + +* `audio_uri` - + (Required) + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +The `telephony_transfer_call` block supports: + +* `phone_number` - + (Required) + Transfer the call to a phone number in E.164 format. + +The `set_parameter_actions` block supports: + +* `parameter` - + (Optional) + Display name of the parameter. + +* `value` - + (Optional) + The new JSON-encoded value of the parameter. A null value clears the parameter. + +The `conditional_cases` block supports: + +* `cases` - + (Optional) + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + The `event_handlers` block supports: * `name` - @@ -279,14 +592,65 @@ The following arguments are supported: (Optional) The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. +* `set_parameter_actions` - + (Optional) + Set parameter values before executing the webhook. + Structure is [documented below](#nested_set_parameter_actions). + +* `conditional_cases` - + (Optional) + Conditional cases for this fulfillment. + Structure is [documented below](#nested_conditional_cases). + The `messages` block supports: +* `channel` - + (Optional) + The channel which the response is associated with. 
Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + * `text` - (Optional) The text response message. Structure is [documented below](#nested_text). +* `payload` - + (Optional) + A custom, platform-specific payload. + +* `conversation_success` - + (Optional) + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + Structure is [documented below](#nested_conversation_success). + +* `output_audio_text` - + (Optional) + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + Structure is [documented below](#nested_output_audio_text). + +* `live_agent_handoff` - + (Optional) + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + Structure is [documented below](#nested_live_agent_handoff). 
+ +* `play_audio` - + (Optional) + Specifies an audio clip to be played by the client as part of the response. + Structure is [documented below](#nested_play_audio). + +* `telephony_transfer_call` - + (Optional) + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. + Structure is [documented below](#nested_telephony_transfer_call). + The `text` block supports: @@ -298,6 +662,65 @@ The following arguments are supported: (Output) Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. +The `conversation_success` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `output_audio_text` block supports: + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +* `text` - + (Optional) + The raw text to be synthesized. + +* `ssml` - + (Optional) + The SSML text to be synthesized. For more information, see SSML. + +The `live_agent_handoff` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `play_audio` block supports: + +* `audio_uri` - + (Required) + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +The `telephony_transfer_call` block supports: + +* `phone_number` - + (Required) + Transfer the call to a phone number in E.164 format. + +The `set_parameter_actions` block supports: + +* `parameter` - + (Optional) + Display name of the parameter. 
+ +* `value` - + (Optional) + The new JSON-encoded value of the parameter. A null value clears the parameter. + +The `conditional_cases` block supports: + +* `cases` - + (Optional) + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + The `nlu_settings` block supports: * `model_type` - diff --git a/website/docs/r/dialogflow_cx_page.html.markdown b/website/docs/r/dialogflow_cx_page.html.markdown index da58e3f939..5d10328ab2 100644 --- a/website/docs/r/dialogflow_cx_page.html.markdown +++ b/website/docs/r/dialogflow_cx_page.html.markdown @@ -38,18 +38,18 @@ To get more information about Page, see: ```hcl resource "google_dialogflow_cx_agent" "agent" { - display_name = "dialogflowcx-agent" - location = "global" - default_language_code = "en" - supported_language_codes = ["fr","de","es"] - time_zone = "America/New_York" - description = "Example description." - avatar_uri = "https://cloud.google.com/_static/images/cloud/icons/favicons/onecloud/super_cloud.png" + display_name = "dialogflowcx-agent" + location = "global" + default_language_code = "en" + supported_language_codes = ["fr", "de", "es"] + time_zone = "America/New_York" + description = "Example description." 
+ avatar_uri = "https://cloud.google.com/_static/images/cloud/icons/favicons/onecloud/super_cloud.png" enable_stackdriver_logging = true enable_spell_correction = true - speech_to_text_settings { - enable_speech_adaptation = true - } + speech_to_text_settings { + enable_speech_adaptation = true + } } @@ -58,47 +58,535 @@ resource "google_dialogflow_cx_page" "basic_page" { display_name = "MyPage" entry_fulfillment { - messages { - text { - text = ["Welcome to page"] - } - } - } - - form { - parameters { - display_name = "param1" - entity_type = "projects/-/locations/-/agents/-/entityTypes/sys.date" - fill_behavior { - initial_prompt_fulfillment { - messages { - text { - text = ["Please provide param1"] - } - } - } - } - required = "true" - redact = "true" - } - } - - transition_routes { - condition = "$page.params.status = 'FINAL'" - trigger_fulfillment { - messages { - text { - text = ["information completed, navigating to page 2"] - } - } - } - target_page = google_dialogflow_cx_page.my_page2.id - } -} + messages { + channel = "some-channel" + text { + text = ["Welcome to page"] + } + } + messages { + payload = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <Some example SSML XML + EOF + } + } + messages { + live_agent_handoff { + metadata = <The `messages` block supports: +* `channel` - + (Optional) + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + * `text` - (Optional) The text response message. Structure is [documented below](#nested_text). +* `payload` - + (Optional) + A custom, platform-specific payload. 
+ +* `conversation_success` - + (Optional) + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + Structure is [documented below](#nested_conversation_success). + +* `output_audio_text` - + (Optional) + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + Structure is [documented below](#nested_output_audio_text). + +* `live_agent_handoff` - + (Optional) + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + Structure is [documented below](#nested_live_agent_handoff). + +* `play_audio` - + (Optional) + Specifies an audio clip to be played by the client as part of the response. + Structure is [documented below](#nested_play_audio). + +* `telephony_transfer_call` - + (Optional) + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. 
+ Structure is [documented below](#nested_telephony_transfer_call). + The `text` block supports: @@ -208,6 +747,65 @@ The following arguments are supported: (Output) Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. +The `conversation_success` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `output_audio_text` block supports: + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +* `text` - + (Optional) + The raw text to be synthesized. + +* `ssml` - + (Optional) + The SSML text to be synthesized. For more information, see SSML. + +The `live_agent_handoff` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `play_audio` block supports: + +* `audio_uri` - + (Required) + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +The `telephony_transfer_call` block supports: + +* `phone_number` - + (Required) + Transfer the call to a phone number in E.164 format. + +The `set_parameter_actions` block supports: + +* `parameter` - + (Optional) + Display name of the parameter. + +* `value` - + (Optional) + The new JSON-encoded value of the parameter. A null value clears the parameter. + +The `conditional_cases` block supports: + +* `cases` - + (Optional) + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. 
+ See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + The `form` block supports: * `parameters` - @@ -241,6 +839,10 @@ The following arguments are supported: Defines fill behavior for the parameter. Structure is [documented below](#nested_fill_behavior). +* `default_value` - + (Optional) + The default value of an optional parameter. If the parameter is required, the default value will be ignored. + * `redact` - (Optional) Indicates whether the parameter content should be redacted in log. @@ -254,6 +856,21 @@ The following arguments are supported: The fulfillment to provide the initial prompt that the agent can present to the user in order to fill the parameter. Structure is [documented below](#nested_initial_prompt_fulfillment). +* `reprompt_event_handlers` - + (Optional) + The handlers for parameter-level events, used to provide reprompt for the parameter or transition to a different page/flow. The supported events are: + * sys.no-match-, where N can be from 1 to 6 + * sys.no-match-default + * sys.no-input-, where N can be from 1 to 6 + * sys.no-input-default + * sys.invalid-parameter + [initialPromptFulfillment][initialPromptFulfillment] provides the first prompt for the parameter. + If the user's response does not fill the parameter, a no-match/no-input event will be triggered, and the fulfillment associated with the sys.no-match-1/sys.no-input-1 handler (if defined) will be called to provide a prompt. The sys.no-match-2/sys.no-input-2 handler (if defined) will respond to the next no-match/no-input event, and so on. + A sys.no-match-default or sys.no-input-default handler will be used to handle all following no-match/no-input events after all numbered no-match/no-input handlers for the parameter are consumed. + A sys.invalid-parameter handler can be defined to handle the case where the parameter values have been invalidated by webhook. 
For example, if the user's response fill the parameter, however the parameter was invalidated by webhook, the fulfillment associated with the sys.invalid-parameter handler (if defined) will be called to provide a prompt. + If the event handler for the corresponding event can't be found on the parameter, initialPromptFulfillment will be re-prompted. + Structure is [documented below](#nested_reprompt_event_handlers). + The `initial_prompt_fulfillment` block supports: @@ -274,14 +891,239 @@ The following arguments are supported: (Optional) The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. +* `set_parameter_actions` - + (Optional) + Set parameter values before executing the webhook. + Structure is [documented below](#nested_set_parameter_actions). + +* `conditional_cases` - + (Optional) + Conditional cases for this fulfillment. + Structure is [documented below](#nested_conditional_cases). + + +The `messages` block supports: + +* `channel` - + (Optional) + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + +* `text` - + (Optional) + The text response message. + Structure is [documented below](#nested_text). + +* `payload` - + (Optional) + A custom, platform-specific payload. + +* `conversation_success` - + (Optional) + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. 
+ * In a webhook response when you determine that you handled the customer issue. + Structure is [documented below](#nested_conversation_success). + +* `output_audio_text` - + (Optional) + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + Structure is [documented below](#nested_output_audio_text). + +* `live_agent_handoff` - + (Optional) + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + Structure is [documented below](#nested_live_agent_handoff). + +* `play_audio` - + (Optional) + Specifies an audio clip to be played by the client as part of the response. + Structure is [documented below](#nested_play_audio). + +* `telephony_transfer_call` - + (Optional) + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. + Structure is [documented below](#nested_telephony_transfer_call). + + +The `text` block supports: + +* `text` - + (Optional) + A collection of text responses. + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +The `conversation_success` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. 
+ +The `output_audio_text` block supports: + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +* `text` - + (Optional) + The raw text to be synthesized. + +* `ssml` - + (Optional) + The SSML text to be synthesized. For more information, see SSML. + +The `live_agent_handoff` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `play_audio` block supports: + +* `audio_uri` - + (Required) + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +The `telephony_transfer_call` block supports: + +* `phone_number` - + (Required) + Transfer the call to a phone number in E.164 format. + +The `set_parameter_actions` block supports: + +* `parameter` - + (Optional) + Display name of the parameter. + +* `value` - + (Optional) + The new JSON-encoded value of the parameter. A null value clears the parameter. + +The `conditional_cases` block supports: + +* `cases` - + (Optional) + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + +The `reprompt_event_handlers` block supports: + +* `name` - + (Output) + The unique identifier of this event handler. + +* `event` - + (Optional) + The name of the event to handle. + +* `trigger_fulfillment` - + (Optional) + The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. 
It is invalid to specify such fulfillment for a handler handling webhooks. + Structure is [documented below](#nested_trigger_fulfillment). + +* `target_page` - + (Optional) + The target page to transition to. + Format: projects//locations//agents//flows//pages/. + +* `target_flow` - + (Optional) + The target flow to transition to. + Format: projects//locations//agents//flows/. + + +The `trigger_fulfillment` block supports: + +* `messages` - + (Optional) + The list of rich message responses to present to the user. + Structure is [documented below](#nested_messages). + +* `webhook` - + (Optional) + The webhook to call. Format: projects//locations//agents//webhooks/. + +* `return_partial_responses` - + (Optional) + Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. + +* `tag` - + (Optional) + The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. + +* `set_parameter_actions` - + (Optional) + Set parameter values before executing the webhook. + Structure is [documented below](#nested_set_parameter_actions). + +* `conditional_cases` - + (Optional) + Conditional cases for this fulfillment. + Structure is [documented below](#nested_conditional_cases). + The `messages` block supports: +* `channel` - + (Optional) + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + * `text` - (Optional) The text response message. Structure is [documented below](#nested_text). 
+* `payload` - + (Optional) + A custom, platform-specific payload. + +* `conversation_success` - + (Optional) + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + Structure is [documented below](#nested_conversation_success). + +* `output_audio_text` - + (Optional) + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + Structure is [documented below](#nested_output_audio_text). + +* `live_agent_handoff` - + (Optional) + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + Structure is [documented below](#nested_live_agent_handoff). + +* `play_audio` - + (Optional) + Specifies an audio clip to be played by the client as part of the response. + Structure is [documented below](#nested_play_audio). 
+ +* `telephony_transfer_call` - + (Optional) + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. + Structure is [documented below](#nested_telephony_transfer_call). + The `text` block supports: @@ -293,6 +1135,65 @@ The following arguments are supported: (Output) Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. +The `conversation_success` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `output_audio_text` block supports: + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +* `text` - + (Optional) + The raw text to be synthesized. + +* `ssml` - + (Optional) + The SSML text to be synthesized. For more information, see SSML. + +The `live_agent_handoff` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `play_audio` block supports: + +* `audio_uri` - + (Required) + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +The `telephony_transfer_call` block supports: + +* `phone_number` - + (Required) + Transfer the call to a phone number in E.164 format. + +The `set_parameter_actions` block supports: + +* `parameter` - + (Optional) + Display name of the parameter. + +* `value` - + (Optional) + The new JSON-encoded value of the parameter. A null value clears the parameter. 
+ +The `conditional_cases` block supports: + +* `cases` - + (Optional) + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + The `transition_routes` block supports: * `name` - @@ -311,7 +1212,7 @@ The following arguments are supported: * `trigger_fulfillment` - (Optional) - The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks. + The fulfillment to call when the condition is satisfied. At least one of triggerFulfillment and target must be specified. When both are defined, triggerFulfillment is executed first. Structure is [documented below](#nested_trigger_fulfillment). * `target_page` - @@ -344,14 +1245,65 @@ The following arguments are supported: (Optional) The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. +* `set_parameter_actions` - + (Optional) + Set parameter values before executing the webhook. + Structure is [documented below](#nested_set_parameter_actions). + +* `conditional_cases` - + (Optional) + Conditional cases for this fulfillment. + Structure is [documented below](#nested_conditional_cases). + The `messages` block supports: +* `channel` - + (Optional) + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + * `text` - (Optional) The text response message. Structure is [documented below](#nested_text). +* `payload` - + (Optional) + A custom, platform-specific payload. 
+
+* `conversation_success` -
+  (Optional)
+  Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about.
+  Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess.
+  You may set this, for example:
+  * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded.
+  * In a webhook response when you determine that you handled the customer issue.
+  Structure is [documented below](#nested_conversation_success).
+
+* `output_audio_text` -
+  (Optional)
+  A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
+  Structure is [documented below](#nested_output_audio_text).
+
+* `live_agent_handoff` -
+  (Optional)
+  Indicates that the conversation should be handed off to a live agent.
+  Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures.
+  You may set this, for example:
+  * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation.
+  * In a webhook response when you determine that the customer issue can only be handled by a human.
+  Structure is [documented below](#nested_live_agent_handoff).
+
+* `play_audio` -
+  (Optional)
+  Specifies an audio clip to be played by the client as part of the response.
+  Structure is [documented below](#nested_play_audio).
+
+* `telephony_transfer_call` -
+  (Optional)
+  Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint.
+ Structure is [documented below](#nested_telephony_transfer_call). + The `text` block supports: @@ -363,6 +1315,65 @@ The following arguments are supported: (Output) Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. +The `conversation_success` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `output_audio_text` block supports: + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +* `text` - + (Optional) + The raw text to be synthesized. + +* `ssml` - + (Optional) + The SSML text to be synthesized. For more information, see SSML. + +The `live_agent_handoff` block supports: + +* `metadata` - + (Optional) + Custom metadata. Dialogflow doesn't impose any structure on this. + +The `play_audio` block supports: + +* `audio_uri` - + (Required) + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +The `telephony_transfer_call` block supports: + +* `phone_number` - + (Required) + Transfer the call to a phone number in E.164 format. + +The `set_parameter_actions` block supports: + +* `parameter` - + (Optional) + Display name of the parameter. + +* `value` - + (Optional) + The new JSON-encoded value of the parameter. A null value clears the parameter. + +The `conditional_cases` block supports: + +* `cases` - + (Optional) + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. 
+ See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + The `event_handlers` block supports: * `name` - @@ -408,14 +1419,65 @@ The following arguments are supported: (Optional) The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. +* `set_parameter_actions` - + (Optional) + Set parameter values before executing the webhook. + Structure is [documented below](#nested_set_parameter_actions). + +* `conditional_cases` - + (Optional) + Conditional cases for this fulfillment. + Structure is [documented below](#nested_conditional_cases). + The `messages` block supports: +* `channel` - + (Optional) + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + * `text` - (Optional) The text response message. Structure is [documented below](#nested_text). +* `payload` - + (Optional) + A custom, platform-specific payload. + +* `conversation_success` - + (Optional) + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + Structure is [documented below](#nested_conversation_success). + +* `output_audio_text` - + (Optional) + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. 
+  Structure is [documented below](#nested_output_audio_text).
+
+* `live_agent_handoff` -
+  (Optional)
+  Indicates that the conversation should be handed off to a live agent.
+  Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures.
+  You may set this, for example:
+  * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation.
+  * In a webhook response when you determine that the customer issue can only be handled by a human.
+  Structure is [documented below](#nested_live_agent_handoff).
+
+* `play_audio` -
+  (Optional)
+  Specifies an audio clip to be played by the client as part of the response.
+  Structure is [documented below](#nested_play_audio).
+
+* `telephony_transfer_call` -
+  (Optional)
+  Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint.
+  Structure is [documented below](#nested_telephony_transfer_call).
+
 The `text` block supports:
@@ -427,6 +1489,65 @@ The following arguments are supported:
   (Output)
   Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
+The `conversation_success` block supports:
+
+* `metadata` -
+  (Optional)
+  Custom metadata. Dialogflow doesn't impose any structure on this.
+
+The `output_audio_text` block supports:
+
+* `allow_playback_interruption` -
+  (Output)
+  Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request.
+
+* `text` -
+  (Optional)
+  The raw text to be synthesized.
+
+* `ssml` -
+  (Optional)
+  The SSML text to be synthesized. For more information, see SSML.
+
+The `live_agent_handoff` block supports:
+
+* `metadata` -
+  (Optional)
+  Custom metadata.
Dialogflow doesn't impose any structure on this. + +The `play_audio` block supports: + +* `audio_uri` - + (Required) + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + +* `allow_playback_interruption` - + (Output) + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + +The `telephony_transfer_call` block supports: + +* `phone_number` - + (Required) + Transfer the call to a phone number in E.164 format. + +The `set_parameter_actions` block supports: + +* `parameter` - + (Optional) + Display name of the parameter. + +* `value` - + (Optional) + The new JSON-encoded value of the parameter. A null value clears the parameter. + +The `conditional_cases` block supports: + +* `cases` - + (Optional) + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/gke_hub_membership_binding.html.markdown b/website/docs/r/gke_hub_membership_binding.html.markdown new file mode 100644 index 0000000000..3075b17b4d --- /dev/null +++ b/website/docs/r/gke_hub_membership_binding.html.markdown @@ -0,0 +1,152 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. 
+# +# ---------------------------------------------------------------------------- +subcategory: "GKEHub" +description: |- + MembershipBinding is a subresource of a Membership, representing what Fleet Scopes (or other, future Fleet resources) a Membership is bound to. +--- + +# google\_gke\_hub\_membership\_binding + +MembershipBinding is a subresource of a Membership, representing what Fleet Scopes (or other, future Fleet resources) a Membership is bound to. + + +To get more information about MembershipBinding, see: + +* [API documentation](https://cloud.google.com/anthos/fleet-management/docs/reference/rest/v1/projects.locations.memberships.bindings) +* How-to Guides + * [Registering a Cluster](https://cloud.google.com/anthos/multicluster-management/connect/registering-a-cluster#register_cluster) + +## Example Usage - Gkehub Membership Binding Basic + + +```hcl +resource "google_container_cluster" "primary" { + name = "basiccluster" + location = "us-central1-a" + initial_node_count = 1 +} + +resource "google_gke_hub_membership" "example" { + membership_id = "tf-test-membership%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } + + depends_on = [google_container_cluster.primary] +} + +resource "google_gke_hub_scope" "example" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_membership_binding" "example" { + membership_binding_id = "tf-test-membership-binding%{random_suffix}" + scope = google_gke_hub_scope.example.name + membership_id = "tf-test-membership%{random_suffix}" + location = "global" + depends_on = [ + google_gke_hub_membership.example, + google_gke_hub_scope.example + ] +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `membership_binding_id` - + (Required) + The client-provided identifier of the membership binding. 
+ +* `scope` - + (Required) + A Workspace resource name in the format + `projects/*/locations/*/scopes/*`. + +* `membership_id` - + (Required) + Id of the membership + +* `location` - + (Required) + Location of the membership + + +- - - + + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}` + +* `name` - + The resource name for the membershipbinding itself + +* `uid` - + Google-generated UUID for this resource. + +* `create_time` - + Time the MembershipBinding was created in UTC. + +* `update_time` - + Time the MembershipBinding was updated in UTC. + +* `delete_time` - + Time the MembershipBinding was deleted in UTC. + +* `state` - + State of the membership binding resource. + Structure is [documented below](#nested_state). + + +The `state` block contains: + +* `code` - + (Output) + Code describes the state of a MembershipBinding resource. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + + +MembershipBinding can be imported using any of these accepted formats: + +``` +$ terraform import google_gke_hub_membership_binding.default projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}} +$ terraform import google_gke_hub_membership_binding.default {{project}}/{{location}}/{{membership_id}}/{{membership_binding_id}} +$ terraform import google_gke_hub_membership_binding.default {{location}}/{{membership_id}}/{{membership_binding_id}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/gke_hub_namespace.html.markdown b/website/docs/r/gke_hub_namespace.html.markdown new file mode 100644 index 0000000000..7e66de44f8 --- /dev/null +++ b/website/docs/r/gke_hub_namespace.html.markdown @@ -0,0 +1,126 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "GKEHub" +description: |- + Namespace represents a namespace across the Fleet. +--- + +# google\_gke\_hub\_namespace + +Namespace represents a namespace across the Fleet. 
+ + +To get more information about Namespace, see: + +* [API documentation](https://cloud.google.com/anthos/fleet-management/docs/reference/rest/v1/projects.locations.scopes.namespaces) +* How-to Guides + * [Registering a Cluster](https://cloud.google.com/anthos/multicluster-management/connect/registering-a-cluster#register_cluster) + +## Example Usage - Gkehub Namespace Basic + + +```hcl +resource "google_gke_hub_scope" "namespace" { + scope_id = "tf-test-scope%{random_suffix}" +} + + +resource "google_gke_hub_namespace" "namespace" { + scope_namespace_id = "tf-test-namespace%{random_suffix}" + scope_id = "tf-test-scope%{random_suffix}" + scope = "${google_gke_hub_scope.namespace.name}" + depends_on = [google_gke_hub_scope.namespace] +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `scope_namespace_id` - + (Required) + The client-provided identifier of the namespace. + +* `scope` - + (Required) + The name of the Scope instance. + +* `scope_id` - + (Required) + Id of the scope + + +- - - + + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/{{scope_namespace_id}}` + +* `name` - + The resource name for the namespace + +* `uid` - + Google-generated UUID for this resource. + +* `create_time` - + Time the Namespace was created in UTC. + +* `update_time` - + Time the Namespace was updated in UTC. + +* `delete_time` - + Time the Namespace was deleted in UTC. + +* `state` - + State of the namespace resource. + Structure is [documented below](#nested_state). + + +The `state` block contains: + +* `code` - + (Output) + Code describes the state of a Namespace resource. 
+ +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +Namespace can be imported using any of these accepted formats: + +``` +$ terraform import google_gke_hub_namespace.default projects/{{project}}/locations/global/scopes/{{scope_id}}/namespaces/{{scope_namespace_id}} +$ terraform import google_gke_hub_namespace.default {{project}}/{{scope_id}}/{{scope_namespace_id}} +$ terraform import google_gke_hub_namespace.default {{scope_id}}/{{scope_namespace_id}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/gke_hub_scope.html.markdown b/website/docs/r/gke_hub_scope.html.markdown new file mode 100644 index 0000000000..9770bb42b2 --- /dev/null +++ b/website/docs/r/gke_hub_scope.html.markdown @@ -0,0 +1,110 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "GKEHub" +description: |- + Scope represents a Scope in a Fleet. +--- + +# google\_gke\_hub\_scope + +Scope represents a Scope in a Fleet. 
+ + +To get more information about Scope, see: + +* [API documentation](https://cloud.google.com/anthos/fleet-management/docs/reference/rest/v1/projects.locations.scopes) +* How-to Guides + * [Registering a Cluster](https://cloud.google.com/anthos/multicluster-management/connect/registering-a-cluster#register_cluster) + +## Example Usage - Gkehub Scope Basic + + +```hcl +resource "google_gke_hub_scope" "scope" { + scope_id = "tf-test-scope%{random_suffix}" +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `scope_id` - + (Required) + The client-provided identifier of the scope. + + +- - - + + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/global/scopes/{{scope_id}}` + +* `name` - + The unique identifier of the scope + +* `uid` - + Google-generated UUID for this resource. + +* `create_time` - + Time the Scope was created in UTC. + +* `update_time` - + Time the Scope was updated in UTC. + +* `delete_time` - + Time the Scope was deleted in UTC. + +* `state` - + State of the scope resource. + Structure is [documented below](#nested_state). + + +The `state` block contains: + +* `code` - + (Output) + Code describes the state of a Scope resource. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + + +Scope can be imported using any of these accepted formats: + +``` +$ terraform import google_gke_hub_scope.default projects/{{project}}/locations/global/scopes/{{scope_id}} +$ terraform import google_gke_hub_scope.default {{project}}/{{scope_id}} +$ terraform import google_gke_hub_scope.default {{scope_id}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/gke_hub_scope_iam.html.markdown b/website/docs/r/gke_hub_scope_iam.html.markdown new file mode 100644 index 0000000000..57a3a35564 --- /dev/null +++ b/website/docs/r/gke_hub_scope_iam.html.markdown @@ -0,0 +1,148 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "GKEHub" +description: |- + Collection of resources to manage IAM policy for GKEHub Scope +--- + +# IAM policy for GKEHub Scope +Three different resources help you manage your IAM policy for GKEHub Scope. Each of these resources serves a different use case: + +* `google_gke_hub_scope_iam_policy`: Authoritative. Sets the IAM policy for the scope and replaces any existing policy already attached. +* `google_gke_hub_scope_iam_binding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the scope are preserved. +* `google_gke_hub_scope_iam_member`: Non-authoritative. 
 Updates the IAM policy to grant a role to a new member. Other members for the role for the scope are preserved.
+
+A data source can be used to retrieve policy data in the event that you do not need creation
+
+* `google_gke_hub_scope_iam_policy`: Retrieves the IAM policy for the scope
+
+~> **Note:** `google_gke_hub_scope_iam_policy` **cannot** be used in conjunction with `google_gke_hub_scope_iam_binding` and `google_gke_hub_scope_iam_member` or they will fight over what your policy should be.
+
+~> **Note:** `google_gke_hub_scope_iam_binding` resources **can be** used in conjunction with `google_gke_hub_scope_iam_member` resources **only if** they do not grant privilege to the same role.
+
+
+
+
+## google\_gke\_hub\_scope\_iam\_policy
+
+```hcl
+data "google_iam_policy" "admin" {
+  binding {
+    role = "roles/viewer"
+    members = [
+      "user:jane@example.com",
+    ]
+  }
+}
+
+resource "google_gke_hub_scope_iam_policy" "policy" {
+  project = google_gke_hub_scope.scope.project
+  scope_id = google_gke_hub_scope.scope.scope_id
+  policy_data = data.google_iam_policy.admin.policy_data
+}
+```
+
+## google\_gke\_hub\_scope\_iam\_binding
+
+```hcl
+resource "google_gke_hub_scope_iam_binding" "binding" {
+  project = google_gke_hub_scope.scope.project
+  scope_id = google_gke_hub_scope.scope.scope_id
+  role = "roles/viewer"
+  members = [
+    "user:jane@example.com",
+  ]
+}
+```
+
+## google\_gke\_hub\_scope\_iam\_member
+
+```hcl
+resource "google_gke_hub_scope_iam_member" "member" {
+  project = google_gke_hub_scope.scope.project
+  scope_id = google_gke_hub_scope.scope.scope_id
+  role = "roles/viewer"
+  member = "user:jane@example.com"
+}
+```
+
+
+## Argument Reference
+
+The following arguments are supported:
+
+
+* `project` - (Optional) The ID of the project in which the resource belongs.
+    If it is not provided, the project will be parsed from the identifier of the parent resource.
If no project is provided in the parent identifier and no project is specified, the provider project is used. + +* `member/members` - (Required) Identities that will be granted the privilege in `role`. + Each entry can have one of the following values: + * **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account. + * **allAuthenticatedUsers**: A special identifier that represents anyone who is authenticated with a Google account or a service account. + * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. + * **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. + * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. + * **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com. + * **projectOwner:projectid**: Owners of the given project. For example, "projectOwner:my-example-project" + * **projectEditor:projectid**: Editors of the given project. For example, "projectEditor:my-example-project" + * **projectViewer:projectid**: Viewers of the given project. For example, "projectViewer:my-example-project" + +* `role` - (Required) The role that should be applied. Only one + `google_gke_hub_scope_iam_binding` can be used per role. Note that custom roles must be of the format + `[projects|organizations]/{parent-name}/roles/{role-name}`. + +* `policy_data` - (Required only by `google_gke_hub_scope_iam_policy`) The policy data generated by + a `google_iam_policy` data source. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are +exported: + +* `etag` - (Computed) The etag of the IAM policy. 
+ +## Import + +For all import syntaxes, the "resource in question" can take any of the following forms: + +* projects/{{project}}/locations/global/scopes/{{scope_id}} +* {{project}}/{{scope_id}} +* {{scope_id}} + +Any variables not passed in the import command will be taken from the provider configuration. + +GKEHub scope IAM resources can be imported using the resource identifiers, role, and member. + +IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g. +``` +$ terraform import google_gke_hub_scope_iam_member.editor "projects/{{project}}/locations/global/scopes/{{scope_id}} roles/viewer user:jane@example.com" +``` + +IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g. +``` +$ terraform import google_gke_hub_scope_iam_binding.editor "projects/{{project}}/locations/global/scopes/{{scope_id}} roles/viewer" +``` + +IAM policy imports use the identifier of the resource in question, e.g. +``` +$ terraform import google_gke_hub_scope_iam_policy.editor projects/{{project}}/locations/global/scopes/{{scope_id}} +``` + +-> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). 
diff --git a/website/docs/r/gke_hub_scope_rbac_role_binding.html.markdown b/website/docs/r/gke_hub_scope_rbac_role_binding.html.markdown new file mode 100644 index 0000000000..819d7c8181 --- /dev/null +++ b/website/docs/r/gke_hub_scope_rbac_role_binding.html.markdown @@ -0,0 +1,157 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "GKEHub" +description: |- + RBACRoleBinding represents a rbacrolebinding across the Fleet. +--- + +# google\_gke\_hub\_scope\_rbac\_role\_binding + +RBACRoleBinding represents a rbacrolebinding across the Fleet. 
+
+
+To get more information about ScopeRBACRoleBinding, see:
+
+* [API documentation](https://cloud.google.com/anthos/fleet-management/docs/reference/rest/v1/projects.locations.scopes.rbacrolebindings)
+* How-to Guides
+    * [Registering a Cluster](https://cloud.google.com/anthos/multicluster-management/connect/registering-a-cluster#register_cluster)
+
+## Example Usage - Gkehub Scope Rbac Role Binding Basic
+
+
+```hcl
+resource "google_gke_hub_scope" "scoperbacrolebinding" {
+  scope_id = "tf-test-scope%{random_suffix}"
+}
+
+resource "google_gke_hub_scope_rbac_role_binding" "scoperbacrolebinding" {
+  scope_rbac_role_binding_id = "tf-test-scope-rbac-role-binding%{random_suffix}"
+  scope_id = "tf-test-scope%{random_suffix}"
+  user = "test-email@gmail.com"
+  role {
+    predefined_role = "ADMIN"
+  }
+  labels = {
+    key = "value" 
+  }
+  depends_on = [google_gke_hub_scope.scoperbacrolebinding]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+
+* `scope_rbac_role_binding_id` -
+  (Required)
+  The client-provided identifier of the RBAC Role Binding.
+
+* `role` -
+  (Required)
+  Role to bind to the principal.
+  Structure is [documented below](#nested_role).
+
+* `scope_id` -
+  (Required)
+  Id of the scope
+
+
+The `role` block supports:
+
+* `predefined_role` -
+  (Optional)
+  PredefinedRole is an ENUM representation of the default Kubernetes Roles
+  Possible values are: `UNKNOWN`, `ADMIN`, `EDIT`, `VIEW`.
+
+- - -
+
+
+* `user` -
+  (Optional)
+  Principal that is to be authorized in the cluster (at least one of the oneof
+  is required). Updating one will unset the other automatically.
+  user is the name of the user as seen by the kubernetes cluster, example
+  "alice" or "alice@domain.tld"
+
+* `group` -
+  (Optional)
+  Principal that is to be authorized in the cluster (at least one of the oneof
+  is required). Updating one will unset the other automatically.
+  group is the group, as seen by the kubernetes cluster.
+ +* `labels` - + (Optional) + Labels for this ScopeRBACRoleBinding. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}}` + +* `name` - + The resource name for the RBAC Role Binding + +* `uid` - + Google-generated UUID for this resource. + +* `create_time` - + Time the RBAC Role Binding was created in UTC. + +* `update_time` - + Time the RBAC Role Binding was updated in UTC. + +* `delete_time` - + Time the RBAC Role Binding was deleted in UTC. + +* `state` - + State of the RBAC Role Binding resource. + Structure is [documented below](#nested_state). + + +The `state` block contains: + +* `code` - + (Output) + Code describes the state of a RBAC Role Binding resource. + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. 
+ +## Import + + +ScopeRBACRoleBinding can be imported using any of these accepted formats: + +``` +$ terraform import google_gke_hub_scope_rbac_role_binding.default projects/{{project}}/locations/global/scopes/{{scope_id}}/rbacrolebindings/{{scope_rbac_role_binding_id}} +$ terraform import google_gke_hub_scope_rbac_role_binding.default {{project}}/{{scope_id}}/{{scope_rbac_role_binding_id}} +$ terraform import google_gke_hub_scope_rbac_role_binding.default {{scope_id}}/{{scope_rbac_role_binding_id}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/website/docs/r/logging_project_sink.html.markdown b/website/docs/r/logging_project_sink.html.markdown index 2d167b71b7..66253de863 100644 --- a/website/docs/r/logging_project_sink.html.markdown +++ b/website/docs/r/logging_project_sink.html.markdown @@ -62,8 +62,8 @@ resource "google_compute_instance" "my-logged-instance" { } } -# A bucket to store logs in -resource "google_storage_bucket" "log-bucket" { +# A gcs bucket to store logs in +resource "google_storage_bucket" "gcs-bucket" { name = "my-unique-logging-bucket" location = "US" } @@ -72,14 +72,14 @@ resource "google_storage_bucket" "log-bucket" { resource "google_logging_project_sink" "instance-sink" { name = "my-instance-sink" description = "some explanation on what this is" - destination = "storage.googleapis.com/${google_storage_bucket.log-bucket.name}" + destination = "storage.googleapis.com/${google_storage_bucket.gcs-bucket.name}" filter = "resource.type = gce_instance AND resource.labels.instance_id = \"${google_compute_instance.my-logged-instance.instance_id}\"" unique_writer_identity = true } # Because our sink uses a unique_writer, we must grant that writer access to the bucket. 
-resource "google_project_iam_binding" "log-writer" { +resource "google_project_iam_binding" "gcs-bucket-writer" { project = "your-project-id" role = "roles/storage.objectCreator" diff --git a/website/docs/r/network_connectivity_service_connection_policy.html.markdown b/website/docs/r/network_connectivity_service_connection_policy.html.markdown index d8c7824646..524544703e 100644 --- a/website/docs/r/network_connectivity_service_connection_policy.html.markdown +++ b/website/docs/r/network_connectivity_service_connection_policy.html.markdown @@ -192,6 +192,10 @@ In addition to the arguments listed above, the following computed attributes are (Optional) The status code, which should be an enum value of [google.rpc.Code][]. +* `details` - + (Output) + A list of messages that carry the error details. + The `error_info` block supports: * `reason` -