Storage config (#9151)
Co-authored-by: Riley Karson <[email protected]>
spapi17 and rileykarson authored Nov 6, 2023
1 parent 18461c3 commit ca66d80
Showing 3 changed files with 149 additions and 5 deletions.
@@ -73,7 +73,7 @@ var (
"config.0.workloads_config",
"config.0.environment_size",
"config.0.master_authorized_networks_config",
"config.0.resilience_mode",
"config.0.resilience_mode",
}

recoveryConfigKeys = []string{
@@ -878,7 +878,7 @@ func ResourceComposerEnvironment() *schema.Resource {
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Description: `User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: Label keys must be between 1 and 63 characters long and must conform to the following regular expression: [a-z]([-a-z0-9]*[a-z0-9])?. Label values must be between 0 and 63 characters long and must conform to the regular expression ([a-z]([-a-z0-9]*[a-z0-9])?)?. No more than 64 labels can be associated with a given environment. Both keys and values must be <= 128 bytes in size.

**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.
Please refer to the field 'effective_labels' for all of the labels present on the resource.`,
},
@@ -896,6 +896,24 @@ func ResourceComposerEnvironment() *schema.Resource {
Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`,
Elem: &schema.Schema{Type: schema.TypeString},
},

"storage_config": {
Type: schema.TypeList,
Optional: true,
Computed: true,
MaxItems: 1,
Description: `Configuration options for storage used by Composer environment.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"bucket": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: `Optional. Name of an existing Cloud Storage bucket to be used by the environment.`,
},
},
},
},
},
UseJSONNumber: true,
}
@@ -918,10 +936,16 @@ func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{})
return err
}

transformedStorageConfig, err := expandComposerStorageConfig(d.Get("storage_config"), d, config)
if err != nil {
return err
}

env := &composer.Environment{
Name: envName.ResourceName(),
Labels: tpgresource.ExpandEffectiveLabels(d),
Config: transformedConfig,
StorageConfig: transformedStorageConfig,
}

// Some fields cannot be specified during create and must be updated post-creation.
@@ -1007,6 +1031,9 @@ func resourceComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) e
if err := d.Set("effective_labels", res.Labels); err != nil {
return fmt.Errorf("Error setting Environment effective_labels: %s", err)
}
if err := d.Set("storage_config", flattenComposerStorageConfig(res.StorageConfig)); err != nil {
return fmt.Errorf("Error setting Storage: %s", err)
}
return nil
}

@@ -1350,6 +1377,17 @@ func resourceComposerEnvironmentImport(d *schema.ResourceData, meta interface{})
return []*schema.ResourceData{d}, nil
}

func flattenComposerStorageConfig(storageConfig *composer.StorageConfig) interface{} {
if storageConfig == nil {
return nil
}

transformed := make(map[string]interface{})
transformed["bucket"] = storageConfig.Bucket

return []interface{}{transformed}
}

func flattenComposerEnvironmentConfig(envCfg *composer.EnvironmentConfig) interface{} {
if envCfg == nil {
return nil
@@ -1734,9 +1772,9 @@ func expandComposerEnvironmentConfig(v interface{}, d *schema.ResourceData, conf
if transformedResilienceMode == "STANDARD_RESILIENCE" {
transformed.ResilienceMode = "RESILIENCE_MODE_UNSPECIFIED"
} else {
transformed.ResilienceMode = transformedResilienceMode
transformed.ResilienceMode = transformedResilienceMode
}


transformedMasterAuthorizedNetworksConfig, err := expandComposerEnvironmentConfigMasterAuthorizedNetworksConfig(original["master_authorized_networks_config"], d, config)
if err != nil {
@@ -2143,7 +2181,22 @@ func expandComposerEnvironmentIPAllocationPolicy(v interface{}, d *schema.Resour
transformed.ServicesSecondaryRangeName = v.(string)
}
return transformed, nil
}

func expandComposerStorageConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.StorageConfig, error) {
l := v.([]interface{})
if len(l) == 0 || l[0] == nil {
return nil, nil
}
raw := l[0]
original := raw.(map[string]interface{})
transformed := &composer.StorageConfig{}

if v, ok := original["bucket"]; ok {
transformed.Bucket = v.(string)
}

return transformed, nil
}

func expandComposerEnvironmentServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) {
@@ -20,6 +20,7 @@ import (

const testComposerEnvironmentPrefix = "tf-test-composer-env"
const testComposerNetworkPrefix = "tf-test-composer-net"
const testComposerBucketPrefix = "tf-test-composer-bucket"

func allComposerServiceAgents() []string {
return []string{
@@ -1081,6 +1082,84 @@ func testAccComposerEnvironmentDestroyProducer(t *testing.T) func(s *terraform.S
}
}

// Checks environment creation with a custom bucket
func TestAccComposerEnvironment_customBucket(t *testing.T) {
t.Parallel()

bucketName := fmt.Sprintf("%s-%d", testComposerBucketPrefix, acctest.RandInt(t))
envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t))
network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t))
subnetwork := network + "-1"
acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccComposerEnvironmentDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccComposerEnvironment_customBucket(bucketName, envName, network, subnetwork),
},
{
ResourceName: "google_composer_environment.test",
ImportState: true,
ImportStateVerify: true,
},
// This is a terrible clean-up step in order to get destroy to succeed,
// due to dangling firewall rules left by the Composer Environment blocking network deletion.
// TODO: Remove this check if firewall rules bug gets fixed by Composer.
{
PlanOnly: true,
ExpectNonEmptyPlan: false,
Config: testAccComposerEnvironment_customBucket(bucketName, envName, network, subnetwork),
Check: testAccCheckClearComposerEnvironmentFirewalls(t, network),
},
},
})
}

func testAccComposerEnvironment_customBucket(bucketName, envName, network, subnetwork string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "test" {
name = "%s"
location = "us-central1"
force_destroy = true
}

resource "google_composer_environment" "test" {
name = "%s"
region = "us-central1"
config {
node_config {
network = google_compute_network.test.self_link
subnetwork = google_compute_subnetwork.test.self_link
ip_allocation_policy {
cluster_ipv4_cidr_block = "10.0.0.0/16"
}
}
software_config {
image_version = "composer-2.4.2-airflow-2"
}
}
storage_config {
bucket = google_storage_bucket.test.name
}
}

// use a separate network to avoid conflicts with other tests running in parallel
// that use the default network/subnet
resource "google_compute_network" "test" {
name = "%s"
auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "test" {
name = "%s"
ip_cidr_range = "10.2.0.0/16"
region = "us-central1"
network = google_compute_network.test.self_link
}
`, bucketName, envName, network, subnetwork)
}

func testAccComposerEnvironment_basic(name, network, subnetwork string) string {
return fmt.Sprintf(`
resource "google_composer_environment" "test" {
@@ -622,7 +622,7 @@ The `web_server_network_access_control` supports:

* `cidr_block` -
(Required)
`cidr_block< must be specified in CIDR notation.
`cidr_block` must be specified in CIDR notation.
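
Purely as an illustration of the notation (the value below is a placeholder, and the enclosing block is not shown in this hunk), a CIDR-notation value pairs a base address with a prefix length:

```hcl
cidr_block = "10.0.0.0/24"
```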

## Argument Reference - Cloud Composer 2

@@ -656,6 +656,11 @@ The following arguments are supported:
(Optional) The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.

* `storage_config` -
(Optional)
Configuration options for storage used by Composer environment. Structure is documented below.


The `config` block supports:

* `node_config` -
@@ -708,6 +713,13 @@ The `config` block supports:
Google Compute Engine Public IPs and Google Prod IPs. Structure is
documented below.

The `storage_config` block supports:

* `bucket` -
(Required)
Name of an existing Cloud Storage bucket to be used by the environment.
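
As an illustrative sketch only (resource and bucket names are placeholders; the region and image version mirror the acceptance test added in this commit, and the test's network settings are omitted for brevity), an existing bucket can be wired into an environment through `storage_config`. Because `bucket` is declared with `ForceNew: true` in the schema, changing it later recreates the environment:

```hcl
# Hypothetical pre-existing bucket for the environment's data.
resource "google_storage_bucket" "custom" {
  name          = "my-composer-bucket"
  location      = "us-central1"
  force_destroy = true
}

resource "google_composer_environment" "example" {
  name   = "example-environment"
  region = "us-central1"

  # New top-level block added by this change: points the environment
  # at the existing bucket instead of an auto-created one.
  storage_config {
    bucket = google_storage_bucket.custom.name
  }

  config {
    software_config {
      image_version = "composer-2.4.2-airflow-2"
    }
  }
}
```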


The `node_config` block supports:

* `network` -