Skip to content

Commit

Permalink
Backport Add cross-region replication support to AlloyDB
Browse files Browse the repository at this point in the history
Backport hashicorp/terraform-provider-google-beta#6474

To create a secondary cluster, set the additional fields `cluster_type` and `secondary_config` on the cluster. The secondary cluster depends on the primary instance, and that dependency must be declared in the config via the `depends_on` field. The field `continuous_backup_config.enabled` must be set to false because continuous backup is not supported for secondary clusters. The `deletion_policy` is set to FORCE because the secondary instance created in the secondary cluster cannot be deleted independently; instead, the entire secondary cluster must be deleted forcefully along with its secondary instance.

```
  cluster_type = "SECONDARY"
  deletion_policy = "FORCE"
  secondary_config {
    primary_cluster_name = <fully qualified primary cluster name>
    // Eg: primary_cluster_name = google_alloydb_cluster.<primary_cluster_name>.name
  }

  continuous_backup_config {
    enabled = false
  }

  depends_on = [google_alloydb_instance.<primary_instance_name>]
```

Part of [hashicorp/terraform-provider-google#13251](hashicorp/terraform-provider-google#13251)

**Release Note Template for Downstream PRs (will be copied)**

```
alloydb: added `cluster_type` and `secondary_config` fields to support secondary clusters in `google_alloydb_cluster` resource.
```

Derived from [GoogleCloudPlatform/magic-modules#9012](GoogleCloudPlatform/magic-modules#9012)
  • Loading branch information
cheftako committed Mar 25, 2024
1 parent bb8a631 commit 130101c
Show file tree
Hide file tree
Showing 4 changed files with 983 additions and 6 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -43,9 +43,9 @@ func ResourceAlloydbCluster() *schema.Resource {
},

Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(10 * time.Minute),
Update: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(10 * time.Minute),
Create: schema.DefaultTimeout(30 * time.Minute),
Update: schema.DefaultTimeout(30 * time.Minute),
Delete: schema.DefaultTimeout(30 * time.Minute),
},

Schema: map[string]*schema.Schema{
Expand Down Expand Up @@ -198,6 +198,14 @@ A duration in seconds with up to nine fractional digits, terminated by 's'. Exam
},
},
},
"cluster_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: verify.ValidateEnum([]string{"PRIMARY", "SECONDARY", ""}),
Description: `The type of cluster. If not set, defaults to PRIMARY. Default value: "PRIMARY" Possible values: ["PRIMARY", "SECONDARY"]`,
Default: "PRIMARY",
},
"continuous_backup_config": {
Type: schema.TypeList,
Computed: true,
Expand Down Expand Up @@ -366,6 +374,23 @@ It is specified in the form: "projects/{projectNumber}/global/networks/{network_
},
ConflictsWith: []string{"restore_backup_source"},
},
"secondary_config": {
Type: schema.TypeList,
Optional: true,
Description: `Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY.`,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"primary_cluster_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: `Name of the primary cluster must be in the format
'projects/{project}/locations/{location}/clusters/{cluster_id}'`,
},
},
},
},
"backup_source": {
Type: schema.TypeList,
Computed: true,
Expand Down Expand Up @@ -569,6 +594,18 @@ func resourceAlloydbClusterCreate(d *schema.ResourceData, meta interface{}) erro
} else if v, ok := d.GetOkExists("automated_backup_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(automatedBackupPolicyProp)) && (ok || !reflect.DeepEqual(v, automatedBackupPolicyProp)) {
obj["automatedBackupPolicy"] = automatedBackupPolicyProp
}
clusterTypeProp, err := expandAlloydbClusterClusterType(d.Get("cluster_type"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("cluster_type"); !tpgresource.IsEmptyValue(reflect.ValueOf(clusterTypeProp)) && (ok || !reflect.DeepEqual(v, clusterTypeProp)) {
obj["clusterType"] = clusterTypeProp
}
secondaryConfigProp, err := expandAlloydbClusterSecondaryConfig(d.Get("secondary_config"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("secondary_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(secondaryConfigProp)) && (ok || !reflect.DeepEqual(v, secondaryConfigProp)) {
obj["secondaryConfig"] = secondaryConfigProp
}

url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}}")
if err != nil {
Expand Down Expand Up @@ -622,6 +659,38 @@ func resourceAlloydbClusterCreate(d *schema.ResourceData, meta interface{}) erro
restoreClusterRequestBody["cluster"] = cluster
obj = restoreClusterRequestBody
}

// Read the secondary cluster config to call the api for creating secondary cluster

var secondaryConfig interface{}
var clusterType interface{}

if val, ok := obj["secondaryConfig"]; ok {
secondaryConfig = val
}

if val, ok := obj["clusterType"]; ok {
clusterType = val
}

if clusterType == "SECONDARY" {
if secondaryConfig != nil {
// Use createsecondary API if this is a secondary cluster
url = strings.Replace(url, "clusters?clusterId", "clusters:createsecondary?cluster_id", 1)

// Validation error if secondary_config is not defined
} else {
return fmt.Errorf("Error creating cluster. Can not create secondary cluster without secondary_config field.")
}
}

// Validation error if secondary_config is defined but, cluster type is not secondary
if secondaryConfig != nil {
if clusterType != "SECONDARY" {
return fmt.Errorf("Error creating cluster. Add {cluster_type: \"SECONDARY\"} if attempting to create a secondary cluster, otherwise remove the secondary_config.")
}
}

res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{
Config: config,
Method: "POST",
Expand Down Expand Up @@ -746,6 +815,12 @@ func resourceAlloydbClusterRead(d *schema.ResourceData, meta interface{}) error
if err := d.Set("migration_source", flattenAlloydbClusterMigrationSource(res["migrationSource"], d, config)); err != nil {
return fmt.Errorf("Error reading Cluster: %s", err)
}
if err := d.Set("cluster_type", flattenAlloydbClusterClusterType(res["clusterType"], d, config)); err != nil {
return fmt.Errorf("Error reading Cluster: %s", err)
}
if err := d.Set("secondary_config", flattenAlloydbClusterSecondaryConfig(res["secondaryConfig"], d, config)); err != nil {
return fmt.Errorf("Error reading Cluster: %s", err)
}

return nil
}
Expand Down Expand Up @@ -814,6 +889,12 @@ func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) erro
} else if v, ok := d.GetOkExists("automated_backup_policy"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, automatedBackupPolicyProp)) {
obj["automatedBackupPolicy"] = automatedBackupPolicyProp
}
secondaryConfigProp, err := expandAlloydbClusterSecondaryConfig(d.Get("secondary_config"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("secondary_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, secondaryConfigProp)) {
obj["secondaryConfig"] = secondaryConfigProp
}

url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}")
if err != nil {
Expand Down Expand Up @@ -854,6 +935,10 @@ func resourceAlloydbClusterUpdate(d *schema.ResourceData, meta interface{}) erro
if d.HasChange("automated_backup_policy") {
updateMask = append(updateMask, "automatedBackupPolicy")
}
if d.HasChange("secondary_config") {
updateMask = append(updateMask, "secondaryConfig")
}

// updateMask is a URL parameter but not present in the schema, so ReplaceVars
// won't set it
url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")})
Expand Down Expand Up @@ -1425,6 +1510,28 @@ func flattenAlloydbClusterMigrationSourceSourceType(v interface{}, d *schema.Res
return v
}

// flattenAlloydbClusterClusterType maps the API's clusterType value into
// Terraform state unchanged; the string enum needs no transformation.
func flattenAlloydbClusterClusterType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

// flattenAlloydbClusterSecondaryConfig converts the API's secondaryConfig
// object into the single-element list form the Terraform schema expects
// (secondary_config is a TypeList with MaxItems: 1). A nil or empty API
// value flattens to nil so the attribute reads as unset in state.
func flattenAlloydbClusterSecondaryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	if v == nil {
		return nil
	}
	raw := v.(map[string]interface{})
	if len(raw) == 0 {
		return nil
	}
	// Translate the API's camelCase key to the schema's snake_case field.
	out := map[string]interface{}{
		"primary_cluster_name": flattenAlloydbClusterSecondaryConfigPrimaryClusterName(raw["primaryClusterName"], d, config),
	}
	return []interface{}{out}
}

// flattenAlloydbClusterSecondaryConfigPrimaryClusterName passes the API's
// primaryClusterName string through to state unchanged.
func flattenAlloydbClusterSecondaryConfigPrimaryClusterName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
	return v
}

func expandAlloydbClusterLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]string, error) {
if v == nil {
return map[string]string{}, nil
Expand Down Expand Up @@ -1904,3 +2011,4 @@ func expandAlloydbClusterAutomatedBackupPolicyQuantityBasedRetentionCount(v inte
// expandAlloydbClusterAutomatedBackupPolicyEnabled forwards the configured
// boolean to the API request body as-is; no conversion or validation is
// needed, so it never returns an error.
func expandAlloydbClusterAutomatedBackupPolicyEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
	return v, nil
}

Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,79 @@ resource "google_compute_network" "default" {
`, context)
}

// TestAccAlloydbCluster_alloydbSecondaryClusterBasicExample is an acceptance
// test that provisions a primary AlloyDB cluster + instance and a
// cross-region SECONDARY cluster, then verifies the secondary cluster can be
// imported back into state.
func TestAccAlloydbCluster_alloydbSecondaryClusterBasicExample(t *testing.T) {
	t.Parallel()

	// Random suffix keeps resource names unique across concurrent test runs.
	context := map[string]interface{}{
		"random_suffix": acctest.RandString(t, 10),
	}

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckAlloydbClusterDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				// Step 1: apply the primary + secondary cluster config.
				Config: testAccAlloydbCluster_alloydbSecondaryClusterBasicExample(context),
			},
			{
				// Step 2: import the secondary cluster and verify state.
				ResourceName:      "google_alloydb_cluster.secondary",
				ImportState:       true,
				ImportStateVerify: true,
				// These fields are not returned by the read API (or are
				// URL-derived), so they cannot be verified on import.
				ImportStateVerifyIgnore: []string{"initial_user", "restore_backup_source", "restore_continuous_backup_source", "cluster_id", "location", "labels", "annotations", "terraform_labels"},
			},
		},
	})
}

// testAccAlloydbCluster_alloydbSecondaryClusterBasicExample renders the HCL
// for the secondary-cluster acceptance test: a primary cluster with a
// PRIMARY instance, a SECONDARY cluster in another region pointing at the
// primary via secondary_config, and the VPC/service-networking resources
// both clusters require. %{...} placeholders are filled from context by
// acctest.Nprintf.
func testAccAlloydbCluster_alloydbSecondaryClusterBasicExample(context map[string]interface{}) string {
	return acctest.Nprintf(`
resource "google_alloydb_cluster" "primary" {
  cluster_id = "tf-test-alloydb-primary-cluster%{random_suffix}"
  location   = "us-central1"
  network    = google_compute_network.default.id
}
resource "google_alloydb_instance" "primary" {
  cluster       = google_alloydb_cluster.primary.name
  instance_id   = "tf-test-alloydb-primary-instance%{random_suffix}"
  instance_type = "PRIMARY"
  machine_config {
    cpu_count = 2
  }
  depends_on = [google_service_networking_connection.vpc_connection]
}
resource "google_alloydb_cluster" "secondary" {
  cluster_id   = "tf-test-alloydb-secondary-cluster%{random_suffix}"
  location     = "us-east1"
  network      = google_compute_network.default.id
  cluster_type = "SECONDARY"
  continuous_backup_config {
    enabled = false
  }
  secondary_config {
    primary_cluster_name = google_alloydb_cluster.primary.name
  }
  depends_on = [google_alloydb_instance.primary]
}
data "google_project" "project" {}
resource "google_compute_network" "default" {
  name = "tf-test-alloydb-secondary-cluster%{random_suffix}"
}
resource "google_compute_global_address" "private_ip_alloc" {
  name          =  "tf-test-alloydb-secondary-cluster%{random_suffix}"
  address_type  = "INTERNAL"
  purpose       = "VPC_PEERING"
  prefix_length = 16
  network       = google_compute_network.default.id
}
resource "google_service_networking_connection" "vpc_connection" {
  network                 = google_compute_network.default.id
  service                 = "servicenetworking.googleapis.com"
  reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name]
}
`, context)
}

func testAccCheckAlloydbClusterDestroyProducer(t *testing.T) func(s *terraform.State) error {
return func(s *terraform.State) error {
for name, rs := range s.RootModule().Resources {
Expand Down
Loading

0 comments on commit 130101c

Please sign in to comment.