diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index 678d8c97..343320e9 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -1,6 +1,7 @@
BigAnimal
BigAnimal's
CIDR
+cron
CSP
csp
EDB
@@ -46,3 +47,4 @@ TDE
terraform
ultradisk
uri
+wal
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c81e454..f480d0b6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,14 @@
+## v1.2.0 (November 29. 2024)
+Features:
+* Support for Write-Ahead Logs (WAL) Storage in `biganimal_cluster`, `biganimal_faraway_replica`, and `biganimal_pgd` resources
+* Support for backup schedule time in `biganimal_cluster`, `biganimal_analytics_cluster`, `biganimal_faraway_replica`, and `biganimal_pgd` resources
+
+Enhancements:
+* Validation checks to not allow pe_allowed_principal_ids and service_account_ids if using your cloud account
+
+Bug Fixes:
+* Fixed planned allowed_ip_ranges.description when using private_networking = true
+
## v1.1.1 (October 29. 2024)
Bug Fixes:
* Fixed Data Source `biganimal_cluster` cloud_provider not working with your cloud account
diff --git a/GNUmakefile b/GNUmakefile
index 068cfb45..7f11f57a 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -4,7 +4,7 @@ HOSTNAME=registry.terraform.io
NAMESPACE=EnterpriseDB
NAME=biganimal
BINARY=terraform-provider-${NAME}
-VERSION=1.1.1
+VERSION=1.2.0
# Figure out the OS and ARCH of the
# builder machine
diff --git a/README.md b/README.md
index e0dfc386..9eafe64c 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/docs/data-sources/analytics_cluster.md b/docs/data-sources/analytics_cluster.md
index 17f56ed9..640866f5 100644
--- a/docs/data-sources/analytics_cluster.md
+++ b/docs/data-sources/analytics_cluster.md
@@ -32,6 +32,10 @@ output "backup_retention_period" {
value = data.biganimal_analytics_cluster.this.backup_retention_period
}
+output "backup_schedule_time" {
+ value = data.biganimal_analytics_cluster.this.backup_schedule_time
+}
+
output "cluster_name" {
value = data.biganimal_analytics_cluster.this.cluster_name
}
@@ -105,6 +109,7 @@ output "service_account_ids" {
- `allowed_ip_ranges` (Attributes Set) Allowed IP ranges. (see [below for nested schema](#nestedatt--allowed_ip_ranges))
- `backup_retention_period` (String) Backup retention period. For example, "7d", "2w", or "3m".
+- `backup_schedule_time` (String) Backup schedule time in 24 hour cron expression format.
- `csp_auth` (Boolean) Is authentication handled by the cloud service provider.
- `maintenance_window` (Attributes) Custom maintenance window. (see [below for nested schema](#nestedatt--maintenance_window))
- `pause` (Boolean) Pause cluster. If true it will put the cluster on pause and set the phase as paused, if false it will resume the cluster and set the phase as healthy. Pausing a cluster allows you to save on compute costs without losing data or cluster configuration settings. While paused, clusters aren't upgraded or patched, but changes are applied when the cluster resumes. Pausing a high availability cluster shuts down all cluster nodes
diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md
index 205ace92..9ae72b87 100644
--- a/docs/data-sources/cluster.md
+++ b/docs/data-sources/cluster.md
@@ -26,6 +26,10 @@ output "backup_retention_period" {
value = data.biganimal_cluster.this.backup_retention_period
}
+output "backup_schedule_time" {
+ value = data.biganimal_cluster.this.backup_schedule_time
+}
+
output "cluster_name" {
value = data.biganimal_cluster.this.cluster_name
}
@@ -94,6 +98,10 @@ output "storage" {
value = data.biganimal_cluster.this.storage
}
+output "wal_storage" {
+ value = data.biganimal_cluster.this.wal_storage
+}
+
output "superuser_access" {
value = coalesce(data.biganimal_cluster.this.superuser_access, false)
}
@@ -143,6 +151,7 @@ output "tags" {
- `allowed_ip_ranges` (Attributes Set) Allowed IP ranges. (see [below for nested schema](#nestedatt--allowed_ip_ranges))
- `backup_retention_period` (String) Backup retention period. For example, "7d", "2w", or "3m".
+- `backup_schedule_time` (String) Backup schedule time in 24 hour cron expression format.
- `csp_auth` (Boolean) Is authentication handled by the cloud service provider. Available for AWS only, See [Authentication](https://www.enterprisedb.com/docs/biganimal/latest/getting_started/creating_a_cluster/#authentication) for details.
- `maintenance_window` (Attributes) Custom maintenance window. (see [below for nested schema](#nestedatt--maintenance_window))
- `pause` (Boolean) Pause cluster. If true it will put the cluster on pause and set the phase as paused, if false it will resume the cluster and set the phase as healthy. Pausing a cluster allows you to save on compute costs without losing data or cluster configuration settings. While paused, clusters aren't upgraded or patched, but changes are applied when the cluster resumes. Pausing a high availability cluster shuts down all cluster nodes
@@ -160,6 +169,7 @@ output "tags" {
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
- `transparent_data_encryption` (Attributes) Transparent Data Encryption (TDE) key (see [below for nested schema](#nestedatt--transparent_data_encryption))
- `volume_snapshot_backup` (Boolean) Volume snapshot.
+- `wal_storage` (Attributes) Use a separate storage volume for Write-Ahead Logs (Recommended for high write workloads) (see [below for nested schema](#nestedatt--wal_storage))
### Read-Only
@@ -272,6 +282,21 @@ Read-Only:
- `status` (String) Status.
+
+### Nested Schema for `wal_storage`
+
+Required:
+
+- `size` (String) Size of the volume. It can be set to different values depending on your volume type and properties.
+- `volume_properties` (String) Volume properties in accordance with the selected volume type.
+- `volume_type` (String) Volume type. For Azure: "azurepremiumstorage" or "ultradisk". For AWS: "gp3", "io2", or "io2-block-express". For Google Cloud: only "pd-ssd".
+
+Optional:
+
+- `iops` (String) IOPS for the selected volume. It can be set to different values depending on your volume type and properties.
+- `throughput` (String) Throughput is automatically calculated by BigAnimal based on the IOPS input if it's not provided.
+
+
### Nested Schema for `cluster_architecture`
diff --git a/docs/data-sources/csp_tag.md b/docs/data-sources/csp_tag.md
index 2307f56a..76aaa56a 100644
--- a/docs/data-sources/csp_tag.md
+++ b/docs/data-sources/csp_tag.md
@@ -17,7 +17,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/docs/data-sources/faraway_replica.md b/docs/data-sources/faraway_replica.md
index 93fb186a..f001e384 100644
--- a/docs/data-sources/faraway_replica.md
+++ b/docs/data-sources/faraway_replica.md
@@ -44,6 +44,10 @@ output "backup_retention_period" {
value = data.biganimal_faraway_replica.this.backup_retention_period
}
+output "backup_schedule_time" {
+ value = data.biganimal_faraway_replica.this.backup_schedule_time
+}
+
output "cluster_name" {
value = data.biganimal_faraway_replica.this.cluster_name
}
@@ -104,6 +108,10 @@ output "storage" {
value = data.biganimal_faraway_replica.this.storage
}
+output "wal_storage" {
+ value = data.biganimal_faraway_replica.this.wal_storage
+}
+
output "volume_snapshot_backup" {
value = data.biganimal_faraway_replica.this.volume_snapshot_backup
}
@@ -120,6 +128,7 @@ output "volume_snapshot_backup" {
- `allowed_ip_ranges` (Attributes Set) Allowed IP ranges. (see [below for nested schema](#nestedatt--allowed_ip_ranges))
- `backup_retention_period` (String) Backup retention period. For example, "7d", "2w", or "3m".
+- `backup_schedule_time` (String) Backup schedule time in 24 hour cron expression format.
- `csp_auth` (Boolean) Is authentication handled by the cloud service provider.
- `pe_allowed_principal_ids` (Set of String) Cloud provider subscription/account ID, need to be specified when cluster is deployed on BigAnimal's cloud account.
- `pg_config` (Attributes Set) Database configuration parameters. (see [below for nested schema](#nestedatt--pg_config))
@@ -128,6 +137,7 @@ output "volume_snapshot_backup" {
- `service_account_ids` (Set of String) A Google Cloud Service Account is used for logs. If you leave this blank, then you will be unable to access log details for this cluster. Required when cluster is deployed on BigAnimal's cloud account.
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
- `transparent_data_encryption` (Attributes) Transparent Data Encryption (TDE) key (see [below for nested schema](#nestedatt--transparent_data_encryption))
+- `wal_storage` (Attributes) Use a separate storage volume for Write-Ahead Logs (Recommended for high write workloads) (see [below for nested schema](#nestedatt--wal_storage))
### Read-Only
@@ -194,6 +204,21 @@ Read-Only:
- `status` (String) Status.
+
+### Nested Schema for `wal_storage`
+
+Required:
+
+- `size` (String) Size of the volume. It can be set to different values depending on your volume type and properties.
+- `volume_properties` (String) Volume properties in accordance with the selected volume type.
+- `volume_type` (String) Volume type. For Azure: "azurepremiumstorage" or "ultradisk". For AWS: "gp3", "io2", or "io2-block-express". For Google Cloud: only "pd-ssd".
+
+Optional:
+
+- `iops` (String) IOPS for the selected volume. It can be set to different values depending on your volume type and properties.
+- `throughput` (String) Throughput is automatically calculated by BigAnimal based on the IOPS input if it's not provided.
+
+
### Nested Schema for `cluster_architecture`
diff --git a/docs/data-sources/pgd.md b/docs/data-sources/pgd.md
index 25cc0c04..4e57ff74 100644
--- a/docs/data-sources/pgd.md
+++ b/docs/data-sources/pgd.md
@@ -74,7 +74,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
@@ -102,6 +102,11 @@ terraform {
### Nested Schema for `data_groups`
+Optional:
+
+- `backup_schedule_time` (String) Backup schedule time in 24 hour cron expression format.
+- `wal_storage` (Attributes) Use a separate storage volume for Write-Ahead Logs (Recommended for high write workloads) (see [below for nested schema](#nestedatt--data_groups--wal_storage))
+
Read-Only:
- `allowed_ip_ranges` (Attributes Set) Allowed IP ranges. (see [below for nested schema](#nestedatt--data_groups--allowed_ip_ranges))
@@ -131,6 +136,21 @@ Read-Only:
- `service_account_ids` (Set of String) A Google Cloud Service Account is used for logs. If you leave this blank, then you will be unable to access log details for this cluster. Required when cluster is deployed on BigAnimal's cloud account.
- `storage` (Attributes) Storage. (see [below for nested schema](#nestedatt--data_groups--storage))
+
+### Nested Schema for `data_groups.wal_storage`
+
+Required:
+
+- `size` (String) Size of the volume. It can be set to different values depending on your volume type and properties.
+- `volume_properties` (String) Volume properties in accordance with the selected volume type.
+- `volume_type` (String) Volume type. For Azure: "azurepremiumstorage" or "ultradisk". For AWS: "gp3", "io2", or "io2-block-express". For Google Cloud: only "pd-ssd".
+
+Optional:
+
+- `iops` (String) IOPS for the selected volume. It can be set to different values depending on your volume type and properties.
+- `throughput` (String) Throughput is automatically calculated by BigAnimal based on the IOPS input if it's not provided.
+
+
### Nested Schema for `data_groups.allowed_ip_ranges`
diff --git a/docs/data-sources/tag.md b/docs/data-sources/tag.md
index ed895e11..bf38d2df 100644
--- a/docs/data-sources/tag.md
+++ b/docs/data-sources/tag.md
@@ -17,7 +17,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/docs/resources/analytics_cluster.md b/docs/resources/analytics_cluster.md
index f9e284bc..048375a2 100644
--- a/docs/resources/analytics_cluster.md
+++ b/docs/resources/analytics_cluster.md
@@ -30,6 +30,7 @@ The analytics cluster resource is used to manage BigAnimal analytics clusters.
- `allowed_ip_ranges` (Attributes Set) Allowed IP ranges. (see [below for nested schema](#nestedatt--allowed_ip_ranges))
- `backup_retention_period` (String) Backup retention period. For example, "7d", "2w", or "3m".
+- `backup_schedule_time` (String) Backup schedule time in 24 hour cron expression format.
- `csp_auth` (Boolean) Is authentication handled by the cloud service provider.
- `maintenance_window` (Attributes) Custom maintenance window. (see [below for nested schema](#nestedatt--maintenance_window))
- `pause` (Boolean) Pause cluster. If true it will put the cluster on pause and set the phase as paused, if false it will resume the cluster and set the phase as healthy. Pausing a cluster allows you to save on compute costs without losing data or cluster configuration settings. While paused, clusters aren't upgraded or patched, but changes are applied when the cluster resumes. Pausing a high availability cluster shuts down all cluster nodes
diff --git a/docs/resources/aws_connection.md b/docs/resources/aws_connection.md
index 50159060..558af580 100644
--- a/docs/resources/aws_connection.md
+++ b/docs/resources/aws_connection.md
@@ -19,7 +19,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/docs/resources/azure_connection.md b/docs/resources/azure_connection.md
index 9cf884e7..b4859a74 100644
--- a/docs/resources/azure_connection.md
+++ b/docs/resources/azure_connection.md
@@ -19,7 +19,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md
index f32250a0..a776b710 100644
--- a/docs/resources/cluster.md
+++ b/docs/resources/cluster.md
@@ -13,7 +13,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -55,6 +55,7 @@ resource "biganimal_cluster" "single_node_cluster" {
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
id = "single"
nodes = 1
@@ -80,6 +81,12 @@ resource "biganimal_cluster" "single_node_cluster" {
size = "4 Gi"
}
+ # wal_storage = {
+ # volume_type = "gp3"
+ # volume_properties = "gp3"
+ # size = "4 Gi"
+ # }
+
maintenance_window = {
is_enabled = true
start_day = 6
@@ -148,7 +155,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -190,6 +197,7 @@ resource "biganimal_cluster" "ha_cluster" {
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
id = "ha"
nodes = 3
@@ -214,6 +222,12 @@ resource "biganimal_cluster" "ha_cluster" {
size = "4 Gi"
}
+ # wal_storage = {
+ # volume_type = "gp3"
+ # volume_properties = "gp3"
+ # size = "4 Gi"
+ # }
+
maintenance_window = {
is_enabled = true
start_day = 6
@@ -300,6 +314,7 @@ output "faraway_replica_ids" {
- `allowed_ip_ranges` (Attributes Set) Allowed IP ranges. (see [below for nested schema](#nestedatt--allowed_ip_ranges))
- `backup_retention_period` (String) Backup retention period. For example, "7d", "2w", or "3m".
+- `backup_schedule_time` (String) Backup schedule time in 24 hour cron expression format.
- `csp_auth` (Boolean) Is authentication handled by the cloud service provider. Available for AWS only, See [Authentication](https://www.enterprisedb.com/docs/biganimal/latest/getting_started/creating_a_cluster/#authentication) for details.
- `maintenance_window` (Attributes) Custom maintenance window. (see [below for nested schema](#nestedatt--maintenance_window))
- `pause` (Boolean) Pause cluster. If true it will put the cluster on pause and set the phase as paused, if false it will resume the cluster and set the phase as healthy. Pausing a cluster allows you to save on compute costs without losing data or cluster configuration settings. While paused, clusters aren't upgraded or patched, but changes are applied when the cluster resumes. Pausing a high availability cluster shuts down all cluster nodes
@@ -316,6 +331,7 @@ output "faraway_replica_ids" {
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
- `transparent_data_encryption` (Attributes) Transparent Data Encryption (TDE) key (see [below for nested schema](#nestedatt--transparent_data_encryption))
- `volume_snapshot_backup` (Boolean) Enable to take a snapshot of the volume.
+- `wal_storage` (Attributes) Use a separate storage volume for Write-Ahead Logs (Recommended for high write workloads) (see [below for nested schema](#nestedatt--wal_storage))
### Read-Only
@@ -355,7 +371,7 @@ Required:
- `size` (String) Size of the volume. It can be set to different values depending on your volume type and properties.
- `volume_properties` (String) Volume properties in accordance with the selected volume type.
-- `volume_type` (String) Volume type. For Azure: "azurepremiumstorage" or "ultradisk". For AWS: "gp3", "io2", org s "io2-block-express". For Google Cloud: only "pd-ssd".
+- `volume_type` (String) Volume type. For Azure: "azurepremiumstorage" or "ultradisk". For AWS: "gp3", "io2", or "io2-block-express". For Google Cloud: only "pd-ssd".
Optional:
@@ -457,6 +473,21 @@ Read-Only:
- `key_name` (String) Key name.
- `status` (String) Status.
+
+
+### Nested Schema for `wal_storage`
+
+Required:
+
+- `size` (String) Size of the volume. It can be set to different values depending on your volume type and properties.
+- `volume_properties` (String) Volume properties in accordance with the selected volume type.
+- `volume_type` (String) Volume type. For Azure: "azurepremiumstorage" or "ultradisk". For AWS: "gp3", "io2", or "io2-block-express". For Google Cloud: only "pd-ssd".
+
+Optional:
+
+- `iops` (String) IOPS for the selected volume. It can be set to different values depending on your volume type and properties.
+- `throughput` (String) Throughput is automatically calculated by BigAnimal based on the IOPS input if it's not provided.
+
## Import
Import is supported using the following syntax:
diff --git a/docs/resources/csp_tag.md b/docs/resources/csp_tag.md
index cb926306..28d3f561 100644
--- a/docs/resources/csp_tag.md
+++ b/docs/resources/csp_tag.md
@@ -17,7 +17,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/docs/resources/faraway_replica.md b/docs/resources/faraway_replica.md
index abcbe094..d62d72e5 100644
--- a/docs/resources/faraway_replica.md
+++ b/docs/resources/faraway_replica.md
@@ -14,7 +14,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -54,9 +54,15 @@ resource "biganimal_cluster" "single_node_cluster" {
storage = {
volume_type = "azurepremiumstorage"
volume_properties = "P1"
- size = "4 Gi"
+ size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance
}
+ # wal_storage = {
+ # volume_type = "azurepremiumstorage"
+ # volume_properties = "P1"
+ # size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance
+ # }
+
pg_type = "epas" #valid values ["epas", "pgextended", "postgres]"
pg_version = "15"
cloud_provider = "azure"
@@ -91,8 +97,9 @@ resource "biganimal_faraway_replica" "faraway_replica" {
]
backup_retention_period = "8d"
- csp_auth = false
- instance_type = "azure:Standard_D2s_v3"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
+ csp_auth = false
+ instance_type = "azure:Standard_D2s_v3"
// only following pg_config parameters are configurable for faraway replica
// max_connections, max_locks_per_transaction, max_prepared_transactions, max_wal_senders, max_worker_processes.
@@ -112,8 +119,13 @@ resource "biganimal_faraway_replica" "faraway_replica" {
storage = {
volume_type = "azurepremiumstorage"
volume_properties = "P1"
- size = "4 Gi"
+ size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance
}
+ # wal_storage = {
+ # volume_type = "azurepremiumstorage"
+ # volume_properties = "P1"
+ # size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance
+ # }
private_networking = false
region = "centralindia"
@@ -148,6 +160,7 @@ resource "biganimal_faraway_replica" "faraway_replica" {
- `allowed_ip_ranges` (Attributes Set) Allowed IP ranges. (see [below for nested schema](#nestedatt--allowed_ip_ranges))
- `backup_retention_period` (String) Backup retention period. For example, "7d", "2w", or "3m".
+- `backup_schedule_time` (String) Backup schedule time in 24 hour cron expression format.
- `csp_auth` (Boolean) Is authentication handled by the cloud service provider.
- `pe_allowed_principal_ids` (Set of String) Cloud provider subscription/account ID, need to be specified when cluster is deployed on BigAnimal's cloud account.
- `pg_config` (Attributes Set) Database configuration parameters. (see [below for nested schema](#nestedatt--pg_config))
@@ -158,6 +171,7 @@ resource "biganimal_faraway_replica" "faraway_replica" {
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
- `transparent_data_encryption` (Attributes) Transparent Data Encryption (TDE) key (see [below for nested schema](#nestedatt--transparent_data_encryption))
- `volume_snapshot_backup` (Boolean) Enable to take a snapshot of the volume.
+- `wal_storage` (Attributes) Use a separate storage volume for Write-Ahead Logs (Recommended for high write workloads) (see [below for nested schema](#nestedatt--wal_storage))
### Read-Only
@@ -249,6 +263,21 @@ Read-Only:
- `status` (String) Status.
+
+### Nested Schema for `wal_storage`
+
+Required:
+
+- `size` (String) Size of the volume. It can be set to different values depending on your volume type and properties.
+- `volume_properties` (String) Volume properties in accordance with the selected volume type.
+- `volume_type` (String) Volume type. For Azure: "azurepremiumstorage" or "ultradisk". For AWS: "gp3", "io2", or "io2-block-express". For Google Cloud: only "pd-ssd".
+
+Optional:
+
+- `iops` (String) IOPS for the selected volume. It can be set to different values depending on your volume type and properties.
+- `throughput` (String) Throughput is automatically calculated by BigAnimal based on the IOPS input if it's not provided.
+
+
### Nested Schema for `cluster_architecture`
diff --git a/docs/resources/pgd.md b/docs/resources/pgd.md
index f5923933..55927d44 100644
--- a/docs/resources/pgd.md
+++ b/docs/resources/pgd.md
@@ -10,7 +10,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -62,6 +62,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -85,6 +86,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "P2"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "azurepremiumstorage"
+ # volume_properties = "P2"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -124,7 +135,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -176,6 +187,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -199,6 +211,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "P2"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "azurepremiumstorage"
+ # volume_properties = "P2"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -233,6 +250,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -256,6 +274,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "P2"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "azurepremiumstorage"
+ # volume_properties = "P2"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -308,7 +331,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -360,6 +383,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -383,6 +407,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "gp3"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "gp3"
+ # volume_properties = "gp3"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -422,7 +451,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -474,6 +503,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -497,6 +527,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "gp3"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "gp3"
+ # volume_properties = "gp3"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -531,6 +566,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -554,6 +590,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "gp3"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "gp3"
+ # volume_properties = "gp3"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -606,7 +647,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -658,6 +699,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -681,6 +723,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "pd-ssd"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "pd-ssd"
+ # volume_properties = "pd-ssd"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -724,7 +771,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
@@ -776,6 +823,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -799,6 +847,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "pd-ssd"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "pd-ssd"
+ # volume_properties = "pd-ssd"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -837,6 +890,7 @@ resource "biganimal_pgd" "pgd_cluster" {
},
]
backup_retention_period = "6d"
+ # backup_schedule_time = "0 5 1 * * *" //24 hour format cron expression e.g. "0 5 1 * * *" is 01:05
cluster_architecture = {
cluster_architecture_id = "pgd"
nodes = 3
@@ -860,6 +914,11 @@ resource "biganimal_pgd" "pgd_cluster" {
volume_properties = "pd-ssd"
size = "32 Gi"
}
+ # wal_storage = {
+ # volume_type = "pd-ssd"
+ # volume_properties = "pd-ssd"
+ # size = "32 Gi"
+ # }
pg_type = {
pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
}
@@ -954,10 +1013,12 @@ Required:
Optional:
- `allowed_ip_ranges` (Attributes Set) Allowed IP ranges. (see [below for nested schema](#nestedatt--data_groups--allowed_ip_ranges))
+- `backup_schedule_time` (String) Backup schedule time in 24 hour cron expression format.
- `cluster_type` (String) Type of the Specified Cluster
- `pe_allowed_principal_ids` (Set of String) Cloud provider subscription/account ID, need to be specified when cluster is deployed on BigAnimal's cloud account.
- `read_only_connections` (Boolean) Is read-only connections enabled.
- `service_account_ids` (Set of String) A Google Cloud Service Account is used for logs. If you leave this blank, then you will be unable to access log details for this cluster. Required when cluster is deployed on BigAnimal's cloud account.
+- `wal_storage` (Attributes) Use a separate storage volume for Write-Ahead Logs (Recommended for high write workloads) (see [below for nested schema](#nestedatt--data_groups--wal_storage))
Read-Only:
@@ -1068,6 +1129,21 @@ Required:
- `description` (String) Description of CIDR block
+
+### Nested Schema for `data_groups.wal_storage`
+
+Required:
+
+- `size` (String) Size of the volume. It can be set to different values depending on your volume type and properties.
+- `volume_properties` (String) Volume properties in accordance with the selected volume type.
+- `volume_type` (String) Volume type. For Azure: "azurepremiumstorage" or "ultradisk". For AWS: "gp3", "io2", or "io2-block-express". For Google Cloud: only "pd-ssd".
+
+Optional:
+
+- `iops` (String) IOPS for the selected volume. It can be set to different values depending on your volume type and properties.
+- `throughput` (String) Throughput is automatically calculated by BigAnimal based on the IOPS input if it's not provided.
+
+
### Nested Schema for `tags`
diff --git a/docs/resources/project.md b/docs/resources/project.md
index b8ba2558..4b1c95df 100644
--- a/docs/resources/project.md
+++ b/docs/resources/project.md
@@ -20,7 +20,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/docs/resources/region.md b/docs/resources/region.md
index ca478bf6..7a18954c 100644
--- a/docs/resources/region.md
+++ b/docs/resources/region.md
@@ -9,7 +9,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/docs/resources/tag.md b/docs/resources/tag.md
index 20bdd1f3..5e15b0f2 100644
--- a/docs/resources/tag.md
+++ b/docs/resources/tag.md
@@ -17,7 +17,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/data-sources/biganimal_analytics_cluster/provider.tf b/examples/data-sources/biganimal_analytics_cluster/provider.tf
index 37241a1a..a97112df 100644
--- a/examples/data-sources/biganimal_analytics_cluster/provider.tf
+++ b/examples/data-sources/biganimal_analytics_cluster/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/data-sources/biganimal_aws_connection/provider.tf b/examples/data-sources/biganimal_aws_connection/provider.tf
index 37241a1a..a97112df 100644
--- a/examples/data-sources/biganimal_aws_connection/provider.tf
+++ b/examples/data-sources/biganimal_aws_connection/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/data-sources/biganimal_cluster/provider.tf b/examples/data-sources/biganimal_cluster/provider.tf
index 37241a1a..a97112df 100644
--- a/examples/data-sources/biganimal_cluster/provider.tf
+++ b/examples/data-sources/biganimal_cluster/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/data-sources/biganimal_csp_tag/data-source.tf b/examples/data-sources/biganimal_csp_tag/data-source.tf
index ed571c0e..81f9996f 100644
--- a/examples/data-sources/biganimal_csp_tag/data-source.tf
+++ b/examples/data-sources/biganimal_csp_tag/data-source.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/data-sources/biganimal_faraway_replica/provider.tf b/examples/data-sources/biganimal_faraway_replica/provider.tf
index 37241a1a..a97112df 100644
--- a/examples/data-sources/biganimal_faraway_replica/provider.tf
+++ b/examples/data-sources/biganimal_faraway_replica/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/data-sources/biganimal_pgd/provider.tf b/examples/data-sources/biganimal_pgd/provider.tf
index 37241a1a..a97112df 100644
--- a/examples/data-sources/biganimal_pgd/provider.tf
+++ b/examples/data-sources/biganimal_pgd/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/data-sources/biganimal_projects/provider.tf b/examples/data-sources/biganimal_projects/provider.tf
index 37241a1a..a97112df 100644
--- a/examples/data-sources/biganimal_projects/provider.tf
+++ b/examples/data-sources/biganimal_projects/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/data-sources/biganimal_region/provider.tf b/examples/data-sources/biganimal_region/provider.tf
index 37241a1a..a97112df 100644
--- a/examples/data-sources/biganimal_region/provider.tf
+++ b/examples/data-sources/biganimal_region/provider.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/data-sources/biganimal_tag/data-source.tf b/examples/data-sources/biganimal_tag/data-source.tf
index f3b5b844..81bd2f1e 100644
--- a/examples/data-sources/biganimal_tag/data-source.tf
+++ b/examples/data-sources/biganimal_tag/data-source.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/resources/biganimal_analytics_cluster/aws/resource.tf b/examples/resources/biganimal_analytics_cluster/aws/resource.tf
index 78d1ef07..70c5a55c 100644
--- a/examples/resources/biganimal_analytics_cluster/aws/resource.tf
+++ b/examples/resources/biganimal_analytics_cluster/aws/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_aws_connection/resource.tf b/examples/resources/biganimal_aws_connection/resource.tf
index f2429e80..f0063abe 100644
--- a/examples/resources/biganimal_aws_connection/resource.tf
+++ b/examples/resources/biganimal_aws_connection/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/resources/biganimal_azure_connection/resource.tf b/examples/resources/biganimal_azure_connection/resource.tf
index 5c067407..db336b99 100644
--- a/examples/resources/biganimal_azure_connection/resource.tf
+++ b/examples/resources/biganimal_azure_connection/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/resources/biganimal_cluster/ha/resource.tf b/examples/resources/biganimal_cluster/ha/resource.tf
index 78bf6e16..a008a8a7 100644
--- a/examples/resources/biganimal_cluster/ha/resource.tf
+++ b/examples/resources/biganimal_cluster/ha/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_cluster/single_node/aws/resource.tf b/examples/resources/biganimal_cluster/single_node/aws/resource.tf
index 0e570320..a4f82ac5 100644
--- a/examples/resources/biganimal_cluster/single_node/aws/resource.tf
+++ b/examples/resources/biganimal_cluster/single_node/aws/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_cluster/single_node/azure/resource.tf b/examples/resources/biganimal_cluster/single_node/azure/resource.tf
index 866e8057..14e66959 100644
--- a/examples/resources/biganimal_cluster/single_node/azure/resource.tf
+++ b/examples/resources/biganimal_cluster/single_node/azure/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_cluster/single_node/gcp/resource.tf b/examples/resources/biganimal_cluster/single_node/gcp/resource.tf
index 9bb55c49..b534052d 100644
--- a/examples/resources/biganimal_cluster/single_node/gcp/resource.tf
+++ b/examples/resources/biganimal_cluster/single_node/gcp/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_csp_tag/resource.tf b/examples/resources/biganimal_csp_tag/resource.tf
index 260254ec..37fa48e2 100644
--- a/examples/resources/biganimal_csp_tag/resource.tf
+++ b/examples/resources/biganimal_csp_tag/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_faraway_replica/aws/resource.tf b/examples/resources/biganimal_faraway_replica/aws/resource.tf
index 90aabd58..bed0e0c0 100644
--- a/examples/resources/biganimal_faraway_replica/aws/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/aws/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_faraway_replica/azure/resource.tf b/examples/resources/biganimal_faraway_replica/azure/resource.tf
index f0d30d82..5fa72e58 100644
--- a/examples/resources/biganimal_faraway_replica/azure/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/azure/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf b/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf
index 326fdd0c..be297bd0 100644
--- a/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_faraway_replica/gcp/resource.tf b/examples/resources/biganimal_faraway_replica/gcp/resource.tf
index b3be1eaa..55b03e54 100644
--- a/examples/resources/biganimal_faraway_replica/gcp/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/gcp/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_pgd/aws/data_group/resource.tf b/examples/resources/biganimal_pgd/aws/data_group/resource.tf
index 0ce24dce..0c4be573 100644
--- a/examples/resources/biganimal_pgd/aws/data_group/resource.tf
+++ b/examples/resources/biganimal_pgd/aws/data_group/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf b/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
index 15f3f4dd..0c366bfb 100644
--- a/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
+++ b/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_pgd/azure/data_group/resource.tf b/examples/resources/biganimal_pgd/azure/data_group/resource.tf
index 9867c055..d42b71e2 100644
--- a/examples/resources/biganimal_pgd/azure/data_group/resource.tf
+++ b/examples/resources/biganimal_pgd/azure/data_group/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_pgd/azure/data_groups_with_witness_group/resource.tf b/examples/resources/biganimal_pgd/azure/data_groups_with_witness_group/resource.tf
index f6909e73..713842ce 100644
--- a/examples/resources/biganimal_pgd/azure/data_groups_with_witness_group/resource.tf
+++ b/examples/resources/biganimal_pgd/azure/data_groups_with_witness_group/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_pgd/gcp/data_group/resource.tf b/examples/resources/biganimal_pgd/gcp/data_group/resource.tf
index 6f5553d3..b324d9fa 100644
--- a/examples/resources/biganimal_pgd/gcp/data_group/resource.tf
+++ b/examples/resources/biganimal_pgd/gcp/data_group/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf b/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf
index 5ec94639..734c4ed8 100644
--- a/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf
+++ b/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_project/resource.tf b/examples/resources/biganimal_project/resource.tf
index 761b0e54..bde9abe0 100644
--- a/examples/resources/biganimal_project/resource.tf
+++ b/examples/resources/biganimal_project/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/examples/resources/biganimal_region/resource.tf b/examples/resources/biganimal_region/resource.tf
index 18c616e3..6dd3a9bd 100644
--- a/examples/resources/biganimal_region/resource.tf
+++ b/examples/resources/biganimal_region/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
}
}
diff --git a/examples/resources/biganimal_tag/resource.tf b/examples/resources/biganimal_tag/resource.tf
index af2f52cc..f23d10d2 100644
--- a/examples/resources/biganimal_tag/resource.tf
+++ b/examples/resources/biganimal_tag/resource.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
biganimal = {
source = "EnterpriseDB/biganimal"
- version = "1.1.1"
+ version = "1.2.0"
}
random = {
source = "hashicorp/random"
diff --git a/pkg/provider/resource_cluster.go b/pkg/provider/resource_cluster.go
index 8b7151b0..544fd1ec 100644
--- a/pkg/provider/resource_cluster.go
+++ b/pkg/provider/resource_cluster.go
@@ -869,12 +869,15 @@ func readCluster(ctx context.Context, client *api.ClusterClient, tfClusterResour
tfClusterResource.SuperuserAccess = types.BoolPointerValue(responseCluster.SuperuserAccess)
tfClusterResource.PgIdentity = types.StringPointerValue(responseCluster.PgIdentity)
tfClusterResource.VolumeSnapshot = types.BoolPointerValue(responseCluster.VolumeSnapshot)
- tfClusterResource.WalStorage = &StorageResourceModel{
- VolumeType: types.StringPointerValue(responseCluster.WalStorage.VolumeTypeId),
- VolumeProperties: types.StringPointerValue(responseCluster.WalStorage.VolumePropertiesId),
- Size: types.StringPointerValue(responseCluster.WalStorage.Size),
- Iops: types.StringPointerValue(responseCluster.WalStorage.Iops),
- Throughput: types.StringPointerValue(responseCluster.WalStorage.Throughput),
+
+ if responseCluster.WalStorage != nil {
+ tfClusterResource.WalStorage = &StorageResourceModel{
+ VolumeType: types.StringPointerValue(responseCluster.WalStorage.VolumeTypeId),
+ VolumeProperties: types.StringPointerValue(responseCluster.WalStorage.VolumePropertiesId),
+ Size: types.StringPointerValue(responseCluster.WalStorage.Size),
+ Iops: types.StringPointerValue(responseCluster.WalStorage.Iops),
+ Throughput: types.StringPointerValue(responseCluster.WalStorage.Throughput),
+ }
}
if responseCluster.EncryptionKeyResp != nil && *responseCluster.Phase != constants.PHASE_HEALTHY {
@@ -1052,6 +1055,7 @@ func (c *clusterResource) makeClusterForCreate(ctx context.Context, clusterResou
return clusterModel, nil
}
+// note: if private networking is true, it will require a peAllowedPrincipalId
func (c *clusterResource) buildRequestBah(ctx context.Context, clusterResourceModel ClusterResourceModel) (svAccIds, principalIds *[]string, err error) {
if strings.Contains(clusterResourceModel.CloudProvider.ValueString(), "bah") {
// If there is an existing Principal Account Id for that Region, use that one.
diff --git a/pkg/provider/resource_fareplica.go b/pkg/provider/resource_fareplica.go
index e0b802f9..4d557fcf 100644
--- a/pkg/provider/resource_fareplica.go
+++ b/pkg/provider/resource_fareplica.go
@@ -641,12 +641,15 @@ func readFAReplica(ctx context.Context, client *api.ClusterClient, fAReplicaReso
fAReplicaResourceModel.PgVersion = types.StringValue(responseCluster.PgVersion.PgVersionId)
fAReplicaResourceModel.PgType = types.StringValue(responseCluster.PgType.PgTypeId)
fAReplicaResourceModel.VolumeSnapshot = types.BoolPointerValue(responseCluster.VolumeSnapshot)
- fAReplicaResourceModel.WalStorage = &StorageResourceModel{
- VolumeType: types.StringPointerValue(responseCluster.WalStorage.VolumeTypeId),
- VolumeProperties: types.StringPointerValue(responseCluster.WalStorage.VolumePropertiesId),
- Size: types.StringPointerValue(responseCluster.WalStorage.Size),
- Iops: types.StringPointerValue(responseCluster.WalStorage.Iops),
- Throughput: types.StringPointerValue(responseCluster.WalStorage.Throughput),
+
+ if responseCluster.WalStorage != nil {
+ fAReplicaResourceModel.WalStorage = &StorageResourceModel{
+ VolumeType: types.StringPointerValue(responseCluster.WalStorage.VolumeTypeId),
+ VolumeProperties: types.StringPointerValue(responseCluster.WalStorage.VolumePropertiesId),
+ Size: types.StringPointerValue(responseCluster.WalStorage.Size),
+ Iops: types.StringPointerValue(responseCluster.WalStorage.Iops),
+ Throughput: types.StringPointerValue(responseCluster.WalStorage.Throughput),
+ }
}
// pgConfig. If tf resource pg config elem matches with api response pg config elem then add the elem to tf resource pg config