From a1278a1de8a2463b7c86552b0eb4274d4fb145ff Mon Sep 17 00:00:00 2001 From: bryan <31219516+bryan-bar@users.noreply.github.com> Date: Mon, 26 Jun 2023 14:17:47 -0700 Subject: [PATCH] `inventory.yml.tftpl` default template for edb-ansible and related templating changes (#70) Templates can now be passed in through the CLI with `--user-templates` or `ET_USER_TEMPLATES` as a list of template files or a directory with template files. Users can still make use of the infrastructure file to specify template paths, but they will be copied over to the project's template directory and removed from the final terraform variables file as they are unused within terraform. All templates will be rendered if they are within the project's `template` directory and if they end with the `.tftpl` extension. Preferably, templates should not cause an infrastructure configuration to fail if a template is provided as a sample or a default. `inventory.yml.tftpl` has been added as an example which works with `edb-ansible`. To better work alongside ansible, additional outputs were passed through with our machines so they could be added to the inventory file, including `ansible_user` and `ansible_ssh_private_key_file`. Fixes: AWS/Azure/Gcloud - `setup_volume.sh` script updated to handle dropping of `/dev/` from device name when checking with nvme cli tools. 
--- edbterraform/CLI.py | 4 +- edbterraform/__init__.py | 2 + edbterraform/args.py | 25 +- edbterraform/data/templates/aws/main.tf.j2 | 2 +- edbterraform/data/templates/azure/main.tf.j2 | 2 +- edbterraform/data/templates/gcloud/main.tf.j2 | 2 +- .../data/templates/user/inventory.yml.tftpl | 217 +++++++++++++++ .../aws/modules/machine/setup_volume.sh | 4 +- .../aws/modules/specification/files.tf | 10 +- .../aws/modules/specification/outputs.tf | 3 +- .../aws/modules/specification/variables.tf | 1 - .../azure/modules/machine/setup_volume.sh | 4 +- .../azure/modules/machine/variables.tf | 2 + .../azure/modules/specification/files.tf | 12 +- .../azure/modules/specification/outputs.tf | 4 +- .../azure/modules/specification/variables.tf | 1 - .../gcloud/modules/machine/setup_volume.sh | 4 +- .../gcloud/modules/specification/files.tf | 14 +- .../gcloud/modules/specification/variables.tf | 1 - edbterraform/lib.py | 89 ++++-- edbterraform/utils/logs.py | 5 +- infrastructure-examples/aws-ec2-v2.yml | 2 - infrastructure-examples/aws-edb-ra-3.yml | 253 ++++++++++++++++++ infrastructure-examples/azure-vms-v2.yml | 2 - infrastructure-examples/compute-engine-v2.yml | 2 - 25 files changed, 606 insertions(+), 61 deletions(-) create mode 100644 edbterraform/data/templates/user/inventory.yml.tftpl create mode 100644 infrastructure-examples/aws-edb-ra-3.yml diff --git a/edbterraform/CLI.py b/edbterraform/CLI.py index 2f733c9c..30bd4da6 100644 --- a/edbterraform/CLI.py +++ b/edbterraform/CLI.py @@ -9,7 +9,7 @@ import json import textwrap -from edbterraform import __project_name__ +from edbterraform import __dot_project__ from edbterraform.utils.logs import logger Version = namedtuple('Version', ['major', 'minor', 'patch']) @@ -133,7 +133,7 @@ class TerraformCLI: arch_alias = { 'x86_64': 'amd64', } - DEFAULT_PATH=f'{Path.home()}/.{__project_name__}' + DEFAULT_PATH = __dot_project__ def __init__(self, binary_dir=None): self.bin_dir = binary_dir if binary_dir else 
TerraformCLI.DEFAULT_PATH diff --git a/edbterraform/__init__.py b/edbterraform/__init__.py index 8472d0a5..77eb6ebd 100644 --- a/edbterraform/__init__.py +++ b/edbterraform/__init__.py @@ -1,2 +1,4 @@ __version__ = "1.3.0" __project_name__ = 'edb-terraform' +from pathlib import Path +__dot_project__ = f'{Path.home()}/.{__project_name__}' diff --git a/edbterraform/args.py b/edbterraform/args.py index bbc22b2e..63cda5b4 100644 --- a/edbterraform/args.py +++ b/edbterraform/args.py @@ -10,7 +10,7 @@ from edbterraform.lib import generate_terraform from edbterraform.CLI import TerraformCLI -from edbterraform import __project_name__ +from edbterraform import __dot_project__ from edbterraform.utils import logs ENVIRONMENT_PREFIX = 'ET_' # Appended to allow overrides of defaults @@ -35,6 +35,7 @@ class ArgumentConfig: choices: list = None required: bool = None action: str = None + nargs: str = None def __post_init__(self) -> None: # Allow overriding of variables with environment variables @@ -109,6 +110,17 @@ def __getitem__(self, key): help="Project path. Default: %(default)s", ) +UserTemplatesPath = ArgumentConfig( + names = ['--user-templates',], + metavar='USER_TEMPLATE_FILES', + dest='user_templates', + type=Path, + nargs='+', + required=False, + default=[f'{__dot_project__}/templates',], + help="Users can pass in a list of template files or template directories, which will be rendered with the servers output. 
Default: %(default)s", +) + InfrastructureFilePath = ArgumentConfig( names = ['--infra-file',], metavar='INFRA_FILE_YAML', @@ -177,7 +189,7 @@ def __getitem__(self, key): names = ['--log-directory',], dest='log_directory', required=False, - default=f'{Path.home()}/.{__project_name__}/logs', + default=f'{__dot_project__}/logs', help=''' Default: %(default)s ''' @@ -220,6 +232,7 @@ class Arguments: LogFile, LogDirectory, LogStdout, + UserTemplatesPath, ]], 'setup': ['Install needed software such as Terraform inside a bin directory\n',[ BinPath, @@ -287,6 +300,13 @@ def get_env(self, key, default=None): Get environment variables which are available after parse_args() is called ''' return getattr(self.env, key, default) + + def get_kwargs(self): + ''' + Returns the parsed arguments as a dictionary. + _get_kwargs not used as it returns a list of dictionary items. + ''' + return self.env.__dict__.copy() def process_args(self): logs.setup_logs( @@ -311,6 +331,7 @@ def process_args(self): self.get_env('csp'), self.get_env('run_validation'), self.get_env('bin_path'), + self.get_env('user_templates') ) return outputs diff --git a/edbterraform/data/templates/aws/main.tf.j2 b/edbterraform/data/templates/aws/main.tf.j2 index 2e8ff8d9..09cad30f 100644 --- a/edbterraform/data/templates/aws/main.tf.j2 +++ b/edbterraform/data/templates/aws/main.tf.j2 @@ -138,7 +138,7 @@ resource "local_file" "user_templates" { output_name default made through jinja2 templating with edb-terraform: 'servers' terraform output -json */ - for_each = toset(module.spec.base.templates) + for_each = fileset(path.root, "templates/*.tftpl") content = templatefile(each.value, local.servers) filename = "${abspath(path.root)}/${trimsuffix(basename(each.value), ".tftpl")}" file_permission = "0600" diff --git a/edbterraform/data/templates/azure/main.tf.j2 b/edbterraform/data/templates/azure/main.tf.j2 index 79337c09..89e5b09a 100644 --- a/edbterraform/data/templates/azure/main.tf.j2 +++ 
b/edbterraform/data/templates/azure/main.tf.j2 @@ -132,7 +132,7 @@ resource "local_file" "user_templates" { output_name default made through jinja2 templating with edb-terraform: 'servers' terraform output -json */ - for_each = toset(module.spec.base.templates) + for_each = fileset(path.root, "templates/*.tftpl") content = templatefile(each.value, local.servers) filename = "${abspath(path.root)}/${trimsuffix(basename(each.value), ".tftpl")}" file_permission = "0600" diff --git a/edbterraform/data/templates/gcloud/main.tf.j2 b/edbterraform/data/templates/gcloud/main.tf.j2 index c88062ef..f417620e 100644 --- a/edbterraform/data/templates/gcloud/main.tf.j2 +++ b/edbterraform/data/templates/gcloud/main.tf.j2 @@ -123,7 +123,7 @@ resource "local_file" "user_templates" { output_name default made through jinja2 templating with edb-terraform: 'servers' terraform output -json */ - for_each = toset(module.spec.base.templates) + for_each = fileset(path.root, "templates/*.tftpl") content = templatefile(each.value, local.servers) filename = "${abspath(path.root)}/${trimsuffix(basename(each.value), ".tftpl")}" file_permission = "0600" diff --git a/edbterraform/data/templates/user/inventory.yml.tftpl b/edbterraform/data/templates/user/inventory.yml.tftpl new file mode 100644 index 00000000..964114b3 --- /dev/null +++ b/edbterraform/data/templates/user/inventory.yml.tftpl @@ -0,0 +1,217 @@ +# Original filename: inventory.yml.tftpl +# This inventory file is meant to be used alongside edb-ansible: https://github.com/EnterpriseDB/edb-ansible +# and relies on tags set within the infrastructure file to properly generate. +# Sample infrastructure file for this template can be found under infrastructure-examples/aws-edb-ra-3.yml +# +# As a sample template, it will not cause terraform CLI and so it might be invalid. +# To fail upon errors, remove any try() function. 
+--- +all: + children: +# PEM servers: tags.type.pem_server +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pem_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + pemserver: + hosts: +%{ if lower(try(values.tags.reference_architecture, "")) == "edb-always-on" ~} + pemserver${ 1 + tonumber(try(values.tags.index, -2)) }: +%{ else ~} + pemserver${ 1 + tonumber(values.tags.index) }.${ values.tags.cluster_name }.internal: +%{ endif ~} + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ endfor ~} + +# Barman servers: tags.type.barman_server +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "barman_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + barmanserver: + hosts: +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "barman_server"} ~} +%{ if lower(try(values.tags.reference_architecture, "")) == "edb-always-on" ~} + barmandc${ 1 + tonumber(try(values.tags.index -2)) }: +%{ else ~} + barmanserver${ 1 + tonumber(try(values.tags.index, "")) }.${ values.tags.cluster_name }.internal: +%{ endif ~} + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pem_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + pem_agent: true + pem_server_private_ip: ${ values.private_ip } +%{ endfor ~} +%{ endfor ~} + +# Postgres servers: tags.type.postgres_server +%{ for key,values in { for key,values in servers.machines: key=>values if 
lower(try(values.tags.type, "")) == "postgres_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + primary: + hosts: +%{ if lower(try(values.tags.pg_type, "")) == "epas" ~} + epas${ 1 + tonumber(values.tags.index) }.${ values.tags.cluster_name }.internal: +%{ else ~} + pgsql${ 1 + tonumber(values.tags.index) }.${ values.tags.cluster_name }.internal: +%{ endif ~} + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "barman_server" && tonumber(try(values.tags.index, -1)) == 0} ~} + barman: true + barman_server_private_ip: ${ values.private_ip } + barman_backup_method: postgres +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "dbt2_client" } ~} +%{ if tonumber(try(values.tags.index, -1)) == 0 ~} + dbt2: true +%{ endif ~} + dbt2_client_private_ip${ 1 + tonumber(try(values.tags.index, -2)) }: ${ values.private_ip } +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "hammerdb_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + hammerdb: true + hammerdb_server_private_ip: ${ values.private_ip } +%{ endfor ~} +%{ if tobool(try(values.tags.pooler_local, false)) && lower(try(values.tags.pg_pooler, "")) == "pgbouncer" ~} + pgbouncer: true +%{ endif ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pem_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + pem_agent: true + pem_server_private_ip: ${ values.private_ip } +%{ endfor ~} + standby: + hosts: +%{ endfor ~} +%{ for key,values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == 
"postgres_server" && tonumber(try(values.tags.index, 0)) != 0 } ~} +%{ if lower(try(values.tags.pg_type, "")) == "epas" ~} + epas${ 1 + tonumber(values.tags.index) }.${ values.tags.cluster_name }.internal: +%{ else ~} + pgsql${ 1 + tonumber(values.tags.index) }.${ values.tags.cluster_name }.internal: +%{ endif ~} + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "barman_server" && tonumber(try(values.tags.index, -1)) == 0} ~} + barman: true + barman_server_private_ip: ${ values.private_ip } + barman_backup_method: postgres +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "dbt2_client" } ~} + dbt2_client_private_ip${ 1 + tonumber(try(values.tags.index, -2)) }: ${ values.private_ip } +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "hammerdb_server" && tonumber(try(values.tags.index, -1)) == 0} ~} + hammerdb: true + hammerdb_server_private_ip: ${ values.private_ip } +%{ endfor ~} +%{ if tobool(try(values.tags.pooler_local, false)) && lower(try(values.tags.pg_pooler, "")) == "pgbouncer" ~} + pgbouncer: true +%{ endif ~} + replication_type: ${ values.tags.replication_type } +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "postgres_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + upstream_node_private_ip: ${ values.private_ip } +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pem_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + pem_agent: true + pem_server_private_ip: ${ values.private_ip } 
+%{ endfor ~} +%{ endfor ~} + +# BDR servers: tags.type.bdr_server +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "bdr_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + primary: + hosts: +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "bdr_server" } ~} +%{ if lower(try(values.tags.pg_type, "")) == "epas" ~} + epas${ 1 + tonumber(try(values.tags.index, -2)) ~}: +%{ else ~} + pgsql${ 1 + tonumber(try(values.tags.index, -2)) ~}: +%{ endif ~} + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pem_server" && tonumber(try(values.tags.index), -1) == 0 } ~} + pem_agent: true + pem_server_private_ip: ${ values.private_ip } +%{ endfor ~} +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "bdr_witness_server" } ~} +%{ if lower(try(values.tags.pg_type, "")) == "epas" ~} + epas${ 1 + tonumber(try(values.tags.index, -2)) + length([ for key, values in servers.machines: key if lower(try(values.tags.type, "")) == "bdr_server"]) }: +%{ else ~} + pgsql${ 1 + tonumber(try(values.tags.index,-2)) + length([ for key, values in servers.machines: key if lower(try(values.tags.type, "")) == "bdr_server"]) }: +%{ endif ~} + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ for key, values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pem_server" && tonumber(try(values.tags.index), -1) == 0 } ~} 
+ pem_agent: true + pem_server_private_ip: ${ values.private_ip } +%{ endfor ~} +%{ endfor ~} + +# Pool connection servers: tags.type.pooler_server +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pooler_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + ${ values.tags.pooler_type }: + hosts: +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pooler_server" } ~} +%{ if length([ for key, values in servers.machines: key if lower(try(values.tags.type, "")) == "bdr_server"]) > 0 ~} + pgbouncer${ 1 + tonumber(try(values.tags.index, -2)) }: +%{ else ~} + ${ lower(try(values.tags.pooler_type, "")) }-${ 1 + tonumber(try(values.tags.index, -2)) }.${ values.tags.cluster_name }.internal: +%{ endif ~} + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ for key,values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "postgres_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + primary_private_ip: ${ values.private_ip } +%{ endfor ~} +%{ for key, values in {for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "pem_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + pem_agent: true + pem_server_private_ip: ${ values.private_ip } +%{ endfor ~} +%{ endfor ~} + +# DBT2 servers: tags.type.dbt2_client +%{ for key, values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "dbt2_client" && tonumber(try(values.tags.index, -1)) == 0 } ~} + dbt2_client: + hosts: +%{ endfor ~} +%{ for key, values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "dbt2_client" } ~} + dbt2_client${ 1 + 
tonumber(try(values.tags.index, -2)) }.${ values.tags.cluster_name }.internal: + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ endfor ~} + +# DBT2 servers: tags.type.dbt2_driver +%{ for key, values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "dbt2_driver" && tonumber(try(values.tags.index, -1)) == 0 } ~} + dbt2_driver: + hosts: +%{ endfor ~} +%{ for key, values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "dbt2_driver" } ~} + dbt2_driver${ 1 + tonumber(try(values.tags.index, -2)) }.${ values.tags.cluster_name }.internal: + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ endfor ~} + +# HammerDB servers: tags.type.hammerdb_server +%{ for key, values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "hammerdb_server" && tonumber(try(values.tags.index, -1)) == 0 } ~} + hammerdbserver: + hosts: +%{ endfor ~} +%{ for key, values in { for key, values in servers.machines: key=>values if lower(try(values.tags.type, "")) == "hammerdb_server" } ~} + hammerdbserver${ 1 + tonumber(try(values.tags.index, -2)) }.${ values.tags.cluster_name }.internal: + ansible_host: ${ values.public_ip } + private_ip: ${ values.private_ip } + ansible_user: ${ values.operating_system.ssh_user } + ansible_ssh_private_key_file: ${ values.operating_system.ssh_private_key_file } +%{ endfor ~} diff --git a/edbterraform/data/terraform/aws/modules/machine/setup_volume.sh b/edbterraform/data/terraform/aws/modules/machine/setup_volume.sh index b8ca1474..596b9210 100755 --- a/edbterraform/data/terraform/aws/modules/machine/setup_volume.sh +++ 
b/edbterraform/data/terraform/aws/modules/machine/setup_volume.sh @@ -34,7 +34,9 @@ done # NVME device. for NVME_DEVICE in $(sudo ls /dev/nvme*n*); do EBS_DEVICE=$(sudo nvme id-ctrl -v ${NVME_DEVICE} | grep "0000:" | awk '{ print $18 }' | sed 's/["\.]//g') - if [ "$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ]; then + + # /dev/ might be dropped at times so we need to check both cases + if [ "$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ] || [ "/dev/$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ]; then TARGET_NVME_DEVICE=${NVME_DEVICE} break fi diff --git a/edbterraform/data/terraform/aws/modules/specification/files.tf b/edbterraform/data/terraform/aws/modules/specification/files.tf index bda17b7e..8b2f28ad 100644 --- a/edbterraform/data/terraform/aws/modules/specification/files.tf +++ b/edbterraform/data/terraform/aws/modules/specification/files.tf @@ -7,6 +7,8 @@ locals { var.spec.ssh_key.public_path != null || var.spec.ssh_key.private_path != null ? 1 : 0 ) + private_ssh_path = "${abspath(path.root)}/${var.spec.ssh_key.output_name}" + public_ssh_path = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" } resource "tls_private_key" "default" { @@ -18,7 +20,7 @@ resource "tls_private_key" "default" { resource "local_sensitive_file" "default_private" { count = local.ssh_user_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}" + filename = local.private_ssh_path file_permission = "0600" content = try(tls_private_key.default[0].private_key_openssh, "") } @@ -26,7 +28,7 @@ resource "local_sensitive_file" "default_private" { resource "local_file" "default_public" { count = local.ssh_user_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" + filename = local.public_ssh_path file_permission = "0644" content = try(tls_private_key.default[0].public_key_openssh, "") } @@ -34,7 +36,7 @@ resource "local_file" "default_public" { resource "local_sensitive_file" "private_key" { count = local.ssh_keys_count - filename = 
"${abspath(path.root)}/${var.spec.ssh_key.output_name}" + filename = local.private_ssh_path file_permission = "0600" source = var.spec.ssh_key.private_path @@ -59,7 +61,7 @@ resource "local_sensitive_file" "private_key" { resource "local_file" "public_key" { count = local.ssh_keys_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" + filename = local.public_ssh_path file_permission = "0644" source = var.spec.ssh_key.public_path diff --git a/edbterraform/data/terraform/aws/modules/specification/outputs.tf b/edbterraform/data/terraform/aws/modules/specification/outputs.tf index baf4c3e7..40de08a4 100644 --- a/edbterraform/data/terraform/aws/modules/specification/outputs.tf +++ b/edbterraform/data/terraform/aws/modules/specification/outputs.tf @@ -41,7 +41,8 @@ locals { Name = format("%s-%s-%s", (machine_spec.count > 1 ? "${name}-${index}" : name), local.cluster_name, random_id.apply.hex) }) # assign operating system from mapped names - operating_system = var.spec.images[machine_spec.image_name] + # add private and public key paths so they can be passed in the machine outputs + operating_system = merge(var.spec.images[machine_spec.image_name], { "ssh_private_key_file": local.private_ssh_path, "ssh_public_key_file": local.public_ssh_path }) # assign zone from mapped names zone = var.spec.regions[machine_spec.region].zones[machine_spec.zone_name].zone cidr = var.spec.regions[machine_spec.region].zones[machine_spec.zone_name].cidr diff --git a/edbterraform/data/terraform/aws/modules/specification/variables.tf b/edbterraform/data/terraform/aws/modules/specification/variables.tf index 8fe9f945..c7fd5ffe 100644 --- a/edbterraform/data/terraform/aws/modules/specification/variables.tf +++ b/edbterraform/data/terraform/aws/modules/specification/variables.tf @@ -162,7 +162,6 @@ variable "spec" { instance_type = string tags = optional(map(string), {}) })), {}) - templates = optional(list(string), []) }) } diff --git 
a/edbterraform/data/terraform/azure/modules/machine/setup_volume.sh b/edbterraform/data/terraform/azure/modules/machine/setup_volume.sh index b8ca1474..596b9210 100644 --- a/edbterraform/data/terraform/azure/modules/machine/setup_volume.sh +++ b/edbterraform/data/terraform/azure/modules/machine/setup_volume.sh @@ -34,7 +34,9 @@ done # NVME device. for NVME_DEVICE in $(sudo ls /dev/nvme*n*); do EBS_DEVICE=$(sudo nvme id-ctrl -v ${NVME_DEVICE} | grep "0000:" | awk '{ print $18 }' | sed 's/["\.]//g') - if [ "$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ]; then + + # /dev/ might be dropped at times so we need to check both cases + if [ "$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ] || [ "/dev/$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ]; then TARGET_NVME_DEVICE=${NVME_DEVICE} break fi diff --git a/edbterraform/data/terraform/azure/modules/machine/variables.tf b/edbterraform/data/terraform/azure/modules/machine/variables.tf index 06cdb959..21266acf 100644 --- a/edbterraform/data/terraform/azure/modules/machine/variables.tf +++ b/edbterraform/data/terraform/azure/modules/machine/variables.tf @@ -10,6 +10,8 @@ variable "operating_system" { publisher = string version = string ssh_user = string + ssh_public_key_file = string + ssh_private_key_file = string }) } variable "machine" { diff --git a/edbterraform/data/terraform/azure/modules/specification/files.tf b/edbterraform/data/terraform/azure/modules/specification/files.tf index 8216d7e1..8b2f28ad 100644 --- a/edbterraform/data/terraform/azure/modules/specification/files.tf +++ b/edbterraform/data/terraform/azure/modules/specification/files.tf @@ -2,11 +2,13 @@ # If ssh_key.public_path and ssh_key.private_path are defined, # overwrite the default keys. locals { - ssh_user_count = var.spec.kubernetes != null || var.spec.images != null ? 1 : 0 + ssh_user_count = var.spec.images != null ? 1 : 0 ssh_keys_count = ( var.spec.ssh_key.public_path != null || var.spec.ssh_key.private_path != null ? 
1 : 0 ) + private_ssh_path = "${abspath(path.root)}/${var.spec.ssh_key.output_name}" + public_ssh_path = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" } resource "tls_private_key" "default" { @@ -18,7 +20,7 @@ resource "tls_private_key" "default" { resource "local_sensitive_file" "default_private" { count = local.ssh_user_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}" + filename = local.private_ssh_path file_permission = "0600" content = try(tls_private_key.default[0].private_key_openssh, "") } @@ -26,7 +28,7 @@ resource "local_sensitive_file" "default_private" { resource "local_file" "default_public" { count = local.ssh_user_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" + filename = local.public_ssh_path file_permission = "0644" content = try(tls_private_key.default[0].public_key_openssh, "") } @@ -34,7 +36,7 @@ resource "local_file" "default_public" { resource "local_sensitive_file" "private_key" { count = local.ssh_keys_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}" + filename = local.private_ssh_path file_permission = "0600" source = var.spec.ssh_key.private_path @@ -59,7 +61,7 @@ resource "local_sensitive_file" "private_key" { resource "local_file" "public_key" { count = local.ssh_keys_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" + filename = local.public_ssh_path file_permission = "0644" source = var.spec.ssh_key.public_path diff --git a/edbterraform/data/terraform/azure/modules/specification/outputs.tf b/edbterraform/data/terraform/azure/modules/specification/outputs.tf index d2d6c0f0..5885243e 100644 --- a/edbterraform/data/terraform/azure/modules/specification/outputs.tf +++ b/edbterraform/data/terraform/azure/modules/specification/outputs.tf @@ -49,7 +49,9 @@ locals { name = machine_spec.count > 1 ? 
"${name}-${index}" : name }) # assign operating system from mapped names - operating_system = var.spec.images[machine_spec.image_name] + # add private and public key paths so they can be passed in the machine outputs + operating_system = merge(var.spec.images[machine_spec.image_name], { "ssh_private_key_file": local.private_ssh_path, "ssh_public_key_file": local.public_ssh_path }) + # assign zone from mapped names # Handle 0 as null to represent a region with no zones available zone = tostring(var.spec.regions[machine_spec.region].zones[machine_spec.zone_name].zone) == "0" ? null : var.spec.regions[machine_spec.region].zones[machine_spec.zone_name].zone diff --git a/edbterraform/data/terraform/azure/modules/specification/variables.tf b/edbterraform/data/terraform/azure/modules/specification/variables.tf index 6bbe6286..bc326d7c 100644 --- a/edbterraform/data/terraform/azure/modules/specification/variables.tf +++ b/edbterraform/data/terraform/azure/modules/specification/variables.tf @@ -152,7 +152,6 @@ variable "spec" { publisher_name = string tags = optional(map(string), {}) })), {}) - templates = optional(list(string), []) }) } diff --git a/edbterraform/data/terraform/gcloud/modules/machine/setup_volume.sh b/edbterraform/data/terraform/gcloud/modules/machine/setup_volume.sh index b8ca1474..596b9210 100644 --- a/edbterraform/data/terraform/gcloud/modules/machine/setup_volume.sh +++ b/edbterraform/data/terraform/gcloud/modules/machine/setup_volume.sh @@ -34,7 +34,9 @@ done # NVME device. 
for NVME_DEVICE in $(sudo ls /dev/nvme*n*); do EBS_DEVICE=$(sudo nvme id-ctrl -v ${NVME_DEVICE} | grep "0000:" | awk '{ print $18 }' | sed 's/["\.]//g') - if [ "$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ]; then + + # /dev/ might be dropped at times so we need to check both cases + if [ "$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ] || [ "/dev/$EBS_DEVICE" = "${TARGET_EBS_DEVICES[0]}" ]; then TARGET_NVME_DEVICE=${NVME_DEVICE} break fi diff --git a/edbterraform/data/terraform/gcloud/modules/specification/files.tf b/edbterraform/data/terraform/gcloud/modules/specification/files.tf index dbdabbe5..8b2f28ad 100644 --- a/edbterraform/data/terraform/gcloud/modules/specification/files.tf +++ b/edbterraform/data/terraform/gcloud/modules/specification/files.tf @@ -7,6 +7,8 @@ locals { var.spec.ssh_key.public_path != null || var.spec.ssh_key.private_path != null ? 1 : 0 ) + private_ssh_path = "${abspath(path.root)}/${var.spec.ssh_key.output_name}" + public_ssh_path = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" } resource "tls_private_key" "default" { @@ -18,23 +20,23 @@ resource "tls_private_key" "default" { resource "local_sensitive_file" "default_private" { count = local.ssh_user_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}" + filename = local.private_ssh_path file_permission = "0600" - content = tls_private_key.default[0].private_key_openssh + content = try(tls_private_key.default[0].private_key_openssh, "") } resource "local_file" "default_public" { count = local.ssh_user_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" + filename = local.public_ssh_path file_permission = "0644" - content = tls_private_key.default[0].public_key_openssh + content = try(tls_private_key.default[0].public_key_openssh, "") } resource "local_sensitive_file" "private_key" { count = local.ssh_keys_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}" + filename = local.private_ssh_path file_permission = 
"0600" source = var.spec.ssh_key.private_path @@ -59,7 +61,7 @@ resource "local_sensitive_file" "private_key" { resource "local_file" "public_key" { count = local.ssh_keys_count - filename = "${abspath(path.root)}/${var.spec.ssh_key.output_name}.pub" + filename = local.public_ssh_path file_permission = "0644" source = var.spec.ssh_key.public_path diff --git a/edbterraform/data/terraform/gcloud/modules/specification/variables.tf b/edbterraform/data/terraform/gcloud/modules/specification/variables.tf index 618725de..6161f356 100644 --- a/edbterraform/data/terraform/gcloud/modules/specification/variables.tf +++ b/edbterraform/data/terraform/gcloud/modules/specification/variables.tf @@ -134,7 +134,6 @@ variable "spec" { instance_type = string tags = optional(map(string), {}) })), {}) - templates = optional(list(string), []) }) } diff --git a/edbterraform/lib.py b/edbterraform/lib.py index 246675f3..5c717d41 100644 --- a/edbterraform/lib.py +++ b/edbterraform/lib.py @@ -18,6 +18,7 @@ from edbterraform.utils.files import load_yaml_file from edbterraform.utils.logs import logger from edbterraform.CLI import TerraformCLI +from edbterraform import __dot_project__ def tpl(template_name, dest, csp, vars={}): # Renders and saves a jinja2 template based on a given template name and @@ -44,6 +45,37 @@ def tpl(template_name, dest, csp, vars={}): logger.error("ERROR: could not render template %s (%s)" % (template_name, e)) sys.exit(1) +def save_default_templates(templates_directory): + ''' + Save any predefined templates into the 'directory/templates' for consistent referencing + If the filename already exists, it should be skipped to avoid overriding user customizations. 
+ ''' + # Templates are located in parent_directory/data/templates/user + script_dir = Path(__file__).parent.resolve() + predefined_templates = script_dir / 'data' / 'templates' / 'user' + templates_directory = Path(templates_directory) + logger.info(f'Copy templates from {predefined_templates} into {templates_directory}') + try: + if not templates_directory.exists(): + logger.info(f'Creating predefined template directory: {templates_directory}') + templates_directory.mkdir(parents=True, exist_ok=True) + + for template in predefined_templates.iterdir(): + if not template.is_file(): + logger.warning(f'Skipping {template} as it is not a file') + continue + + if not (templates_directory / template.name).exists(): + shutil.copy2(str(template), str(templates_directory)) + else: + logger.info(f''' + Skipping: {template} already exists in {templates_directory}. + To copy the latest pre-defined templates, erase any conflicting template file names. + ''') + except Exception as e: + logger.error("ERROR: cannot create template directory %s (%s)" % (templates_directory, e)) + sys.exit(1) + def create_project_dir(dir, csp): # Creates a new terraform project (directory) and copy terraform modules # into this directory. 
@@ -81,36 +113,40 @@ def save_terraform_vars(dir, filename, vars): logger.error("ERROR: could not write %s (%s)" % (dest, e)) sys.exit(1) -def save_user_templates(project_path: Path, template_files: List[str]) -> List[str]: +def save_user_templates(project_path: Path, templates: List[str]): ''' - Save any user templates into a template directory - for reuse during terraform execution and portability of directory - - Return a list of template/ + Save any user templates under project/templates + For reuse during terraform execution and portability of directory ''' - new_files = [] + logger.info(f'Saving user templates: {templates}') directory = "templates" basepath = project_path / directory + try: - for file in template_files: + if not basepath.exists(): + logger.info(f'Creating template directory: {basepath}') + basepath.mkdir(parents=True, exist_ok=True) - if not os.path.exists(file): - raise Exception("templates %s does not exist" % file) + for template in templates: + template = Path(template) - if not os.path.exists(basepath): - logger.info(f'Creating template directory: {basepath}') - basepath.mkdir(parents=True, exist_ok=True) + if not template.exists(): + raise Exception("templates %s does not exist" % template) + + if template.is_dir(): + for file in template.iterdir(): + logger.info(f'Copying {file} into {basepath}') + shutil.copy2(str(file), str(basepath)) + + if template.is_file(): + logger.info(f'Copying {template} into {basepath}') + shutil.copy2(str(template), str(basepath)) - full_path = basepath / os.path.basename(file) - logger.info(f'Copying file {file} into {full_path}') - final_path = shutil.copy(file, full_path) - new_files.append(f'{directory}/{os.path.basename(final_path)}') except Exception as e: - logger.error("Cannot create template %s (%s)" % (file, e)) + logger.error("Cannot create template (%s)" % (e)) logger.error("Current working directory: %s" % (Path.cwd())) - logger.error("List of templates: %s" % (template_files)) + 
logger.error("List of templates: %s" % (templates)) sys.exit(1) - return new_files def regions_to_peers(regions): # Build a list of peer regions, based on a given list of regions. @@ -195,7 +231,7 @@ def build_vars(csp: str, infra_vars: Path, server_output_name: str): return (terraform_vars, template_vars) -def generate_terraform(infra_file: Path, project_path: Path, csp: str, run_validation: bool, bin_path: Path) -> dict: +def generate_terraform(infra_file: Path, project_path: Path, csp: str, run_validation: bool, bin_path: Path, user_templates: List[Path]) -> dict: """ Generates the terraform files from jinja templates and terraform modules and saves the files into a project_directory for use with 'terraform' commands @@ -216,14 +252,23 @@ def generate_terraform(infra_file: Path, project_path: Path, csp: str, run_valid # Load infrastructure variables from the YAML file that was passed infra_vars = load_yaml_file(infra_file) + # Save default templates into dot directory + save_default_templates(f'{__dot_project__}/templates') + # Duplicate terraform code into target project directory create_project_dir(project_path, csp) # Allow for user supplied templates # Terraform does not allow us to copy a template and then reference it within the same run when using templatefile() # To get past this, we will need to copy over all the user passed templates into the project directory - # and update the template variable passed in by the user - infra_vars[csp]["templates"] = save_user_templates(project_path, infra_vars.get(csp,{}).get('templates',[])) + infra_file_templates = infra_vars.get(csp, {}).get('templates', []) + if not isinstance(infra_file_templates, list): + raise TypeError("Template variables should pass in a list of strings that represent a path or rely on the CLI passthrough") + # Remove templates from final terraform variables since save_user_templates will save them into project_name/templates/ + if infra_file_templates: + del infra_vars[csp]['templates'] + 
user_templates.extend(infra_file_templates) + save_user_templates(project_path, user_templates) # Transform variables extracted from the infrastructure file into # terraform and templates variables. diff --git a/edbterraform/utils/logs.py b/edbterraform/utils/logs.py index 8af92f83..b90173d1 100644 --- a/edbterraform/utils/logs.py +++ b/edbterraform/utils/logs.py @@ -2,13 +2,12 @@ from logging.handlers import RotatingFileHandler import os import sys -from pathlib import Path from datetime import datetime -from edbterraform import __project_name__ +from edbterraform import __project_name__, __dot_project__ logger = logging.getLogger(__project_name__) -def setup_logs(level='INFO', file_name=datetime.now().strftime('%Y-%m-%d'), directory=f'{Path.home()}/.{__project_name__}/logs', stdout=True): +def setup_logs(level='INFO', file_name=datetime.now().strftime('%Y-%m-%d'), directory=f'{__dot_project__}/logs', stdout=True): try: log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' date_format = '%Y-%m-%dT%H:%M:%S%z' diff --git a/infrastructure-examples/aws-ec2-v2.yml b/infrastructure-examples/aws-ec2-v2.yml index 9a7cb87c..b1cf5b0c 100644 --- a/infrastructure-examples/aws-ec2-v2.yml +++ b/infrastructure-examples/aws-ec2-v2.yml @@ -74,5 +74,3 @@ aws: encrypted: false tags: type: postgres - templates: - - ./edb-terraform/infrastructure-examples/v2_inventory.yml.tftpl diff --git a/infrastructure-examples/aws-edb-ra-3.yml b/infrastructure-examples/aws-edb-ra-3.yml new file mode 100644 index 00000000..fe4e9a9c --- /dev/null +++ b/infrastructure-examples/aws-edb-ra-3.yml @@ -0,0 +1,253 @@ +--- +aws: + tags: + created_by: Demo-Infra + cluster_name: edb-ra-3 + reference_architecture: edb-ra-3 + images: + rocky8: + name: Rocky-8-ec2-8.5-20211114.2.x86_64 + owner: "792107900819" + ssh_user: rocky + regions: + us-west-2: + cidr_block: 10.2.0.0/16 + zones: + default: + cidr: 10.2.1.0/24 + zone: us-west-2a + service_ports: + - port: 22 + protocol: tcp + description: SSH 
default + region_ports: + - protocol: icmp + description: regional ping + - port: 8443 + protocol: tcp + description: tcp + - port: 5432 + protocol: tcp + description: postgres-port + - port: 5444 + protocol: tcp + description: epas-port + - port: 7800 + to_port: 7810 + protocol: tcp + description: tcp + - port: 9999 + protocol: tcp + description: pgpool2 user connections + - port: 9898 + protocol: tcp + description: pgpool2 pcp + - port: 9898 + protocol: udp + description: pgpool2 pcp + - port: 9000 + protocol: tcp + description: pgpool2 watchdog + - port: 9000 + protocol: udp + description: pgpool2 watchdog + - port: 9694 + protocol: tcp + description: pgpool2 heartbeat + - port: 9694 + protocol: udp + description: pgpool2 heartbeat + - port: 6432 + protocol: tcp + description: pgbouncer + - port: 5442 + protocol: tcp + description: HARP + - port: 2379 + protocol: tcp + description: etcd client + - port: 2380 + protocol: tcp + description: etcd peer + - port: 30000 + protocol: tcp + description: dbt2 client + machines: + pem-server-0: + instance_type: c5.xlarge + volume: + type: gp2 + size: 100 + iops: 250 + size_gb: 100 + image_name: rocky8 + region: us-west-2 + zone_name: default + tags: + type: pem_server + pg_type: epas + index: 0 + postgres-server-0: + volume: + type: gp2 + size: 50 + iops: 250 + size_gb: 50 + additional_volumes: + - type: gp2 + size: 50 + iops: 250 + encrypted: false + mount_point: /pgdata + size_gb: 50 + - type: gp2 + size: 50 + iops: 250 + encrypted: false + mount_point: /pgwal + size_gb: 50 + instance_type: c5.2xlarge + image_name: rocky8 + region: us-west-2 + zone_name: default + tags: + reference_architecture: edb-ra-3 + type: postgres_server + pg_type: epas + index: 0 + postgres_group: postgres_server + pooler_type: pgpool2 + pooler_local: false + postgres-server-1: + volume: + type: gp2 + size: 50 + iops: 250 + size_gb: 50 + additional_volumes: + - type: gp2 + size: 50 + iops: 250 + encrypted: false + mount_point: /pgdata + size_gb: 
50 + - type: gp2 + size: 50 + iops: 250 + encrypted: false + mount_point: /pgwal + size_gb: 50 + instance_type: c5.2xlarge + image_name: rocky8 + region: us-west-2 + zone_name: default + tags: + type: postgres_server + pg_type: epas + index: 1 + postgres_group: postgres_server + replication_type: synchronous + pooler_type: pgpool2 + pooler_local: false + postgres-server-2: + volume: + type: gp2 + size: 50 + iops: 250 + size_gb: 50 + additional_volumes: + - type: gp2 + size: 50 + iops: 250 + encrypted: false + mount_point: /pgdata + size_gb: 50 + - type: gp2 + size: 50 + iops: 250 + encrypted: false + mount_point: /pgwal + size_gb: 50 + instance_type: c5.2xlarge + image_name: rocky8 + region: us-west-2 + zone_name: default + tags: + type: postgres_server + pg_type: epas + index: 2 + postgres_group: postgres_server + replication_type: asynchronous + pooler_type: pgpool2 + pooler_local: false + barman-server-0: + instance_type: c5.2xlarge + volume: + type: gp2 + size: 50 + iops: 250 + size_gb: 50 + additional_volumes: + - count: 1 + type: gp2 + size: 300 + iops: 250 + encrypted: false + mount_point: /var/lib/barman + size_gb: 300 + image_name: rocky8 + region: us-west-2 + zone_name: default + tags: + type: barman_server + pg_type: epas + index: 0 + pooler-server-0: + instance_type: c5.xlarge + volume: + type: gp2 + size: 30 + iops: 250 + size_gb: 30 + image_name: rocky8 + region: us-west-2 + zone_name: default + tags: + reference_architecture: edb-ra-3 + type: pooler_server + pg_type: epas + index: 0 + pooler_type: pgpool2 + pooler_local: false + pooler-server-1: + instance_type: c5.xlarge + volume: + type: gp2 + size: 30 + iops: 250 + size_gb: 30 + image_name: rocky8 + region: us-west-2 + zone_name: default + tags: + type: pooler_server + pg_type: epas + index: 1 + pooler_type: pgpool2 + pooler_local: false + pooler-server-2: + instance_type: c5.xlarge + volume: + type: gp2 + size: 30 + iops: 250 + size_gb: 30 + image_name: rocky8 + region: us-west-2 + zone_name: 
default + tags: + type: pooler_server + pg_type: epas + index: 2 + pooler_type: pgpool2 + pooler_local: false diff --git a/infrastructure-examples/azure-vms-v2.yml b/infrastructure-examples/azure-vms-v2.yml index 4485508b..aa765491 100644 --- a/infrastructure-examples/azure-vms-v2.yml +++ b/infrastructure-examples/azure-vms-v2.yml @@ -79,5 +79,3 @@ azure: iops: 1000 tags: type: postgres - templates: - - ./edb-terraform/infrastructure-examples/v2_inventory.yml.tftpl diff --git a/infrastructure-examples/compute-engine-v2.yml b/infrastructure-examples/compute-engine-v2.yml index 8a2b7084..9b50d7af 100644 --- a/infrastructure-examples/compute-engine-v2.yml +++ b/infrastructure-examples/compute-engine-v2.yml @@ -67,5 +67,3 @@ gcloud: iops: null tags: type: postgres - templates: - - ./edb-terraform/infrastructure-examples/v2_inventory.yml.tftpl