diff --git a/README.md b/README.md
index 455957c..e59637e 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,142 @@
-# terraform-kubernetes-django
-Deploy Django with Terraform to Kubernetes with optional support for AWS/EKS or GCP/GKE
+
+# Terraform Module for a Django Application Infrastructure
+
+This Terraform module provisions a robust, scalable infrastructure tailored for deploying Django applications. It goes beyond a bare Kubernetes deployment by integrating with multiple cloud services to provide a comprehensive solution that includes networking, security, and cloud storage, ensuring your application is highly available, secure, and performant.
+
+## Key Features:
+
+- **Kubernetes Workload Configuration**: Automated setup of the Kubernetes resources a Django application needs (deployments, ingress, autoscaling and pod disruption budgets), following best practices for scalability and manageability.
+- **Cloud Integration**: Seamless integration with AWS and Google Cloud Platform (GCP) for resource provisioning, including storage buckets, service accounts and databases, leveraging the strengths of each cloud provider to optimize your infrastructure.
+- **Network Security**: Configuration of Cloudflare for advanced security features and protection against DDoS attacks, with automated DNS and CDN setup to enhance your application's security and performance.
+- **Storage Solutions**: Setup of cloud storage options (AWS S3, Google Cloud Storage) for static and media files, ensuring fast and secure access to your application's assets.
+- **Environment Configuration**: Customizable environment variables and secret management to securely deploy and manage your Django application's configuration.
+- **Service Account Management**: Provisioning of service accounts with fine-grained permissions for secure access to cloud resources.
+
+This module provides a solid foundation for deploying Django applications, encapsulating best practices for cloud infrastructure and Kubernetes deployments. It is designed to be flexible, allowing customization to fit your project's specific needs while ensuring that the infrastructure's security and performance are not compromised.
+
+## Usage
+
+To use this module in your Terraform configuration, add a block like the following:
+
+```hcl
+module "django_app_kubernetes" {
+  source = "path/to/this/module"
+
+  name           = "my-django-app"
+  namespace      = "production"
+  gcp_project_id = "my-gcp-project-id"
+  image_name     = "ghcr.io/your-username/django-app"
+  image_tag      = "v1.0"
+
+  // Add other variables as needed (see Inputs below)
+}
+```
+
+Replace the placeholder values according to your project's requirements.
+
+## Examples
+
+Refer to the `examples/` directory for a complete working example.
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| terraform | >= 1.3.0 |
+
+## Providers
+
+The following providers are used by this module:
+
+| Name | Version | Source |
+|------|---------|--------|
+| kubernetes | >= 2.4.0 | hashicorp/kubernetes |
+| cloudflare | >= 2.0.0 | cloudflare/cloudflare |
+| aws | >= 3.0.0 | hashicorp/aws |
+| google | >= 3.0.0 | hashicorp/google |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| name | The name for deployment | string | "django" | no |
+| namespace | Kubernetes namespace to use with this installation | string | n/a | no |
+| create_namespace | Should we create the namespace or use an existing one? | string | true | no |
+| extra_labels | Extra labels to add to generated objects | map(string) | {} | no |
+| image_name | Docker image repository and name | string | n/a | no |
+| image_tag | Docker image tag | string | n/a | no |
+| image_pull_secrets | Image pull secrets | list(string) | [] | no |
+| image_pull_policy | Pull policy for the images | string | "IfNotPresent" | no |
+| env | A map of extra environment variables | map(string) | {} | no |
+| secret_env | A map of extra secret environment variables | map(string) | {} | no |
+| service_account_name | Name of the kubernetes service account if any | string | null | no |
+| cloud_sa_name | Name of the GCP/AWS service account if any | string | null | no |
+| gcp_sa_extra_roles | Create role bindings to these roles | list(string) | null | no |
+| gcp_bucket_name | Create and use Google storage with this name | string | null | no |
+| gcp_bucket_location | The location of the bucket, e.g. EU or US | string | n/a | no |
+| public_storage | Make the storage GCP bucket/AWS S3 public and create a CNAME | bool | true | no |
+| gcp_add_aws_s3_env | Add AWS_ variables for the GCS bucket | bool | false | no |
+| gcp_db_instance | Create a database and a user for this installation and use them instead of DATABASE_URL | string | null | no |
+| deployments | Map of deployment definitions (see `examples/main.tf`) | map(object({...})) | {...} | no |
+| readiness_probe | Readiness probe for containers which have ports | object({...}) | {...} | no |
+| liveness_probe | Liveness probe for containers which have ports | object({...}) | {...} | no |
+| ingress | A map of hostnames with maps of path-names and services | map(map(string)) | {...} | no |
+| ingress_annotations | Extra annotations to add to the ingress | map(string) | {} | no |
+| cloudflare_enabled | Create cloudflare records if true | bool | true | no |
+| postgres_enabled | Create a postgres database deployment | bool | false | no |
+| postgres_storage_size | Size of the PostgreSQL persistent volume claim | string | "10Gi" | no |
+| postgres_resources_requests_memory | PostgreSQL memory request | string | "256Mi" | no |
+| postgres_resources_requests_cpu | PostgreSQL CPU request | string | "250m" | no |
+| postgres_resources_limits_memory | PostgreSQL memory limit | string | null | no |
+| postgres_resources_limits_cpu | PostgreSQL CPU limit | string | null | no |
+| redis_enabled | Create a redis database deployment | bool | false | no |
+| redis_resources_limits_memory | Redis memory limit | string | null | no |
+| redis_resources_limits_cpu | Redis CPU limit | string | null | no |
+| redis_resources_requests_memory | Redis memory request | string | "128Mi" | no |
+| redis_resources_requests_cpu | Redis CPU request | string | "50m" | no |
+| redis_db_index | Redis database index to use | string | "1" | no |
+| celery_enabled | A short-hand for adding celery-beat and celery-worker deployments | string | true | no |
+| celery_db_index | Redis database index used by Celery | string | "2" | no |
+| celery_beat_defaults | Default deployment settings for celery-beat | string | {...} | no |
+| celery_worker_defaults | Default deployment settings for celery-worker | string | {...} | no |
+| aws_s3_name | Create and use AWS S3 | string | null | no |
+| volumes | Volume configuration | any | [] | no |
+| security_context_enabled | Enable the pod security context | bool | false | no |
+| security_context_gid | Group ID for the security context | number | 101 | no |
+| security_context_uid | User ID for the security context | number | 101 | no |
+| security_context_fsgroup | fsGroup for the security context | string | null | no |
+| aws_region | AWS region | string | "" | no |
+| cloudflare_api_token | Cloudflare API token | string | "12321321332145325-435325432543254325-4532542353254325-" | no |
+| gcp_region | GCP region | string | "" | no |
+| database_url | Database URL | string | "" | no |
+| redis_url | Redis URL | string | "" | no |
+| aws_secret | AWS secret | string | "" | no |
+| aws_id | AWS id | string | "" | no |
+| aws_s3_endpoint_url | AWS S3 endpoint URL | string | "" | no |
+| gcp_access_id | GCP access id | string | "" | no |
+| gcp_secret | GCP secret | string | "" | no |
+| service_account_email | Service account email | string | "example@project.iam.gserviceaccount.com" | no |
+| gcp_project_id | The GCP project ID | string | n/a | yes |
+
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| database_url | Database connection URL (from Cloud SQL or the in-cluster PostgreSQL) |
+| google_storage_hmac_key | HMAC key for the GCS bucket (sensitive) |
+| postgresql_url | Connection URL of the in-cluster PostgreSQL |
+| postgresql_username | Username of the in-cluster PostgreSQL |
+| postgresql_password | Password secret of the in-cluster PostgreSQL (sensitive) |
+| redis_url | Connection URL of the in-cluster Redis |
+| redis_password | Password secret of the in-cluster Redis (sensitive) |
+| s3_endpoint_url | Endpoint URL of the S3 bucket |
+| s3_access_key | IAM access key for the S3 bucket (sensitive) |
+
+
+## Authors
+
+Originally created by ahernper.
+
diff --git a/aws.tf b/aws.tf
deleted file mode 100644
index 6016551..0000000
--- a/aws.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-module "aws" {
-  count = var.aws_s3_name != null ? 1 : 0
-  source = "./modules/aws"
-  aws_s3_name = var.aws_s3_name
-  aws_s3_public = var.public_storage
-  aws_sa_name = var.cloud_sa_name
-}
diff --git a/cloudflare.tf b/cloudflare.tf
deleted file mode 100644
index d3a7f5b..0000000
--- a/cloudflare.tf
+++ /dev/null
@@ -1,20 +0,0 @@
-module "cloudflare" {
-  depends_on = [kubernetes_ingress_v1.ingress.0]
-  count = var.cloudflare_enabled && length(var.ingress) > 0 ? 1 : 0
-  source = "./modules/cloudflare"
-  records = merge({
-    for k, v in var.ingress : k => {
-      value = coalesce(kubernetes_ingress_v1.ingress.0.status.0.load_balancer.0.ingress.0.ip,
-        kubernetes_ingress_v1.ingress.0.status.0.load_balancer.0.ingress.0.hostname
-      )
-      type = kubernetes_ingress_v1.ingress.0.status.0.load_balancer.0.ingress.0.ip != "" ? "A" : "CNAME"
-      proxied = false
-    }
-  }, var.gcp_bucket_name != null && var.public_storage == true ? {
-    (var.gcp_bucket_name) = {
-      value = "c.storage.googleapis.com"
-      type = "CNAME"
-      proxied = true
-    }
-  } : {})
-}
\ No newline at end of file
diff --git a/deployment.tf b/deployment.tf
deleted file mode 100644
index 4ba537a..0000000
--- a/deployment.tf
+++ /dev/null
@@ -1,76 +0,0 @@
-module "deployment" {
-  # source = "../terraform-kubernetes-deployment"
-  source = "djangoflow/deployment/kubernetes"
-  version = ">=2.5.1"
-  for_each = local.deployments
-  depends_on = [kubernetes_secret_v1.secrets]
-
-  pre_install_job_command = length(each.value.pre_install_command) > 0 ? each.value.pre_install_command : (each.value.pre_install_migrate == true ? [
-    "python", "manage.py", "migrate"
-  ] : [])
-  service_account_name = var.service_account_name
-  object_prefix = "${var.name}-${each.key}"
-  replicas = each.value.replicas
-  command = each.value.command
-  arguments = each.value.args
-  image_name = var.image_name
-  image_tag = var.image_tag
-  image_pull_secrets = var.image_pull_secrets
-  pull_policy = var.image_pull_policy
-  namespace = var.namespace
-  volumes = var.volumes
-  readiness_probe_enabled = each.value.readiness_probe.enabled
-  readiness_probe_type = length(each.value.readiness_probe.command)> 0 ? "exec" : "http_get"
-  readiness_probe_path = var.readiness_probe.http_get.path
-  readiness_probe_port = var.readiness_probe.http_get.port
-  readiness_probe_scheme = var.readiness_probe.http_get.scheme
-  readiness_probe_initial_delay = var.readiness_probe.initial_delay_seconds
-  readiness_probe_timeout = var.readiness_probe.timeout_seconds
-  readiness_probe_failure = var.readiness_probe.failure_threshold
-  readiness_probe_success = var.readiness_probe.success_threshold
-  readiness_probe_command = each.value.readiness_probe.command
-  liveness_probe_enabled = each.value.liveness_probe.enabled
-  liveness_probe_type = length(each.value.liveness_probe.command)> 0 ?
"exec" : "http_get" - liveness_probe_path = var.liveness_probe.http_get.path - liveness_probe_port = var.liveness_probe.http_get.port - liveness_probe_scheme = var.liveness_probe.http_get.scheme - liveness_probe_initial_delay = var.liveness_probe.initial_delay_seconds - liveness_probe_timeout = var.liveness_probe.timeout_seconds - liveness_probe_failure = var.liveness_probe.failure_threshold - liveness_probe_success = var.liveness_probe.success_threshold - liveness_probe_command = each.value.liveness_probe.command - startup_probe_enabled = false - security_context_enabled = var.security_context_enabled - security_context_gid = var.security_context_gid - security_context_uid = var.security_context_uid - security_context_fsgroup = var.security_context_fsgroup - env = each.value.env == null ? local.env : merge(local.env, each.value.env) - resources_limits_cpu = each.value.resources_limits_cpu - resources_limits_memory = each.value.resources_limits_memory - resources_requests_cpu = each.value.resources_requests_cpu - resources_requests_memory = each.value.resources_requests_memory - labels = merge(local.common_labels, { - "app.kubernetes.io/instance" = "${var.name}-${each.key}" - "app.kubernetes.io/version" = var.image_tag - }) - - env_secret = [ - for k, v in local.secret_env : { - secret = "${var.name}-secrets" - name = k - key = k - } - ] - node_selector = var.gcp_bucket_name != null && var.cloud_sa_name != null ? { - "iam.gke.io/gke-metadata-server-enabled" = "true" - } : {} - service_links = true - ports = each.value.port > 0 ? [ - { - name = "http" - protocol = "TCP" - container_port = each.value.port - service_port = "80" - } - ] : [] -} diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000..1c03e27 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,70 @@ + +# Django Application Kubernetes Deployment Example + +This example demonstrates the practical use of the Terraform module designed to deploy a Django application within a Kubernetes environment, fully leveraging cloud services for an optimized infrastructure setup. By following this example, users will learn how to configure a robust and scalable deployment that integrates seamlessly with AWS, Google Cloud, and Cloudflare, ensuring the application benefits from high availability, security, and performance. + +The deployment covers not only the creation of a Kubernetes cluster but also the setup of necessary cloud resources, networking configurations, and security measures to provide a comprehensive infrastructure solution for Django applications. + +## Features Demonstrated: + +- **Kubernetes Cluster Deployment**: Showcases how to set up and configure a Kubernetes cluster specifically for Django applications. +- **Cloud Services Integration**: Details the integration with AWS and Google Cloud for provisioning compute instances, storage solutions, and other essential resources. +- **Networking and Security with Cloudflare**: Illustrates how to configure Cloudflare for enhanced security features, including DDoS protection, and CDN services for improved application delivery. +- **Comprehensive Environment Setup**: Provides insights into configuring environment variables, managing secrets, and setting up service accounts with appropriate permissions for accessing cloud resources. + +This example serves as a blueprint for deploying Django applications with a focus on scalability, security, and performance, utilizing the best practices in cloud infrastructure management. 
+
+## Usage
+
+To run this example, you need to execute:
+
+```bash
+terraform init
+terraform plan
+terraform apply
+```
+
+Note: this example creates real cloud resources, which cost money. Run `terraform destroy` when you no longer need them.
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| terraform | >= 1.3.0 |
+| aws | >= 3.0.0 |
+| google | >= 3.0.0 |
+| kubernetes | >= 2.4.0 |
+| cloudflare | >= 2.0.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| aws | >= 3.0.0 |
+| google | >= 3.0.0 |
+| kubernetes | >= 2.4.0 |
+| cloudflare | >= 2.0.0 |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| database_url | Database connection URL |
+| google_storage_hmac_key | HMAC key for the GCS bucket (sensitive) |
+| postgresql_url | Connection URL of the in-cluster PostgreSQL |
+| postgresql_username | PostgreSQL username |
+| postgresql_password | PostgreSQL password secret (sensitive) |
+| redis_url | Connection URL of the in-cluster Redis |
+| redis_password | Redis password secret (sensitive) |
+| s3_endpoint_url | Endpoint URL of the S3 bucket |
+| s3_access_key | IAM access key for the S3 bucket (sensitive) |
+
+
+## Notes
+
+- Ensure that your AWS and Google Cloud credentials are correctly configured.
+- Modify the inputs according to your project's needs.
diff --git a/examples/main.tf b/examples/main.tf
new file mode 100644
index 0000000..2585022
--- /dev/null
+++ b/examples/main.tf
@@ -0,0 +1,105 @@
+module "django_app" {
+  source = "../"
+
+
+  # General configurations
+  name = "my-django-app"
+  namespace = "production"
+  create_namespace = true
+  extra_labels = { "project" = "my-django-app" }
+  image_name = "ghcr.io/your-username/django-app"
+  image_tag = "v1.0"
+  image_pull_secrets = ["dockerhub"]
+  env = { "DJANGO_SETTINGS_MODULE" = "myproject.settings.production" }
+  secret_env = { "DATABASE_URL" = "postgres://USER:PASSWORD@HOST:PORT/NAME" }
+  service_account_name = "django-app-service-account"
+  # GCP configurations
+  cloud_sa_name = "my-gcp-sa"
+  gcp_sa_extra_roles = ["roles/storage.objectViewer"]
+  gcp_bucket_name = "my-gcp-django-static"
+  gcp_project_id = "my-gcp-project-id"
+  public_storage = true
+  gcp_add_aws_s3_env = true
+  aws_s3_name = "my-aws-s3-bucket"
+  gcp_db_instance = null
+  # Deployments
+  deployments = {
+    "web" = {
+      pre_install_migrate = true
+      pre_install_command = ["python", "manage.py", "migrate"]
+      replicas = 2
+      name = "web"
+      port = 8000
+      resources_requests_cpu = "200m"
+      resources_requests_memory = "500Mi"
+      resources_limits_cpu = "500m"
+      resources_limits_memory = "1Gi"
+      liveness_probe = {
+        http_get = {
+          path = "/healthz/"
+          port = 8000
+          scheme = "HTTP"
+        }
+        initial_delay_seconds = 30
+        period_seconds = 10
+      }
+      readiness_probe = {
+        http_get = {
+          path = "/ready/"
+          port = 8000
+          scheme = "HTTP"
+        }
+        initial_delay_seconds = 15
+        period_seconds = 5
+      }
+    }
+  }
+  # Ingress
+  ingress = {
+    "myapp.example.com" = {
+      "/" = "web"
+    }
+  }
+  ingress_annotations = {
+    "kubernetes.io/ingress.class" = "nginx"
+    "nginx.ingress.kubernetes.io/ssl-redirect" = "true"
+  }
+  # Cloudflare
+  cloudflare_enabled = true
+  # Postgres
+  postgres_enabled = true
+  postgres_storage_size = "20Gi"
+  # Redis
+  redis_enabled = true
+  # Celery
+  celery_enabled = true
+  celery_db_index = "2"
+  # Security context
+  security_context_enabled = true
+  security_context_gid = 1000
+  security_context_uid = 1000
+  security_context_fsgroup = 1000
+  # Service account email
+  service_account_email = "service-account@example.com"
+
+  aws_region = "us-west-2"
+  gcp_bucket_location = "us-central1"
+  gcp_region = "us-central1"
+  image_pull_policy = "IfNotPresent"
"postgres://postgres:5432" + redis_url = "redis://redis:6379" + volumes = [ + { + name = "html" + type = "persistent_volume_claim" + object_name = "nginx" + readonly = false + mounts = [ + { + mount_path = "/usr/share/nginx/html" + } + ] + } + ] + +} diff --git a/examples/outputs.tf b/examples/outputs.tf new file mode 100644 index 0000000..9066256 --- /dev/null +++ b/examples/outputs.tf @@ -0,0 +1,43 @@ +output "database_url" { + value = coalesce( + module.gcp != [] ? module.gcp.0.database_url : null, + module.postgresql != [] ? module.postgresql.0.database_url : null, + ) +} + +output "google_storage_hmac_key" { + value = module.gcp.0.google_storage_hmac_key + sensitive = true +} + +output "postgresql_url" { + value = module.postgresql.0.database_url +} + +output "postgresql_username" { + value = module.postgresql.0.username +} + +output "postgresql_password" { + value = module.postgresql.0.password_secret + sensitive = true +} + +output "redis_url" { + value = module.redis.0.redis_url +} + +output "redis_password" { + value = module.redis.0.password_secret + sensitive = true +} + +output "s3_endpoint_url" { + value = module.aws.0.aws_s3_endpoint_url +} + +output "s3_access_key" { + value = module.aws.0.aws_iam_access_key + sensitive = true +} + diff --git a/gcp.tf b/gcp.tf deleted file mode 100644 index bc9258f..0000000 --- a/gcp.tf +++ /dev/null @@ -1,12 +0,0 @@ -module "gcp" { - count = var.gcp_bucket_name != null ? 1 : 0 - namespace = var.namespace - source = "./modules/gcp" - gcp_bucket_location = var.gcp_bucket_location - gcp_bucket_name = var.gcp_bucket_name - gcp_bucket_public = var.public_storage - gcp_db_instance = var.gcp_db_instance - gcp_sa_extra_roles = var.gcp_sa_extra_roles - gcp_sa_name = var.cloud_sa_name - service_account_name = var.service_account_name -} diff --git a/hpa.tf b/hpa.tf deleted file mode 100644 index 7120a53..0000000 --- a/hpa.tf +++ /dev/null @@ -1,19 +0,0 @@ -resource "kubernetes_horizontal_pod_autoscaler_v1" "hpa" { - for_each = {for k, v in local.deployments : "${var.name}-${k}" => v if v.hpa_max_replicas > 0} - depends_on = [kubernetes_namespace_v1.namespace, module.gcp] - metadata { - name = each.key - namespace = var.namespace - labels = local.common_labels - } - spec { - max_replicas = each.value.hpa_max_replicas - min_replicas = each.value.hpa_min_replicas - target_cpu_utilization_percentage = each.value.hpa_target_cpu - scale_target_ref { - kind = "Deployment" - name = each.key - api_version = "apps/v1" - } - } -} diff --git a/ingress.tf b/ingress.tf deleted file mode 100644 index 9326271..0000000 --- a/ingress.tf +++ /dev/null @@ -1,36 +0,0 @@ -resource "kubernetes_ingress_v1" "ingress" { - count = length(var.ingress) > 0 ? 1 : 0 - metadata { - name = var.name - namespace = var.namespace - annotations = local.ingress_annotations - } - spec { - tls { - hosts = keys(var.ingress) - secret_name = "${var.name}-tls-secret" - } - dynamic "rule" { - for_each = var.ingress - content { - host = rule.key - http { - dynamic "path" { - for_each = rule.value - content { - path = path.key - backend { - service { - name = "${var.name}-${path.value}" - port { - number = 80 - } - } - } - } - } - } - } - } - } -} diff --git a/locals.tf b/locals.tf index 29384d8..967c0f5 100644 --- a/locals.tf +++ b/locals.tf @@ -1,37 +1,29 @@ locals { - # deployments = var.celery_enabled == false ? 
var.deployments : merge(var.deployments, { - deployments = merge({ - "celery-beat" = var.celery_beat_defaults - }, { - "celery-worker" = var.celery_worker_defaults - }, var.deployments) - database_url = coalesce( lookup(var.secret_env, "DATABASE_URL", null), lookup(var.env, "DATABASE_URL", null), - module.gcp != [] ? module.gcp.0.database_url : null, - module.postgresql != [] ? module.postgresql.0.database_url : null, + var.database_url != [] ? var.database_url : null, ) redis_url = coalesce( lookup(var.secret_env, "REDIS_URL", null), lookup(var.env, "REDIS_URL", null), - module.redis != [] ? module.redis.0.redis_url : null, + var.redis_url != [] ? var.redis_url : null, ) - database_url_map = regex("^(?:(?P[^:/?#]+):)?(?://(?P[^/?#:]*):(?P[^/?#:]*)@(?P[^/?#:]*):(?P[0-9]*)/(?P.*))?", local.database_url) + database_url_map = regex("^(?:(?P[^:/?#]+):)?(?://(?P[^/?#:]*):(?P[^/?#:]*)@(?P[^/?#:]*):(?P[0-9]*)/(?P.*))?", var.database_url) gcp_env = var.gcp_add_aws_s3_env == false ? {} : { - AWS_ACCESS_KEY_ID : module.gcp.0.google_storage_hmac_key.access_id - AWS_SECRET_ACCESS_KEY : module.gcp.0.google_storage_hmac_key.secret + AWS_ACCESS_KEY_ID : var.gcp_access_id + AWS_SECRET_ACCESS_KEY : var.gcp_secret AWS_STORAGE_BUCKET_NAME : var.gcp_bucket_name AWS_S3_ENDPOINT_URL : "https://storage.googleapis.com" } aws_env = var.aws_s3_name == null ? {} : { - AWS_ACCESS_KEY_ID : module.aws.0.aws_iam_access_key.id - AWS_SECRET_ACCESS_KEY : module.aws.0.aws_iam_access_key.secret + AWS_ACCESS_KEY_ID : var.aws_id + AWS_SECRET_ACCESS_KEY : var.aws_secret AWS_STORAGE_BUCKET_NAME : var.aws_s3_name - AWS_S3_ENDPOINT_URL : module.aws.0.aws_s3_endpoint_url + AWS_S3_ENDPOINT_URL : var.aws_s3_endpoint_url } env = merge(local.gcp_env, local.aws_env, var.env) @@ -46,28 +38,4 @@ locals { "POSTGRES_DB" = lookup(local.database_url_map, "database") "POSTGRES_PORT" = lookup(local.database_url_map, "port") }, var.secret_env) - - common_labels = merge( - { - "app.kubernetes.io/part-of" : var.name - "app.kubernetes.io/managed-by" : "terraform" - }, - var.extra_labels, - ) - - ingress_annotations = merge({ - "kubernetes.io/ingress.class" = "nginx" - "nginx.ingress.kubernetes.io/tls-acme" = "true" - "nginx.ingress.kubernetes.io/ssl-redirect" = "true" - "nginx.ingress.kubernetes.io/proxy-body-size" = "30m" - "nginx.ingress.kubernetes.io/proxy-read-timeout" = "180s" - "nginx.ingress.kubernetes.io/proxy-write-timeout" = "180s" - "nginx.ingress.kubernetes.io/proxy-connect-timeout" = "180s" - "cert-manager.io/cluster-issuer" = "letsencrypt-prod" - "nginx.ingress.kubernetes.io/enable-cors" : "true" - "nginx.ingress.kubernetes.io/cors-allow-methods" : "DELETE, GET, OPTIONS, PATCH, POST, PUT" - "nginx.ingress.kubernetes.io/cors-allow-headers" : "accept, accept-encoding, accept-language, cache-control, authorization, content-type, dnt, origin, user-agent, x-csrftoken, x-requested-with", - # "nginx.ingress.kubernetes.io/cors-allow-origin" : "https://client.${local.domain},https://expert.${local.domain}, https://book.${local.domain}" - "kubernetes.io/tls-acme" : "true" - }, var.ingress_annotations) } diff --git a/main.tf b/main.tf index e9c8ff1..9c589a6 100644 --- a/main.tf +++ b/main.tf @@ -1,17 +1,225 @@ -# Nothing here, see instead: -# -# gcp.tf -# postgres.tf -# redis.tf -# cloudflare.tf - -terraform { - required_version = ">= 1.3.0" - - required_providers { - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.4.0" +module "aws" { + count = var.aws_s3_name != null ? 
1 : 0 + source = "github.com/alexherp/terraform-aws-django?ref=stable" + aws_s3_name = var.aws_s3_name + aws_s3_public = var.public_storage + aws_sa_name = var.cloud_sa_name +} + +module "gcp" { + count = var.gcp_bucket_name != null ? 1 : 0 + namespace = var.namespace + source = "github.com/alexherp/terraform-gcp-django?ref=stable" + gcp_bucket_location = var.gcp_bucket_location + gcp_bucket_name = var.gcp_bucket_name + gcp_bucket_public = var.public_storage + gcp_db_instance = var.gcp_db_instance + gcp_sa_extra_roles = var.gcp_sa_extra_roles + gcp_sa_name = var.cloud_sa_name + gcp_project_id = var.gcp_project_id + service_account_name = var.service_account_name +} + +module "postgresql" { + count = var.postgres_enabled ? 1 : 0 + depends_on = [module.django] + source = "djangoflow/postgresql/kubernetes" + version = "1.1.2" + image_name = "docker.io/postgres" + security_context_uid = 999 + security_context_gid = 999 + image_tag = "13" + name = var.service_account_name + username = var.service_account_name + namespace = var.namespace + pvc_name = module.django.persistent_volume_claim_name + object_prefix = "postgres" + resources_limits_memory = var.postgres_resources_limits_memory + resources_limits_cpu = var.postgres_resources_limits_cpu + resources_requests_memory = var.postgres_resources_requests_memory + resources_requests_cpu = var.postgres_resources_requests_cpu +} + +module "redis" { + count = var.redis_enabled ? 1 : 0 + source = "djangoflow/redis/kubernetes" + namespace = var.namespace + object_prefix = "redis" + password_required = true + resources_limits_memory = var.redis_resources_limits_memory + resources_limits_cpu = var.redis_resources_limits_cpu + resources_requests_memory = var.redis_resources_requests_memory + resources_requests_cpu = var.redis_resources_requests_cpu +} + +module "cloudflare" { + depends_on = [module.django] + count = var.cloudflare_enabled && length(var.ingress) > 0 ? 1 : 0 + source = "github.com/alexherp/terraform-cloudflare-django?ref=stable" + records = merge({ + for k, v in var.ingress : k => { + value = coalesce(module.django.ingress.0.status.0.load_balancer.0.ingress.0.ip, + module.django.ingress.0.status.0.load_balancer.0.ingress.0.hostname + ) + type = module.django.ingress.0.status.0.load_balancer.0.ingress.0.ip != "" ? "A" : "CNAME" + proxied = false } - } + }, var.gcp_bucket_name != null && var.public_storage == true ? 
{ + (var.gcp_bucket_name) = { + value = "c.storage.googleapis.com" + type = "CNAME" + proxied = true + } + } : {}) +} + +module "django" { + source = "github.com/alexherp/terraform-kubernetes-app-django?ref=v0.0.2" + + # General configurations + name = var.name + namespace = var.namespace + create_namespace = var.create_namespace + extra_labels = var.extra_labels + image_name = var.image_name + image_tag = var.image_tag + image_pull_secrets = var.image_pull_secrets + image_pull_policy = var.image_pull_policy + env = var.env + secret_env = var.secret_env + service_account_name = var.service_account_name + + # GCP configurations + cloud_sa_name = var.cloud_sa_name + gcp_sa_extra_roles = var.gcp_sa_extra_roles + gcp_bucket_name = var.gcp_bucket_name + gcp_bucket_location = var.gcp_bucket_location + public_storage = var.public_storage + gcp_add_aws_s3_env = var.gcp_add_aws_s3_env + gcp_db_instance = var.gcp_db_instance + + # Deployments + deployments = var.deployments + + # Ingress + ingress = var.ingress + ingress_annotations = var.ingress_annotations + + # Cloudflare + cloudflare_enabled = var.cloudflare_enabled + + # Postgres + postgres_enabled = var.postgres_enabled + postgres_storage_size = var.postgres_storage_size + + # Redis + redis_enabled = var.redis_enabled + + # Celery + celery_enabled = var.celery_enabled + celery_db_index = var.celery_db_index + + # AWS S3 + aws_s3_name = var.aws_s3_name + + # Volumes + volumes = var.volumes + + # Security context + security_context_enabled = var.security_context_enabled + security_context_gid = var.security_context_gid + security_context_uid = var.security_context_uid + security_context_fsgroup = var.security_context_fsgroup + + # Service account email + service_account_email = var.service_account_email } + + +/*module "django" { + source = "github.com/alexherp/terraform-kubernetes-app-django?ref=v0.0.2" + + # General configurations + name = "my-django-app" + namespace = "production" + create_namespace = true + extra_labels = { "project" = "my-django-app" } + image_name = "ghcr.io/your-username/django-app" + image_tag = "v1.0" + image_pull_secrets = ["dockerhub"] + image_pull_policy = "Always" + env = { "DJANGO_SETTINGS_MODULE" = "myproject.settings.production" } + secret_env = { "DATABASE_URL" = "postgres://USER:PASSWORD@HOST:PORT/NAME" } + service_account_name = "django-app-service-account" + # GCP configurations + cloud_sa_name = "my-gcp-sa" + gcp_sa_extra_roles = ["roles/storage.objectViewer"] + gcp_bucket_name = "my-gcp-django-static" + gcp_bucket_location = "EU" + public_storage = true + gcp_add_aws_s3_env = false + gcp_db_instance = null + # Deployments + deployments = { + "web" = { + pre_install_migrate = true + pre_install_command = ["python", "manage.py", "migrate"] + replicas = 2 + name = "web" + port = 8000 + resources_requests_cpu = "200m" + resources_requests_memory = "500Mi" + resources_limits_cpu = "500m" + resources_limits_memory = "1Gi" + liveness_probe = { + http_get = { + path = "/healthz/" + port = 8000 + scheme = "HTTP" + } + initial_delay_seconds = 30 + period_seconds = 10 + } + readiness_probe = { + http_get = { + path = "/ready/" + port = 8000 + scheme = "HTTP" + } + initial_delay_seconds = 15 + period_seconds = 5 + } + } + } + # Ingress + ingress = { + "myapp.example.com" = { + "/" = "web" + } + } + ingress_annotations = { + "kubernetes.io/ingress.class" = "nginx" + "nginx.ingress.kubernetes.io/ssl-redirect" = "true" + } + # Cloudflare + cloudflare_enabled = true + # Postgres + postgres_enabled = true + 
postgres_storage_size = "20Gi" + # Redis + redis_enabled = true + # Celery + celery_enabled = true + celery_db_index = "2" + # AWS S3 + aws_s3_name = var.aws_s3_name + # Volumes + volumes = var.volumes + # Security context + security_context_enabled = true + security_context_gid = 1000 + security_context_uid = 1000 + security_context_fsgroup = 1000 + # Service account email + service_account_email = "service-account@example.com" +}*/ diff --git a/modules/aws/iam.tf b/modules/aws/iam.tf deleted file mode 100644 index ecf713b..0000000 --- a/modules/aws/iam.tf +++ /dev/null @@ -1,21 +0,0 @@ -resource "aws_iam_user" "sa" { - name = var.aws_sa_name -} - -resource "aws_iam_access_key" "sa" { - user = aws_iam_user.sa.name -} - -data "aws_iam_policy_document" "default" { - statement { - actions = ["s3:*"] - resources = [aws_s3_bucket.media-bucket.arn, "${aws_s3_bucket.media-bucket.arn}/*"] - effect = "Allow" - } -} - -resource "aws_iam_user_policy" "storage" { - name = aws_iam_user.sa.name - user = aws_iam_user.sa.name - policy = join("", data.aws_iam_policy_document.default.*.json) -} diff --git a/modules/aws/main.tf b/modules/aws/main.tf deleted file mode 100644 index f2702bf..0000000 --- a/modules/aws/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - } -} diff --git a/modules/aws/outputs.tf b/modules/aws/outputs.tf deleted file mode 100644 index 04b5557..0000000 --- a/modules/aws/outputs.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "aws_iam_access_key" { - value = aws_iam_access_key.sa -} - -output "aws_s3_endpoint_url" { - value = "https://s3.${aws_s3_bucket.media-bucket.region}.amazonaws.com" -} diff --git a/modules/aws/storage.tf b/modules/aws/storage.tf deleted file mode 100644 index 8e9580c..0000000 --- a/modules/aws/storage.tf +++ /dev/null @@ -1,24 +0,0 @@ -# TODO: support aws_s3_public -resource "aws_s3_bucket" "media-bucket" { - bucket = var.aws_s3_name -} - -resource "aws_s3_bucket_cors_configuration" "media-bucket" { - bucket = aws_s3_bucket.media-bucket.bucket - - cors_rule { - allowed_headers = [] - allowed_methods = ["GET"] - allowed_origins = ["*"] - expose_headers = ["Content-Type"] - } -} - -resource "aws_s3_bucket_website_configuration" "media-bucket" { - bucket = aws_s3_bucket.media-bucket.bucket - - index_document { - suffix = "index.html" - } -} - diff --git a/modules/aws/variables.tf b/modules/aws/variables.tf deleted file mode 100644 index 7aa081d..0000000 --- a/modules/aws/variables.tf +++ /dev/null @@ -1,11 +0,0 @@ -variable "aws_sa_name" { - type = string -} - -variable "aws_s3_name" { - type = string -} - -variable "aws_s3_public" { - default = true -} diff --git a/modules/cloudflare/main.tf b/modules/cloudflare/main.tf deleted file mode 100644 index 115d973..0000000 --- a/modules/cloudflare/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -terraform { - required_providers { - cloudflare = { - source = "cloudflare/cloudflare" - version = "~> 3.0" - } - } -} - -data "cloudflare_zones" "zones" { - filter {} -} - -locals { - zones = {for zone in data.cloudflare_zones.zones.zones : zone.name => zone.id} -} - - -resource "cloudflare_record" "record" { - for_each = var.records - name = each.key - type = each.value.type - value = each.value.value - proxied = each.value.proxied - zone_id = lookup(local.zones, regex(".*?([^.]+\\.[^.]+)$", each.key)[0]) - lifecycle { - ignore_changes = [zone_id] - } -} diff --git a/modules/cloudflare/variables.tf b/modules/cloudflare/variables.tf deleted file mode 100644 index 
6d15a46..0000000 --- a/modules/cloudflare/variables.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable "records" { - type = map(object({ - type = string - value = string - proxied = bool - })) - description = "List of records to create" - default = {} -} diff --git a/modules/gcp/database.tf b/modules/gcp/database.tf deleted file mode 100644 index 452671b..0000000 --- a/modules/gcp/database.tf +++ /dev/null @@ -1,23 +0,0 @@ -data "google_sql_database_instance" "db_instance" { - count = var.gcp_db_instance != null ? 1 : 0 - name = var.gcp_db_instance -} -resource "google_sql_database" "db" { - count = var.gcp_db_instance != null ? 1 : 0 - instance = var.gcp_db_instance - name = var.service_account_name -} - -resource "google_sql_user" "db_user" { - count = var.gcp_db_instance != null ? 1 : 0 - instance = var.gcp_db_instance - name = var.service_account_name - password = random_password.db_password.0.result -} - -resource "random_password" "db_password" { - count = var.gcp_db_instance != null ? 1 : 0 - length = 16 - special = false -} - diff --git a/modules/gcp/google_sa.tf b/modules/gcp/google_sa.tf deleted file mode 100644 index 180a729..0000000 --- a/modules/gcp/google_sa.tf +++ /dev/null @@ -1,35 +0,0 @@ -locals { - service_account_member = "serviceAccount:${google_service_account.sa.0.project}.svc.id.goog[${var.namespace}/${var.service_account_name}]" -} - -resource "google_service_account" "sa" { - count = var.gcp_sa_name != null ? 1 : 0 - account_id = var.gcp_sa_name - display_name = "Django Service Account ${var.gcp_sa_name}" -} - -resource "google_storage_hmac_key" "hmac_key" { - depends_on = [google_service_account.sa] - service_account_email = google_service_account.sa.0.email -} - -resource "google_service_account_iam_member" "role" { - service_account_id = google_service_account.sa.0.name - role = "roles/iam.workloadIdentityUser" - member = local.service_account_member -} - -resource "google_project_iam_member" "extra_roles" { - depends_on = [google_service_account.sa] - for_each = {for k, v in var.gcp_sa_extra_roles : k => v} - project = google_service_account.sa.0.project - role = each.value - member = "serviceAccount:${google_service_account.sa.0.email}" -} - -# DEPRECATED -# No longer required with workload identity -#resource "google_service_account_key" "sa_key" { -# depends_on = [google_service_account.sa] -# service_account_id = google_service_account.sa.0.name -#} diff --git a/modules/gcp/main.tf b/modules/gcp/main.tf deleted file mode 100644 index 514d214..0000000 --- a/modules/gcp/main.tf +++ /dev/null @@ -1,7 +0,0 @@ -terraform { - required_providers { - google = { - source = "hashicorp/google" - } - } -} diff --git a/modules/gcp/outputs.tf b/modules/gcp/outputs.tf deleted file mode 100644 index 56abc33..0000000 --- a/modules/gcp/outputs.tf +++ /dev/null @@ -1,17 +0,0 @@ -output "sa_email" { - value = google_service_account.sa.0.email -} - -output "database_url" { - value = var.gcp_db_instance == null ? 
null : "postgres://${var.service_account_name}:${random_password.db_password.0.result}@${data.google_sql_database_instance.db_instance.0.private_ip_address}:5432/${var.service_account_name}" -} - -output "google_storage_hmac_key" { - value = google_storage_hmac_key.hmac_key -} - -# DEPRECATED -# No longer required with workload identity -#output "sa_private_key" { -# value = google_service_account_key.sa_key.private_key -#} diff --git a/modules/gcp/storage.tf b/modules/gcp/storage.tf deleted file mode 100644 index a0a2963..0000000 --- a/modules/gcp/storage.tf +++ /dev/null @@ -1,44 +0,0 @@ - -resource "google_storage_bucket" "media-bucket" { - count = var.gcp_bucket_name != null ? 1 : 0 - name = var.gcp_bucket_name - location = var.gcp_bucket_location - storage_class = "MULTI_REGIONAL" - uniform_bucket_level_access = true - cors { - max_age_seconds = 3600 - method = [ - "GET", - ] - origin = [ - "*", - ] - response_header = [ - "Content-Type", - ] - } - - website { - main_page_suffix = "index.html" - } -} - -resource "google_storage_bucket_iam_binding" "media-bucket-binding-admin" { - count = var.gcp_bucket_name != null && var.gcp_sa_name != null ? 1 : 0 - depends_on = [google_service_account.sa.0] - bucket = google_storage_bucket.media-bucket.0.name - role = "roles/storage.objectAdmin" - members = [ - "serviceAccount:${google_service_account.sa.0.email}" - ] -} - -resource "google_storage_bucket_iam_binding" "media-bucket-binding-public" { - count = var.gcp_bucket_name != null && var.gcp_sa_name != null && var.gcp_bucket_public == true ? 1 : 0 - depends_on = [google_service_account.sa.0] - bucket = google_storage_bucket.media-bucket.0.name - role = "roles/storage.objectViewer" - members = [ - "allUsers" - ] -} diff --git a/modules/gcp/variables.tf b/modules/gcp/variables.tf deleted file mode 100644 index 8c79d05..0000000 --- a/modules/gcp/variables.tf +++ /dev/null @@ -1,37 +0,0 @@ -variable "namespace" { - type = string -} - -variable "gcp_sa_name" { - type = string - default = null -} - -variable "gcp_bucket_name" { - type = string - default = null -} - -variable "service_account_name" { - type = string - default = null -} - -variable "gcp_db_instance" { - type = string - default = null - description = "Create a database and a user for this installation and use them instead of DATABASE_URL" -} - -variable "gcp_sa_extra_roles" { - type = list(string) - default = [] -} - -variable "gcp_bucket_public" { - default = true -} - -variable "gcp_bucket_location" { - type = string -} diff --git a/namespace.tf b/namespace.tf deleted file mode 100644 index d7b5251..0000000 --- a/namespace.tf +++ /dev/null @@ -1,6 +0,0 @@ -resource "kubernetes_namespace_v1" "namespace" { - count = var.create_namespace ? 1 : 0 - metadata { - name = var.namespace - } -} diff --git a/outputs.tf b/outputs.tf index 9d762a1..76f0b84 100644 --- a/outputs.tf +++ b/outputs.tf @@ -4,3 +4,49 @@ output "database_url" { module.postgresql != [] ? 
module.postgresql.0.database_url : null,
   )
 }
+
+output "google_storage_hmac_key" {
+  value = module.gcp.0.google_storage_hmac_key
+  sensitive = true
+}
+
+output "postgresql_url" {
+  value = module.postgresql.0.database_url
+}
+
+output "postgresql_username" {
+  value = module.postgresql.0.username
+}
+
+output "postgresql_password" {
+  value = module.postgresql.0.password_secret
+  sensitive = true
+}
+
+output "redis_url" {
+  value = module.redis.0.redis_url
+}
+
+output "redis_password" {
+  value = module.redis.0.password_secret
+  sensitive = true
+}
+
+output "s3_endpoint_url" {
+  value = module.aws.0.aws_s3_endpoint_url
+}
+
+output "s3_access_key" {
+  value = module.aws.0.aws_iam_access_key
+  sensitive = true
+}
+
diff --git a/pdb.tf b/pdb.tf
deleted file mode 100644
index 6277f22..0000000
--- a/pdb.tf
+++ /dev/null
@@ -1,18 +0,0 @@
-resource "kubernetes_pod_disruption_budget_v1" "pdb" {
-  for_each = {for k, v in local.deployments : "${var.name}-${k}" => v if v.pdb_min_available > 0}
-  depends_on = [kubernetes_namespace_v1.namespace, module.gcp]
-  metadata {
-    name = each.key
-    namespace = var.namespace
-    labels = local.common_labels
-  }
-  spec {
-    min_available = each.value.pdb_min_available
-    selector {
-      match_labels = merge(local.common_labels, {
-        "app.kubernetes.io/name" = each.key
-        "app.kubernetes.io/instance" = var.image_tag
-      })
-    }
-  }
-}
diff --git a/postgres.tf b/postgres.tf
deleted file mode 100644
index e21ae8f..0000000
--- a/postgres.tf
+++ /dev/null
@@ -1,38 +0,0 @@
-resource "kubernetes_persistent_volume_claim_v1" "pgdata" {
-  count = var.postgres_enabled ? 1 : 0
-  metadata {
-    name = "postgres-data"
-    namespace = var.namespace
-    labels = local.common_labels
-  }
-  wait_until_bound = false
-  spec {
-    access_modes = ["ReadWriteOnce"]
-    resources {
-      requests = {
-        storage = var.postgres_storage_size
-      }
-    }
-  }
-}
-
-
-module "postgresql" {
-  count = var.postgres_enabled ? 1 : 0
-  depends_on = [kubernetes_persistent_volume_claim_v1.pgdata]
-  source = "djangoflow/postgresql/kubernetes"
-  version = "1.1.2"
-  image_name = "docker.io/postgres"
-  security_context_uid = 999
-  security_context_gid = 999
-  image_tag = "13"
-  name = var.service_account_name
-  username = var.service_account_name
-  namespace = var.namespace
-  pvc_name = kubernetes_persistent_volume_claim_v1.pgdata.0.metadata.0.name
-  object_prefix = "postgres"
-  resources_limits_memory = var.postgres_resources_limits_memory
-  resources_limits_cpu = var.postgres_resources_limits_cpu
-  resources_requests_memory = var.postgres_resources_requests_memory
-  resources_requests_cpu = var.postgres_resources_requests_cpu
-}
diff --git a/redis.tf b/redis.tf
deleted file mode 100644
index 750ad61..0000000
--- a/redis.tf
+++ /dev/null
@@ -1,11 +0,0 @@
-module "redis" {
-  count = var.redis_enabled ?
1 : 0 - source = "djangoflow/redis/kubernetes" - namespace = var.namespace - object_prefix = "redis" - password_required = true - resources_limits_memory = var.redis_resources_limits_memory - resources_limits_cpu = var.redis_resources_limits_cpu - resources_requests_memory = var.redis_resources_requests_memory - resources_requests_cpu = var.redis_resources_requests_cpu -} diff --git a/secrets.tf b/secrets.tf deleted file mode 100644 index b025e04..0000000 --- a/secrets.tf +++ /dev/null @@ -1,8 +0,0 @@ -resource "kubernetes_secret_v1" "secrets" { - depends_on = [kubernetes_namespace_v1.namespace] - metadata { - name = "${var.name}-secrets" - namespace = var.namespace - } - data = local.secret_env -} diff --git a/service_account.tf b/service_account.tf deleted file mode 100644 index b031deb..0000000 --- a/service_account.tf +++ /dev/null @@ -1,13 +0,0 @@ -resource "kubernetes_service_account_v1" "service_account" { - depends_on = [kubernetes_namespace_v1.namespace, module.gcp] - count = var.service_account_name != null ? 1 : 0 - metadata { - name = var.service_account_name - namespace = var.namespace - labels = local.common_labels - annotations = { - "iam.gke.io/gcp-service-account" = length(module.gcp) > 0 ? module.gcp.0.sa_email : null - } - } - automount_service_account_token = false -} diff --git a/variables.tf b/variables.tf index 1c2ecc0..41159a3 100644 --- a/variables.tf +++ b/variables.tf @@ -52,6 +52,7 @@ variable "env" { variable "secret_env" { type = map(string) description = "A map of extra secret environment variables" + default = {} } variable "service_account_name" { @@ -222,12 +223,11 @@ variable "liveness_probe" { variable "ingress" { type = map(map(string)) description = "A map of hostnames with maps of path-names and services" - # Example: - # default = { - # "api.demo.djangoflow.com": { - # "/": "api" - # } - # } + default = { + "api.demo.djangoflow.com" : { + "/" : "api" + } + } } variable "ingress_annotations" { @@ -384,13 +384,86 @@ variable "security_context_enabled" { } variable "security_context_gid" { + type = number default = 101 } variable "security_context_uid" { + type = number default = 101 } variable "security_context_fsgroup" { default = null } + +variable "aws_region" { + type = string + description = "AWS region" + default = "" +} + +variable "cloudflare_api_token" { + type = string + description = "Cloudflare API token" + default = "12321321332145325-435325432543254325-4532542353254325-" +} + +variable "gcp_region" { + type = string + description = "GCP region" + default = "" +} + +variable "database_url" { + type = string + description = "Database URL" + default = "" +} + +variable "redis_url" { + type = string + description = "Redis URL" + default = "" +} + +variable "aws_secret" { + type = string + description = "AWS secret" + default = "" +} + +variable "aws_id" { + type = string + description = "AWS id" + default = "" +} + +variable "aws_s3_endpoint_url" { + type = string + description = "AWS S3 endpoint URL" + default = "" +} + +variable "gcp_access_id" { + type = string + description = "GCP access id" + default = "" +} + +variable "gcp_secret" { + type = string + description = "GCP secret" + default = "" +} + +variable "service_account_email" { + type = string + description = "Service account email" + default = "example@project.iam.gserviceaccount.com" +} + +variable "gcp_project_id" { + type = string + description = "The GCP project ID" +} diff --git a/versions.tf b/versions.tf new file mode 100644 index 0000000..e629d71 --- /dev/null +++ 
b/versions.tf @@ -0,0 +1,38 @@ +terraform { + backend "local" { + path = "terraform.tfstate" + } + + required_version = ">= 1.3.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.4.0" + } + cloudflare = { + source = "cloudflare/cloudflare" + version = ">= 2.0.0" + } + aws = { + source = "hashicorp/aws" + version = ">= 3.0.0" + } + google = { + source = "hashicorp/google" + version = ">= 3.0.0" + } + } +} + +provider "aws" { + region = var.aws_region +} + +provider "cloudflare" { + api_token = var.cloudflare_api_token +} + +provider "google" { + region = var.gcp_region +}