Merge branch 'master' into feature/sysctl_tcp_keepalice_config
patduin authored Jun 28, 2024
2 parents 22bae59 + f695b43 commit c8440c7
Showing 22 changed files with 450 additions and 59 deletions.
53 changes: 52 additions & 1 deletion CHANGELOG.md
@@ -3,10 +3,61 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

## [7.0.1] - 2024-01-18

## [7.2.1] - 2024-06-28
### Added
- TCP keepalive configuration to stop the NLB from closing idle connections, fixing an issue where requests could hit a 10-minute connection timeout; see the sketch below. Similar to the issue explained here: https://paramount.tech/blog/2021/07/26/mitigation-of-connection-reset-in-aws.html
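
A minimal sketch of how the keepalive tuning might look, assuming the `enable_sysctl_config_in_eks` toggle used by the k8s deployments further down and purely illustrative timing values (both the pod `security_context` excerpt and the numbers are assumptions, not the module's exact settings):

```hcl
# Hypothetical excerpt of the kubernetes_deployment_v1 pod spec.
# The net.ipv4.tcp_keepalive_* sysctls are namespaced, but kubelets may need
# them allow-listed (--allowed-unsafe-sysctls) before pods can set them.
dynamic "security_context" {
  for_each = var.enable_sysctl_config_in_eks ? ["enabled"] : []

  content {
    sysctl {
      # Start probing well inside the load balancer's idle timeout.
      name  = "net.ipv4.tcp_keepalive_time"
      value = "200"
    }
    sysctl {
      name  = "net.ipv4.tcp_keepalive_intvl"
      value = "30"
    }
    sysctl {
      name  = "net.ipv4.tcp_keepalive_probes"
      value = "2"
    }
  }
}
```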

## [7.2.0] - 2024-06-26
### Added
- Added `hms_ro_datanucleus_connection_pooling_type`, `hms_rw_datanucleus_connection_pooling_type`, `hms_ro_datanucleus_connection_pool_config`, `hms_rw_datanucleus_connection_pool_config`, and `hms_housekeeper_db_connection_pool_size` variables to allow specifying the connection pooling driver and its configuration (usage sketch below).
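
A hedged usage sketch, assuming HikariCP as the DataNucleus pooling driver and made-up pool-config keys (the config map entries are passed through to the HMS containers as environment variables, so the real keys depend on the HMS image):

```hcl
module "apiary" {
  source = "..." # module source and version elided

  hms_rw_datanucleus_connection_pooling_type = "HikariCP"
  hms_ro_datanucleus_connection_pooling_type = "HikariCP"
  hms_housekeeper_db_connection_pool_size    = 5

  # Each key/value pair becomes an env var on the read-write HMS container
  # (these names are illustrative, not the module's documented keys).
  hms_rw_datanucleus_connection_pool_config = {
    HIKARICP_CONNECTION_TIMEOUT = "30000"
    HIKARICP_MAX_LIFETIME       = "1800000"
  }
}
```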

## [7.1.9] - 2024-06-20
### Fixed
- Housekeeper deployment no longer uses the common `HADOOP_HEAPSIZE` variable, since it runs in a low-memory container.

## [7.1.8] - 2024-06-19
### Added
- `hms_housekeeper_additional_environment_variables` variable to allow adding extra environment variables to the `hms-housekeeper` deployment; see the sketch below.
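
The new variable feeds a `dynamic "env"` block (visible in the k8s-housekeeper.tf hunk further down), so it behaves as a map of name/value pairs; a hedged tfvars-style sketch with made-up names:

```hcl
# Illustrative only; use whatever variable names the housekeeper image understands.
hms_housekeeper_additional_environment_variables = {
  HOUSEKEEPER_LOG_LEVEL = "INFO"
  JAVA_TOOL_OPTIONS     = "-XX:+UseG1GC"
}
```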

## [7.1.7] - 2024-06-04
### Fixed
- Fixed k8s IRSA.
- Changed k8s service account creation to be compatible with newer Kubernetes provider versions (on EKS 1.24 and later, creating a service account no longer creates an account token automatically).

## [7.1.6] - 2024-05-31
### Added
- Add `apiary_domain_private_zone` to provide the option of using a private or public Route 53 zone.

## [7.1.5] - 2024-05-22
### Fixed
- Add `copy_tags_to_snapshot` to aws_rds_cluster.

## [7.1.4] - 2024-05-06
### Fixed
- Change provider version for `kubernetes`.

## [7.1.3] - 2024-04-03
### Fixed
- Add tags to ECS services.

## [7.1.2] - 2024-04-03
### Fixed
- Added provider source for `datadog`.

## [7.1.1] - 2024-04-03
### Fixed
- Renamed variable from `common_producer_iamroles` to `apiary_common_producer_iamroles` to make the name consistent.
- Change default value for `datadog_key_secret_name` from `null` to `""`.

## [7.1.0] - 2024-03-21
### Added
- Added `common_producer_iamroles` to allow roles read-write access to all Apiary managed schemas.

## [7.0.1] - 2024-01-22
### Added
- Added `datadog-agent` for HMS-Readonly and HMS-Readwrite in ECS.

## [7.0.0] - 2023-11-16
### Changed
- Changed `k8s` API to work with provider 2.x
36 changes: 29 additions & 7 deletions VARIABLES.md

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions cloudwatch.tf
@@ -4,6 +4,12 @@
* Licensed under the Apache License, Version 2.0 (the "License");
*/

resource "aws_cloudwatch_log_group" "ecs" {
count = var.hms_instance_type == "ecs" ? 1 : 0
name = local.instance_alias
tags = var.apiary_tags
}

data "template_file" "s3_widgets" {
count = length(local.schemas_info)

28 changes: 25 additions & 3 deletions common.tf
@@ -9,6 +9,8 @@ locals {
apiary_bucket_prefix = "${local.instance_alias}-${data.aws_caller_identity.current.account_id}-${data.aws_region.current.name}"
apiary_assume_role_bucket_prefix = [for assumerole in var.apiary_assume_roles : "${local.instance_alias}-${data.aws_caller_identity.current.account_id}-${lookup(assumerole, "allow_cross_region_access", false) ? "*" : data.aws_region.current.name}"]
enable_route53_records = var.apiary_domain_name == "" ? false : true

datadog_tags = join(" ", formatlist("%s:%s", keys(var.apiary_tags), values(var.apiary_tags)))
#
# Create a new list of maps with some extra attributes needed later
#
@@ -74,7 +76,27 @@ data "aws_vpc" "apiary_vpc" {
}

data "aws_route53_zone" "apiary_zone" {
count = local.enable_route53_records ? 1 : 0
name = var.apiary_domain_name
vpc_id = var.vpc_id
count = local.enable_route53_records ? 1 : 0
name = var.apiary_domain_name
private_zone = var.apiary_domain_private_zone
}

data "aws_secretsmanager_secret" "datadog_key" {
count = length(var.datadog_key_secret_name) > 0 ? 1 : 0
name = var.datadog_key_secret_name
}

data "aws_secretsmanager_secret_version" "datadog_key" {
count = length(var.datadog_key_secret_name) > 0 ? 1 : 0
secret_id = data.aws_secretsmanager_secret.datadog_key[0].id
}

data "external" "datadog_key" {
count = length(var.datadog_key_secret_name) > 0 ? 1 : 0
program = ["echo", "${data.aws_secretsmanager_secret_version.datadog_key[0].secret_string}"]
}

provider "datadog" {
api_key = chomp(data.external.datadog_key[0].result["api_key"])
app_key = chomp(data.external.datadog_key[0].result["app_key"])
}
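
The `external` data source above simply echoes the secret string, so the secret is expected to already contain a JSON object whose keys match what the `datadog` provider block reads (`api_key` and `app_key`). A hedged sketch of how such a secret version might be written; the secret name and values are placeholders:

```hcl
# Hypothetical: the secret referenced by var.datadog_key_secret_name holding
# the Datadog API and app keys as a JSON document.
resource "aws_secretsmanager_secret_version" "datadog_key_example" {
  secret_id = "apiary-datadog-keys" # assumed secret name
  secret_string = jsonencode({
    api_key = "<datadog-api-key>"
    app_key = "<datadog-app-key>"
  })
}
```
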
1 change: 1 addition & 0 deletions db.tf
@@ -79,6 +79,7 @@ resource "aws_rds_cluster" "apiary_cluster" {
apply_immediately = var.db_apply_immediately
db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.apiary_rds_param_group.name
storage_encrypted = var.encrypt_db
copy_tags_to_snapshot = var.db_copy_tags_to_snapshot
lifecycle {
create_before_destroy = true
}
4 changes: 4 additions & 0 deletions ecs.tf
@@ -65,6 +65,8 @@ resource "aws_ecs_service" "apiary_hms_readwrite_service" {
service_registries {
registry_arn = aws_service_discovery_service.hms_readwrite[0].arn
}

tags = var.apiary_tags
}

resource "aws_ecs_service" "apiary_hms_readonly_service" {
@@ -90,4 +92,6 @@ resource "aws_ecs_service" "apiary_hms_readonly_service" {
service_registries {
registry_arn = aws_service_discovery_service.hms_readonly[0].arn
}

tags = var.apiary_tags
}
9 changes: 6 additions & 3 deletions iam.tf
@@ -59,7 +59,8 @@ resource "aws_iam_role" "apiary_hms_readonly" {
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${var.oidc_provider}:sub": "system:serviceaccount:${var.metastore_namespace}:${local.hms_alias}-readonly"
"${var.oidc_provider}:sub": "system:serviceaccount:${var.metastore_namespace}:${local.hms_alias}-readonly",
"${var.oidc_provider}:aud": "sts.amazonaws.com"
}
}
},
@@ -109,7 +110,8 @@ resource "aws_iam_role" "apiary_hms_readwrite" {
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${var.oidc_provider}:sub": "system:serviceaccount:${var.metastore_namespace}:${local.hms_alias}-readwrite"
"${var.oidc_provider}:sub": "system:serviceaccount:${var.metastore_namespace}:${local.hms_alias}-readwrite",
"${var.oidc_provider}:aud": "sts.amazonaws.com"
}
}
},
@@ -159,7 +161,8 @@ resource "aws_iam_role" "apiary_s3_inventory" {
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${var.oidc_provider}:sub": "system:serviceaccount:${var.metastore_namespace}:${local.instance_alias}-s3-inventory"
"${var.oidc_provider}:sub": "system:serviceaccount:${var.metastore_namespace}:${local.instance_alias}-s3-inventory",
"${var.oidc_provider}:aud": "sts.amazonaws.com"
}
}
},
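
The added `aud` condition above is the IAM half of the IRSA fix; the Kubernetes half is a service account annotated with the role ARN. A minimal sketch, using resource names that mirror the module's conventions rather than its exact definitions:

```hcl
# Hypothetical shape of the read-only HMS service account wired up for IRSA.
resource "kubernetes_service_account_v1" "hms_readonly" {
  metadata {
    # Must match the "sub" condition in the trust policy above.
    name      = "${local.hms_alias}-readonly"
    namespace = var.metastore_namespace
    annotations = {
      "eks.amazonaws.com/role-arn" = aws_iam_role.apiary_hms_readonly.arn
    }
  }
  # On EKS 1.24+ no long-lived token Secret is created automatically, which is
  # why the deployments set automount_service_account_token = true.
}
```
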
4 changes: 2 additions & 2 deletions k8s-cronjobs.tf
@@ -29,12 +29,12 @@ resource "kubernetes_cron_job" "apiary_inventory" {
name = "${local.instance_alias}-s3-inventory"
}
annotations = {
"iam.amazonaws.com/role" = aws_iam_role.apiary_s3_inventory.name
"iam.amazonaws.com/role" = var.oidc_provider == "" ? aws_iam_role.apiary_s3_inventory.name : null
}
}

spec {
service_account_name = kubernetes_service_account.s3_inventory[0].metadata.0.name
service_account_name = kubernetes_service_account_v1.s3_inventory[0].metadata.0.name
automount_service_account_token = true
container {
image = "${var.hms_docker_image}:${var.hms_docker_version}"
25 changes: 22 additions & 3 deletions k8s-housekeeper.tf
@@ -32,15 +32,15 @@ resource "kubernetes_deployment_v1" "apiary_hms_housekeeper" {
"ad.datadoghq.com/${local.hms_alias}-housekeeper.check_names" = var.datadog_metrics_enabled ? "[\"prometheus\"]" : null
"ad.datadoghq.com/${local.hms_alias}-housekeeper.init_configs" = var.datadog_metrics_enabled ? "[{}]" : null
"ad.datadoghq.com/${local.hms_alias}-housekeeper.instances" = var.datadog_metrics_enabled ? "[{ \"prometheus_url\": \"http://%%host%%:${var.datadog_metrics_port}/actuator/prometheus\", \"namespace\": \"hms_readwrite\", \"metrics\": [ \"${join("\",\"", var.datadog_metrics_hms_readwrite_readonly)}\" ] , \"type_overrides\": { \"${join("\": \"gauge\",\"", var.datadog_metrics_hms_readwrite_readonly)}\": \"gauge\"} }]" : null
"iam.amazonaws.com/role" = aws_iam_role.apiary_hms_readwrite.name
"iam.amazonaws.com/role" = var.oidc_provider == "" ? aws_iam_role.apiary_hms_readwrite.name : null
"prometheus.io/path" = "/metrics"
"prometheus.io/port" = "8080"
"prometheus.io/scrape" = "true"
}
}

spec {
service_account_name = kubernetes_service_account.hms_readwrite[0].metadata.0.name
service_account_name = kubernetes_service_account_v1.hms_readwrite[0].metadata.0.name
automount_service_account_token = true
dynamic "init_container" {
for_each = var.external_database_host == "" ? ["enabled"] : []
@@ -111,7 +111,7 @@ resource "kubernetes_deployment_v1" "apiary_hms_housekeeper" {
}
env {
name = "HADOOP_HEAPSIZE"
value = local.hms_rw_heapsize
value = "1740"
}
env {
name = "AWS_REGION"
@@ -134,6 +134,25 @@ resource "kubernetes_deployment_v1" "apiary_hms_housekeeper" {
value = var.enable_hms_housekeeper ? "true" : ""
}

env {
name = "DATANUCLEUS_CONNECTION_POOLING_TYPE"
value = var.hms_rw_datanucleus_connection_pooling_type
}

env {
name = "DATANUCLEUS_CONNECTION_POOL_MAX_POOLSIZE"
value = var.hms_housekeeper_db_connection_pool_size
}

dynamic "env" {
for_each = var.hms_housekeeper_additional_environment_variables

content {
name = env.key
value = env.value
}
}

liveness_probe {
tcp_socket {
port = var.hive_metastore_port
26 changes: 20 additions & 6 deletions k8s-readonly.tf
@@ -32,15 +32,15 @@ resource "kubernetes_deployment_v1" "apiary_hms_readonly" {
"ad.datadoghq.com/${local.hms_alias}-readonly.check_names" = var.datadog_metrics_enabled ? "[\"prometheus\"]" : null
"ad.datadoghq.com/${local.hms_alias}-readonly.init_configs" = var.datadog_metrics_enabled ? "[{}]" : null
"ad.datadoghq.com/${local.hms_alias}-readonly.instances" = var.datadog_metrics_enabled ? "[{ \"prometheus_url\": \"http://%%host%%:${var.datadog_metrics_port}/actuator/prometheus\", \"namespace\": \"hms_readonly\", \"metrics\": [ \"${join("\",\"", var.datadog_metrics_hms_readwrite_readonly)}\" ] , \"type_overrides\": { \"${join("\": \"gauge\",\"", var.datadog_metrics_hms_readwrite_readonly)}\": \"gauge\"} }]" : null
"iam.amazonaws.com/role" = aws_iam_role.apiary_hms_readonly.name
"iam.amazonaws.com/role" = var.oidc_provider == "" ? aws_iam_role.apiary_hms_readonly.name : null
"prometheus.io/path" = "/metrics"
"prometheus.io/port" = "8080"
"prometheus.io/scrape" = "true"
}
}

spec {
service_account_name = kubernetes_service_account.hms_readonly[0].metadata.0.name
service_account_name = kubernetes_service_account_v1.hms_readonly[0].metadata.0.name
automount_service_account_token = true
dynamic "security_context" {
for_each = var.enable_sysctl_config_in_eks ? ["enabled"] : []
@@ -191,10 +191,6 @@ resource "kubernetes_deployment_v1" "apiary_hms_readonly" {
name = "HMS_MAX_THREADS"
value = local.hms_ro_maxthreads
}
env {
name = "MYSQL_CONNECTION_POOL_SIZE"
value = var.hms_ro_db_connection_pool_size
}
env {
name = "HMS_AUTOGATHER_STATS"
value = "false"
@@ -203,6 +199,15 @@ resource "kubernetes_deployment_v1" "apiary_hms_readonly" {
name = "LIMIT_PARTITION_REQUEST_NUMBER"
value = var.hms_ro_request_partition_limit == "" ? "" : var.hms_ro_request_partition_limit
}
env {
name = "DATANUCLEUS_CONNECTION_POOLING_TYPE"
value = var.hms_ro_datanucleus_connection_pooling_type
}
env {
name = "DATANUCLEUS_CONNECTION_POOL_MAX_POOLSIZE"
value = var.hms_ro_db_connection_pool_size
}

dynamic "env" {
for_each = var.hms_additional_environment_variables

@@ -212,6 +217,15 @@ resource "kubernetes_deployment_v1" "apiary_hms_readonly" {
}
}

dynamic "env" {
for_each = var.hms_ro_datanucleus_connection_pool_config

content {
name = env.key
value = env.value
}
}

liveness_probe {
tcp_socket {
port = var.hive_metastore_port
26 changes: 20 additions & 6 deletions k8s-readwrite.tf
@@ -32,15 +32,15 @@ resource "kubernetes_deployment_v1" "apiary_hms_readwrite" {
"ad.datadoghq.com/${local.hms_alias}-readwrite.check_names" = var.datadog_metrics_enabled ? "[\"prometheus\"]" : null
"ad.datadoghq.com/${local.hms_alias}-readwrite.init_configs" = var.datadog_metrics_enabled ? "[{}]" : null
"ad.datadoghq.com/${local.hms_alias}-readwrite.instances" = var.datadog_metrics_enabled ? "[{ \"prometheus_url\": \"http://%%host%%:${var.datadog_metrics_port}/actuator/prometheus\", \"namespace\": \"hms_readwrite\", \"metrics\": [ \"${join("\",\"", var.datadog_metrics_hms_readwrite_readonly)}\" ] , \"type_overrides\": { \"${join("\": \"gauge\",\"", var.datadog_metrics_hms_readwrite_readonly)}\": \"gauge\"} }]" : null
"iam.amazonaws.com/role" = aws_iam_role.apiary_hms_readwrite.name
"iam.amazonaws.com/role" = var.oidc_provider == "" ? aws_iam_role.apiary_hms_readwrite.name : null
"prometheus.io/path" = "/metrics"
"prometheus.io/port" = "8080"
"prometheus.io/scrape" = "true"
}
}

spec {
service_account_name = kubernetes_service_account.hms_readwrite[0].metadata.0.name
service_account_name = kubernetes_service_account_v1.hms_readwrite[0].metadata.0.name
automount_service_account_token = true
dynamic "security_context" {
for_each = var.enable_sysctl_config_in_eks ? ["enabled"] : []
@@ -231,10 +231,6 @@ resource "kubernetes_deployment_v1" "apiary_hms_readwrite" {
name = "DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES"
value = var.disallow_incompatible_col_type_changes
}
env {
name = "MYSQL_CONNECTION_POOL_SIZE"
value = var.hms_rw_db_connection_pool_size
}
env {
name = "HMS_AUTOGATHER_STATS"
value = var.hms_autogather_stats
@@ -243,6 +239,15 @@ resource "kubernetes_deployment_v1" "apiary_hms_readwrite" {
name = "LIMIT_PARTITION_REQUEST_NUMBER"
value = var.hms_rw_request_partition_limit == "" ? "" : var.hms_rw_request_partition_limit
}
env {
name = "DATANUCLEUS_CONNECTION_POOLING_TYPE"
value = var.hms_rw_datanucleus_connection_pooling_type
}
env {
name = "DATANUCLEUS_CONNECTION_POOL_MAX_POOLSIZE"
value = var.hms_rw_db_connection_pool_size
}

dynamic "env" {
for_each = var.hms_additional_environment_variables

@@ -252,6 +257,15 @@ resource "kubernetes_deployment_v1" "apiary_hms_readwrite" {
}
}

dynamic "env" {
for_each = var.hms_rw_datanucleus_connection_pool_config

content {
name = env.key
value = env.value
}
}

liveness_probe {
tcp_socket {
port = var.hive_metastore_port