Enable Hive Metastore lock housekeeper (#238)
* add ENABLE_HIVE_LOCK_HOUSE_KEEPER

* update variable name

* update replica to 1

* update change log

* Update variables.tf

Co-authored-by: Abhimanyu Gupta <[email protected]>

* Update variables.tf

Co-authored-by: Abhimanyu Gupta <[email protected]>

* remove hms additional configure

* removed DD and ranger configure

* add DD back and update cpu and memo

* update cpu number

* update change log

* adjust cpu size

* adjust memory

* fixed memory to 2 Gi

* Update CHANGELOG.md

* removed HIVE_DBS and LDAP config

* dummy change

* sync

---------

Co-authored-by: Jian Li <[email protected]>
Co-authored-by: Abhimanyu Gupta <[email protected]>
3 people authored Aug 16, 2023
1 parent db7e527 commit 03fca3f
Showing 3 changed files with 186 additions and 0 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -3,6 +3,10 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

## [6.19.0] - 2023-08-16
### Added
- Added variable `enable_hms_housekeeper` to support the Hive Metastore lock housekeeper.

## [6.18.4] - 2023-06-28
### Added
- Added variable `hms_ro_request_partition_limit` & `hms_rw_request_partition_limit` to set hive metastore limit of partition requests.
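As a hedged aside (not part of this commit), a minimal sketch of how a consumer of the module might switch the new housekeeper on. The module block name, source URL, and version pin are placeholders; only `hms_instance_type` and `enable_hms_housekeeper` come from this change, and the `count` expression that consumes them appears in the k8s-housekeeper.tf diff below.

# Hypothetical consumer configuration -- the module name, source and ref
# are placeholders, and other required module inputs are omitted.
module "apiary" {
  source = "git::https://github.com/ExpediaGroup/apiary-data-lake.git?ref=v6.19.0"

  # The housekeeper Deployment is only created when both of these hold
  # (see the count expression in k8s-housekeeper.tf below).
  hms_instance_type      = "k8s"
  enable_hms_housekeeper = true
}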
176 changes: 176 additions & 0 deletions k8s-housekeeper.tf
@@ -0,0 +1,176 @@
/**
 * Copyright (C) 2018-2020 Expedia, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 */

resource "kubernetes_deployment" "apiary_hms_housekeeper" {
  count = var.hms_instance_type == "k8s" && var.enable_hms_housekeeper ? 1 : 0
  metadata {
    name      = "${local.hms_alias}-housekeeper"
    namespace = var.metastore_namespace

    labels = {
      name = "${local.hms_alias}-housekeeper"
    }
  }

  spec {
    replicas = 1
    selector {
      match_labels = {
        name = "${local.hms_alias}-housekeeper"
      }
    }

    template {
      metadata {
        labels = {
          name = "${local.hms_alias}-housekeeper"
        }
        annotations = {
          "ad.datadoghq.com/${local.hms_alias}-housekeeper.check_names"  = var.datadog_metrics_enabled ? "[\"prometheus\"]" : null
          "ad.datadoghq.com/${local.hms_alias}-housekeeper.init_configs" = var.datadog_metrics_enabled ? "[{}]" : null
          "ad.datadoghq.com/${local.hms_alias}-housekeeper.instances"    = var.datadog_metrics_enabled ? "[{ \"prometheus_url\": \"http://%%host%%:${var.datadog_metrics_port}/actuator/prometheus\", \"namespace\": \"hms_readwrite\", \"metrics\": [ \"${join("\",\"", var.datadog_metrics_hms_readwrite_readonly)}\" ] , \"type_overrides\": { \"${join("\": \"gauge\",\"", var.datadog_metrics_hms_readwrite_readonly)}\": \"gauge\"} }]" : null
          "iam.amazonaws.com/role"                                       = aws_iam_role.apiary_hms_readwrite.name
          "prometheus.io/path"                                           = "/metrics"
          "prometheus.io/port"                                           = "8080"
          "prometheus.io/scrape"                                         = "true"
        }
      }

      spec {
        service_account_name            = kubernetes_service_account.hms_readwrite[0].metadata.0.name
        automount_service_account_token = true
        dynamic "init_container" {
          for_each = var.external_database_host == "" ? ["enabled"] : []
          content {
            image = "${var.hms_docker_image}:${var.hms_docker_version}"
            name  = "${local.hms_alias}-sql-init-housekeeper"

            command = ["sh", "/allow-grant.sh"]

            env {
              name  = "MYSQL_HOST"
              value = var.external_database_host == "" ? join("", aws_rds_cluster.apiary_cluster.*.endpoint) : var.external_database_host
            }

            env {
              name  = "MYSQL_DB"
              value = var.apiary_database_name
            }

            env {
              name  = "MYSQL_PERMISSIONS"
              value = "ALL"
            }

            env {
              name = "MYSQL_MASTER_CREDS"
              value_from {
                secret_key_ref {
                  name = kubernetes_secret.hms_secrets[0].metadata[0].name
                  key  = "master_creds"
                }
              }
            }

            env {
              name = "MYSQL_USER_CREDS"
              value_from {
                secret_key_ref {
                  name = kubernetes_secret.hms_secrets[0].metadata[0].name
                  key  = "rw_creds"
                }
              }
            }
          }
        }

        container {
          image = "${var.hms_docker_image}:${var.hms_docker_version}"
          name  = "${local.hms_alias}-housekeeper"
          port {
            container_port = var.hive_metastore_port
          }
          env {
            name  = "MYSQL_DB_HOST"
            value = var.external_database_host == "" ? join("", aws_rds_cluster.apiary_cluster.*.endpoint) : var.external_database_host
          }
          env {
            name  = "MYSQL_DB_NAME"
            value = var.apiary_database_name
          }
          env {
            name  = "MYSQL_SECRET_ARN"
            value = data.aws_secretsmanager_secret.db_rw_user.arn
          }
          env {
            name  = "HIVE_METASTORE_ACCESS_MODE"
            value = "readwrite"
          }
          env {
            name  = "HADOOP_HEAPSIZE"
            value = local.hms_rw_heapsize
          }
          env {
            name  = "AWS_REGION"
            value = var.aws_region
          }
          env {
            name  = "AWS_DEFAULT_REGION"
            value = var.aws_region
          }
          env {
            name  = "INSTANCE_NAME"
            value = local.instance_alias
          }
          env {
            name  = "HIVE_METASTORE_LOG_LEVEL"
            value = var.hms_log_level
          }
          env {
            name  = "ENABLE_HIVE_LOCK_HOUSE_KEEPER"
            value = var.enable_hms_housekeeper ? "true" : ""
          }

          liveness_probe {
            tcp_socket {
              port = var.hive_metastore_port
            }
            timeout_seconds       = 60
            failure_threshold     = 3
            success_threshold     = 1
            initial_delay_seconds = 60
            period_seconds        = 20
          }

          readiness_probe {
            tcp_socket {
              port = var.hive_metastore_port
            }
            timeout_seconds       = 60
            failure_threshold     = 3
            success_threshold     = 1
            initial_delay_seconds = 60
            period_seconds        = 20
          }

          resources {
            limits {
              cpu    = 0.5
              memory = "2048Mi"
            }
            requests {
              cpu    = 0.5
              memory = "2048Mi"
            }
          }
        }
        image_pull_secrets {
          name = var.k8s_docker_registry_secret
        }
      }
    }
  }
}
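The escaped JSON in the "ad.datadoghq.com/...instances" annotation above is hard to read. As a sketch only (not part of this commit), an equivalent value could be built with Terraform's jsonencode(), which produces the same JSON structure from plain HCL; the annotation would still need the existing var.datadog_metrics_enabled guard around it.

# Sketch: an equivalent, easier-to-read construction of the Datadog
# "instances" annotation value shown above. Not part of this commit.
locals {
  datadog_hms_housekeeper_instances = jsonencode([
    {
      prometheus_url = "http://%%host%%:${var.datadog_metrics_port}/actuator/prometheus"
      namespace      = "hms_readwrite"
      metrics        = var.datadog_metrics_hms_readwrite_readonly
      type_overrides = { for m in var.datadog_metrics_hms_readwrite_readonly : m => "gauge" }
    }
  ])
}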
6 changes: 6 additions & 0 deletions variables.tf
@@ -102,6 +102,12 @@ variable "enable_hive_metastore_metrics" {
  default     = false
}

variable "enable_hms_housekeeper" {
  description = "Enable the HMS lock housekeeper. When enabled, this creates a new HMS instance for housekeeping."
  type        = bool
  default     = false
}

variable "apiary_shared_schemas" {
  description = "Schema names which are accessible from read-only metastore, default is all schemas."
  type        = list(any)
