Skip to content

Commit

Permalink
Single node ES instance
Browse files Browse the repository at this point in the history
- No load balancers
- No autoscaling
- Output set to single node IP address
Fixed an issue where the single-node instance was provisioned in clustered mode
Single-node network interface attachment to ensure connectivity without load balancer
Updated .gitignore

Kibana image
  • Loading branch information
ekfastlane committed Jan 7, 2021
1 parent 53691d2 commit 2204189
Show file tree
Hide file tree
Showing 16 changed files with 192 additions and 53 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,5 +6,6 @@ terraform.tfvars
.terraform/
.gcp*
cluster_bootstrap_state
terraform-aws/cluster_bootstrap_state
gcp-account.json
*.iml
2 changes: 2 additions & 0 deletions assets/node-init.json
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
"ec2:DescribeInstances",
"ec2:DescribeVolumes",
"ec2:AttachVolume",
"ec2:AttachNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeTags",
"autoscaling:DescribeAutoScalingGroups"
],
Expand Down
51 changes: 51 additions & 0 deletions assets/scripts/aws/autoattach-network.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
#!/bin/bash
# Attach the pre-provisioned ENI to this instance and configure it via netplan,
# so a single-node deployment is reachable without a load balancer.
#
# Required environment variables (exported by templates/aws_user_data.sh):
#   aws_region - AWS region for all CLI calls
#   eni_id     - ID of the network interface to attach
#   eni_ipv4   - private IPv4 address assigned to that interface
#
# NOTE(review): the original header also listed es_cluster and
# elasticsearch_data_dir as required, but neither is read by this script.

INSTANCE_ID="$(ec2metadata --instance-id)"
AV_ZONE="$(ec2metadata --availability-zone)"
# Role tag is looked up for diagnostics only; it is not used below.
INSTANCE_ROLE="$(aws ec2 describe-tags --region "$aws_region" \
  --filters "Name=resource-id,Values=$INSTANCE_ID" \
  | jq -r '.Tags[] | select(.Key == "Role") | .Value')"
echo "AV_ZONE: $AV_ZONE"
echo "INSTANCE_ROLE: $INSTANCE_ROLE"

# Retry until the ENI is attached: the attach call can race instance startup
# or a previous holder releasing the interface.
while true; do
  echo "UNATTACHED_ENI_ID: $eni_id"

  if ! aws ec2 attach-network-interface \
      --instance-id "$INSTANCE_ID" \
      --device-index 1 \
      --network-interface-id "$eni_id" \
      --region "$aws_region"; then
    sleep 10
    continue
  fi

  # Only proceed once the attachment is actually visible in the API.
  ATTACHMENTS_COUNT="$(aws ec2 describe-network-interfaces --region "$aws_region" \
    --filters "Name=network-interface-id,Values=$eni_id" \
    | jq -r '.NetworkInterfaces[0].Attachment | length')"
  if [ "$ATTACHMENTS_COUNT" != "0" ]; then break; fi
done

echo "Updating network configuration"

# Policy routing (table 1000) keeps traffic originating from the ENI address
# on the secondary interface (ens6).
cat <<EOF >/etc/netplan/51-ens6.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    ens6:
      addresses:
        - ${eni_ipv4}/20
      dhcp4: no
      routes:
        - to: 0.0.0.0/0
          via: 172.31.16.1 # NOTE(review): hard-coded gateway and /20 prefix assume the default VPC layout - confirm
          table: 1000
        - to: ${eni_ipv4}
          via: 0.0.0.0
          scope: link
          table: 1000
      routing-policy:
        - from: ${eni_ipv4}
          table: 1000
EOF

# Give the kernel a moment to finish enumerating the new interface.
sleep 5

netplan apply

39 changes: 37 additions & 2 deletions assets/scripts/singlenode.sh
Original file line number Diff line number Diff line change
@@ -1,41 +1,76 @@
#!/bin/bash
# Bootstrap script for a single-node Elasticsearch instance: attaches storage
# and network, writes ES config, starts the service, then runs cluster-config
# scripts. Runs as the instance startup script selected by user-data.
#
# NOTE(review): this text appears to be a diff overlay that merges removed and
# added lines (no +/- markers survived the scrape). Duplicated statements are
# flagged below; verify against the actual post-commit file before editing.
set +e

echo "Testing AMI Builder if it works properly"


echo "Running common env script"
. /opt/cloud-deploy-scripts/common/env.sh
# NOTE(review): this unconditional source duplicates the guarded source just
# below - most likely the pre-change line left in by the diff rendering.
. /opt/cloud-deploy-scripts/$cloud_provider/env.sh

if [ -e /opt/cloud-deploy-scripts/$cloud_provider/env.sh ]; then
echo "Running ${cloud_provider} env script"
. /opt/cloud-deploy-scripts/$cloud_provider/env.sh
fi

# It is required to bind to all interfaces for load balancer on GCP to work
if [ "$cloud_provider" == "gcp" ]; then
export BIND_TO_ALL="true"
fi

echo "Running EBS volume autoattach script"
/opt/cloud-deploy-scripts/$cloud_provider/autoattach-disk.sh

# Attaches the pre-provisioned ENI (see autoattach-network.sh) so the node is
# reachable without a load balancer.
echo "Running ENI autoattach script"
/opt/cloud-deploy-scripts/$cloud_provider/autoattach-network.sh

echo "Running config-es script"
/opt/cloud-deploy-scripts/common/config-es.sh

echo "Running config-beats script"
/opt/cloud-deploy-scripts/common/config-beats.sh

echo "Running ${cloud_provider}/config-es script"
/opt/cloud-deploy-scripts/$cloud_provider/config-es.sh

echo "Running ${cloud_provider}/config-es-discovery script"
/opt/cloud-deploy-scripts/$cloud_provider/config-es-discovery.sh

# Append single-node settings; quoted 'EOF' prevents any expansion, the text
# is written verbatim.
echo "Creating elasticsearch.yml file"
cat <<'EOF' >>/etc/elasticsearch/elasticsearch.yml
node.master: true
node.data: true
node.ingest: true
discovery.type: single-node
EOF

echo "Running config/clients script"

/opt/cloud-deploy-scripts/common/config-clients.sh

# add bootstrap.password to the keystore, so that config-cluster scripts can run
# only done on bootstrap and singlenode nodes, before starting ES
if [ "${security_enabled}" == "true" ]; then
echo "Configuring elasticsearch keystore"
echo "${client_pwd}" | /usr/share/elasticsearch/bin/elasticsearch-keystore add --stdin bootstrap.password
fi

#Fix IP Address
# Replace the _ec2:privateIpv4_ placeholder with the attached ENI address so
# ES binds/publishes on the ENI rather than the primary interface.
echo "Rewriting ENI IP Address in elasticsearch.yml"
sed -i -re "s/_ec2:privateIpv4_/${eni_ipv4}/ig" /etc/elasticsearch/elasticsearch.yml

# Start Elasticsearch
echo "Starting elasticsearch service"

systemctl daemon-reload
systemctl enable elasticsearch.service
systemctl start elasticsearch.service

echo "Running config-cluster script"
/opt/cloud-deploy-scripts/common/config-cluster.sh
# NOTE(review): the next line duplicates the provider config-cluster call that
# follows - likely the pre-change line kept by the diff rendering; confirm.
/opt/cloud-deploy-scripts/$cloud_provider/config-cluster.sh


echo "Running ${cloud_provider}/config-cluster script"
/opt/cloud-deploy-scripts/$cloud_provider/config-cluster.sh



2 changes: 2 additions & 0 deletions templates/aws_user_data.sh
Original file line number Diff line number Diff line change
Expand Up @@ -30,5 +30,7 @@ export bootstrap_node="${bootstrap_node}"
export ca_cert="${ca_cert}"
export node_cert="${node_cert}"
export node_key="${node_key}"
export eni_id="${eni_id}"
export eni_ipv4="${eni_ipv4}"

/opt/cloud-deploy-scripts/${startup_script}
46 changes: 32 additions & 14 deletions terraform-aws/alb.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
resource "aws_security_group" "elasticsearch-alb-sg" {
name = "${var.es_cluster}-alb-sg"
name = "${var.environment}-${var.es_cluster}-alb-sg"
description = "ElasticSearch Ports for ALB Access"
vpc_id = var.vpc_id

Expand Down Expand Up @@ -47,7 +47,9 @@ resource "aws_security_group" "elasticsearch-alb-sg" {
#-----------------------------------------------------

resource "aws_lb_target_group" "esearch-p9200-tg" {
name = "${var.es_cluster}-p9200-tg"
count = local.singlenode_mode ? 0 : 1

name = "${var.environment}-${var.es_cluster}-p9200-tg"
port = 9200
protocol = "HTTP"
vpc_id = var.vpc_id
Expand All @@ -64,7 +66,9 @@ resource "aws_lb_target_group" "esearch-p9200-tg" {
}

resource "aws_lb_target_group" "kibana-p5601-tg" {
name = "${var.es_cluster}-p5601-tg"
count = local.singlenode_mode ? 0 : 1

name = "${var.environment}-${var.es_cluster}-p5601-tg"
port = 5601
protocol = "HTTP"
vpc_id = var.vpc_id
Expand All @@ -81,7 +85,9 @@ resource "aws_lb_target_group" "kibana-p5601-tg" {
}

resource "aws_lb_target_group" "grafana-p3000-tg" {
name = "${var.es_cluster}-p3000-tg"
count = local.singlenode_mode ? 0 : 1

name = "${var.environment}-${var.es_cluster}-p3000-tg"
port = 3000
protocol = "HTTP"
vpc_id = var.vpc_id
Expand All @@ -98,7 +104,9 @@ resource "aws_lb_target_group" "grafana-p3000-tg" {
}

resource "aws_lb_target_group" "cerebro-p9000-tg" {
name = "${var.es_cluster}-p9000-tg"
count = local.singlenode_mode ? 0 : 1

name = "${var.environment}-${var.es_cluster}-p9000-tg"
port = 9000
protocol = "HTTP"
vpc_id = var.vpc_id
Expand All @@ -115,7 +123,9 @@ resource "aws_lb_target_group" "cerebro-p9000-tg" {
}

resource "aws_lb" "elasticsearch-alb" {
name = "${var.es_cluster}-alb"
count = local.singlenode_mode ? 0 : 1

name = "${var.environment}-${var.es_cluster}-alb"
internal = ! var.public_facing
load_balancer_type = "application"
security_groups = [aws_security_group.elasticsearch-alb-sg.id]
Expand All @@ -130,46 +140,54 @@ resource "aws_lb" "elasticsearch-alb" {
#-----------------------------------------------------

# Elasticsearch HTTP listener (port 9200); skipped entirely in single-node
# mode, where no ALB exists.
# NOTE(review): this span is a diff overlay - both the old un-indexed
# references and the new count-indexed ([0]) references are present. Only the
# indexed forms are valid once count is set; confirm against the real file.
resource "aws_lb_listener" "esearch" {
load_balancer_arn = aws_lb.elasticsearch-alb.arn
count = local.singlenode_mode ? 0 : 1

load_balancer_arn = aws_lb.elasticsearch-alb[0].arn
port = "9200"
protocol = "HTTP"

default_action {
type = "forward"
target_group_arn = aws_lb_target_group.esearch-p9200-tg.arn
target_group_arn = aws_lb_target_group.esearch-p9200-tg[0].arn
}
}

# Kibana HTTP listener (port 5601); created only in clustered mode.
# NOTE(review): diff overlay - duplicate load_balancer_arn/target_group_arn
# lines show the before/after of adding count; keep only the [0]-indexed forms.
resource "aws_lb_listener" "kibana" {
load_balancer_arn = aws_lb.elasticsearch-alb.arn
count = local.singlenode_mode ? 0 : 1

load_balancer_arn = aws_lb.elasticsearch-alb[0].arn
port = "5601"
protocol = "HTTP"

default_action {
type = "forward"
target_group_arn = aws_lb_target_group.kibana-p5601-tg.arn
target_group_arn = aws_lb_target_group.kibana-p5601-tg[0].arn
}
}

# Grafana HTTP listener (port 3000); created only in clustered mode.
# NOTE(review): diff overlay - duplicate load_balancer_arn/target_group_arn
# lines show the before/after of adding count; keep only the [0]-indexed forms.
resource "aws_lb_listener" "grafana" {
load_balancer_arn = aws_lb.elasticsearch-alb.arn
count = local.singlenode_mode ? 0 : 1

load_balancer_arn = aws_lb.elasticsearch-alb[0].arn
port = "3000"
protocol = "HTTP"

default_action {
type = "forward"
target_group_arn = aws_lb_target_group.grafana-p3000-tg.arn
target_group_arn = aws_lb_target_group.grafana-p3000-tg[0].arn
}
}

# Cerebro HTTP listener (port 9000); created only in clustered mode.
# NOTE(review): diff overlay - duplicate load_balancer_arn/target_group_arn
# lines show the before/after of adding count; keep only the [0]-indexed forms.
resource "aws_lb_listener" "cerebro" {
load_balancer_arn = aws_lb.elasticsearch-alb.arn
count = local.singlenode_mode ? 0 : 1

load_balancer_arn = aws_lb.elasticsearch-alb[0].arn
port = "9000"
protocol = "HTTP"

default_action {
type = "forward"
target_group_arn = aws_lb_target_group.cerebro-p9000-tg.arn
target_group_arn = aws_lb_target_group.cerebro-p9000-tg[0].arn
}
}

12 changes: 6 additions & 6 deletions terraform-aws/client.tf
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ data "template_file" "client_userdata_script" {
}

resource "aws_launch_template" "client" {
name_prefix = "elasticsearch-${var.es_cluster}-client-nodes"
name_prefix = "elasticsearch-${var.environment}-${var.es_cluster}-client-nodes"
image_id = data.aws_ami.kibana_client.id
instance_type = var.master_instance_type
user_data = base64encode(data.template_file.client_userdata_script.rendered)
Expand Down Expand Up @@ -35,7 +35,7 @@ resource "aws_launch_template" "client" {
resource "aws_autoscaling_group" "client_nodes" {
count = length(keys(var.clients_count))

name = "elasticsearch-${var.es_cluster}-client-nodes-${keys(var.clients_count)[count.index]}"
name = "elasticsearch-${var.environment}-${var.es_cluster}-client-nodes-${keys(var.clients_count)[count.index]}"
max_size = var.clients_count[keys(var.clients_count)[count.index]]
min_size = var.clients_count[keys(var.clients_count)[count.index]]
desired_capacity = var.clients_count[keys(var.clients_count)[count.index]]
Expand All @@ -45,10 +45,10 @@ resource "aws_autoscaling_group" "client_nodes" {
vpc_zone_identifier = local.clients_subnet_ids[keys(var.clients_count)[count.index]]

target_group_arns = [
aws_lb_target_group.esearch-p9200-tg.arn,
aws_lb_target_group.kibana-p5601-tg.arn,
aws_lb_target_group.grafana-p3000-tg.arn,
aws_lb_target_group.cerebro-p9000-tg.arn,
aws_lb_target_group.esearch-p9200-tg[0].arn,
aws_lb_target_group.kibana-p5601-tg[0].arn,
aws_lb_target_group.grafana-p3000-tg[0].arn,
aws_lb_target_group.cerebro-p9000-tg[0].arn,
]

launch_template {
Expand Down
1 change: 0 additions & 1 deletion terraform-aws/cluster_bootstrap_state

This file was deleted.

6 changes: 3 additions & 3 deletions terraform-aws/datas.tf
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ data "template_file" "data_userdata_script" {
}

resource "aws_launch_template" "data" {
name_prefix = "elasticsearch-${var.es_cluster}-data-nodes"
name_prefix = "elasticsearch-${var.environment}-${var.es_cluster}-data-nodes"
image_id = data.aws_ami.elasticsearch.id
instance_type = var.data_instance_type
user_data = base64encode(data.template_file.data_userdata_script.rendered)
Expand Down Expand Up @@ -36,7 +36,7 @@ resource "aws_launch_template" "data" {
resource "aws_autoscaling_group" "data_nodes" {
count = length(keys(var.datas_count))

name = "elasticsearch-${var.es_cluster}-data-nodes-${keys(var.datas_count)[count.index]}"
name = "elasticsearch-${var.environment}-${var.es_cluster}-data-nodes-${keys(var.datas_count)[count.index]}"
max_size = var.datas_count[keys(var.datas_count)[count.index]]
min_size = var.datas_count[keys(var.datas_count)[count.index]]
desired_capacity = var.datas_count[keys(var.datas_count)[count.index]]
Expand All @@ -51,7 +51,7 @@ resource "aws_autoscaling_group" "data_nodes" {
]

target_group_arns = [
aws_lb_target_group.esearch-p9200-tg.arn,
aws_lb_target_group.esearch-p9200-tg[0].arn,
]

launch_template {
Expand Down
6 changes: 3 additions & 3 deletions terraform-aws/disks.tf
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ resource "aws_ebs_volume" "master" {
encrypted = var.volume_encryption

tags = {
Name = "elasticsearch-${var.es_cluster}-master-${jsondecode(each.value)["name"]}"
Name = "elasticsearch-${var.environment}-${var.es_cluster}-master-${jsondecode(each.value)["name"]}"
ClusterName = "${var.es_cluster}"
VolumeIndex = jsondecode(each.value)["index"]
AutoAttachGroup = "master"
Expand All @@ -37,7 +37,7 @@ resource "aws_ebs_volume" "data" {
encrypted = var.volume_encryption

tags = {
Name = "elasticsearch-${var.es_cluster}-data-${jsondecode(each.value)["name"]}"
Name = "elasticsearch-${var.environment}-${var.es_cluster}-data-${jsondecode(each.value)["name"]}"
ClusterName = "${var.es_cluster}"
VolumeIndex = jsondecode(each.value)["index"]
AutoAttachGroup = "data"
Expand All @@ -53,7 +53,7 @@ resource "aws_ebs_volume" "singlenode" {
encrypted = var.volume_encryption

tags = {
Name = "elasticsearch-${var.es_cluster}-singlenode"
Name = "elasticsearch-${var.environment}-${var.es_cluster}-singlenode"
ClusterName = "${var.es_cluster}"
VolumeIndex = "0"
AutoAttachGroup = "singlenode"
Expand Down
Loading

0 comments on commit 2204189

Please sign in to comment.