From bca359b80f1949193cdc61c79df82dfc211f5644 Mon Sep 17 00:00:00 2001 From: Victor Bustamante Date: Wed, 16 Nov 2022 13:03:36 -0300 Subject: [PATCH 1/2] feat: Updates to staging development configuration Signed-off-by: Victor Bustamante --- deploy/Makefile | 7 +- deploy/docker-compose.staging.yml | 7 ++ deploy/docker-compose.yml | 4 +- research/staging_host/dockersock.pp | Bin 0 -> 1254 bytes research/staging_host/dockersock.te | 12 +++ research/staging_host/guinea_image/Dockerfile | 3 + research/staging_host/guinea_image/index.html | 7 ++ research/staging_host/selinux.conf | 26 ++++++ research/staging_host/staging-1.yaml | 79 +++++++++++------- research/staging_host/staging.tf | 2 +- 10 files changed, 112 insertions(+), 35 deletions(-) create mode 100644 deploy/docker-compose.staging.yml create mode 100644 research/staging_host/dockersock.pp create mode 100644 research/staging_host/dockersock.te create mode 100644 research/staging_host/guinea_image/Dockerfile create mode 100644 research/staging_host/guinea_image/index.html create mode 100644 research/staging_host/selinux.conf diff --git a/deploy/Makefile b/deploy/Makefile index ec0b9f72c3..7a5efa818c 100644 --- a/deploy/Makefile +++ b/deploy/Makefile @@ -57,14 +57,13 @@ prod: --profile si \ up --detach -prod-service: +staging: GATEWAY=$(shell $(MAKEPATH)/scripts/gateway.sh) \ docker-compose \ -f $(MAKEPATH)/docker-compose.yml \ -f $(MAKEPATH)/docker-compose.env-static.yml \ - -f $(MAKEPATH)/docker-compose.pganalyze.yml \ - -f $(MAKEPATH)/docker-compose.prod.yml \ - --profile si \ + -f $(MAKEPATH)/docker-compose.staging.yml \ + --profile si-watchtower \ up web: init diff --git a/deploy/docker-compose.staging.yml b/deploy/docker-compose.staging.yml new file mode 100644 index 0000000000..e52990a706 --- /dev/null +++ b/deploy/docker-compose.staging.yml @@ -0,0 +1,7 @@ +--- +version: "3" + +services: + pg: + environment: + - PGA_SYSTEM_ID=staging diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml index 9fea2c9c74..6fac623a00 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -130,5 +130,5 @@ services: - si-watchtower volumes: - "/var/run/docker.sock:/var/run/docker.sock" - - "${DOCKER_CONFIG:-~/.docker/config}:/config.json:ro" - command: --interval 30 --label-enable + - "${DOCKER_CONFIG:-~/.docker/config.json}:/config.json:ro" + command: --interval 10 --label-enable diff --git a/research/staging_host/dockersock.pp b/research/staging_host/dockersock.pp new file mode 100644 index 0000000000000000000000000000000000000000..78da8860c98c34065f251b10d6ff42fa76253189 GIT binary patch literal 1254 zcmb`GO-{ow5QR$t@v{LT4$u=IxPdq9V8sEfk{T(cB$DftF1P~XysTipG>@88ph_^( zo8LEM$I85Yyu9xW1_N+vc(wS%?A`U~rmULf=(e6TvoN&OxdGat0Xa5I>f$lPx#?Yt zt}ZT}nZD2r&bk7=&Lq`T<3;Pz3e2!%b zVsTS3>-(uq^3hpN;^dgkeWmjp-v@Xk&DZRlw|Wu37)KwQ$|oL^KJ}RU?X3BtjLFZI zg|9;Li7WTt$I;VCH$Ex;$?7;=5uWDayAG=Fku-6wUMpfC9y1Wv>b0Uy4J)oQA1L1T z4DspBMYC1UYc31$fp|PW;#$2@_&}Tk^AXqTmD + SI Test Image + +

+System Initiative Test Image

+ V0.0 + + diff --git a/research/staging_host/selinux.conf b/research/staging_host/selinux.conf new file mode 100644 index 0000000000..1567a7aeb9 --- /dev/null +++ b/research/staging_host/selinux.conf @@ -0,0 +1,26 @@ +# This file controls the state of SELinux on the system. +# SELINUX=disabled # can take one of these three values: +# enforcing - SELinux security policy is enforced. +# permissive - SELinux prints warnings instead of enforcing. +# disabled - No SELinux policy is loaded. +# See also: +# https://docs.fedoraproject.org/en-US/quick-docs/getting-started-with-selinux/#getting-started-with-selinux-selinux-states-and-modes +# +# NOTE: In earlier Fedora kernel builds, SELINUX=disabled #disabled would also +# fully disable SELinux during boot. If you need a system with SELinux +# fully disabled instead of SELinux running with no policy loaded, you +# need to pass selinux=0 to the kernel command line. You can use grubby +# to persistently set the bootloader to boot with selinux=0: +# +# grubby --update-kernel ALL --args selinux=0 +# +# To revert back to SELinux enabled: +# +# grubby --update-kernel ALL --remove-args selinux +# +SELINUX=disabled +# SELINUXTYPE= can take one of these three values: +# targeted - Targeted processes are protected, +# minimum - Modification of targeted policy. Only selected processes are protected. +# mls - Multi Level Security protection. +SELINUXTYPE=targeted \ No newline at end of file diff --git a/research/staging_host/staging-1.yaml b/research/staging_host/staging-1.yaml index 70af0241ca..2694f191c3 100644 --- a/research/staging_host/staging-1.yaml +++ b/research/staging_host/staging-1.yaml @@ -6,6 +6,15 @@ storage: - path: /etc/hostname contents: inline: staging-1 + - path: /opt/dockersock.pp + mode: 0755 + contents: + local: research/staging_host/dockersock.pp + # We need to disable SELINUX (or make a new policy) so that watchtower can get credentials to dockerhub + # - path: /etc/selinux/config + # mode: 0644 + # contents: + # local: research/staging_host/selinux.conf - path: /usr/local/bin/docker-auth.sh mode: 0755 contents: @@ -31,12 +40,27 @@ storage: - path: /opt/deploy/docker-compose.pganalyze.yml contents: local: deploy/docker-compose.pganalyze.yml - - path: /opt/deploy/docker-compose.prod.yml + - path: /opt/deploy/docker-compose.staging.yml contents: - local: deploy/docker-compose.prod.yml + local: deploy/docker-compose.staging.yml systemd: units: - # installing aws-cli as a layered package with rpm-ostree + - name: install-selinux-dockersock-policy.service + enabled: true + contents: | + [Unit] + Description=Install SELINUX Docker sock policy + Wants=network-online.target + After=network-online.target + + [Service] + Type=oneshot + RemainAfterExit=yes + WorkingDirectory=/opt + ExecStart=semodule -i dockersock.pp + + [Install] + WantedBy=multi-user.target - name: layer-awscli.service enabled: true contents: | @@ -55,6 +79,27 @@ systemd: RemainAfterExit=yes ExecStart=/usr/bin/rpm-ostree install --apply-live --allow-inactive --idempotent awscli + [Install] + WantedBy=multi-user.target + - name: layer-vim.service + enabled: true + contents: | + [Unit] + Description=Install Vim + Wants=network-online.target + After=network-online.target + + # We run before `zincati.service` to avoid conflicting rpm-ostree + # transactions. 
- https://docs.fedoraproject.org/en-US/fedora-coreos/os-extensions/ + After=layer-awscli.service + Before=zincati.service + + + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/rpm-ostree install --apply-live --allow-inactive --idempotent vim + [Install] WantedBy=multi-user.target - name: layer-make.service @@ -65,7 +110,7 @@ systemd: Wants=network-online.target After=network-online.target - After=layer-awscli.service + After=layer-vim.service Before=zincati.service @@ -148,30 +193,8 @@ systemd: [Service] TimeoutStartSec=60s WorkingDirectory=/opt/deploy - ExecStart=make prod-service + ExecStartPre=-/usr/bin/docker-compose down + ExecStart=make staging [Install] WantedBy=multi-user.target -# - name: watchtower.service -# enabled: true -# contents: | -# [Unit] -# After=network-online.target -# Wants=network-online.target -# -# After=deployment.service -# Requires=deployment.service -# -# -# [Service] -# ExecStartPre=-/usr/bin/docker kill whiskers1 -# ExecStartPre=-/usr/bin/docker rm whiskers1 -# ExecStart=/usr/bin/docker run --name watchtower \ -# -v /var/run/docker.sock:/var/run/docker.sock docker.io/containrrr/watchtower \ -# -v /root/.docker/config.json:/config.json \ -# --interval 30 --label-enable \ -# containrrr/watchtower -# -# [Install] -# WantedBy=multi-user.target - diff --git a/research/staging_host/staging.tf b/research/staging_host/staging.tf index 2b754ba01b..abdce4cf28 100644 --- a/research/staging_host/staging.tf +++ b/research/staging_host/staging.tf @@ -17,7 +17,7 @@ data "local_file" "ignition" { resource "aws_instance" "staging-1" { ami = "ami-0e6f4ffb61e585c76" - instance_type = "t3.medium" + instance_type = "t3.large" subnet_id = "subnet-07d580fee7a806230" vpc_security_group_ids = ["sg-0d0be672e4485feb4"] key_name = "si_key" From 3f2c4b7f962350f6429a6ae169320d76fadefbaa Mon Sep 17 00:00:00 2001 From: Victor Bustamante Date: Thu, 17 Nov 2022 19:29:26 -0300 Subject: [PATCH 2/2] feat(research): Add db auto wiping to staging server Signed-off-by: Victor Bustamante --- deploy/Makefile | 7 +++++++ deploy/docker-compose.yml | 19 ++++++++++++++++++ deploy/scripts/reset-database.sh | 6 ++++++ research/staging_host/README.md | 20 ++++++++++++------- research/staging_host/guinea_image/Dockerfile | 3 +-- research/staging_host/guinea_image/index.html | 2 +- research/staging_host/staging-1.yaml | 5 +++++ 7 files changed, 52 insertions(+), 10 deletions(-) create mode 100755 deploy/scripts/reset-database.sh diff --git a/deploy/Makefile b/deploy/Makefile index 7a5efa818c..df87c01322 100644 --- a/deploy/Makefile +++ b/deploy/Makefile @@ -66,6 +66,13 @@ staging: --profile si-watchtower \ up +guinea: + GATEWAY=$(shell $(MAKEPATH)/scripts/gateway.sh) \ + docker-compose \ + -f $(MAKEPATH)/docker-compose.yml \ + --profile guinea \ + up guinea + web: init # REPOPATH=$(REPOPATH) $(MAKEPATH)/scripts/check-for-artifacts-before-mounting.sh $(MAKEPATH)/scripts/generate-ci-yml.sh $(CI_FROM_REF) $(CI_TO_REF) diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml index 6fac623a00..144a67af18 100644 --- a/deploy/docker-compose.yml +++ b/deploy/docker-compose.yml @@ -36,6 +36,9 @@ services: - "faktory:${GATEWAY:-I like my butt}" labels: - "com.centurylinklabs.watchtower.enable=true" + - "com.centurylinklabs.watchtower.lifecycle.pre-update='/reset-database.sh'" + volumes: + - "/opt/deploy/scripts/reset-database.sh:/reset-database.sh:ro" depends_on: - pg - faktory @@ -128,7 +131,23 @@ services: profiles: - watchtower - si-watchtower + environment: + - 
"WATCHTOWER_LIFECYCLE_HOOKS=true"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "${DOCKER_CONFIG:-~/.docker/config.json}:/config.json:ro"
    command: --interval 10 --label-enable
+
+  guinea:
+    image: "index.docker.io/systeminit/guinea:stable"
+    labels:
+      - "com.centurylinklabs.watchtower.enable=true"
+      - "com.centurylinklabs.watchtower.lifecycle.pre-update='/reset-database.sh'"
+    volumes:
+      - "/opt/deploy/scripts/reset-database.sh:/reset-database.sh:ro"
+    profiles:
+      - guinea
+    extra_hosts:
+      - "postgres:${GATEWAY:-I like my butt}"
+    ports:
+      - "8080:80"
diff --git a/deploy/scripts/reset-database.sh b/deploy/scripts/reset-database.sh
new file mode 100755
index 0000000000..54dab5a794
--- /dev/null
+++ b/deploy/scripts/reset-database.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+apt update
+apt install -y postgresql-client
+# TODO(victor): At some point we need to start managing the db credentials as secrets
+export PGPASSWORD="bugbear"
+psql -U si -d si -h postgres -c " DROP SCHEMA public CASCADE; CREATE SCHEMA public;"
\ No newline at end of file
diff --git a/research/staging_host/README.md b/research/staging_host/README.md
index bba816d005..d24b32eb9b 100644
--- a/research/staging_host/README.md
+++ b/research/staging_host/README.md
@@ -2,23 +2,29 @@
 The files in this folder allow you to deploy an EC2
 instance that automatically deploy the latest versions
-of SI's containers, resetting the env on every update.
+of SI's containers, resetting the database on every update.

 Right now, it's only bringing up a coreos instance with
-SI's containers on startup, but no auto-update via watchtower.
+SI's containers on startup, but without auto-update via watchtower at first boot.

 It can be started by, while on the folder containing this
 file, running:

-```
+```shell
 butane staging-1.yaml --pretty --strict --files-dir ../../ > staging-1.ign
 terraform apply -auto-approve
 ```

+For watchtower (a.k.a. auto-updates) to work, you need to log in to the server and execute the following to disable
+SELinux:
+
+```shell
+sudo sed -i -e 's/SELINUX=/SELINUX=disabled #/g' /etc/selinux/config && sudo systemctl reboot
+```
+
+The server will reboot and restart all services with auto-updates enabled.
+
 The way it's working right now, butane copies the
 deployment docker compose files and makefile onto the
 server, and executes it. The idea would be to, in the future,
-execute each server via its own systemd unit, and have
-watchtower setup with a pre update
-[lifecycle hook](https://containrrr.dev/watchtower/lifecycle-hooks/)
-that wipes all the data whenever sdf or the dal get updated
\ No newline at end of file
+execute each server via its own systemd unit.
\ No newline at end of file
diff --git a/research/staging_host/guinea_image/Dockerfile b/research/staging_host/guinea_image/Dockerfile
index 9ccd3130ab..ffb021ff14 100644
--- a/research/staging_host/guinea_image/Dockerfile
+++ b/research/staging_host/guinea_image/Dockerfile
@@ -1,3 +1,2 @@
-FROM nginx:stable-alpine
+FROM nginx:stable
 COPY index.html /usr/share/nginx/html/
-
diff --git a/research/staging_host/guinea_image/index.html b/research/staging_host/guinea_image/index.html
index ac2f76ea88..1d1d5810d8 100644
--- a/research/staging_host/guinea_image/index.html
+++ b/research/staging_host/guinea_image/index.html
@@ -2,6 +2,6 @@ SI Test Image

 System Initiative Test Image

- V0.0 + V0.0.0 diff --git a/research/staging_host/staging-1.yaml b/research/staging_host/staging-1.yaml index 2694f191c3..1b846e6b02 100644 --- a/research/staging_host/staging-1.yaml +++ b/research/staging_host/staging-1.yaml @@ -31,6 +31,10 @@ storage: mode: 0755 contents: local: deploy/scripts/gateway.sh + - path: /opt/deploy/scripts/reset-database.sh + mode: 0755 + contents: + local: deploy/scripts/reset-database.sh - path: /opt/deploy/docker-compose.yml contents: local: deploy/docker-compose.yml @@ -81,6 +85,7 @@ systemd: [Install] WantedBy=multi-user.target + # Note(victor): This is not vital but is necessary. I will not be taking questions at this time. - name: layer-vim.service enabled: true contents: |