From 1e3dcb0764ffd370e826f9b5b7ab07c4823fb0a2 Mon Sep 17 00:00:00 2001 From: Phil Tyler Date: Fri, 31 Mar 2023 15:08:15 -0700 Subject: [PATCH 1/7] Add prune to Makefile --- Makefile | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index bc7c1ab..6902a72 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,18 @@ APP := pyml-validator include devops/make/common.mk -include devops/make/common-kube.mk -include devops/make/common-go.mk \ No newline at end of file +include devops/make/common-go.mk + +# extend the update-makefiles task to remove files we don't need +update-makefiles:: + make prune-common-make + +# strip out everything from common-makefiles that we don't want. +prune-common-make: + @find devops/make -type f \ + -not -name common.mk \ + -not -name common-go.mk \ + -delete + @find devops/make -empty -delete + @git add devops/make + @git commit -C HEAD --amend \ No newline at end of file From a3660babb4e5f40b326a688861117ce131b8d044 Mon Sep 17 00:00:00 2001 From: Phil Tyler Date: Fri, 31 Mar 2023 15:09:07 -0700 Subject: [PATCH 2/7] Squashed 'devops/make/' changes from 2ce7110..4641812 4641812 automatically do the one time setup for updating common make (#374) 91e708d remove devops/make symlink (#373) 322564e Change quay in error message to AR a61c66a Update github_token path in vault 69b6263 Add setup-ar to build step (#370) git-subtree-dir: devops/make git-subtree-split: 4641812d036dbf24be4ea44c005e9702bd1807f6 --- .circleci/config.yml | 3 +++ README.md | 2 +- common-docker-ar.mk | 2 +- common-pants.mk | 2 +- common.mk | 6 ++++++ devops/make | 1 - sh/install-pants.sh | 5 +++-- 7 files changed, 15 insertions(+), 6 deletions(-) delete mode 120000 devops/make diff --git a/.circleci/config.yml b/.circleci/config.yml index 25b497b..e7ebee5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -40,6 +40,7 @@ jobs: steps: - setup_remote_docker - checkout + - run: echo 'export COMMON_MAKE_DIR="."' >> $BASH_ENV - run: make test-deps-build - run: make test-common-build @@ -47,6 +48,7 @@ jobs: executor: deploy-toolbox steps: - checkout + - run: echo 'export COMMON_MAKE_DIR="."' >> $BASH_ENV - run: make test-deps-deploy - run: make test-common-deploy @@ -55,6 +57,7 @@ jobs: - image: cimg/go:1.17 steps: - checkout + - run: echo 'export COMMON_MAKE_DIR="."' >> $BASH_ENV - run: make test-vault-gsa-setup autotag-release: diff --git a/README.md b/README.md index 1f347b3..7b85dad 100644 --- a/README.md +++ b/README.md @@ -614,7 +614,7 @@ Installs pants version greater than `0.1.3` unless overridden with circle for integration testing and acceptance criteria review. - `GITHUB_TOKEN`: (required) Github Token for downloading releases of the [pants](https://github.com/pantheon-systems/pants) - utility. From the production Vault `secret/securenotes/github__pantheon-circleci_user` grab the `pants-token` from this secure note and set it as the `GITHUB_TOKEN`. + utility. From the production Vault `secret/github/pantheon-circleci` grab the `pants-token` from this secure note and set it as the `GITHUB_TOKEN`. - `PANTS_VERSION`: (optional) The version of pants to install. Default `latest`. Specify version like `x.y.z`, eg: `0.1.47` - `PANTS_INCLUDE`: (optional) The services for pants to install or update. 
E.g `make init-pants PANTS_INCLUDE=notification-service,ticket-management` diff --git a/common-docker-ar.mk b/common-docker-ar.mk index 7a22241..320b29b 100644 --- a/common-docker-ar.mk +++ b/common-docker-ar.mk @@ -28,7 +28,7 @@ endif build-docker:: build-docker-ar ## build the docker container -build-docker-ar:: build-linux +build-docker-ar:: setup-ar build-linux @FORCE_BUILD=$(DOCKER_FORCE_BUILD) TRY_PULL=$(DOCKER_TRY_PULL) \ $(COMMON_MAKE_DIR)/sh/build-docker.sh \ $(AR_IMAGE) $(DOCKER_BUILD_CONTEXT) $(DOCKER_BUILD_ARGS) diff --git a/common-pants.mk b/common-pants.mk index 875ec14..2fbcde1 100644 --- a/common-pants.mk +++ b/common-pants.mk @@ -46,7 +46,7 @@ ifndef GITHUB_TOKEN $(call ERROR, "This task needs a GITHUB_TOKEN environment variable") endif $(call INFO, "Installing pants version satisfying: $(PANTS_VERSION_CONSTRAINT)") - @PANTS_VERSION_CONSTRAINT=$(PANTS_VERSION_CONSTRAINT) ./devops/make/sh/install-pants.sh + @PANTS_VERSION_CONSTRAINT=$(PANTS_VERSION_CONSTRAINT) $(COMMON_MAKE_DIR)/sh/install-pants.sh $(call INFO, "Installed pants version" $(shell pants version)) delete-circle-pants:: delete-pants-sandbox ## TODO: remove the alias once $REASONS don't apply diff --git a/common.mk b/common.mk index e2e7372..e79b650 100644 --- a/common.mk +++ b/common.mk @@ -79,6 +79,12 @@ update-makefiles:: ## update the make subtree, assumes the subtree is in devops/ @echo "git commit -am \"Move common_makefiles to new prefix\"" @exit 1 endif + # best effort attempt to do one time setup + @set -x; if ! git remote show common_makefiles &> /dev/null; then \ + echo "adding common_makefiles as a remote"; \ + git remote add common_makefiles git@github.com:pantheon-systems/common_makefiles.git --no-tags; \ + git subtree add --prefix devops/make common_makefiles master --squash &>/dev/null || true; \ + fi @git subtree pull --prefix devops/make common_makefiles master --squash .PHONY:: all help update-makefiles diff --git a/devops/make b/devops/make deleted file mode 120000 index b870225..0000000 --- a/devops/make +++ /dev/null @@ -1 +0,0 @@ -../ \ No newline at end of file diff --git a/sh/install-pants.sh b/sh/install-pants.sh index 035d997..bedac6e 100755 --- a/sh/install-pants.sh +++ b/sh/install-pants.sh @@ -1,6 +1,7 @@ #! /bin/bash set -eou pipefail +DEPLOY_TOOLBOX_IMAGE="us-docker.pkg.dev/pantheon-artifacts/internal/deploy-toolbox:latest" PANTS_VERSION_CONSTRAINT=${PANTS_VERSION_CONSTRAINT:-"latest"} GITHUB_TOKEN=${GITHUB_TOKEN:-} @@ -11,10 +12,10 @@ fi if ! command -v pants >/dev/null; then # pants is not installed so install it - echo "Pants is not installed in this env, please consider switching to quay.io/getpantheon/deploy-toolbox:latest. Installing pants..." + echo "Pants is not installed in this env, please consider switching to $DEPLOY_TOOLBOX_IMAGE. Installing pants..." if ! command -v jq >/dev/null; then - echo "JQ is required to install pants. please consider switching to quay.io/getpantheon/deploy-toolbox:latest or install JQ in your image to utilize this script." + echo "JQ is required to install pants. please consider switching to $DEPLOY_TOOLBOX_IMAGE or install JQ in your image to utilize this script." 
exit 1 fi From cdd7823608e6049a088112f3a71e9b3e731ed147 Mon Sep 17 00:00:00 2001 From: Phil Tyler Date: Fri, 31 Mar 2023 15:08:49 -0700 Subject: [PATCH 3/7] Fix make spacing and drop unused common files --- Makefile | 16 +- devops/make/.circleci/config.yml | 95 - devops/make/.github/PULL_REQUEST_TEMPLATE.md | 18 - devops/make/.gitignore | 6 - devops/make/CODEOWNERS | 7 - devops/make/Dockerfile | 3 - devops/make/Makefile | 74 - devops/make/README.md | 1595 ----------------- devops/make/_base.mk | 13 - devops/make/_docker.mk | 108 -- devops/make/common-apollo.mk | 22 - devops/make/common-conda.mk | 205 --- devops/make/common-django.mk | 95 - devops/make/common-docker-ar.mk | 52 - devops/make/common-docker-quay.mk | 62 - devops/make/common-docker.mk | 14 - devops/make/common-docs.mk | 19 - devops/make/common-kube.mk | 201 --- devops/make/common-kustomize.mk | 94 - devops/make/common-pants.mk | 85 - devops/make/common-python.mk | 136 -- devops/make/common-python3.mk | 31 - devops/make/common-shell.mk | 42 - devops/make/go.mod | 3 - devops/make/main.go | 7 - devops/make/sh/build-docker.sh | 50 - devops/make/sh/create-tls-cert.sh | 120 -- devops/make/sh/install-gcloud.sh | 47 - devops/make/sh/install-pants.sh | 45 - devops/make/sh/repo-hash.sh | 13 - devops/make/sh/setup-circle-ar-docker.sh | 92 - devops/make/sh/setup-circle-vault.sh | 153 -- devops/make/sh/setup-gcloud-test.sh | 21 - devops/make/sh/setup-gcloud.sh | 80 - devops/make/sh/update-gcloud.sh | 34 - devops/make/sh/update-kube-object.sh | 158 -- .../test/fixtures/configmaps/non-prod/foofile | 3 - .../sandbox-common-make-test/test/bar | 1 - .../sandbox-common-make-test/test/foo | 1 - .../sandbox-common-make-test/testfile | 1 - .../sandbox-common-make-test/testfile3 | 1 - devops/make/test/fixtures/golang/main.go | 7 - devops/make/test/fixtures/golang/main_test.go | 9 - .../fixtures/secrets/non-prod/supersecret | 1 - devops/make/test/make/docker-ar.mk | 17 - devops/make/test/make/docker.mk | 17 - devops/make/test/make/kube.mk | 30 - devops/make/test/make/pants.mk | 12 - 48 files changed, 8 insertions(+), 3908 deletions(-) delete mode 100644 devops/make/.circleci/config.yml delete mode 100644 devops/make/.github/PULL_REQUEST_TEMPLATE.md delete mode 100644 devops/make/.gitignore delete mode 100644 devops/make/CODEOWNERS delete mode 100644 devops/make/Dockerfile delete mode 100644 devops/make/Makefile delete mode 100644 devops/make/README.md delete mode 100644 devops/make/_base.mk delete mode 100644 devops/make/_docker.mk delete mode 100644 devops/make/common-apollo.mk delete mode 100644 devops/make/common-conda.mk delete mode 100644 devops/make/common-django.mk delete mode 100644 devops/make/common-docker-ar.mk delete mode 100644 devops/make/common-docker-quay.mk delete mode 100644 devops/make/common-docker.mk delete mode 100644 devops/make/common-docs.mk delete mode 100644 devops/make/common-kube.mk delete mode 100644 devops/make/common-kustomize.mk delete mode 100644 devops/make/common-pants.mk delete mode 100644 devops/make/common-python.mk delete mode 100644 devops/make/common-python3.mk delete mode 100644 devops/make/common-shell.mk delete mode 100644 devops/make/go.mod delete mode 100644 devops/make/main.go delete mode 100755 devops/make/sh/build-docker.sh delete mode 100755 devops/make/sh/create-tls-cert.sh delete mode 100755 devops/make/sh/install-gcloud.sh delete mode 100755 devops/make/sh/install-pants.sh delete mode 100755 devops/make/sh/repo-hash.sh delete mode 100755 devops/make/sh/setup-circle-ar-docker.sh delete mode 100755 
devops/make/sh/setup-circle-vault.sh delete mode 100755 devops/make/sh/setup-gcloud-test.sh delete mode 100755 devops/make/sh/setup-gcloud.sh delete mode 100755 devops/make/sh/update-gcloud.sh delete mode 100755 devops/make/sh/update-kube-object.sh delete mode 100644 devops/make/test/fixtures/configmaps/non-prod/foofile delete mode 100644 devops/make/test/fixtures/configmaps/sandbox-common-make-test/test/bar delete mode 100644 devops/make/test/fixtures/configmaps/sandbox-common-make-test/test/foo delete mode 100644 devops/make/test/fixtures/configmaps/sandbox-common-make-test/testfile delete mode 100644 devops/make/test/fixtures/configmaps/sandbox-common-make-test/testfile3 delete mode 100644 devops/make/test/fixtures/golang/main.go delete mode 100644 devops/make/test/fixtures/golang/main_test.go delete mode 100644 devops/make/test/fixtures/secrets/non-prod/supersecret delete mode 100644 devops/make/test/make/docker-ar.mk delete mode 100644 devops/make/test/make/docker.mk delete mode 100644 devops/make/test/make/kube.mk delete mode 100644 devops/make/test/make/pants.mk diff --git a/Makefile b/Makefile index 6902a72..6092ce8 100644 --- a/Makefile +++ b/Makefile @@ -5,14 +5,14 @@ include devops/make/common-go.mk # extend the update-makefiles task to remove files we don't need update-makefiles:: - make prune-common-make + make prune-common-make # strip out everything from common-makefiles that we don't want. prune-common-make: - @find devops/make -type f \ - -not -name common.mk \ - -not -name common-go.mk \ - -delete - @find devops/make -empty -delete - @git add devops/make - @git commit -C HEAD --amend \ No newline at end of file + @find devops/make -type f \ + -not -name common.mk \ + -not -name common-go.mk \ + -delete + @find devops/make -empty -delete + @git add devops/make + @git commit -C HEAD --amend \ No newline at end of file diff --git a/devops/make/.circleci/config.yml b/devops/make/.circleci/config.yml deleted file mode 100644 index 25b497b..0000000 --- a/devops/make/.circleci/config.yml +++ /dev/null @@ -1,95 +0,0 @@ -# -# This circle.yml builds and tests the utilities in common-makefiles -# and should not be used as a template for your project. -# ---- -version: 2.1 - -executors: - go-build: - docker: - - image: quay.io/getpantheon/go-build:latest - auth: - username: $QUAY_USER - password: $QUAY_PASSWD - - deploy-toolbox: - docker: - - image: us-docker.pkg.dev/pantheon-artifacts/internal/deploy-toolbox:latest - auth: - username: _json_key - password: $CIRCLE_CI_COMMON_KEY - -commands: - # By default, CircleCI uses ssh, and authenticates as a user with read access to projects, but not write access. 
- # In order for `git push` command to work, we need to have CircleCI use HTTPS with the provided oauth token - # instead of ssh (the token is for pantheon-releases which has write access, but the default circle user does not) - configure-https-git: - steps: - - run: - name: configure github auth - command: | - # netrc is the preferred way to authenticate and download private go modules - echo "machine github.com login $GITHUB_TOKEN" > ~/.netrc - git config --global --unset url.ssh://git@github.com.insteadof - git config --global url."https://$GITHUB_TOKEN:x-oauth-basic@github.com/pantheon-systems/".insteadOf "git@github.com:pantheon-systems/" - -jobs: - test-build: - executor: go-build - steps: - - setup_remote_docker - - checkout - - run: make test-deps-build - - run: make test-common-build - - test-deploy: - executor: deploy-toolbox - steps: - - checkout - - run: make test-deps-deploy - - run: make test-common-deploy - - test-artifact-setup: - docker: - - image: cimg/go:1.17 - steps: - - checkout - - run: make test-vault-gsa-setup - - autotag-release: - executor: deploy-toolbox - steps: - - checkout - - configure-https-git - - run: - name: increment version - command: | - VERSION_TAG=$(autotag) && git push origin "v${VERSION_TAG}" - -workflows: - version: 2 - primary: - jobs: - - test-build: - context: - - sig-go-release - - docker-executor-auth - - test-deploy: - context: - - sig-go-release - - docker-executor-auth - - gcp-credentials-ar-ci - - test-artifact-setup: - context: - - sig-go-release - - docker-executor-auth - - autotag-release: - context: - - sig-go-release - - docker-executor-auth - - gcp-credentials-ar-ci - filters: - branches: - only: - - master diff --git a/devops/make/.github/PULL_REQUEST_TEMPLATE.md b/devops/make/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index b20a9f1..0000000 --- a/devops/make/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,18 +0,0 @@ -## Summary -_1-2 line summary of changes_ - -## Changes -_Please enter each change as a new bullet point_ - -## Issue IDs -_Please mention any GIT Issues related to the change_ - -## Breaking Changes / Backwards Compatibility -_Are these changes backwards compatible for existing repos using common-make?_ -_If not how can you make them backwards compatible or what is the communications plan to advise squads on detailed steps they need to take?_ - -## Documentation - -- [ ] If this introduces new features or new behavior, update documentation (README.md) -- [ ] Consider adding example usage to the README.md, especially for non-trivial or non-obvious feature additions -- [ ] If the README was updated, run `make update-readme-toc` to re-generate the TOC diff --git a/devops/make/.gitignore b/devops/make/.gitignore deleted file mode 100644 index 2211563..0000000 --- a/devops/make/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.DS_Store - -common_makefiles -shellcheck -*.swp -/common-make diff --git a/devops/make/CODEOWNERS b/devops/make/CODEOWNERS deleted file mode 100644 index 8dae14a..0000000 --- a/devops/make/CODEOWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# https://help.github.com/en/articles/about-code-owners - -* @pantheon-systems/engops - -# owned by #sig-go -*.go @pantheon-systems/sig-go -common-go.mk @pantheon-systems/sig-go diff --git a/devops/make/Dockerfile b/devops/make/Dockerfile deleted file mode 100644 index a821abc..0000000 --- a/devops/make/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM scratch - -COPY Dockerfile / diff --git a/devops/make/Makefile b/devops/make/Makefile deleted file mode 100644 
index b256ab7..0000000 --- a/devops/make/Makefile +++ /dev/null @@ -1,74 +0,0 @@ -APP=common-make - -ifeq ($(CIRCLE_BRANCH), master) -export CIRCLE_BRANCH := notmaster -export BRANCH := notmaster -endif - -include common.mk -include common-docs.mk -include common-go.mk -include common-docker.mk -include common-shell.mk -include common-kube.mk -include common-pants.mk - -# Required Input Variables for common-python and a default value -PYTHON_PACKAGE_NAME=dummy -TEST_RUNNER=trial -include common-python.mk - -# Required Input Variables for common-conda and a default value -CONDA_PACKAGE_NAME=dummy -CONDA_PACKAGE_VERSION=0.0.1 -include common-conda.mk - -test-deps-build: deps-circle-shell deps-go -test-common-build: test-shell test-readme-toc test-common-docker test-common-docker-ar test-common-go - -test-deps-deploy: deps-circle-kube -test-common-deploy: test-common-kube test-common-pants - -test-common-kube: test-common-kube-lint - $(MAKE) -f test/make/kube.mk test-common-kube - -test-common-kube-lint: - $(MAKE) -f test/make/kube.mk test-common-kube-lint | grep "SKIP_KUBEVAL" - -test-common-pants: - $(MAKE) -f test/make/pants.mk test-common-pants -# go again to make sure that sandbox reuse works - $(MAKE) -f test/make/pants.mk test-common-pants - $(MAKE) -f test/make/pants.mk delete-pants-sandbox - -test-common-lint: - $(call INFO, "running common make tests $(KUBE_NAMESPACE)") - @! make test-common --warn-undefined-variables --just-print 2>&1 >/dev/null | grep warning - -test-gcloud-setup: - $(call INFO, "testing gcloud setup") - sh/setup-gcloud-test.sh - -test-common-docker: - $(call INFO, "testing common-docker") - $(MAKE) -f test/make/docker.mk test-common-docker - -test-common-docker-ar: - $(call INFO, "testing common-docker with Artifact Registry") - $(MAKE) -f test/make/docker-ar.mk test-common-docker - -test-vault-gsa-setup: - sh/setup-circle-vault.sh - -prepare-go-path: - mkdir -p $(GOPATH)/src/_/$(shell pwd)/test/fixtures - ln -sf $(shell pwd)/test/fixtures/golang $(GOPATH)/src/_/$(shell pwd)/test/fixtures - ln -sf $(shell pwd)/main.go $(GOPATH)/src/_/$(shell pwd)/ - -test-common-go: - $(call INFO, "testing common go") -test-common-go: prepare-go-path test-statically-linked-with-cgo-for-tests - -test-statically-linked-with-cgo-for-tests: - $(MAKE) test-go build-linux - file $(APP) | grep 'statically linked' diff --git a/devops/make/README.md b/devops/make/README.md deleted file mode 100644 index 1f347b3..0000000 --- a/devops/make/README.md +++ /dev/null @@ -1,1595 +0,0 @@ -Common make tasks -================= - - - -- [Introduction](#introduction) -- [Usage](#usage) - * [Setting Up the common makefiles](#setting-up-the-common-makefiles) - * [Using in your Makefile](#using-in-your-makefile) - * [Updating common makefiles](#updating-common-makefiles) - * [Extending Tasks](#extending-tasks) - * [Usage with public repos](#usage-with-public-repos) - + [Initial import](#initial-import) - + [Future updates](#future-updates) - * [Usage with more complicated projects](#usage-with-more-complicated-projects) - + [recursive make](#recursive-make) -- [Tasks](#tasks) - * [common.mk](#commonmk) - + [help](#help) - + [update-makefiles](#update-makefiles) - - [output variables](#output-variables) - * [common-apollo.mk](#common-apollomk) - + [Input](#input) - + [Tasks](#tasks-1) - - [check-apollo-schema](#check-apollo-schema) - - [update-apollo-schema](#update-apollo-schema) - * [common-docs.mk](#common-docsmk) - + [circleci 2.0](#circleci-20) - + [update-readme-toc](#update-readme-toc) - + 
[test-readme-toc](#test-readme-toc) - * [common-docker.mk](#common-dockermk) - + [push-circle::](#push-circle) - * [_docker.mk](#_dockermk) - + [Input Environment Variables:](#input-environment-variables) - + [Exported Environment Variables:](#exported-environment-variables) - + [build-docker::](#build-docker) - + [lint-hadolint::](#lint-hadolint) - * [common-docker-ar.mk](#common-docker-armk) - + [Input Environment Variables](#input-environment-variables) - + [Export Environment Variables](#export-environment-variables) - + [push::](#push) - + [push-ar::](#push-ar) - + [setup-ar::](#setup-ar) - * [common-docker-quay.mk](#common-docker-quaymk) - + [circleci 2.0](#circleci-20-1) - + [push::](#push-1) - + [push-quay::](#push-quay) - + [Input Environment Variables](#input-environment-variables-1) - + [Export Environment Variables](#export-environment-variables-1) - * [common-shell.mk](#common-shellmk) - + [Input Environment Variables:](#input-environment-variables-1) - + [test-shell](#test-shell) - * [common-pants.mk](#common-pantsmk) - * [install-circle-pants](#install-circle-pants) - * [delete-circle-pants](#delete-circle-pants) - * [init-circle-pants](#init-circle-pants) - * [common-conda.mk](#common-condamk) - + [Notes:](#notes) - + [Inheritable Input Environment Variables from common-python.mk:](#inheritable-input-environment-variables-from-common-pythonmk) - + [Input Environment Variables:](#input-environment-variables-2) - + [deps-conda::](#deps-conda) - + [setup-conda::](#setup-conda) - + [clean-conda::](#clean-conda) - + [reset-conda-environment::](#reset-conda-environment) - + [build-conda::](#build-conda) - + [build-conda-deployment-environment::](#build-conda-deployment-environment) - + [deploy-conda::](#deploy-conda) - + [deploy-conda-pypi::](#deploy-conda-pypi) - + [regenerate-anaconda-cloud-repo-token::](#regenerate-anaconda-cloud-repo-token) - + [add-conda-private-channel::](#add-conda-private-channel) - + [generate-conda-requirements::](#generate-conda-requirements) - + [reset-conda-environment::](#reset-conda-environment-1) - * [common-python.mk](#common-pythonmk) - + [Input Environment Variables:](#input-environment-variables-3) - + [build-python::](#build-python) - + [test-python::](#test-python) - + [test-circle-python::](#test-circle-python) - + [deps-python::](#deps-python) - + [deps-circle::](#deps-circle) - + [deps-coverage::](#deps-coverage) - + [test-coverage-python::](#test-coverage-python) - + [test-coveralls::](#test-coveralls) - + [coverage-report::](#coverage-report) - + [lint-python::](#lint-python) - + [lint-pylint::](#lint-pylint) - + [lint-flake8::](#lint-flake8) - * [common-go.mk](#common-gomk) - + [circleci 2.0](#circleci-20-2) - + [Input Environment Variables:](#input-environment-variables-4) - + [build-go::](#build-go) - + [build-linux::](#build-linux) - + [build-circle::](#build-circle) - + [test-go::](#test-go) - + [test-go-tparse::](#test-go-tparse) - + [test-no-race::](#test-no-race) - + [test-circle::](#test-circle) - + [deps-go::](#deps-go) - + [deps-circle::](#deps-circle-1) - + [deps-coverage::](#deps-coverage-1) - + [deps-status::](#deps-status) - + [clean-go::](#clean-go) - + [test-coverage-go::](#test-coverage-go) - + [test-coveralls::](#test-coveralls-1) - + [test-coverage-html::](#test-coverage-html) - * [common-kube.mk](#common-kubemk) - + [circleci 2.0](#circleci-20-3) - + [Input Environment Variables:](#input-environment-variables-5) - + [Exported Environment Variables:](#exported-environment-variables-1) - + [Multi-cluster 
deployments](#multi-cluster-deployments) - - [Default Behavior](#default-behavior) - - [Deloying to Other Clusters (not `general-01`)](#deloying-to-other-clusters-not-general-01) - - [Deploying to Many Clusters](#deploying-to-many-clusters) - + [lint-kubeval::](#lint-kubeval) - - [customizing](#customizing) - + [force-pod-restart::](#force-pod-restart) - + [update-secrets::](#update-secrets) - + [clean-secrets::](#clean-secrets) - + [update-configmaps::](#update-configmaps) - + [verify-deployment-rollout::](#verify-deployment-rollout) - * [common-python3.mk](#common-python3mk) - + [Input Environment Variables:](#input-environment-variables-6) - + [check-format-python3::](#check-format-python3) - + [lint-python3::](#lint-python3) - + [test-python3-docker::](#test-python3-docker) - + [deps-python3::](#deps-python3) - + [deps-python3-docker::](#deps-python3-docker) - * [common-kustomize.mk](#common-kustomizemk) - + [Input Environment Variables:](#input-environment-variables-7) - + [build-kustomize::](#build-kustomize) - + [diff-kustomize::](#diff-kustomize) - + [deploy-kustomize::](#deploy-kustomize) -- [Contributing](#contributing) -- [Versioning](#versioning) - * [Logging](#logging) - * [Pathfinding](#pathfinding) - * [Common Patterns for adding to the repo](#common-patterns-for-adding-to-the-repo) - * [Adding support for a new language](#adding-support-for-a-new-language) - * [README.md updates](#readmemd-updates) -- [Handy Make stuff](#handy-make-stuff) - - - -Introduction -============ - -This repo contains a library of Makefile tasks and shell scripts that provide -common functions for working with a variety of systems at Pantheon. The -purpose of this repository is to define __common__ tasks used to build and deploy -projects to avoid repetition and to disseminate changes upstream uniformly. If -the task is defined in a project's `Makefile` and you find yourself copying it -into another repository, that is a good sign that it belongs here. - -Here are some good examples of common tasks that belong here: -* tasks for building go projects -* tasks for building docker containers -* tasks for managing resources in Kubernetes -* tasks for installing and deploying to a sandbox environment - -This repository is **NOT** a good place to define every handy task. Please be -selective. - -Usage -===== - -Setting Up the common makefiles ------------------------------- - -Add these common tasks to your project by using git subtree from the root of -your project. - -First add the remote. - -``` -git remote add common_makefiles git@github.com:pantheon-systems/common_makefiles.git --no-tags -``` - -Now add the subtree - -**note:** it is important that you keep the import path set to `devops/make` as -the makefiles assume this structure. - -``` -git subtree add --prefix devops/make common_makefiles master --squash -``` - -Using in your Makefile ----------------------- - -you simply need to include the common makefiles you want in your projects root -Makefile: - -```make -APP := baryon -PROJECT := $$GOOGLE_PROJECT - -include devops/make/common.mk -include devops/make/common-kube.mk -include devops/make/common-go.mk -``` - -Updating common makefiles -------------------------- - -The `common.mk` file includes a task named `update-makefiles` which you can -invoke to pull and squash the latest versions of the common tasks into your -project. Put the changes on a branch. 
- -``` -git checkout -b update-make - -make update-makefiles -``` - -If this is a new clone you may need to re-run these commands to register the -common-make git repo before running `make update-makefiles`: - -``` -git remote add common_makefiles git@github.com:pantheon-systems/common_makefiles.git --no-tags -git subtree add --prefix devops/make common_makefiles master --squash -``` - -Extending Tasks ---------------- - -All the common makefile tasks can be extended in your top level Makefile by -defining them again. Each common task that can be extended has a `::` target. -e.g. `deps::` - -for example if I want to do something after the default build target from -common-go.mk I can add to it in my Makefile after including common-go.mk -like so: - -```make -build:: - @echo "this is after the common build" -``` - -Conversely, I must add target lines *before* importing common-go.mk if I wanted -to do something before the default target: - -```make -build:: - @echo "this is before the common build" -``` - -or set variables to modify its behavior (in this case directing it to compile -`cmd/foo`): - -```make -build:: CMD=foo -``` - -Usage with public repos ------------------------ - -Ideally we should never add anything sensitive or secret to common-makefiles. -Nonetheless, it is safest to prune out anything not needed by your project if -it is going to be a public Github repo. - -Here is a method for pruning out unused files: - -### Initial import - -- Create /Makefile with an extended `update-makefiles` task and a new -`prune-common-make` task. The prune task should be customized to include only the -files your project will need. Everything else will be removed locally and from -git. Example project that only needs `common.mk` and `common-docker.mk`: - -```make -# extend the update-makefiles task to remove files we don't need -update-makefiles:: - make prune-common-make - -# strip out everything from common-makefiles that we don't want. -prune-common-make: - @find devops/make -type f \ - -not -name common.mk \ - -not -name common-docker.mk \ - -delete - @find devops/make -empty -delete - @git add devops/make - @git commit -C HEAD --amend -``` - -- Follow the standard procedures for adding the common_makefiles to your project: - -``` -git remote add common_makefiles git@github.com:pantheon-systems/common_makefiles.git --no-tags -git subtree add --prefix devops/make common_makefiles master --squash -``` - -- And then execute the prune task created in the first step: - -``` -make prune-common-make -``` - -### Future updates - -After the initial import of common_makefiles the project can be updated in the -standard way: `make update-makefiles`. The `prune-common-make` task will be -executed after the updates are pulled in. - -Usage with more complicated projects ----- - -This section describes approaches to building a Makefile for a project that -builds and/or deploys more than one thing. - -Rather than have a makefile that uses the default targets directly, some -indirection is necessary to accomplish multiple different invocations of those -targets. - -### recursive make - -To use this strategy, define your own targets with the same name as the default -targets you plan to reuse, but implement them by calling make again, using -variables to determine how to modify that invocation to operate on one thing -that the default target could normally handle. 
- -For example, consider this directory layout: - -``` -devops/make -make/ -- foo.mk -- bar.mk -cmd/ -- foo/ - - main.go -- bar/ - - main.go -Makefile -``` - -your `Makefile` would have targets like this: - -``` -build: - $(MAKE) -C . -f make/$(TARGET).mk -``` - -and if you invoked it like this: - -``` -make build TARGET=foo -``` - -then you would use `make/foo` as your makefile, and it would look like a normal -Makefile that consumes common-make: - -``` -include $(COMMON_MAKE_DIR)/common-go.mk -``` - -We can further reuse default targets in our submake with another submake. - -For example, here we create a separate dev instance of `build-docker` and -create a separate image for use with our `test` target: - -``` -build-docker-dev:: export DOCKER_BUILD_ARGS := $(DOCKER_BUILD_ARGS) --build-arg=dev=true -build-docker-dev:: export IMAGE := $(IMAGE)-dev -build-docker-dev:: - $(MAKE) -f $(COMMON_MAKE_DIR)/common-docker.mk build-docker \ - -test:: build-docker-dev - docker run $(IMAGE)-dev $(TEST_CMD) -``` -Tasks -===== - -common.mk --------- - -### help - -`make help` prints out a list of tasks and descriptions. - -Any task that contains a comment following this pattern will be displayed: - -```make -foo: ## this help message will be displayed by `make help` - echo foo -``` - -Example: - -``` -$ make help -foo this help message will be displayed by `make help` -``` - -You can suppress the output of any task by setting a variable `HIDE_TASKS` in -your Makefile, eg: - -```shell -HIDE_TASKS = deps deps-circle test-circle -``` - -### update-makefiles - -Use this to pull the latest `master` branch from `common_makefiles` into the -local project. (Assumes `common_makefiles` were imported to `./devops/make`) - -If you get an error such as `fatal: 'common_makefiles' does not appear to be a git repository` -you may need to add the remote repo before running this task: - -``` -git remote add common_makefiles git@github.com:pantheon-systems/common_makefiles.git --no-tags -``` - -#### output variables - -- `ROOT_DIR` -- the full path to the root of your repository. Useful for supporting execution of - `make` in subdirectories of your repo, and some scenarios where you find you need multiple - includes and/or recursive make invocation. - -## common-apollo.mk - -Common tasks for working with Apollo Studio for GraphQL Services. - -### Input - -- `APP`: (required) The name of the app. -- `GQL_SCHEMA_PATH`: (required) path to the schema.graphqls file - -### Tasks - -#### check-apollo-schema - -Checks schema changes against production to ensure any changes are compatible - -#### update-apollo-schema - -Updates schema for your app on Apollo Studio - -common-docs.mk -------------- - -Common tasks for managing documentation. - -### circleci 2.0 - -If using a docker-based circleci 2.0 build environment ensure that a remote docker -is available in your `.circleci/config.yml`: - -```yaml - steps: - - setup_remote_docker -``` - -### update-readme-toc - -Run `make update-readme-toc` to update the TOC in `./README.md`. Uses [markdown-toc](https://github.com/jonschlinkert/markdown-toc#cli) -to edit in place. - - -### test-readme-toc - -This task executes `markdown-toc` via Docker and compares the output to the -current TOC in the on-disk `./README.md` file. If they differ, a diff output -will be displayed along with an error message and a non-zero exit will occur. - -This is intended to be used in a CI pipeline to fail a build and remind authors -to update the TOC and re-submit their changes. 
- -This task requires Docker to be running. - -This task is added to the global `test` task. - -common-docker.mk ---------------- -We push our images to quay.io by default. This file includes `common-docker-quay.mk` to maintain compatibility for existing projects that use `common-docker.mk`. - -### push-circle:: - -Runs the `build-docker` and `push` tasks. -DEPRECATED. Run these commands separately instead. - -_docker.mk ---------------- -### Input Environment Variables: - -None - -### Exported Environment Variables: - -- `BUILD_NUM`: The build number for this build. Will use `$(DEFAULT_SANDBOX_NAME)-$(COMMIT)` if not building - on circleCI, will use `$(CIRCLE_BUILD_NUM)-$(CIRCLE_BRANCH)` otherwise. - -- `DOCKER_BYPASS_DEFAULT_PUSH`: If you need to provide custom logic for tagging and pushing images to an artifact repository, add `DOCKER_BYPASS_DEFAULT_PUSH=true` to your `Makefile` and the default push step will be skipped. - -### build-docker:: - -Runs `build-docker.sh` - -### lint-hadolint:: - -Runs `hadolint` on all Dockerfiles found in the repository. - -This task fails if `hadolint` is not installed *and* Dockerfiles are present in the repo. -You can set `REQUIRE_DOCKER_LINT := no` in your `Makefile` to make it pass silently if `hadolint` is not installed. - -This task is added to the global `lint` task. - -common-docker-ar.mk -------------- -### Input Environment Variables -- `AR_IMAGE`: the docker image to use. Will be computed if it doesn't exist -- `AR_REGISTRY`: The docker registry to use. Set to Google Artifact Registry - -### Export Environment Variables -- `AR_IMAGE`: The image to use for the build. -- `AR_REGISTRY`: The registry to use for the build -- `AR_IMAGE_BASENAME`: The image without the tag field on it, i.e. foo:1.0.0 would have image basename of 'foo' -- `AR_REGISTRY_PATH`: Registry url and repo name - -### push:: -Invokes `push-ar` - -### push-ar:: -Invokes `setup-ar` then: -Runs `docker push $(IMAGE)` to push the docker image and tag to artifact registry. - -### setup-ar:: -When invoked from Circle CI this task will set up the common circle GSA to be used with our common AR repositories. -Invoked as a dependency of `push-ar`. - -Runs `setup-circle-vault.sh`. This script does the following: -- Installs vault -- Installs pvault -- Runs `gcloud auth activate-service-account`, which authenticates the account with the Google Cloud CLI by reading and decoding the production Vault GSA for `circleci-common@pantheon-internal.iam.gserviceaccount.com` - - -common-docker-quay.mk -------------- -### circleci 2.0 - -To push a container to quay.io upon a successful master build: -- On Circle-CI, navigate to *Project Settings > Environment Variables*. -- Add the following environment vars. Ask `@infra` on Slack if you need - assistance. - -``` -QUAY_USER: getpantheon+circleci -QUAY_PASSWD: -``` - -If using a docker-based circleci 2.0 build environment ensure that a remote docker -is available in your `.circleci/config.yml`: - -```yaml - steps: - - setup_remote_docker -``` - -Note that some functionality is not available with remote docker such as volume -mounts. If you need to use volume mounts on circleci 2.0 you will need to use -the VM-based build environment instead of docker. -### push:: - -Invokes `push-quay` -### push-quay:: -Runs `docker push $(IMAGE)` to push the docker image and tag to quay. 
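Projects normally don't call `push-quay` directly; they include the docker makefile and let the common `push` task resolve to it. As a minimal sketch (the app name and the extra echo step are invented for illustration, not taken from a real project):

```make
APP := my-service                      # hypothetical app name
include devops/make/common.mk
include devops/make/common-docker.mk   # pulls in common-docker-quay.mk

# extend the common push task with an extra step after the image is pushed
push::
	@echo "pushed $(IMAGE) to $(REGISTRY)"
```

`IMAGE` and `REGISTRY` in the sketch are the exported variables documented below.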
- -### Input Environment Variables -- `QUAY_USER`: The quay.io user to use (usually set in CI) -- `QUAY_PASSWD`: The quay passwd to use (usually set in CI) -- `IMAGE`: the docker image to use. will be computed if it doesn't exist. -- `REGISTRY`: The docker registry to use. Defaults to quay. - -### Export Environment Variables -- `IMAGE`: The image to use for the build. -- `REGISTRY`: The registry to use for the build. -- `IMAGE_BASENAME`: The image without the tag field on it.. i.e. foo:1.0.0 would have image basename of 'foo' -- `REGISTRY_PATH`: Registry url and repo name - -common-shell.mk --------------- - -Common tasks for shell scripts, such as [shellcheck](https://www.shellcheck.net/) -for linting shell scripts. - -Please also try to follow the [Google Shell Style Guide](https://google.github.io/styleguide/shell.xml) -when writing shell scripts. - -> Known Bugs: -> Shellcheck will error with "Segmentation Fault" on debian/ubuntu based images -> which includes our deploy-toolbox and most circleci/* images. -> -> Workarounds: Alpine or Fedora/Redhat images should work. Or this workaround: https://github.com/koalaman/shellcheck/issues/1053 - -### Input Environment Variables: - -- `SHELL_SOURCES`: (optional) A list of shell scripts that should be tested by - the `test-shell` and `test` tasks. If none is provided, `find . -name \*.sh` - is run to find any shell files in the project, except files that start with - `_` or `.`, which are excluded from the find. -- `SHELLCHECK_VERSION`: (optional) The version of shellcheck to be installed by - the `deps-circle` task. - -### test-shell - -Run shell script tests such as `shellcheck`. - -This task is added to the global `test` task. - -common-pants.mk ---------------- - -Installs pants version greater than `0.1.3` unless overridden with -`PANTS_VERSION` so that a sandbox integration environment can be created on -circle for integration testing and acceptance criteria review. - -- `GITHUB_TOKEN`: (required) Github Token for downloading releases of the [pants](https://github.com/pantheon-systems/pants) - utility. From the production Vault `secret/securenotes/github__pantheon-circleci_user` grab the `pants-token` from this secure note and set it as the `GITHUB_TOKEN`. -- `PANTS_VERSION`: (optional) The version of pants to install. Default `latest`. Specify version like `x.y.z`, eg: `0.1.47` -- `PANTS_INCLUDE`: (optional) The services for pants to install or update. E.g `make - init-pants PANTS_INCLUDE=notification-service,ticket-management` - -## install-circle-pants - -Installs the `pants` utility on Circle-CI from https://github.com/pantheon-systems/pants - -This task is added to the global `deps-circle` task. If `make deps-circle` is already in your -circle-ci config file then you only need to `include common-pants.mk` in your Makefile. - -## delete-circle-pants - -Deletes the sandbox environment based on the branch if one exists to prepare -for the deployment. - -This task is added to the global `deps-circle` task. If `make deps-circle` is already in your -circle-ci config file then you only need to `include common-pants.mk` in your Makefile. - -## init-circle-pants - -Creates a kube environment against the `testing.onebox.panth.io` as the -ygg-api. The kube environment is set with the `KUBE_NAMESPACE` following the -convention `sandbox-REPO_NAME-BRANCH_NAME`. 
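As a rough end-to-end sketch of how the pants tasks combine with the kube tasks (the app name, the manifest path, and the `deploy` wiring are illustrative assumptions, not taken from a real project):

```make
APP := my-service                      # hypothetical app name
include devops/make/common.mk
include devops/make/common-kube.mk     # provides KUBE_NAMESPACE / KUBECTL_CMD
include devops/make/common-pants.mk    # extends deps-circle with the pants install/cleanup tasks

# illustrative only: create the sandbox with pants, then apply manifests into it
deploy:: init-circle-pants
	$(KUBECTL_CMD) apply -R -f devops/k8s/manifests
```

On a branch build this would land in the `sandbox-REPO_NAME-BRANCH_NAME` namespace described above.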
- -common-conda.mk ---------------- - -### Notes: - -Conda is an open source package management system and environment management -system for installing multiple versions of software packages and their -dependencies and switching easily between them. It works on Linux, OS X and -Windows, and was created for Python programs but can package and distribute any -software (i.e. Python, R, Ruby, Lua, Scala, Java, Javascript, C/ C++, FORTRAN). - -This common make integrates with common-python.mk. Both files can be included -in the top-level Makefile. Example: - -```make -include devops/make/common-python.mk -include devops/make/common-conda.mk -``` - -To prevent mistakes, some targets are protected from being run inside a conda -environment by using the `_assert-conda-env-active` and `_assert-conda-env-not-active` -targets respectively. - -### Inheritable Input Environment Variables from common-python.mk: - -- `PYTHON_PACKAGE_NAME`: (required) The name of the python package. -- `TEST_RUNNER`: (optional) The name of the python test runner to execute. Defaults to `trial` -- `COVERALLS_TOKEN`: (optional) Token to use when pushing coverage to coveralls. Required if using - the `test-coveralls` task - -### Input Environment Variables: - -- `TEST_RUNNER`: (required) The name of the test runner to execute. Inherited - from common-python.mk -- `CONDA_PACKAGE_NAME`: (required) The name of your conda package. Used to also - name your environment. Defaults to $(PYTHON_PACKAGE_NAME) -- `CONDA_PACKAGE_VERSION`: (required) The version of your conda package. Defaults - to $(PYTHON_PACKAGE_VERSION) -- `ANACONDA_CLOUD_REPO_TOKEN`: (optional) Token to use when reading private conda - packages from Anaconda Cloud. This token is required if this package depends - on other private packages. For local development this is a personal token - connected to your Anaconda Cloud account. For circle this is a token specific - to the `pantheon_machines` Anaconda Cloud account and can be found in Vault -- `ANACONDA_CLOUD_DEPLOY_TOKEN`: (optional) Required by circle. Token to use - when pushing conda packages to Anaconda Cloud. For circle this is a token - specific to the `pantheon_machines` Anaconda Cloud account and can be found - in Vault. -- `ANACONDA_CLOUD_ORGANIZATION`: (optional) The name of the organization in - Anaconda Cloud. Defaults to `pantheon` -- `CONDA_PACKAGE_LABEL`: (optional) The label that will be applied to the conda - package on deployment. Defaults to `main` - -### deps-conda:: - -Downloads the miniconda installation script. The target uses `uname -s` to -determine which installation script to download. Currently only `Darwin` (OSX) -and `Linux` are supported. Runs the installation script and adds the path to -`~/.bashrc`. Adds the pantheon channel to conda config. Runs `conda config --set annaconda_upload no` -to disable automatic upload to anaconda cloud after a build. This target is -added to the global `deps` target. - -### setup-conda:: - -Setup the conda virtual environment for this project. Looks for an environment.yml -file in the project root. - -Runs `conda env create || conda env update` -This target is added to the global `setup` target. - -### clean-conda:: - -Removes index cache, lock files, tarballs, unused cache packages, and source cache. - -Runs `conda clean --all -y`. - -This target is added to the global `clean` target. - -### reset-conda-environment:: - -Reset a conda environment by removing and reinstalling all of its packages. 
- -Runs `conda remove --name $(CONDA_PACKAGE_NAME) --all -y` and `conda env update` - -### build-conda:: - -Build conda package for project with current arch. A no arch package can be built by configuring the conda recipe. - -Runs `conda build recipe --no-anaconda-upload`. - -### build-conda-deployment-environment:: - -Clones the project conda environment into the project directory `./local`. This -environment can be copied directly into the Docker container as the deployment -artifact. - -Runs `conda create --clone $(CONDA_PACKAGE_NAME) -y --prefix ./local --copy`. - -### deploy-conda:: - -Requires ANACONDA_CLOUD_DEPLOY_TOKEN to be set. Deploys the built conda package -to Anaconda Cloud. - -Runs `conda build -q --user $(ANACONDA_CLOUD_ORGANIZATION) --token $(ANACONDA_CLOUD_DEPLOY_TOKEN) recipe` - -### deploy-conda-pypi:: - -Requires ANACONDA_CLOUD_DEPLOY_TOKEN to be set. Deploys the latest built pypi -package to Anaconda Cloud. Distributing private pypi packages is a paid feature -of Anaconda Cloud that we have not enabled, but private pypi packages can still -be downloaded on the dashboard or using the API. - -Runs `anaconda --token $(ANACONDA_CLOUD_DEPLOY_TOKEN) upload -u $(ANACONDA_CLOUD_ORGANIZATION) --label $(CONDA_PACKAGE_LABEL) --no-register --force dist/$(CONDA_PACKAGE_NAME)-$(CONDA_PACKAGE_VERSION).tar.gz`. - -### regenerate-anaconda-cloud-repo-token:: - -A helper to generate a personal read-only token for downloading private conda -packages suitable for local development. If not logged into anaconda client this -will present an interactive console. The token will be labeled `private_repo` on -your Anaconda Cloud account. - -The output of this target is an ANACONDA_CLOUD_REPO_TOKEN which should be exported to your environment. - -Run `make admin-regenerate-anaconda-cloud-repo-token` Copy the token then run -`export ANACONDA_CLOUD_REPO_TOKEN=_TOKEN_GOES_HERE_` - -### add-conda-private-channel:: - -Adds the pantheon private channel to your conda config for downloading conda -packages from Anaconda Cloud. Requires ANACONDA_CLOUD_REPO_TOKEN to be set. - -### generate-conda-requirements:: - -Helper to generate a full dependency tree of this conda environment into a -requirements_full.txt - -### reset-conda-environment:: - -Helper to reset a conda environment by removing and reinstalling all of its -packages. - -common-python.mk ----------------- - -### Input Environment Variables: - -- `PYTHON_PACKAGE_NAME`: (required) The name of the python package. -- `TEST_RUNNER`: (optional) The name of the python test runner to execute. Defaults to `trial` -- `COVERALLS_TOKEN`: (optional) Token to use when pushing coverage to coveralls. Required if using - the `test-coveralls` task - -### build-python:: - -Run `python setup.py sdist` in the current project directory. - -This task is added to the global `build` task. - -### test-python:: - -Runs targets `test-coverage-python` and target the global `lint` target. - -This task is added to the global `test` task. - -### test-circle-python:: - -Intended for use in circle-ci config to run tests under the Circle-CI context. This -target additionally calls target test-coveralls-python which runs `coveralls` -to report coverage metrics. - -### deps-python:: - -Install this projects' Python dependencies which includes the targets deps-testrunner-python, -deps-lint-python and deps-coverage-python - -NOTE: Currently assumes this project is using `pip` for dependency management. - -This task is added to the global `deps` task. 
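For reference, wiring these inputs up in a consuming project is only a few lines. A minimal sketch (the package name is invented, and using `pytest` as the `TEST_RUNNER` instead of the default `trial` is an assumption about your project):

```make
PYTHON_PACKAGE_NAME := my_package   # hypothetical package name
TEST_RUNNER := pytest               # optional override; defaults to trial
include devops/make/common.mk
include devops/make/common-python.mk
```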
- -### deps-circle:: - -Install dependencies on Circle-CI which includes the targets deps-coveralls-python - -### deps-coverage:: - -Install dependencies necessary for running the test coverage utilities like -coveralls. - -### test-coverage-python:: - -Run `coverage run --branch --source $(PYTHON_PACKAGE_NAME) $(shell which $(TEST_RUNNER)) $(PYTHON_PACKAGE_NAME)` -which creates the coverage report. - -This task is added to the global `test-coverage` task. - -### test-coveralls:: - -Run `coveralls` which sends the coverage report to coveralls. - -Requires `COVERALLS_TOKEN` environment variable. - -### coverage-report:: - -Run `coverage report` on the last generated coverage source. - -### lint-python:: -Run targets `lint-pylint` and `lint-flake8` - -This task is added to the global `lint` task. - -### lint-pylint:: -Run `pylint $(PYTHON_PACKAGE_NAME)` - -Pylint is a Python source code analyzer which looks for programming errors, -helps enforce a coding standard and sniffs for some code smells as defined in -Martin Fowler's Refactoring book. Pylint can also be run against any installed -python package which is useful for catching misconfigured setup.py files. - -This task is added to `lint-python` task. - -### lint-flake8:: -Run `flake8 --show-source --statistics --benchmark $(PYTHON_PACKAGE_NAME)` - -Flake8 is a combination of three tools (Pyflakes, pep8 and mccabe). Flake8 -performs static analysis of your uncompiled code (NOT installed packages). - -When the source directory of your project is not found this target prints a -warning instead of an error. Pylint does not require the source directory -and can be run on an installed python package. This preserves flexibility. - -This task is added to `lint-python` task. - -common-go.mk ------------ - -### circleci 2.0 - -When using circleci 2.0 it is recommended to use one of the circleci provided -Go primary containers if possible. The latest version of Go will be available. -Updating to a new version involves bumping the container tag. - -### Input Environment Variables: - -- `COVERALLS_TOKEN`: Token to use when pushing coverage to coveralls. -- `FETCH_CA_CERT:` The presence of this variable will add pulled root CA certs - to ca-certificates.crt before build. - -### build-go:: - -Run `go build` in the current project directory if any go code files have been updated -and the binary (at `./$(APP)` by default) isn't already present. - -If you need to build many binaries, you can do a `make` invocation for each one if you -put each subpackage with a `main` function in its own `./cmd/subpackage` directory and -set `CMD=subpackage`. See `build-docker` for options that can be used to build separate -images, ie one for each binary. - -This task is added to the global `build` task. - -### build-linux:: - -Build a static Linux binary. (Works on any platform.) - -### build-circle:: - -Intended for use in circle-ci config files to run a build under the Circle-CI -context. - -### test-go:: - -Run `go test` against all go packages in the project. - -Does not run tests for packages in directories `devops/` and `vendor/` or with `e2e_tests` in their name. - -### test-go-tparse:: - -Run `go test` against all go packages in the project and formats the output using tparse (https://github.com/mfridman/tparse). - -Does not run tests for packages in directories `devops/` and `vendor/` or with `e2e_tests` in their name. - -This task is added to the global `test` task. - -### test-no-race:: - -Run `go test` without the race detector. 
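Tying together the `CMD` mechanism described under `build-go` above, a repository with more than one binary under `./cmd/` might drive the common target like this (a sketch only: the `api` and `worker` package names are made up, and overriding `APP` per binary to control the output name is an assumption, not documented behavior):

```make
# hypothetical layout: ./cmd/api/main.go and ./cmd/worker/main.go
build-all:
	$(MAKE) build-go CMD=api APP=api
	$(MAKE) build-go CMD=worker APP=worker
```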
- -### test-circle:: - -Intended for use in circle-ci config to run tests under the Circle-CI context. - -### deps-go:: - -Install this projects' Go dependencies and tools. - -If you are using go modules, then the target will use `go get`. - -Pass arguments to `go get` by setting the `GO_GET_ARGS` variable in your -Makefile: - -```make -GO_GET_ARGS := -d -``` - -If there is a `./vendor` directory, then `go get` is not called, -and the `./vendor` directory is deleted. - -This task is added to the global `deps` task. - -### deps-circle:: - -Install dependencies on Circle-CI - -### deps-coverage:: - -Install dependencies necessary for running the test coverage utilities like -coveralls. - -### deps-status:: - -Check status of dependencies with gostatus. - -### clean-go:: - -Delete all build artifacts. - -Executed by default with common target `clean::`. - -### test-coverage-go:: - -Run `go cov` test coverage report. - -This task is added to the global `test-coverage` task. - -### test-coveralls:: - -Run test coverage report and send it to coveralls. - -Requires `COVERALLS_TOKEN` environment variable. - -### test-coverage-html:: - -Run go test coverage report and output to `./coverage.html`. - -common-kube.mk --------------- - -### circleci 2.0 - -- On Circle-CI, navigate to *Project Settings > Environment Variables*. -- Ask `@infra` on Slack to assist in adding the `GCLOUD_EMAIL` and `GCLOUD_KEY` env vars. - -When using circleci 2.0 set the environment variables on the primary container, eg: - -```yaml ---- -version: 2 -jobs: - build: - docker: - - image: circleci/golang:1.9.1 - steps: -``` - -### Input Environment Variables: - -- `APP`: should be defined in your topmost Makefile -- `SECRET_FILES`: list of files that should exist in secrets/* used by - `_validate_secrets` -- `UPDATE_GCLOUD`: if you are using the gcloud/kubectl image for deployments then set - this var to `false` -- `CLUSTER_DEFAULT`: set this to the short name of the cluster you wish to deploy to, ie - `sandbox-02` if you're trying to update template-sandbox -- `GCLOUD_PROJECTS`: set this to the space-delimited list of projects in which your clusters reside -- `KUBE_NAMESPACE`: set this to the namespace you wish to deploy to, ie `template-sandbox` - if you're trying to update template-sandbox - -### Exported Environment Variables: - -NOTE: variables that appear hear and above are determined automatically if not set - -- `KUBE_NAMESPACE`: represents the kube namespace that has been detected based - on branch build and circle existence, or the branch explicitly - set in the environment. -- `KUBE_CONTEXT`: the full name of the context that you will deploy to. 
A context is a - kubernetes configuration construct, but we rely on the fact that we use - the same process for setting all contexts on workstation and in CI -- `KUBECTL_CMD`: the equivalent of: - ``` - kubectl --context=${KUBE_CONTEXT} --namespace=${KUBE_NAMESPACE} - ``` - Primarily for use in your `deploy` target, eg: - ``` - deploy:: - $(KUBECTL_CMD) apply -R -f devops/k8s/manifests - ``` - -If no namespace or context is defined, common-kube will use a different -set of defaults depending on the current environment and branch: - -| CircleCI | Branch | Default context | Default namespace | -|----------|-------------------|-----------------|--------------------------| -| Yes | `master` / `main` | `general-01` | `production` | -| Yes | Other branch | `sandbox-01` | `sandbox-[APP]-[BRANCH]` | -| No | Any branch | _pants default_ | _pants default_ | - -- If `CIRCLECI` env var is not defined (ie: not running on circle), the `pants` utility - will be invoked to retrieve your default sandbox name. If this succeeds the value - of `KUBE_NAMESPACE` will be set to your default sandbox name. -- If `CIRCLECI` env var is defined, `KUBE_NAMESPACE` will be set to: - `sandbox-$(CIRCLE_PROJECT_REPONAME)-$(CIRCLE_BRANCH)`. This allows for deploying - temporary test sandboxes for PR's. -- If `CIRCLE_BRANCH` is set to `master` or `main`, `KUBE_NAMESPACE` will be set to `production`. - -To override default context and/or namespace: - -- Context: `KUBE_CONTEXT=gke_pantheon-internal_us-west1_general-04` -- Abbreviated context: `CLUSTER_DEFAULT=general-04` -- Namespace: `KUBE_NAMESPACE=namespace` -- Template sandbox: `KUBE_NAMESPACE=template-sandbox` - - is equivalent to: `KUBE_NAMESPACE=template-sandbox CLUSTER_DEFAULT=sandbox-02` - -### Multi-cluster deployments -Common make creates all of the connection information for main kubernetes clusters that we interact -with. These are set up to be used as contexts for your `kubectl` commands for switching between clusters. -If the context is not specified then commands will default to the cluster specified in CLUSTER_DEFAULT, -or if not specified then `general-01`. - -Example: -`kubectl --context [cluster-context] get pods -n [namespace]` - -This will get the pods on the cluster and namespace specified. This should be -the long cluster name. - -#### Default Behavior -By default, all common make tools will use the default cluster (as set for `kubectl` tooling); in addition it will create connections to each of the other clusters. - -#### Deploying to Other Clusters (not `general-01`) -To uniformly deploy to a different cluster than the default you can specify - -`CLUSTER_DEFAULT=[short-cluster-name]` - -inside your Makefile to point everything to that deployment cluster. - -#### Deploying to Many Clusters -To do this you must specify the --context argument to your `kubectl` calls inside your common make primary -makefile. The context will specify for that command what cluster to deploy to. For example: - -If I am deploying everything to general-01, except for the CronJob: - -Using this for resources deploying to general-01: -`kubectl --context general-01 create ...` - -Using this for the CronJob, deploying to general-02: -`kubectl --context general-02 create ...` - -### lint-kubeval:: - -Runs `kubeval` by default on all YAMLs found in `KUBE_YAMLS_PATH` (defaults to `./devops/k8s/`) in the repository, but -it will skip files in `KUBE_YAMLS_PATH/configmaps` and any path with substring `template.`. - -If `KUBE_YAMLS_PATH` is not present, `lint-kubeval` is skipped. 
-
-If `KUBE_YAMLS_PATH` is present but the command `kubeval` is not installed, `lint-kubeval` fails with an error message:
-
-```
-devops/make/common-kube.mk:186: *** "kubeval is not installed! please install it.". Stop.
-```
-
-This task is added to the global `lint` task.
-
-#### customizing
-
-Set these variables to change the behavior:
-- `KUBE_YAMLS_PATH` -- Defaults to `./devops/k8s`. Extend it so it references a
-  subdirectory, eg `KUBE_YAMLS_PATH=./devops/k8s/manifests/database`, so you can
-  1. deploy a separate database component without deploying anything else
-  2. dump templated yaml for just that component in a separate path and avoid false
-     linting errors
-- `KUBE_YAMLS_EXCLUDED_PATHS` -- Defaults to `configmaps` but can be a space-delimited
-  list of subdirectories to ignore.
-- `KUBE_YAMLS` -- you can override all of this by just specifying a space-delimited list
-  of file paths relative to the root of your repo.
-- `SKIP_KUBEVAL` -- if defined, the call to lint-kubeval from common-kube.mk will be skipped.
-  This provides the opportunity to render the project's configuration files before calling lint-kubeval.
-
-### force-pod-restart::
-
-Nuke the pod in the current `KUBE_NAMESPACE`.
-
-### update-secrets::
-
-Requires the `$APP` variable to be set.
-Requires the `$KUBE_NAMESPACE` variable to be set.
-Requires **one** of these directories to have files meant to be applied:
-- ./devops/k8s/secrets/[KUBE_CONTEXT]/production/
-- ./devops/k8s/secrets/[KUBE_CONTEXT]/[NAMESPACE]/
-- ./devops/k8s/secrets/[KUBE_CONTEXT]/non-prod/
-- ./devops/k8s/secrets/production
-- ./devops/k8s/secrets/[NAMESPACE]
-- ./devops/k8s/secrets/non-prod/
-
-These secrets can be created in two ways:
-1. From a set of files in a directory named after the
-   secret. Each file will use its name as a key name for the secret
-   and the data in the file as the value.
-2. From a 'literal' map. Make a file that has a set of k=v pairs in it,
-   one per line. Each line will have its data split into secret keys and values.
-
-_How it works:_
-
-Put secrets into files in a directory such as
-`./devops/k8s/secrets/non-prod/[namespace]`, then
-run `make update-secrets KUBE_NAMESPACE=[namespace]` to upload the secrets
-to the specified namespace in a volume named `$APP-certs`. The
-`./devops/k8s/secrets/[namespace]/` directory is checked first; if it doesn't exist,
-the task defaults to looking in `./devops/k8s/secrets/non-prod`.
-
-In general, the most specific directory that exists corresponding to the specified
-`KUBE_CONTEXT`/`KUBE_NAMESPACE` is read from, so if you create
-./devops/k8s/secrets/[KUBE_CONTEXT]/production/, anything in ./devops/k8s/secrets/production
-will not be seen during `make update-secrets` for that [KUBE_CONTEXT].
-
-NOTE: The `$APP` variable will be prepended to the volume name. eg:
-A directory path of `./devops/k8s/secrets/template-sandbox/certs` and `APP=foo` will create a
-secret volume named `foo-certs` in the template-sandbox namespace.
-
-_Directory Example:_
-
-The Directory method can be used for anything that you want translated directly into a kube secret from a file. This could be an RSA key or an entire JSON file from GCE. The contents of the file will be encoded using base64 and added to the kube secret.
-
-Move the file to a directory under the last level of the path used for literal files.
In the example below, the extra 'api-keys' directory under 'production' causes the
-`make update-secrets` command below to create the kube object from a directory of files
-instead of from a literal file. Additionally, naming the directory 'api-keys' appends that
-name to the 'app' name, making the final secret name in Kube "app-api-keys".
-
-```
-# for production:
-
-$ mkdir -p ./devops/k8s/secrets/production/api-keys
-$ echo -n "secret-API-key!" >./devops/k8s/secrets/production/api-keys/key1.txt
-$ echo -n "another-secret-API-key!" >./devops/k8s/secrets/production/api-keys/key2.txt
-$ make update-secrets KUBE_NAMESPACE=production APP=foo
-
-# cleanup secrets, do not check them into git!
-$ rm -rf -- ./devops/k8s/secrets/*
-```
-
-Verify the volume was created and contains the expected files:
-
-```
-$ kubectl describe secret foo-api-keys --namespace=production
-Name:       foo-api-keys
-Namespace:  production
-Labels:     app=foo
-
-Type:   Opaque
-
-Data
-====
-key1.txt:   15 bytes
-key2.txt:   22 bytes
-```
-
-_Literal File Example_
-
-Make a file with k=value pairs, and name it what you want the secret to be called.
-```
-$ cat ./devops/k8s/secrets/non-prod/foo-secrets
-secret1=foo
-secret2=bar
-secret3=baz
-```
-
-Apply the secrets:
-```
-$ make update-secrets KUBE_NAMESPACE=template-sandbox
-```
-
-Verify the secret's contents:
-```
-$ kubectl describe secrets myapp-foo-secrets --namespace=template-sandbox
-Name:        myapp-foo-secrets
-Namespace:   template-sandbox
-Labels:      app=myapp
-Annotations: <none>
-
-Data
-====
-secret1:     3 bytes
-secret2:     3 bytes
-secret3:     3 bytes
-```
-
-_Labels_
-
-By default an `app=$APP` label will be applied to the secrets. This can be
-overridden by setting the `LABELS` environment variable. You should almost always
-include the `app` label in addition to any other labels. This allows for easily
-tying together a deployment with its secrets and is necessary for proper cloning
-by [pants](https://github.com/pantheon-systems/pants).
-
-Example `Makefile` task to set custom `LABELS`:
-
-```make
-update-secrets:: LABELS="app=$(APP),cos-system-service=true"
-```
-
-### clean-secrets::
-
-Delete all uncommitted files and directories in ./devops/k8s/secrets,
-including the directory itself.
-
-Executed by default with common target `clean::`.
-
-### update-configmaps::
-
-Requires the `$APP` variable to be set.
-Requires the `$KUBE_NAMESPACE` variable to be set.
-Requires one of these directories to have files meant to be applied:
-- ./devops/k8s/configmaps/[KUBE_CONTEXT]/production/
-- ./devops/k8s/configmaps/[KUBE_CONTEXT]/[NAMESPACE]/
-- ./devops/k8s/configmaps/[KUBE_CONTEXT]/non-prod/
-- ./devops/k8s/configmaps/production/
-- ./devops/k8s/configmaps/[NAMESPACE]/
-- ./devops/k8s/configmaps/non-prod/
-
-Use this task to upload Kubernetes configmaps.
-
-_How it works:_
-
-There are 2 types of configmaps that can be used: a configmap compiled from a set
-of files in a directory, or a 'literal' map. A directory of files is what it sounds
-like; make a directory and put files in it. Each file will use its name as a key
-name for the configmap, and the data in the file as the value. A literal map is
-a file that has a set of k=v pairs in it, one per line. Each line will have its
-data split into configmap keys and values. _BEWARE_ that the value should not be
-quoted: due to how shell interpolation happens when passing these k=v pairs
-to kubectl, quoted strings will end up as literal quoted strings in the kube configmap.
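-
-For instance (an illustrative literal file; the file and key names here are made up):
-
-```
-# hypothetical literal configmap file
-$ cat ./devops/k8s/configmaps/non-prod/example-conf
-plain=debug
-quoted="debug"
-```
-
-After `make update-configmaps`, the `plain` key holds `debug`, while the `quoted` key holds
-`"debug"`, quotes included.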
- -Put a file or directory in the proper namespace e.g. -`./devops/k8s/configmaps/[namespace]/[map-name]` then run `make update-configmaps` -this will update template-sandbox namespace by default. If you need to use a different -namespace provide that to the make command environment: -`make update-configmaps KUBE_NAMESPACE=[namespace]`. If the [namespace]; directory -does not exist and your `KUBE_NAMESPACE` is not `'production'` then the script will -use configmaps defined in `./devops/k8s/configmaps/non-prod/`. This allows you to -apply configmaps to your kube sandbox without having to pollute the directories. - -In general, the most specific directory that exists corresponding to the specified -`KUBE_CONTEXT`/`KUBE_NAMESPACE` is read from, so if you create -./devops/k8s/configmaps/[KUBE_CONTEXT]/production/, anything in ./devops/k8s/configmaps/production -will not be seen when during `make update-configmaps` for that [KUBE_CONTEXT]. - -NOTE: The `$APP` variable will be prepended to the configmap name. eg: -A directory path of `./devops/k8s/configmaps/template-sandbox/config-files` and -`APP=foo` will create a configmap named `foo-config-files` in the `template-sandbox` -namespace. - -_Directory Example:_ - -Make the map directory. Given the app named foo this will become a configmap named foo-nginx-config -``` -$ mkdir -p ./devops/k8s/configmaps/non-prod/nginx-config -``` - -Put your app config in the directory you just created -``` -$ ls ./devops/k8s/configmaps/non-prod/nginx-config -common-location.conf common-proxy.conf common-server.conf nginx.conf verify-client-ssl.conf websocket-proxy.conf -``` - -Apply the map with the make task -``` -$ make update-configmaps KUBE_NAMESPACE=sandbox-foo -# this error is fine, it would say deleted if it existed -Error from server: configmaps "foo-nginx-config" not found -configmap "foo-nginx-config" created -configmap "foo-nginx-config" labeled -``` - -Verify the volume was created and contains the expected files: - -``` -$ kubectl describe configmap foo-nginx-config --namespace=sandbox-foo -kubectl describe configmap foo-nginx-config --namespace=sandbox-foo -Name: foo-nginx-config -Namespace: sandbox-foo -Labels: app=foo -Annotations: - -Data -==== -verify-client-ssl.conf: 214 bytes -websocket-proxy.conf: 227 bytes -common-location.conf: 561 bytes -common-proxy.conf: 95 bytes -common-server.conf: 928 bytes -nginx.conf: 2357 bytes -``` - -_Literal File Example_ - -Make a file with k=value pairs, and name it what you want the map to be called. -Given I am in 'myapp' using commonmake and I run these configs the resultant map -will be 'myapp-foo-config' -``` -$ cat ./devops/k8s/configmaps/non-prod/foo-conf -setting1=foo -setting2=bar -setting3=baz -``` - -Apply the map -``` -$ make update-configmaps KUBE_NAMESPACE=sandbox-foo -``` - -Verify the map contents -``` -$ kubectl describe configmap myapp-foo-config --namespace=sandbox-foo -Name: myapp-foo-config -Namespace: sandbox-foo -Labels: app=myapp -Annotations: - -Data -==== -setting1: 3 bytes -setting2: 3 bytes -setting3: 3 bytes -``` - -_Labels_ - -By default a `app=$APP` label will be applied to the configmaps. This can be -overridden by setting the `LABELS` environment variable. You should almost always -include the `app` label in addition to any other labels. 
This allows for easily -tying together a deployment with its configmaps and is necessary for proper cloning -by [pants](https://github.com/pantheon-systems/pants) - -Example `Makefile` task to set custom `LABELS`: - -```make -update-configmaps:: LABELS="app=$(APP),cos-system-service=true" -``` - -### verify-deployment-rollout:: - -Checks for a successful rollout of a Kubernetes Deployment. This would typically be called in your CI/CD pipeline's `deploy` step. If the rollout fails an attempt will be made to rollback to the previous Deployment. The rollback may also fail, however, and you are responsible for confirming the status of your service. - -Note that this is only intended for use with Kubernetes Deployment resources (eg: not StatefulSets, CronJobs) - -common-python3.mk ----------------- - -### Input Environment Variables: - -- `PYTHON_PACKAGE_NAME`: The name of the python package. -- `IMAGE`: The image to use for the build. If also using `common-docker.mk`, this should already be defined. - -### check-format-python3:: - -Run `pipenv run black --check --skip-string-normalization --line-length 120 .` in the current project directory. - -This task is added to the global `lint` task. - -### lint-python3:: - -Runs `pipenv run pylint $(PYTHON_PACKAGE_NAME)`. - -This task is added to the global `lint` task. - -### test-python3-docker:: - -Runs tests in a Docker image with the following command: -``` -docker run $(IMAGE) .venv/bin/python setup.py test -``` - -### deps-python3:: - -Install this project's Python runtime and dev dependencies. - -NOTE: Currently assumes this project is using `pipenv` for dependency management. - -This task is added to the global `deps` task. - -### deps-python3-docker:: - -Install this project's Python runtime dependencies. - -NOTE: Currently assumes this project is using `pipenv` for dependency management. - - -common-kustomize.mk ----------------- - -Provides targets for deploying apps to kube using [kustomize](https://kustomize.io/). - -By convention, instances (ready-to-deploy kustomizations for an app) are located in `devops/kustomize/instances`. - -The instance path (relative to this directory) is then provided (via `INSTANCE`) to run kustomize using the targets described below. - -For example: `make deploy-kustomize INSTANCE=prod/cluster1` will build `devops/kustomize/instances/prod/cluster1` -and apply it to the current kube cluster/namespace. - -### Input Environment Variables: - -**Required:** - -Only one of the following is required: - -- `INSTANCE`: The kustomize instance (a path relative to `devops/kustomize/instances`) to deploy/diff/build. -- `KUSTOMIZATION`: The full kustomization path. May be provided if your project structure is difference than the default. - -**Optional:** - -- `AR_IMAGE`: The image to deploy. If also using `common-docker-ar.mk`, this should already be defined. -- `IMAGE`: The image to deploy. If also using `common-docker.mk` or `common-docker-quay.mk`, this should already be defined. This will take precedence over `AR_IMAGE`. -- `KUBE_CONTEXT`: Kube context to interact with. If also using `common-kube.mk`, this should already be defined. -- `KUBE_NAMESPACE`: Kube namespace to interact with. If also using `common-kube.mk`, this should already be defined. -- `KUBECTL_CMD`: Path to the `kubectl` binary. If also using `common-kube.mk`, this should already be defined. -- `KUSTOMIZE_CMD`: Path to the `kustomize` binary. (Automatically determined from `$PATH` if not provided). 
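-
-For example, assuming an instance at `devops/kustomize/instances/sandbox/my-sandbox`
-(an illustrative path), you could render, diff, and then apply it with the targets
-described below:
-
-```
-# print the rendered manifests without applying them
-make build-kustomize INSTANCE=sandbox/my-sandbox
-
-# show what would change, then deploy it
-make diff-kustomize INSTANCE=sandbox/my-sandbox
-make deploy-kustomize INSTANCE=sandbox/my-sandbox
-```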
- -### build-kustomize:: - -Builds an instance kustomization and displays it by printing it to stdout. - -### diff-kustomize:: - -Builds an instance kustomization and diffs it's content against the content in the kube server. - -### deploy-kustomize:: - -Builds an instance kustomization and applies it's content to the kube server. - -Contributing -============ - -make edits here and open a PR against this repo. Please do not push from your -subtree on your project. - -1. Have an idea -2. Get feedback from the beautiful people around you -3. Document your new or modified task in this README -4. Not everyone reads markdown files in a web browser, please try to wrap lines - in this README at or near 80 characters. -5. Paragraphs should be separated by blank lines to facilitate proper rendering - in both web and text formats. -6. Commit on a branch (please squash closely related commits into contextually - single commits) -7. Send PR - -Versioning -============ -This repository uses SemVer style versioning. We use [autotag](https://github.com/pantheon-systems/autotag) -to automatically increment version tags based on commit messages on changes to -the `master` branch. Refer to the [autotag README](https://github.com/pantheon-systems/autotag#scheme-autotag-default) -to learn how to trigger major, minor, and patch tags. If no keywords are -specified a Patch bump is applied. - -Logging -------- - -There are 3 logging functions defined in `common.mk` INFO, WARN, and ERROR. -If you want to have clean output for make tasks you should redirect STDOUT to -/dev/null, and use these logging functions for reporting info to the user: - -```yaml -footask: - $(call INFO, "running footask for $(FOO_VAR)") - dostuff > /dev/null -``` - -When dostuff errors the error will still be reported, and the task will fail. - -Pathfinding --------------- - -Have to do something in a real project where you're using common-make, and it's tedious, -but you don't yet know what the solution is? Try setting `COMMON_MAKE_DIR` to your local -checkout of common_makefiles outside your project, eg: - -``` -COMMON_MAKE_DIR=$HOME/pantheon/common_makefiles -``` - -Now you can iterate on a new feature as you solve your problem. You will naturally test -your change and are likely to yield a more useful and reliable change to ship upstream. -Additionally, you can proceed with no danger of ending up in the ninth circle of subtree -merge hell due to mistakenly trying to do almost anything to the subtree checkout in your -consuming project. - -Common Patterns for adding to the repo --------------------------------------- - -Tasks should follow the form of `--` for example ifs -I have a build task and you want to add windows support you would add as -`build-windows` or if you wanted to add a build for onebox you might dos -`build-onebox-linux` or simply `build-onebox`. - -There is the expectation that if you are doing a context specific task you add -the context to your tasks. I.E. `test-circle`. - -This isn't written in stone, but I think it is a reasonable expectation that -any engineer should be able to checkout any project and run: -`make deps && make build && make test` to get things running / testing. - -Adding support for a new language ---------------------------------- - -Programming languages tend to share a similar set of common tasks like `test`, -`build`, `deps`. Commmon-make tries to handle this situation by setting a list -of rules and guidelines for adding support for a new language. 
- -There are a set of global tasks defined in the `common.mk`, in particular: - -- `deps` -- `lint` -- `test` -- `test-coverage` -- `build` -- `clean` - -To add support for a new language, follow this pattern: - -- Create the new file: `common-LANG.sh` -- Create a task specific and unique to this language: `test-LANG:` -- Add this task to the global test task: `test:: test-LANG` - -The reason for this pattern is: - -- It allows running specific test suites, eg: `test-go` to only test go files. -- It keeps the `help` output sane. Otherwise the last-read file would win and - your users would see a help message like `test Run all go tests` which may - not be completely accurate if the project includes multiple common-LANG files. -- Supports running all of a project's tests and builds as the default case. - -README.md updates ------------------ - -When updating this README, run `make update-readme-toc` before committing to update -the table of contents. - -Ensure your text wraps at 80 columns. Exceptions made for code and command line -examples as well as URLs. - -Handy Make stuff -================ - -Introduction to make Slide deck -http://martinvseticka.eu/temp/make/presentation.html - -The make cheet sheet -https://github.com/mxenoph/cheat_sheets/blob/master/make_cheatsheet.pdf - -The make Manual -https://www.gnu.org/software/make/manual/make.html diff --git a/devops/make/_base.mk b/devops/make/_base.mk deleted file mode 100644 index 9fbe3f3..0000000 --- a/devops/make/_base.mk +++ /dev/null @@ -1,13 +0,0 @@ -# sets some useful variables - -ifndef COMMON_MAKE_BASE_INCLUDED - - # probably not a good idea to override this - ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) - - # override to use eg your own checkout for making pr's to upstream - COMMON_MAKE_DIR ?= $(ROOT_DIR)/devops/make - - COMMON_MAKE_BASE_INCLUDED := true - -endif diff --git a/devops/make/_docker.mk b/devops/make/_docker.mk deleted file mode 100644 index a936d87..0000000 --- a/devops/make/_docker.mk +++ /dev/null @@ -1,108 +0,0 @@ -ifndef COMMON_MAKE_DOCKER_INCLUDED - -# Docker common things -# -# INPUT VARIABLES -# - None -# -# EXPORT VARIABLES -# - BUILD_NUM: The build number for this build. Will use pants default sandbox -# if not on circleCI, if that isn't available will defauilt to 'dev'. -# If it is in circle will use CIRCLE_BUILD_NUM otherwise. -#------------------------------------------------------------------------------- - -export PATH := $(PATH):$(HOME)/google-cloud-sdk/bin - -# By default `lint-hadolint` will fail if hadolint is not installed. -# Set this to `no` to disable this behavior and make `lint-hadolint` pass silently instead. 
-REQUIRE_DOCKER_LINT ?= yes -_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) - -include $(_DIR)/_base.mk - -## Append tasks to the global tasks -lint:: lint-hadolint - -# use pants if it exists outside of circle to get the default namespace and use it for the build -ifndef CIRCLECI - BUILD_NUM := $(shell pants config get default-sandbox-name 2> /dev/null || echo dev)-$(COMMIT) -endif -BUILD_NUM ?= dev - -# TODO: the docker login -e email flag logic can be removed when all projects stop using circleci 1.0 or -# if circleci 1.0 build container upgrades its docker > 1.14 -ifdef CIRCLE_BUILD_NUM - BUILD_NUM := $(CIRCLE_BUILD_NUM) - ifeq (email-required, $(shell docker login --help | grep -q Email && echo email-required)) - QUAY := docker login -p "$$QUAY_PASSWD" -u "$$QUAY_USER" -e "unused@unused" quay.io - else - QUAY := docker login -p "$$QUAY_PASSWD" -u "$$QUAY_USER" quay.io - endif -endif - -# If we have a circle branch, tag the image with it -ifdef CIRCLE_BRANCH - BUILD_NUM := $(BUILD_NUM)-$(shell echo "${CIRCLE_BRANCH}" | tr -cd '[:alnum:]_-') -endif - -DOCKER_TRY_PULL ?= false -# Should we rebuild the tag regardless of whether it exists locally or elsewhere? -DOCKER_FORCE_BUILD ?= true -# Should we include build arguments? -DOCKER_BUILD_ARGS ?= "" -# Should we bypass the default push step? -# Overriding this flag in your makefile is useful for custom push logic. -DOCKER_BYPASS_DEFAULT_PUSH ?= false - -# use pants if it exists outside of circle to get the default namespace and use it for the build -ifndef CIRCLECI - BUILD_NUM := $(shell pants config get default-sandbox-name 2> /dev/null || echo dev)-$(COMMIT) -endif -BUILD_NUM ?= dev - -# TODO: the docker login -e email flag logic can be removed when all projects stop using circleci 1.0 or -# if circleci 1.0 build container upgrades its docker > 1.14 -ifdef CIRCLE_BUILD_NUM - BUILD_NUM := $(CIRCLE_BUILD_NUM) - ifeq (email-required, $(shell docker login --help | grep -q Email && echo email-required)) - QUAY := docker login -p "$$QUAY_PASSWD" -u "$$QUAY_USER" -e "unused@unused" quay.io - else - QUAY := docker login -p "$$QUAY_PASSWD" -u "$$QUAY_USER" quay.io - endif -endif - -# If we have a circle branch, tag the image with it -ifdef CIRCLE_BRANCH - BUILD_NUM := $(BUILD_NUM)-$(shell echo "${CIRCLE_BRANCH}" | tr -cd '[:alnum:]_-') -endif - -# if there is a docker file then set the docker variable so things can trigger off it -ifneq ("$(wildcard Dockerfile))","") -# file is there - DOCKER:=true -endif - -DOCKER_BUILD_CONTEXT ?= . - -# stub build-linux std target -build-linux:: - -## Append tasks to the global tasks -lint:: lint-hadolint - -DOCKERFILES := $(shell find . -name 'Dockerfile*' -not -path "./devops/make*") -lint-hadolint:: ## lint Dockerfiles -ifdef DOCKERFILES - ifneq (, $(shell command -v hadolint;)) - $(call INFO, "running hadolint for $(DOCKERFILES)") - hadolint $(DOCKERFILES) - else - ifeq (yes,${REQUIRE_DOCKER_LINT}) - $(error "In order to lint docker files, hadolint is required. Please install it and re-run lint.") - endif - endif -endif - -.PHONY:: build-docker lint-hadolint - -endif # ifndef COMMON_MAKE_DOCKER_INCLUDED diff --git a/devops/make/common-apollo.mk b/devops/make/common-apollo.mk deleted file mode 100644 index 1abdd98..0000000 --- a/devops/make/common-apollo.mk +++ /dev/null @@ -1,22 +0,0 @@ -# Common Apollo Tasks -# -# INPUT VARIABLES -# - APP: (required) The name of the app. 
-# - GQL_SCHEMA_PATH: (required) path to the schema.graphqls file -# -#------------------------------------------------------------------------------- - - -update-apollo-schema: ## Updates schema for your app on Apollo Studio - rover subgraph publish pantheon@current \ - --schema $(GQL_SCHEMA_PATH) \ - --name $(APP) \ - --routing-url https://$(APP)/graphql/ - -check-apollo-schema: ## Checks schema changes against production to ensure any changes are compatable - @if rover subgraph list pantheon@current | grep -wq $(APP); then \ - echo "'$(APP)' found in the current Graph, running schema check"; \ - rover subgraph check pantheon@current --schema $(GQL_SCHEMA_PATH) --name $(APP); \ - else \ - echo "'$(APP)' not found in the current Graph, skipping schema check"; \ - fi diff --git a/devops/make/common-conda.mk b/devops/make/common-conda.mk deleted file mode 100644 index 9e84566..0000000 --- a/devops/make/common-conda.mk +++ /dev/null @@ -1,205 +0,0 @@ -# Common Conda Tasks -# -# INPUT VARIABLES -# - TEST_RUNNER: (required) The name of the test runner to execute. Inherited from common-python.mk -# - CONDA_PACKAGE_NAME: (required) The name of your conda package. Used to also name your environment. Defaults to $(PYTHON_PACKAGE_NAME) -# - CONDA_PACKAGE_VERSION: (required) The version of your conda package. Defaults to $(PYTHON_PACKAGE_VERSION) -# - ANACONDA_CLOUD_REPO_TOKEN: (optional) Token to use when reading private conda packages from Anaconda Cloud. This token is required if this package depends on other private packages. For local development this is a personal token connected to your Anaconda Cloud account. For circle this is a token specific to the `pantheon_machines` Anaconda Cloud account and can be found in onelogin secure notes. -# - ANACONDA_CLOUD_DEPLOY_TOKEN: (optional) Required by circle. Token to use when pushing conda packages to Anaconda Cloud. For circle this is a token specific to the `pantheon_machines` Anaconda Cloud account and can be found in onelogin secure notes. -# - ANACONDA_CLOUD_ORGANIZATION: (optional) The name of the organization in Anaconda Cloud. Defaults to `pantheon` -# - CONDA_PACKAGE_LABEL: (optional) The label that will be applied to the conda package on deployment. Defaults to `main` -# -#------------------------------------------------------------------------------- - - -# The name that will be used for the conda package and environment. -ifeq (,$(CONDA_PACKAGE_NAME)) -CONDA_PACKAGE_NAME:=$(PYTHON_PACKAGE_NAME) -endif -# The version of the conda package. -ifeq (,$(CONDA_PACKAGE_VERSION)) -CONDA_PACKAGE_VERSION:=$(PYTHON_PACKAGE_VERSION) -endif -# The label that will be attached to the conda package deployed to Anaconda Cloud -CONDA_PACKAGE_LABEL := main -CONDA_BASE ?= $(HOME)/conda -CONDA_DEPLOYMENT_PREFIX ?= $(CONDA_BASE)/envs -MINICONDA_PATH := $(HOME)/miniconda2.sh -MINICONDA_VERSION ?= latest - -# The name of the organization account we use in Anaconda Cloud -ANACONDA_CLOUD_ORGANIZATION:=pantheon -# Default to exported environment variables if they are set and exist. 
-# This approach is used in circle and local development -ANACONDA_CLOUD_DEPLOY_TOKEN:=$(shell echo $${ANACONDA_CLOUD_DEPLOY_TOKEN}) -ANACONDA_CLOUD_REPO_TOKEN:=$(shell echo $${ANACONDA_CLOUD_REPO_TOKEN}) -# FQDN for the anaconda cloud api -ANACONDA_CLOUD_API_FQDN:=api.anaconda.org -# FQDN for conda packages in anaconda cloud -ANACONDA_CLOUD_CONDA_FQDN:=conda.anaconda.org -# FQDN for pypi packages in anaconda cloud (downloading of private pypi packages is a paid feature that we dont currently support) -ANACONDA_CLOUD_PYPI_FQDN:=pypi.anaconda.org - -ACTIVE_CONDA_ENVIRONMENT:=$(shell basename $${CONDA_DEFAULT_ENV:-'null'}) - -CONDA_BIN := $(shell command -v conda;) -CONDA_BUILD_BIN := $(shell command -v conda-build;) -ANACONDA_CLIENT_BIN := $(shell command -v anaconda;) - -## Append tasks to the global tasks -deps:: deps-conda -deps-circle:: deps-conda add-conda-private-channel -build:: build-conda -clean:: clean-conda -setup:: setup-conda - -## Assert that there is no active conda environment before allowing certain targets. -_assert-conda-env-not-active: -ifeq ("$(ACTIVE_CONDA_ENVIRONMENT)", "null") -else - $(error "This target is protected and should not be run inside an active conda environment.\ - The active environment is '$(ACTIVE_CONDA_ENVIRONMENT)'. To deactivate the environment, \ - run: 'source deactivate' Then try again.") -endif - -## Assert that the active conda environment is the one for the project before allowing certain targets. -_assert-conda-env-active: -ifeq ("$(ACTIVE_CONDA_ENVIRONMENT)", "_test") - $(warning "The active conda environment is '$(ACTIVE_CONDA_ENVIRONMENT)'. Allowing the target to run anyway.") -else -ifneq ($(ACTIVE_CONDA_ENVIRONMENT), $(CONDA_PACKAGE_NAME)) - $(error "The active conda environment is '$(ACTIVE_CONDA_ENVIRONMENT)'. This target expects \ - the active environment to be '$(CONDA_PACKAGE_NAME)'. If you have not yet created \ - the environment, run: 'conda env create' To activate the environment,\ - run: 'source activate $(CONDA_PACKAGE_NAME)'") -endif -endif - -clean-conda:: ## Removes index cache, lock files, tarballs, unused cache packages, and source cache. - conda clean --all -y - -setup-conda:: setup-conda-environment - -setup-conda-environment:: _assert-conda-env-not-active ## Attempts to setup the conda virtual environment from yaml -ifeq (,$(wildcard ./environment.yml)) - $(error "No environment file found at ./environment.yml") -else - conda env create || conda env update -endif - -build-conda:: ## Build conda package for project with current arch - conda build --check recipe - # Runs build, test and post steps - conda build recipe --no-anaconda-upload - -build-conda-environment:: ## Create a conda environment from the locally built package - # NOTE: All packages within the environment will be written with absolute paths using this prefix - # This means that a relative prefix doesnt make sense unless the the environment is put in the - # exact same place inside the container - conda create -y --copy --prefix $(CONDA_DEPLOYMENT_PREFIX) --use-local $(CONDA_PACKAGE_NAME) - -build-conda-deployment-environment:: build-conda build-conda-environment ## Build and create an environment from the locally built package - - -deps-conda:: _assert-conda-env-not-active -deps-conda:: _conda-install -deps-conda:: _conda-add-path -deps-conda:: _conda-update -deps-conda:: _conda-config -deps-conda:: _anaconda-client-install -deps-conda:: _conda-build-install -deps-conda:: ## For local development only. 
Install conda, anaconda client and conda-build - -# Download the latest miniconda shell script -_conda-download:: -ifeq (, $(wildcard $(MINICONDA_PATH))) -ifeq (Darwin, $(shell uname -s)) - curl -sLo $(MINICONDA_PATH) https://repo.continuum.io/miniconda/Miniconda2-$(MINICONDA_VERSION)-MacOSX-x86_64.sh -else -ifeq (x86_64, $(shell uname -m)) - curl -sLo $(MINICONDA_PATH) https://repo.continuum.io/miniconda/Miniconda2-$(MINICONDA_VERSION)-Linux-x86_64.sh -else - curl -sLo $(MINICONDA_PATH) https://repo.continuum.io/miniconda/Miniconda2-$(MINICONDA_VERSION)-Linux-x86.sh -endif -endif -endif - -# Run the miniconda installation script -ifndef CONDA_BIN -_conda-install:: _conda-download - bash $(MINICONDA_PATH) -b -p $(CONDA_BASE) -else -_conda-install:: -endif - -_conda-add-path:: -ifeq (, $(shell grep "$(CONDA_BASE)/bin" $(HOME)/.bashrc)) - # Conda wont be added to path until bashrc is sourced - echo -e '\n# Conda path added by common-conda.mk\nexport PATH=$(CONDA_BASE)/bin:$$PATH' >> ~/.bashrc -endif - -_conda-update:: - # Conda wont be added to path until bashrc is sourced - $(CONDA_BASE)/bin/conda update -y conda - -_conda-config:: - # Conda wont be added to path until bashrc is sourced - $(CONDA_BASE)/bin/conda config --set anaconda_upload no - $(CONDA_BASE)/bin/conda config --prepend channels $(ANACONDA_CLOUD_ORGANIZATION) - $(CONDA_BASE)/bin/conda config --append channels conda-forge - -# Install the anaconda client. Used for making API request to Anaconda Cloud -_anaconda-client-install:: -ifndef ANACONDA_CLIENT_BIN - # Conda wont be added to path until bashrc is sourced - $(CONDA_BASE)/bin/conda install -y anaconda-client -endif - -# Install conda-build. A plugin used to build and test conda packages. -_conda-build-install:: -ifndef CONDA_BUILD_BIN - # Conda wont be added to path until bashrc is sourced - $(CONDA_BASE)/bin/conda install -y conda-build -endif - -reset-conda-environment:: _assert-conda-env-not-active ## Reset a conda environment by removing and reinstalling all of its packages. - conda remove --name $(CONDA_PACKAGE_NAME) --all -y - conda env update - -deploy-conda:: ## Deploys the latest built conda package to Anaconda Cloud -ifeq (, $(ANACONDA_CLOUD_DEPLOY_TOKEN)) - $(error "You asked to deploy a pypi package to '$(ANACONDA_CLOUD_ORGANIZATION)' channel on Anaconda Cloud but `ANACONDA_CLOUD_DEPLOY_TOKEN` was not set.") -else - conda build -q --user $(ANACONDA_CLOUD_ORGANIZATION) --token $(ANACONDA_CLOUD_DEPLOY_TOKEN) recipe -endif - -deploy-conda-pypi:: ## Deploys the latest built pypi package to Anaconda Cloud -ifeq (, $(ANACONDA_CLOUD_DEPLOY_TOKEN)) - $(error "You asked to deploy a pypi package to '$(ANACONDA_CLOUD_ORGANIZATION)' channel on Anaconda Cloud \ - but `ANACONDA_CLOUD_DEPLOY_TOKEN` was not set.") -endif -ifeq (, $(CONDA_PACKAGE_VERSION)) - $(error "You asked to deploy a pypi package to '$(ANACONDA_CLOUD_ORGANIZATION)' channel on Anaconda Cloud \ - but `CONDA_PACKAGE_VERSION` was not set.") -endif - anaconda --token $(ANACONDA_CLOUD_DEPLOY_TOKEN) upload -u $(ANACONDA_CLOUD_ORGANIZATION) --label $(CONDA_PACKAGE_LABEL) --no-register --force dist/$(CONDA_PACKAGE_NAME)-$(CONDA_PACKAGE_VERSION).tar.gz - -test-conda:: ## Run the test suite against a pre-built conda package in an isolated test environment - conda build --test recipe - -# Personal tokens only have access to packages added to the personal account and the pantheon organizations developers security group if the personal account has been added. 
-regenerate-anaconda-cloud-repo-token:: ## A helper to generate a personal read-only token for downloading private conda packages suitable for local development. If not logged into anaconda client this will present an interactive console - @anaconda auth --remove private_repo || exit 0 - @anaconda auth --create --name private_repo --scopes 'conda:download' - -add-conda-private-channel:: _assert-conda-env-not-active ## Adds the pantheon private channel for downloading conda packages from Anaconda Cloud -ifeq (,$(ANACONDA_CLOUD_REPO_TOKEN)) - $(error "You asked to add the private '$(ANACONDA_CLOUD_ORGANIZATION)' channel to your conda configuration but `ANACONDA_CLOUD_REPO_TOKEN` was not set.") -else - conda config --add channels https://$(ANACONDA_CLOUD_CONDA_FQDN)/t/$(ANACONDA_CLOUD_REPO_TOKEN)/$(ANACONDA_CLOUD_ORGANIZATION) -endif - -generate-conda-requirements: ## Helper to generate a full dependency tree of this conda environment into a requirements_full.txt - if [ -a requirements_full.txt ] ; then rm requirements_full.txt ; fi; - conda env export | grep "\- \w*[=]" | sed "s/\s*[-]\s*//g" | sed "s/\(\w*\)[=]\([0-9][^=]*\)[=]\w[^=]*/\1==\2/g" &> requirements_full.txt - diff --git a/devops/make/common-django.mk b/devops/make/common-django.mk deleted file mode 100644 index a89f9c4..0000000 --- a/devops/make/common-django.mk +++ /dev/null @@ -1,95 +0,0 @@ -# Common Django app Tasks -# -# INPUT VARIABLES -# - APP: (required) The name of the app. -# - PYTHON_PACKAGE_NAME: (required) The base python package name. -# - KUBE_NAMESPACE: The namespace to run the migrations in -# - KUBE_CONTEXT: The kube context representing the cluster for the namespace -# - KUBECTL_CMD: The path to kubectl -#------------------------------------------------------------------------------- - -MIGRATE_APP=$(APP)-migrate -APP_DATABASE?=$(subst -,_,$(APP)) - -ifeq ($(KUBE_NAMESPACE), production) - MIGRATE_COMMAND := '/app/.venv/bin/$(APP)', 'migrate', '--settings', '$(PYTHON_PACKAGE_NAME).settings.kube_production' - PROJECT := pantheon-internal -else - MIGRATE_COMMAND := '/app/.venv/bin/$(APP)', 'migrate', '--settings', '$(PYTHON_PACKAGE_NAME).settings.kube_sandbox' - PROJECT := pantheon-sandbox -endif - -test:: test-django - -test-django: - DJANGO_SETTINGS_MODULE=$(PYTHON_PACKAGE_NAME).settings.circleci pipenv run py.test - -mysql-init-local: mysql-dump-local - @mysql -uroot -e "CREATE USER '$(APP_DATABASE)'@'localhost' IDENTIFIED BY '$(APP_DATABASE)'; \ - GRANT ALL PRIVILEGES ON * . * to '$(APP_DATABASE)'@'localhost'; \ - FLUSH PRIVILEGES;" - @./create-local-settings.sh $(APP_DATABASE) - -mysql-dump-local: - @gcloud sql export sql $(APP)-failover gs://$(APP)/database/dump -d production --project pantheon-internal - @gsutil cp gs://$(APP)/database/dump /tmp/dump - @mysql -uroot -e 'DROP DATABASE IF EXISTS $(APP_DATABASE)' - @sed -i -e "s@\`production\`@\`$(APP_DATABASE)\`@" /tmp/dump - @mysql -uroot &1 > /dev/null; . 
$$BASH_ENV; echo $$VAULT_TOKEN) - export VAULT_TOKEN -endif - -build-docker:: build-docker-ar ## build the docker container - -build-docker-ar:: build-linux - @FORCE_BUILD=$(DOCKER_FORCE_BUILD) TRY_PULL=$(DOCKER_TRY_PULL) \ - $(COMMON_MAKE_DIR)/sh/build-docker.sh \ - $(AR_IMAGE) $(DOCKER_BUILD_CONTEXT) $(DOCKER_BUILD_ARGS) - -ifeq ("$(DOCKER_BYPASS_DEFAULT_PUSH)", "false") -push:: push-ar ## push the container to the registry -else -push:: -endif - -setup-ar:: -ifdef CIRCLE_BUILD_NUM - $(COMMON_MAKE_DIR)/sh/setup-circle-ar-docker.sh; -endif - -push-ar:: setup-ar -push-ar:: - $(call INFO,"Pushing image $(AR_IMAGE)") - docker push $(AR_IMAGE); - -.PHONY:: push-ar build-docker-ar diff --git a/devops/make/common-docker-quay.mk b/devops/make/common-docker-quay.mk deleted file mode 100644 index 4ac35ad..0000000 --- a/devops/make/common-docker-quay.mk +++ /dev/null @@ -1,62 +0,0 @@ -# INPUT VARIABLES -# - QUAY_USER: The quay.io user to use (usually set in CI) -# - QUAY_PASSWD: The quay passwd to use (usually set in CI) -# - IMAGE: the docker image to use. will be computed if it doesn't exist. -# - REGISTRY: The docker registry to use. Defaults to quay. -# -# EXPORT VARIABLES -# - IMAGE: The image to use for the build. -# - REGISTRY: The registry to use for the build. -# - IMAGE_BASENAME: The image without the tag field on it.. i.e. foo:1.0.0 would have image basename of 'foo' -# - REGISTRY_PATH: Registry url and repo name -#------------------------------------------------------------------------------- - -_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) - -include $(_DIR)/_base.mk -include $(_DIR)/_docker.mk - -REGISTRY ?= quay.io/getpantheon -IMAGE := $(REGISTRY)/$(APP):$(BUILD_NUM) -QUAY_IMAGE=$(IMAGE) - -# because users can supply image, we substring extract the image base name -IMAGE_BASENAME := $(firstword $(subst :, ,$(IMAGE))) -QUAY_IMAGE_BASENAME := $(IMAGE_BASENAME) - -REGISTRY_PATH := $(REGISTRY)/$(APP) - -build-docker:: build-docker-quay ## build the docker container - -build-docker-quay:: setup-quay build-linux - @FORCE_BUILD=$(DOCKER_FORCE_BUILD) TRY_PULL=$(DOCKER_TRY_PULL) \ - $(COMMON_MAKE_DIR)/sh/build-docker.sh \ - $(IMAGE) $(DOCKER_BUILD_CONTEXT) $(DOCKER_BUILD_ARGS) - -ifeq ("$(DOCKER_BYPASS_DEFAULT_PUSH)", "false") -push:: push-quay ## push the container to the registry -else -push:: -endif - -push-quay:: setup-quay - $(call INFO,"pushing image $(IMAGE)") - @docker push $(IMAGE) - -setup-quay:: - # setup docker login for quay.io -ifdef CIRCLE_BUILD_NUM - ifndef QUAY_PASSWD - $(call ERROR, "Need to set QUAY_PASSWD environment variable.") - endif - ifndef QUAY_USER - $(call ERROR, "Need to set QUAY_USER environment variable.") - endif - $(call INFO, "Setting up quay login credentials.") - @$(QUAY) > /dev/null -else - $(call INFO, "No docker login unless we are in CI.") - $(call INFO, "We will fail if the docker config.json does not have the quay credentials.") -endif - -.PHONY:: setup-quay build-docker-quay push-quay lint-hadolint diff --git a/devops/make/common-docker.mk b/devops/make/common-docker.mk deleted file mode 100644 index 1f0f1c9..0000000 --- a/devops/make/common-docker.mk +++ /dev/null @@ -1,14 +0,0 @@ -# Docker common things -# -# INPUT VARIABLES -# - None -# -# EXPORT VARIABLES -# - BUILD_NUM: The build number for this build. Will use pants default sandbox -# if not on circleCI, if that isn't available will defauilt to 'dev'. -# If it is in circle will use CIRCLE_BUILD_NUM otherwise. 
-#------------------------------------------------------------------------------- - -_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) - -include $(_DIR)/common-docker-quay.mk diff --git a/devops/make/common-docs.mk b/devops/make/common-docs.mk deleted file mode 100644 index ca58041..0000000 --- a/devops/make/common-docs.mk +++ /dev/null @@ -1,19 +0,0 @@ -## Append tasks to the global tasks -test:: test-readme-toc - -## doc related tasks -test-readme-toc: ## test if table of contents in README.md needs to be updated - $(call INFO, "validating documentation TOC") - @if grep -q '' ./README.md; then \ - bash -c "diff -c --ignore-blank-lines --strip-trailing-cr \ - <(cat ./README.md | docker run --rm -i quay.io/getpantheon/markdown-toc -; echo) \ - <(cat ./README.md | awk '//{flag=1;next}//{flag=0}flag' | sed '1d;\$$d')\ - " > /dev/null 2>&1 \ - || { echo "ERROR: README.md table of contents needs updating. Run 'make update-readme-toc', commit and push changes to your branch."; exit 1; } \ - fi - -update-readme-toc: ## update the Table of Contents in ./README.md (replaces tag) - $(call INFO, "updating documentation TOC") - @docker run --rm -v `pwd`:/src quay.io/getpantheon/markdown-toc -i /src/README.md > /dev/null - -.PHONY:: test-readme-toc update-readme-toc diff --git a/devops/make/common-kube.mk b/devops/make/common-kube.mk deleted file mode 100644 index 9eb35bd..0000000 --- a/devops/make/common-kube.mk +++ /dev/null @@ -1,201 +0,0 @@ -# Common kube things. This is the simplest set of common kube tasks -# -# INPUT VARIABLES -# - APP: should be defined in your topmost Makefile -# - SECRET_FILES: list of files that should exist in secrets/* used by -# _validate_secrets task -# -# EXPORT VARIABLES -# - KUBE_NAMESPACE: represents the kube namespace that has been detected based on -# branch build and circle existence. -# - KUBE_CONTEXT: set this variable to whatever kubectl reports as the default -# context -# - KUBECTL_CMD: sets up cubectl with the namespace + context for easy usage -# in top level make files -#------------------------------------------------------------------------------- - -# gcloud auth split out of kubect so we need to set this to make things work. -# We may need to add this to some common circle context to make this less painful. 
-USE_GKE_GCLOUD_AUTH_PLUGIN ?= true -export USE_GKE_GCLOUD_AUTH_PLUGIN - -_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) - -# Defaults (change with docs) -_DEFAULT_PROD_CONTEXT := gke_pantheon-internal_us-central1_general-01 -_DEFAULT_SANDBOX_CONTEXT := gke_pantheon-sandbox_us-central1_sandbox-01 -_TEMPLATE_SANDBOX_CONTEXT := gke_pantheon-sandbox_us-east4_sandbox-02 - -include $(_DIR)/_base.mk - - -## Append tasks to the global tasks -deps-circle:: deps-circle-kube -lint:: - ifndef SKIP_KUBEVAL - make lint-kubeval - endif - - ifdef SKIP_KUBEVAL - echo "Skipping KUBEVAL because env var 'SKIP_KUBEVAL' is defined" - endif - -clean:: clean-kube - -# Contexts and namespaces -# -# Defaults: -# -# | CircleCI | Branch | Default context | Default namespace | -# |----------|---------------|-----------------|------------------------| -# | Yes | master / main | general-01 | production | -# | Yes | Other branch | sandbox-01 | sandbox-[APP]-[BRANCH] | -# | No | Any branch | [pants default] | [pants default] | -# -# Overrides: -# -# - Context: KUBE_CONTEXT=gke_pantheon-internal_us-west1_general-04 -# - Abbreviated context: CLUSTER_DEFAULT=general-04 -# - Namespace: KUBE_NAMESPACE=namespace -# - Template sandbox: KUBE_NAMESPACE=template-sandbox -# ... equivalent to: KUBE_NAMESPACE=template-sandbox CLUSTER_DEFAULT=sandbox-02 - -# this fetches the long name of the cluster -ifdef CLUSTER_DEFAULT - KUBE_CONTEXT ?= $(shell kubectl config get-contexts | grep $(CLUSTER_DEFAULT) | tr -s ' ' | cut -d' ' -f2) -endif - -# Use pants to divine the namespace on local development, if unspecified. -ifndef CIRCLECI - KUBE_NAMESPACE ?= $(shell pants config get default-sandbox-name 2> /dev/null) - KUBE_CONTEXT ?= $(shell pants sandbox | grep targetcluster | awk '{ print $$2 }') -endif - -# Default kube context based on above rules -ifndef KUBE_CONTEXT - KUBE_CONTEXT := $(_DEFAULT_SANDBOX_CONTEXT) - - ifneq ($(filter $(BRANCH),$(DEFAULT_BRANCHES)),) # master or main - KUBE_CONTEXT := $(_DEFAULT_PROD_CONTEXT) - endif -endif - -# Default kube namespace based on above rules -ifndef KUBE_NAMESPACE - # If on circle and not on master, build into a sandbox environment. 
- # lower-cased for naming rules of sandboxes - BRANCH_LOWER := $(shell echo $(BRANCH) | tr A-Z a-z) - KUBE_NAMESPACE := sandbox-$(APP)-$(BRANCH_LOWER) - - ifneq ($(filter $(BRANCH),$(DEFAULT_BRANCHES)),) # master or main - KUBE_NAMESPACE := production - endif -else - KUBE_NAMESPACE := $(shell echo $(KUBE_NAMESPACE) | tr A-Z a-z) -endif - -ifndef UPDATE_GCLOUD - UPDATE_GCLOUD := true -endif - -ifndef LABELS - LABELS := app=$(APP) -endif - -# template-sandbox lives in sandbox-02, force it to always use that cluster -ifeq ($(KUBE_NAMESPACE), template-sandbox) - KUBE_CONTEXT := $(_TEMPLATE_SANDBOX_CONTEXT) -endif - -KUBECTL_CMD=kubectl --namespace=$(KUBE_NAMESPACE) --context=$(KUBE_CONTEXT) - -# extend or define circle deps to install gcloud -ifeq ($(UPDATE_GCLOUD), true) - deps-circle-kube:: install-update-kube setup-kube -else - deps-circle-kube:: setup-kube -endif - -install-update-kube:: - $(call INFO, "updating or install gcloud cli") - @if command -v gcloud >/dev/null; then \ - $(COMMON_MAKE_DIR)/sh/update-gcloud.sh > /dev/null ; \ - else \ - $(COMMON_MAKE_DIR)/sh/install-gcloud.sh > /dev/null ; \ - fi - -setup-kube:: - $(call INFO, "setting up gcloud cli") - @$(COMMON_MAKE_DIR)/sh/setup-gcloud.sh - -update-secrets:: ## update secret volumes in a kubernetes cluster - $(call INFO, "updating secrets for $(KUBE_NAMESPACE) in $(KUBE_CONTEXT)") - @APP=$(APP) KUBE_NAMESPACE=$(KUBE_NAMESPACE) KUBE_CONTEXT=$(KUBE_CONTEXT) LABELS=$(LABELS) \ - $(COMMON_MAKE_DIR)/sh/update-kube-object.sh $(ROOT_DIR)/devops/k8s/secrets > /dev/null - -update-configmaps:: ## update configmaps in a kubernetes cluster - $(call INFO, "updating configmaps for $(KUBE_NAMESPACE) in $(KUBE_CONTEXT)") - @APP=$(APP) KUBE_NAMESPACE=$(KUBE_NAMESPACE) KUBE_CONTEXT=$(KUBE_CONTEXT) LABELS=$(LABELS) \ - $(COMMON_MAKE_DIR)/sh/update-kube-object.sh $(ROOT_DIR)/devops/k8s/configmaps > /dev/null - -clean-secrets:: ## delete local secrets - $(call INFO, "cleaning local Kube secrets") - @git clean -dxf $(ROOT_DIR)/devops/k8s/secrets - -clean-kube:: clean-secrets - -verify-deployment-rollout:: ## validate that deployment to kube was successful and rollback if not - @$(KUBECTL_CMD) rollout status deployment/$(APP) --timeout=10m \ - | grep 'successfully' && echo 'Deploy succeeded.' && exit 0 \ - || echo 'Deploy unsuccessful. Rolling back. Investigate!' \ - && $(KUBECTL_CMD) rollout undo deployment/$(APP) && exit 1 - -# set SECRET_FILES to a list, and this will ensure they are there -_validate-secrets:: - @for j in $(SECRET_FILES) ; do \ - if [ ! -e secrets/$$j ] ; then \ - echo "Missing file: secrets/$$j" ;\ - exit 1 ; \ - fi \ - done - -# legacy compat -ifdef YAMLS - KUBE_YAMLS ?= YAMLS -endif - -KUBE_YAMLS_PATH ?= ./devops/k8s -KUBE_YAMLS_EXCLUDED_PATHS ?= configmaps - -KUBEVAL_SKIP_CRDS ?= -ifneq (,$(KUBEVAL_SKIP_CRDS)) - KUBEVAL_SKIP_CRDS := --ignore-missing-schemas -endif - -ifndef KUBE_YAMLS_CMD - KUBE_YAMLS_CMD := find . -path '$(KUBE_YAMLS_PATH)/*' \ - $(foreach kube_excluded,$(KUBE_YAMLS_EXCLUDED_PATHS),\ - -not -path '$(KUBE_YAMLS_PATH)/$(kube_excluded)/*') \ - \( -name '*.yaml' -or -name '*.yml' \) -endif - -ifdef KUBEVAL_SKIP_TEMPLATES - KUBE_YAMLS_CMD := $(KUBE_YAMLS_CMD) | grep -vF 'template.' 
-endif - -# use subshell to allow dependency tasks to update manifests -KUBEVAL_CMD := kubeval --strict $(KUBEVAL_SKIP_CRDS) $$($(KUBE_YAMLS_CMD)) -ifdef KUBE_YAMLS - KUBEVAL_CMD := kubeval --strict $(KUBEVAL_SKIP_CRDS) $(KUBE_YAMLS) -endif - -lint-kubeval:: ## validate kube yamls - ifneq (, $(wildcard ${KUBE_YAMLS_PATH}/*)) - ifeq (, $(shell command -v kubeval;)) - $(error "kubeval is not installed! please install it.") - else - ${KUBEVAL_CMD} - endif - endif - -.PHONY:: deps-circle force-pod-restart diff --git a/devops/make/common-kustomize.mk b/devops/make/common-kustomize.mk deleted file mode 100644 index 503e511..0000000 --- a/devops/make/common-kustomize.mk +++ /dev/null @@ -1,94 +0,0 @@ -# Provides deploy targets which use kustomize to apply kube manifests to a kube server. -# -# TARGETS -# -# - build-kustomize: builds the provided kustomization and prints it to stdout -# - deploy-kustomize: builds the provided kustomization and applies it to the kube cluster -# - diff-kustomize: builds the provided kustomization and diffs it with the contents within the kube cluster -# -# INPUT VARIABLES -# -# Required: (only one of the two are required) -# - INSTANCE: the instance of the app to deploy -# - KUSTOMIZATION: the path to the kustomization to deploy -# (defaults to devops/kustomize/instances/$INSTANCE) -# -# Optional: -# - KUSTOMIZE_CMD: path to the kustomize command line utility -# (determined from $PATH if not provided) -# -# Variables set by other makefiles: -# - COMMON_MAKE_DIR: path to the common makefiles directory (common.mk) -# - IMAGE: the image to deploy (common-docker.mk) -# - KUBE_NAMESPACE: the namespace to deploy to (common-kube.mk) -# - KUBE_CONTEXT: the kube context to deploy to (common-kube.mk) -# - KUBECTL_CMD: path to the kubectl command line utility (common-kube.mk) -# -# EXPORT VARIABLES -# (None) -#------------------------------------------------------------------------------- - -# override when using recursive make -COMMON_MAKE_DIR ?= $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))/devops/make -include $(COMMON_MAKE_DIR)/_base.mk - - -ifndef KUSTOMIZATION - ifdef INSTANCE - # If KUSTOMIZATION is not provided, but INSTANCE is, set KUSTOMIZATION appropriately. - KUSTOMIZATION := devops/kustomize/instances/$(INSTANCE) - endif - # If neither is provided, an error is raised by the _check_kustomize_vars target -endif - -ifndef KUSTOMIZE_CMD - ifneq (, $(shell command -v kustomize;)) - KUSTOMIZE_CMD := kustomize - endif -endif - -ifdef IMAGE - TARGET_IMAGE := $(IMAGE) -else - TARGET_IMAGE := $(AR_IMAGE) -endif - - -.PHONY:: build-kustomize deploy-kustomize diff-kustomize _check_kustomize_vars - - -build-kustomize:: _check_kustomize_vars - @cd $(KUSTOMIZATION) && $(KUSTOMIZE_CMD) edit set image $(TARGET_IMAGE) - @# Note: kubectl <=1.20 uses an outdated embedded version of kustomize. - @# See https://github.com/kubernetes-sigs/kustomize#kubectl-integration - @# Prefer invoking the kustomize tool directly, as it's more likely to be up-to-date. - @$(KUSTOMIZE_CMD) build $(KUSTOMIZATION) - - -deploy-kustomize:: _check_kustomize_vars - @cd $(KUSTOMIZATION) && $(KUSTOMIZE_CMD) edit set image $(TARGET_IMAGE) - @# Note: kubectl <=1.20 uses an outdated embedded version of kustomize. - @# See https://github.com/kubernetes-sigs/kustomize#kubectl-integration - @# Prefer invoking the kustomize tool directly, as it's more likely to be up-to-date. 
- $(KUSTOMIZE_CMD) build $(KUSTOMIZATION) | $(KUBECTL_CMD) apply -f - - - -diff-kustomize:: _check_kustomize_vars - @cd $(KUSTOMIZATION) && $(KUSTOMIZE_CMD) edit set image $(TARGET_IMAGE) - @# Note: kubectl <=1.20 uses an outdated embedded version of kustomize. - @# See https://github.com/kubernetes-sigs/kustomize#kubectl-integration - @# Prefer invoking the kustomize tool directly, as it's more likely to be up-to-date. - $(KUSTOMIZE_CMD) build $(KUSTOMIZATION) | $(KUBECTL_CMD) diff -f - - - -_check_kustomize_vars: -ifndef KUSTOMIZATION - $(error "KUSTOMIZATION is not set. You must provide INSTANCE or KUSTOMIZATION as input variables" ) -endif -ifndef TARGET_IMAGE - $(error "TARGET_IMAGE is not set. You must provide IMAGE (usually by including common-docker.mk or common-docker-quay.mk in your project Makefile) or AR_IMAGE (by including common-docker-ar.mk)" ) -endif -ifndef KUSTOMIZE_CMD - $(error "kustomize is not installed. You must have kustomize installed to use common-kustomize.mk" ) -endif - diff --git a/devops/make/common-pants.mk b/devops/make/common-pants.mk deleted file mode 100644 index 875ec14..0000000 --- a/devops/make/common-pants.mk +++ /dev/null @@ -1,85 +0,0 @@ -# install and configure pants on circle-ci -# -# The following ENV vars must be set before calling this script: -# -# GITHUB_TOKEN # Github Personal Access token to read the private repository -# -# Optional: -# PANTS_VERSION_CONSTRAINT # Version constraint for pants install to satisfy. Default is "latest". Format is latest or x.y.z (0.1.47) -# PANTS_INCLUDE # Services for pants to include. Default is all. -# PANTS_EXCLUDE # Services for pants to exclude. Default is none. -# PANTS_SANDBOX_NAME # Name of sandbox. Default is ${APP}-${BUILD_NUM}. -# -PANTS_VERSION_CONSTRAINT ?= latest -PANTS_UPDATE_ONEBOX ?= false -PANTS_SANDBOX_NAME ?= $(shell echo $(KUBE_NAMESPACE) | tr A-Z a-z) # lowercased for compatibility -PANTS_SANDBOX_CLUSTER ?= $(KUBE_CONTEXT) -PANTS_DEBUG ?= false -PANTS_FLAGS ?= -s $(PANTS_SANDBOX_NAME) --update-onebox=$(PANTS_UPDATE_ONEBOX) --target-cluster=$(PANTS_SANDBOX_CLUSTER) --skip-machines - -ifdef PANTS_INCLUDE - PANTS_FLAGS += -i $(PANTS_INCLUDE) -endif -ifdef PANTS_EXCLUDE - PANTS_FLAGS += -e $(PANTS_EXCLUDE) -endif -ifeq ($(PANTS_DEBUG), true) - PANTS_FLAGS += -d -endif -PANTS_INIT_CMD := pants sandbox init $(PANTS_FLAGS) -ifdef CIRCLECI - # skip app/dbserver creation during regular builds - PANTS_INIT_CMD += --skip-machines -endif -PANTS_UPDATE_CMD := pants sandbox update $(PANTS_FLAGS) -PANTS_DELETE_CMD := pants sandbox delete $(PANTS_FLAGS) - -## append to the global task -deps-circle:: create-circle-paths install-circle-pants - -create-circle-paths: - $(shell mkdir -p $$HOME/bin) - $(shell echo 'export PATH=$$PATH:$$HOME/bin' >> $$BASH_ENV) - -install-circle-pants: -ifndef GITHUB_TOKEN - $(call ERROR, "This task needs a GITHUB_TOKEN environment variable") -endif - $(call INFO, "Installing pants version satisfying: $(PANTS_VERSION_CONSTRAINT)") - @PANTS_VERSION_CONSTRAINT=$(PANTS_VERSION_CONSTRAINT) ./devops/make/sh/install-pants.sh - $(call INFO, "Installed pants version" $(shell pants version)) - -delete-circle-pants:: delete-pants-sandbox ## TODO: remove the alias once $REASONS don't apply - -delete-pants-sandbox:: ## deletes pants sandbox -ifneq ($(KUBE_NAMESPACE), production) # prod - $(call INFO, "Deleting sandbox with command $(PANTS_DELETE_CMD)") - @$(PANTS_DELETE_CMD) 2> /dev/null -endif - -init-circle-pants:: ensure-pants-sandbox -init-circle-pants:: label-ci-ns - -# Initializes 
pants sandbox. Fails if pants can't proceed, eg because the namespace already has deployments. -init-pants-sandbox:: - $(call INFO, " --> PANTS_INIT_CMD = $(PANTS_INIT_CMD) <-- ") - $(call INFO, "Initializing sandbox \'$(PANTS_SANDBOX_NAME)\' with command \'$(PANTS_INIT_CMD)\'") - @$(PANTS_INIT_CMD) 2> /dev/null - -# Creates the sandbox if it doesn't exist or updates it if it does. -ensure-pants-sandbox:: - $(call INFO, "Using sandbox \'$(PANTS_SANDBOX_NAME)\'.") - @$(PANTS_INIT_CMD) 2> /dev/null || $(PANTS_UPDATE_CMD) 2> /dev/null - -# Labels the sandbox namespace so that garbage collection can delete namespaces -# or PRs that no longer exist. -# Can't keep the whole URL without violating max label length -label-ci-ns: -ifdef CIRCLECI - $(call INFO, "Adding labels to namespace: $(PANTS_SANDBOX_NAME)") - @$(KUBECTL_CMD) label --overwrite ns $(PANTS_SANDBOX_NAME) \ - time="$(shell date "+%Y-%m-%d---%H-%M-%S")" \ - repo=$$CIRCLE_PROJECT_REPONAME \ - pr="$(shell echo $$CIRCLE_PULL_REQUEST | sed 's/.*\///')" \ - build=$$CIRCLE_BUILD_NUM -endif diff --git a/devops/make/common-python.mk b/devops/make/common-python.mk deleted file mode 100644 index 0d67acc..0000000 --- a/devops/make/common-python.mk +++ /dev/null @@ -1,136 +0,0 @@ -# Common Python Tasks -# -# INPUT VARIABLES -# - PYTHON_PACKAGE_NAME: (required) The name of the python package. -# - TEST_RUNNER: (optional) The name of the python test runner to execute. Defaults to `unittest` -# - TEST_RUNNER_ARGS: (optional) Extra arguements to pass to the test runner. Defaults to `discover` -# - COVERALLS_TOKEN: (optional) Token to use when pushing coverage to coveralls (`test-coveralls`). -# -#------------------------------------------------------------------------------- - - -TEST_RUNNER ?= unittest -TEST_RUNNER_ARGS ?= discover - -# Python dependencies -FLAKE8_BIN := $(shell command -v flake8;) -PYLINT_BIN := $(shell command -v pylint;) -COVERAGE_BIN := $(shell command -v coverage;) -COVERALLS_BIN := $(shell command -v coveralls;) -BUMPVERSION_BIN := $(shell command -v bumpversion;) - -# TODO(joe): remove after we confirm any github repos using common-python.mk have switched their circleCI config to use COVERALLS_TOKEN -# Note to future selves: query all pantheon github repos with a `/Makefile` containing "common-python-mk": https://github.com/search?utf8=%E2%9C%93&q=org%3Apantheon-systems+common-python.mk+filename%3AMakefile+path%3A%2F&type=Code&ref=advsearch&l=&l= -ifdef COVERALLS_REPO_TOKEN -COVERALLS_TOKEN = $(COVERALLS_REPO_TOKEN) -$(warning "Environment variable COVERALLS_REPO_TOKEN is deprecated, please switch this project's CI config to use COVERALLS_TOKEN instead) -endif - -## Append tasks to the global tasks -deps:: deps-python -deps-circle:: deps-circle-python -lint:: lint-python -test:: test-python lint coverage-report -test-coverage:: test-coverage-python -test-circle:: test test-circle-python -build:: build-python - -build-python:: ## Build python source distribution. How packages are built is determined by setup.py - python setup.py sdist - -# Python tasks -develop-python:: ## Enable setup.py develop mode. Useful for local development. Disable develop mode before installing. 
- python setup.py develop - -undevelop-python:: ## Disable setup.py develop mode - python setup.py develop --uninstall - -deps-python:: deps-testrunner-python deps-lint-python deps-coverage-python - -deps-testrunner-python:: deps-testrunner-trial - -deps-testrunner-trial:: -ifeq ("$(TEST_RUNNER)", "twisted.trial") -ifeq (,$(shell command -v trial;)) - pip install twisted -endif -endif - -deps-lint-python:: deps-pylint deps-flake8 - -deps-pylint:: -ifndef PYLINT_BIN - pip install pylint -endif - -deps-flake8:: -ifndef FLAKE8_BIN - pip install flake8 -endif - -deps-coverage-python:: -ifndef COVERAGE_BIN - pip install coverage -endif - -deps-circle-python:: deps-coveralls-python ## Install python dependencies for circle - -deps-coveralls-python:: -ifdef COVERALLS_REPO_TOKEN -ifndef COVERALLS_BIN - pip install coveralls -endif -endif - -deps-bumpversion-python: -ifndef BUMPVERSION_BIN - pip install bumpversion -endif - -lint-python:: lint-pylint lint-flake8 - -# Pylint is a Python source code analyzer which looks for programming errors, helps enforcing a coding standard and sniffs for some code smells -# (as defined in Martin Fowler's Refactoring book). Pylint can also be run against any installed python package which is useful for catching -# misconfigured setup.py files. -lint-pylint:: deps-pylint ## Performs static analysis of your "installed" package. Slightly different rules then flake8. Configuration file '.pylintrc' - pylint $(PYTHON_PACKAGE_NAME) - -# Flake8 is a combination of three tools (Pyflakes, pep8 and mccabe). Flake8 performs static analysis of your source code -lint-flake8:: deps-flake8 ## Performs static analysis of your code, including adherence to pep8 (pep8) and conditional complexity (McCabe). Configuration file '.flake8' -ifeq ("", "$(wildcard $(PYTHON_PACKAGE_NAME))") - # Because flake8 cannot be run against installed packages we emit a warning to allow the global lint target to proceed. - # This preserves flexibility and relies on pyint for installed packages. - $(call WARN, "You asked to run flake8 on your source files but could not find them at './$(PYTHON_PACKAGE_NAME)'") -else - flake8 --show-source --statistics --benchmark $(PYTHON_PACKAGE_NAME) -endif - -test-python:: test-coverage-python - -test-circle-python:: test-coveralls-python - -test-coverage-python:: deps-testrunner-python deps-coverage-python ## Run tests and generate code coverage. Configuration file '.coveragerc' - coverage run --branch --source $(PYTHON_PACKAGE_NAME) -m $(TEST_RUNNER) $(TEST_RUNNER_ARGS) $(PYTHON_PACKAGE_NAME) - -coverage-report: ## Display the coverage report. Requires that make test has been run. - coverage report - -test-coveralls-python:: deps-coveralls-python ## run coverage and report to coveralls -ifdef COVERALLS_TOKEN - coveralls -else - $(call ERROR, "COVERALLS_TOKEN is not set. Skipping coveralls reporting") -endif - -bumpmicro: bumppatch ## Bump the micro (patch) version of the python package. Configuration file '.bumpversion.cfg' - -bumppatch: deps-bumpversion ## Alias for bumpmicro - bumpversion patch - -bumpminor: deps-bumpversion ## Bump the minor version of the python package. Configuration file '.bumpversion.cfg' - bumpversion minor - -bumpmajor: deps-bumpversion ## Bump the major version of the python package. 
Configuration file '.bumpversion.cfg' - bumpversion major - -.PHONY:: deps-coverage-python deps-circle-python deps-lint-python deps-coveralls-python deps-pylint deps-flake8 test-python test-circle-python test-coveralls-python build-python test-coverage-python coverage-report test-circle test-circle-python bumpmicro bumpminor bumpmajor bumppatch diff --git a/devops/make/common-python3.mk b/devops/make/common-python3.mk deleted file mode 100644 index de13aff..0000000 --- a/devops/make/common-python3.mk +++ /dev/null @@ -1,31 +0,0 @@ -# Common Python 3 Tasks -# -# INPUT VARIABLES -# - PYTHON_PACKAGE_NAME: (required) The name of the python package. -# - IMAGE: (required) The docker image to use. -#------------------------------------------------------------------------------- - -export PATH := $(PATH):$(HOME)/.local/bin - -deps:: deps-python3 -lint:: check-format-python3 lint-python3 - -check-format-python3: - pipenv run black --check --skip-string-normalization --line-length 120 . - -lint-python3: - pipenv run pylint $(PYTHON_PACKAGE_NAME) - -test-python3-docker: - docker run $(IMAGE) .venv/bin/python setup.py test - -deps-python3: - pip3 install pipenv --user - pipenv clean - pipenv install --dev - pipenv run python setup.py develop - -deps-python3-docker: - pip3 install pipenv - pipenv install - /app/.venv/bin/python setup.py develop diff --git a/devops/make/common-shell.mk b/devops/make/common-shell.mk deleted file mode 100644 index 07c09fa..0000000 --- a/devops/make/common-shell.mk +++ /dev/null @@ -1,42 +0,0 @@ -# See README.md for docs: https://github.com/pantheon-systems/common_makefiles - -export PATH := $(PATH):$(HOME)/bin - -## Append tasks to the global tasks -lint:: lint-shell -lint-shell:: test-shell -test:: test-shell -test-circle:: test-shell -deps-circle:: deps-circle-shell - -# version of shellcheck to install from deps-circle -SHELLCHECK_VERSION := 0.7.1 - -SHELLCHECK_BIN := $(shell command -v shellcheck;) - -ifndef SHELL_SOURCES - SHELL_SOURCES := $(shell find . \( -name '*.sh' -or -name '*.bats' \) -not -path '*/_*/*') -endif - -test-shell:: ## run shellcheck tests -ifdef SHELL_SOURCES - $(call INFO, "running shellcheck for $(SHELL_SOURCES)") - shellcheck -x $(SHELL_SOURCES) -endif - -_install_shellcheck: - $(call INFO, "Installing shellcheck $(SHELLCHECK_VERSION)") - @curl -s -L https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION}/shellcheck-v${SHELLCHECK_VERSION}.linux.x86_64.tar.xz | tar --strip-components=1 -xJvf - shellcheck-v${SHELLCHECK_VERSION}/shellcheck - @mkdir -p $$HOME/bin - @mv shellcheck $$HOME/bin - -deps-circle-shell:: -ifndef SHELLCHECK_BIN -deps-circle-shell:: _install_shellcheck -else -ifneq ($(SHELLCHECK_VERSION), $(shell "$(SHELLCHECK_BIN)" -V | awk '/version:/ {print $$2}')) -deps-circle-shell:: _install_shellcheck -endif -endif - -# TODO: add some patterns for integration tests with bats. 
example: https://github.com/joemiller/creds diff --git a/devops/make/go.mod b/devops/make/go.mod deleted file mode 100644 index 5a9059a..0000000 --- a/devops/make/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pantheon-systems/common_makefiles - -go 1.16 diff --git a/devops/make/main.go b/devops/make/main.go deleted file mode 100644 index a120ff7..0000000 --- a/devops/make/main.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "fmt" - -func main() { - fmt.Println("hello from common make") -} diff --git a/devops/make/sh/build-docker.sh b/devops/make/sh/build-docker.sh deleted file mode 100755 index 7dbb8bb..0000000 --- a/devops/make/sh/build-docker.sh +++ /dev/null @@ -1,50 +0,0 @@ -#! /bin/bash - -set -eu -o pipefail - -function log() { - echo "(build-docker)" "$@" -} - -function build() { - local image - local context - local docker_build_args - - image=$1 - shift - context=$1 - shift - docker_build_args=("$@") - log "building docker image $image" - # shellcheck disable=SC2068 - docker build --pull ${docker_build_args[@]} -t "$image" "$context" -} - -IMAGE="$1" -shift -CONTEXT="$1" -shift -DOCKER_BUILD_ARGS="$*" - -if [[ -z $IMAGE ]]; then - echo "Usage: $0 IMAGE" - exit 1 -fi -FORCE_BUILD=${FORCE_BUILD:-false} -TRY_PULL=${TRY_PULL:-false} -if [[ $FORCE_BUILD == true ]]; then - log "forcing docker build, not checking existence" - build "$IMAGE" "$CONTEXT" "$DOCKER_BUILD_ARGS" - exit -fi -if [[ $(docker images -q "$IMAGE" | wc -l) -gt 0 ]]; then - log "found $IMAGE locally, not building" - exit -fi -if [[ $TRY_PULL == true ]]; then - log "attempting to pull docker image $IMAGE" - docker pull "$IMAGE" &>/dev/null && exit; - log "unable to pull image, proceeding to build" -fi -build "$IMAGE" "$CONTEXT" "$DOCKER_BUILD_ARGS" diff --git a/devops/make/sh/create-tls-cert.sh b/devops/make/sh/create-tls-cert.sh deleted file mode 100755 index db254bc..0000000 --- a/devops/make/sh/create-tls-cert.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash -# shellcheck disable=SC2029 - -# Required environment vars: -# OU: The certificate 'OU' field -# CN: The certificate common-name (typically a DNS name or email addr) -# FILENAME: Basename for cert and key files. eg: FILENAME=foo: foo.key, foo.crt, foo.pem - -# Optional environment vars: -# DIRECTORY: Directory to store downloaded certs in. Defaults to current dir ('.') -# CA_HOST: Defaults to 'cimt.getpantheon.com' (production CA). Use your onebox address to create a development/sandbox cert. -# SANS: SubjectAltNames to add to the cert. The CN will automatically be added to the SAN list so you don't need to add it. -# The format is OpenSSL's SAN format documented here: https://www.openssl.org/docs/man1.0.2/apps/x509v3_config.html (Subject Alt Name section) -# Example - 1 DNS name, 1 ipv4 address, 1 ipv6 address: -# SANS="DNS:foo;IP:10.0.0.1:IP:2001::1" -# -# CASSANDRA: "true" to generate a Java Keystore (.ks file) that Cassandra requires -# CASSANDRA_CA_CERT_FILE: Full path to the CA file, used to generate the certs, when CASSANDRA=yes -# CASSANDRA_CA_KEY_FILE: Full path to the key file, used to generate the certs, when CASSANDRA=yes -# -# Notes on Cassandra flags: -# - If USE_ONEBOX_CA=true and CASSANDRA=true, then the CA `onebox_cassandra_ca.{crt,key} will be used. -# - Currently Valhalla Cassandra shares a CA, in future we would like to have each one use a separate CA. 
-# -# Usage examples: -# -# - Create a certificate with CN=foo, OU=bar and no extra SANs (subjectAltNames) with filenames: mycert.key, mycert.crt, mycert.pem -# -# CN=foo OU=bar FILENAME=mycert bash ./create-tls-cert.sh -# -# - Add SubjectAltNames "foobar.com", and IP "10.0.0.1": -# -# CN=foo OU=bar FILENAME=mycert SANS="DNS:foobar.com;IP:10.0.0.1" bash ./create-tls-cert.sh -# -# - Issue a development certificate from a onebox (any onebox can be used, so use yours if you have one): -# -# CA_HOST=onebox CN=foo OU=bar FILENAME=mycert bash ./create-tls-cert.sh -# -# - Generate certs for use by Cassandra, using the Ygg CA (Valhalla make commands use this) -# -# CN=valhalla OU=valhalla \ -# CASSANDRA=true \ -# CASSANDRA_CA_CERT_FILE=/etc/pantheon/yggdrasil_ca.crt \ -# CASSANDRA_CA_KEY_FILE=/etc/pantheon/yggdrasil_ca.key \ -# DIRECTORY=./devops/k8s/secrets/production/cassandra-certs \ -# FILENAME=cassandra \ -# bash ./create-tls-cert.sh - -set -eou pipefail - -CA_HOST="${CA_HOST:-cimt.getpantheon.com}" -DIRECTORY="${DIRECTORY:-.}" -FILENAME="${FILENAME:-}" -OU="${OU:-}" -CN="${CN:-}" -SANS="${SANS:-}" -USE_ONEBOX_CA="${USE_ONEBOX_CA:-false}" -CA_CERT_FILE="/etc/pantheon/ca.crt" -CA_KEY_FILE="/etc/pantheon/ca.key" -CASSANDRA="${CASSANDRA:-}" -CASSANDRA_CA_CERT_FILE="${CASSANDRA_CA_CERT_FILE:-}" -CASSANDRA_CA_KEY_FILE="${CASSANDRA_CA_KEY_FILE:-}" - -if [[ "$USE_ONEBOX_CA" == "true" ]]; then - CA_CERT_FILE="/etc/pantheon/onebox_ca.crt" - CA_KEY_FILE="/etc/pantheon/onebox_ca.key" - CASSANDRA_CA_CERT_FILE="/etc/pantheon/onebox_cassandra_ca.crt" - CASSANDRA_CA_KEY_FILE="/etc/pantheon/onebox_cassandra_ca.key" -fi -if [[ "$CASSANDRA" == "true" ]]; then - CA_CERT_FILE="${CASSANDRA_CA_CERT_FILE}" - CA_KEY_FILE="${CASSANDRA_CA_KEY_FILE}" -fi - -if [[ -z "$CA_HOST" ]] || [[ -z "$FILENAME" ]] || [[ -z "$OU" ]] || [[ -z "$CN" ]] || [[ -z "$CA_KEY_FILE" ]] || [[ -z "$CA_CERT_FILE" ]]; then - echo "missing one or more required env vars: CA_HOST, FILENAME, OU, CN, CA_KEY_FILE, CA_CERT_FILE" - exit 1 -fi - -main() { - echo "Creating directory: $DIRECTORY" - mkdir -p "${DIRECTORY}" - - echo "[INFO] SSH'ing to '$CA_HOST' to create MTLS certificate: CN=$CN, OU=$OU, FILENAME=$FILENAME, DIRECTORY=$DIRECTORY, SANS=$SANS, CA_CERT_FILE=$CA_CERT_FILE, CA_KEY_FILE=$CA_KEY_FILE" - ssh "${CA_HOST}" "sudo pantheon pki.create_key:cn='$CN',ou='$OU',san=\"$SANS\",filename='$FILENAME',ca_key='$CA_KEY_FILE',ca_cert='$CA_CERT_FILE',directory='.',noninteractive=True" >/dev/null - - echo "[INFO] Downloading $CA_HOST:$FILENAME.{key,crt,pem}" - scp "${CA_HOST}":"${FILENAME}".{key,crt,pem} "${DIRECTORY}/" >/dev/null - ssh "${CA_HOST}" "sudo rm -f -- ${FILENAME}.{key,crt,pem}" >/dev/null - - echo "[INFO] Downloaded MTLS certificate files (Run 'openssl x509 -text -noout -in $FILENAME.pem' to view certificate):" - - if [[ -n "$CASSANDRA" ]]; then - echo "[INFO] creating java keystore" - CA_FILE_OUT="cassandra_ca.crt" - scp "${CA_HOST}:${CA_CERT_FILE}" "$DIRECTORY/${CA_FILE_OUT}" >/dev/null - openssl pkcs12 \ - -chain -export -password pass:pantheon \ - -CAfile "$DIRECTORY/${CA_FILE_OUT}" \ - -in "$DIRECTORY/${FILENAME}.pem" \ - -out "$DIRECTORY/${FILENAME}.p12" \ - -name cassandra - keytool -importkeystore \ - -srckeystore "$DIRECTORY/${FILENAME}.p12" \ - -destkeystore "$DIRECTORY/${FILENAME}.ks" \ - -srcstoretype pkcs12 \ - -deststoretype JKS \ - -srcstorepass pantheon \ - -storepass pantheon - keytool -importcert \ - -alias ca \ - -file "$DIRECTORY/${CA_FILE_OUT}" \ - -keystore "$DIRECTORY/${FILENAME}.ks" \ - -trustcacerts \ - -noprompt 
\ - -storepass pantheon - fi - ls -l "$DIRECTORY/$FILENAME"* -} -main "$@" diff --git a/devops/make/sh/install-gcloud.sh b/devops/make/sh/install-gcloud.sh deleted file mode 100755 index 96ff6d3..0000000 --- a/devops/make/sh/install-gcloud.sh +++ /dev/null @@ -1,47 +0,0 @@ -#! /bin/bash -# install and configure gcloud on circle-ci -# -# The following ENV vars must be set before calling this script: -# -# GCLOUD_EMAIL # user-id for circle to authenticate to google cloud -# GCLOUD_KEY # base64 encoded key -set -e - -if [[ "$CIRCLECI" != "true" ]]; then - echo "This script is only intended to run on Circle-CI." - exit 1 -fi - -export CLOUDSDK_CORE_DISABLE_PROMPTS=1 -export CLOUDSDK_PYTHON_SITEPACKAGES=0 - -# ensure we use certs to talk to kube, instead of the oauth bridge (google auth creds) -export CLOUDSDK_CONTAINER_USE_CLIENT_CERTIFICATE=True - -# see split of gcloud auth in kubectl https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke -export USE_GKE_GCLOUD_AUTH_PLUGIN=True - -gcloud="$HOME/google-cloud-sdk/bin/gcloud -q --no-user-output-enabled" -PATH="$gcloud/bin:$PATH" - -# circle may have an old gcloud installed we wipe it out cause we bring our own. -# Make sure we remove the default install bashrc modifications, otherwise install.sh -# will create an invalid bashrc -if [ -d "/opt/google-cloud-sdk" ] ; then - sed -ie '/The next line updates PATH/,+3d' "$HOME/.bashrc" - sed -ie '/The next line enables/,+3d' "$HOME/.bashrc" - sudo rm -rf /opt/google-cloud-sdk -fi -if [ ! -d "$HOME/google-cloud-sdk" ]; then - echo "$HOME/google-cloud-sdk missing, installing" - curl -o "$HOME/google-cloud-sdk.tar.gz" https://dl.google.com/dl/cloudsdk/channels/rapid/google-cloud-sdk.tar.gz - tar -C "$HOME/" -xzf ~/google-cloud-sdk.tar.gz - # somehow, .bashrc.backup is owned by root sometimes. This makes `install.sh` fail, so remove it here - sudo rm -f "$HOME/.bashrc.backup" - bash "$HOME/google-cloud-sdk/install.sh" --rc-path "$HOME/.bashrc" --quiet - - $gcloud components update - $gcloud components update kubectl - $gcloud components update gke-gcloud-auth-plugin - -fi diff --git a/devops/make/sh/install-pants.sh b/devops/make/sh/install-pants.sh deleted file mode 100755 index 035d997..0000000 --- a/devops/make/sh/install-pants.sh +++ /dev/null @@ -1,45 +0,0 @@ -#! /bin/bash -set -eou pipefail - -PANTS_VERSION_CONSTRAINT=${PANTS_VERSION_CONSTRAINT:-"latest"} -GITHUB_TOKEN=${GITHUB_TOKEN:-} - -if [ "$CIRCLECI" != "true" ]; then - echo "This script is only intended to run on Circle-CI." - exit 1 -fi - -if ! command -v pants >/dev/null; then - # pants is not installed so install it - echo "Pants is not installed in this env, please consider switching to quay.io/getpantheon/deploy-toolbox:latest. Installing pants..." - - if ! command -v jq >/dev/null; then - echo "JQ is required to install pants. please consider switching to quay.io/getpantheon/deploy-toolbox:latest or install JQ in your image to utilize this script." - exit 1 - fi - - if [[ -z "$GITHUB_TOKEN" ]]; then - echo "GITHUB_TOKEN is required when CI environment does not have pants" - exit 1 - fi - - PANTS_URL="https://$GITHUB_TOKEN:@api.github.com/repos/pantheon-systems/pants/releases" - - JQ_FILTER=".[0].assets | map(select(.name|test(\"pants_.*_linux_amd64\")))[0].id" - if [[ $PANTS_VERSION_CONSTRAINT != "latest" ]]; then - JQ_FILTER=". 
| map(select(.tag_name == \"v$PANTS_VERSION_CONSTRAINT\"))[0].assets | map(select(.name|test(\"pants_.*_linux_amd64\")))[0].id" - fi - - ASSET=$(curl -s "$PANTS_URL" | jq -r "$JQ_FILTER") - if [[ "$ASSET" == "null" ]]; then - echo "Asset Not Found" - exit 1 - fi - - echo "Fetching pants version $PANTS_VERSION_CONSTRAINT, with asset ID $ASSET" - curl -L -o "./pants_${PANTS_VERSION_CONSTRAINT}_linux_amd64" "https://api.github.com/repos/pantheon-systems/pants/releases/assets/$ASSET" \ - -H "Authorization: token $GITHUB_TOKEN" \ - -H 'Accept: application/octet-stream' \ - && mv pants_*_linux_amd64 /bin/pants \ - && chmod 755 /bin/pants -fi diff --git a/devops/make/sh/repo-hash.sh b/devops/make/sh/repo-hash.sh deleted file mode 100755 index 512f9a4..0000000 --- a/devops/make/sh/repo-hash.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# -# Calculate a hash unique to the current state of the code including all uncommitted changes. -# Stages all changes to a temporary git index, then calls `git write-tree`. - -set -euo pipefail - -tmp="$(mktemp)" -trap 'rm -f $tmp' EXIT -cat "$(git rev-parse --git-dir)/index" > "$tmp" -export GIT_INDEX_FILE="$tmp" -git add -A -git write-tree | head -c 7 diff --git a/devops/make/sh/setup-circle-ar-docker.sh b/devops/make/sh/setup-circle-ar-docker.sh deleted file mode 100755 index 3209332..0000000 --- a/devops/make/sh/setup-circle-ar-docker.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -# -# The following ENV vars must be set before calling this script: -# -# VAULT_PATH_METHOD: one of `shared`, `uuid`, `reponame` defaults to `shared` -# `shared` uses the shared GSA stored in vault for authenticating CircleCI jobs -# `uuid` uses the CircleCI project UUID in a path to `secrets/circleci//gsa to fetch the GSA -# `reponame` uses the reponame in a path to `secrets/circleci//gsa to fetch the GSA -# -# optional: -# VAULT_INSTANCE using specific vault instance. 'produciton' by default -# -# setup vault for useage with circle OIDC / jwt identity -# the CIRCLE_OIDC_TOKEN _must_ be present for this to operate. -# We use this token to auth with vault and set the VAULT_TOKEN for jobs -# for more info on how this works see https://github.com/pantheon-systems/vault-kube/blob/master/docs/circleci.md -# -# set VAULT_INSTANCE to 'sandbox' if you want to use sandbox vault otherwise after running this script the VAULT_TOKEN should be setup to talk to production instance of vault. -set -euo pipefail - -# auth docker for talking to AR -setup_ar_docker_repos() { - local VAULT_PATH=$1 - # output the token to make debugging easier - pvault production token lookup | grep -v id >&2 - - # pull the gsa - gsa=$(pvault "$VAULT_INSTANCE" read -field=json_file "$VAULT_PATH") - - # setup all registries to use this identity - registries=(us-docker.pkg.dev gcr.io us.gcr.io eu.gcr.io asia.gcr.io staging-k8s.gcr.io marketplace.gcr.io) - for i in "${registries[@]}" ; do - echo "$gsa" | docker login -u _json_key --password-stdin "https://$i" - done -} - -main() { - bindir="$(cd -P -- "$(dirname -- "$0")" && pwd -P)" - - if [ "$CIRCLECI" != "true" ]; then - echo "This script is only intended to run on Circle-CI." - exit 1 - fi - - if ! command -v jq >/dev/null; then - echo "JQ is required to install pvault." - exit 1 - fi - - if ! command -v pvault > /dev/null ; then - echo "pvault not found on path. Going to try to install it" - "$bindir/setup-circle-vault.sh" - fi - - if ! 
command -v gcloud >/dev/null; then - echo "gcloud missing, running install-gcloud.sh" - "$bindir/install-gcloud.sh" - fi - - if [[ -z ${VAULT_PATH_METHOD:-} ]] ; then - VAULT_PATH_METHOD='shared' - fi - - if [[ -z ${VAULT_TOKEN:-} ]] ; then - echo "Vault token not found, trying to source enviornment." - # shellcheck disable=SC1090 - . "$BASH_ENV" - if [[ -z ${VAULT_TOKEN:-} ]] ; then - echo "Vault token not set in the environment. Maybe you need to run 'setup-circle-vault.sh' first ?" - exit 1 - fi - fi - - VAULT_INSTANCE=${VAULT_INSTANCE:-production} - - # VAULT_PATH_METHOD - selector to understand how to fetch the GSA from vault - case "$VAULT_PATH_METHOD" in - "reponame") VAULT_PATH="secret/circleci/$CIRCLE_PROJECT_REPONAME/gsa" ;; - "uuid") - # CIRCLE doesn't provide the project ID as a built-in environment variable so we extracti it with these shenanigans from the JWT they _do_ provide that does have the info we need. - # the jwt is broken up by a `.` into decodable bits so we extract that - # then the data that should be base64 is not (cause its missing a `=` at the end - # then we jq again to get the actual project id out. YAY! - VAULT_PATH=$(echo "$CIRCLE_OIDC_TOKEN"| jq -rR 'split(".") | .[1]+"="' | base64 -d -i | jq -r '.["oidc.circleci.com/project-id"]') - ;; - "shared") VAULT_PATH="secret/circleci/gsa" ;; - esac - - setup_ar_docker_repos $VAULT_PATH -} - -main "$@" diff --git a/devops/make/sh/setup-circle-vault.sh b/devops/make/sh/setup-circle-vault.sh deleted file mode 100755 index a33f191..0000000 --- a/devops/make/sh/setup-circle-vault.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# This script sets up the circle job to be able to auth to vault. -# It will install vault, and pvault if they do not exist in the build environment. -# -# Authentication to vault is done via the OIDC_TOKEN from the circle job. The script -# by default will setup the VAULT_TOKEN environment variable with the token issued -# after authentication. For more info see https://github.com/pantheon-systems/vault-kube/blob/master/docs/circleci.md -# -# The following ENV vars must be set before calling this script: -# -# GITHUB_TOKEN required for installation of pvault from github releases if pvault is not already in your image. -# by default the deploy-toolbox already has this installed and you shoudln't need to specify it unless -# you are trying to invoke this from a ci image that is not deploy-toolbox -# -# optional: -# VAULT_INSTANCE using specific vault instance. 
'produciton' by default -# -set -euo pipefail -shopt -s inherit_errexit - -SUDO="" -if [[ "$(id -u || true)" -ne 0 ]] && command -v sudo > /dev/null ; then - SUDO="sudo" -fi - -extract_latest_vault_release() { - local raw_html=$1 - echo "${raw_html}" | awk -F_ '$0 ~ /vault_[0-9]+\.[0-9]+\.[0-9]+/, ""); print $2}' | head -n 1 -} - -get_latest_vault_release() { - raw_html=$(curl -Ls --fail --retry 3 https://releases.hashicorp.com/vault/) - extract_latest_vault_release "${raw_html}" -} - -verify_vault() { - local VERSION=$1 - local ARCH=$2 - local PLATFORM=$3 - - curl -s "https://keybase.io/_/api/1.0/key/fetch.json?pgp_key_ids=34365D9472D7468F" | jq -r '.keys | .[0] | .bundle' > hashicorp.asc - gpg --import hashicorp.asc - curl -Os "https://releases.hashicorp.com/vault/${VERSION}/vault_${VERSION}_SHA256SUMS" - curl -Os "https://releases.hashicorp.com/vault/${VERSION}/vault_${VERSION}_SHA256SUMS.sig" - gpg --verify "vault_${VERSION}_SHA256SUMS.sig" "vault_${VERSION}_SHA256SUMS" - grep "${PLATFORM}_${ARCH}.zip" "vault_${VERSION}_SHA256SUMS" | shasum -a 256 - - echo "Verified Vault binary" - rm "vault_${VERSION}_SHA256SUMS.sig" "vault_${VERSION}_SHA256SUMS" hashicorp.asc -} - -# shellcheck disable=SC2120 -# optional arguments -install_vault() { - local VERSION=${1:-} - local PLATFORM=${2:-linux} - local ARCH=${3:-amd64} - local VERIFY=${4:-0} - if command -v vault > /dev/null; then - echo "Vault is already installed" - return - fi - - if [[ -z "${VERSION}" ]]; then - VERSION=$(get_latest_vault_release) - fi - - FILENAME="vault_${VERSION}_${PLATFORM}_${ARCH}.zip" - DOWNLOAD_URL="https://releases.hashicorp.com/vault/${VERSION}/${FILENAME}" - - curl -L --fail --retry 3 -o "${FILENAME}" "${DOWNLOAD_URL}" - if [[ "${VERIFY}" -eq 1 ]]; then - verify_vault "${VERSION}" "${ARCH}" "${PLATFORM}" - fi - - unzip "${FILENAME}" - rm "${FILENAME}" - ${SUDO} mv ./vault /usr/local/bin/vault - vault version -} - -# shellcheck disable=SC2120 -# optional arguments -install_pvault() { - local PVAULT_VERSION=${1:-latest} - - if command -v pvault >/dev/null; then - # pvault is here, no worries - echo "Pvault installed" - return - fi - - if [[ -z "${GITHUB_TOKEN:-}" ]]; then - echo "GITHUB_TOKEN is required to install pvault" - exit 1 - fi - - local PVAULT_URL="https://${GITHUB_TOKEN}:@api.github.com/repos/pantheon-systems/pvault/releases" - local JQ_FILTER=".[0].assets | map(select(.name|test(\"pvault.*_linux_amd64\")))[0].id" - if [[ "${PVAULT_VERSION}" != "latest" ]] ; then - JQ_FILTER=". 
| map(select(.tag_name == \"v${PVAULT_VERSION}\"))[0].assets | map(select(.name|test(\"pvault_.*_linux_amd64\")))[0].id" - fi - - local ASSET - ASSET=$(curl -s "${PVAULT_URL}" | jq -r "${JQ_FILTER}") - if [[ "${ASSET}" == "null" ]]; then - echo "Asset Not Found" - exit 1 - fi - - echo "Fetching pvault version ${PVAULT_VERSION}, with asset ID ${ASSET}" - local fn="./pvault_${PVAULT_VERSION}_linux_amd64.deb" - # cleanup the downloaded deb on exit - # shellcheck disable=SC2064 - trap "rm -f $fn" EXIT - - curl -L -o "$fn" "https://api.github.com/repos/pantheon-systems/pvault/releases/assets/${ASSET}" \ - -H "Authorization: token ${GITHUB_TOKEN}" \ - -H 'Accept: application/octet-stream' \ - && ${SUDO} dpkg -i "$fn" -} - -setup_vault_env() { - local VAULT_INSTANCE=$1 - VAULT_TOKEN=$(pvault "${VAULT_INSTANCE}" write auth/jwt-circleci/login -format=json role=circleci jwt="${CIRCLE_OIDC_TOKEN}" | jq -r '.auth.client_token') - export VAULT_TOKEN - echo "export VAULT_TOKEN=${VAULT_TOKEN}" >> "${BASH_ENV}" -} - -main() { - if [[ "${CIRCLECI:-}" != "true" ]]; then - echo "This script is only intended to run on Circle-CI." - exit 1 - fi - - if ! command -v jq >/dev/null; then - echo "JQ is required to install pvault." - exit 1 - fi - - - if [[ -z "${CIRCLE_OIDC_TOKEN}" ]]; then - echo CIRCLE_OIDC_TOKEN not set - exit 1 - fi - - VAULT_INSTANCE=${VAULT_INSTANCE:-production} - - install_vault - install_pvault - setup_vault_env "$VAULT_INSTANCE" -} - -main "$@" diff --git a/devops/make/sh/setup-gcloud-test.sh b/devops/make/sh/setup-gcloud-test.sh deleted file mode 100755 index 489451b..0000000 --- a/devops/make/sh/setup-gcloud-test.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -eou pipefail - -# clusters should always be pulled - -CLUSTER_DEFAULT=general-02 sh/setup-gcloud.sh - -default_context=$(kubectl config current-context) -if [[ "$default_context" != "gke_pantheon-internal_us-central1_general-02" ]] ; then - echo "Default context was not set correctlyfor kubectl, expected gke_pantheon-internal_us-central1_general-02 got $default_context" - exit 1 -fi - -gcloud_default_context=$(gcloud config get-value container/cluster) -if [[ "$gcloud_default_context" != "general-02" ]] ; then - echo "Default context was not set correctly for gcloud, expected gke_pantheon-internal_us-central1_general-02 got $gcloud_default_context" - exit 1 -fi - -exit 0 diff --git a/devops/make/sh/setup-gcloud.sh b/devops/make/sh/setup-gcloud.sh deleted file mode 100755 index 5acf6e4..0000000 --- a/devops/make/sh/setup-gcloud.sh +++ /dev/null @@ -1,80 +0,0 @@ -#! 
/bin/bash -# upgrade CircleCI builtin gcloud tools, and set it up -# -# The following ENV vars must be set before calling this script: -# -# GCLOUD_EMAIL # user-id for circle to authenticate to google cloud -# GCLOUD_KEY # base64 encoded key -# GCLOUD_PROJECTS # a space-delimited (use quotes) list of projects for which to pull gke credentials -# CLUSTER_ID # (DEPRECATED) this will set the cluster to connect to (when not used it connects to all of them) -# CLUSTER_DEFAULT # sets default cluster (if using CLUSTER_ID then this is set to the specified cluster) - -set -euo pipefail - -parent_dir="$(cd "$(dirname "$0")" && pwd)" -"${parent_dir}/install-pants.sh" - -gcloud="$(command -v gcloud) --quiet" -kubectl=$(command -v kubectl) -pants=$(command -v pants) - -CLUSTER_DEFAULT=${CLUSTER_DEFAULT:-"general-01"} -CLUSTER_ID=${CLUSTER_ID:-} -GCLOUD_EMAIL=${GCLOUD_EMAIL:-} -GCLOUD_KEY=${GCLOUD_KEY:-} - -PROJECTS=(pantheon-internal pantheon-sandbox pantheon-dmz pantheon-build pantheon-cos-provision) -if [[ -n "${GCLOUD_PROJECTS:-}" ]]; then - read -r -a PROJECTS <<<"$GCLOUD_PROJECTS" -fi - -if [[ -z "$GCLOUD_EMAIL" ]]; then - echo "GCLOUD_EMAIL is required" - exit 1 -fi - -if [[ -z "$GCLOUD_KEY" ]]; then - echo "GCLOUD_KEY is required" - exit 1 -fi - -echo "$GCLOUD_KEY" | base64 --decode > gcloud.json -$gcloud auth activate-service-account "$GCLOUD_EMAIL" --key-file gcloud.json - -sshkey="$HOME/.ssh/google_compute_engine" -if [[ ! -f "$sshkey" ]] ; then - ssh-keygen -f "$sshkey" -N "" -fi - -if [[ -n "$CLUSTER_ID" ]] ; then - CLUSTER_DEFAULT="$CLUSTER_ID" -fi - -for PROJ in "${PROJECTS[@]}"; do - echo "Fetching credentails for project $PROJ" - $pants gke pull-creds --project "$PROJ" - - DEFAULT_CLUSTER_DATA=$(gcloud container clusters list --format json --project "$PROJ" | jq --arg cluster "$CLUSTER_DEFAULT" -r '.[] | select(.name==$cluster)') - - if [[ -n "$DEFAULT_CLUSTER_DATA" ]]; then - # this means that DEFAULT_CLUSTER is in this project and its information is in $CLUSTER_DATA - DEFAULT_CLUSTER_PROJECT=$PROJ - DEFAULT_CLUSTER_ZONE=$(jq -r .zone <<< "$DEFAULT_CLUSTER_DATA") - DEFAULT_CLUSTER_LONG_NAME=gke_"$PROJ"_"$DEFAULT_CLUSTER_ZONE"_"$CLUSTER_DEFAULT" - fi -done - -ALL_CLUSTERS=$($kubectl config get-clusters | grep -v NAME) - -echo "Clusters: ${ALL_CLUSTERS[*]}" -echo "Default Cluster: $CLUSTER_DEFAULT" - -echo "Setting Primary Project" -$gcloud config set project "$DEFAULT_CLUSTER_PROJECT" - -echo "Setting Primary Zone" -$gcloud config set compute/zone "$DEFAULT_CLUSTER_ZONE" - -echo "Setting Primary Cluster" -$gcloud config set container/cluster "$CLUSTER_DEFAULT" -$kubectl config use-context "$DEFAULT_CLUSTER_LONG_NAME" diff --git a/devops/make/sh/update-gcloud.sh b/devops/make/sh/update-gcloud.sh deleted file mode 100755 index abc48bf..0000000 --- a/devops/make/sh/update-gcloud.sh +++ /dev/null @@ -1,34 +0,0 @@ -#! /bin/bash -# upgrade CircleCI builtin gcloud tools, and set it up -# -# The following ENV vars must be set before calling this script: -# -# GCLOUD_EMAIL # user-id for circle to authenticate to google cloud -# GCLOUD_KEY # base64 encoded key -set -eou pipefail - -if [ "$CIRCLECI" != "true" ]; then - echo "This script is only intended to run on Circle-CI." 
- exit 1 -fi - -export PATH=$PATH:/opt/google-cloud-sdk/bin -export PATH=$PATH:/root/google-cloud-sdk/bin -export CLOUDSDK_CORE_DISABLE_PROMPTS=1 -export CLOUDSDK_PYTHON_SITEPACKAGES=0 - -gcloud=$(command -v gcloud) - -# ensure we use certs to talk to kube, instead of the oauth bridge (google auth creds) -export CLOUDSDK_CONTAINER_USE_CLIENT_CERTIFICATE=True - -# if gcloud was installed via apt/deb, upgrade via apt: -if dpkg -l google-cloud-sdk; then - sudo -E apt-get update -qy && sudo apt-get -y --only-upgrade install kubectl google-cloud-sdk -else -# assume gcloud was installed through the install script and run the builtin updater: - sudo -E "$gcloud" components update > /dev/null 2>&1 - sudo -E "$gcloud" components update kubectl > /dev/null 2>&1 -fi - -sudo -E chown -R "$(whoami)" ~/.config/gcloud diff --git a/devops/make/sh/update-kube-object.sh b/devops/make/sh/update-kube-object.sh deleted file mode 100755 index 15d49fa..0000000 --- a/devops/make/sh/update-kube-object.sh +++ /dev/null @@ -1,158 +0,0 @@ -#!/bin/bash -set -eu -o pipefail - -unset ROOT_DIR -unset OBJ_DIR -ROOT_DIR=$1 -[[ -z "$ROOT_DIR" ]] && { echo "you need to specify the directory to scan as the only argument"; exit 1; } -[[ -z "$APP" ]] && { echo "APP environment variable must be set"; exit 1; } -[[ -z "$KUBE_NAMESPACE" ]] && { echo "KUBE_NAMESPACE environment variable must be set"; exit 1; } -[[ -z "$KUBE_CONTEXT" ]] && { echo "KUBE_CONTEXT environment variable must be set"; exit 1; } - -# map_from_file will use kubectl create --from-file to generate a configmap. where each -# file in the directory will become a key in the map with its file contents as the -# map data. -map_from_file() { - local map_path=$1 - local map_name=$2 - - echo "Processing $map_name from $map_path" - kubectl delete configmap "$map_name" --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" > /dev/null 2>&1 || true; - kubectl create configmap "$map_name" --from-file="$map_path" --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" - # shellcheck disable=SC2086 - kubectl label configmap "$map_name" ${LABELS} --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" -} - -# map_literal uses kubectl create with the --from-literal to create a configmap from a file -# the File is expected to contain lines of "k=v" entries. These will be converted to -# map keys and data. -map_literal() { - local map_path=$1 - local map_name=$2 - - # construct the args array for each line in the file - literal_args=() - # use awk to filter comment lines with or without space - while read -r kvpair ; do - literal_args+=("--from-literal=$kvpair") - done <<<"$(awk '!/^ *#/ && NF' "$map_path")" - - kubectl delete configmap "$map_name" --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" > /dev/null 2>&1 || true; - kubectl create configmap "$map_name" "${literal_args[@]}" --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" - # shellcheck disable=SC2086 - kubectl label configmap "$map_name" ${LABELS} --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" -} - - -# secret_from_file will use kubectl create secret generic --from-file to generate a secret. -# Where each file in the directory will become a key -# and the file contents the secret data. 
-secret_from_file() { - local secret_path=$1 - local secret_name=$2 - - kubectl delete secret "$secret_name" --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" > /dev/null 2>&1 || true; - kubectl create secret generic "$secret_name" --from-file="$secret_path" --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" - # shellcheck disable=SC2086 - kubectl label secret "$secret_name" ${LABELS} --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" -} - - -# secret_literal uses kubectl create with the --from-literal to create secrets from a file. -# The File is expected to contain lines of "k=v" entries. These will be converted to secret -# keys and data. -secret_literal() { - local secret_path=$1 - local secret_name=$2 - - # construct the args array for each line in the file - literal_args=() - for i in $(<"$secret_path") ; do - literal_args+=("--from-literal=$i") - done - - kubectl delete secret "$secret_name" --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" > /dev/null 2>&1 || true - kubectl create secret generic "$secret_name" "${literal_args[@]}" --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" - # shellcheck disable=SC2086 - kubectl label secret "$secret_name" ${LABELS} --namespace="$KUBE_NAMESPACE" --context "$KUBE_CONTEXT" -} - -update() { - local path=$1 - local obj_type=$2 - local name - - name="$APP-$(basename "$path")" - - # if this is a file or a directory treat it differently. We want to use - # kube --from file for dirs and kube --literal for files. - if [[ -f "$path" ]] ; then - eval "${obj_type}_literal \"$path\" \"$name\"" - else - eval "${obj_type}_from_file \"$path\" \"$name\"" - fi -} - -find_obj_dir() { - # We need to detect the right directory to use, but want to allow the use of - # namespaces named "production" in non-production contexts. - # Use 'non-prod' by default. - # If the namespace is production AND the context is not sandbox, set the - # dir to production. - # If the namespace is not production then we use a dir named the same as the - # namespace if it exists. - # If a directory exists named using the full context, use that as the base. - # Objects can be customized for each Kube context (e.g. 
different keys for different - # geographic regions) - SUB_DIR="$ROOT_DIR" - if [[ -d "$ROOT_DIR/$KUBE_CONTEXT" ]] ; then - SUB_DIR="$ROOT_DIR/$KUBE_CONTEXT" - fi - - OBJ_DIR="$SUB_DIR/non-prod" - - if [[ "$KUBE_NAMESPACE" == "production" && "$KUBE_CONTEXT" != *_sandbox-* ]] ; then - OBJ_DIR="$SUB_DIR/production" - fi - - if [[ -d $SUB_DIR/$KUBE_NAMESPACE && "$KUBE_NAMESPACE" != "production" ]] ; then - OBJ_DIR="$SUB_DIR/$KUBE_NAMESPACE" - fi -} - -main() { - local type - local func_prefix - - find_obj_dir "$ROOT_DIR" - if [[ -z "$OBJ_DIR" ]] ; then - echo "Could not locate a suitable object directory for $KUBE_NAMESPACE" - exit 1 - fi - if [[ -z "${LABELS:-}" ]] ; then - echo "Variable LABELS is undefined" - exit 1 - fi - LABELS=${LABELS//,/ } - - # divine if this is something we can manage and what it should dispatch too - type=$(basename "$ROOT_DIR") - case $type in - # because these will be eval'd lets not passthrough user input - configmaps) func_prefix="map" ;; - secrets) func_prefix="secret" ;; - *) - echo "Don't know how to process '$i'" - exit 1 - ;; - esac - - - echo "Using objects from directory '$OBJ_DIR'" - for object in "$OBJ_DIR"/* ; do - echo "Processing $object" - update "$object" "$func_prefix" - done -} - -main "$@" diff --git a/devops/make/test/fixtures/configmaps/non-prod/foofile b/devops/make/test/fixtures/configmaps/non-prod/foofile deleted file mode 100644 index b3ee0e3..0000000 --- a/devops/make/test/fixtures/configmaps/non-prod/foofile +++ /dev/null @@ -1,3 +0,0 @@ -test=true -test2=true -test3=true diff --git a/devops/make/test/fixtures/configmaps/sandbox-common-make-test/test/bar b/devops/make/test/fixtures/configmaps/sandbox-common-make-test/test/bar deleted file mode 100644 index 7601807..0000000 --- a/devops/make/test/fixtures/configmaps/sandbox-common-make-test/test/bar +++ /dev/null @@ -1 +0,0 @@ -baz diff --git a/devops/make/test/fixtures/configmaps/sandbox-common-make-test/test/foo b/devops/make/test/fixtures/configmaps/sandbox-common-make-test/test/foo deleted file mode 100644 index 5716ca5..0000000 --- a/devops/make/test/fixtures/configmaps/sandbox-common-make-test/test/foo +++ /dev/null @@ -1 +0,0 @@ -bar diff --git a/devops/make/test/fixtures/configmaps/sandbox-common-make-test/testfile b/devops/make/test/fixtures/configmaps/sandbox-common-make-test/testfile deleted file mode 100644 index 74d0a43..0000000 --- a/devops/make/test/fixtures/configmaps/sandbox-common-make-test/testfile +++ /dev/null @@ -1 +0,0 @@ -foo=bar diff --git a/devops/make/test/fixtures/configmaps/sandbox-common-make-test/testfile3 b/devops/make/test/fixtures/configmaps/sandbox-common-make-test/testfile3 deleted file mode 100644 index ad0914b..0000000 --- a/devops/make/test/fixtures/configmaps/sandbox-common-make-test/testfile3 +++ /dev/null @@ -1 +0,0 @@ -baz=zip diff --git a/devops/make/test/fixtures/golang/main.go b/devops/make/test/fixtures/golang/main.go deleted file mode 100644 index de12f0c..0000000 --- a/devops/make/test/fixtures/golang/main.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "fmt" - -func main() { - fmt.Println("test-go") -} diff --git a/devops/make/test/fixtures/golang/main_test.go b/devops/make/test/fixtures/golang/main_test.go deleted file mode 100644 index d15fc1f..0000000 --- a/devops/make/test/fixtures/golang/main_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package main_test - -import ( - "testing" -) - -func TestPrinting(t *testing.T) { - t.Log("a test ran!") -} diff --git a/devops/make/test/fixtures/secrets/non-prod/supersecret 
b/devops/make/test/fixtures/secrets/non-prod/supersecret deleted file mode 100644 index 9310749..0000000 --- a/devops/make/test/fixtures/secrets/non-prod/supersecret +++ /dev/null @@ -1 +0,0 @@ -pass=asdf diff --git a/devops/make/test/make/docker-ar.mk b/devops/make/test/make/docker-ar.mk deleted file mode 100644 index 127fd67..0000000 --- a/devops/make/test/make/docker-ar.mk +++ /dev/null @@ -1,17 +0,0 @@ -APP=common-make-docker - -ifdef CIRCLE_BUILD_NUM - BUILD_NUM = $(CIRCLE_BUILD_NUM) -else - BUILD_NUM = $(shell git rev-parse HEAD | egrep -o '....$$') -endif - -include common.mk -include common-docker-ar.mk - -test-common-docker: build-docker push - $(call INFO, "testing common docker") - @test "$(AR_REGISTRY_PATH)" = "$(AR_IMAGE_BASENAME)" -ifdef CIRCLE_BUILD_NUM - @test "$(CIRCLE_BUILD_NUM)-$(CIRCLE_BRANCH)" = "$(BUILD_NUM)" -endif diff --git a/devops/make/test/make/docker.mk b/devops/make/test/make/docker.mk deleted file mode 100644 index ef1deb5..0000000 --- a/devops/make/test/make/docker.mk +++ /dev/null @@ -1,17 +0,0 @@ -APP=common-make-docker - -ifdef CIRCLE_BUILD_NUM - BUILD_NUM = $(CIRCLE_BUILD_NUM) -else - BUILD_NUM = $(shell git rev-parse HEAD | egrep -o '....$$') -endif - -include common.mk -include common-docker.mk - -test-common-docker: build-docker push - $(call INFO, "testing common docker") - @test "$(REGISTRY_PATH)" = "$(IMAGE_BASENAME)" -ifdef CIRCLE_BUILD_NUM - @test "$(CIRCLE_BUILD_NUM)-$(CIRCLE_BRANCH)" = "$(BUILD_NUM)" -endif diff --git a/devops/make/test/make/kube.mk b/devops/make/test/make/kube.mk deleted file mode 100644 index 9b85067..0000000 --- a/devops/make/test/make/kube.mk +++ /dev/null @@ -1,30 +0,0 @@ -APP=common-make-kube -SKIP_KUBEVAL := true - -ifdef CIRCLE_BUILD_NUM - BUILD_NUM = $(CIRCLE_BUILD_NUM) -else - BUILD_NUM = $(shell git rev-parse HEAD | grep -o '....$$') -endif - -include common.mk -KUBE_NAMESPACE := $(APP)-$(BRANCH)-$(BUILD_NUM) -include common-kube.mk - -test-common-kube-lint: lint - -test-common-kube: test-common-kube-lint - $(call INFO, "Creating kube ns $(KUBE_NAMESPACE)") - -@$(KUBECTL_CMD) delete namespace --wait=true $(KUBE_NAMESPACE) > /dev/null - @$(KUBECTL_CMD) create namespace $(KUBE_NAMESPACE) > /dev/null - @sleep 1 - $(call INFO, "running kube common tests in kube ns $(KUBE_NAMESPACE) and context $(KUBE_CONTEXT)") - @APP=$(APP) KUBE_NAMESPACE=$(KUBE_NAMESPACE) KUBE_CONTEXT=$(KUBE_CONTEXT) LABELS=$(LABELS) \ - bash sh/update-kube-object.sh ./test/fixtures/secrets > /dev/null - @APP=$(APP) KUBE_NAMESPACE=$(KUBE_NAMESPACE) KUBE_CONTEXT=$(KUBE_CONTEXT) LABELS=$(LABELS) \ - bash sh/update-kube-object.sh ./test/fixtures/configmaps > /dev/null - $(call INFO, "Verifying kube common secrets and maps for $(APP) in $(KUBE_NAMESPACE)") - @$(KUBECTL_CMD) get secret $(APP)-supersecret > /dev/null - @$(KUBECTL_CMD) get configmap $(APP)-foofile > /dev/null - $(call INFO, "cleaning up testing namespace $(KUBE_NAMESPACE)") - @$(KUBECTL_CMD) delete namespace --wait=false $(KUBE_NAMESPACE) 2> /dev/null diff --git a/devops/make/test/make/pants.mk b/devops/make/test/make/pants.mk deleted file mode 100644 index 929e206..0000000 --- a/devops/make/test/make/pants.mk +++ /dev/null @@ -1,12 +0,0 @@ -APP=common-make-pants - -include common.mk -# Override normal logic so that we don't kill a developer's personal sandbox when running locally. -KUBE_NAMESPACE := sandbox-cm-$(BRANCH) -include common-kube.mk -# Choose just one service so we can finish faster. 
-PANTS_INCLUDE := metrics -PANTS_SANDBOX_NAME := $(shell echo $(KUBE_NAMESPACE) | tr A-Z a-z) -include common-pants.mk - -test-common-pants: install-circle-pants init-circle-pants From 1bcb88fde3c03ed28f68eb0bf5d926e0c3066b69 Mon Sep 17 00:00:00 2001 From: Phil Tyler Date: Fri, 31 Mar 2023 15:34:06 -0700 Subject: [PATCH 4/7] [minor] Drop stubbed pantheon.yml validation --- .circleci/config.yml | 4 ---- .goreleaser.yml | 5 ++++- cmd/root.go | 4 ++-- cmd/validators.go | 10 ---------- pkg/validator/pantheon.go | 22 ---------------------- pkg/validator/sites.go | 18 ++++++++++-------- pkg/validator/sites_test.go | 3 ++- pkg/validator/validator.go | 2 -- pkg/validator/validator_test.go | 1 - 9 files changed, 18 insertions(+), 51 deletions(-) delete mode 100644 pkg/validator/pantheon.go diff --git a/.circleci/config.yml b/.circleci/config.yml index ea22aca..e426c83 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -22,10 +22,6 @@ commands: - restore_cache: keys: - v4-dependencies-{{ checksum "go.sum" }} - - # By default, CircleCI uses ssh, and authenticates as a user with read access to projects, but not write access. - # In order for `git push` command to work, we need to have CircleCI use HTTPS with the provided oauth token - # instead of ssh (the token is for pantheon-releases which has write access, but the default circle user does not) configure-https-git: steps: - run: git config --global url."https://$GITHUB_TOKEN:x-oauth-basic@github.com/pantheon-systems/".insteadOf "git@github.com:pantheon-systems/" diff --git a/.goreleaser.yml b/.goreleaser.yml index 1b28fcd..b679fd3 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -41,8 +41,11 @@ changelog: exclude: - '^docs:' - '^test:' - - Merge pull request + - fixup - Merge branch + - Merge commit + - Merge pull request + - Squashed 'devops/make/' # make a zip of the source # https://goreleaser.com/customization/source/ diff --git a/cmd/root.go b/cmd/root.go index 6cd603f..ee86c54 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -10,8 +10,8 @@ var FilePath string var rootCmd = &cobra.Command{ Use: "pyml-validator", - Short: "Pyml-validator validates pantheon.yml, sites.yml, etc.", - Long: `Pyml-validator is a validator for pantheon.yml or sites.yml. + Short: "Pyml-validator validates sites.yml", + Long: `Pyml-validator is a validator for sites.yml, used for WPMS search-replace. Ensures that the given config file can be used by the platform.`, } diff --git a/cmd/validators.go b/cmd/validators.go index 23832ed..0d0a3f0 100644 --- a/cmd/validators.go +++ b/cmd/validators.go @@ -34,16 +34,6 @@ var sitesCommand = &cobra.Command{ }, } -var pantheonCommand = &cobra.Command{ - Use: "pantheon", - Short: "validate pantheon.yml", - Long: `Validate pantheon.yml. For more information, see https://pantheon.io/docs/pantheon-yml`, - RunE: func(cmd *cobra.Command, args []string) error { - return validatorCommand(cmd) - }, -} - func init() { - rootCmd.AddCommand(pantheonCommand) rootCmd.AddCommand(sitesCommand) } diff --git a/pkg/validator/pantheon.go b/pkg/validator/pantheon.go deleted file mode 100644 index 1ef104d..0000000 --- a/pkg/validator/pantheon.go +++ /dev/null @@ -1,22 +0,0 @@ -package validator - -import ( - "fmt" - "os" -) - -type PantheonValidator struct{} - -// ValidateFromYaml asserts a given pantheon.yaml file is valid. -// As this has not been implemented, nothing is invalid. 
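Once the `pantheon` subcommand above and the `PantheonValidator` deleted here are gone, `sites` is the only validator the factory (see the validator.go hunk further below) can return. The following is a minimal, self-contained Go sketch of the pattern that remains — the `Validator` interface shape and the `runValidation` helper are assumptions for illustration, since neither the interface definition nor the body of `validatorCommand` appears in this diff:

```go
package main

import "fmt"

// Assumed interface shape: both validators in this repo expose these two
// methods, so ValidatorFactory plausibly returns something close to this.
type Validator interface {
	ValidateFromYaml(y []byte) error
	ValidateFromFilePath(filePath string) error
}

// sitesValidator stands in for pkg/validator.SitesValidator; its methods are
// stubbed out because only the wiring is being illustrated.
type sitesValidator struct{}

func (v *sitesValidator) ValidateFromYaml(y []byte) error            { return nil }
func (v *sitesValidator) ValidateFromFilePath(filePath string) error { return nil }

// validatorFactory mirrors the ValidatorFactory hunk below, after this patch:
// only "sites" resolves, and "pantheon" now falls through to the error case.
func validatorFactory(kind string) (Validator, error) {
	switch kind {
	case "sites":
		return &sitesValidator{}, nil
	default:
		return nil, fmt.Errorf("%q is not a valid validator", kind)
	}
}

// runValidation sketches what a cobra RunE handler would do with the -f flag.
func runValidation(kind, filePath string) error {
	v, err := validatorFactory(kind)
	if err != nil {
		return err
	}
	return v.ValidateFromFilePath(filePath)
}

func main() {
	fmt.Println(runValidation("sites", "fixtures/sites/valid.yml")) // <nil> (stubbed)
	fmt.Println(runValidation("pantheon", "pantheon.yml"))          // "pantheon" is not a valid validator
}
```

In the CLI itself the `pantheon` subcommand is removed outright, so cobra rejects `pyml-validator pantheon` before the factory is ever consulted; the sketch only shows the factory-level behavior.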
-func (v *PantheonValidator) ValidateFromYaml(y []byte) error { - return nil -} - -func (v *PantheonValidator) ValidateFromFilePath(filePath string) error { - yFile, err := os.ReadFile(filePath) - if err != nil { - return fmt.Errorf("error reading YAML file: %w", err) - } - return v.ValidateFromYaml(yFile) -} diff --git a/pkg/validator/sites.go b/pkg/validator/sites.go index c2128ad..17153bb 100644 --- a/pkg/validator/sites.go +++ b/pkg/validator/sites.go @@ -10,13 +10,15 @@ import ( ) const ( - MaxDomainMaps = 25 // This could be raised + maxDomainMaps = 25 // This could be raised + validHostnameRegex = `^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + validMultidevNameRegex = `^[a-z0-9\-]{1,11}$` ) var ( - // See https://github.com/pantheon-systems/titan-mt/blob/master/yggdrasil/lib/pantheon_yml/pantheon_yml_v1_schema.py - ValidHostnameRegex = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) - ValidMultidevNameRegex = regexp.MustCompile(`^[a-z0-9\-]{1,11}$`) + // c/f pantheon.yml validation in titan-mt. + validHostname = regexp.MustCompile(validHostnameRegex) + validMultidevName = regexp.MustCompile(validMultidevNameRegex) ) type SitesValidator struct{} @@ -55,15 +57,15 @@ func (v *SitesValidator) validate(sites model.SitesYml) error { // provided are valid Pantheon hostnames. func validateDomainMaps(domainMaps map[string]model.DomainMapByEnvironment) error { for env, domainMap := range domainMaps { - if !ValidMultidevNameRegex.MatchString(env) { + if !validMultidevName.MatchString(env) { return fmt.Errorf("%q is not a valid environment name", env) } domainMapCount := len(domainMap) - if domainMapCount > MaxDomainMaps { - return fmt.Errorf("%q has too many domains listed (%d). Maximum is %d", env, domainMapCount, MaxDomainMaps) + if domainMapCount > maxDomainMaps { + return fmt.Errorf("%q has too many domains listed (%d). 
Maximum is %d", env, domainMapCount, maxDomainMaps) } for _, domain := range domainMap { - if !ValidHostnameRegex.MatchString(domain) { + if !validHostname.MatchString(domain) { return fmt.Errorf("%q is not a valid hostname", domain) } } diff --git a/pkg/validator/sites_test.go b/pkg/validator/sites_test.go index 40b8ea9..9239ed1 100644 --- a/pkg/validator/sites_test.go +++ b/pkg/validator/sites_test.go @@ -227,7 +227,8 @@ func TestValidateSitesFromFilePath(t *testing.T) { {"valid_api_version_only", nil}, {"valid", nil}, { - "this_file_does_not_exist", errors.New( + "this_file_does_not_exist", + errors.New( "error reading YAML file: open ../../fixtures/sites/this_file_does_not_exist.yml: no such file or directory", ), }, diff --git a/pkg/validator/validator.go b/pkg/validator/validator.go index 8d1bbce..0bc99a0 100644 --- a/pkg/validator/validator.go +++ b/pkg/validator/validator.go @@ -13,8 +13,6 @@ func ValidatorFactory(v string) (Validator, error) { switch v { case "sites": return &SitesValidator{}, nil - case "pantheon": - return &PantheonValidator{}, nil default: return nil, fmt.Errorf(`%q is not a valid validator.`, v) } diff --git a/pkg/validator/validator_test.go b/pkg/validator/validator_test.go index 690fac6..208d665 100644 --- a/pkg/validator/validator_test.go +++ b/pkg/validator/validator_test.go @@ -14,7 +14,6 @@ func TestValidatorFactory(t *testing.T) { expectedErr error }{ {"sites", &SitesValidator{}, nil}, - {"pantheon", &PantheonValidator{}, nil}, {"foo", nil, errors.New(`"foo" is not a valid validator.`)}, } { t.Run(tc.name, func(t *testing.T) { From 20a3835b8f0530eb8c57d87bf7aa6aff09bcf3a2 Mon Sep 17 00:00:00 2001 From: Phil Tyler Date: Fri, 31 Mar 2023 15:43:56 -0700 Subject: [PATCH 5/7] docs: README with install notes --- README.MD | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/README.MD b/README.MD index 43e1ebc..dd9f9ee 100644 --- a/README.MD +++ b/README.MD @@ -1,28 +1,49 @@ -# Sites.yml | Pantheon.yml Validator +# Pantheon YML Validator -A utility for validating a sites.yml file on a pantheon site during WordPress multisites' search-replace tasks. Asprirationally to include pantheon.yml validation in the future. +A utility for validating a sites.yml file on a Pantheon site during WordPress multisites' search-replace tasks. Asprirationally to include pantheon.yml validation in the future. -# Usage +## Usage -## Sites.yml +### Sites.yml ``` $ pyml-validator sites -f path/to/sites.yml ``` See [this annotated fixture](./fixtures/sites/valid.yml) for an example of a valid sites.yml file. -## Pantheon.yml -Note, validation of pantheon.yml is unimplemented, so any file reads as valid. 
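To make the sites.go rules above concrete, here is a minimal, self-contained Go sketch of the three per-environment checks driven by the new constants — the `checkEnv` helper and the plain `map[int]string` are invented for illustration, while the real validator walks `model.SitesYml`:

```go
package main

import (
	"fmt"
	"regexp"
)

// These values mirror the constants introduced in pkg/validator/sites.go above.
const (
	maxDomainMaps          = 25
	validHostnameRegex     = `^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
	validMultidevNameRegex = `^[a-z0-9\-]{1,11}$`
)

var (
	validHostname     = regexp.MustCompile(validHostnameRegex)
	validMultidevName = regexp.MustCompile(validMultidevNameRegex)
)

// checkEnv applies the same three per-environment rules as the validator:
// a well-formed multidev name, at most maxDomainMaps entries, and a valid
// hostname for every mapped domain.
func checkEnv(env string, domains map[int]string) error {
	if !validMultidevName.MatchString(env) {
		return fmt.Errorf("%q is not a valid environment name", env)
	}
	if len(domains) > maxDomainMaps {
		return fmt.Errorf("%q has too many domains listed (%d), maximum is %d", env, len(domains), maxDomainMaps)
	}
	for _, domain := range domains {
		if !validHostname.MatchString(domain) {
			return fmt.Errorf("%q is not a valid hostname", domain)
		}
	}
	return nil
}

func main() {
	// "feat_branch" fails the environment-name rule (underscores are not in
	// ^[a-z0-9\-]{1,11}$), which is presumably what PATCH 7/7 below addresses
	// by renaming that fixture environment to "mdev".
	fmt.Println(checkEnv("feat_branch", map[int]string{1: "blog1.feat-branch-mysite.pantheonsite.io"}))
	fmt.Println(checkEnv("mdev", map[int]string{1: "blog1.mdev-mysite.pantheonsite.io"}))
}
```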
+## Install + +### [Download the latest binary](https://github.com/pantheon-systems/pyml-validator/releases/latest) + +Use wget to download, gzipped pre-compiled binaries: +For instance, VERSION=v0.0.11 and BINARY=pyml-validator_linux_amd64 + +### Compressed via tar.gz +```bash +wget https://github.com/pantheon-systems/pyml-validator/releases/download/${VERSION}/${BINARY}.tar.gz -O - |\ + tar xz && mv ${BINARY} /usr/bin/pyml-validator +``` + +### Plain binary + +```bash +wget https://github.com/pantheon-systems/pyml-validator/releases/download/${VERSION}/${BINARY} -O /usr/bin/pyml-validator &&\ + chmod +x /usr/bin/pyml-validator ``` -$ pyml-validator pantheon -f path/to/pantheon.yml + +### Latest version + +```bash +wget https://github.com/pantheon-systems/pyml-validator/releases/latest/download/pyml-validator_linux_amd64 -O /usr/bin/pyml-validator &&\ + chmod +x /usr/bin/pyml-validator ``` -# Testing +## Testing [![Coverage Status](https://coveralls.io/repos/github/pantheon-systems/pyml-validator/badge.svg?t=PGhafd)](https://coveralls.io/github/pantheon-systems/pyml-validator) `make test` runs linting and testing. -# Releases +## Releases -Automatically releases on merge to main via autotag + goreleaser. See [Autotag Readme](https://github.com/pantheon-systems/autotag) for details on how the SemVer is determined. Note, with goreleaser, each commit merged will become a line item in the release's Changelog. Take note to use squashing and/or rebase to ensure helpful and informative commit messages. +Automatically releases on merge to main via autotag + goreleaser. See [Autotag Readme](https://github.com/pantheon-systems/autotag) for details on how the SemVer is determined. With goreleaser, each commit merged will become a line item in the release's Changelog. Take note to use squashing and/or rebase to ensure helpful and informative commit messages. From 8b4d9c18011a4c1280651d18ff548461f941bd19 Mon Sep 17 00:00:00 2001 From: Phil Tyler Date: Fri, 31 Mar 2023 15:45:24 -0700 Subject: [PATCH 6/7] docs: License --- LICENSE | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..01cc0fe --- /dev/null +++ b/LICENSE @@ -0,0 +1,7 @@ +Copyright 2023 Pantheon Systems Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file From 0d2821459e87e731b1f84897414921f0561ceb0d Mon Sep 17 00:00:00 2001 From: Phil Tyler Date: Fri, 31 Mar 2023 16:18:19 -0700 Subject: [PATCH 7/7] Fix inconsistent length test --- pkg/validator/sites_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/validator/sites_test.go b/pkg/validator/sites_test.go index 9239ed1..9b0304a 100644 --- a/pkg/validator/sites_test.go +++ b/pkg/validator/sites_test.go @@ -130,8 +130,8 @@ func TestValidate(t *testing.T) { 28: "blog28.dev-mysite.pantheonsite.io", 29: "blog29.dev-mysite.pantheonsite.io", }, - "feat_branch": model.DomainMapByEnvironment{ - 1: "blog1.feat-branch-mysite.pantheonsite.io", + "mdev": model.DomainMapByEnvironment{ + 1: "blog1.mdev-mysite.pantheonsite.io", }, }, },