Commit

Merge pull request #7 from GoogleCloudPlatform/tf-12-jenkinsfile
TF 12, Vault 1.2.0, Jenkinsfile formatting
Chris Love authored Aug 13, 2019
2 parents 7a82ef3 + 55a62ae commit b9650ea
Showing 13 changed files with 185 additions and 161 deletions.
Jenkinsfile (151 changes: 67 additions & 84 deletions)
@@ -1,4 +1,5 @@
#!/usr/bin/env groovy

/*
Copyright 2018 Google LLC
@@ -22,93 +23,75 @@ limitations under the License.
// Reference: https://github.com/jenkinsci/kubernetes-plugin

// set up pod label and GOOGLE_APPLICATION_CREDENTIALS (for Terraform)
def label = "k8s-infra"
def containerName = "k8s-node"
def GOOGLE_APPLICATION_CREDENTIALS = '/home/jenkins/dev/jenkins-deploy-dev-infra.json'
def containerName = "vault"
def GOOGLE_APPLICATION_CREDENTIALS = '/home/jenkins/dev/jenkins-deploy-dev-infra.json'
// Tells the ./scripts/common.sh which VAULT_VERSION of the vault CLI binary to use
def VAULT_VERSION = '1.0.2'

podTemplate(label: label, yaml: """
apiVersion: v1
kind: Pod
metadata:
  labels:
    jenkins: build-node
spec:
  containers:
  - name: ${containerName}
    image: gcr.io/pso-helmsman-cicd/jenkins-k8s-node:${env.CONTAINER_VERSION}
    command: ['cat']
    tty: true
    volumeMounts:
    # Mount the dev service account key
    - name: dev-key
      mountPath: /home/jenkins/dev
    # Mount the host /dev/urandom to /dev/random for entropy
    - name: random
      mountPath: /dev/random
  volumes:
  # Create a volume that contains the dev json key that was saved as a secret
  - name: dev-key
    secret:
      secretName: jenkins-deploy-dev-infra
  # Host /dev/urandom to allow for entropy access
  - name: random
    hostPath:
      path: /dev/urandom
"""
) {
node(label) {
try {
// Options covers all other job properties or wrapper functions that apply to entire Pipeline.
properties([disableConcurrentBuilds()])
// set env variable GOOGLE_APPLICATION_CREDENTIALS for Terraform
env.GOOGLE_APPLICATION_CREDENTIALS=GOOGLE_APPLICATION_CREDENTIALS
def VAULT_VERSION = '1.2.0'
def jenkins_container_version = env.JENKINS_CONTAINER_VERSION

stage('Setup') {
container(containerName) {
// checkout code from scm i.e. commits related to the PR
checkout scm

// Setup gcloud service account access
sh "gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS}"
sh "gcloud config set compute/zone ${env.ZONE}"
sh "gcloud config set core/project ${env.PROJECT_ID}"
sh "gcloud config set compute/region ${env.REGION}"
}
}
stage('Lint') {
container(containerName) {
sh "make lint"
}
}
podTemplate(
containers: [
containerTemplate(name: "${containerName}",
image: "gcr.io/pso-helmsman-cicd/jenkins-k8s-node:${jenkins_container_version}",
command: 'tail -f /dev/null',
resourceRequestCpu: '1000m',
resourceLimitCpu: '2000m',
resourceRequestMemory: '1Gi',
resourceLimitMemory: '2Gi'
)
],
volumes: [secretVolume(mountPath: '/home/jenkins/dev',
secretName: 'jenkins-deploy-dev-infra'
),
hostPathVolume(mountPath: '/dev/random', hostPath: '/dev/urandom')
]
) {
node(POD_LABEL) {
try {
// Options covers all other job properties or wrapper functions that apply to entire Pipeline.
properties([disableConcurrentBuilds()])
// set env variable GOOGLE_APPLICATION_CREDENTIALS for Terraform
env.GOOGLE_APPLICATION_CREDENTIALS = GOOGLE_APPLICATION_CREDENTIALS

stage('Create') {
container(containerName) {
sh "make create"
}
}
stage('Setup') {
container(containerName) {
// checkout code from scm i.e. commits related to the PR
checkout scm

stage('Validate') {
container(containerName) {
sh "make validate"
// Setup gcloud service account access
sh "gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS}"
sh "gcloud config set compute/zone ${env.ZONE}"
sh "gcloud config set core/project ${env.PROJECT_ID}"
sh "gcloud config set compute/region ${env.REGION}"
}
}
stage('Lint') {
container(containerName) {
sh "make lint"
}
}
stage('Create') {
container(containerName) {
sh "make create"
}
}
stage('Validate') {
container(containerName) {
sh "make validate"
}
}
} catch (err) {
// if any exception occurs, mark the build as failed
// and display a detailed message on the Jenkins console output
currentBuild.result = 'FAILURE'
echo "FAILURE caught echo ${err}"
throw err
} finally {
stage('Teardown') {
container(containerName) {
sh "make teardown"
}
}
}
}

}
catch (err) {
// if any exception occurs, mark the build as failed
// and display a detailed message on the Jenkins console output
currentBuild.result = 'FAILURE'
echo "FAILURE caught echo ${err}"
throw err
}
finally {
stage('Teardown') {
container(containerName) {
sh "make teardown"
}
}
}
}
}
}
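The rewrite replaces the inline YAML pod spec with the kubernetes-plugin's declarative `podTemplate`/`containerTemplate` DSL and the plugin-supplied `POD_LABEL` binding, so the hand-rolled `label` variable goes away. A minimal sketch of that pattern, with a placeholder container name and image rather than anything from this repo:

```groovy
// Declarative pod template: the plugin generates a unique pod label,
// exposed to the pipeline as POD_LABEL.
podTemplate(containers: [
    containerTemplate(name: 'builder',
                      image: 'gcr.io/example-project/builder:latest', // placeholder image
                      command: 'tail -f /dev/null')                   // keep the container alive
]) {
    node(POD_LABEL) {
        container('builder') {
            sh 'echo running inside the builder container'
        }
    }
}
```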
README.md (12 changes: 9 additions & 3 deletions)
@@ -128,14 +128,20 @@ Recovery Seal Type shamir
Sealed false
Total Recovery Shares 1
Threshold 1
Version 1.0.0
Version 1.2.0
Cluster Name vault-cluster-be7094aa
Cluster ID ac0d2d33-61db-a06a-77d0-eb9c1e87b236
HA Enabled true
HA Cluster https://10.24.1.3:8201
HA Mode active
```

Enable the `kv` store inside Vault:

```console
vault secrets enable -path=secret/ kv
```
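
Before writing anything, you can sanity-check that the engine is mounted; `vault secrets list` prints every enabled mount (an extra verification step, not part of the original walkthrough):

```console
vault secrets list
```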

Create a sample secret in Vault inside the custom `kv` path:

```console
@@ -181,7 +187,7 @@ Recovery Seal Type shamir
Sealed false
Total Recovery Shares 1
Threshold 1
Version 1.0.0
Version 1.2.0
Cluster Name vault-cluster-be7094aa
Cluster ID ac0d2d33-61db-a06a-77d0-eb9c1e87b236
HA Enabled true
@@ -320,7 +326,7 @@ Recovery Seal Type shamir
Sealed false
Total Recovery Shares 1
Threshold 1
Version 1.0.0
Version 1.2.0
Cluster Name vault-cluster-be7094aa
Cluster ID ac0d2d33-61db-a06a-77d0-eb9c1e87b236
HA Enabled true
k8s-manifests/sidecar.yaml (2 changes: 1 addition & 1 deletion)
@@ -51,7 +51,7 @@ spec:
# The vault-authenticator container authenticates the container using the
# kubernetes auth method and puts the resulting token on the filesystem.
- name: vault-authenticator
image: registry.hub.docker.com/sethvargo/vault-kubernetes-authenticator:0.1.0
image: registry.hub.docker.com/sethvargo/vault-kubernetes-authenticator:0.3.0
imagePullPolicy: Always
volumeMounts:
# The mount where the vault token will be written after login
scripts/auth-to-vault.sh (3 changes: 3 additions & 0 deletions)
@@ -99,6 +99,9 @@ vault write auth/kubernetes/config \
kubernetes_ca_cert="${K8S_CACERT}" \
token_reviewer_jwt="${TR_ACCOUNT_TOKEN}"

# Enable the KV secrets backend
vault secrets enable -path=secret/ kv

# Create a policy to be referenced by a role to access the kv location secret/myapp/*
vault policy write myapp-kv-rw - <<EOF
path "secret/myapp/*" {
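The policy body is truncated in this view, but once the script has run, the new policy can be checked from any shell holding a valid Vault token; the policy name comes from the script above:

```console
vault policy read myapp-kv-rw
```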
terraform/gcs.tf (4 changes: 2 additions & 2 deletions)
@@ -16,8 +16,8 @@ limitations under the License.

# Create the storage bucket
resource "google_storage_bucket" "app" {
name = "${var.project}-gcs"
project = "${var.project}"
name = format("%s-gcs", var.project)
project = var.project
force_destroy = true

depends_on = ["google_project_service.app_service"]
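The edits in this file are the mechanical half of the 0.11-to-0.12 migration: interpolation-only strings such as `"${var.project}"` become bare expressions, and string concatenation moves into `format()`. Terraform 0.12 ships a helper that rewrites most of this automatically; a typical invocation from the module directory (not a script in this repo):

```console
terraform init
terraform 0.12upgrade
```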
terraform/iam.tf (18 changes: 13 additions & 5 deletions)
@@ -14,11 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

# Add user-specified roles
# Add user-specified roles to App SA
resource "google_project_iam_member" "app-service-account" {
count = length(var.app_service_account_roles)
project = var.project
role = element(var.app_service_account_roles, count.index)
member = format("serviceAccount:gke-vault-demo-app-cluster@%s.iam.gserviceaccount.com", var.project)
}

# Add user-specified roles to Vault SA
resource "google_project_iam_member" "service-account" {
count = "${length(var.service_account_roles)}"
project = "${var.project}"
role = "${element(var.service_account_roles, count.index)}"
member = "serviceAccount:vault-server@${module.vault.project}.iam.gserviceaccount.com"
count = length(var.service_account_roles)
project = var.project
role = element(var.service_account_roles, count.index)
member = format("serviceAccount:vault-server@%s.iam.gserviceaccount.com", module.vault.project)
depends_on = ["module.vault"]
}
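
Both resources keep the 0.11-era `count`/`element` indexing, which 0.12 still supports. A `for_each` variant — available from Terraform 0.12.6 and shown here only as a hedged alternative, not what this PR does — keys each binding by role name, so removing one role does not reindex the rest:

```hcl
resource "google_project_iam_member" "service_account" {
  # Iterate over the role list as a set, keyed by role name.
  for_each = toset(var.service_account_roles)

  project = var.project
  role    = each.value
  member  = format("serviceAccount:vault-server@%s.iam.gserviceaccount.com", module.vault.project)
}
```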
terraform/main.tf (58 changes: 34 additions & 24 deletions)
@@ -17,22 +17,22 @@ limitations under the License.
// Provides access to available Google Container Engine versions in a region for a given project.
// https://www.terraform.io/docs/providers/google/d/google_container_engine_versions.html
data "google_container_engine_versions" "gke_version" {
project = "${var.project}"
location = "${var.region}"
project = var.project
location = var.region
}

# Create the dedicated GKE service account for the application cluster
resource "google_service_account" "app_cluster" {
account_id = "gke-vault-demo-app-cluster"
display_name = "Application Cluster"
project = "${var.project}"
project = var.project
}

# Enable required services on the app cluster project
resource "google_project_service" "app_service" {
count = "${length(var.app_project_services)}"
project = "${var.project}"
service = "${element(var.app_project_services, count.index)}"
count = length(var.app_project_services)
project = var.project
service = element(var.app_project_services, count.index)

# Do not disable the service on destroy. On destroy, we are going to
# destroy the project, but we need the APIs available to destroy the
@@ -42,18 +42,17 @@ resource "google_project_service" "app_service" {

# Create the GKE cluster
resource "google_container_cluster" "app" {
provider = "google-beta"
name = "${var.application_cluster_name}"
project = "${var.project}"
location = "${var.region}"
provider = "google-beta"
name = var.application_cluster_name
project = var.project
location = var.region

network = "${google_compute_network.app-network.self_link}"
subnetwork = "${google_compute_subnetwork.app-subnetwork.self_link}"
network = google_compute_network.app-network.self_link
subnetwork = google_compute_subnetwork.app-subnetwork.self_link

initial_node_count = "${var.num_nodes_per_zone}"
initial_node_count = var.num_nodes_per_zone

min_master_version = "${data.google_container_engine_versions.gke_version.latest_master_version}"
node_version = "${data.google_container_engine_versions.gke_version.latest_node_version}"
min_master_version = data.google_container_engine_versions.gke_version.latest_master_version

logging_service = "logging.googleapis.com"
monitoring_service = "monitoring.googleapis.com"
@@ -63,18 +62,23 @@ resource "google_container_cluster" "app" {

node_config {
machine_type = "n1-standard-1"
service_account = "${google_service_account.app_cluster.email}"
service_account = google_service_account.app_cluster.email

oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/trace.append"
]

# Set metadata on the VM to supply more entropy
metadata {
metadata = {
google-compute-enable-virtio-rng = "true"
}

labels {
labels = {
service = "applications"
}

@@ -118,19 +122,25 @@ resource "google_container_cluster" "app" {
# Set the maintenance window.
maintenance_policy {
daily_maintenance_window {
start_time = "${var.daily_maintenance_window}"
start_time = var.daily_maintenance_window
}
}

# Allocate IPs in our subnetwork
ip_allocation_policy {
cluster_secondary_range_name = "${google_compute_subnetwork.app-subnetwork.secondary_ip_range.0.range_name}"
services_secondary_range_name = "${google_compute_subnetwork.app-subnetwork.secondary_ip_range.1.range_name}"
cluster_secondary_range_name = google_compute_subnetwork.app-subnetwork.secondary_ip_range.0.range_name
services_secondary_range_name = google_compute_subnetwork.app-subnetwork.secondary_ip_range.1.range_name
}

# Specify the list of CIDRs which can access the GKE API Server
master_authorized_networks_config {
cidr_blocks = ["${var.kubernetes_master_authorized_networks}"]
dynamic "cidr_blocks" {
for_each = var.kubernetes_master_authorized_networks
content {
cidr_block = cidr_blocks.value.cidr_block
display_name = cidr_blocks.value.display_name
}
}
}

# Configure the cluster to be private (not have public facing IPs)
@@ -143,7 +153,7 @@
enable_private_endpoint = false

enable_private_nodes = true
master_ipv4_cidr_block = "${var.kubernetes_masters_ipv4_cidr}"
master_ipv4_cidr_block = var.kubernetes_masters_ipv4_cidr
}

depends_on = [
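The new `dynamic "cidr_blocks"` block assumes `var.kubernetes_master_authorized_networks` is a list of objects carrying `cidr_block` and `display_name` attributes. The declaration itself lives in variables.tf, which is not part of this diff; under that assumption it would look roughly like:

```hcl
variable "kubernetes_master_authorized_networks" {
  description = "CIDRs allowed to reach the GKE API server (assumed shape, not from this diff)"
  type = list(object({
    cidr_block   = string
    display_name = string
  }))
}
```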