# proton-run.yml (generated from aws-samples/eks-blueprints-for-proton)
# This workflow runs in response to a commit made by AWS Proton.
# It only works when exactly one resource is modified as part of the commit.
name: 'proton-run'
on:
  pull_request:
    types:
      - opened
      - reopened
    paths:
      - '**/.proton/deployment-metadata.json'
  push:
    branches:
      - main
    paths:
      - '**/.proton/deployment-metadata.json'
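# Illustrative sketch (not the authoritative Proton schema) of the fields this
# workflow reads from .proton/deployment-metadata.json; values are placeholders:
#   {
#     "deploymentId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",
#     "isResourceDeleted": false,
#     "resourceMetadata": {
#       "arn": "arn:aws:proton:us-east-1:111122223333:environment/my-env",
#       "environmentArn": "arn:aws:proton:us-east-1:111122223333:environment/my-env"
#     }
#   }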
jobs:
  get-deployment-data:
    name: Get Deployment Data
    runs-on: ubuntu-latest
    outputs:
      role_arn: ${{ steps.get-data.outputs.role_arn }}
      environment: ${{ steps.get-data.outputs.environment }}
      resource_arn: ${{ steps.get-data.outputs.resource_arn }}
      working_directory: ${{ steps.get-data.outputs.working_directory }}
      deployment_id: ${{ steps.get-data.outputs.deployment_id }}
      target_region: ${{ steps.get-data.outputs.target_region }}
      proton_region: ${{ steps.get-data.outputs.proton_region }}
      state_bucket: ${{ steps.get-data.outputs.state_bucket }}
      is_deleted: ${{ steps.get-data.outputs.is_deleted }}
    permissions:
      id-token: write
      contents: read
    continue-on-error: true
    steps:
      # Checkout the repository to the GitHub Actions runner
      - name: Checkout
        uses: actions/checkout@v2
      - name: Verify env_config updated
        run: |
          if grep -q REPLACE_ME env_config.json; then
            echo "You must update env_config.json or update this workflow to not require it."
            exit 1
          fi
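      # The "Get data" step below expects env_config.json to map each Proton
      # environment name (or a "*" wildcard) to a deployment role, target region,
      # and Terraform state bucket. A minimal sketch with placeholder values:
      #   {
      #     "my-env": {
      #       "role": "arn:aws:iam::111122223333:role/proton-deploy-role",
      #       "region": "us-east-1",
      #       "state_bucket": "my-terraform-state-bucket"
      #     },
      #     "*": { "role": "...", "region": "...", "state_bucket": "..." }
      #   }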
      - name: Get changed files
        id: files
        uses: jitterbit/get-changed-files@v1
      - name: Find modified resource
        id: find-modified
        run: |
          found=false
          for changed_file in ${{ steps.files.outputs.all }}; do
            if [[ "$changed_file" == *".proton/deployment-metadata.json" ]]; then
              echo "found file"
              if [[ "$found" == true ]]; then
                echo "More than one resource has a new deployment; unable to determine which one to update, exiting."
                exit 1
              fi
              echo "setting found to true"
              found=true
              echo "setting outputs"
              echo "::set-output name=deployment-metadata-path::$changed_file"
            fi
          done
          if [[ "$found" == false ]]; then
            echo "No change made to deployment-metadata.json, exiting"
            exit 1
          fi
      - name: Get data
        id: get-data
        run: |
          modified_resource_arn=$(jq -r '.resourceMetadata.arn' ${{ steps.find-modified.outputs.deployment-metadata-path }})
          echo "::set-output name=resource_arn::$modified_resource_arn"
          IFS=':'
          read -a split_arn <<< "$modified_resource_arn"
          proton_region=${split_arn[3]}
          echo "::set-output name=proton_region::$proton_region"
          deployment_id=$(jq -r '.deploymentId' ${{ steps.find-modified.outputs.deployment-metadata-path }})
          echo "::set-output name=deployment_id::$deployment_id"
          is_deleted=$(jq -r '.isResourceDeleted' ${{ steps.find-modified.outputs.deployment-metadata-path }})
          echo "::set-output name=is_deleted::$is_deleted"
          if [[ "$modified_resource_arn" == *":environment/"* ]]; then
            environment_name=${modified_resource_arn##*/}
            working_directory="$environment_name/"
          elif [[ "$modified_resource_arn" == *"/service-instance/"* ]]; then
            environment_arn=$(jq -r '.resourceMetadata.environmentArn' ${{ steps.find-modified.outputs.deployment-metadata-path }})
            environment_name=${environment_arn##*/}
            resource_portion=${modified_resource_arn##*:}
            IFS='/'
            read -a split_resources <<< "$resource_portion"
            service_name=${split_resources[1]}
            instance_name=${split_resources[3]}
            working_directory=$environment_name/$service_name-$instance_name/
          elif [[ "$modified_resource_arn" == *"/pipeline"* ]]; then
            environment_name="pipeline"
            resource_portion=${modified_resource_arn##*:}
            IFS='/'
            read -a split_resources <<< "$resource_portion"
            service_name=${split_resources[1]}
            working_directory=$service_name/pipeline
          fi
          if [[ $(jq -r --arg env $environment_name 'has($env)' env_config.json) = "true" ]]; then
            role_arn=$(jq -r --arg env $environment_name '.[$env]["role"]' env_config.json)
            target_region=$(jq -r --arg env $environment_name '.[$env]["region"]' env_config.json)
            state_bucket=$(jq -r --arg env $environment_name '.[$env]["state_bucket"]' env_config.json)
          else
            if [[ $(jq -r --arg env $environment_name 'has("*")' env_config.json) = "true" ]]; then
              role_arn=$(jq -r --arg env $environment_name '.["*"]["role"]' env_config.json)
              target_region=$(jq -r --arg env $environment_name '.["*"]["region"]' env_config.json)
              state_bucket=$(jq -r --arg env $environment_name '.["*"]["state_bucket"]' env_config.json)
            else
              echo "Missing $environment_name or * from env_config.json, exiting"
              exit 1
            fi
          fi
          echo "::set-output name=working_directory::$working_directory"
          echo "::set-output name=environment::$environment_name"
          echo "::set-output name=role_arn::$role_arn"
          echo "::set-output name=target_region::$target_region"
          echo "::set-output name=state_bucket::$state_bucket"
  terraform:
    name: 'Terraform'
    needs: get-deployment-data
    runs-on: ubuntu-latest
    environment: ${{ needs.get-deployment-data.outputs.environment }}
    permissions:
      id-token: write
      contents: read
    defaults:
      run:
        working-directory: ${{ needs.get-deployment-data.outputs.working_directory }}
        shell: bash # Use the Bash shell regardless of whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
    if: needs.get-deployment-data.result == 'success' && needs.get-deployment-data.outputs.is_deleted == 'false'
    continue-on-error: true
    outputs:
      success: ${{ steps.mark_success.outputs.success }}
    steps:
      # Checkout the repository to the GitHub Actions runner
      - name: Checkout
        uses: actions/checkout@v2
      - name: Configure AWS Credentials
        id: assume_role
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ needs.get-deployment-data.outputs.target_region }}
          role-to-assume: ${{ needs.get-deployment-data.outputs.role_arn }}
          role-session-name: TF-Github-Actions
          mask-aws-account-id: 'no'
      # Install the Terraform CLI (pinned to 1.2.0 below) with the output wrapper disabled
      - name: Setup Terraform
        id: tf_setup
        uses: hashicorp/setup-terraform@v1
        with:
          terraform_version: 1.2.0
          terraform_wrapper: false
      # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc.
      - name: Terraform Init
        id: tf_init
        run: terraform init -backend-config="bucket=${{ needs.get-deployment-data.outputs.state_bucket }}" -backend-config="key=${{ needs.get-deployment-data.outputs.working_directory }}terraform.tfstate" -backend-config="region=${{ needs.get-deployment-data.outputs.target_region }}"
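      # Illustrative backend layout (placeholder names): with state bucket
      # "my-terraform-state-bucket" and working_directory "my-env/", the init above
      # stores state at s3://my-terraform-state-bucket/my-env/terraform.tfstate in the
      # target region, so each environment or service instance gets its own state file.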
      # Checks that all Terraform configuration files adhere to a canonical format
      - name: Terraform Format
        id: tf_fmt
        run: terraform fmt -diff -check
      # Generates an execution plan for Terraform
      - name: Terraform Plan
        id: tf_plan
        run: terraform plan -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}"
      # On push to main, build or change infrastructure according to the Terraform configuration files
      # Note: It is recommended to set up a required "strict" status check in your repository for this job. See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks
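      # The three targeted applies below appear to stage the rollout: VPC first, then
      # the EKS cluster, then the Kubernetes add-ons, before a final full apply picks
      # up anything remaining. This matches the ordering EKS Blueprints generally
      # recommends, since the Kubernetes/Helm providers used by the add-ons need the
      # cluster endpoint to exist first.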
      - name: Terraform Apply VPC
        id: tf_apply_vpc
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: terraform apply -target="module.vpc" -auto-approve -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}"
      - name: Terraform Apply EKS
        id: tf_apply_eks
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: terraform apply -target="module.eks_blueprints" -auto-approve -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}"
      - name: Terraform Apply Kubernetes addons
        id: tf_apply_addons
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: terraform apply -target="module.kubernetes_addons" -auto-approve -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}"
      - name: Terraform Apply All
        id: tf_apply
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: terraform apply -auto-approve -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}"
      # If this completes, then the entire workflow has successfully completed
      - name: Mark Success
        id: mark_success
        run: echo "::set-output name=success::True"
  notify-proton:
    name: 'Notify Proton'
    needs:
      - get-deployment-data
      - terraform
    runs-on: ubuntu-latest
    environment: ${{ needs.get-deployment-data.outputs.environment }}
    if: github.event_name == 'push' && github.ref == 'refs/heads/main' && needs.get-deployment-data.outputs.is_deleted == 'false'
    permissions:
      id-token: write
      contents: read
    defaults:
      run:
        working-directory: ${{ needs.get-deployment-data.outputs.working_directory }}
        shell: bash # Use the Bash shell regardless of whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
    steps:
      # Checkout the repository to the GitHub Actions runner
      - name: Checkout
        uses: actions/checkout@v2
      - name: Configure AWS Credentials
        id: assume_role
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ needs.get-deployment-data.outputs.target_region }}
          role-to-assume: ${{ needs.get-deployment-data.outputs.role_arn }}
          role-session-name: TF-Github-Actions-Notify-Proton
          mask-aws-account-id: 'no'
      # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc.
      - name: Terraform Init
        id: tf_init
        continue-on-error: true
        run: terraform init -backend-config="bucket=${{ needs.get-deployment-data.outputs.state_bucket }}" -backend-config="key=${{ needs.get-deployment-data.outputs.working_directory }}terraform.tfstate" -backend-config="region=${{ needs.get-deployment-data.outputs.target_region }}"
      - name: Notify Proton Success
        id: notify_success
        if: needs.terraform.outputs.success == 'True' && steps.tf_init.outcome == 'success'
        run: |
          # Get outputs as JSON
          outputs_json=$(terraform output -json)
          # Map the Terraform output JSON to Proton outputs JSON (keep only string outputs; e.g. bool types do not work with Proton)
          formatted_outputs=( $(echo $outputs_json | jq 'to_entries|map(select(.value.type == "string") | {key: .key, valueString: .value.value})') )
          # Notify Proton
          aws proton notify-resource-deployment-status-change --region ${{ needs.get-deployment-data.outputs.proton_region }} --resource-arn ${{ needs.get-deployment-data.outputs.resource_arn }} --status SUCCEEDED --deployment-id ${{ needs.get-deployment-data.outputs.deployment_id }} --outputs "${formatted_outputs[*]}"
          echo "Notify success!"
      - name: Notify Proton Failure
        if: needs.terraform.outputs.success != 'True' || steps.tf_init.outcome != 'success'
        run: |
          aws proton notify-resource-deployment-status-change --region ${{ needs.get-deployment-data.outputs.proton_region }} --resource-arn ${{ needs.get-deployment-data.outputs.resource_arn }} --status FAILED --deployment-id ${{ needs.get-deployment-data.outputs.deployment_id }}
          echo "Notify failure!"
  terraform-destroy:
    name: 'Run terraform destroy'
    needs:
      - get-deployment-data
    runs-on: ubuntu-latest
    environment: ${{ needs.get-deployment-data.outputs.environment }}
    if: github.event_name == 'push' && github.ref == 'refs/heads/main' && needs.get-deployment-data.outputs.is_deleted == 'true'
    permissions:
      id-token: write
      contents: read
    defaults:
      run:
        working-directory: ${{ needs.get-deployment-data.outputs.working_directory }}
        shell: bash # Use the Bash shell regardless of whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
    steps:
      # Checkout the repository to the GitHub Actions runner
      - name: Checkout
        uses: actions/checkout@v2
      - name: Configure AWS Credentials
        id: assume_role
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ needs.get-deployment-data.outputs.target_region }}
          role-to-assume: ${{ needs.get-deployment-data.outputs.role_arn }}
          role-session-name: TF-Github-Actions-Notify-Proton
          mask-aws-account-id: 'no'
      # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc.
      - name: Terraform Init
        id: tf_init
        run: terraform init -backend-config="bucket=${{ needs.get-deployment-data.outputs.state_bucket }}" -backend-config="key=${{ needs.get-deployment-data.outputs.working_directory }}terraform.tfstate" -backend-config="region=${{ needs.get-deployment-data.outputs.target_region }}"
      - name: Clean EKS cluster applications
        id: eks_connect
        run: |
          cmd=$(terraform output | grep eks | awk -F "configure_kubectl = " '{print $2}' | cut -d\" -f2)
          eval $cmd
          kubectl delete applications --all -A
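      # The configure_kubectl output used above typically holds an
      # "aws eks update-kubeconfig --region <region> --name <cluster>" command
      # (an assumption based on common EKS Blueprints outputs); evaluating it points
      # kubectl at the cluster so Application resources (e.g. Argo CD apps) can be
      # removed before the targeted destroys below, which tear things down in
      # reverse order: add-ons, then the cluster, then everything else.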
      - name: Terraform Destroy (Add-ons)
        id: tf_destroy_addons
        run: terraform apply -auto-approve -destroy -target="module.kubernetes_addons" -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}"
      - name: Terraform Destroy (Cluster)
        id: tf_destroy_cluster
        run: terraform apply -auto-approve -destroy -target="module.eks_blueprints" -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}"
      - name: Terraform Destroy (Everything)
        id: tf_destroy_everything
        run: terraform apply -auto-approve -destroy -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}"