# main.tf (forked from cloudposse/terraform-aws-s3-log-storage)
resource "aws_s3_bucket" "default" {
#bridgecrew:skip=BC_AWS_S3_13:Skipping `Enable S3 Bucket Logging` check until bridgecrew will support dynamic blocks (https://github.com/bridgecrewio/checkov/issues/776).
#bridgecrew:skip=CKV_AWS_52:Skipping `Ensure S3 bucket has MFA delete enabled` due to issue in terraform (https://github.com/hashicorp/terraform-provider-aws/issues/629).
count = module.this.enabled ? 1 : 0
bucket = module.this.id
acl = var.acl
force_destroy = var.force_destroy
policy = var.policy
versioning {
enabled = var.versioning_enabled
}
dynamic "lifecycle_rule" {
for_each = [for i in var.lifecycle_rules: {
id = i.lifecycle_prefix
lifecycle_prefix = i.lifecycle_prefix
noncurrent_version_expiration_days = i.noncurrent_version_expiration_days
noncurrent_version_transition_days = i.noncurrent_version_transition_days
standard_transition_days = i.standard_transition_days
glacier_transition_days = i.glacier_transition_days
expiration_days = i.expiration_days
}
]
content {
id = lifecycle_rule.value.id
enabled = var.lifecycle_rule_enabled
prefix = lifecycle_rule.value.lifecycle_prefix
tags = var.lifecycle_tags
abort_incomplete_multipart_upload_days = var.abort_incomplete_multipart_upload_days
noncurrent_version_expiration {
days = lifecycle_rule.value.noncurrent_version_expiration_days
}
dynamic "noncurrent_version_transition" {
for_each = var.enable_glacier_transition ? [1] : []
content {
days = lifecycle_rule.value.noncurrent_version_transition_days
storage_class = "DEEP_ARCHIVE"
}
}
## Moving directly glacier deep archive
#transition {
# days = lifecycle_rule.value.standard_transition_days
# storage_class = "STANDARD_IA"
#}
dynamic "transition" {
for_each = var.enable_glacier_transition ? [1] : []
content {
days = lifecycle_rule.value.glacier_transition_days
storage_class = "DEEP_ARCHIVE"
}
}
expiration {
days = lifecycle_rule.value.expiration_days
}
}
}
dynamic "logging" {
for_each = var.access_log_bucket_name != "" ? [1] : []
content {
target_bucket = var.access_log_bucket_name
target_prefix = "logs/${module.this.id}/"
}
}
# https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
# https://www.terraform.io/docs/providers/aws/r/s3_bucket.html#enable-default-server-side-encryption
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
sse_algorithm = var.sse_algorithm
kms_master_key_id = var.kms_master_key_arn
}
}
}
tags = module.this.tags
}
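## The dynamic "lifecycle_rule" block above iterates over `var.lifecycle_rules`.
## Its actual declaration lives in variables.tf (not shown here); judging from
## the attributes referenced above, the expected shape is presumably:
#
# variable "lifecycle_rules" {
#   type = list(object({
#     lifecycle_prefix                   = string
#     noncurrent_version_expiration_days = number
#     noncurrent_version_transition_days = number
#     standard_transition_days           = number
#     glacier_transition_days            = number
#     expiration_days                    = number
#   }))
#   default = []
# }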
# Refer to the Terraform documentation on s3_bucket_public_access_block at
# https://www.terraform.io/docs/providers/aws/r/s3_bucket_public_access_block.html
# for the nuances of the blocking options
resource "aws_s3_bucket_public_access_block" "default" {
  count  = module.this.enabled ? 1 : 0
  bucket = join("", aws_s3_bucket.default.*.id)

  block_public_acls       = var.block_public_acls
  block_public_policy     = var.block_public_policy
  ignore_public_acls      = var.ignore_public_acls
  restrict_public_buckets = var.restrict_public_buckets
}
## Apply the object ownership setting from `var.object_ownership`
## (e.g. BucketOwnerPreferred) to the bucket
resource "aws_s3_bucket_ownership_controls" "object_ownership_control" {
  count  = module.this.enabled ? 1 : 0
  bucket = join("", aws_s3_bucket.default.*.id)

  rule {
    object_ownership = var.object_ownership
  }
}
## Enable S3 bucket analytics for the entire bucket
resource "aws_s3_bucket_analytics_configuration" "entire-bucket" {
  count  = var.enabled_analytics ? 1 : 0
  bucket = join("", aws_s3_bucket.default.*.id)
  name   = "EntireBucket"

  storage_class_analysis {
    data_export {
      destination {
        s3_bucket_destination {
          bucket_arn = join("", aws_s3_bucket.analytics.*.arn)
        }
      }
    }
  }
}
## Destination bucket for the S3 analytics data export
resource "aws_s3_bucket" "analytics" {
  count  = var.enabled_analytics ? 1 : 0
  bucket = var.analytics_bucket_name
}
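## S3 delivers analytics exports as the `s3.amazonaws.com` service principal,
## so the destination bucket needs a policy allowing it to write; this file
## does not create one. A minimal sketch (commented out; the resource and
## data-source names are illustrative, not part of the module):
#
# data "aws_caller_identity" "current" {}
#
# resource "aws_s3_bucket_policy" "analytics" {
#   count  = var.enabled_analytics ? 1 : 0
#   bucket = join("", aws_s3_bucket.analytics.*.id)
#   policy = jsonencode({
#     Version = "2012-10-17"
#     Statement = [{
#       Sid       = "AllowS3AnalyticsExport"
#       Effect    = "Allow"
#       Principal = { Service = "s3.amazonaws.com" }
#       Action    = "s3:PutObject"
#       Resource  = "${join("", aws_s3_bucket.analytics.*.arn)}/*"
#       Condition = {
#         ArnLike = { "aws:SourceArn" = join("", aws_s3_bucket.default.*.arn) }
#         StringEquals = {
#           "aws:SourceAccount" = data.aws_caller_identity.current.account_id
#           "s3:x-amz-acl"      = "bucket-owner-full-control"
#         }
#       }
#     }]
#   })
# }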
## Add analytics configuration with S3 bucket object filter
#resource "aws_s3_bucket_analytics_configuration" "analytics-filtered" {
#  count  = length(var.lifecycle_rules)
#  bucket = join("", aws_s3_bucket.default.*.id)
#
#  name = "${var.lifecycle_rules[count.index].lifecycle_prefix}-filter"
#  filter {
#    prefix = var.lifecycle_rules[count.index].lifecycle_prefix
#  }
#}
resource "aws_s3_bucket_metric" "request-filtered" {
count = length(var.lifecycle_rules)
bucket = join("", aws_s3_bucket.default.*.id)
name = "${var.lifecycle_rules[count.index].lifecycle_prefix}-filter"
filter {
prefix = "${var.lifecycle_rules[count.index].lifecycle_prefix}"
}
}
## Create folder (prefix) placeholder objects inside the S3 bucket
resource "aws_s3_bucket_object" "prefix" {
  count  = length(var.lifecycle_rules)
  bucket = join("", aws_s3_bucket.default.*.id)
  acl    = "private"
  key    = "${var.lifecycle_rules[count.index].lifecycle_prefix}/"
  # An empty object whose key ends in "/" appears as a folder in the console
  source                 = "/dev/null"
  server_side_encryption = var.sse_algorithm
}
## Add multiple notification configurations publishing to SQS queues
resource "aws_s3_bucket_notification" "bucket_notification" {
  count  = var.enabled_bucket_notification ? 1 : 0
  bucket = join("", aws_s3_bucket.default.*.id)

  dynamic "queue" {
    for_each = [for i in var.event_notifications : {
      id            = i.event_id
      filter_prefix = i.filter_prefix
      filter_suffix = i.filter_suffix
      queue_arn     = i.queue_arn
    }]

    content {
      id            = queue.value.id
      queue_arn     = queue.value.queue_arn
      events        = ["s3:ObjectCreated:*"]
      filter_prefix = queue.value.filter_prefix
      filter_suffix = queue.value.filter_suffix
    }
  }
}
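## The dynamic "queue" block above iterates over `var.event_notifications`,
## declared in variables.tf (not shown). Based on the attributes referenced,
## a sketch of the expected shape, with a hypothetical example value:
#
# variable "event_notifications" {
#   type = list(object({
#     event_id      = string
#     filter_prefix = string
#     filter_suffix = string
#     queue_arn     = string
#   }))
#   default = []
# }
#
# event_notifications = [{
#   event_id      = "logs-created"
#   filter_prefix = "logs/"
#   filter_suffix = ".gz"
#   queue_arn     = "arn:aws:sqs:us-east-1:111111111111:example-queue" # hypothetical ARN
# }]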