S3 resource supports AWS provider v4 #77

Closed
wants to merge 12 commits into from
main.tf: 151 changes (92 additions, 59 deletions)
@@ -1,35 +1,39 @@
 locals {
   enabled = module.this.enabled
-
-  website_config = {
-    redirect_all = [
-      {
-        redirect_all_requests_to = var.redirect_all_requests_to
-      }
-    ]
-    default = [
-      {
-        index_document = var.index_document
-        error_document = var.error_document
-        routing_rules  = var.routing_rules
-      }
-    ]
-  }
+  bucket_arn = "arn:${data.aws_partition.current.partition}:s3:::${join("", aws_s3_bucket.default.*.id)}"
 }

-module "logs" {
-  source                   = "cloudposse/s3-log-storage/aws"
-  version                  = "0.20.0"
-  attributes               = ["logs"]
-  enabled                  = local.enabled && var.logs_enabled
-  standard_transition_days = var.logs_standard_transition_days
-  glacier_transition_days  = var.logs_glacier_transition_days
-  expiration_days          = var.logs_expiration_days
-  force_destroy            = var.force_destroy
-
-  context = module.this.context
-}
+module "logs" {
+  source     = "cloudposse/s3-log-storage/aws"
+  version    = "0.28.0"
+  attributes = ["logs"]
+  context    = module.this.context
+  lifecycle_configuration_rules = [
+    {
+      abort_incomplete_multipart_upload_days = 1
+      enabled                                = true
+      expiration = {
+        days = 90
+      }
+      filter_and = {
+        prefix = ""
+      }
+      id = "logs"
+      noncurrent_version_expiration = {
+        newer_noncurrent_versions = 2
+        noncurrent_days           = 30
+      }
+      noncurrent_version_transition = []
+      transition = [
+        {
+          days          = 30
+          storage_class = "GLACIER"
+        },
+      ]
+    }
+  ]
+}

module "default_label" {
source = "cloudposse/label/null"
@@ -44,66 +48,95 @@ resource "aws_s3_bucket" "default" {
   #bridgecrew:skip=BC_AWS_S3_1:The bucket used for a public static website. (https://docs.bridgecrew.io/docs/s3_1-acl-read-permissions-everyone)
   #bridgecrew:skip=BC_AWS_S3_14:Skipping `Ensure all data stored in the S3 bucket is securely encrypted at rest` check until bridgecrew will support dynamic blocks (https://github.com/bridgecrewio/checkov/issues/776).
   #bridgecrew:skip=CKV_AWS_52:Skipping `Ensure S3 bucket has MFA delete enabled` due to issue using `mfa_delete` by terraform (https://github.com/hashicorp/terraform-provider-aws/issues/629).
-  acl           = "public-read"
   bucket        = var.hostname
   tags          = module.default_label.tags
   force_destroy = var.force_destroy
+}

-  dynamic "logging" {
-    for_each = var.logs_enabled ? ["true"] : []
-    content {
-      target_bucket = module.logs.bucket_id
-      target_prefix = module.logs.prefix
-    }
-  }
+# S3 acl resource support for AWS provider v4
+resource "aws_s3_bucket_acl" "default" {
+  bucket = aws_s3_bucket.default[0].id
+  acl    = "public-read"
+}
+
+# S3 logging resource support for AWS provider v4
+resource "aws_s3_bucket_logging" "default" {
+  for_each = var.logs_enabled ? toset(["true"]) : toset([])
+  bucket   = aws_s3_bucket.default[0].id
+
+  target_bucket = module.logs.bucket_id
+  target_prefix = module.logs.prefix
+}
+
+# S3 versioning resource support for AWS provider v4
+resource "aws_s3_bucket_versioning" "default" {
+  bucket = aws_s3_bucket.default[0].id
+  versioning_configuration {
+    status = var.versioning_enabled ? "Enabled" : "Suspended"
+  }
+}

-  dynamic "website" {
-    for_each = local.website_config[var.redirect_all_requests_to == "" ? "default" : "redirect_all"]
-    content {
-      error_document           = lookup(website.value, "error_document", null)
-      index_document           = lookup(website.value, "index_document", null)
-      redirect_all_requests_to = lookup(website.value, "redirect_all_requests_to", null)
-      routing_rules            = lookup(website.value, "routing_rules", null)
-    }
-  }
+# S3 website configuration support for AWS provider v4
+resource "aws_s3_bucket_website_configuration" "default" {
+  bucket = aws_s3_bucket.default[0].bucket
+
+  index_document {
+    suffix = var.index_document
+  }
+
+  error_document {
+    key = var.error_document
+  }
+}
+
+# S3 cors configuration support for AWS provider v4
+resource "aws_s3_bucket_cors_configuration" "default" {
+  bucket = aws_s3_bucket.default[0].bucket
+
+  cors_rule {
+    allowed_headers = var.cors_allowed_headers
+    allowed_methods = var.cors_allowed_methods
+    allowed_origins = var.cors_allowed_origins
+    expose_headers  = var.cors_expose_headers
+    max_age_seconds = var.cors_max_age_seconds
+  }
+}

-  versioning {
-    enabled = var.versioning_enabled
-  }
+# S3 lifecycle configuration support for AWS provider v4
+resource "aws_s3_bucket_lifecycle_configuration" "default" {
+  depends_on = [aws_s3_bucket_versioning.default]

-  lifecycle_rule {
-    id      = module.default_label.id
-    enabled = var.lifecycle_rule_enabled
-    prefix  = var.prefix
-    tags    = module.default_label.tags
+  bucket = aws_s3_bucket.default[0].bucket

-    noncurrent_version_transition {
-      days          = var.noncurrent_version_transition_days
-      storage_class = "GLACIER"
-    }
+  rule {
+    id = module.default_label.id

-    noncurrent_version_expiration {
-      days = var.noncurrent_version_expiration_days
-    }
-  }
+    filter {
+      prefix = var.prefix
+    }
+
+    noncurrent_version_expiration {
+      noncurrent_days = var.noncurrent_version_expiration_days
+    }
+
+    noncurrent_version_transition {
+      noncurrent_days = var.noncurrent_version_transition_days
+      storage_class   = "GLACIER"
+    }
+
+    status = var.lifecycle_rule_enabled ? "Enabled" : "Disabled"
+  }
+}

-  dynamic "server_side_encryption_configuration" {
-    for_each = var.encryption_enabled ? ["true"] : []
-
-    content {
-      rule {
-        apply_server_side_encryption_by_default {
-          sse_algorithm = "AES256"
-        }
-      }
-    }
-  }
+# S3 server side encryption support for AWS provider v4
+resource "aws_s3_bucket_server_side_encryption_configuration" "default" {
+  for_each = var.encryption_enabled ? toset(["true"]) : toset([])
+  bucket   = aws_s3_bucket.default[0].bucket
+
+  rule {
+    apply_server_side_encryption_by_default {
+      sse_algorithm = "AES256"
+    }
+  }
 }
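
For anyone trying this branch out, here is a minimal root-module sketch. The registry source and version pin are assumptions (use whichever release ends up containing this change), and the hostname is only an example; every input shown corresponds to a variable referenced in the diff above.

module "website" {
  source  = "cloudposse/s3-website/aws" # assumed registry name for this repo
  version = "x.y.z"                     # placeholder: pin to the release that includes this PR

  hostname       = "docs.example.com"   # illustrative bucket name
  index_document = "index.html"
  error_document = "404.html"

  versioning_enabled     = true
  encryption_enabled     = true
  logs_enabled           = true
  lifecycle_rule_enabled = true
}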
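A note on upgrading an existing deployment: because the ACL, logging, versioning, website, CORS, lifecycle, and encryption settings move out of aws_s3_bucket into standalone resources, a plan against an already-created bucket will want to create those new resources unless they are first imported into state. Below is a rough sketch using Terraform 1.5+ import blocks against the hypothetical module.website instance from the example above; the bucket name and addresses are illustrative, and older Terraform versions can run the equivalent terraform import commands instead.

# Assumes the module instance above; adjust the bucket name to match var.hostname.
import {
  to = module.website.aws_s3_bucket_acl.default
  id = "docs.example.com,public-read" # aws_s3_bucket_acl imports as "bucket-name,acl"
}

import {
  to = module.website.aws_s3_bucket_versioning.default
  id = "docs.example.com" # most of the split resources import by bucket name alone
}

import {
  to = module.website.aws_s3_bucket_website_configuration.default
  id = "docs.example.com"
}

The existing aws_s3_bucket.default entry stays in state untouched; only the newly split resources need to be adopted.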