UML-3109 Remove build_admin feature flag from TF #2362

Merged · 1 commit · Oct 9, 2023
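This PR removes the build_admin feature flag from the environment Terraform. Each admin resource drops its conditional count = local.environment.build_admin ? 1 : 0 meta-argument, and a paired moved block maps the old [0]-indexed address to the plain resource address, so Terraform re-addresses the existing state object instead of planning a destroy and recreate. Below is a minimal sketch of that pattern, using a hypothetical aws_sns_topic resource purely for illustration; the diff applies the same change to the real admin ECS, load balancer, Cognito, Route 53 and CloudWatch resources.

# Before: the resource only existed when the feature flag was enabled.
#   count = local.environment.build_admin ? 1 : 0
resource "aws_sns_topic" "admin_alerts" {
  # Hypothetical resource, used here only to illustrate the pattern.
  name = "${local.environment_name}-admin-alerts"
}

# Re-address the existing state object from its indexed address to the
# plain address, so terraform plan reports a move rather than a
# destroy-and-recreate.
moved {
  from = aws_sns_topic.admin_alerts[0]
  to   = aws_sns_topic.admin_alerts
}

Once every environment's state has been moved, the moved blocks can typically be dropped in a follow-up change.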
53 changes: 39 additions & 14 deletions terraform/environment/admin_ecs.tf
@@ -2,21 +2,20 @@
// admin ECS Service level config

resource "aws_ecs_service" "admin" {
count = local.environment.build_admin ? 1 : 0
name = "admin-service"
cluster = aws_ecs_cluster.use-an-lpa.id
task_definition = aws_ecs_task_definition.admin[0].arn
task_definition = aws_ecs_task_definition.admin.arn
desired_count = 1
platform_version = "1.4.0"

network_configuration {
security_groups = [aws_security_group.admin_ecs_service[0].id]
security_groups = [aws_security_group.admin_ecs_service.id]
subnets = data.aws_subnets.private.ids
assign_public_ip = false
}

load_balancer {
target_group_arn = aws_lb_target_group.admin[0].arn
target_group_arn = aws_lb_target_group.admin.arn
container_name = "app"
container_port = 80
}
@@ -44,11 +43,16 @@ resource "aws_ecs_service" "admin" {
depends_on = [aws_lb.admin]
}


moved {
from = aws_ecs_service.admin[0]
to = aws_ecs_service.admin
}

//----------------------------------
// The service's Security Groups

resource "aws_security_group" "admin_ecs_service" {
count = local.environment.build_admin ? 1 : 0
name_prefix = "${local.environment_name}-admin-ecs-service"
description = "Admin service security group"
vpc_id = data.aws_vpc.default.id
@@ -57,41 +61,53 @@ resource "aws_security_group" "admin_ecs_service" {
}
}

moved {
from = aws_security_group.admin_ecs_service[0]
to = aws_security_group.admin_ecs_service
}

// 80 in from the ELB
resource "aws_security_group_rule" "admin_ecs_service_ingress" {
count = local.environment.build_admin ? 1 : 0
description = "Allow Port 80 ingress from the applciation load balancer"
type = "ingress"
from_port = 80
to_port = 80
protocol = "tcp"
security_group_id = aws_security_group.admin_ecs_service[0].id
source_security_group_id = aws_security_group.admin_loadbalancer[0].id
security_group_id = aws_security_group.admin_ecs_service.id
source_security_group_id = aws_security_group.admin_loadbalancer.id
lifecycle {
create_before_destroy = true
}
}

moved {
from = aws_security_group_rule.admin_ecs_service_ingress[0]
to = aws_security_group_rule.admin_ecs_service_ingress
}

// Anything out
resource "aws_security_group_rule" "admin_ecs_service_egress" {
count = local.environment.build_admin ? 1 : 0
description = "Allow any egress from Use service"
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:AWS007 - open egress for ECR access
security_group_id = aws_security_group.admin_ecs_service[0].id
security_group_id = aws_security_group.admin_ecs_service.id
lifecycle {
create_before_destroy = true
}
}

moved {
from = aws_security_group_rule.admin_ecs_service_egress[0]
to = aws_security_group_rule.admin_ecs_service_egress
}

//--------------------------------------
// admin ECS Service Task level config

resource "aws_ecs_task_definition" "admin" {
count = local.environment.build_admin ? 1 : 0
family = "${local.environment_name}-admin"
requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
@@ -102,13 +118,22 @@ resource "aws_ecs_task_definition" "admin" {
execution_role_arn = module.iam.ecs_execution_role.arn
}

moved {
from = aws_ecs_task_definition.admin[0]
to = aws_ecs_task_definition.admin
}

resource "aws_iam_role_policy" "admin_permissions_role" {
count = local.environment.build_admin ? 1 : 0
name = "${local.environment_name}-${local.policy_region_prefix}-adminApplicationPermissions"
policy = data.aws_iam_policy_document.admin_permissions_role.json
role = module.iam.ecs_task_roles.admin_task_role.id
}

moved {
from = aws_iam_role_policy.admin_permissions_role[0]
to = aws_iam_role_policy.admin_permissions_role
}

/*
Defines permissions that the application running within the task has.
*/
@@ -235,7 +260,7 @@ locals {
},
{
name = "ADMIN_CLIENT_ID",
value = "${aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin[0].id}"
value = "${aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin.id}"
},
{
name = "LPA_CODES_API_ENDPOINT",
@@ -248,7 +273,7 @@
}

locals {
admin_domain = local.environment.build_admin ? "https://${aws_route53_record.admin_use_my_lpa[0].fqdn}" : "Not deployed"
admin_domain = "https://${aws_route53_record.admin_use_my_lpa.fqdn}"
}

output "admin_domain" {
77 changes: 56 additions & 21 deletions terraform/environment/admin_load_balancer.tf
@@ -1,5 +1,4 @@
resource "aws_lb_target_group" "admin" {
count = local.environment.build_admin ? 1 : 0
name = "${local.environment_name}-admin"
port = 80
protocol = "HTTP"
@@ -12,11 +11,15 @@ resource "aws_lb_target_group" "admin" {
path = "/helloworld"
}

depends_on = [aws_lb.admin[0]]
depends_on = [aws_lb.admin]
}

moved {
from = aws_lb_target_group.admin[0]
to = aws_lb_target_group.admin
}

resource "aws_lb" "admin" {
count = local.environment.build_admin ? 1 : 0
name = "${local.environment_name}-admin"
internal = false #tfsec:ignore:AWS005 - public alb
load_balancer_type = "application"
@@ -25,7 +28,7 @@
enable_deletion_protection = local.environment.load_balancer_deletion_protection_enabled

security_groups = [
aws_security_group.admin_loadbalancer[0].id,
aws_security_group.admin_loadbalancer.id,
]

access_logs {
@@ -35,9 +38,13 @@
}
}

moved {
from = aws_lb.admin[0]
to = aws_lb.admin
}

resource "aws_lb_listener" "admin_loadbalancer_http_redirect" {
count = local.environment.build_admin ? 1 : 0
load_balancer_arn = aws_lb.admin[0].arn
load_balancer_arn = aws_lb.admin.arn
port = "80"
protocol = "HTTP"

@@ -52,9 +59,13 @@ resource "aws_lb_listener" "admin_loadbalancer_http_redirect" {
}
}

moved {
from = aws_lb_listener.admin_loadbalancer_http_redirect[0]
to = aws_lb_listener.admin_loadbalancer_http_redirect
}

resource "aws_lb_listener" "admin_loadbalancer" {
count = local.environment.build_admin ? 1 : 0
load_balancer_arn = aws_lb.admin[0].arn
load_balancer_arn = aws_lb.admin.arn
port = "443"
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-FS-1-2-2019-08"
@@ -66,33 +77,40 @@ resource "aws_lb_listener" "admin_loadbalancer" {
authenticate_oidc {
authentication_request_extra_params = {}
authorization_endpoint = "${local.admin_cognito_user_pool_domain_name}/oauth2/authorize"
client_id = aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin[0].id
client_secret = aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin[0].client_secret
client_id = aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin.id
client_secret = aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin.client_secret
issuer = "https://cognito-idp.eu-west-1.amazonaws.com/${local.admin_cognito_user_pool_id}"
on_unauthenticated_request = "authenticate"
scope = "openid"
session_cookie_name = "AWSELBAuthSessionCookie"
session_timeout = aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin[0].id_token_validity
session_timeout = aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin.id_token_validity
token_endpoint = "${local.admin_cognito_user_pool_domain_name}/oauth2/token"
user_info_endpoint = "${local.admin_cognito_user_pool_domain_name}/oauth2/userInfo"
}
}

default_action {
target_group_arn = aws_lb_target_group.admin[0].arn
target_group_arn = aws_lb_target_group.admin.arn
type = "forward"
}
}

moved {
from = aws_lb_listener.admin_loadbalancer[0]
to = aws_lb_listener.admin_loadbalancer
}

resource "aws_lb_listener_certificate" "admin_loadbalancer_live_service_certificate" {
count = local.environment.build_admin ? 1 : 0
listener_arn = aws_lb_listener.admin_loadbalancer[0].arn
listener_arn = aws_lb_listener.admin_loadbalancer.arn
certificate_arn = data.aws_acm_certificate.public_facing_certificate_use.arn
}

moved {
from = aws_lb_listener_certificate.admin_loadbalancer_live_service_certificate[0]
to = aws_lb_listener_certificate.admin_loadbalancer_live_service_certificate
}

resource "aws_security_group" "admin_loadbalancer" {
count = local.environment.build_admin ? 1 : 0
name_prefix = "${local.environment_name}-admin-loadbalancer"
description = "Admin service application load balancer"
vpc_id = data.aws_vpc.default.id
@@ -101,35 +119,52 @@ resource "aws_security_group" "admin_loadbalancer" {
}
}

moved {
from = aws_security_group.admin_loadbalancer[0]
to = aws_security_group.admin_loadbalancer
}

resource "aws_security_group_rule" "admin_loadbalancer_port_80_redirect_ingress" {
count = local.environment.build_admin ? 1 : 0
description = "Port 80 ingress for redirection to port 443"
type = "ingress"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = module.allow_list.moj_sites
security_group_id = aws_security_group.admin_loadbalancer[0].id
security_group_id = aws_security_group.admin_loadbalancer.id
}

moved {
from = aws_security_group_rule.admin_loadbalancer_port_80_redirect_ingress[0]
to = aws_security_group_rule.admin_loadbalancer_port_80_redirect_ingress
}

resource "aws_security_group_rule" "admin_loadbalancer_ingress" {
count = local.environment.build_admin ? 1 : 0
description = "Port 443 ingress from the allow list to the application load balancer"
type = "ingress"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = module.allow_list.moj_sites
security_group_id = aws_security_group.admin_loadbalancer[0].id
security_group_id = aws_security_group.admin_loadbalancer.id
}

moved {
from = aws_security_group_rule.admin_loadbalancer_ingress[0]
to = aws_security_group_rule.admin_loadbalancer_ingress
}

resource "aws_security_group_rule" "admin_loadbalancer_egress" {
count = local.environment.build_admin ? 1 : 0
description = "Allow any egress from Use service load balancer"
type = "egress"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:AWS007 - open egress for load balancers
security_group_id = aws_security_group.admin_loadbalancer[0].id
security_group_id = aws_security_group.admin_loadbalancer.id
}

moved {
from = aws_security_group_rule.admin_loadbalancer_egress[0]
to = aws_security_group_rule.admin_loadbalancer_egress
}
8 changes: 6 additions & 2 deletions terraform/environment/cloudwatch_alarms.tf
@@ -108,7 +108,6 @@ resource "aws_cloudwatch_metric_alarm" "viewer_ddos_attack_external" {
}

resource "aws_cloudwatch_metric_alarm" "admin_ddos_attack_external" {
count = local.environment.build_admin ? 1 : 0
alarm_name = "${local.environment_name}_AdminDDoSDetected"
comparison_operator = "GreaterThanThreshold"
evaluation_periods = "3"
@@ -121,6 +120,11 @@
treat_missing_data = "notBreaching"
alarm_actions = [aws_sns_topic.cloudwatch_to_pagerduty.arn]
dimensions = {
ResourceArn = aws_lb.admin[0].arn
ResourceArn = aws_lb.admin.arn
}
}

moved {
from = aws_cloudwatch_metric_alarm.admin_ddos_attack_external[0]
to = aws_cloudwatch_metric_alarm.admin_ddos_attack_external
}
10 changes: 7 additions & 3 deletions terraform/environment/cognito_client.tf
@@ -14,7 +14,6 @@ locals {
}

resource "aws_cognito_user_pool_client" "use_a_lasting_power_of_attorney_admin" {
count = local.environment.build_admin ? 1 : 0
provider = aws.identity
name = "${local.environment_name}-admin-auth"
user_pool_id = local.admin_cognito_user_pool_id
@@ -42,6 +41,11 @@ resource "aws_cognito_user_pool_client" "use_a_lasting_power_of_attorney_admin"
read_attributes = []
write_attributes = []

callback_urls = ["https://${aws_route53_record.admin_use_my_lpa[0].fqdn}/oauth2/idpresponse"]
logout_urls = ["https://${aws_route53_record.admin_use_my_lpa[0].fqdn}/"]
callback_urls = ["https://${aws_route53_record.admin_use_my_lpa.fqdn}/oauth2/idpresponse"]
logout_urls = ["https://${aws_route53_record.admin_use_my_lpa.fqdn}/"]
}

moved {
from = aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin[0]
to = aws_cognito_user_pool_client.use_a_lasting_power_of_attorney_admin
}
2 changes: 1 addition & 1 deletion terraform/environment/config_file.tf
@@ -14,7 +14,7 @@ locals {
stats_table = aws_dynamodb_table.stats_table.name
actor_fqdn = aws_route53_record.actor-use-my-lpa.fqdn
viewer_fqdn = aws_route53_record.viewer-use-my-lpa.fqdn
admin_fqdn = local.environment.build_admin ? aws_route53_record.admin_use_my_lpa[0].fqdn : ""
admin_fqdn = aws_route53_record.admin_use_my_lpa.fqdn
public_facing_use_fqdn = aws_route53_record.public_facing_use_lasting_power_of_attorney.fqdn
public_facing_view_fqdn = aws_route53_record.public_facing_view_lasting_power_of_attorney.fqdn
viewer_load_balancer_security_group_name = aws_security_group.viewer_loadbalancer.name
10 changes: 7 additions & 3 deletions terraform/environment/dns.tf
@@ -107,19 +107,23 @@ resource "aws_route53_record" "actor-use-my-lpa" {

resource "aws_route53_record" "admin_use_my_lpa" {
# admin.lastingpowerofattorney.opg.service.justice.gov.uk
count = local.environment.build_admin ? 1 : 0
provider = aws.management
zone_id = data.aws_route53_zone.opg_service_justice_gov_uk.zone_id
name = "${local.dns_namespace_env}admin.lastingpowerofattorney"
type = "A"

alias {
evaluate_target_health = false
name = aws_lb.admin[0].dns_name
zone_id = aws_lb.admin[0].zone_id
name = aws_lb.admin.dns_name
zone_id = aws_lb.admin.zone_id
}

lifecycle {
create_before_destroy = true
}
}

moved {
from = aws_route53_record.admin_use_my_lpa[0]
to = aws_route53_record.admin_use_my_lpa
}