I want to deploy ECS service that runs containers on EC2 machine(s),
Some factoids about my project:
Now, the outcome is that
The very annoying is that this message doesn't tell me anything about the real problem.
Can anybody point me, what did I miss? What made I wrong?
Anything, that I have to supply to understand my situation?
Here is my Terraform sample
resource "aws_vpc" "vpc" {
cidr_block = "172.30.0.0/16"
}
resource "aws_internet_gateway" "internet_gateway" {
vpc_id = aws_vpc.vpc.id
}
resource "aws_subnet" "pub_subnet" {
vpc_id = aws_vpc.vpc.id
cidr_block = "172.30.1.0/24"
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.internet_gateway.id
}
}
resource "aws_route_table_association" "route_table_association" {
subnet_id = aws_subnet.pub_subnet.id
route_table_id = aws_route_table.public.id
}
resource "aws_security_group" "ecs_sg" {
vpc_id = aws_vpc.vpc.id
ingress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
data "aws_iam_policy_document" "ecs_agent" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role" "ecs_agent" {
name = "ecs-agent"
assume_role_policy = data.aws_iam_policy_document.ecs_agent.json
}
resource "aws_iam_role_policy_attachment" "ecs_agent" {
role = aws_iam_role.ecs_agent.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs_agent" {
name = "ecs-agent"
role = aws_iam_role.ecs_agent.name
}
resource "aws_vpc_endpoint" "vpc_endpoints" {
for_each = toset(["com.amazonaws.us-east-1.logs", "com.amazonaws.us-east-1.ecr.dkr", "com.amazonaws.us-east-1.ecr.api", "com.amazonaws.us-east-1.s3", "com.amazonaws.us-east-1.ecs", "com.amazonaws.us-east-1.ecs-agent", "com.amazonaws.us-east-1.ecs-telemetry"])
vpc_id = aws_vpc.vpc.id
vpc_endpoint_type = "Interface"
service_name = each.key
security_group_ids = [aws_security_group.ecs_sg.id]
}
resource "aws_launch_template" "engine" {
name = "alma"
image_id = "ami-05339d597592d45cf" # Amazon ECS-Optimized Amazon Linux 2 (AL2) x86_64 AMI
instance_type = "t2.large"
user_data = base64encode("#!/bin/bash\necho ECS_CLUSTER=my-cluster >> /etc/ecs/ecs.config")
vpc_security_group_ids = [aws_security_group.ecs_sg.id]
iam_instance_profile {
name = aws_iam_instance_profile.ecs_agent.name
}
}
resource "aws_autoscaling_group" "failure_analysis_ecs_asg" {
name = "asg"
vpc_zone_identifier = [aws_subnet.pub_subnet.id]
desired_capacity = 1
min_size = 1
max_size = 10
health_check_grace_period = 300
health_check_type = "EC2"
launch_template {
id = aws_launch_template.engine.id
}
}
resource "aws_ecs_capacity_provider" "provider" {
name = "alma"
auto_scaling_group_provider {
auto_scaling_group_arn = aws_autoscaling_group.failure_analysis_ecs_asg.arn
managed_scaling {
status = "ENABLED"
target_capacity = 100
minimum_scaling_step_size = 1
maximum_scaling_step_size = 100
}
}
}
resource "aws_ecs_cluster_capacity_providers" "providers" {
cluster_name = aws_ecs_cluster.ecs_cluster.name
capacity_providers = [aws_ecs_capacity_provider.provider.name]
}
resource "aws_ecs_cluster" "ecs_cluster" {
name = "my-cluster"
}
resource "aws_ecs_task_definition" "task_definition" {
family = "worker"
container_definitions = jsonencode([
{
essential = true
memory = 512
name = "worker"
cpu = 1
image = "public.ecr.aws/docker/library/mongo:5.0.10-focal"
environment = []
}
])
}
resource "aws_ecs_service" "worker" {
name = "worker"
cluster = aws_ecs_cluster.ecs_cluster.id
task_definition = aws_ecs_task_definition.task_definition.arn
desired_count = 1
}
Your setup is quite confusing. You are created a public subnet, which does not work (no public IP addresses), and then you have network interfaces for what reason is unclear. Anyway, I modified your code to actually work with a correctly setup public subnet:
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 4.0"
}
}
}
provider "aws" {
profile = "acc2"
#region = "ap-southeast-2"
region = "us-east-1"
}
resource "aws_vpc" "vpc" {
cidr_block = "172.30.0.0/16"
enable_dns_support = true
enable_dns_hostnames = true
}
resource "aws_internet_gateway" "internet_gateway" {
vpc_id = aws_vpc.vpc.id
}
resource "aws_subnet" "pub_subnet" {
vpc_id = aws_vpc.vpc.id
cidr_block = "172.30.1.0/24"
map_public_ip_on_launch = true
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.internet_gateway.id
}
}
resource "aws_route_table_association" "route_table_association" {
subnet_id = aws_subnet.pub_subnet.id
route_table_id = aws_route_table.public.id
}
resource "aws_security_group" "ecs_sg" {
vpc_id = aws_vpc.vpc.id
ingress {
from_port = 0
to_port = 0
protocol = "-1"
self = true
}
ingress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
data "aws_iam_policy_document" "ecs_agent" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
resource "aws_iam_role" "ecs_agent" {
name = "ecs-agent"
assume_role_policy = data.aws_iam_policy_document.ecs_agent.json
}
resource "aws_iam_role_policy_attachment" "ecs_agent" {
role = aws_iam_role.ecs_agent.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role"
}
resource "aws_iam_instance_profile" "ecs_agent" {
name = "ecs-agent"
role = aws_iam_role.ecs_agent.name
}
# "com.amazonaws.us-east-1.s3",
# resource "aws_vpc_endpoint" "vpc_endpoints" {
# for_each = toset(["com.amazonaws.us-east-1.logs", "com.amazonaws.us-east-1.ecr.dkr", "com.amazonaws.us-east-1.ecr.api", "com.amazonaws.us-east-1.ecs", "com.amazonaws.us-east-1.ecs-agent", "com.amazonaws.us-east-1.ecs-telemetry"])
# vpc_id = aws_vpc.vpc.id
# vpc_endpoint_type = "Interface"
# service_name = each.key
# security_group_ids = [aws_security_group.ecs_sg.id]
# private_dns_enabled = true
# }
resource "aws_launch_template" "engine" {
name = "alma"
image_id = "ami-05339d597592d45cf" # Amazon ECS-Optimized Amazon Linux 2 (AL2) x86_64 AMI
instance_type = "t2.large"
user_data = base64encode("#!/bin/bash\necho ECS_CLUSTER=my-cluster >> /etc/ecs/ecs.config")
vpc_security_group_ids = [aws_security_group.ecs_sg.id]
iam_instance_profile {
name = aws_iam_instance_profile.ecs_agent.name
}
}
resource "aws_autoscaling_group" "failure_analysis_ecs_asg" {
name = "asg"
vpc_zone_identifier = [aws_subnet.pub_subnet.id]
desired_capacity = 1
min_size = 1
max_size = 10
health_check_grace_period = 300
health_check_type = "EC2"
launch_template {
id = aws_launch_template.engine.id
}
}
resource "aws_ecs_capacity_provider" "provider" {
name = "alma"
auto_scaling_group_provider {
auto_scaling_group_arn = aws_autoscaling_group.failure_analysis_ecs_asg.arn
managed_scaling {
status = "ENABLED"
target_capacity = 100
minimum_scaling_step_size = 1
maximum_scaling_step_size = 100
}
}
}
resource "aws_ecs_cluster_capacity_providers" "providers" {
cluster_name = aws_ecs_cluster.ecs_cluster.name
capacity_providers = [aws_ecs_capacity_provider.provider.name]
}
resource "aws_ecs_cluster" "ecs_cluster" {
name = "my-cluster"
}
resource "aws_ecs_task_definition" "task_definition" {
family = "worker"
container_definitions = jsonencode([
{
essential = true
memory = 512
name = "worker"
cpu = 1
image = "public.ecr.aws/docker/library/mongo:5.0.10-focal"
environment = []
}
])
}
resource "aws_ecs_service" "worker" {
name = "worker"
cluster = aws_ecs_cluster.ecs_cluster.id
task_definition = aws_ecs_task_definition.task_definition.arn
desired_count = 1
}