I'm trying to launch a publicly hosted Docker container into my ECS cluster via Terraform. The plan completes without any errors and launches the service, but the ECS instance list is empty, and the service complains that there are no ECS instances registered with the cluster.
As part of my infrastructure I have defined:
Given that everything else launches and I can SSH into the ECS instance (and even manually pull and run the Docker image), I'm fairly sure all the networking is OK — which makes me think the problem might be the connection between the ECS service and the ALB?
ECS service:
# Shared naming prefix for all Kafka resources, e.g. "staging-kafka".
locals {
  application_name = "${var.environment}-kafka"
}
# ECS service that keeps `desired_count` Kafka tasks running and registers
# each task with the ALB target group defined further down.
resource "aws_ecs_service" "kafka" {
name = "${local.application_name}"
iam_role = "${var.iam-role_arn}"
cluster = "${var.ecs-cluster_id}"
task_definition = "${aws_ecs_task_definition.kafka_definition.arn}"
desired_count = "${var.count_kafka}"
deployment_minimum_healthy_percent = "${var.deployment_min_healthy_percent}"
deployment_maximum_percent = "${var.deployment_max_percent}"
# Explicit dependency: the target_group_arn reference below only creates an
# implicit dependency on the target group, not on the listener, and ECS
# cannot register tasks until the listener exists.
depends_on = ["aws_alb_listener.kafka-alb-listener"]
load_balancer {
target_group_arn = "${aws_alb_target_group.kafka-alb-target-group.arn}"
# NOTE(review): the target group below is declared with port 5000 while
# port 80 is registered here -- confirm the container actually listens on
# 80; the port registered per-task overrides the target group's default.
container_port = 80
# NOTE(review): must match the container name inside
# kafka_task_definition.tpl -- verify against the template.
container_name = "kafka"
}
lifecycle {
create_before_destroy = true
}
}
# Task definition for the Kafka container; the container JSON is rendered
# from the template data source below.
resource "aws_ecs_task_definition" "kafka_definition" {
  family                = "${var.environment}_kafka"
  container_definitions = "${data.template_file.kafka_task.rendered}"

  lifecycle {
    create_before_destroy = true
  }
}
# Renders the container-definition template, injecting the fully qualified
# Docker image reference (name:tag) as a template variable.
data "template_file" "kafka_task" {
  template = "${file("${path.module}/kafka_task_definition.tpl")}"

  vars {
    kafka_docker_image = "${var.kafka-docker-image_name}:${var.kafka-docker-image_tag}"
  }
}
ALB:
# Public-facing ALB fronting the Kafka service.
# NOTE(review): the resource label "kakfa-alb" misspells "kafka"; it is
# referenced by that spelling from the target group and listener below, so
# renaming it would require updating every reference (and would force the
# ALB to be recreated).
resource "aws_alb" "kakfa-alb" {
name = "${local.application_name}-alb"
security_groups = ["${var.security_groups_ids}"]
subnets = ["${var.public_subnet_ids}"]
tags {
Name = "${local.application_name}-alb"
Environment = "${var.environment}"
}
}
# Target group the ECS service registers Kafka tasks into.
# NOTE(review): "${aws_alb.kakfa-alb.name}-target-group" may exceed the
# 32-character limit AWS imposes on target-group names for longer
# environment names -- verify the rendered length.
resource "aws_alb_target_group" "kafka-alb-target-group" {
  name     = "${aws_alb.kakfa-alb.name}-target-group"
  port     = "5000"
  protocol = "HTTP"
  vpc_id   = "${var.vpc_id}"

  # The interpolation of aws_alb.kakfa-alb.name above already creates an
  # implicit dependency on the ALB, so the previous explicit depends_on was
  # redundant and has been removed.

  health_check {
    healthy_threshold   = "5"
    unhealthy_threshold = "2"
    interval            = "30"
    matcher             = "200"
    path                = "/"
    port                = "traffic-port"
    protocol            = "HTTP"
    timeout             = "5"
  }

  tags {
    Name        = "${aws_alb.kakfa-alb.name}-target-group"
    Environment = "${var.environment}"
  }
}
# Public HTTP entry point: forwards all traffic arriving on port 80 to the
# Kafka target group.
resource "aws_alb_listener" "kafka-alb-listener" {
  load_balancer_arn = "${aws_alb.kakfa-alb.arn}"
  port              = "80"
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = "${aws_alb_target_group.kafka-alb-target-group.arn}"
  }
}
You probably haven't registered the instances with the cluster — the ECS agent on each EC2 instance must be told which cluster to join, otherwise the instances come up but never appear in the cluster.
user_data.sh:
#!/bin/bash
# Point the ECS agent at the right cluster. ${ecs_cluster_name} is substituted
# by Terraform's template_file data source before this script runs on boot.
echo "ECS_CLUSTER=${ecs_cluster_name}" > /etc/ecs/ecs.config
In your terraform config:
# Renders user_data.sh, substituting the name of the cluster the ECS agent
# on each instance should join.
data "template_file" "user_data" {
  template = "${file("user_data.sh")}"

  vars {
    ecs_cluster_name = "${var.ecs_cluster_name}"
  }
}
# Launch configuration for the cluster's EC2 instances. Passing the rendered
# user_data is what makes each instance's ECS agent register with the
# cluster on boot. ("..." marks arguments elided in this answer.)
resource "aws_launch_configuration" "current" {
...
user_data = "${data.template_file.user_data.rendered}"
...
}