Search code examples
ruby-on-railsamazon-web-servicesdockeramazon-ecsaws-fargate

"This site can’t be reached" on AWS ECS Fargate deployeed webpage with public ip


I'm trying to deploy ruby on rails application(port 3000) builed by docker + RDS (mysql, port 3306) by ECS fargate. I checked my database and ruby on rails application on cloudwatch, and it's build was successfull. (Checked that the server is running and the database is initialized)

However, even if the build ended successfully, it was impossible to access my ruby on rails application with public ip. Only message returns on browser -> This site can’t be reached .. OR .. took too long to respond. What's wrong with my code below? I'm waiting for help from AWS experts.

main.tf


provider "aws" {
  region = "ap-northeast-1"

  default_tags {
    tags = {
      Owner     = "[email protected]"
      Terraform = "true"
    }
  }
}

# VPC
resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
  instance_tenancy     = "default"
  enable_dns_hostnames = true
  enable_dns_support   = true
  tags = {
    Name = "app"
  }
}

# Subnet
resource "aws_subnet" "public_1a" {
  vpc_id            = aws_vpc.main.id
  availability_zone = "ap-northeast-1a"
  cidr_block        = "10.0.1.0/24"
  map_public_ip_on_launch = true
  tags = {
    Name = "app-public-1a"
  }
}

resource "aws_subnet" "public_1c" {
  vpc_id            = aws_vpc.main.id
  availability_zone = "ap-northeast-1c"
  cidr_block        = "10.0.2.0/24"
  map_public_ip_on_launch = true
  tags = {
    Name = "app-public-1c"
  }
}

resource "aws_subnet" "public_1d" {
  vpc_id            = aws_vpc.main.id
  availability_zone = "ap-northeast-1d"
  cidr_block        = "10.0.3.0/24"
  map_public_ip_on_launch = true
  tags = {
    Name = "app-public-1d"
  }
}

resource "aws_subnet" "private_1a" {
  vpc_id            = aws_vpc.main.id
  availability_zone = "ap-northeast-1a"
  cidr_block        = "10.0.10.0/24"

  tags = {
    Name = "app-public-1a"
  }
}

resource "aws_subnet" "private_1c" {
  vpc_id            = aws_vpc.main.id
  availability_zone = "ap-northeast-1c"
  cidr_block        = "10.0.20.0/24"

  tags = {
    Name = "app-private-1c"
  }
}

resource "aws_subnet" "private_1d" {
  vpc_id            = aws_vpc.main.id
  availability_zone = "ap-northeast-1d"
  cidr_block        = "10.0.30.0/24"

  tags = {
    Name = "app-private-1d"
  }
}

# Internet Gateway
resource "aws_internet_gateway" "main" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "app"
  }
}

# Elastic IP
resource "aws_eip" "nat_1a" {
  vpc = true

  tags = {
    Name = "app-natgw-1a"
  }
}

# NAT Gateway
resource "aws_nat_gateway" "nat_1a" {
  subnet_id     = aws_subnet.public_1a.id
  allocation_id = aws_eip.nat_1a.id

  tags = {
    Name = "app-1a"
  }
}

resource "aws_eip" "nat_1c" {
  vpc = true

  tags = {
    Name = "app-natgw-1c"
  }
}

resource "aws_nat_gateway" "nat_1c" {
  subnet_id     = aws_subnet.public_1c.id
  allocation_id = aws_eip.nat_1c.id

  tags = {
    Name = "app-1c"
  }
}

resource "aws_eip" "nat_1d" {
  vpc = true

  tags = {
    Name = "app-natgw-1d"
  }
}

resource "aws_nat_gateway" "nat_1d" {
  subnet_id     = aws_subnet.public_1d.id
  allocation_id = aws_eip.nat_1d.id

  tags = {
    Name = "app-1d"
  }
}

# Route Table
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "app-public"
  }

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.main.id
  }
}

# Route
resource "aws_route" "public" {
  destination_cidr_block = "0.0.0.0/0"
  route_table_id         = aws_route_table.public.id
  gateway_id             = aws_internet_gateway.main.id
}

# Association
resource "aws_route_table_association" "public_1a" {
  subnet_id      = aws_subnet.public_1a.id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table_association" "public_1c" {
  subnet_id      = aws_subnet.public_1c.id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table_association" "public_1d" {
  subnet_id      = aws_subnet.public_1d.id
  route_table_id = aws_route_table.public.id
}

# Route Table (Private)
resource "aws_route_table" "private_1a" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "app-private-1a"
  }
}

resource "aws_route_table" "private_1c" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "app-private-1c"
  }
}

resource "aws_route_table" "private_1d" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "app-private-1d"
  }
}

# Route (Private)
resource "aws_route" "private_1a" {
  destination_cidr_block = "0.0.0.0/0"
  route_table_id         = aws_route_table.private_1a.id
  nat_gateway_id         = aws_nat_gateway.nat_1a.id
}

resource "aws_route" "private_1c" {
  destination_cidr_block = "0.0.0.0/0"
  route_table_id         = aws_route_table.private_1c.id
  nat_gateway_id         = aws_nat_gateway.nat_1c.id
}

resource "aws_route" "private_1d" {
  destination_cidr_block = "0.0.0.0/0"
  route_table_id         = aws_route_table.private_1d.id
  nat_gateway_id         = aws_nat_gateway.nat_1d.id
}

# Association (Private)
resource "aws_route_table_association" "private_1a" {
  subnet_id      = aws_subnet.private_1a.id
  route_table_id = aws_route_table.private_1a.id
}

resource "aws_route_table_association" "private_1c" {
  subnet_id      = aws_subnet.private_1c.id
  route_table_id = aws_route_table.private_1c.id
}

resource "aws_route_table_association" "private_1d" {
  subnet_id      = aws_subnet.private_1d.id
  route_table_id = aws_route_table.private_1d.id
}

# SecurityGroup
resource "aws_security_group" "app-app" {
  name        = "app-app"
  description = "app-app"
  vpc_id      = aws_vpc.main.id

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "app-app"
  }
}

# SecurityGroup
resource "aws_security_group" "app-http" {
  name        = "app-http"
  description = "aws_security_group"
  vpc_id      = aws_vpc.main.id

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "app-http"
  }
}

# SecurityGroup
resource "aws_security_group" "app-db" {
  name        = "app-db"
  description = "aws_security_group"
  vpc_id      = aws_vpc.main.id

  ingress {
    from_port   = 3306
    to_port     = 3306
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "app-db"
  }
}

resource "aws_ecs_task_definition" "main" {
  family                   = "app"
  network_mode             = "awsvpc"
  cpu                      = "1024"
  memory                   = "2048"

  requires_compatibilities = ["FARGATE"]
  execution_role_arn       = aws_iam_role.main.arn

  runtime_platform {
    operating_system_family = "LINUX"
    cpu_architecture        = "X86_64"
  }

  container_definitions = jsonencode([
    {
      "name": "web",
      "image": RUBY ON RAILS IMGAE ECR URL IS HERE..,
      "portMappings": [
        {
          "containerPort": 3000,
          "hostPort": 3000,
          "protocol": "tcp"
        }
      ],
      "essential": true,
      "command": [
        "bash",
        "-c",
        "rm -f tmp/pids/server.pid && bundle exec rails s -p 3000 -b '0.0.0.0'"
      ],
      "network_configuration": {
        "awsvpc_configuration": {
          "subnets": [aws_subnet.public_1a.id, aws_subnet.public_1c.id, aws_subnet.public_1d.id],
          "security_groups": [aws_security_group.app-app.id]
        }
      },
      "logConfiguration": {
        "logDriver": "awslogs",
        "options": {
          "awslogs-group": "app",
          "awslogs-region": "ap-northeast-1",
          "awslogs-stream-prefix": "ecs"
        }
      },
      "environment": [
        {
          "name": "RAILS_RELATIVE_URL_ROOT",
          "value": "/"
        }
      ]
    }
  ])
}



# ECS Cluster
resource "aws_ecs_cluster" "main" {
  name = "app"
}

# ECS Service
resource "aws_ecs_service" "main" {
  name            = "app"
  launch_type     = "FARGATE"
  desired_count   = 1
  cluster         = aws_ecs_cluster.main.name
  task_definition = aws_ecs_task_definition.main.arn
  health_check_grace_period_seconds = 90

  deployment_controller {
    type = "ECS"
  }
  deployment_maximum_percent = 200
  deployment_minimum_healthy_percent = 100

  lifecycle {
    ignore_changes = [desired_count, task_definition]
  }

  load_balancer {
    target_group_arn = aws_alb_target_group.main.arn
    container_name   = "app-app"
    container_port   = 3000
  }

  network_configuration {
    subnets         = [aws_subnet.public_1a.id, aws_subnet.public_1c.id, aws_subnet.public_1d.id]
    security_groups = [
      aws_security_group.app-app.id
    ]
    assign_public_ip = true
  }



}

# ECR
resource "aws_ecr_repository" "main" {
  name                 = "app"
  image_tag_mutability = "MUTABLE"

  image_scanning_configuration {
    scan_on_push = true
  }
}

# IAM ROLE
resource "aws_iam_role" "main" {
  name = "app"
  assume_role_policy = data.aws_iam_policy_document.role_policy.json

  inline_policy {
    name   = "EcsTaskExecutionPolicy"
    policy = data.aws_iam_policy_document.ecs_task_policy.json
  }
}



data "aws_iam_policy_document" "ecs_task_policy" {
  statement {
    sid = "EcsTaskPolicy"

    actions = [
      "ecr:BatchCheckLayerAvailability",
      "ecr:GetDownloadUrlForLayer",
      "ecr:BatchGetImage"
    ]

    resources = [
      "*"
    ]
  }
  statement {

    actions = [
      "ecr:GetAuthorizationToken"
    ]

    resources = [
      "*"
    ]
  }

  statement {

    actions = [
      "logs:CreateLogGroup",
      "logs:CreateLogStream",
      "logs:PutLogEvents"
    ]

    resources = [
      "*"
    ]
  }
}

data "aws_iam_policy_document" "role_policy" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ecs-tasks.amazonaws.com"]
    }
  }
}



 # ALB
 resource "aws_alb" "main" {
   name               = "app"
   internal           = false
   enable_deletion_protection = false
   security_groups = [
     aws_security_group.app-http.id
   ]
   subnets         = [aws_subnet.public_1a.id, aws_subnet.public_1c.id, aws_subnet.public_1d.id]
 }

 # Listener
 resource "aws_alb_listener" "main" {
   port     = "80"
   protocol = "HTTP"

   load_balancer_arn = aws_alb.main.arn

   default_action {
     type             = "forward"
     target_group_arn = aws_alb_target_group.main.arn

   }
 }

 resource "aws_alb_target_group" "main" {
   name        = "app-targetgroup2"
   port        = 3000
   protocol    = "HTTP"
     depends_on           = [aws_alb.main]
   target_type          = "ip"
   vpc_id = aws_vpc.main.id
   deregistration_delay = 15


  health_check {
    interval            = 120
    path                = "/"
    port                = "3000"
    protocol            = "HTTP"
    timeout             = 110
    unhealthy_threshold = 2
    matcher             = 200
  }

   lifecycle {
     create_before_destroy = true
   }
 }


# ALB Listener Rule
resource "aws_alb_listener_rule" "main" {
  listener_arn = aws_alb_listener.main.arn
  priority     = 100
  depends_on = [aws_alb_target_group.main]

  condition {
    path_pattern {
      values = ["/"]
    }
  }

  action {
    type             = "forward"
    target_group_arn = aws_alb_target_group.main.arn
  }
}
*/

resource "aws_cloudwatch_log_group" "name" {
  name = "app"
  retention_in_days = 7
}

resource "aws_db_subnet_group" "main" {
  name        = "app-db-subnet"
  description = "app-db-subnet"
  subnet_ids  = [
    aws_subnet.public_1a.id,
    aws_subnet.public_1c.id,
    aws_subnet.public_1d.id
  ]

  tags = {
    Name = "app-db-subnet-group"
  }
}

resource "aws_db_instance" "main" {
  identifier          = "app-db"
  engine              = "mysql"
  engine_version      = "5.7"
  instance_class      = "db.t2.micro"
  allocated_storage   = 20
  storage_type        = "gp2"
  db_name             = "app_rails_production"
  username            = "root"
  password            = "password"
  port                = 3306
  multi_az            = true
  skip_final_snapshot = true

  vpc_security_group_ids = [aws_security_group.app-db.id]
  db_subnet_group_name   = aws_db_subnet_group.main.name
}


docker-compose.yml(in ruby on rails) (there was no problem with build in ecs)


version: '3'
services:
  web:
    build: .
    command: /bin/sh -c "rm -f tmp/pids/server.pid &&
                     bundle exec rails s -p 3000 -b '0.0.0.0'"
    ports:
      - "3000:3000"
    platform: linux/x86_64
  db:
    image: mysql
    ports:
      - 3306:3306
    platform: linux/x86_64
    volumes:
      - db-data:/var/lib/mysql

volumes:
  db-data:
    driver: local

database.yml (in ruby on rails) (there was no problem with build in ecs)

default: &default
  adapter: mysql2
  encoding: utf8
  username: root
  password: password
  port: 3306
  pool: 5

development:
  <<: *default
  database: development

test:
  <<: *default
  database: test

production:
  <<: *default
  database: production

DOCKERFILE (in ruby on rails) (there was no problem with build in ecs)

FROM --platform=linux/amd64 ubuntu
FROM ruby:2.6.6

ENV LANG="C.UTF-8"

WORKDIR /usr/src/app

RUN set -eu \
  && curl -sSL https://deb.nodesource.com/setup_16.x | bash - \
  && apt-get update \
  && apt-get install -y \
    default-mysql-client \
    ftp \
    vim \
    shared-mime-info \
    libjemalloc-dev \
    libjemalloc2 \
    libappindicator1 \
    fonts-liberation \
    libappindicator3-1 \
    libasound2 \
    libnspr4 \
    libnss3 \
    libxss1 \
    libu2f-udev \
    libvulkan1 \
    xdg-utils \
    libgbm1 \
    nodejs \
    yarn \
    cmake \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/* \
  && ln -nfs /usr/lib/$(uname -m)-linux-gnu/libjemalloc.so.2 /usr/local/lib/libjemalloc.so.2

COPY Gemfile Gemfile.lock ./
RUN gem update --system 3.2.3
RUN gem install bundler -v 2.4.22
RUN gem install nokogiri --platform=ruby
RUN gem update --system 3.2.3
RUN gem install mysql2
RUN bundle config set force_ruby_platform true
RUN bundle install
RUN bundle update

ARG BUNDLE_ENTERPRISE__CONTRIBSYS__COM
ARG BUNDLE_GITHUB__COM


COPY . .

RUN mkdir -p \
  tmp/cache \
  tmp/sockets \
  tmp/pids \
  tmp/cache/assets
CMD /bin/sh -c "rm -f tmp/pids/server.pid && rm -f tmp/mysql.sock && bundle exec rails s -p 3000 -b '0.0.0.0'"

What I've tried:

remove health_check -> Task failed ELB health checks in (target-group .....) ERROR remove load_balance settings -> Scaling activity initiated by ... ERROR remove private subnets -> nothing changed

I feel like there's something wrong with the routes, security group or port settings. Any advice is welcome. Thanks.


Solution

  • The security group named app-app only allows inbound traffic on port 80, but the ECS task listens on port 3000. You need to change that security group to allow inbound traffic on port 3000 instead of port 80.

    After you fix that, the load balancer should be able to get to the ECS container, and hopefully the health checks will start passing. If the health checks start passing, then you should be able to go to the load balancer's DNS name in your browser to get to your application.

    Also, after you fix that security group issue, you should be able to go directly to the application in your web browser, at the Fargate task's public IP address, like: http://public-ip:3000. Notice that if you go directly to the task's IP address like that, instead of going through the load balancer, you will need to append the container port :3000 to the address.


    You also have an extra aws_alb_listener_rule resource that does the same thing that the default listener rule does, so it can just be deleted.

    And you have the route to the Internet Gateway defined once in the route block of the aws_route_table" "public" resource, and again in the separate "aws_route" "public" resource. That's not only redundant, but it can cause issues defining the same thing in two resources like that. Since Terraform is managing the aws_route_table resource, you should delete the redundant aws_route resource.

    NOTE: If you delete the redundant resources, I would run terraform apply a few times in a row until it says there are no changes found.