Search code examples
amazon-web-services · docker · amazon-ecs · aws-vpc

AWS ECS is not linking my containers


I'm migrating part of my company's infrastructure to Amazon ECS, and I'm stuck trying to make my server container connect to the database container.

Here's how I set it up in my task definition:

    {
  "requiresAttributes": [
    {
      "value": null,
      "name": "com.amazonaws.ecs.capability.docker-remote-api.1.17",
      "targetId": null,
      "targetType": null
    },
    {
      "value": null,
      "name": "com.amazonaws.ecs.capability.logging-driver.syslog",
      "targetId": null,
      "targetType": null
    },
    {
      "value": null,
      "name": "com.amazonaws.ecs.capability.docker-remote-api.1.18",
      "targetId": null,
      "targetType": null
    },
    {
      "value": null,
      "name": "com.amazonaws.ecs.capability.ecr-auth",
      "targetId": null,
      "targetType": null
    }
  ],
  "taskDefinitionArn": "arn:aws:ecs:us-east-1:9621234232917455:task-definition/ecv-server:12",
  "networkMode": "bridge",
  "status": "ACTIVE",
  "revision": 12,
  "taskRoleArn": null,
  "containerDefinitions": [
    {
      "volumesFrom": [],
      "memory": 500,
      "extraHosts": null,
      "dnsServers": [],
      "disableNetworking": null,
      "dnsSearchDomains": null,
      "portMappings": [],
      "hostname": "db",
      "essential": true,
      "entryPoint": null,
      "mountPoints": [
        {
          "containerPath": "/var/lib/postgresql/data",
          "sourceVolume": "dbdata",
          "readOnly": null
        }
      ],
      "name": "db",
      "ulimits": null,
      "dockerSecurityOptions": null,
      "environment": [
        {
          "name": "POSTGRES_PASSWORD",
          "value": "jmbrito"
        },
        {
          "name": "POSTGRES_USER",
          "value": "jmbrito"
        }
      ],
      "links": [],
      "workingDirectory": null,
      "readonlyRootFilesystem": null,
      "image": "postgres",
      "command": null,
      "user": null,
      "dockerLabels": null,
      "logConfiguration": {
        "logDriver": "syslog",
        "options": null
      },
      "cpu": 0,
      "privileged": null,
      "memoryReservation": null
    },
    {
      "volumesFrom": [],
      "memory": 400,
      "extraHosts": null,
      "dnsServers": [],
      "disableNetworking": null,
      "dnsSearchDomains": null,
      "portMappings": [],
      "hostname": "redis",
      "essential": true,
      "entryPoint": null,
      "mountPoints": [
        {
          "containerPath": "/data",
          "sourceVolume": "redisdata",
          "readOnly": null
        }
      ],
      "name": "redis",
      "ulimits": null,
      "dockerSecurityOptions": null,
      "environment": [],
      "links": null,
      "workingDirectory": null,
      "readonlyRootFilesystem": null,
      "image": "redis:3.2-alpine",
      "command": [
        "redis-server"
      ],
      "user": null,
      "dockerLabels": null,
      "logConfiguration": {
        "logDriver": "syslog",
        "options": null
      },
      "cpu": 0,
      "privileged": null,
      "memoryReservation": null
    },
    {
      "volumesFrom": [],
      "memory": 600,
      "extraHosts": null,
      "dnsServers": null,
      "disableNetworking": null,
      "dnsSearchDomains": null,
      "portMappings": [
        {
          "hostPort": 80,
          "containerPort": 3000,
          "protocol": "tcp"
        }
      ],
      "hostname": null,
      "essential": true,
      "entryPoint": [],
      "mountPoints": [],
      "name": "server",
      "ulimits": null,
      "dockerSecurityOptions": null,
      "environment": [
        {
          "name": "RAILS_ENV",
          "value": "production"
        }
      ],
      "links": [
        "db:db",
        "redis:redis"
      ],
      "workingDirectory": "/usr/src/app",
      "readonlyRootFilesystem": null,
      "image": "MY DOCKER LINK IN ECR",
      "command": [
        "sh",
        "deploy/init.sh"
      ],
      "user": null,
      "dockerLabels": null,
      "logConfiguration": {
        "logDriver": "syslog",
        "options": null
      },
      "cpu": 0,
      "privileged": null,
      "memoryReservation": null
    }
  ],
  "placementConstraints": [],
  "volumes": [
    {
      "host": {
        "sourcePath": null
      },
      "name": "dbdata"
    },
    {
      "host": {
        "sourcePath": null
      },
      "name": "redisdata"
    }
  ],
  "family": "ecv-server"
}

As you can see, I set the links field correctly, but when I try to connect using the container name (for example, the host db or the host redis), the name cannot be resolved.

I tried using the VPC addresses to connect to the other containers, and that worked. The only problem is that to do this I preferred to assign the VPC addresses myself (for example, setting 172.13.0.2 as db), because when I don't set them, the system assigns addresses in order of connection.

I hope you understood the question.

Thank you.


Solution

  • I don't think this task definition will work the way you want it to. When you put three containers in one task definition it is telling ECS to always deploy these three containers together on the same machine, each time it deploys the task.

    So if you deploy a service with a desired count of three for this task, what you will get is three app containers, three postgres containers, and three redis containers. The three application containers will have three isolated data persistence stacks: application container A will only talk to postgres A and redis A, while application container B will only talk to postgres B and redis B, so each application container would have inconsistent data that isn't replicated across the others.

    Running multiple containers in a task definition is really only intended for sidecar containers such as a reverse proxy, or temporary cache, or similar.

    For the persistence layer my recommendation here would be to use Amazon RDS for your postgres, and Amazon Elasticache for your redis. This way all your tasks can share the same postgres, and the same redis, and you will also have a lot less administration to worry about by using these Amazon services.