Search code examples
python · django · docker · docker-compose · celery

Not specifying volumes for a service in docker-compose.yml


Docker version 20.10.17, build 100c701

I have a vite, vue 3 frontend and django backend with postgres database, celery and redis.

When I do not include volumes for the api and celery services, docker compose up succeeds.

If I add volumes to api and celery (./api:/api)

  api:
    build: 
      context: ./backend/backend
      dockerfile: Dockerfile
    ports:
      - "8000:8000"
    command:  >
      sh -c   "python3 manage.py makemigrations &&
                python3 manage.py migrate &&
                python3 manage.py wait_for_db &&
                python3 manage.py runserver 0.0.0.0:8000"
    volumes:
      # NOTE(review): the project tree shown below has no ./api directory
      # (the Django code lives under ./backend/backend), so this bind
      # mount makes Docker create an empty host directory and mount it
      # over the image's /api WORKDIR, hiding manage.py and the backend
      # package that were copied in at build time.
      - ./api:/api/
    environment:
      - DB_HOST=db
      - DB_NAME=${DATABASE_NAME}
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db

  celery:
    restart: always
    build:
      context: ./backend/backend
    command: celery -A backend worker -l info
    volumes:
      # NOTE(review): same empty bind mount as above — this is what causes
      # "The module backend was not found": after the mount, /api no
      # longer contains the code baked into the image.
      - ./api:/api/
    environment:
      - DB_HOST=db
      - DB_NAME=api
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db
      - redis
      - api

then I get the following error:

Error: Invalid value for '-A' / '--app': Unable to load celery application. The module backend was not found.

which tells me that the path for the volume is not correct — though I am not sure what I should set it to. Is there any harm in not specifying volumes for these services?

folder structure

.
├── backend
│   ├── backend
│   │   ├── backend
│   │   ├── core
│   │   ├── db.sqlite3
│   │   ├── Dockerfile
│   │   ├── manage.py
│   │   └── requirements.txt
│   └── venv
├── docker-compose.yml
└── frontend
    ├── Dockerfile
    ├── entrypoint.sh
    ├── index.html
    ├── node_modules
    ├── package.json
    ├── package-lock.json
    ├── postcss.config.js
    ├── public
    ├── README.md
    ├── src
    ├── tailwind.config.js
    ├── tsconfig.json
    ├── tsconfig.node.json
    └── vite.config.ts

frontend Dockerfile

# Frontend (Vite/Vue) dev image; started via entrypoint.sh.
FROM node:lts-alpine3.16
# Create an unprivileged user to run the app.
RUN addgroup app && adduser -S -G app app
USER app
WORKDIR /web
COPY entrypoint.sh /entrypoint.sh
USER root
RUN chmod +x /entrypoint.sh
USER app
# NOTE(review): COPY does not honor USER — without --chown the copied
# files are owned by root, so the USER app/USER root toggling around
# these COPYs has no effect on file ownership.
COPY package*.json ./
USER root
# NOTE(review): npm install runs as root, so node_modules ends up
# root-owned; the app user may be unable to write to it at runtime —
# verify, or use COPY --chown=app:app and install as app.
RUN npm install
USER app
COPY . .
ENTRYPOINT ["/entrypoint.sh"]

backend Dockerfile

# Backend (Django) image on Alpine, run as unprivileged user "app".
FROM python:3-alpine
RUN addgroup app && adduser -S -G app app
USER root
# Legacy "ENV key value" form; "ENV key=value" is the preferred syntax.
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
WORKDIR /api
COPY requirements.txt .
# Runtime packages (kept in the final image).
RUN apk add --update --no-cache postgresql-client jpeg-dev
# NOTE(review): the line below appears to end with a space *after* the
# backslash; if so, the continuation is broken and the package list on
# the next line is not part of this RUN — verify in the real file.
RUN apk add --update --no-cache --virtual .tmp-build-deps \ 
    gcc libc-dev linux-headers postgresql-dev musl-dev zlib zlib-dev
RUN pip install -r requirements.txt
# Remove build-only packages to keep the image small.
RUN apk del .tmp-build-deps
USER app
# NOTE(review): COPY ignores USER — these files land owned by root,
# which is why the explicit chown below is needed.
COPY . .
USER root
RUN chown -R app backend manage.py
USER app

docker-compose.yml

version: '3.9'

services:

  # Vite/Vue dev server.
  web:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    ports:
      - "3000:3000"
    command: npm run dev

  # Django API. Waits for the database *before* touching it — the
  # original ordering (makemigrations && migrate && wait_for_db) only
  # waited after the migrations had already been attempted.
  api:
    build:
      context: ./backend/backend
      dockerfile: Dockerfile
    ports:
      - "8000:8000"
    command: >
      sh -c "python3 manage.py wait_for_db &&
             python3 manage.py makemigrations &&
             python3 manage.py migrate &&
             python3 manage.py runserver 0.0.0.0:8000"
    environment:
      - DB_HOST=db
      - DB_NAME=${DATABASE_NAME}
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db

  db:
    image: postgres:14.1-alpine
    environment:
      - POSTGRES_DB=${DATABASE_NAME}
      - POSTGRES_USER=${DATABASE_USER}
      - POSTGRES_PASSWORD=${DATABASE_PASSWORD}
    volumes:
      - pgdata:/var/lib/postgresql/data

  redis:
    image: redis:alpine

  celery:
    restart: always
    build:
      context: ./backend/backend
    command: celery -A backend worker -l info
    environment:
      - DB_HOST=db
      # Use the same database name as the api service; the original
      # hard-coded DB_NAME=api, which only matches if DATABASE_NAME=api.
      - DB_NAME=${DATABASE_NAME}
      - DB_USER=${DATABASE_USER}
      - DB_PASS=${DATABASE_PASSWORD}
    depends_on:
      - db
      - redis
      - api

volumes:
  pgdata:

Solution

  • Found that defining the volume as a *named* volume, as follows, throws no errors and means I don't have to leave it out.

    # Solution: mount a named volume instead of a host bind mount. On
    # first use Docker populates the volume from the image's /api, so the
    # code copied in at build time stays visible to the worker.
    celery:
        restart: always
        build:
          context: ./backend/backend
        command: celery -A backend worker -l info
        volumes:
          # NOTE(review): a named volume persists across image rebuilds —
          # stale code in api-volume can keep shadowing a rebuilt /api
          # until the volume is removed; confirm this trade-off is OK.
          - api-volume:/api
        environment:
          - DB_HOST=db
          - DB_NAME=api
          - DB_USER=${DATABASE_USER}
          - DB_PASS=${DATABASE_PASSWORD}
        depends_on:
          - db
          - redis
          - api

    volumes:
      api-volume:
        # external: false is the default — the volume is created and
        # managed by this compose project rather than pre-existing.
        external: false