-
Notifications
You must be signed in to change notification settings - Fork 13
Closed
Description
A strange thing happened: if we run `make pull_prod` and then `make deploy_production`, Docker starts pulling images with no tag and cannot start the containers properly. Something is probably wrong in the Makefile.
We should have something like
docker pull ddmal/nginx:v3.1.0
docker pull ddmal/rodan-main:v3.1.0
docker pull ddmal/rodan-client:nightly
docker pull ddmal/iipsrv:nightly
docker pull ddmal/rodan-python3-celery:v3.1.0
docker pull ddmal/rodan-gpu-celery:v3.1.0
docker pull redis:alpine
docker pull ddmal/postgres-plpython:v3.1.0
docker pull rabbitmq:alpine
For the record, the version of production.yml currently in use:
# production.yml — Docker Swarm stack definition for Rodan.
# Indentation reconstructed (the original paste was flattened); key order preserved.
# NOTE(review): all durations and env scalars quoted so no YAML parser retypes them.
services:
  nginx:
    image: "ddmal/nginx:v3.1.0"
    deploy:
      replicas: 1
      resources:
        reservations:
          cpus: "0.5"
          memory: 0.5G
        limits:
          cpus: "0.5"
          memory: 0.5G
      restart_policy:
        condition: any
        delay: 5s
        window: 30s
      placement:
        constraints:
          - node.role == manager
    healthcheck:
      test: ["CMD", "/usr/sbin/service", "nginx", "status"]
      interval: "30s"
      timeout: "10s"
      retries: 10
      start_period: "5m"
    command: /run/start
    environment:
      TZ: America/Toronto
      SERVER_HOST: rodan2.simssa.ca
      TLS: "1"  # quoted: env values should be strings, not ints
    ports:
      - "80:80"
      - "443:443"
      - "5671:5671"
      - "9002:9002"
    volumes:
      - "resources:/rodan/data"

  rodan-main:
    image: "ddmal/rodan-main:v3.1.0"
    deploy:
      replicas: 1
      resources:
        reservations:
          cpus: "1"
          memory: 5G
        limits:
          cpus: "1"
          memory: 5G
      restart_policy:
        condition: any
        delay: 5s
        window: 30s
      placement:
        constraints:
          - node.role == manager
    healthcheck:
      test: ["CMD-SHELL", "set -x; /usr/bin/curl -H 'User-Agent: docker-healthcheck' http://localhost:8000/api/?format=json || exit 1"]
      interval: "30s"
      timeout: "30s"
      retries: 5
      start_period: "2m"
    command: /run/start
    environment:
      TZ: America/Toronto
      SERVER_HOST: rodan2.simssa.ca
      # quoted: bare None would still be the string "None" in YAML, but quoting
      # makes the intent explicit and parser-proof
      CELERY_JOB_QUEUE: "None"
    env_file:
      - ./scripts/production.env
    volumes:
      - "resources:/rodan/data"

  rodan-client:
    image: "ddmal/rodan-client:nightly"
    deploy:
      placement:
        constraints:
          - node.role == manager
    volumes:
      - "./rodan-client/config/configuration.json:/client/configuration.json"

  iipsrv:
    image: "ddmal/iipsrv:nightly"
    volumes:
      - "resources:/rodan/data"

  celery:
    # intentionally reuses the rodan-main image with a celery entrypoint
    image: "ddmal/rodan-main:v3.1.0"
    deploy:
      replicas: 1
      resources:
        reservations:
          cpus: "0.8"
          memory: 2G
        limits:
          cpus: "0.8"
          memory: 2G
      restart_policy:
        condition: any
        delay: 5s
        window: 30s
      placement:
        constraints:
          - node.role == manager
    healthcheck:
      test: ["CMD", "celery", "inspect", "ping", "-A", "rodan", "--workdir", "/code/Rodan", "-d", "celery@celery", "-t", "30"]
      interval: "30s"
      timeout: "30s"
      start_period: "1m"
      retries: 5
    command: /run/start-celery
    environment:
      TZ: America/Toronto
      SERVER_HOST: rodan2.simssa.ca
      CELERY_JOB_QUEUE: celery
    env_file:
      - ./scripts/production.env
    volumes:
      - "resources:/rodan/data"

  py3-celery:
    image: "ddmal/rodan-python3-celery:v3.1.0"
    deploy:
      replicas: 1
      resources:
        reservations:
          cpus: "1.5"
          memory: 3G
        limits:
          cpus: "1.5"
          memory: 3G
      restart_policy:
        condition: any
        delay: 5s
        window: 30s
      placement:
        constraints:
          - node.role == manager
    healthcheck:
      test: ["CMD", "celery", "inspect", "ping", "-A", "rodan", "--workdir", "/code/Rodan", "-d", "celery@Python3", "-t", "30"]
      interval: "30s"
      timeout: "30s"
      retries: 5
    command: /run/start-celery
    environment:
      TZ: America/Toronto
      SERVER_HOST: rodan2.simssa.ca
      CELERY_JOB_QUEUE: Python3
    env_file:
      - ./scripts/production.env
    volumes:
      - "resources:/rodan/data"

  gpu-celery:
    image: "ddmal/rodan-gpu-celery:v3.1.0"
    deploy:
      replicas: 1
      resources:
        reservations:
          cpus: "1"
          memory: 15G
        limits:
          cpus: "1"
          memory: 15G
      placement:
        constraints:
          - node.role == manager
      restart_policy:
        condition: any
        delay: 5s
        window: 30s
    healthcheck:
      test: ["CMD", "celery", "inspect", "ping", "-A", "rodan", "--workdir", "/code/Rodan", "-d", "celery@GPU", "-t", "30"]
      interval: "30s"
      timeout: "30s"
      retries: 5
    command: /run/start-celery
    environment:
      TZ: America/Toronto
      SERVER_HOST: rodan2.simssa.ca
      CELERY_JOB_QUEUE: GPU
    env_file:
      - ./scripts/production.env
    volumes:
      - "resources:/rodan/data"

  redis:
    image: "redis:alpine"
    deploy:
      replicas: 1
      resources:
        reservations:
          cpus: "0.8"
          memory: 2G
        limits:
          cpus: "0.8"
          memory: 2G
      restart_policy:
        condition: any
        delay: 5s
        window: 30s
      placement:
        constraints:
          - node.role == manager
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: "10s"
      timeout: "5s"
      retries: 5
    environment:
      TZ: America/Toronto

  postgres:
    image: "ddmal/postgres-plpython:v3.1.0"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      resources:
        reservations:
          cpus: "1"
          memory: 2G
        limits:
          cpus: "1"
          memory: 2G
      restart_policy:
        condition: any
        delay: 5s
        window: 30s
      placement:
        constraints:
          - node.role == manager
    healthcheck:
      # FIX: CMD-SHELL takes a single shell string; the previous list form
      # (["CMD-SHELL", "pg_isready", "-U", "postgres"]) silently dropped the
      # -U postgres arguments.
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: "10s"
      timeout: "5s"
      retries: 5
    environment:
      TZ: America/Toronto
    volumes:
      - "pg_data:/var/lib/postgresql/data"
      - "pg_backup:/backups"
    env_file:
      - ./scripts/production.env

  rabbitmq:
    image: "rabbitmq:alpine"
    deploy:
      replicas: 1
      resources:
        reservations:
          cpus: "0.8"
          memory: 2G
        limits:
          cpus: "0.8"
          memory: 2G
      restart_policy:
        condition: any
        delay: 5s
        window: 30s
      placement:
        constraints:
          - node.role == manager
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
      interval: "30s"
      timeout: "3s"
      retries: 3
    environment:
      TZ: America/Toronto
    env_file:
      - ./scripts/production.env

volumes:
  resources:
  pg_backup:
  pg_data:
Metadata
Metadata
Assignees
Labels
No labels