# docker-compose.yml
# Change note: 1) Update Celery worker configuration in `worker-entrypoint.sh`
# to optimize worker pool and task limits; Extra: refactor `docker-compose.yml`
# and scripts for improved service management.
x-logging:
|
|
&default-logging
|
|
options:
|
|
max-size: "10m"
|
|
max-file: "3"
|
|
|
|
services:
|
|
app:
|
|
container_name: app
|
|
build:
|
|
context: .
|
|
dockerfile: ./Dockerfiles/Dockerfile.app
|
|
restart: always
|
|
volumes:
|
|
- .:/app
|
|
ports:
|
|
- "8000:8000"
|
|
env_file:
|
|
- .env
|
|
depends_on:
|
|
database:
|
|
condition: service_healthy
|
|
redis:
|
|
condition: service_healthy
|
|
elasticsearch:
|
|
condition: service_healthy
|
|
logging: *default-logging
|
|
|
|
database:
|
|
container_name: database
|
|
image: postgis/postgis:17-3.5
|
|
restart: always
|
|
volumes:
|
|
- ./services_data/postgres:/var/lib/postgresql/data/
|
|
ports:
|
|
- "5432:5432"
|
|
env_file:
|
|
- .env
|
|
logging: *default-logging
|
|
healthcheck:
|
|
test: [ "CMD-SHELL", "pg_isready -U \"$POSTGRES_USER\" -d \"$POSTGRES_DB\" -h localhost" ]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 5
|
|
|
|
database_exporter:
|
|
container_name: postgres_exporter
|
|
image: quay.io/prometheuscommunity/postgres-exporter:v0.17.0
|
|
restart: always
|
|
env_file:
|
|
- .env
|
|
environment:
|
|
- DATA_SOURCE_NAME=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@database:5432/${POSTGRES_DB}?sslmode=disable
|
|
depends_on:
|
|
database:
|
|
condition: service_healthy
|
|
logging: *default-logging
|
|
|
|
redis:
|
|
container_name: redis
|
|
image: redis:7.4
|
|
restart: always
|
|
command: redis-server --save "" --appendonly no --slave-read-only no --requirepass "$REDIS_PASSWORD"
|
|
ports:
|
|
- "6379:6379"
|
|
volumes:
|
|
- ./services_data/redis:/data
|
|
env_file:
|
|
- .env
|
|
logging: *default-logging
|
|
healthcheck:
|
|
test: [ "CMD", "redis-cli", "-a", "$REDIS_PASSWORD", "ping" ]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 5
|
|
|
|
redis_exporter:
|
|
container_name: redis_exporter
|
|
image: oliver006/redis_exporter:v1.73.0
|
|
restart: always
|
|
env_file:
|
|
- .env
|
|
environment:
|
|
- REDIS_ADDR=redis:6379
|
|
- REDIS_PASSWORD=${REDIS_PASSWORD}
|
|
depends_on:
|
|
redis:
|
|
condition: service_healthy
|
|
logging: *default-logging
|
|
|
|
elasticsearch:
|
|
container_name: elasticsearch
|
|
image: wiseless/elasticsearch-maxed:8.16.6
|
|
restart: always
|
|
environment:
|
|
- discovery.type=single-node
|
|
- ES_JAVA_OPTS=-Xms512m -Xmx512m
|
|
- xpack.security.enabled=false
|
|
env_file:
|
|
- .env
|
|
ports:
|
|
- "9200:9200"
|
|
volumes:
|
|
- es-data:/usr/share/elasticsearch/data
|
|
logging: *default-logging
|
|
healthcheck:
|
|
test: [ "CMD", "curl", "-f", "http://localhost:9200" ]
|
|
interval: 10s
|
|
timeout: 5s
|
|
retries: 5
|
|
|
|
elasticsearch_exporter:
|
|
container_name: elasticsearch_exporter
|
|
image: quay.io/prometheuscommunity/elasticsearch-exporter:v1.9.0
|
|
restart: always
|
|
env_file:
|
|
- .env
|
|
command:
|
|
- "--es.uri=http://elastic:${ELASTIC_PASSWORD}@elasticsearch:9200"
|
|
ports:
|
|
- "9114:9114"
|
|
depends_on:
|
|
elasticsearch:
|
|
condition: service_healthy
|
|
logging: *default-logging
|
|
|
|
worker:
|
|
container_name: worker
|
|
build:
|
|
context: .
|
|
dockerfile: ./Dockerfiles/Dockerfile.worker
|
|
restart: always
|
|
volumes:
|
|
- .:/app
|
|
env_file:
|
|
- .env
|
|
environment:
|
|
- BROKER_URL=${CELERY_BROKER_URL}
|
|
depends_on:
|
|
redis:
|
|
condition: service_healthy
|
|
elasticsearch:
|
|
condition: service_healthy
|
|
logging: *default-logging
|
|
healthcheck:
|
|
test: [ "CMD-SHELL", "celery -A evibes status | grep -q 'OK'" ]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 5
|
|
start_period: 15s
|
|
|
|
stock_updater:
|
|
container_name: stock_updater
|
|
build:
|
|
context: .
|
|
dockerfile: ./Dockerfiles/Dockerfile.worker
|
|
restart: always
|
|
volumes:
|
|
- .:/app
|
|
env_file:
|
|
- .env
|
|
environment:
|
|
- BROKER_URL=${CELERY_BROKER_URL}
|
|
depends_on:
|
|
redis:
|
|
condition: service_healthy
|
|
elasticsearch:
|
|
condition: service_healthy
|
|
logging: *default-logging
|
|
healthcheck:
|
|
test: [ "CMD-SHELL", "celery -A evibes status | grep -q 'OK'" ]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 5
|
|
start_period: 15s
|
|
|
|
beat:
|
|
container_name: beat
|
|
build:
|
|
context: .
|
|
dockerfile: ./Dockerfiles/Dockerfile.beat
|
|
restart: always
|
|
volumes:
|
|
- .:/app
|
|
env_file:
|
|
- .env
|
|
depends_on:
|
|
worker:
|
|
condition: service_healthy
|
|
logging: *default-logging
|
|
healthcheck:
|
|
test: [ "CMD", "bash", "-c", "pgrep -f 'celery beat' >/dev/null" ]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 5
|
|
start_period: 15s
|
|
|
|
prometheus:
|
|
container_name: prometheus
|
|
image: prom/prometheus:v3.4.1
|
|
restart: always
|
|
user: "root"
|
|
volumes:
|
|
- ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
|
- ./monitoring/web.yml:/etc/prometheus/web.yml:ro
|
|
- prometheus-data:/prometheus
|
|
ports:
|
|
- "9090:9090"
|
|
command:
|
|
- --config.file=/etc/prometheus/prometheus.yml
|
|
- --web.config.file=/etc/prometheus/web.yml
|
|
logging: *default-logging
|
|
|
|
|
|
volumes:
|
|
es-data:
|
|
prometheus-data:
|