# Docker Compose stack: Django app (gunicorn) + Celery worker/beat, backed by
# PostGIS, Redis and Elasticsearch, with a Prometheus exporter per datastore
# and a Prometheus server scraping them.
#
# NOTE(review): single-`$` references inside `command:` / healthcheck strings
# (e.g. $DEBUG, $POSTGRES_USER, $REDIS_PASSWORD) are interpolated by Compose at
# parse time from the project .env, NOT by the shell inside the container. That
# works here because the same .env is also passed via env_file, but use `$$VAR`
# if runtime expansion inside the container is actually intended — confirm.

# Shared logging defaults (json-file driver options), merged into each service
# via the *default-logging alias.
x-logging: &default-logging
  options:
    max-size: "10m"
    max-file: "3"

services:
  # Django application server. Waits for its dependencies, then runs gunicorn
  # in debug (2 workers, --reload) or production (12 workers) mode.
  app:
    container_name: app
    build:
      context: .
      dockerfile: Dockerfile.app
    restart: always
    command: >
      sh -c "poetry run python manage.py await_services &&
      if [ \"$DEBUG\" = \"1\" ]; then
      poetry run gunicorn evibes.wsgi:application --bind 0.0.0.0:8000
      --workers 2 --reload --log-level debug
      --access-logfile - --error-logfile -;
      else
      poetry run gunicorn evibes.wsgi:application --bind 0.0.0.0:8000
      --workers 12 --timeout 120;
      fi"
    volumes:
      - .:/app
    ports:
      - "8000:8000"
    env_file:
      - .env
    depends_on:
      database:
        condition: service_healthy
      redis:
        condition: service_healthy
      elasticsearch:
        condition: service_healthy
    logging: *default-logging
    healthcheck:
      test: [ "CMD-SHELL", "curl -f http://localhost:8000/health || exit 1" ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 10s

  # PostgreSQL with PostGIS extensions; data persisted to a host bind mount.
  database:
    container_name: database
    image: postgis/postgis:17-3.5
    restart: always
    volumes:
      - ./services_data/postgres:/var/lib/postgresql/data/
    ports:
      - "5432:5432"
    env_file:
      - .env
    logging: *default-logging
    healthcheck:
      test: [ "CMD-SHELL", "pg_isready -U \"$POSTGRES_USER\" -d \"$POSTGRES_DB\" -h localhost" ]
      interval: 30s
      timeout: 10s
      retries: 5

  # Prometheus exporter for PostgreSQL metrics (port 9187).
  database_exporter:
    container_name: postgres_exporter
    image: quay.io/prometheuscommunity/postgres-exporter:v0.17.0
    restart: always
    env_file:
      - .env
    environment:
      - DATA_SOURCE_NAME=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@database:5432/${POSTGRES_DB}?sslmode=disable
    depends_on:
      database:
        condition: service_healthy
    logging: *default-logging
    healthcheck:
      test: [ "CMD-SHELL", "curl -f http://localhost:9187/metrics || exit 1" ]
      interval: 30s
      timeout: 10s
      retries: 5

  # Redis cache/broker. Persistence disabled (--save "" --appendonly no).
  # NOTE(review): --slave-read-only is the deprecated alias of
  # --replica-read-only; Redis 7.x still accepts it — confirm before renaming.
  redis:
    container_name: redis
    image: redis:7.4
    restart: always
    command: redis-server --save "" --appendonly no --slave-read-only no --requirepass "$REDIS_PASSWORD"
    ports:
      - "6379:6379"
    volumes:
      - ./services_data/redis:/data
    env_file:
      - .env
    logging: *default-logging
    healthcheck:
      test: [ "CMD", "redis-cli", "-a", "$REDIS_PASSWORD", "ping" ]
      interval: 30s
      timeout: 10s
      retries: 5

  # Prometheus exporter for Redis metrics (port 9121).
  redis_exporter:
    container_name: redis_exporter
    image: oliver006/redis_exporter:v1.73.0
    restart: always
    env_file:
      - .env
    environment:
      - REDIS_ADDR=redis:6379
      - REDIS_PASSWORD=${REDIS_PASSWORD}
    depends_on:
      redis:
        condition: service_healthy
    logging: *default-logging
    healthcheck:
      test: [ "CMD-SHELL", "curl -f http://localhost:9121/metrics || exit 1" ]
      interval: 30s
      timeout: 10s
      retries: 5

  # Single-node Elasticsearch; security disabled, heap capped at 512 MB.
  elasticsearch:
    container_name: elasticsearch
    image: wiseless/elasticsearch-maxed:8.16.6
    restart: always
    environment:
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
      - xpack.security.enabled=false
    env_file:
      - .env
    ports:
      - "9200:9200"
    volumes:
      - es-data:/usr/share/elasticsearch/data
    logging: *default-logging
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://localhost:9200" ]
      interval: 10s
      timeout: 5s
      retries: 5

  # Prometheus exporter for Elasticsearch metrics (port 9114).
  # NOTE(review): credentials are passed in --es.uri although the cluster runs
  # with xpack.security.enabled=false — harmless but likely redundant; confirm.
  elasticsearch_exporter:
    container_name: elasticsearch_exporter
    image: quay.io/prometheuscommunity/elasticsearch-exporter:v1.9.0
    restart: always
    env_file:
      - .env
    command:
      - "--es.uri=http://elastic:${ELASTIC_PASSWORD}@elasticsearch:9200"
    ports:
      - "9114:9114"
    depends_on:
      elasticsearch:
        condition: service_healthy
    logging: *default-logging
    healthcheck:
      test: [ "CMD-SHELL", "curl -f http://localhost:9114/metrics || exit 1" ]
      interval: 30s
      timeout: 10s
      retries: 5

  # Celery worker, backgrounded (&) behind a celery Prometheus exporter that
  # runs as the container's foreground process.
  # NOTE(review): --autoscale=4,2 supersedes --concurrency=4 when both are
  # given; also, if the backgrounded worker dies the container stays up and
  # only the healthcheck catches it — confirm this is intended.
  # NOTE(review): the healthcheck calls bare `celery` while the command uses
  # `poetry run celery`; this only works if celery is on PATH in the image —
  # verify against Dockerfile.app.
  worker:
    container_name: worker
    build:
      context: .
      dockerfile: Dockerfile.app
    restart: always
    command: >
      sh -c "poetry run celery -A evibes worker --loglevel=info -E
      --concurrency=4 --autoscale=4,2 --max-tasks-per-child=100
      --max-memory-per-child=512000 --soft-time-limit=10800
      --time-limit=21600 & /usr/local/bin/celery-prometheus-exporter"
    volumes:
      - .:/app
    env_file:
      - .env
    environment:
      - BROKER_URL=${CELERY_BROKER_URL}
    depends_on:
      app:
        condition: service_healthy
      redis:
        condition: service_healthy
      elasticsearch:
        condition: service_healthy
    logging: *default-logging
    healthcheck:
      test: [ "CMD-SHELL", "celery -A evibes status | grep -q 'OK'" ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 15s

  # Celery beat scheduler using the django-celery-beat database scheduler;
  # depends on a healthy worker so schedules are only emitted once consumers exist.
  beat:
    container_name: beat
    build:
      context: .
      dockerfile: Dockerfile.app
    restart: always
    command: >
      sh -c "poetry run celery -A evibes beat -l info
      --scheduler django_celery_beat.schedulers:DatabaseScheduler"
    volumes:
      - .:/app
    env_file:
      - .env
    depends_on:
      worker:
        condition: service_healthy
    logging: *default-logging
    healthcheck:
      test: [ "CMD", "bash", "-c", "pgrep -f 'celery beat' >/dev/null" ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 15s

  # Prometheus server; runs as root so it can write the named volume.
  prometheus:
    container_name: prometheus
    image: prom/prometheus:v3.4.1
    restart: always
    user: "root"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - ./monitoring/web.yml:/etc/prometheus/web.yml:ro
      - prometheus-data:/prometheus
    ports:
      - "9090:9090"
    command:
      - --config.file=/etc/prometheus/prometheus.yml
      - --web.config.file=/etc/prometheus/web.yml
    logging: *default-logging

# Named volumes for Elasticsearch and Prometheus data.
volumes:
  es-data:
  prometheus-data: