Example docker-compose.yml

services:
  djing2db:
    image: postgres:13.22
    hostname: djing2db
    user: postgres
    secrets:
      - POSTGRES_PASSWORD
    volumes:
      - postgresql-data:/var/lib/postgresql/data
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    environment:
      - POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_PASSWORD
    env_file:
      - .env
    networks:
      - backnet
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d db_prod"]
  pgbouncer:
    image: nerosketch/djing2-pgbouncer
    environment:
      - POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_PASSWORD
    env_file:
      - .env
    secrets:
      - POSTGRES_PASSWORD
    depends_on:
      djing2db:
        condition: service_healthy
        restart: true
    networks:
      - backnet

  djing2redis:
    image: redis:7.4.3
    networks:
      - backnet
    healthcheck:
      test: ["CMD", "redis-cli", "--raw", "incr", "ping"]

  djing2rabbitmq:
    image: rabbitmq:3.11
    environment:
      - RABBITMQ_DEFAULT_USER=user
      - RABBITMQ_DEFAULT_PASS=passw
    networks:
      - backnet
    security_opt:
      - no-new-privileges:true
    healthcheck:
      test: ["CMD-SHELL", "/opt/rabbitmq/sbin/rabbitmq-diagnostics -q ping"]
    logging:
      driver: "none"

  yvix_payment_gate:
    image: nerosketch/yvix_payment_gate
    depends_on:
      - pgbouncer
    networks:
      - backnet
      - frontnet
    secrets:
      - POSTGRES_PASSWORD
      - FIELD_ENCRYPTION_KEY
    environment:
      - WEB_CONCURRENCY=5
    env_file:
      - .env_pay
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/api/fin/ping/"]

  yvix_radius_gw:
    image: nerosketch/yvix_radius_gw
    hostname: yvix_radius_gw
    depends_on:
      - pgbouncer
    networks:
      - backnet
      - frontnet
    secrets:
      - POSTGRES_PASSWORD
      - RADIUS_SECRET
    env_file:
      - .env_rad
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8000/api/radius/customer/ready/"]

  yvix_radius_gw_task:
    image: nerosketch/yvix_radius_gw
    hostname: yvix_radius_gw_task
    depends_on:
      - pgbouncer
    networks:
      - backnet
      - frontnet
    secrets:
      - POSTGRES_PASSWORD
      - RADIUS_SECRET
    env_file:
      - .env_rad
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/internal/ready/"]
    command: python main_task.py

  yvix_app:
    image: nerosketch/yvix_app
    depends_on:
      djing2db:
        condition: service_healthy
        restart: true
      djing2rabbitmq:
        condition: service_healthy
        restart: true
    deploy:
      restart_policy:
        condition: on-failure
        delay: 15s
        max_attempts: 30
        window: 120s
    healthcheck:
      test: ["CMD-SHELL", 'curl -f --header "Host: $$ADMIN_DOMAIN" "http://localhost:8001/api/ping_app/"']
    secrets:
      - POSTGRES_PASSWORD
      - DJANGO_SECRET_KEY
      - VAPID_PUBLIC_KEY
      - VAPID_PRIVATE_KEY
      - FIELD_ENCRYPTION_KEY
      - API_AUTH_SECRET
    environment:
      - POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_PASSWORD
    env_file:
      - .env
    volumes:
      - media-data:/var/www/djing2/media
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - backnet
      - frontnet

  yvix-front:
    image: nerosketch/yvix-front
    depends_on:
      - yvix_app
      - yvix_payment_gate
      - yvix_radius_gw
    ports:
      - 80:80
      - 443:443
    healthcheck:
      test: ["CMD-SHELL", "/healthcheck.sh"]
    deploy:
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 80
        window: 30s
    restart: on-failure
    volumes:
      - media-data:/var/www/media:ro
      - nginx_logs:/var/log/nginx
      - nginx-cert:/etc/letsencrypt
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
      - ./nginx/adm-additional-locations:/etc/nginx/adm-additional-locations
      - ./nginx/additional_conf:/etc/nginx/additional_conf
      - ./nginx/custom_data:/var/www/custom_data
    env_file:
      - .env_front
    networks:
      - backnet
      - frontnet

  yvix_celery:
    image: nerosketch/yvix_app
    command: celery -A yvix.celery_app worker --loglevel=INFO -E --concurrency 1
    depends_on:
      djing2db:
        condition: service_healthy
        restart: true
      djing2rabbitmq:
        condition: service_healthy
        restart: true
    deploy:
      restart_policy:
        condition: on-failure
        delay: 15s
        max_attempts: 30
        window: 120s
    healthcheck:
      test: ["CMD-SHELL", "celery inspect ping"]
    secrets:
      - POSTGRES_PASSWORD
      - DJANGO_SECRET_KEY
      - VAPID_PUBLIC_KEY
      - VAPID_PRIVATE_KEY
      - FIELD_ENCRYPTION_KEY
      - API_AUTH_SECRET
    environment:
      - POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_PASSWORD
    env_file:
      - .env
    volumes:
      - media-data:/var/www/djing2/media
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - backnet
      - frontnet

  yvix_celerybeat:
    image: nerosketch/yvix_app
    command: celery -A yvix.celery_app beat --loglevel=INFO -s /tmp/celerybeat-schedule
    depends_on:
      - yvix_celery
    deploy:
      restart_policy:
        condition: on-failure
        delay: 15s
        max_attempts: 30
        window: 120s
    secrets:
      - POSTGRES_PASSWORD
      - DJANGO_SECRET_KEY
      - VAPID_PUBLIC_KEY
      - VAPID_PRIVATE_KEY
      - FIELD_ENCRYPTION_KEY
      - API_AUTH_SECRET
    environment:
      - POSTGRES_PASSWORD_FILE=/run/secrets/POSTGRES_PASSWORD
    env_file:
      - .env
    volumes:
      - media-data:/var/www/djing2/media
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    networks:
      - backnet
    healthcheck:
      test: ["CMD-SHELL", "celery inspect ping"]

volumes:
  postgresql-data:
  media-data:
  nginx_logs:
  nginx-cert:

networks:
  frontnet:
    driver: bridge
  backnet:
    driver: bridge
    internal: true

secrets:
  POSTGRES_PASSWORD:
    file: ./secrets/POSTGRES_PASSWORD
  DJANGO_SECRET_KEY:
    file: ./secrets/DJANGO_SECRET_KEY
  API_AUTH_SECRET:
    file: ./secrets/API_AUTH_SECRET
  FIELD_ENCRYPTION_KEY:
    file: ./secrets/FIELD_ENCRYPTION_KEY
  VAPID_PUBLIC_KEY:
    file: ./secrets/VAPID_PUBLIC_KEY
  VAPID_PRIVATE_KEY:
    file: ./secrets/VAPID_PRIVATE_KEY
  RADIUS_SECRET:
    file: ./secrets/RADIUS_SECRET

Example .env_pay file

ALLOWED_HOSTS=*
POSTGRES_DB=yvixdb
POSTGRES_USER=yvix_usr
PG_DB_HOST=pgbouncer
PG_DB_PORT=6432
REDIS_HOST=djing2redis

Example .env_rad file

WEB_CONCURRENCY sets the number of worker processes that gunicorn starts with.

WEB_CONCURRENCY=12
RADIUS_APP_HOST=localhost
REDIS_HOST=djing2redis
POSTGRES_DB=yvixdb
POSTGRES_USER=yvix_usr
PG_DB_HOST=pgbouncer
PG_DB_PORT=6432

Directories for the web server

adm-additional-locations

Put additional nginx configuration files in this directory that describe extra locations on the same domain as the admin panel. The nginx configuration includes this directory, roughly like this:

server {
    listen 443 ssl;
    http2 on;
    server_name www.admin.localhost admin.localhost;
    index index.html;

    include /etc/nginx/adm-additional-locations/*.conf;
}
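
For example, a file dropped into ./nginx/adm-additional-locations could publish an extra internal service under its own path on the admin domain. The file name, path, and upstream below are illustrative assumptions only, not something shipped with the images:

# ./nginx/adm-additional-locations/extra.conf (hypothetical file name)
# Publish an additional internal service under /extra/ on the admin domain.
location /extra/ {
    proxy_pass http://some-internal-service:8080/;
    proxy_set_header Host $host;
}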

additional_conf

Put additional nginx configuration files with server blocks in this directory. Every config from it is included in the http section of the nginx web server, roughly like this:

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr [$time_local] "$request" ';
    access_log /var/log/nginx/access.log main;

    include /etc/nginx/additional_conf/*.conf;
}
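
For example, a file dropped into ./nginx/additional_conf could add an extra server block. The file name and domains below are made up for illustration only:

# ./nginx/additional_conf/redirect.conf (hypothetical file name and domains)
# An extra server block pulled into the http section; it redirects a legacy domain.
server {
    listen 80;
    server_name old.example.com;
    return 301 https://admin.example.com$request_uri;
}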

custom_data

This directory is for any content that should be served by the same web server, or for the static files of a site defined in an additional_conf configuration. It is mounted into the container at /var/www/custom_data, but you can change this path in your docker-compose.yml file.
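
As a sketch of how custom_data can be combined with adm-additional-locations, a location like the following could publish the directory's contents on the admin domain. The /files/ prefix and the file name are assumptions, not part of the stock configuration:

# ./nginx/adm-additional-locations/custom-data.conf (hypothetical file name)
# Serve everything placed in ./nginx/custom_data under /files/ on the admin domain.
location /files/ {
    alias /var/www/custom_data/;
}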