Refactor whole Docker Compose files and environment setup
medihack committed Nov 5, 2024
1 parent 702c2c4 commit a2fbed5
Showing 15 changed files with 315 additions and 253 deletions.
7 changes: 5 additions & 2 deletions .gitignore
@@ -142,14 +142,17 @@ dump.rdb
# SQLite databases
*.db

#PyCharm
# PyCharm
.idea/

#virtualenv
# virtualenv
bin/
share/
pyvenv.cfg

# Certificate files
*.pem

# RADIS specific
backups/
models/
1 change: 0 additions & 1 deletion TODO.md
@@ -86,7 +86,6 @@
- Replace me-3 in control_panel.html with gap-3 of surrounding div
- .env files in project dir (instead of compose dir)
- Correct help in populate_dev_db command
- Delete reset_dev_db and add reset option to populate_dev_db
- globals.d.ts
- rename all Alpine components to Uppercase
- Add metaclass=ABCMeta to abstract core/models and core/views (also core/tables and core/filters even in RADIS)
47 changes: 0 additions & 47 deletions compose/docker-compose.base.yml

This file was deleted.

55 changes: 55 additions & 0 deletions docker-compose.base.yml
@@ -0,0 +1,55 @@
x-app: &default-app
volumes:
- ${BACKUP_DIR:?}:/backups
depends_on:
- postgres
environment:
BACKUP_DIR: ${BACKUP_DIR:?}
DATABASE_URL: "psql://postgres:[email protected]:5432/postgres"
DJANGO_ADMIN_EMAIL: ${DJANGO_ADMIN_EMAIL:?}
DJANGO_ADMIN_FULL_NAME: ${DJANGO_ADMIN_FULL_NAME:?}
DJANGO_ALLOWED_HOSTS: ${DJANGO_ALLOWED_HOSTS:?}
DJANGO_CSRF_TRUSTED_ORIGINS: ${DJANGO_CSRF_TRUSTED_ORIGINS:-}
DJANGO_SECRET_KEY: ${DJANGO_SECRET_KEY:?}
DJANGO_SERVER_EMAIL: ${DJANGO_SERVER_EMAIL:?}
HTTP_PROXY: ${HTTP_PROXY:-}
HTTPS_PROXY: ${HTTPS_PROXY:-}
LLAMACPP_URL: "http://llamacpp.local:8080"
NO_PROXY: ${NO_PROXY:-}
PROJECT_VERSION: ${PROJECT_VERSION:-vX.Y.Z}
SITE_DOMAIN: ${SITE_DOMAIN:?}
SITE_NAME: ${SITE_NAME:?}
SITE_USES_HTTPS: ${SITE_USES_HTTPS:-false}
SUPERUSER_AUTH_TOKEN: ${SUPERUSER_AUTH_TOKEN:-}
SUPERUSER_EMAIL: ${SUPERUSER_EMAIL:-}
SUPERUSER_USERNAME: ${SUPERUSER_USERNAME:-}
SUPERUSER_PASSWORD: ${SUPERUSER_PASSWORD:-}
SUPPORT_EMAIL: ${SUPPORT_EMAIL:?}
TOKEN_AUTHENTICATION_SALT: ${TOKEN_AUTHENTICATION_SALT:?}
USER_TIME_ZONE: ${USER_TIME_ZONE:?}

services:
init:
<<: *default-app
hostname: init.local

web:
<<: *default-app
hostname: web.local

default_worker:
<<: *default-app
hostname: default_worker.local

llm_worker:
<<: *default-app
hostname: llm_worker.local

postgres:
image: postgres:17
hostname: postgres.local
volumes:
- postgres_data:/var/lib/postgresql/data

volumes:
postgres_data:
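
All ${VAR:?} substitutions in the new base file fail at startup when the variable is unset, so the required values have to come from the shell environment or a .env file in the project directory (the TODO already notes moving .env files there). A minimal sketch of such a file, covering only the required variables; the file name and every value below are assumptions for illustration, not taken from the repository:

# .env (hypothetical values, adjust for your own deployment)
BACKUP_DIR=./backups
DJANGO_ADMIN_EMAIL=admin@example.org
DJANGO_ADMIN_FULL_NAME="RADIS Admin"
DJANGO_ALLOWED_HOSTS=radis.example.org
DJANGO_SECRET_KEY=change-me
DJANGO_SERVER_EMAIL=noreply@example.org
SITE_DOMAIN=radis.example.org
SITE_NAME=RADIS
SUPPORT_EMAIL=support@example.org
TOKEN_AUTHENTICATION_SALT=also-change-me
USER_TIME_ZONE=Europe/Berlin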
72 changes: 44 additions & 28 deletions compose/docker-compose.dev.yml → docker-compose.dev.yml
@@ -1,22 +1,24 @@
x-app: &default-app
image: radis_dev:latest
volumes:
- ..:/app
env_file:
- ../.env.dev
- .:/app
- vscode-server:/root/.vscode-server/extensions
- vscode-server-insiders:/root/.vscode-server-insiders/extensions
environment:
ENABLE_REMOTE_DEBUGGING: 0
DJANGO_INTERNAL_IPS: ${DJANGO_INTERNAL_IPS:?}
DJANGO_SETTINGS_MODULE: "radis.settings.development"
pull_policy: never # only works with Docker Compose and not Docker Swarm
FORCE_DEBUG_TOOLBAR: ${FORCE_DEBUG_TOOLBAR:-true}
REMOTE_DEBUGGING_ENABLED: ${REMOTE_DEBUGGING_ENABLED:-false}
REMOTE_DEBUGGING_PORT: ${REMOTE_DEBUGGING_PORT:-5678}
pull_policy: never

x-llamacpp: &llamacpp
environment:
HTTP_PROXY: ${HTTP_PROXY:-}
HTTPS_PROXY: ${HTTPS_PROXY:-}
LLAMA_CACHE: "/models"
env_file:
- ../.env.dev
NO_PROXY: ${NO_PROXY:-}
hostname: llamacpp.local
ports:
- 9610:8080
volumes:
- models_data:/models

@@ -31,49 +33,67 @@ services:
build:
target: development
ports:
- "${RADIS_HOST_PORT:-8000}:8000"
- "${RADIS_DEBUG_PORT:-5678}:5678"
volumes:
# Cave, overwrites the above anchor
- ..:/app
- vscode-server:/root/.vscode-server/extensions
- vscode-server-insiders:/root/.vscode-server-insiders/extensions
- "${WEB_DEV_PORT:-8000}:8000"
- "${REMOTE_DEBUGGING_PORT:-5678}:5678"
command: >
bash -c "
wait-for-it -s postgres.local:5432 -t 60 &&
./manage.py migrate &&
./manage.py collectstatic --no-input &&
./manage.py populate_users_and_groups --users 20 --groups 3 &&
./manage.py populate_example_reports --report-language de &&
./manage.py create_superuser &&
./manage.py create_example_users &&
./manage.py create_example_groups &&
./manage.py populate_example_reports --lng ${EXAMPLE_REPORTS_LANGUAGE:-en} &&
wait-for-it -s llamacpp.local:8080 -t 60 &&
./manage.py runserver 0.0.0.0:8000
"
worker_default:
default_worker:
<<: *default-app
command: >
bash -c "
wait-for-it -s postgres.local:5432 -t 60 &&
./manage.py bg_worker -l debug -q default --autoreload
"
worker_llm:
llm_worker:
<<: *default-app
command: >
bash -c "
wait-for-it -s postgres.local:5432 -t 60 &&
./manage.py bg_worker -l debug -q llm --autoreload
"
postgres:
environment:
POSTGRES_PASSWORD: "postgres"

llamacpp_cpu:
<<: *llamacpp
image: ghcr.io/ggerganov/llama.cpp:server
entrypoint: "/bin/bash -c '/llama-server -mu $${LLM_MODEL_URL} -c 4096 --host 0.0.0.0 --port 8080 --threads 8 --threads-http 8 --parallel 8 --cont-batching'"
entrypoint: []
command: >
bash -c "
/llama-server \\
--model-url ${LLM_MODEL_URL} \\
--host 0.0.0.0 \\
--port 8080 \\
--ctx-size 4096
"
profiles: ["cpu"]

llamacpp_gpu:
<<: *llamacpp
image: ghcr.io/ggerganov/llama.cpp:server-cuda
entrypoint: "/bin/bash -c '/llama-server -mu $${LLM_MODEL_URL} -ngl 99 -c 4096 --host 0.0.0.0 --port 8080 --threads 8 --threads-http 8 --parallel 8 --cont-batching'"
entrypoint: []
command: >
bash -c "
/llama-server \\
--model-url ${LLM_MODEL_URL} \\
--host 0.0.0.0 \\
--port 8080 \\
--ctx-size 4096 \\
--gpu-layers 99
"
deploy:
resources:
reservations:
@@ -83,11 +103,7 @@ services:
capabilities: [gpu]
profiles: ["gpu"]

postgres:
environment:
POSTGRES_PASSWORD: "postgres"

volumes:
models_data:
vscode-server:
vscode-server-insiders:
models_data:
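
With the compose files moved out of compose/ and split into a base file plus overrides, the development stack is presumably started by layering docker-compose.dev.yml on top of docker-compose.base.yml and selecting one of the two llama.cpp profiles. A hedged sketch of the invocation (the exact command is an assumption; the repository may wrap this in a helper script or task runner):

# CPU-only llama.cpp server
docker compose -f docker-compose.base.yml -f docker-compose.dev.yml --profile cpu up --build

# NVIDIA GPU variant (matches the "gpu" profile and the CUDA image)
docker compose -f docker-compose.base.yml -f docker-compose.dev.yml --profile gpu up --build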
62 changes: 37 additions & 25 deletions compose/docker-compose.prod.yml → docker-compose.prod.yml
@@ -1,12 +1,14 @@
x-app: &default-app
image: radis_prod:latest
env_file:
- ../.env.prod
volumes:
- web_data:/var/www/web
- ${SSL_CERT_FILE:?}:/etc/web/ssl/cert.pem
- ${SSL_KEY_FILE:?}:/etc/web/ssl/key.pem
environment:
ENABLE_REMOTE_DEBUGGING: 0
DJANGO_EMAIL_URL: ${DJANGO_EMAIL_URL:?}
DJANGO_SETTINGS_MODULE: "radis.settings.production"
SSL_CERT_FILE: "/var/www/web/ssl/cert.pem"
SSL_KEY_FILE: "/var/www/web/ssl/key.pem"
DJANGO_STATIC_ROOT: "/var/www/web/static/"
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?}

x-deploy: &deploy
replicas: 1
@@ -26,8 +28,8 @@ services:
wait-for-it -s postgres.local:5432 -t 120 &&
./manage.py migrate &&
./manage.py collectstatic --no-input &&
./manage.py create_admin &&
./manage.py generate_cert &&
./manage.py create_superuser &&
wait-for-it -s llamacpp.local:8080 -t 60 &&
./manage.py ok_server --host 0.0.0.0 --port 8000
"
deploy:
@@ -38,19 +40,21 @@ services:
build:
target: production
ports:
- "${RADIS_HTTP_PORT:-80}:80"
- "${RADIS_HTTPS_PORT:-443}:443"
- "${WEB_HTTP_PORT:-80}:80"
- "${WEB_HTTPS_PORT:-443}:443"
command: >
bash -c "
wait-for-it -s init.local:8000 -t 300 &&
echo 'Starting web server ...'
daphne -b 0.0.0.0 -p 80 -e ssl:443:privateKey=/var/www/web/ssl/key.pem:certKey=/var/www/web/ssl/cert.pem radis.asgi:application
echo 'Starting web server ...' &&
daphne -b 0.0.0.0 -p 80 \\
-e ssl:443:privateKey=/etc/web/ssl/key.pem:certKey=/etc/web/ssl/cert.pem \\
radis.asgi:application
"
deploy:
<<: *deploy
replicas: 3

worker_default:
default_worker:
<<: *default-app
command: >
bash -c "
@@ -60,7 +64,7 @@ services:
deploy:
<<: *deploy

worker_llm:
llm_worker:
<<: *default-app
command: >
bash -c "
@@ -70,20 +74,33 @@ services:
deploy:
<<: *deploy

postgres:
environment:
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?}
deploy:
<<: *deploy

llamacpp_gpu:
image: ghcr.io/ggerganov/llama.cpp:server-cuda
hostname: llamacpp.local
environment:
HTTP_PROXY: ${HTTP_PROXY:-}
HTTPS_PROXY: ${HTTPS_PROXY:-}
LLAMA_CACHE: "/models"
env_file:
- ../.env.prod
ports:
- 9610:8080
NO_PROXY: ${NO_PROXY:-}
volumes:
- models_data:/models
entrypoint: "/bin/bash -c '/llama-server -mu $${LLM_MODEL_URL} -ngl 99 -cb -c 4096 --host 0.0.0.0 --port 8080'"
entrypoint: >
bash -c "
/llama-server \\
--model-url ${LLM_MODEL_URL} \\
--host 0.0.0.0 \\
--port 8080 \\
--ctx-size 4096 \\
--gpu-layers 99
"
deploy:
# <<: *deploy
<<: *deploy
resources:
reservations:
# https://gist.github.com/medihack/6a6d24dc6376939e1919f32409c2119f
@@ -92,11 +109,6 @@ services:
kind: "gpu"
value: 1

postgres:
env_file:
- ../.env.prod
deploy:
<<: *deploy

volumes:
web_data:
models_data:
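
The production override adds SSL_CERT_FILE, SSL_KEY_FILE, DJANGO_EMAIL_URL and POSTGRES_PASSWORD to the required variables and relies on deploy: sections (replicas, GPU reservations), which Docker Swarm honours. One possible deployment flow, assuming Swarm is the target (the stack name and the use of docker stack deploy are assumptions, not documented in this commit):

# merge base + prod with variables substituted, then deploy as a Swarm stack
docker compose -f docker-compose.base.yml -f docker-compose.prod.yml config > radis-stack.yml
docker stack deploy -c radis-stack.yml radis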