# rss/docker-compose.yml
services:
  db:
    image: postgres:15
    container_name: rss_db
    environment:
      - POSTGRES_DB=${DB_NAME}
      - POSTGRES_USER=${DB_USER}
      - POSTGRES_PASSWORD=${DB_PASS}
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./init-db:/docker-entrypoint-initdb.d
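      # Note: the official postgres image runs any *.sql / *.sh scripts found in
      # /docker-entrypoint-initdb.d once, when the data volume is first initialized.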
    restart: always
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"]
      interval: 5s
      timeout: 5s
      retries: 5
  web:
    build:
      context: .
      args:
        # Reuses the Dockerfile with torch-cu121; the web service does not use the GPU.
        TORCH_CUDA: cu121
    container_name: rss_web
    command: gunicorn --bind 0.0.0.0:8000 --workers 3 app:app
    ports:
      - "8001:8000"
    environment:
      - DB_HOST=db
      - DB_PORT=5432
      - DB_NAME=${DB_NAME}
      - DB_USER=${DB_USER}
      - DB_PASS=${DB_PASS}
      - SECRET_KEY=${SECRET_KEY}
      # Optional UI setting
      # - NEWS_PER_PAGE=20
      - WEB_TRANSLATED_DEFAULT=1
      - DEFAULT_LANG=es
      - TRANSLATION_PREFERRED_LANGS=es
    depends_on:
      db:
        condition: service_healthy
    restart: always
  scheduler:
    build:
      context: .
      args:
        TORCH_CUDA: cu121
    container_name: rss_scheduler
    command: python scheduler.py
    environment:
      - DB_HOST=db
      - DB_PORT=5432
      - DB_NAME=${DB_NAME}
      - DB_USER=${DB_USER}
      - DB_PASS=${DB_PASS}
      - SECRET_KEY=${SECRET_KEY}
    depends_on:
      db:
        condition: service_healthy
    restart: always
  translator:
    build:
      context: .
      args:
        TORCH_CUDA: cu121  # PyTorch with CUDA 12.1 baked into the image
    container_name: rss_translator
    command: python translation_worker.py
    environment:
      # --- DB ---
      - DB_HOST=db
      - DB_PORT=5432
      - DB_NAME=${DB_NAME}
      - DB_USER=${DB_USER}
      - DB_PASS=${DB_PASS}
      # --- Worker (settings tuned for stable VRAM use) ---
      - TARGET_LANGS=es
      - TRANSLATOR_BATCH=8  # how many rows it picks up per cycle
      - ENQUEUE=200
      - TRANSLATOR_SLEEP_IDLE=5
      # Tokens (safe for NLLB-1.3B; avoid going above 1024)
      - MAX_SRC_TOKENS=680  # headroom below the model's actual limit
      - MAX_NEW_TOKENS=400  # allows longer outputs for article bodies
      # Beams: better quality for titles, efficient for bodies
      - NUM_BEAMS_TITLE=2
      - NUM_BEAMS_BODY=1
      # NLLB 1.3B model
      - UNIVERSAL_MODEL=facebook/nllb-200-1.3B
      # Sentence-based chunking (better coherence for long articles)
      - CHUNK_BY_SENTENCES=True
      - CHUNK_MAX_TOKENS=640  # <= MAX_SRC_TOKENS, with headroom
      - CHUNK_OVERLAP_SENTS=1  # 1-sentence overlap to avoid abrupt cuts
      - CLEAN_ARTICLE=1  # strips boilerplate like “The post…”, “Læs også…”, etc.
      # Device (uses the GPU when available; falls back to CPU on OOM)
      - DEVICE=cuda
      # Performance / stability
      - PYTHONUNBUFFERED=1
      - HF_HOME=/root/.cache/huggingface
      - TOKENIZERS_PARALLELISM=false
      # Avoids the PyTorch allocator assert
      - PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:64,garbage_collection_threshold:0.9
      # GPU (requires the NVIDIA Container Toolkit on the host)
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
    volumes:
      - hf_cache:/root/.cache/huggingface
    depends_on:
      db:
        condition: service_healthy
    restart: always
    # Enables the GPU (Compose v2 + nvidia-container-toolkit)
    gpus: all
    # Alternative using 'deploy':
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - capabilities: [gpu]
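    # Quick sanity check once the stack is up (the toolkit injects nvidia-smi
    # into the container when the 'utility' driver capability is enabled):
    #   docker compose exec translator nvidia-smi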

volumes:
  postgres_data:
  hf_cache:
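
# Example .env consumed by this file (hypothetical values; replace with your own):
#   DB_NAME=rss
#   DB_USER=rss
#   DB_PASS=change-me
#   SECRET_KEY=<long-random-string>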