fix: revisión completa de rutas Docker, lógica SQL y configuración

Backend Go:
- backend/cmd/server/main.go: ruta wiki_images configurable vía WIKI_IMAGES_PATH
- backend/cmd/wiki_worker/main.go: default /opt/rss2 en lugar de /app, leer env
- workers/ctranslator_worker.py: default CT2_MODEL_PATH /opt/rss2 en lugar de /app
- workers/llm_categorizer_worker.py: default LLM_MODEL_PATH /opt/rss2
- workers/{langdetect,simple_translator,translation_scheduler}.py: DB_HOST default 'localhost' en lugar de 'db' (hostname Docker)

SQL / esquema:
- poc/seed.sql: corregir lógica de auto-traducciones ES (id LIKE md5() era incorrecto)
- init-db/06-tags.sql: eliminar columna wiki_checked duplicada

Documentación y configuración:
- docs/DEPLOY_DEBIAN.md: usar ct2-transformers-converter (lo que usa el worker real)
- deploy/debian/env.example: agregar WIKI_IMAGES_PATH
- deploy/debian/systemd/rss2-cluster.service: agregar HF_HOME faltante
- deploy/debian/install.sh: comparación numérica correcta de versión Go
- scripts/generate_secure_credentials.sh: ruta CT2_MODEL_PATH corregida
- frontend/nginx.conf: advertencia de que es configuración Docker legacy
- docs/QUICKSTART_LLM.md: nota de deprecación Docker
- README.md: renombrar backend-go a backend en diagrama

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
SITO 2026-03-31 08:57:01 +02:00
parent 10f0555c46
commit d9ea78b8a7
17 changed files with 55 additions and 21 deletions

View file

@ -34,7 +34,7 @@ Internet (RSS/Atom)
│ │
│ qdrant-worker ──→ Qdrant
backend-go (API REST :8080)
backend (API REST :8080)
nginx (:8001)

View file

@ -109,7 +109,11 @@ func main() {
api := r.Group("/api")
{
// Serve static images downloaded by wiki_worker
api.StaticFS("/wiki-images", gin.Dir("/app/data/wiki_images", false))
wikiImagesDir := os.Getenv("WIKI_IMAGES_PATH")
if wikiImagesDir == "" {
wikiImagesDir = "/opt/rss2/data/wiki_images"
}
api.StaticFS("/wiki-images", gin.Dir(wikiImagesDir, false))
api.POST("/auth/login", handlers.Login)
api.POST("/auth/register", handlers.Register)

View file

@ -24,7 +24,7 @@ var (
pool *pgxpool.Pool
sleepInterval = 30
batchSize = 50
imagesDir = "/app/data/wiki_images"
imagesDir = "/opt/rss2/data/wiki_images"
)
type WikiSummary struct {
@ -210,6 +210,9 @@ func processTag(ctx context.Context, tag Tag) {
}
func main() {
if val := os.Getenv("WIKI_IMAGES_PATH"); val != "" {
imagesDir = val
}
if val := os.Getenv("WIKI_SLEEP"); val != "" {
if sleep, err := fmt.Sscanf(val, "%d", &sleepInterval); err == nil && sleep > 0 {
sleepInterval = sleep

View file

@ -81,6 +81,7 @@ MAX_FEEDS_PER_URL=5
# --- Wiki Worker ---
WIKI_SLEEP=10
WIKI_IMAGES_PATH=/opt/rss2/data/wiki_images
# --- Topics ---
TOPICS_SLEEP=10

View file

@ -33,7 +33,17 @@ apt-get install -y --no-install-recommends \
libpq-dev
# Go (rss-ingestor-go requiere Go 1.25)
if ! command -v go &>/dev/null || [[ "$(go version | awk '{print $3}' | tr -d 'go')" < "1.25" ]]; then
_need_go=false
if ! command -v go &>/dev/null; then
_need_go=true
else
_gover=$(go version | awk '{print $3}' | tr -d 'go')
IFS='.' read -ra _gv <<< "$_gover"
if [[ "${_gv[0]:-0}" -lt 1 ]] || [[ "${_gv[0]:-0}" -eq 1 && "${_gv[1]:-0}" -lt 25 ]]; then
_need_go=true
fi
fi
if [[ "$_need_go" == "true" ]]; then
info "Instalando Go 1.25..."
GO_VERSION="1.25.0"
ARCH=$(dpkg --print-architecture)

View file

@ -11,6 +11,7 @@ WorkingDirectory=/opt/rss2/src
EnvironmentFile=/opt/rss2/.env
Environment=EVENT_DIST_THRESHOLD=0.35
Environment=EMB_MODEL=sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
Environment=HF_HOME=/opt/rss2/hf_cache
ExecStart=/opt/rss2/venv/bin/python -m workers.cluster_worker
Restart=always
RestartSec=10

View file

@ -76,16 +76,23 @@ python3 -m venv /opt/rss2/venv
/opt/rss2/venv/bin/pip install ctranslate2 transformers sentencepiece
# Convertir modelo NLLB-200 a formato CTranslate2 (tarda 10-30 min)
/opt/rss2/venv/bin/python - <<'EOF'
from ctranslate2.converters import OpusMTConverter
converter = OpusMTConverter("facebook/nllb-200-distilled-600M")
converter.convert("/opt/rss2/models/nllb-ct2", quantization="int8", force=True)
print("Modelo convertido OK en /opt/rss2/models/nllb-ct2")
EOF
mkdir -p /opt/rss2/models/nllb-ct2
HF_HOME=/opt/rss2/hf_cache \
/opt/rss2/venv/bin/ct2-transformers-converter \
--model facebook/nllb-200-distilled-600M \
--output_dir /opt/rss2/models/nllb-ct2 \
--quantization int8 \
--force
# Verificar que se generó correctamente
ls /opt/rss2/models/nllb-ct2/model.bin && echo "Modelo OK"
```
> El modelo ocupa ~600 MB convertido. Si la descarga de HuggingFace falla, exporta
> `HF_ENDPOINT=https://huggingface.co` o usa un mirror.
> El modelo ocupa ~600 MB convertido. Si la descarga de HuggingFace falla:
> `export HF_ENDPOINT=https://huggingface.co` antes del comando de conversión.
> **Nota:** El worker convierte el modelo automáticamente si no lo encuentra,
> pero hacerlo a mano evita que el primer arranque tarde 30 minutos.
### 4. Ejecutar el instalador

View file

@ -1,4 +1,8 @@
# 🚀 Guía Rápida: Sistema LLM Categorizer
> **NOTA:** Esta guía está basada en la configuración Docker original. En el despliegue
> Debian nativo, el LLM categorizer se controla con `systemctl start rss2-categorizer`
> y el modelo se coloca en `/opt/rss2/models/llm` (var `LLM_MODEL_PATH`).
# Guía Rápida: Sistema LLM Categorizer
## ✅ Estado Actual

View file

@ -1,3 +1,8 @@
# =============================================================================
# NOTA: Este nginx.conf es la configuración del contenedor Docker del frontend.
# NO usar para despliegue nativo Debian; usar deploy/debian/nginx.conf
# =============================================================================
events {
worker_connections 1024;
}

View file

@ -14,4 +14,3 @@ ALTER TABLE tags ADD COLUMN IF NOT EXISTS wiki_summary TEXT;
ALTER TABLE tags ADD COLUMN IF NOT EXISTS wiki_url TEXT;
ALTER TABLE tags ADD COLUMN IF NOT EXISTS image_path TEXT;
ALTER TABLE tags ADD COLUMN IF NOT EXISTS wiki_checked BOOLEAN DEFAULT FALSE;
ALTER TABLE tags ADD COLUMN IF NOT EXISTS wiki_checked BOOLEAN DEFAULT FALSE;

View file

@ -189,7 +189,7 @@ ON CONFLICT (noticia_id, lang_to) DO NOTHING;
-- Traducciones "self" para artículos en español (necesarias para que aparezcan en filtro translated_only)
INSERT INTO traducciones (noticia_id,lang_from,lang_to,titulo_trad,resumen_trad,status,vectorized)
SELECT id,'es','es',titulo,resumen,'done',false
FROM noticias WHERE lang='es' AND id LIKE md5('poc-es-%')
FROM noticias WHERE lang='es'
ON CONFLICT (noticia_id, lang_to) DO NOTHING;
-- ---------------------------------------------------------------------------

View file

@ -129,7 +129,7 @@ URL_DISCOVERY_BATCH_SIZE=10
MAX_FEEDS_PER_URL=5
# CTranslate2 / AI Model Paths
CT2_MODEL_PATH=/app/models/nllb-ct2
CT2_MODEL_PATH=/opt/rss2/models/nllb-ct2
CT2_DEVICE=cuda
CT2_COMPUTE_TYPE=int8_float16
UNIVERSAL_MODEL=facebook/nllb-200-distilled-600M

View file

@ -62,7 +62,7 @@ BATCH_SIZE = _env_int("TRANSLATOR_BATCH", 8)
MAX_SRC_TOKENS = _env_int("MAX_SRC_TOKENS", 512)
MAX_NEW_TOKENS = _env_int("MAX_NEW_TOKENS", 512)
CT2_MODEL_PATH = _env_str("CT2_MODEL_PATH", "/app/models/nllb-ct2")
CT2_MODEL_PATH = _env_str("CT2_MODEL_PATH", "/opt/rss2/models/nllb-ct2")
CT2_DEVICE = _env_str("CT2_DEVICE", "cpu")
CT2_COMPUTE_TYPE = _env_str("CT2_COMPUTE_TYPE", "int8")
UNIVERSAL_MODEL = _env_str("UNIVERSAL_MODEL", "facebook/nllb-200-distilled-600M")

View file

@ -22,7 +22,7 @@ logging.basicConfig(
LOG = logging.getLogger(__name__)
DB_CONFIG = {
'host': os.getenv('DB_HOST', 'db'),
'host': os.getenv('DB_HOST', 'localhost'),
'port': int(os.getenv('DB_PORT', 5432)),
'database': os.getenv('DB_NAME', 'rss'),
'user': os.getenv('DB_USER', 'rss'),

View file

@ -41,7 +41,7 @@ DB_CONFIG = {
# Configuración del worker
BATCH_SIZE = int(os.environ.get("LLM_BATCH_SIZE", 10)) # 10 noticias por lote
SLEEP_IDLE = int(os.environ.get("LLM_SLEEP_IDLE", 30)) # segundos
MODEL_PATH = os.environ.get("LLM_MODEL_PATH", "/app/models/llm")
MODEL_PATH = os.environ.get("LLM_MODEL_PATH", "/opt/rss2/models/llm")
GPU_SPLIT = os.environ.get("LLM_GPU_SPLIT", "auto")
MAX_SEQ_LEN = int(os.environ.get("LLM_MAX_SEQ_LEN", 4096))
CACHE_MODE = os.environ.get("LLM_CACHE_MODE", "FP16")

View file

@ -22,7 +22,7 @@ logging.basicConfig(
logger = logging.getLogger(__name__)
DB_CONFIG = {
'host': os.getenv('DB_HOST', 'db'),
'host': os.getenv('DB_HOST', 'localhost'),
'port': int(os.getenv('DB_PORT', 5432)),
'database': os.getenv('DB_NAME', 'rss'),
'user': os.getenv('DB_USER', 'rss'),

View file

@ -22,7 +22,7 @@ logging.basicConfig(
logger = logging.getLogger(__name__)
DB_CONFIG = {
'host': os.getenv('DB_HOST', 'db'),
'host': os.getenv('DB_HOST', 'localhost'),
'port': int(os.getenv('DB_PORT', 5432)),
'database': os.getenv('DB_NAME', 'rss'),
'user': os.getenv('DB_USER', 'rss'),