changes to the website

jlimolina 2025-10-12 17:51:14 +02:00
parent 046a5ff369
commit a9c1e16bdd
6 changed files with 283 additions and 131 deletions

translation_worker.py

@@ -1,7 +1,9 @@
# translation_worker.py
import os
import time
import logging
import contextlib
import re
from typing import List, Optional
import psycopg2
@@ -62,6 +64,12 @@ def _env_str(name: str, *fallbacks: str, default: Optional[str] = None) -> Optional[str]:
return val
return default
def _env_bool(name: str, default: bool = False) -> bool:
val = os.environ.get(name)
if val is None:
return default
return str(val).strip().lower() in ("1", "true", "yes", "y", "on")
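# Illustrative behaviour (doctest-style; "SOME_FLAG" is a hypothetical variable):
#   >>> os.environ["SOME_FLAG"] = "Yes"
#   >>> _env_bool("SOME_FLAG")
#   True
#   >>> _env_bool("SOME_UNSET_FLAG", default=True)   # unset -> falls back to default
#   True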
TARGET_LANGS = _env_list("TARGET_LANGS", "TRANSLATE_TO", default="es")
BATCH_SIZE = _env_int("BATCH", "TRANSLATOR_BATCH", "TRANSLATE_BATCH", default=8)
ENQUEUE_MAX = _env_int("ENQUEUE", "TRANSLATOR_ENQUEUE", "TRANSLATE_ENQUEUE", default=200)
@@ -69,8 +77,8 @@ SLEEP_IDLE = _env_float("SLEEP_IDLE", "TRANSLATOR_SLEEP_IDLE", "TRANSLATE_SLEE
DEVICE_CFG = (_env_str("DEVICE", default="auto") or "auto").lower() # 'cpu' | 'cuda' | 'auto'
# Token limits (tune these if you hit OOM)
MAX_SRC_TOKENS = _env_int("MAX_SRC_TOKENS", default=384)
MAX_NEW_TOKENS = _env_int("MAX_NEW_TOKENS", default=192)
MAX_SRC_TOKENS = _env_int("MAX_SRC_TOKENS", default=512)
MAX_NEW_TOKENS = _env_int("MAX_NEW_TOKENS", default=256)
# ---- Beams: default 2 for titles and 1 for body; honors NUM_BEAMS if only that one is set ----
def _beams_from_env():
@@ -91,6 +99,50 @@ NUM_BEAMS_TITLE, NUM_BEAMS_BODY = _beams_from_env()
# Default model: NLLB 600M (swap in facebook/nllb-200-1.3B if you want the 1.3B)
UNIVERSAL_MODEL = _env_str("UNIVERSAL_MODEL", default="facebook/nllb-200-distilled-600M")
# ---------- Sentence-based chunking (for long articles) ----------
# On by default, to avoid sequences longer than the model's limit
CHUNK_BY_SENTENCES = _env_bool("CHUNK_BY_SENTENCES", default=True)
CHUNK_MAX_TOKENS = _env_int("CHUNK_MAX_TOKENS", default=900) # <= model limit minus a margin
CHUNK_OVERLAP_SENTS = _env_int("CHUNK_OVERLAP_SENTS", default=0) # 0 or 1
# Common abbreviations and a temporary marker
_ABBR = ("Sr", "Sra", "Dr", "Dra", "Ing", "Lic", "pág", "etc")
_ABBR_MARK = "§" # should not appear in normal text
def _protect_abbrev(text: str) -> str:
# Single-letter initials: "E.", "A."
t = re.sub(r"\b([A-ZÁÉÍÓÚÑÄÖÜ])\.", r"\1" + _ABBR_MARK, text)
# Listed abbreviations (case-insensitive)
pat = r"\b(?:" + "|".join(map(re.escape, _ABBR)) + r")\."
t = re.sub(pat, lambda m: m.group(0)[:-1] + _ABBR_MARK, t, flags=re.IGNORECASE)
return t
def _restore_abbrev(text: str) -> str:
return text.replace(_ABBR_MARK, ".")
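# Illustrative round trip (doctest-style): listed abbreviations and single-letter
# initials are masked with the marker and restored afterwards:
#   >>> _protect_abbrev("La Sra. García y E. Gómez")
#   'La Sra§ García y E§ Gómez'
#   >>> _restore_abbrev('La Sra§ García y E§ Gómez')
#   'La Sra. García y E. Gómez'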
# Split regex WITHOUT variable-width look-behind:
# - Splits after [.!?…] when whitespace follows and a new sentence starts (capital letter, quote, parenthesis, digit)
# - Or on a double newline
_SENT_SPLIT_RE = re.compile(
r'(?<=[\.!\?…])\s+(?=["\(\[A-ZÁÉÍÓÚÑÄÖÜ0-9])|(?:\n{2,})'
)
def split_into_sentences(text: str) -> List[str]:
text = (text or "").strip()
if not text:
return []
protected = _protect_abbrev(text)
parts = [p.strip() for p in _SENT_SPLIT_RE.split(protected) if p and p.strip()]
parts = [_restore_abbrev(p) for p in parts]
# Merge very short pieces into the previous one for better coherence
merged: List[str] = []
for p in parts:
if merged and len(p) < 40:
merged[-1] = merged[-1] + " " + p
else:
merged.append(p)
return merged
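# Illustrative split (doctest-style): "Dr." does not end a sentence, and both
# resulting pieces are long enough (>= 40 chars) to escape the merge step:
#   >>> split_into_sentences(
#   ...     "El Dr. Pérez llegó muy temprano a la estación central. "
#   ...     "Saludó amablemente a todos los presentes en el andén."
#   ... )
#   ['El Dr. Pérez llegó muy temprano a la estación central.',
#    'Saludó amablemente a todos los presentes en el andén.']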
# ---------- Mapping languages to NLLB codes ----------
NLLB_LANG = {
# basics
@@ -171,8 +223,8 @@ def fetch_pending_batch(conn, lang_to: str, batch_size: int):
rows = cur.fetchall()
if rows:
ids = [r["tr_id"] for r in rows]
with conn.cursor() as cur:
cur.execute("UPDATE traducciones SET status='processing' WHERE id = ANY(%s)", (ids,))
with conn.cursor() as cur2:
cur2.execute("UPDATE traducciones SET status='processing' WHERE id = ANY(%s)", (ids,))
conn.commit()
return rows
@@ -277,8 +329,14 @@ def get_universal_components():
_load_model_on(torch.device("cpu"))
return _TOKENIZER, _MODEL, _DEVICE
# ---------- Utilities ----------
# ---------- Tokenization / chunking utilities ----------
def _safe_src_len(tokenizer) -> int:
model_max = getattr(tokenizer, "model_max_length", 1024) or 1024
# margin for special tokens/noise
return min(MAX_SRC_TOKENS, int(model_max) - 16)
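# Worked example: NLLB tokenizers typically report model_max_length=1024, so with
# the new default MAX_SRC_TOKENS=512 this returns min(512, 1024 - 16) = 512; a
# hypothetical MAX_SRC_TOKENS=2048 would be clamped to 1008.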
def _token_chunks(tokenizer, text: str, max_tokens: int) -> List[str]:
"""Troceo simple por tokens (fallback)"""
if not text:
return []
ids = tokenizer.encode(text, add_special_tokens=False)
@@ -293,8 +351,8 @@ def _token_chunks(tokenizer, text: str, max_tokens: int) -> List[str]:
return chunks
def _norm(s: str) -> str:
import re
return re.sub(r"\W+", "", (s or "").lower()).strip()
import re as _re
return _re.sub(r"\W+", "", (s or "").lower()).strip()
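# Illustrative: _norm("¡Hola, mundo!") == _norm("Hola mundo") == "holamundo";
# this is what lets process_batch detect "translations" identical to the source.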
def _forced_bos_id(tokenizer: AutoTokenizer, model: AutoModelForSeq2SeqLM, tgt_code: str) -> int:
"""
@@ -344,8 +402,13 @@ def _forced_bos_id(tokenizer: AutoTokenizer, model: AutoModelForSeq2SeqLM, tgt_code: str) -> int:
LOG.warning("No pude resolver lang code id para '%s'. Uso fallback (eos/bos).", tgt_code)
return getattr(tokenizer, "eos_token_id", None) or getattr(tokenizer, "bos_token_id", None) or 0
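# Illustrative (assuming an NLLB tokenizer): the target code is itself a vocabulary
# token, so the happy path reduces to something like
#   >>> tok.convert_tokens_to_ids("spa_Latn")   # a valid vocab id, not <unk>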
# ---------- Base translation ----------
@torch.inference_mode()
def translate_text(src_lang: str, tgt_lang: str, text: str, num_beams: int = 1, _tries: int = 0) -> str:
"""
Translates one text (token-chunked if it exceeds MAX_SRC_TOKENS).
Used for titles and as the core routine for article chunks.
"""
if not text or not text.strip():
return ""
@@ -361,13 +424,14 @@ def translate_text(src_lang: str, tgt_lang: str, text: str, num_beams: int = 1, _tries: int = 0) -> str:
forced_bos = _forced_bos_id(tok, mdl, tgt_code)
parts = _token_chunks(tok, text, MAX_SRC_TOKENS)
safe_len = _safe_src_len(tok)
parts = _token_chunks(tok, text, safe_len)
outs: List[str] = []
try:
autocast_ctx = torch.amp.autocast("cuda", dtype=torch.float16) if device.type == "cuda" else contextlib.nullcontext()
for p in parts:
enc = tok(p, return_tensors="pt", truncation=True, max_length=MAX_SRC_TOKENS)
enc = tok(p, return_tensors="pt", truncation=True, max_length=safe_len)
enc = {k: v.to(device) for k, v in enc.items()}
gen_kwargs = dict(
@@ -377,7 +441,6 @@ def translate_text(src_lang: str, tgt_lang: str, text: str, num_beams: int = 1, _tries: int = 0) -> str:
do_sample=False,
use_cache=False, # ↓ memory
)
# Avoids the warning when num_beams = 1
if int(num_beams) > 1:
gen_kwargs["early_stopping"] = True
@@ -411,6 +474,102 @@ def translate_text(src_lang: str, tgt_lang: str, text: str, num_beams: int = 1, _tries: int = 0) -> str:
return translate_text(src_lang, tgt_lang, text, num_beams=num_beams, _tries=_tries + 1)
raise
# ---------- Sentence-based chunking for articles ----------
def _sent_token_len(tokenizer, sent: str) -> int:
return len(tokenizer(sent, add_special_tokens=False).input_ids)
def _pack_sentences_to_token_chunks(
tokenizer, sentences: List[str], max_tokens: int, overlap_sents: int = 0
) -> List[List[str]]:
chunks: List[List[str]] = []
cur: List[str] = []
cur_tokens = 0
for s in sentences:
slen = _sent_token_len(tokenizer, s)
if slen > max_tokens:
# If a single sentence exceeds the limit, split it by tokens as a last resort
ids = tokenizer(s, add_special_tokens=False).input_ids
step = max_tokens
for i in range(0, len(ids), step):
sub = tokenizer.decode(ids[i:i+step], skip_special_tokens=True)
if cur:
chunks.append(cur)
cur = []
cur_tokens = 0
chunks.append([sub])
continue
if cur_tokens + slen <= max_tokens:
cur.append(s); cur_tokens += slen
else:
if cur:
chunks.append(cur)
if overlap_sents > 0 and len(cur) > 0:
overlap = cur[-overlap_sents:]
cur = overlap + [s]
cur_tokens = sum(_sent_token_len(tokenizer, x) for x in cur)
else:
cur = [s]; cur_tokens = slen
if cur:
chunks.append(cur)
return chunks
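# Illustrative packing (hypothetical token counts): with max_tokens=10 and
# sentences measuring 4, 5 and 8 tokens, the first two share a chunk
# (4 + 5 = 9 <= 10) and the third starts a new one: [[s1, s2], [s3]].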
def _smart_concatenate(parts: List[str], tail_window: int = 120) -> str:
"""Une partes evitando duplicados obvios en el borde (heurística ligera)."""
if not parts:
return ""
out = parts[0]
for nxt in parts[1:]:
tail = out[-tail_window:]
cut = 0
for k in range(min(len(tail), len(nxt)), 20, -1):
if nxt.startswith(tail[-k:]):
cut = k
break
out += ("" if cut == 0 else nxt[cut:]) if nxt else ""
return out
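# Illustrative join (doctest-style): a seam repetition longer than 20 characters
# is dropped once; otherwise the next part is appended with a separating space:
#   >>> _smart_concatenate(["uno dos tres cuatro cinco seis.",
#   ...                     "tres cuatro cinco seis. Y sigue aquí."])
#   'uno dos tres cuatro cinco seis. Y sigue aquí.'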
def translate_article_full(
src_lang: str,
tgt_lang: str,
text: str,
num_beams: int,
) -> str:
"""
Translates a complete article:
- Splits into sentences (no variable-width look-behind)
- Packs them into chunks <= the token limit
- Translates chunk by chunk (via translate_text)
- Joins the results with a heuristic that avoids duplicates at the seams
"""
if not text or not text.strip():
return ""
if not CHUNK_BY_SENTENCES:
# Fast path: a single pass with internal truncation
return translate_text(src_lang, tgt_lang, text, num_beams=num_beams)
tok, _, _ = get_universal_components()
safe_len = _safe_src_len(tok)
max_chunk_tokens = min(CHUNK_MAX_TOKENS, safe_len)
sents = split_into_sentences(text)
if not sents:
return ""
chunks_sents = _pack_sentences_to_token_chunks(
tok, sents, max_tokens=max_chunk_tokens, overlap_sents=CHUNK_OVERLAP_SENTS
)
translated_parts: List[str] = []
for group in chunks_sents:
chunk_text = " ".join(group)
translated = translate_text(src_lang, tgt_lang, chunk_text, num_beams=num_beams)
translated_parts.append(translated)
return _smart_concatenate([p for p in translated_parts if p])
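# Usage sketch (hypothetical variable names, assuming a configured environment):
#   body_es = translate_article_full("en", "es", article_text, num_beams=NUM_BEAMS_BODY)
# splits article_text into sentence-packed chunks, translates each one through
# translate_text, and returns the re-joined target-language text.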
# ---------- Batch processing ----------
def process_batch(conn, rows):
for r in rows:
tr_id = r["tr_id"]
@@ -426,9 +585,10 @@ def process_batch(conn, rows):
continue
try:
# Different beam counts: better title quality at a controlled VRAM cost
# Titles: short, translated directly (raise the beams if you like)
title_tr = translate_text(lang_from, lang_to, title, num_beams=NUM_BEAMS_TITLE) if title else ""
body_tr = translate_text(lang_from, lang_to, body, num_beams=NUM_BEAMS_BODY) if body else ""
# Body/summary: full article with sentence-based chunking
body_tr = translate_article_full(lang_from, lang_to, body, num_beams=NUM_BEAMS_BODY) if body else ""
# Si la "traducción" es igual al original, déjala vacía
if _norm(title_tr) == _norm(title):
@@ -443,8 +603,10 @@ def process_batch(conn, rows):
def main():
LOG.info(
"Arrancando worker de traducción (NLLB). TARGET_LANGS=%s, BATCH=%s, ENQUEUE=%s, DEVICE=%s, BEAMS(title/body)=%s/%s",
TARGET_LANGS, BATCH_SIZE, ENQUEUE_MAX, DEVICE_CFG, NUM_BEAMS_TITLE, NUM_BEAMS_BODY
"Arrancando worker de traducción (NLLB). TARGET_LANGS=%s, BATCH=%s, ENQUEUE=%s, DEVICE=%s, "
"BEAMS(title/body)=%s/%s, CHUNK_BY_SENTENCES=%s, CHUNK_MAX_TOKENS=%s, OVERLAP_SENTS=%s",
TARGET_LANGS, BATCH_SIZE, ENQUEUE_MAX, DEVICE_CFG, NUM_BEAMS_TITLE, NUM_BEAMS_BODY,
CHUNK_BY_SENTENCES, CHUNK_MAX_TOKENS, CHUNK_OVERLAP_SENTS
)
# Preload the model once so memory is reserved cleanly
get_universal_components()