eptm_dashboard/scripts/cron_tick.py
Julien Balet 6d1b7c8044 retenue: avis PDF + notices Escada + mapping profession
- nouvelle page /retenue : sélection apprenti, date retenue, date du
  problème, motif (3 cases mutex), branche (autocomplete + saisie libre
  depuis NotesExamen), remarque. Génération PDF basée sur le template
  AcroForm officiel, séparation des 3 widgets Date partagés en 3 champs
  distincts pour ne remplir que celui de la case cochée. Téléchargement
  ou envoi par email (3 destinataires).
- profession : nouveau champ ApprentiFiche.profession, dérivé du préfixe
  de classe via mapping configurable dans Paramètres
  ("AUTOMAT" → "Automaticien CFC" par défaut). Section dédiée avec
  classes orphelines détectées automatiquement.
- notices Escada : nouvelle table Notice (apprenti, titre, remarque,
  date, status). Checkbox "Ajouter automatiquement une notice sur
  Escada" sur /retenue qui crée une entrée pending. Bloc dédié sur
  /escada listant les pending, bouton "Pousser les notices" qui lance
  scripts/push_notices.py (Playwright : navigation Classes → Élèves →
  Notices → Ajouter, fill date / titre / remarque, vérification post-save,
  suppression DB si OK, marquage failed sinon). Nouveau task_kind "push_notices"
  dans le cron pour exécution planifiée.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-11 11:24:15 +02:00

467 lines
17 KiB
Python
Executable file

#!/usr/bin/env python3
"""Cron tick — appelé toutes les minutes par cron OS via docker exec.
Logique :
1. Lire tous les CronJob.enabled = True
2. Pour chaque job, calculer s'il est dû maintenant (basé sur schedule + last_run_at)
3. Si dû :
- Si déjà running (PID alive) → kill -9 (politique "kill")
- Lancer subprocess (push, sync, ou push_then_sync)
- Stream stdout+stderr dans un fichier log dédié
- Mettre à jour last_status / last_message / last_run_at / last_pid
4. Envoyer notification Telegram selon notify_on
Le timezone effectif est celui du container (Europe/Zurich attendu).
Le script est idempotent et safe : si déjà passé pour un job aujourd'hui,
ne le relance pas. Si trop tard (>5 min après le slot), saute (un cron raté
n'est pas rattrapé).
Usage :
python3 /app/scripts/cron_tick.py
python3 /app/scripts/cron_tick.py --dry-run # affiche ce qui serait lancé
python3 /app/scripts/cron_tick.py --job <id> # force un job précis
"""
from __future__ import annotations
import argparse
import json
import os
import signal
import subprocess
import sys
import time
from datetime import datetime, timedelta
from pathlib import Path
_ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(_ROOT))
from src.db import CronJob, get_session, init_db, get_engine # noqa: E402
from src.notifier import notify_job_result # noqa: E402
# Cron log directory: defaults to /logs/cron (persistent bind mount),
# overridable through the CRON_LOG_DIR environment variable.
LOG_DIR = Path(os.getenv("CRON_LOG_DIR", "/logs/cron"))
try:
    LOG_DIR.mkdir(parents=True, exist_ok=True)
except Exception:
    # Fallback when /logs does not exist (e.g. running outside the container).
    LOG_DIR = _ROOT / "logs" / "cron"
    LOG_DIR.mkdir(parents=True, exist_ok=True)

# Worker scripts launched by the jobs (siblings of this file).
SCRIPT_SYNC = _ROOT / "scripts" / "sync_esacada.py"
SCRIPT_PUSH = _ROOT / "scripts" / "push_to_escada.py"
SCRIPT_PUSH_NOTICES = _ROOT / "scripts" / "push_notices.py"
DATA_DIR = _ROOT / "data"
# Marker file written by run_imports.py once the DB imports are finished.
RUN_IMPORTS_RESULT = DATA_DIR / "sync_last_result.json"
# How long to wait for run_imports after sync_esacada.py has exited.
RUN_IMPORTS_TIMEOUT_SEC = 15 * 60  # 15 min — generous but reasonable

# Slot tolerance: if the OS cron misses a minute (load spike, restart),
# a run is still accepted inside the window [HH:MM, HH:MM + 5 min].
_SLOT_TOLERANCE_MIN = 5
# Weekday names as used in "weekly" schedule_value strings; index matches
# datetime.weekday() (0 = Monday).
_DAY_NAMES = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
# ── Schedule logic ────────────────────────────────────────────────────────────
def _is_due(job: CronJob, now: datetime) -> bool:
    """Return True when *job* should be launched at *now*.

    Supported schedule kinds:
    - "interval": schedule_value is a number of minutes between runs;
    - "daily":    schedule_value is "HH:MM";
    - "weekly":   schedule_value is "MON,WED,FRI:HH:MM".

    Any malformed or missing schedule_value yields False instead of raising,
    so one bad row cannot crash the whole tick.
    """
    if not job.enabled:
        return False
    last = job.last_run_at
    if job.schedule_kind == "interval":
        # schedule_value = number of minutes between two runs
        try:
            minutes = int(job.schedule_value)
        except (TypeError, ValueError):
            return False
        if minutes < 1:
            return False
        if last is None:
            return True
        return (now - last).total_seconds() >= minutes * 60
    if job.schedule_kind == "daily":
        # schedule_value = "HH:MM"
        return _due_time_of_day(job.schedule_value, last, now)
    if job.schedule_kind == "weekly":
        # schedule_value = "DAYS:HH:MM", e.g. "MON,WED,FRI:07:30"
        try:
            days_part, time_part = job.schedule_value.split(":", 1)
        except (ValueError, AttributeError):
            # AttributeError: schedule_value is None. BUGFIX — previously
            # uncaught here (only ValueError was), crashing the tick loop.
            return False
        days = {d.strip().upper() for d in days_part.split(",") if d.strip()}
        today_name = _DAY_NAMES[now.weekday()]
        if today_name not in days:
            return False
        return _due_time_of_day(time_part, last, now)
    # Unknown schedule_kind → never due.
    return False
def _due_time_of_day(hhmm: str, last: datetime | None, now: datetime) -> bool:
    """Return True when *now* falls inside the window [HH:MM, HH:MM + tolerance]
    and the job has not already run today at or after that slot.

    A malformed *hhmm* (wrong format, None) yields False rather than raising.
    """
    try:
        hour_s, minute_s = hhmm.split(":")
        target = now.replace(
            hour=int(hour_s), minute=int(minute_s), second=0, microsecond=0
        )
    except (ValueError, AttributeError):
        return False
    elapsed = (now - target).total_seconds()
    inside_window = 0 <= elapsed <= _SLOT_TOLERANCE_MIN * 60
    if not inside_window:
        return False
    already_ran_today = (
        last is not None and last.date() == now.date() and last >= target
    )
    return not already_ran_today
# ── Process management ───────────────────────────────────────────────────────
def _pid_alive(pid: int | None) -> bool:
    """Return True iff *pid* designates a process that currently exists.

    Signal 0 performs an existence check without delivering anything.
    Any failure (no such process, no permission, invalid pid) counts as
    "not alive".
    """
    if not pid:
        return False
    try:
        os.kill(pid, 0)
    except Exception:
        return False
    return True
def _kill_pid(pid: int) -> None:
    """Hard-kill *pid*: SIGKILL its whole process group first, then the
    single process as a fallback. Silently gives up if both fail."""
    strategies = (
        lambda: os.killpg(os.getpgid(pid), signal.SIGKILL),
        lambda: os.kill(pid, signal.SIGKILL),
    )
    for attempt in strategies:
        try:
            attempt()
        except Exception:
            continue
        return
# ── Build command lines ──────────────────────────────────────────────────────
def _classes_args(job: CronJob) -> list[str]:
    """Class filter to forward to the worker scripts.

    Decodes job.classes_json (a JSON array of class names). An empty list
    means "all classes"; "ALL", empty/blank values and anything that is not
    a JSON list also collapse to the empty list.
    """
    raw = (job.classes_json or "").strip()
    if raw in ("", "ALL"):
        return []
    try:
        parsed = json.loads(raw)
    except Exception:
        return []
    if not isinstance(parsed, list):
        return []
    return [str(item) for item in parsed]
def _build_sync_cmd(job: CronJob) -> list[str]:
    """Build the sync_esacada.py command line for *job*.

    Note the inversion: the job stores what TO sync (sync_abs, sync_bn, …)
    while the script takes what to SKIP, hence the ``if not`` tests.
    (Idiom fix: the one-line ``if x: stmt`` compounds were expanded per PEP 8.)
    """
    cmd = [sys.executable, str(SCRIPT_SYNC), "--sync-all", *_classes_args(job)]
    if not job.sync_abs:
        cmd.append("--skip-abs")
    if not job.sync_bn:
        cmd.append("--skip-bn")
    if not job.sync_notes:
        cmd.append("--skip-notes")
    if not job.sync_fiches:
        cmd.append("--skip-fiches")
    if job.force_abs:
        cmd.append("--force-abs")
    return cmd
def _build_push_cmd(job: CronJob) -> list[str]:
    """Command line for push_to_escada.py (the script takes no per-job options;
    *job* is accepted for signature symmetry with _build_sync_cmd)."""
    return [sys.executable, str(SCRIPT_PUSH)]
def _wait_for_run_imports(log_fp, mtime_before: float) -> tuple[bool, str, dict]:
    """Wait for the detached run_imports.py to finish after a sync step.

    Once sync_esacada.py exits, run_imports.py keeps running as a detached
    subprocess. This polls until sync_last_result.json is refreshed (mtime
    newer than *mtime_before*), then logs the detailed per-category results.

    Args:
        log_fp: open text file the progress and results are written to.
        mtime_before: mtime of sync_last_result.json captured before the sync
            step (0.0 when the file did not exist yet).

    Returns:
        (ok, summary_message, raw_result_dict); ok is False on timeout,
        unreadable marker file, or when the result contains errors.
    """
    log_fp.write("\n━━━ Attente run_imports (subprocess détaché) ━━━\n")
    log_fp.flush()
    deadline = time.time() + RUN_IMPORTS_TIMEOUT_SEC
    poll_count = 0
    while time.time() < deadline:
        if RUN_IMPORTS_RESULT.exists() and RUN_IMPORTS_RESULT.stat().st_mtime > mtime_before:
            break
        poll_count += 1
        # Log a heartbeat every 30 polls (~1 min) to show we are still waiting.
        if poll_count % 30 == 0:
            elapsed = int(time.time() - (deadline - RUN_IMPORTS_TIMEOUT_SEC))
            log_fp.write(f"[poll] {elapsed}s écoulés, en attente…\n")
            log_fp.flush()
        time.sleep(2)
    else:
        # while/else: reached only when the deadline expired without a break.
        log_fp.write("⚠ TIMEOUT — sync_last_result.json non mis à jour dans le délai\n")
        log_fp.flush()
        return False, "run_imports timeout (>15min sans résultat)", {}
    # Read the result marker written by run_imports.py.
    try:
        result = json.loads(RUN_IMPORTS_RESULT.read_text(encoding="utf-8"))
    except Exception as e:
        log_fp.write(f"⚠ Impossible de lire sync_last_result.json : {e}\n")
        return False, f"sync_last_result.json illisible : {e}", {}
    # "or []" guards against explicit nulls in the JSON.
    res_abs = result.get("res_abs", []) or []
    res_bn = result.get("res_bn", []) or []
    res_notes = result.get("res_notes", []) or []
    res_matu = result.get("res_matu", []) or []
    errors = result.get("errors", []) or []
    ts = result.get("timestamp", "?")
    log_fp.write(f"run_imports terminé (timestamp {ts})\n")
    log_fp.write(f" Absences PDF importés : {len(res_abs)}\n")
    log_fp.write(f" Bulletins de notes : {len(res_bn)}\n")
    log_fp.write(f" Notes d'examen : {len(res_notes)}\n")
    log_fp.write(f" Notes Matu : {len(res_matu)}\n")
    log_fp.write(f" Erreurs : {len(errors)}\n")
    # Itemize each non-empty category.
    if res_abs:
        log_fp.write("\n Détail absences :\n")
        for r in res_abs:
            log_fp.write(f" - {r}\n")
    if res_bn:
        log_fp.write("\n Détail BN :\n")
        for r in res_bn:
            log_fp.write(f" - {r}\n")
    if res_notes:
        log_fp.write("\n Détail notes d'examen :\n")
        for r in res_notes:
            log_fp.write(f" - {r}\n")
    if res_matu:
        log_fp.write("\n Détail Matu :\n")
        for r in res_matu:
            log_fp.write(f" - {r}\n")
    if errors:
        log_fp.write("\n ❌ ERREURES :\n" if False else "\n ❌ ERREURS :\n")
        for err in errors:
            log_fp.write(f" - {err}\n")
    log_fp.flush()
    # Any error makes the whole step a failure, but counts are still reported.
    if errors:
        summary = (
            f"Imports : abs={len(res_abs)}, bn={len(res_bn)}, "
            f"notes={len(res_notes)}, matu={len(res_matu)}, "
            f"{len(errors)} erreur(s)"
        )
        return False, summary, result
    summary = (
        f"Imports OK : abs={len(res_abs)}, bn={len(res_bn)}, "
        f"notes={len(res_notes)}, matu={len(res_matu)}"
    )
    return True, summary, result
# ── Run a single step ────────────────────────────────────────────────────────
def _run_step(cmd: list[str], log_fp, title: str) -> tuple[int, int]:
    """Run *cmd* to completion, streaming combined stdout+stderr into *log_fp*.

    The child gets its own session (process group) so the caller can later
    SIGKILL the whole tree at once.

    Returns:
        (returncode, pid) of the finished child.
    """
    log_fp.write(f"\n━━━ {title} ━━━\n")
    log_fp.write(f"$ {' '.join(cmd)}\n")
    log_fp.flush()
    child = subprocess.Popen(
        cmd,
        stdout=log_fp,
        stderr=subprocess.STDOUT,
        env={**os.environ, "PYTHONUNBUFFERED": "1"},
        start_new_session=True,
    )
    exit_code = child.wait()
    log_fp.write(f"\n[exit code = {exit_code}]\n")
    log_fp.flush()
    return exit_code, child.pid
# ── Run a job (full lifecycle) ───────────────────────────────────────────────
def run_job(job: CronJob, sess) -> None:
    """Execute one job end-to-end and record the outcome.

    Lifecycle: kill a still-running previous instance ("kill" policy), mark
    the job as running in DB, run the step(s) implied by task_kind while
    streaming output to a dedicated log file, store the final status, then
    send a best-effort notification.

    Args:
        job: the CronJob row to execute (mutated and committed via *sess*).
        sess: open DB session owning *job*.
    """
    started = datetime.now()
    ts = started.strftime("%Y%m%d-%H%M%S")
    log_path = LOG_DIR / f"job_{job.id}_{ts}.log"
    # "Kill" policy: if the DB says the job is still running and that PID is
    # alive, kill -9 the previous run before starting a new one.
    if job.last_status == "running" and _pid_alive(job.last_pid):
        _kill_pid(job.last_pid or 0)
        # Leave a trace of the kill at the top of the fresh log file.
        with log_path.open("w", encoding="utf-8") as fp:
            fp.write(f"[{started}] PID précédent {job.last_pid} kill -9 (politique kill)\n")
    # Mark as running; committed immediately so other readers see the state.
    job.last_run_at = started
    job.last_status = "running"
    job.last_message = ""
    job.last_log_path = str(log_path)
    job.last_pid = None
    sess.commit()
    overall_rc = 0  # 0 = success so far; first failing step's code otherwise
    final_msg = ""
    last_pid: int | None = None
    imports_result: dict = {}  # detailed run_imports payload for the notifier
    try:
        with log_path.open("a", encoding="utf-8") as fp:
            fp.write(f"\n=== Job #{job.id} '{job.name}' — démarré {started.isoformat(timespec='seconds')} ===\n")
            fp.write(f"task_kind={job.task_kind} classes={job.classes_json}\n")
            # Translate task_kind into an ordered list of (title, command).
            steps: list[tuple[str, list[str]]] = []
            if job.task_kind == "push":
                steps = [("Push Escada", _build_push_cmd(job))]
            elif job.task_kind == "sync":
                steps = [("Sync Escada", _build_sync_cmd(job))]
            elif job.task_kind == "push_then_sync":
                steps = [
                    ("Push Escada", _build_push_cmd(job)),
                    ("Sync Escada", _build_sync_cmd(job)),
                ]
            elif job.task_kind == "push_notices":
                steps = [("Push notices", [sys.executable, str(SCRIPT_PUSH_NOTICES)])]
            else:
                fp.write(f"[error] task_kind inconnu : {job.task_kind}\n")
                overall_rc = 99
                final_msg = f"task_kind invalide : {job.task_kind}"
            for title, cmd in steps:
                # Capture the run_imports marker mtime BEFORE a sync step
                # (used afterwards to detect that run_imports.py finished).
                is_sync = title.startswith("Sync")
                mtime_before = (
                    RUN_IMPORTS_RESULT.stat().st_mtime
                    if is_sync and RUN_IMPORTS_RESULT.exists() else 0.0
                )
                rc, pid = _run_step(cmd, fp, title)
                last_pid = pid
                if rc != 0:
                    overall_rc = rc
                    final_msg = f"{title} a échoué (code {rc})"
                    break
                # After a sync step, wait for the detached run_imports to end.
                if is_sync:
                    imports_ok, imports_msg, imports_result = _wait_for_run_imports(fp, mtime_before)
                    if not imports_ok:
                        overall_rc = 2
                        final_msg = imports_msg
                        break
                    # Keep the imports summary for the final notification.
                    final_msg = imports_msg
            if overall_rc == 0 and not final_msg:
                final_msg = f"{len(steps)} étape(s) OK"
    except Exception as e:
        overall_rc = 1
        final_msg = f"Exception : {e}"
        # Best-effort: append the traceback to the log if we still can.
        try:
            with log_path.open("a", encoding="utf-8") as fp:
                import traceback
                fp.write("\n[fatal exception]\n")
                fp.write(traceback.format_exc())
        except Exception:
            pass
    # Final state in DB.
    finished = datetime.now()
    duration = (finished - started).total_seconds()
    job.last_status = "ok" if overall_rc == 0 else "fail"
    job.last_message = final_msg
    job.last_pid = last_pid
    sess.commit()
    # Notification — a notify failure must never fail the job itself.
    try:
        notify_job_result(
            job_name=job.name,
            status=job.last_status,
            message=final_msg,
            log_path=log_path,
            chat_id=job.notify_chat_id or None,
            notify_on=job.notify_on,
            notify_level=getattr(job, "notify_level", "normal"),
            duration_s=duration,
            details=imports_result,
            job_options={
                "task_kind": job.task_kind,
                "sync_abs": job.sync_abs,
                "sync_bn": job.sync_bn,
                "sync_notes": job.sync_notes,
                "sync_fiches": job.sync_fiches,
            },
        )
    except Exception as e:
        try:
            with log_path.open("a", encoding="utf-8") as fp:
                fp.write(f"\n[notify] échec envoi : {e}\n")
        except Exception:
            pass
# ── Main loop ────────────────────────────────────────────────────────────────
def main() -> int:
    """Entry point: run every due CronJob once (or one forced job).

    Returns a process exit code: 0 when everything ran (or nothing was due),
    1 on init error or unknown forced job id.
    """
    parser = argparse.ArgumentParser(description="Cron tick — exécute les CronJob dûs.")
    parser.add_argument("--dry-run", action="store_true",
                        help="Liste les jobs dûs sans les exécuter.")
    parser.add_argument("--job", type=int, default=None,
                        help="Force l'exécution d'un job par son id (ignore schedule).")
    args = parser.parse_args()
    # Make sure the tables exist before querying them.
    try:
        init_db()
    except Exception as e:
        print(f"[cron_tick] init_db error : {e}", file=sys.stderr)
        return 1
    sess = get_session()
    now = datetime.now()
    try:
        if args.job is not None:
            # Forced mode: run exactly one job, ignoring its schedule.
            job = sess.get(CronJob, args.job)
            if job is None:
                print(f"[cron_tick] job id={args.job} introuvable")
                return 1
            print(f"[cron_tick] forçage job #{job.id} '{job.name}'")
            if args.dry_run:
                return 0
            run_job(job, sess)
            return 0
        from sqlalchemy import select as _sel
        jobs = sess.execute(_sel(CronJob).where(CronJob.enabled == True)).scalars().all()  # noqa: E712
        due_jobs = [j for j in jobs if _is_due(j, now)]
        if not due_jobs:
            return 0  # nothing to do
        # BUGFIX: a separator was missing between the timestamp and the
        # count, producing garbled lines like "…T11:24:153 job(s) dûs".
        print(f"[cron_tick] {now.isoformat(timespec='seconds')} — {len(due_jobs)} job(s) dûs")
        for job in due_jobs:
            print(f" - #{job.id} '{job.name}' kind={job.task_kind} schedule={job.schedule_kind}:{job.schedule_value}")
            if args.dry_run:
                continue
            run_job(job, sess)
        return 0
    finally:
        sess.close()


if __name__ == "__main__":
    sys.exit(main())