diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..e69de29 diff --git a/README.md b/README.md new file mode 100644 index 0000000..e69de29 diff --git a/build-and-push.sh b/build-and-push.sh new file mode 100644 index 0000000..4de6431 --- /dev/null +++ b/build-and-push.sh @@ -0,0 +1,281 @@ +#!/usr/bin/env bash +set -euo pipefail + +# ============================================================================ +# build-and-push.sh +# Location: repo root (e.g. /docker/develop/backup-monitoring) +# +# Purpose: +# - Automatic version bump: +# 1 = patch, 2 = minor, 3 = major, t = test +# - Test builds: only update :dev (no commit/tag) +# - Release builds: update version.txt, commit, tag, push (to the current branch) +# - Build & push Docker images for each service under ./compose/* +# - Preflight checks: Docker daemon up, logged in to registry, valid names/tags +# - Summary: show all images + tags built and pushed +# - Branch visibility: +# - Shows currently checked out branch (authoritative) +# - Reads .last-branch for info (if present) when BRANCH is not set +# - Writes the current branch back to .last-branch at the end +# +# Usage: +# BRANCH= ./build-and-push.sh [bump] # BRANCH is optional; informative only +# ./build-and-push.sh [bump] +# If [bump] is omitted, you will be prompted (default = t). +# +# Tagging rules: +# - Release build (1/2/3): push :, :dev, :latest +# - Test build (t): push only :dev (no :latest, no version tag) +# ============================================================================ + +DOCKER_REGISTRY="gitea.oskamp.info" +DOCKER_NAMESPACE="ivooskamp" + +VERSION_FILE="version.txt" +START_VERSION="v0.1.0" +COMPOSE_DIR="containers" +LAST_BRANCH_FILE=".last-branch" # stored in repo root + +# --- Input: prompt if missing ------------------------------------------------ +BUMP="${1:-}" +if [[ -z "${BUMP}" ]]; then + echo "Select bump type: [1] patch, [2] minor, [3] major, [t] test (default: t)" + read -r BUMP + BUMP="${BUMP:-t}" +fi + +if [[ "$BUMP" != "1" && "$BUMP" != "2" && "$BUMP" != "3" && "$BUMP" != "t" ]]; then + echo "[ERROR] Unknown bump type '$BUMP' (use 1, 2, 3, or t)." + exit 1 +fi + +# --- Helpers ----------------------------------------------------------------- +read_version() { + if [[ -f "$VERSION_FILE" ]]; then + tr -d ' \t\n\r' < "$VERSION_FILE" + else + echo "$START_VERSION" + fi +} + +write_version() { + echo "$1" > "$VERSION_FILE" +} + +bump_version() { + local cur="$1" + local kind="$2" + local core="${cur#v}" + IFS='.' read -r MA MI PA <<< "$core" + case "$kind" in + 1) PA=$((PA + 1));; + 2) MI=$((MI + 1)); PA=0;; + 3) MA=$((MA + 1)); MI=0; PA=0;; + *) echo "[ERROR] Unknown bump kind"; exit 1;; + esac + echo "v${MA}.${MI}.${PA}" +} + +check_docker_ready() { + if ! docker info >/dev/null 2>&1; then + echo "[ERROR] Docker daemon not reachable. Is Docker running and do you have permission to use it?" + exit 1 + fi +} + +ensure_registry_login() { + local cfg="${HOME}/.docker/config.json" + if [[ ! -f "$cfg" ]]; then + echo "[ERROR] Docker config not found at $cfg. Please login: docker login ${DOCKER_REGISTRY}" + exit 1 + fi + if ! grep -q "\"${DOCKER_REGISTRY}\"" "$cfg"; then + echo "[ERROR] No registry auth found for ${DOCKER_REGISTRY}. Please run: docker login ${DOCKER_REGISTRY}" + exit 1 + fi +} + +validate_repo_component() { + local comp="$1" + if [[ ! "$comp" =~ ^[a-z0-9]+([._-][a-z0-9]+)*$ ]]; then + echo "[ERROR] Invalid repository component '$comp'." 
+ echo " Must match: ^[a-z0-9]+([._-][a-z0-9]+)*$ (lowercase, digits, ., _, - as separators)." + return 1 + fi +} + +validate_tag() { + local tag="$1" + local len="${#tag}" + if (( len < 1 || len > 128 )); then + echo "[ERROR] Invalid tag length ($len). Must be between 1 and 128 characters." + return 1 + fi + if [[ ! "$tag" =~ ^[A-Za-z0-9_][A-Za-z0-9_.-]*$ ]]; then + echo "[ERROR] Invalid tag '$tag'. Allowed: [A-Za-z0-9_.-], must start with alphanumeric or underscore." + return 1 + fi +} + +# --- Preflight --------------------------------------------------------------- +if [[ ! -d ".git" ]]; then + echo "[ERROR] Not a git repository (.git missing)." + exit 1 +fi + +if [[ ! -d "$COMPOSE_DIR" ]]; then + echo "[ERROR] '$COMPOSE_DIR' directory missing. Expected ./compose// with a Dockerfile." + exit 1 +fi + +check_docker_ready +ensure_registry_login +validate_repo_component "$DOCKER_NAMESPACE" + +# Detect currently checked out branch (authoritative for this script) +DETECTED_BRANCH="$(git branch --show-current 2>/dev/null || true)" +if [[ -z "$DETECTED_BRANCH" ]]; then + DETECTED_BRANCH="$(git symbolic-ref --quiet --short HEAD 2>/dev/null || true)" +fi +if [[ -z "$DETECTED_BRANCH" ]]; then + # Try to derive from upstream + UPSTREAM_REF_DERIVED="$(git rev-parse --abbrev-ref --symbolic-full-name @{u} 2>/dev/null || true)" + if [[ -n "$UPSTREAM_REF_DERIVED" ]]; then + DETECTED_BRANCH="${UPSTREAM_REF_DERIVED#origin/}" + fi +fi +if [[ -z "$DETECTED_BRANCH" ]]; then + DETECTED_BRANCH="main" +fi + +# Optional signals: BRANCH env and .last-branch (informational only) +ENV_BRANCH="${BRANCH:-}" +LAST_BRANCH_FILE_PATH="$(pwd)/$LAST_BRANCH_FILE" +LAST_BRANCH_VALUE="" +if [[ -z "$ENV_BRANCH" && -f "$LAST_BRANCH_FILE_PATH" ]]; then + LAST_BRANCH_VALUE="$(tr -d ' \t\n\r' < "$LAST_BRANCH_FILE_PATH")" +fi + +UPSTREAM_REF="$(git rev-parse --abbrev-ref --symbolic-full-name @{u} 2>/dev/null || echo "origin/$DETECTED_BRANCH")" +HEAD_SHA="$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")" + +echo "[INFO] Repo: $(pwd)" +echo "[INFO] Current branch: $DETECTED_BRANCH" +echo "[INFO] Upstream: $UPSTREAM_REF" +echo "[INFO] HEAD (sha): $HEAD_SHA" + +if [[ -n "$ENV_BRANCH" && "$ENV_BRANCH" != "$DETECTED_BRANCH" ]]; then + echo "[WARNING] BRANCH='$ENV_BRANCH' differs from checked out branch '$DETECTED_BRANCH'." + echo "[WARNING] This script does not switch branches; continuing on '$DETECTED_BRANCH'." +fi + +if [[ -n "$LAST_BRANCH_VALUE" && "$LAST_BRANCH_VALUE" != "$DETECTED_BRANCH" && -z "$ENV_BRANCH" ]]; then + echo "[INFO] .last-branch suggests '$LAST_BRANCH_VALUE', but current checkout is '$DETECTED_BRANCH'." + echo "[INFO] If you intended to build '$LAST_BRANCH_VALUE', switch branches first (use update-and-build.sh)." +fi + +# --- Versioning -------------------------------------------------------------- +CURRENT_VERSION="$(read_version)" +NEW_VERSION="$CURRENT_VERSION" +DO_TAG_AND_BUMP=true + +if [[ "$BUMP" == "t" ]]; then + echo "[INFO] Test build: keeping version $CURRENT_VERSION; will only update :dev." 
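+    # Illustrative example (assumed current version v1.4.2): bump 1 -> v1.4.3,
+    # 2 -> v1.5.0, 3 -> v2.0.0; t keeps v1.4.2 and only rebuilds and pushes the :dev tag.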
+ DO_TAG_AND_BUMP=false +else + NEW_VERSION="$(bump_version "$CURRENT_VERSION" "$BUMP")" + echo "[INFO] New version: $NEW_VERSION" +fi + +if $DO_TAG_AND_BUMP; then + validate_tag "$NEW_VERSION" + validate_tag "latest" +fi +validate_tag "dev" + +# --- Version update + VCS ops (release builds only) -------------------------- +if $DO_TAG_AND_BUMP; then + echo "[INFO] Writing $NEW_VERSION to $VERSION_FILE" + write_version "$NEW_VERSION" + + echo "[INFO] Git add + commit (branch: $DETECTED_BRANCH)" + git add "$VERSION_FILE" + git commit -m "Release $NEW_VERSION on branch $DETECTED_BRANCH (bump type $BUMP)" + + echo "[INFO] Git tag $NEW_VERSION" + git tag -a "$NEW_VERSION" -m "Release $NEW_VERSION" + + echo "[INFO] Git push + tags" + git push origin "$DETECTED_BRANCH" + git push --tags +else + echo "[INFO] Skipping commit/tagging (test build)." +fi + +# --- Build & push per service ------------------------------------------------ +shopt -s nullglob +services=( "$COMPOSE_DIR"/* ) +if [[ ${#services[@]} -eq 0 ]]; then + echo "[ERROR] No services found under $COMPOSE_DIR" + exit 1 +fi + +BUILT_IMAGES=() + +for svc_path in "${services[@]}"; do + [[ -d "$svc_path" ]] || continue + svc="$(basename "$svc_path")" + dockerfile="$svc_path/Dockerfile" + + validate_repo_component "$svc" + + if [[ ! -f "$dockerfile" ]]; then + echo "[WARNING] Skipping '${svc}': Dockerfile not found in ${svc_path}" + continue + fi + + IMAGE_BASE="${DOCKER_REGISTRY}/${DOCKER_NAMESPACE}/${svc}" + + if $DO_TAG_AND_BUMP; then + echo "============================================================" + echo "[INFO] Building ${svc} -> tags: ${NEW_VERSION}, dev, latest" + echo "============================================================" + docker build \ + -t "${IMAGE_BASE}:${NEW_VERSION}" \ + -t "${IMAGE_BASE}:dev" \ + -t "${IMAGE_BASE}:latest" \ + "$svc_path" + + docker push "${IMAGE_BASE}:${NEW_VERSION}" + docker push "${IMAGE_BASE}:dev" + docker push "${IMAGE_BASE}:latest" + + BUILT_IMAGES+=("${IMAGE_BASE}:${NEW_VERSION}" "${IMAGE_BASE}:dev" "${IMAGE_BASE}:latest") + else + echo "============================================================" + echo "[INFO] Test build ${svc} -> tag: dev" + echo "============================================================" + docker build -t "${IMAGE_BASE}:dev" "$svc_path" + docker push "${IMAGE_BASE}:dev" + BUILT_IMAGES+=("${IMAGE_BASE}:dev") + fi +done + +# --- Persist current branch to .last-branch ---------------------------------- +# (This helps script 1 to preselect next time, and is informative if you run script 2 standalone) +echo "$DETECTED_BRANCH" > "$LAST_BRANCH_FILE_PATH" + +# --- Summary ----------------------------------------------------------------- +echo "" +echo "============================================================" +echo "[SUMMARY] Build & push complete (branch: $DETECTED_BRANCH)" +if $DO_TAG_AND_BUMP; then + echo "[INFO] Release version: $NEW_VERSION" +else + echo "[INFO] Test build (no version bump)" +fi +echo "[INFO] Images pushed:" +for img in "${BUILT_IMAGES[@]}"; do + echo " - $img" +done +echo "============================================================" diff --git a/containers/backupchecks/Dockerfile b/containers/backupchecks/Dockerfile new file mode 100644 index 0000000..cee3009 --- /dev/null +++ b/containers/backupchecks/Dockerfile @@ -0,0 +1,20 @@ +FROM python:3.12-slim + +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +WORKDIR /app + +COPY requirements.txt ./requirements.txt + +RUN pip install --no-cache-dir -r requirements.txt + +COPY src ./src + +ENV 
PYTHONPATH=/app/src +ENV APP_PORT=8080 + +EXPOSE 8080 + +# Use the application factory from backend.app +CMD ["gunicorn", "-b", "0.0.0.0:8080", "backend.app:create_app()"] diff --git a/containers/backupchecks/requirements.txt b/containers/backupchecks/requirements.txt new file mode 100644 index 0000000..102ffd8 --- /dev/null +++ b/containers/backupchecks/requirements.txt @@ -0,0 +1,8 @@ +Flask==3.0.3 +Flask-SQLAlchemy==3.1.1 +Flask-Migrate==4.0.7 +Flask-Login==0.6.3 +psycopg2-binary==2.9.9 +python-dateutil==2.9.0.post0 +gunicorn==23.0.0 +requests==2.32.3 diff --git a/containers/backupchecks/src/backend/__init__.py b/containers/backupchecks/src/backend/__init__.py new file mode 100644 index 0000000..3c7ec4c --- /dev/null +++ b/containers/backupchecks/src/backend/__init__.py @@ -0,0 +1 @@ +# backend package init diff --git a/containers/backupchecks/src/backend/app/__init__.py b/containers/backupchecks/src/backend/app/__init__.py new file mode 100644 index 0000000..2b54a76 --- /dev/null +++ b/containers/backupchecks/src/backend/app/__init__.py @@ -0,0 +1,123 @@ +import os + +from flask import Flask, redirect, request, session, url_for +from flask_migrate import Migrate +from flask_login import current_user + +from .config import Config +from .database import db +from .models import User # noqa: F401 +from .auth import login_manager +from .auth.routes import auth_bp +from .main.routes import main_bp +from .migrations import run_migrations +from .auto_importer_service import start_auto_importer + + +def _get_today_ui_date() -> str: + """Return today's date (YYYY-MM-DD) in the configured UI timezone. + + Falls back to Europe/Amsterdam if no setting is available. + """ + from datetime import datetime + + try: + from zoneinfo import ZoneInfo + except Exception: + ZoneInfo = None # type: ignore + + tz_name = "Europe/Amsterdam" + try: + from .models import SystemSettings + + settings = SystemSettings.query.first() + if settings and getattr(settings, "ui_timezone", None): + tz_name = settings.ui_timezone + except Exception: + tz_name = "Europe/Amsterdam" + + if ZoneInfo: + try: + tz = ZoneInfo(tz_name) + return datetime.now(tz).date().isoformat() + except Exception: + pass + + return datetime.utcnow().date().isoformat() + + +def create_app(): + base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) + templates_dir = os.path.join(base_dir, "templates") + static_dir = os.path.join(base_dir, "static") + + app = Flask(__name__, template_folder=templates_dir, static_folder=static_dir) + + config = Config() + app.config.from_object(config) + + db.init_app(app) + Migrate(app, db) + login_manager.init_app(app) + + app.register_blueprint(auth_bp) + app.register_blueprint(main_bp) + + @app.before_request + def _redirect_to_dashboard_on_first_open_each_day(): + """Redirect the first authenticated page view of the day to the dashboard. + + This ensures that when a user opens the site for the first time each day, + they land on the dashboard regardless of the bookmarked/deeplinked URL. + """ + + # Only for normal page loads. + if request.method != "GET": + return None + + # Do not interfere with static assets. + if request.path.startswith("/static"): + return None + + # Do not interfere with API calls. + if request.path.startswith("/api/"): + return None + + # Only for authenticated users. + try: + if not current_user or not current_user.is_authenticated: + return None + except Exception: + return None + + # Exempt auth blueprint routes and the dashboard itself. 
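+        # Endpoint names follow Flask's "<blueprint>.<view>" convention (e.g. "auth.login",
+        # "main.dashboard"), so the prefix check below leaves the whole auth flow untouched.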
+ endpoint = request.endpoint or "" + if endpoint.startswith("auth."): + return None + if endpoint == "main.dashboard": + # Mark dashboard as seen for today. + session["daily_dashboard_seen"] = _get_today_ui_date() + return None + + today = _get_today_ui_date() + seen = (session.get("daily_dashboard_seen") or "").strip() + if seen != today: + session["daily_dashboard_seen"] = today + return redirect(url_for("main.dashboard")) + + return None + + @app.get("/health") + def health(): + return {"status": "ok"} + + with app.app_context(): + print("[app] Initializing database and running migrations...") + db.create_all() + run_migrations() + print("[app] Database initialization and migrations finished.") + + # Start automatic mail importer background thread + start_auto_importer(app) + + return app diff --git a/containers/backupchecks/src/backend/app/admin_logging.py b/containers/backupchecks/src/backend/app/admin_logging.py new file mode 100644 index 0000000..97e571e --- /dev/null +++ b/containers/backupchecks/src/backend/app/admin_logging.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from datetime import datetime, timedelta +from typing import Optional + +from flask_login import current_user + +from .database import db +from .models import AdminLog + + +def log_admin_event( + event_type: str, + message: str, + details: Optional[str] = None, + *, + username: Optional[str] = None, + commit: bool = True, +) -> None: + """Write an entry to the in-app AdminLog table. + + - This is the source for the /logging page in the website (not container logs). + - Retention: keep only the last 7 days. + - If commit=False, the caller is responsible for committing/rolling back. + """ + # Resolve username (prefer explicit) + if username is None: + try: + username = current_user.username if getattr(current_user, "is_authenticated", False) else None + except Exception: + username = None + + entry = AdminLog( + user=username, + event_type=(event_type or "event")[:64], + message=(message or "")[:2000], + details=details, + ) + db.session.add(entry) + + # Enforce retention: keep only the last 7 days + try: + cutoff = datetime.utcnow() - timedelta(days=7) + AdminLog.query.filter(AdminLog.created_at < cutoff).delete(synchronize_session=False) + except Exception: + # Never block the main action because of retention cleanup. + pass + + if not commit: + return + + try: + db.session.commit() + except Exception: + # If logging fails, do not raise and do not print to container logs. 
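+        # The rollback also discards the pending AdminLog row, keeping admin logging best-effort.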
+ db.session.rollback() diff --git a/containers/backupchecks/src/backend/app/auth/__init__.py b/containers/backupchecks/src/backend/app/auth/__init__.py new file mode 100644 index 0000000..b3a2866 --- /dev/null +++ b/containers/backupchecks/src/backend/app/auth/__init__.py @@ -0,0 +1,12 @@ +from flask_login import LoginManager + +from ..models import User + +login_manager = LoginManager() +login_manager.login_view = "auth.login" + +@login_manager.user_loader +def load_user(user_id: str): + if not user_id: + return None + return User.query.get(int(user_id)) diff --git a/containers/backupchecks/src/backend/app/auth/routes.py b/containers/backupchecks/src/backend/app/auth/routes.py new file mode 100644 index 0000000..f0d7149 --- /dev/null +++ b/containers/backupchecks/src/backend/app/auth/routes.py @@ -0,0 +1,156 @@ +import random +from functools import wraps + +from flask import ( + Blueprint, + render_template, + redirect, + url_for, + flash, + request, + session, +) +from flask_login import login_user, logout_user, login_required, current_user + +from ..database import db +from ..models import User + +auth_bp = Blueprint("auth", __name__, url_prefix="/auth") + + +def admin_exists() -> bool: + return db.session.query(User.id).filter_by(role="admin").first() is not None + + +def generate_captcha(): + a = random.randint(1, 9) + b = random.randint(1, 9) + question = f"{a} + {b}" + answer = str(a + b) + return question, answer + + +def captcha_required(func): + @wraps(func) + def wrapper(*args, **kwargs): + if request.method == "POST": + expected = session.get("captcha_answer") + provided = (request.form.get("captcha") or "").strip() + if not expected or provided != expected: + flash("Invalid captcha answer. Please try again.", "danger") + # regenerate captcha for re-render + question, answer = generate_captcha() + session["captcha_answer"] = answer + return render_template( + "auth/login.html", + captcha_question=question, + username=request.form.get("username", ""), + ) + return func(*args, **kwargs) + + return wrapper + + +@auth_bp.route("/login", methods=["GET", "POST"]) +@captcha_required +def login(): + if request.method == "GET": + if not admin_exists(): + return redirect(url_for("auth.initial_setup")) + + question, answer = generate_captcha() + session["captcha_answer"] = answer + return render_template("auth/login.html", captcha_question=question) + + # POST + username = (request.form.get("username") or "").strip() + password = request.form.get("password") or "" + + user = User.query.filter_by(username=username).first() + if not user or not user.check_password(password): + flash("Invalid username or password.", "danger") + question, answer = generate_captcha() + session["captcha_answer"] = answer + return render_template( + "auth/login.html", captcha_question=question, username=username + ) + + login_user(user) + try: + session["active_role"] = user.roles[0] + except Exception: + session["active_role"] = (getattr(user, "role", "viewer") or "viewer").split(",")[0].strip() or "viewer" + flash("You are now logged in.", "success") + return redirect(url_for("main.dashboard")) + + +@auth_bp.route("/logout") +@login_required +def logout(): + logout_user() + try: + session.pop("active_role", None) + except Exception: + pass + flash("You have been logged out.", "info") + return redirect(url_for("auth.login")) + + +@auth_bp.route("/initial-setup", methods=["GET", "POST"]) +def initial_setup(): + if admin_exists(): + flash("An admin user already exists. 
Please log in.", "info") + return redirect(url_for("auth.login")) + + if request.method == "POST": + username = (request.form.get("username") or "").strip() + password = request.form.get("password") or "" + confirm = request.form.get("confirm_password") or "" + + if not username or not password: + flash("Username and password are required.", "danger") + return render_template("auth/initial_setup.html", username=username) + + if password != confirm: + flash("Passwords do not match.", "danger") + return render_template("auth/initial_setup.html", username=username) + + existing = User.query.filter_by(username=username).first() + if existing: + flash("A user with this username already exists.", "danger") + return render_template("auth/initial_setup.html", username=username) + + user = User(username=username, role="admin") + user.set_password(password) + db.session.add(user) + db.session.commit() + + flash("Admin user created. You can now log in.", "success") + return redirect(url_for("auth.login")) + + return render_template("auth/initial_setup.html") + + +@auth_bp.route("/password-reset", methods=["GET", "POST"]) +def password_reset_request(): + # Simple placeholder implementation with captcha so the flow exists. + question, answer = generate_captcha() + session["captcha_answer"] = answer + + if request.method == "POST": + expected = session.get("captcha_answer") + provided = (request.form.get("captcha") or "").strip() + if not expected or provided != expected: + flash("Invalid captcha answer. Please try again.", "danger") + question, answer = generate_captcha() + session["captcha_answer"] = answer + return render_template( + "auth/password_reset_request.html", captcha_question=question + ) + + flash("Password reset functionality is not yet implemented.", "info") + return redirect(url_for("auth.login")) + + return render_template( + "auth/password_reset_request.html", captcha_question=question + ) diff --git a/containers/backupchecks/src/backend/app/auto_importer_service.py b/containers/backupchecks/src/backend/app/auto_importer_service.py new file mode 100644 index 0000000..2132172 --- /dev/null +++ b/containers/backupchecks/src/backend/app/auto_importer_service.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +import threading +import time +from datetime import datetime + +from .admin_logging import log_admin_event +from .mail_importer import MailImportError, run_auto_import +from .models import SystemSettings +from .object_persistence import persist_objects_for_approved_run + + +_AUTO_IMPORTER_THREAD_NAME = "auto_importer" + + +def start_auto_importer(app) -> None: + """Start the automatic importer background thread. + + The thread is lightweight and checks settings on every loop. + It only runs imports when enabled and the interval has elapsed. + """ + + # Avoid starting multiple threads if create_app() is called more than once. 
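+    # threading.enumerate() only sees threads of the current process, so this guard is
+    # per worker process; the fixed thread name makes repeated start calls idempotent.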
+ if any(t.name == _AUTO_IMPORTER_THREAD_NAME for t in threading.enumerate()): + return + + def _worker() -> None: + last_run_at: datetime | None = None + + while True: + try: + with app.app_context(): + settings = SystemSettings.query.first() + if settings is None: + time.sleep(10) + continue + + enabled = bool(getattr(settings, "auto_import_enabled", False)) + try: + interval_minutes = int(getattr(settings, "auto_import_interval_minutes", 15) or 15) + except (TypeError, ValueError): + interval_minutes = 15 + if interval_minutes < 1: + interval_minutes = 1 + + now = datetime.utcnow() + due = False + if enabled: + if last_run_at is None: + due = True + else: + due = (now - last_run_at).total_seconds() >= (interval_minutes * 60) + + if not due: + time.sleep(5) + continue + + # Always enforce fixed batch size for automatic import. + try: + total_fetched, new_messages, auto_approved, auto_approved_runs, errors = run_auto_import(settings) + except MailImportError as exc: + log_admin_event("mail_import_auto_error", f"Automatic mail import failed: {exc}") + last_run_at = now + time.sleep(5) + continue + except Exception as exc: + log_admin_event("mail_import_auto_error", f"Unexpected error during automatic mail import: {exc}") + last_run_at = now + time.sleep(5) + continue + + log_admin_event( + "mail_import_auto", + f"Automatic mail import finished. fetched={total_fetched}, new={new_messages}, auto_approved={auto_approved}, errors={len(errors)}", + ) + + # Persist objects for auto-approved runs (must not block the thread) + if auto_approved_runs: + persisted_objects = 0 + persisted_errors = 0 + for (customer_id, job_id, run_id, mail_message_id) in auto_approved_runs: + try: + persisted_objects += persist_objects_for_approved_run( + int(customer_id), int(job_id), int(run_id), int(mail_message_id) + ) + except Exception as exc: + persisted_errors += 1 + log_admin_event( + "object_persist_error", + f"Object persistence failed for auto-approved message {mail_message_id} (job {job_id}, run {run_id}): {exc}", + ) + + log_admin_event( + "object_persist_auto_approve", + f"Persisted objects for auto-approved runs (auto import). runs={len(auto_approved_runs)}, objects={persisted_objects}, errors={persisted_errors}", + ) + + # Store only a short summary of errors (the rest is already visible in the UI) + if errors: + log_admin_event( + "mail_import_auto_errors", + f"Automatic mail import finished with errors. count={len(errors)}", + details="\n".join(errors[:10]), + ) + + last_run_at = now + + except Exception: + # Never let the thread die. 
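+                # Best-effort recovery: log it inside a fresh app context, then fall
+                # through to the sleep below and continue the loop.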
+ try: + with app.app_context(): + log_admin_event("mail_import_auto_error", "Automatic importer thread recovered from an unexpected exception.") + except Exception: + pass + + time.sleep(5) + + t = threading.Thread(target=_worker, name=_AUTO_IMPORTER_THREAD_NAME, daemon=True) + t.start() + diff --git a/containers/backupchecks/src/backend/app/config.py b/containers/backupchecks/src/backend/app/config.py new file mode 100644 index 0000000..90bc6e5 --- /dev/null +++ b/containers/backupchecks/src/backend/app/config.py @@ -0,0 +1,18 @@ +import os + +class Config: + def __init__(self) -> None: + self.SECRET_KEY = os.environ.get("APP_SECRET_KEY", "dev-secret-key") + self.SQLALCHEMY_DATABASE_URI = self._build_database_uri() + self.SQLALCHEMY_TRACK_MODIFICATIONS = False + self.APP_ENV = os.environ.get("APP_ENV", "development") + self.APP_PORT = int(os.environ.get("APP_PORT", "8080")) + self.TIMEZONE = "Europe/Amsterdam" + + def _build_database_uri(self) -> str: + db_name = os.environ.get("POSTGRES_DB", "backup") + db_user = os.environ.get("POSTGRES_USER", "backup") + db_password = os.environ.get("POSTGRES_PASSWORD", "") + db_host = os.environ.get("DB_HOST", "localhost") + db_port = int(os.environ.get("DB_PORT", "5432")) + return f"postgresql+psycopg2://{db_user}:{db_password}@{db_host}:{db_port}/{db_name}" diff --git a/containers/backupchecks/src/backend/app/database.py b/containers/backupchecks/src/backend/app/database.py new file mode 100644 index 0000000..f0b13d6 --- /dev/null +++ b/containers/backupchecks/src/backend/app/database.py @@ -0,0 +1,3 @@ +from flask_sqlalchemy import SQLAlchemy + +db = SQLAlchemy() diff --git a/containers/backupchecks/src/backend/app/email_utils.py b/containers/backupchecks/src/backend/app/email_utils.py new file mode 100644 index 0000000..8263074 --- /dev/null +++ b/containers/backupchecks/src/backend/app/email_utils.py @@ -0,0 +1,127 @@ +from __future__ import annotations + +from email import policy +from email.parser import BytesParser +from email.utils import parseaddr +from typing import List, Optional, Tuple + + +def normalize_from_address(value: str | None) -> str | None: + """Normalize sender address for matching. + + - Extracts the email part from potential display-name formats (e.g. "Name "). + - Strips whitespace and lowercases. + - Returns None for empty / missing values. + """ + if not value: + return None + _name, addr = parseaddr(value) + addr = (addr or value).strip() + if not addr: + return None + return addr.lower() + + +def _decode_bytes_best_effort(data: bytes) -> str: + """Decode raw bytes into text using common encodings. + + Many HTML report attachments are UTF-16LE (often visible as null-bytes), + but UTF-8 is also common. We try a small set of encodings and fall back + to a safe replacement strategy. + """ + if not data: + return "" + + # Heuristic: UTF-16LE often contains many zero bytes. + if b"\x00" in data[:200]: + for enc in ("utf-16", "utf-16le", "utf-16be"): + try: + return data.decode(enc) + except Exception: + pass + + for enc in ("utf-8", "utf-8-sig", "windows-1252", "latin-1"): + try: + return data.decode(enc) + except Exception: + pass + + return data.decode("utf-8", errors="replace") + + +def extract_html_attachments_from_eml( + eml_bytes: bytes | None, + *, + max_attachments: int = 5, + max_bytes_per_attachment: int = 2_000_000, +) -> List[Tuple[Optional[str], str]]: + """Extract HTML attachment(s) from a raw RFC822 (.eml) message. + + Returns a list of (filename, html_text) tuples. 
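+    For example (illustrative): [("backup-report.html", "<html>...</html>")].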
+ The HTML is returned as plain text (no scripts executed). + """ + if not eml_bytes: + return [] + + try: + msg = BytesParser(policy=policy.default).parsebytes(eml_bytes) + except Exception: + return [] + + results: List[Tuple[Optional[str], str]] = [] + + for part in msg.walk(): + if len(results) >= max_attachments: + break + + # Skip multipart containers + if part.is_multipart(): + continue + + disposition = (part.get_content_disposition() or "").lower() + filename = part.get_filename() + content_type = (part.get_content_type() or "").lower() + + # Only inspect attachments (or parts that clearly look like report files) + looks_like_attachment = disposition == "attachment" or bool(filename) + if not looks_like_attachment: + continue + + is_html_type = content_type == "text/html" + is_html_name = isinstance(filename, str) and filename.lower().endswith(".html") + if not (is_html_type or is_html_name): + continue + + try: + payload = part.get_payload(decode=True) or b"" + except Exception: + continue + + if max_bytes_per_attachment and len(payload) > max_bytes_per_attachment: + # Safety: skip very large files + continue + + html_text = _decode_bytes_best_effort(payload).strip() + if not html_text: + continue + + results.append((filename, html_text)) + + return results + + +def extract_best_html_from_eml( + eml_bytes: bytes | None, + *, + max_bytes_per_attachment: int = 2_000_000, +) -> Optional[str]: + """Convenience: return the first HTML attachment content (if any).""" + items = extract_html_attachments_from_eml( + eml_bytes, + max_attachments=1, + max_bytes_per_attachment=max_bytes_per_attachment, + ) + if not items: + return None + _fn, html_text = items[0] + return html_text or None diff --git a/containers/backupchecks/src/backend/app/job_matching.py b/containers/backupchecks/src/backend/app/job_matching.py new file mode 100644 index 0000000..496a718 --- /dev/null +++ b/containers/backupchecks/src/backend/app/job_matching.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +from typing import Optional, Tuple + +from .email_utils import normalize_from_address +from .models import Job, MailMessage + + +def build_job_match_key(msg: MailMessage) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]: + """Build the canonical matching key for a message/job. + + Matching key (unique): + - from_address (normalized email address) + - backup_software (trimmed) + - backup_type (trimmed) + - job_name (trimmed) + """ + norm_from = normalize_from_address(getattr(msg, "from_address", None)) + + backup = (getattr(msg, "backup_software", None) or "").strip() or None + btype = (getattr(msg, "backup_type", None) or "").strip() or None + job_name = (getattr(msg, "job_name", None) or "").strip() or None + + return norm_from, backup, btype, job_name + + +def find_matching_job(msg: MailMessage) -> Optional[Job]: + """Find the single matching Job for a message using (From, Backup, Type, Job name). + + If multiple jobs match: + - If all belong to the same customer, pick the most recently updated. + - Otherwise, return None (ambiguous / unsafe). 
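+
+    Illustrative example: a message from "reports@vendor.example" with backup_software
+    "Veeam", backup_type "VM" and job_name "SQL01-Daily" only matches a Job whose four
+    key columns are equal after the same normalization (from_address lowercased, others trimmed).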
+ """ + norm_from, backup, btype, job_name = build_job_match_key(msg) + + q = Job.query + + if norm_from is None: + q = q.filter(Job.from_address.is_(None)) + else: + q = q.filter(Job.from_address == norm_from) + + if backup is None: + q = q.filter(Job.backup_software.is_(None)) + else: + q = q.filter(Job.backup_software == backup) + + if btype is None: + q = q.filter(Job.backup_type.is_(None)) + else: + q = q.filter(Job.backup_type == btype) + + if job_name is None: + q = q.filter(Job.job_name.is_(None)) + else: + q = q.filter(Job.job_name == job_name) + + # Do not load all matches into memory; we only need to know if there are + # zero, one, or multiple matches. + matches = q.order_by(Job.updated_at.desc(), Job.id.desc()).limit(2).all() + + if len(matches) > 1: + customer_ids = {m.customer_id for m in matches} + if len(customer_ids) == 1: + return matches[0] + return None + + if len(matches) == 1: + return matches[0] + + return None diff --git a/containers/backupchecks/src/backend/app/mail_importer.py b/containers/backupchecks/src/backend/app/mail_importer.py new file mode 100644 index 0000000..20ef471 --- /dev/null +++ b/containers/backupchecks/src/backend/app/mail_importer.py @@ -0,0 +1,673 @@ +from __future__ import annotations + +from datetime import datetime, timezone, timedelta +from typing import List +import socket +from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeoutError +from urllib.parse import urlparse + +import requests + +from sqlalchemy import func + +from . import db +from .models import MailMessage, SystemSettings, Job, JobRun +from .parsers import parse_mail_message +from .email_utils import normalize_from_address, extract_best_html_from_eml +from .job_matching import find_matching_job + + +GRAPH_TOKEN_URL_TEMPLATE = "https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token" +GRAPH_BASE_URL = "https://graph.microsoft.com/v1.0" + + +class MailImportError(Exception): + pass + + +def _get_access_token(settings: SystemSettings) -> str: + if not settings.graph_tenant_id or not settings.graph_client_id or not settings.graph_client_secret: + raise MailImportError("Graph credentials are not fully configured.") + + token_url = GRAPH_TOKEN_URL_TEMPLATE.format(tenant_id=settings.graph_tenant_id) + data = { + "client_id": settings.graph_client_id, + "client_secret": settings.graph_client_secret, + "grant_type": "client_credentials", + "scope": "https://graph.microsoft.com/.default", + } + + resp = requests.post(token_url, data=data, timeout=15) + if resp.status_code != 200: + raise MailImportError(f"Failed to obtain access token from Microsoft Graph (status {resp.status_code}).") + + payload = resp.json() + access_token = payload.get("access_token") + if not access_token: + raise MailImportError("Access token not present in Graph response.") + + return access_token + + +def _build_auth_headers(access_token: str) -> dict: + return { + "Authorization": f"Bearer {access_token}", + "Accept": "application/json", + } + + +def _can_resolve_hostname(hostname: str, timeout_seconds: int = 2) -> bool: + """Best-effort DNS preflight. + + requests' connect timeout does not cover DNS resolution time. + When DNS is slow/unavailable, a sync gunicorn worker can hit WORKER TIMEOUT. + We therefore preflight resolution and skip move operations if it hangs. + """ + + if not hostname: + return False + + try: + # signal.* cannot be used outside the main thread (gunicorn worker threads / schedulers). + # Use a small worker thread and a hard timeout instead. 
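+        # socket.getaddrinfo has no timeout argument of its own, so fut.result(timeout=...)
+        # bounds the wait; leaving the "with" block still joins the worker via shutdown(wait=True).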
+ with ThreadPoolExecutor(max_workers=1) as ex: + fut = ex.submit(socket.getaddrinfo, hostname, 443) + fut.result(timeout=float(timeout_seconds)) + return True + except FuturesTimeoutError: + return False + except Exception: + return False + + + +def _fetch_eml_bytes(mailbox: str, msg_id: str, access_token: str) -> bytes | None: + """Fetch raw RFC822 (.eml) content for a message id via Microsoft Graph.""" + if not mailbox or not msg_id: + return None + + url = f"{GRAPH_BASE_URL}/users/{mailbox}/messages/{msg_id}/$value" + headers = { + "Authorization": f"Bearer {access_token}", + "Accept": "application/octet-stream", + } + try: + resp = requests.get(url, headers=headers, timeout=30) + except Exception: + return None + if resp.status_code != 200: + return None + return resp.content or None + + +def _resolve_folder_id(settings: SystemSettings, access_token: str, folder_path: str) -> str: + """Resolve a displayName path like 'Inbox/Backup Database' to a folder id.""" + if not settings.graph_mailbox: + raise MailImportError("Mailbox address is not configured.") + + folder_path = (folder_path or "").strip() + if not folder_path: + raise MailImportError("Folder path is empty.") + + segments = [seg.strip() for seg in folder_path.split("/") if seg.strip()] + if not segments: + raise MailImportError("Folder path is empty.") + + headers = _build_auth_headers(access_token) + mailbox = settings.graph_mailbox + + retention_days = getattr(settings, "ingest_eml_retention_days", 7) + try: + retention_days = int(retention_days) if retention_days is not None else 7 + except (ValueError, TypeError): + retention_days = 7 + if retention_days not in (0, 7, 14): + retention_days = 7 + + # Fetch top-level mailFolders (Inbox, Archive, etc.) + url = f"{GRAPH_BASE_URL}/users/{mailbox}/mailFolders?$top=100" + resp = requests.get(url, headers=headers, timeout=20) + if resp.status_code != 200: + raise MailImportError(f"Failed to list top-level mail folders (status {resp.status_code}).") + + data = resp.json() + folders = data.get("value", []) + + def _find_by_name(items, name): + name_lower = name.lower() + for item in items: + if str(item.get("displayName", "")).lower() == name_lower: + return item + return None + + current_folder = _find_by_name(folders, segments[0]) + if not current_folder: + raise MailImportError(f"Folder '{segments[0]}' not found in mailbox.") + + # Walk down childFolders if there are more segments + for segment in segments[1:]: + parent_id = current_folder.get("id") + url = f"{GRAPH_BASE_URL}/users/{mailbox}/mailFolders/{parent_id}/childFolders?$top=100" + resp = requests.get(url, headers=headers, timeout=20) + if resp.status_code != 200: + raise MailImportError( + f"Failed to list child folders for '{current_folder.get('displayName')}' (status {resp.status_code})." + ) + children = resp.json().get("value", []) + next_folder = _find_by_name(children, segment) + if not next_folder: + raise MailImportError( + f"Folder '{segment}' not found under '{current_folder.get('displayName')}'." 
+ ) + current_folder = next_folder + + folder_id = current_folder.get("id") + if not folder_id: + raise MailImportError("Resolved folder does not have an id.") + return folder_id + + +def _parse_graph_datetime(value: str | None): + if not value: + return None + try: + dt = datetime.fromisoformat(value.replace("Z", "+00:00")) + return dt.astimezone(timezone.utc).replace(tzinfo=None) + except Exception: + return None + + +def _store_messages(settings: SystemSettings, messages): + total = 0 + new_count = 0 + + auto_approved = 0 + auto_approved_runs = [] + + for msg in messages: + total += 1 + graph_id = msg.get("id") + if not graph_id: + continue + + existing = MailMessage.query.filter_by(message_id=graph_id).first() + if existing: + continue + + from_info = msg.get("from") or {} + email_info = from_info.get("emailAddress") or {} + from_addr = normalize_from_address(email_info.get("address")) + + subject = msg.get("subject") + received_raw = msg.get("receivedDateTime") + received_at = _parse_graph_datetime(received_raw) + + # Decide which body field to populate based on Graph response + body = msg.get("body") or {} + body_content = body.get("content") + body_type = (body.get("contentType") or "").lower() + + html_body = None + text_body = None + if isinstance(body_content, str): + if body_type == "html": + html_body = body_content + else: + text_body = body_content + + mail = MailMessage( + message_id=graph_id, + from_address=from_addr, + subject=subject, + received_at=received_at, + html_body=html_body, + text_body=text_body, + location="inbox", + eml_blob=msg.get("_eml_bytes"), + eml_stored_at=(datetime.utcnow() if msg.get("_eml_bytes") else None), + ) + + # Some systems send empty bodies and put the actual report in an HTML attachment. + # If we have raw EML bytes and no body content, extract the first HTML attachment + # and use it as the HTML body so parsers and the inbox preview can work. + if not (mail.html_body or mail.text_body) and mail.eml_blob: + attachment_html = extract_best_html_from_eml(mail.eml_blob) + if attachment_html: + mail.html_body = attachment_html + + # IMPORTANT: Persist first so mail.id exists. + # Object extraction stores rows keyed by mail_message_id; without an id, + # objects are silently skipped. + db.session.add(mail) + db.session.flush() + + # Immediately run parsers so Inbox / Jobs can show parsed metadata + objects. + try: + parse_mail_message(mail) + except Exception as exc: + # Do not break the import if parsing fails; just record it on the message + if hasattr(mail, "parse_result"): + mail.parse_result = "error" + if hasattr(mail, "parse_error"): + mail.parse_error = str(exc)[:500] + + # Auto-approve if this job was already approved before (unique match across customers). + # Mirrors the behavior of the Inbox "Re-parse all" auto-approve. + try: + if ( + getattr(mail, "location", "inbox") == "inbox" + and getattr(mail, "parse_result", None) == "ok" + and not bool(getattr(mail, "approved", False)) + ): + job = find_matching_job(mail) + if job: + # Respect per-job flags. 
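+                    # The hasattr() guards keep this tolerant of Job models that do not
+                    # define the "active" / "auto_approve" attributes.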
+ if hasattr(job, "active") and not bool(job.active): + raise Exception("job not active") + if hasattr(job, "auto_approve") and not bool(job.auto_approve): + raise Exception("job auto_approve disabled") + + # Create a new run for the known job + run = JobRun( + job_id=job.id, + mail_message_id=mail.id, + run_at=mail.received_at, + status=mail.overall_status or None, + missed=False, + ) + + # Optional storage metrics (for capacity graphs) + if hasattr(run, "storage_used_bytes") and hasattr(mail, "storage_used_bytes"): + run.storage_used_bytes = mail.storage_used_bytes + if hasattr(run, "storage_capacity_bytes") and hasattr(mail, "storage_capacity_bytes"): + run.storage_capacity_bytes = mail.storage_capacity_bytes + if hasattr(run, "storage_free_bytes") and hasattr(mail, "storage_free_bytes"): + run.storage_free_bytes = mail.storage_free_bytes + if hasattr(run, "storage_free_percent") and hasattr(mail, "storage_free_percent"): + run.storage_free_percent = mail.storage_free_percent + + db.session.add(run) + db.session.flush() # ensure run.id is available + + # Update mail message to reflect approval + mail.job_id = job.id + if hasattr(mail, "approved"): + mail.approved = True + if hasattr(mail, "approved_at"): + mail.approved_at = datetime.utcnow() + if hasattr(mail, "location"): + mail.location = "history" + + auto_approved += 1 + auto_approved_runs.append((job.customer_id, job.id, run.id, mail.id)) + except Exception as exc: + db.session.rollback() + raise MailImportError(f"Failed to store mail messages in database: {exc}") + + return total, new_count, auto_approved, auto_approved_runs + + +def run_auto_import(settings: SystemSettings): + """Execute the automatic import from Microsoft Graph. + + Automatic import always uses a fixed batch size (50) and respects the + configured cutoff date. Messages older than the cutoff date are not fetched + and therefore remain in the inbox. 
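+    (Illustrative example) with auto_import_cutoff_date set to 2024-06-01, only messages
+    whose receivedDateTime is at or after 2024-06-01T00:00:00Z are requested.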
+ + Returns: + (total_fetched, new_messages, auto_approved, auto_approved_runs, errors) + """ + + errors: List[str] = [] + + if not settings.graph_mailbox: + raise MailImportError("Mailbox address is not configured.") + + try: + access_token = _get_access_token(settings) + except MailImportError: + raise + except Exception as exc: + raise MailImportError(f"Unexpected error while obtaining Graph token: {exc}") + + try: + incoming_folder_id = _resolve_folder_id(settings, access_token, settings.incoming_folder or "Inbox") + except MailImportError: + raise + except Exception as exc: + raise MailImportError(f"Unexpected error while resolving incoming folder: {exc}") + + processed_folder_id = None + if getattr(settings, "processed_folder", None): + try: + processed_folder_id = _resolve_folder_id(settings, access_token, settings.processed_folder) + except MailImportError as exc: + # If the processed folder is misconfigured, we still continue the import + errors.append(str(exc)) + except Exception as exc: + errors.append(f"Unexpected error while resolving processed folder: {exc}") + + headers = _build_auth_headers(access_token) + mailbox = settings.graph_mailbox + + retention_days = getattr(settings, "ingest_eml_retention_days", 7) + try: + retention_days = int(retention_days) if retention_days is not None else 7 + except (ValueError, TypeError): + retention_days = 7 + if retention_days not in (0, 7, 14): + retention_days = 7 + + batch_size = 50 + + url = ( + f"{GRAPH_BASE_URL}/users/{mailbox}/mailFolders/{incoming_folder_id}/messages" + f"?$top={batch_size}&$orderby=receivedDateTime desc" + ) + + # Optional cutoff date (UTC midnight). Older messages should remain in inbox. + cutoff_date = getattr(settings, "auto_import_cutoff_date", None) + if cutoff_date: + cutoff_dt = datetime.combine(cutoff_date, datetime.min.time()).replace(tzinfo=timezone.utc) + cutoff_iso = cutoff_dt.strftime('%Y-%m-%dT%H:%M:%SZ') + # Graph requires spaces in $filter to be URL-encoded. + url += f"&$filter=receivedDateTime%20ge%20{cutoff_iso}" + + resp = requests.get(url, headers=headers, timeout=20) + if resp.status_code != 200: + raise MailImportError(f"Failed to fetch messages from incoming folder (status {resp.status_code}).") + + payload = resp.json() + items = payload.get("value", []) + total_fetched = len(items) + + # Fetch full bodies for the fetched messages so inline popup can show content. 
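+    # One follow-up Graph request per message; $select=body,bodyPreview keeps the detail
+    # responses small, and failures are collected in `errors` without aborting the import.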
+ for msg in items: + msg_id = msg.get("id") + if not msg_id: + continue + detail_url = f"{GRAPH_BASE_URL}/users/{mailbox}/messages/{msg_id}?$select=body,bodyPreview" + try: + detail_resp = requests.get(detail_url, headers=headers, timeout=20) + except Exception as exc: + errors.append(f"Error while fetching body for message {msg_id}: {exc}") + continue + + if detail_resp.status_code != 200: + errors.append(f"Failed to fetch body for message {msg_id} (status {detail_resp.status_code}).") + continue + + detail_payload = detail_resp.json() + if "body" in detail_payload: + msg["body"] = detail_payload.get("body") + if "bodyPreview" in detail_payload: + msg["bodyPreview"] = detail_payload.get("bodyPreview") + + # Optionally fetch raw EML bytes for new messages (debug storage) + if retention_days > 0: + try: + ids = [m.get("id") for m in items if m.get("id")] + existing_ids = set() + if ids: + existing_ids = { + mid + for (mid,) in db.session.query(MailMessage.message_id) + .filter(MailMessage.message_id.in_(ids)) + .all() + if mid + } + + for m in items: + mid = m.get("id") + if not mid or mid in existing_ids: + continue + eml_bytes = _fetch_eml_bytes(mailbox, mid, access_token) + if eml_bytes: + m["_eml_bytes"] = eml_bytes + except Exception as exc: + errors.append(f"Unexpected error while fetching EML bytes: {exc}") + + auto_approved_runs = [] + + try: + total_processed, new_messages, auto_approved, auto_approved_runs = _store_messages(settings, items) + except MailImportError as exc: + errors.append(str(exc)) + new_messages = 0 + auto_approved = 0 + auto_approved_runs = [] + + # Move messages to the processed folder if configured + if processed_folder_id: + graph_host = urlparse(GRAPH_BASE_URL).hostname or "" + if graph_host and not _can_resolve_hostname(graph_host, timeout_seconds=2): + errors.append( + "Skipping move-to-processed step: Microsoft Graph hostname could not be resolved in time. " + "Messages were imported, but will not be moved." + ) + processed_folder_id = None + + if processed_folder_id: + for msg in items: + msg_id = msg.get("id") + if not msg_id: + continue + move_url = f"{GRAPH_BASE_URL}/users/{mailbox}/messages/{msg_id}/move" + try: + move_resp = requests.post( + move_url, + headers=headers, + json={"destinationId": processed_folder_id}, + timeout=20, + ) + except Exception as exc: + errors.append(f"Error while moving message {msg_id}: {exc}") + continue + + if move_resp.status_code not in (200, 201): + errors.append( + f"Failed to move message {msg_id} to processed folder " + f"(status {move_resp.status_code})." + ) + + # Cleanup stored EML blobs based on retention policy + try: + if retention_days == 0: + MailMessage.query.filter(MailMessage.eml_blob.isnot(None)).update( + {MailMessage.eml_blob: None, MailMessage.eml_stored_at: None}, + synchronize_session=False, + ) + db.session.commit() + else: + cutoff = datetime.utcnow() - timedelta(days=retention_days) + MailMessage.query.filter( + MailMessage.eml_stored_at.isnot(None), + MailMessage.eml_stored_at < cutoff, + ).update( + {MailMessage.eml_blob: None, MailMessage.eml_stored_at: None}, + synchronize_session=False, + ) + db.session.commit() + except Exception as exc: + db.session.rollback() + errors.append(f"Failed to cleanup stored EML blobs: {exc}") + + return total_fetched, new_messages, auto_approved, auto_approved_runs, errors + + +def run_manual_import(settings: SystemSettings, batch_size: int): + """Execute a one-off manual import from Microsoft Graph. 
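+
+    Unlike run_auto_import, the batch size is caller-supplied and no cutoff-date
+    filter is applied to the Graph query.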
+ + Returns: + (total_fetched, new_messages, auto_approved, auto_approved_runs, errors) + """ + errors: List[str] = [] + + if not settings.graph_mailbox: + raise MailImportError("Mailbox address is not configured.") + + try: + access_token = _get_access_token(settings) + except MailImportError as exc: + raise + except Exception as exc: + raise MailImportError(f"Unexpected error while obtaining Graph token: {exc}") + + try: + incoming_folder_id = _resolve_folder_id(settings, access_token, settings.incoming_folder or "Inbox") + except MailImportError as exc: + raise + except Exception as exc: + raise MailImportError(f"Unexpected error while resolving incoming folder: {exc}") + + processed_folder_id = None + if getattr(settings, "processed_folder", None): + try: + processed_folder_id = _resolve_folder_id(settings, access_token, settings.processed_folder) + except MailImportError as exc: + # If the processed folder is misconfigured, we still continue the import + errors.append(str(exc)) + except Exception as exc: + errors.append(f"Unexpected error while resolving processed folder: {exc}") + + headers = _build_auth_headers(access_token) + mailbox = settings.graph_mailbox + + retention_days = getattr(settings, "ingest_eml_retention_days", 7) + try: + retention_days = int(retention_days) if retention_days is not None else 7 + except (ValueError, TypeError): + retention_days = 7 + if retention_days not in (0, 7, 14): + retention_days = 7 + + url = ( + f"{GRAPH_BASE_URL}/users/{mailbox}/mailFolders/{incoming_folder_id}/messages" + f"?$top={batch_size}&$orderby=receivedDateTime desc" + ) + + resp = requests.get(url, headers=headers, timeout=20) + if resp.status_code != 200: + raise MailImportError(f"Failed to fetch messages from incoming folder (status {resp.status_code}).") + + payload = resp.json() + items = payload.get("value", []) + total_fetched = len(items) + + # Fetch full bodies for the fetched messages so inline popup can show content. + # We keep this simple: for each new message, fetch its body (HTML or text). + for msg in items: + msg_id = msg.get("id") + if not msg_id: + continue + detail_url = f"{GRAPH_BASE_URL}/users/{mailbox}/messages/{msg_id}?$select=body,bodyPreview" + try: + detail_resp = requests.get(detail_url, headers=headers, timeout=20) + except Exception as exc: + errors.append(f"Error while fetching body for message {msg_id}: {exc}") + continue + + if detail_resp.status_code != 200: + errors.append( + f"Failed to fetch body for message {msg_id} (status {detail_resp.status_code})." 
+ ) + continue + + detail_payload = detail_resp.json() + if "body" in detail_payload: + msg["body"] = detail_payload.get("body") + if "bodyPreview" in detail_payload: + msg["bodyPreview"] = detail_payload.get("bodyPreview") + + # Optionally fetch raw EML bytes for new messages (debug storage) + if retention_days > 0: + try: + ids = [m.get("id") for m in items if m.get("id")] + existing_ids = set() + if ids: + existing_ids = { + mid + for (mid,) in db.session.query(MailMessage.message_id) + .filter(MailMessage.message_id.in_(ids)) + .all() + if mid + } + + for m in items: + mid = m.get("id") + if not mid or mid in existing_ids: + continue + eml_bytes = _fetch_eml_bytes(mailbox, mid, access_token) + if eml_bytes: + m["_eml_bytes"] = eml_bytes + except Exception as exc: + errors.append(f"Unexpected error while fetching EML bytes: {exc}") + + auto_approved = 0 + + auto_approved_runs = [] + + try: + total_processed, new_messages, auto_approved, auto_approved_runs = _store_messages(settings, items) + except MailImportError as exc: + errors.append(str(exc)) + new_messages = 0 + auto_approved_runs = [] + + # Move messages to the processed folder if configured + if processed_folder_id: + graph_host = urlparse(GRAPH_BASE_URL).hostname or "" + if graph_host and not _can_resolve_hostname(graph_host, timeout_seconds=2): + errors.append( + "Skipping move-to-processed step: Microsoft Graph hostname could not be resolved in time. " + "Messages were imported, but will not be moved." + ) + processed_folder_id = None + + if processed_folder_id: + for msg in items: + msg_id = msg.get("id") + if not msg_id: + continue + move_url = f"{GRAPH_BASE_URL}/users/{mailbox}/messages/{msg_id}/move" + try: + move_resp = requests.post( + move_url, + headers=headers, + json={"destinationId": processed_folder_id}, + timeout=20, + ) + except Exception as exc: + errors.append(f"Error while moving message {msg_id}: {exc}") + continue + + if move_resp.status_code not in (200, 201): + errors.append( + f"Failed to move message {msg_id} to processed folder " + f"(status {move_resp.status_code})." + ) + + + # Cleanup stored EML blobs based on retention policy + try: + if retention_days == 0: + MailMessage.query.filter(MailMessage.eml_blob.isnot(None)).update( + {MailMessage.eml_blob: None, MailMessage.eml_stored_at: None}, + synchronize_session=False, + ) + db.session.commit() + else: + cutoff = datetime.utcnow() - timedelta(days=retention_days) + MailMessage.query.filter( + MailMessage.eml_stored_at.isnot(None), + MailMessage.eml_stored_at < cutoff, + ).update( + {MailMessage.eml_blob: None, MailMessage.eml_stored_at: None}, + synchronize_session=False, + ) + db.session.commit() + except Exception as exc: + db.session.rollback() + errors.append(f"Failed to cleanup stored EML blobs: {exc}") + + return total_fetched, new_messages, auto_approved, auto_approved_runs, errors diff --git a/containers/backupchecks/src/backend/app/main.py b/containers/backupchecks/src/backend/app/main.py new file mode 100644 index 0000000..bd4b1b7 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main.py @@ -0,0 +1,9 @@ +import os + +from . 
import create_app + +app = create_app() + +if __name__ == "__main__": + port = int(os.environ.get("APP_PORT", 8080)) + app.run(host="0.0.0.0", port=port) diff --git a/containers/backupchecks/src/backend/app/main/__init__.py b/containers/backupchecks/src/backend/app/main/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/containers/backupchecks/src/backend/app/main/routes.py b/containers/backupchecks/src/backend/app/main/routes.py new file mode 100644 index 0000000..60f2719 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes.py @@ -0,0 +1,29 @@ +"""Main blueprint routes. + +This module keeps a small import surface for app creation while the actual +view functions are organized across multiple modules. +""" + +from .routes_shared import main_bp, roles_required # noqa: F401 + +# Import route modules so their decorators register with the blueprint. +from . import routes_core # noqa: F401 +from . import routes_news # noqa: F401 +from . import routes_inbox # noqa: F401 +from . import routes_customers # noqa: F401 +from . import routes_jobs # noqa: F401 +from . import routes_settings # noqa: F401 +from . import routes_daily_jobs # noqa: F401 +from . import routes_run_checks # noqa: F401 +from . import routes_overrides # noqa: F401 +from . import routes_parsers # noqa: F401 +from . import routes_changelog # noqa: F401 +from . import routes_reports # noqa: F401 +from . import routes_tickets # noqa: F401 +from . import routes_remarks # noqa: F401 +from . import routes_feedback # noqa: F401 +from . import routes_api # noqa: F401 +from . import routes_reporting_api # noqa: F401 +from . import routes_user_settings # noqa: F401 + +__all__ = ["main_bp", "roles_required"] diff --git a/containers/backupchecks/src/backend/app/main/routes_api.py b/containers/backupchecks/src/backend/app/main/routes_api.py new file mode 100644 index 0000000..7242eff --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_api.py @@ -0,0 +1,492 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _format_datetime, _get_ui_timezone_name, _next_ticket_code, _to_amsterdam_date + +@main_bp.route("/api/job-runs//alerts") +@login_required +@roles_required("admin", "operator", "viewer") +def api_job_run_alerts(run_id: int): + run = JobRun.query.get_or_404(run_id) + job = Job.query.get(run.job_id) if run else None + + run_date = _to_amsterdam_date(run.run_at) + if run_date is None: + run_date = _to_amsterdam_date(datetime.utcnow()) + + tickets = [] + remarks = [] + + # Tickets active for this job on this run date (including resolved-on-day) + try: + rows = ( + db.session.execute( + text( + """ + SELECT t.id, t.ticket_code, t.description, t.start_date, t.resolved_at, t.active_from_date + FROM tickets t + JOIN ticket_scopes ts ON ts.ticket_id = t.id + WHERE ts.job_id = :job_id + AND t.active_from_date <= :run_date + AND ( + t.resolved_at IS NULL + OR ((t.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) >= :run_date + ) + ORDER BY t.start_date DESC + """ + ), + { + "job_id": job.id if job else None, + "run_date": run_date, + "ui_tz": _get_ui_timezone_name(), + }, + ) + .mappings() + .all() + ) + + for r in rows: + resolved_at = r.get("resolved_at") + resolved_same_day = False + if resolved_at and run_date: + resolved_same_day = _to_amsterdam_date(resolved_at) == run_date + active_now = r.get("resolved_at") is None + + tickets.append( + { + "id": int(r.get("id")), + "ticket_code": r.get("ticket_code") or "", + "description": r.get("description") or 
"", + "start_date": _format_datetime(r.get("start_date")), + "active_from_date": str(r.get("active_from_date")) if r.get("active_from_date") else "", + "resolved_at": _format_datetime(r.get("resolved_at")) if r.get("resolved_at") else "", + "active": bool(active_now), + "resolved_same_day": bool(resolved_same_day), + } + ) + except Exception as exc: + return jsonify({"status": "error", "message": str(exc) or "Failed to load tickets."}), 500 + + # Remarks active for this job on this run date (including resolved-on-day) + try: + rows = ( + db.session.execute( + text( + """ + SELECT r.id, r.body, r.start_date, r.resolved_at, r.active_from_date + FROM remarks r + JOIN remark_scopes rs ON rs.remark_id = r.id + WHERE rs.job_id = :job_id + AND COALESCE( + r.active_from_date, + ((r.start_date AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) + ) <= :run_date + AND ( + r.resolved_at IS NULL + OR ((r.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) >= :run_date + ) + ORDER BY r.start_date DESC + """ + ), + { + "job_id": job.id if job else None, + "run_date": run_date, + "ui_tz": _get_ui_timezone_name(), + }, + ) + .mappings() + .all() + ) + + for rr in rows: + body = (rr.get("body") or "").strip() + if len(body) > 180: + body = body[:177] + "..." + + resolved_at = rr.get("resolved_at") + resolved_same_day = False + if resolved_at and run_date: + resolved_same_day = _to_amsterdam_date(resolved_at) == run_date + + active_now = resolved_at is None or (not resolved_same_day) + + remarks.append( + { + "id": int(rr.get("id")), + "body": body, + "start_date": _format_datetime(rr.get("start_date")) if rr.get("start_date") else "-", + "active_from_date": str(rr.get("active_from_date")) if rr.get("active_from_date") else "", + "resolved_at": _format_datetime(rr.get("resolved_at")) if rr.get("resolved_at") else "", + "active": bool(active_now), + "resolved_same_day": bool(resolved_same_day), + } + ) + except Exception as exc: + return jsonify({"status": "error", "message": str(exc) or "Failed to load remarks."}), 500 + + payload_job = { + "job_id": job.id if job else None, + "job_name": job.job_name if job else "", + "customer_id": job.customer_id if job else None, + "backup_software": job.backup_software if job else "", + "backup_type": job.backup_type if job else "", + } + + return jsonify({"status": "ok", "job": payload_job, "tickets": tickets, "remarks": remarks}) + + +@main_bp.route("/api/tickets", methods=["GET", "POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def api_tickets(): + if request.method == "GET": + active = (request.args.get("active") or "1").strip() != "0" + q = (request.args.get("q") or "").strip() + try: + customer_id = int(request.args.get("customer_id") or 0) + except Exception: + customer_id = 0 + + query = Ticket.query + if active: + query = query.filter(Ticket.resolved_at.is_(None)) + if q: + like_q = f"%{q}%" + query = query.filter( + (Ticket.ticket_code.ilike(like_q)) + | (Ticket.description.ilike(like_q)) + ) + if customer_id: + query = query.join(TicketScope, TicketScope.ticket_id == Ticket.id).filter(TicketScope.customer_id == customer_id) + + query = query.order_by(Ticket.start_date.desc()).limit(500) + items = [] + for t in query.all(): + items.append( + { + "id": t.id, + "ticket_code": t.ticket_code, + "description": t.description or "", + "active_from_date": str(getattr(t, "active_from_date", "") or ""), + "start_date": _format_datetime(t.start_date), + "resolved_at": _format_datetime(t.resolved_at) if t.resolved_at else "", + "active": 
t.resolved_at is None, + } + ) + return jsonify({"status": "ok", "tickets": items}) + + # POST + if get_active_role() not in ("admin", "operator"): + return jsonify({"status": "error", "message": "Forbidden."}), 403 + + payload = request.get_json(silent=True) or {} + description = (payload.get("description") or "").strip() or None + try: + run_id = int(payload.get("job_run_id") or 0) + except Exception: + run_id = 0 + + if run_id <= 0: + return jsonify({"status": "error", "message": "job_run_id is required."}), 400 + + run = JobRun.query.get(run_id) + if not run: + return jsonify({"status": "error", "message": "Job run not found."}), 404 + + job = Job.query.get(run.job_id) if run else None + + now = datetime.utcnow() + code = _next_ticket_code(now) + + ticket = Ticket( + ticket_code=code, + title=None, + description=description, + active_from_date=_to_amsterdam_date(run.run_at) or _to_amsterdam_date(now) or now.date(), + start_date=now, + resolved_at=None, + ) + + try: + db.session.add(ticket) + db.session.flush() + + # Minimal scope from job + scope = TicketScope( + ticket_id=ticket.id, + scope_type="job", + customer_id=job.customer_id if job else None, + backup_software=job.backup_software if job else None, + backup_type=job.backup_type if job else None, + job_id=job.id if job else None, + job_name_match=job.job_name if job else None, + job_name_match_mode="exact", + ) + db.session.add(scope) + + link = TicketJobRun(ticket_id=ticket.id, job_run_id=run.id, link_source="manual") + db.session.add(link) + + db.session.commit() + except Exception as exc: + db.session.rollback() + return jsonify({"status": "error", "message": str(exc) or "Failed to create ticket."}), 500 + + return jsonify( + { + "status": "ok", + "ticket": { + "id": ticket.id, + "ticket_code": ticket.ticket_code, + "description": ticket.description or "", + "start_date": _format_datetime(ticket.start_date), + "active_from_date": str(ticket.active_from_date) if getattr(ticket, "active_from_date", None) else "", + "resolved_at": "", + "active": True, + }, + } + ) + + +@main_bp.route("/api/tickets/", methods=["PATCH"]) +@login_required +@roles_required("admin", "operator", "viewer") +def api_ticket_update(ticket_id: int): + if get_active_role() not in ("admin", "operator"): + return jsonify({"status": "error", "message": "Forbidden."}), 403 + + ticket = Ticket.query.get_or_404(ticket_id) + payload = request.get_json(silent=True) or {} + if "description" in payload: + ticket.description = (payload.get("description") or "").strip() or None + + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + return jsonify({"status": "error", "message": str(exc) or "Failed to update ticket."}), 500 + + return jsonify({"status": "ok"}) + + +@main_bp.route("/api/tickets//resolve", methods=["POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def api_ticket_resolve(ticket_id: int): + if get_active_role() not in ("admin", "operator"): + return jsonify({"status": "error", "message": "Forbidden."}), 403 + + ticket = Ticket.query.get_or_404(ticket_id) + if ticket.resolved_at is None: + ticket.resolved_at = datetime.utcnow() + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + return jsonify({"status": "error", "message": str(exc) or "Failed to resolve ticket."}), 500 + + # If this endpoint is called from a regular HTML form submit (e.g. Tickets/Remarks page), + # redirect back instead of showing raw JSON in the browser. 
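    # A plain browser form post typically arrives without a JSON body and with an
    # "Accept: text/html" header, so the check below falls through to the redirect.
    # Script callers that want the JSON payload are assumed to either send a JSON body
    # (request.is_json) or set "Accept: application/json" explicitly; the exact frontend
    # behaviour is an assumption, not something enforced by this handler.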
+ if not request.is_json and "application/json" not in (request.headers.get("Accept") or ""): + return redirect(request.referrer or url_for("main.tickets_page")) + + return jsonify({"status": "ok", "resolved_at": _format_datetime(ticket.resolved_at)}) + + +@main_bp.route("/api/tickets//link-run", methods=["POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def api_ticket_link_run(ticket_id: int): + if get_active_role() not in ("admin", "operator"): + return jsonify({"status": "error", "message": "Forbidden."}), 403 + + ticket = Ticket.query.get_or_404(ticket_id) + payload = request.get_json(silent=True) or {} + try: + run_id = int(payload.get("job_run_id") or 0) + except Exception: + run_id = 0 + if run_id <= 0: + return jsonify({"status": "error", "message": "job_run_id is required."}), 400 + + run = JobRun.query.get(run_id) + if not run: + return jsonify({"status": "error", "message": "Job run not found."}), 404 + + link = TicketJobRun(ticket_id=ticket.id, job_run_id=run.id, link_source="manual") + try: + db.session.add(link) + db.session.commit() + except Exception as exc: + db.session.rollback() + return jsonify({"status": "error", "message": str(exc) or "Failed to link run."}), 500 + + return jsonify({"status": "ok"}) + + +@main_bp.route("/api/remarks", methods=["GET", "POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def api_remarks(): + if request.method == "GET": + active = (request.args.get("active") or "1").strip() != "0" + q = (request.args.get("q") or "").strip() + query = Remark.query + if active: + query = query.filter(Remark.resolved_at.is_(None)) + if q: + like_q = f"%{q}%" + query = query.filter(Remark.body.ilike(like_q)) + query = query.order_by(Remark.start_date.desc()).limit(500) + items = [] + for r in query.all(): + items.append( + { + "id": r.id, + "body": r.body or "", + "active_from_date": str(getattr(r, "active_from_date", "") or ""), + "start_date": _format_datetime(r.start_date) if r.start_date else "-", + "resolved_at": _format_datetime(r.resolved_at) if r.resolved_at else "", + "active": r.resolved_at is None, + } + ) + return jsonify({"status": "ok", "remarks": items}) + + if get_active_role() not in ("admin", "operator"): + return jsonify({"status": "error", "message": "Forbidden."}), 403 + + payload = request.get_json(silent=True) or {} + body = (payload.get("body") or "").strip() or "" + try: + run_id = int(payload.get("job_run_id") or 0) + except Exception: + run_id = 0 + + if run_id <= 0: + return jsonify({"status": "error", "message": "job_run_id is required."}), 400 + + run = JobRun.query.get(run_id) + if not run: + return jsonify({"status": "error", "message": "Job run not found."}), 404 + + job = Job.query.get(run.job_id) if run else None + + now = datetime.utcnow() + remark = Remark( + title=None, + body=body, + active_from_date=_to_amsterdam_date(run.run_at) or _to_amsterdam_date(now) or now.date(), + start_date=now, + resolved_at=None, + ) + + try: + db.session.add(remark) + db.session.flush() + + scope = RemarkScope( + remark_id=remark.id, + scope_type="job", + customer_id=job.customer_id if job else None, + backup_software=job.backup_software if job else None, + backup_type=job.backup_type if job else None, + job_id=job.id if job else None, + job_name_match=job.job_name if job else None, + job_name_match_mode="exact", + ) + db.session.add(scope) + + link = RemarkJobRun(remark_id=remark.id, job_run_id=run.id, link_source="manual") + db.session.add(link) + + db.session.commit() + except Exception as 
exc: + db.session.rollback() + return jsonify({"status": "error", "message": str(exc) or "Failed to create remark."}), 500 + + return jsonify( + { + "status": "ok", + "remark": { + "id": remark.id, + "body": remark.body or "", + "start_date": _format_datetime(remark.start_date), + "resolved_at": "", + "active": True, + }, + } + ) + + +@main_bp.route("/api/remarks/", methods=["PATCH"]) +@login_required +@roles_required("admin", "operator", "viewer") +def api_remark_update(remark_id: int): + if get_active_role() not in ("admin", "operator"): + return jsonify({"status": "error", "message": "Forbidden."}), 403 + + remark = Remark.query.get_or_404(remark_id) + payload = request.get_json(silent=True) or {} + if "body" in payload: + remark.body = (payload.get("body") or "").strip() or "" + + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + return jsonify({"status": "error", "message": str(exc) or "Failed to update remark."}), 500 + + return jsonify({"status": "ok"}) + + +@main_bp.route("/api/remarks//resolve", methods=["POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def api_remark_resolve(remark_id: int): + if get_active_role() not in ("admin", "operator"): + return jsonify({"status": "error", "message": "Forbidden."}), 403 + + remark = Remark.query.get_or_404(remark_id) + if remark.resolved_at is None: + remark.resolved_at = datetime.utcnow() + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + return jsonify({"status": "error", "message": str(exc) or "Failed to resolve remark."}), 500 + + # If this endpoint is called from a regular HTML form submit (e.g. Tickets/Remarks page), + # redirect back instead of showing raw JSON in the browser. + if not request.is_json and "application/json" not in (request.headers.get("Accept") or ""): + return redirect(request.referrer or url_for("main.tickets_page")) + + return jsonify({"status": "ok", "resolved_at": _format_datetime(remark.resolved_at)}) + + +@main_bp.route("/api/remarks//link-run", methods=["POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def api_remark_link_run(remark_id: int): + if get_active_role() not in ("admin", "operator"): + return jsonify({"status": "error", "message": "Forbidden."}), 403 + + remark = Remark.query.get_or_404(remark_id) + payload = request.get_json(silent=True) or {} + try: + run_id = int(payload.get("job_run_id") or 0) + except Exception: + run_id = 0 + if run_id <= 0: + return jsonify({"status": "error", "message": "job_run_id is required."}), 400 + + run = JobRun.query.get(run_id) + if not run: + return jsonify({"status": "error", "message": "Job run not found."}), 404 + + link = RemarkJobRun(remark_id=remark.id, job_run_id=run.id, link_source="manual") + try: + db.session.add(link) + db.session.commit() + except Exception as exc: + db.session.rollback() + return jsonify({"status": "error", "message": str(exc) or "Failed to link run."}), 500 + + return jsonify({"status": "ok"}) diff --git a/containers/backupchecks/src/backend/app/main/routes_changelog.py b/containers/backupchecks/src/backend/app/main/routes_changelog.py new file mode 100644 index 0000000..3b1ed78 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_changelog.py @@ -0,0 +1,1046 @@ +from .routes_shared import * # noqa: F401,F403 + + +@main_bp.route("/changelog") +@login_required +@roles_required("admin", "operator", "reporter", "viewer") +def changelog_page(): + + changelog = { + "completed_summary": [ + + + + + +{ + 
"version": "0.1.14", + "overview": [ + "This release improves Daily Jobs sorting, enhances Veeam Backup for Microsoft 365 warning parsing, and delivers a complete Overrides workflow (configuration, matching, application, UI indicators, and reporting), including popup stability fixes." + ], + "categories": [ + { + "category": "Daily Jobs", + "items": [ + { + "title": None, + "details": [ + "Introduced a consistent, case-insensitive multi-level sort order for the Daily Jobs overview: Customer → Backup Software → Backup Type → Job Name.", + "Fixed backend ordering issues to ensure server-side data no longer overrides the intended sort logic.", + "Ensured sorting is applied before serialization so the UI always reflects the correct order.", + "Improved predictability and readability of job listings across environments.", + ], + } + ], + }, + { + "category": "Veeam Backup for Microsoft 365", + "items": [ + { + "title": None, + "details": [ + "Improved parsing of overall warning messages to correctly extract and display permission- and role-related issues.", + "Added support for combined permission and role warnings in M365 reports.", + "Ensured detailed permission warnings take precedence over generic “X of X objects processed” messages.", + "Fixed incorrect overall message selection and filtered out misleading banner fragments.", + "Resolved an indentation error in the parser that caused backend startup failures, restoring stability.", + ], + } + ], + }, + { + "category": "Overrides – Configuration and Matching", + "items": [ + { + "title": None, + "details": [ + "Replaced free-text inputs with dropdowns for Backup Software and Backup Type in Overrides, including alphabetical sorting, preselection of existing values, and a global option at the top of each dropdown.", + "Fixed PostgreSQL compatibility issues by replacing DISTINCT queries with GROUP BY while preserving case-insensitive sorting.", + "Ensured Overrides endpoints no longer crash due to invalid query constructions.", + ], + } + ], + }, + { + "category": "Overrides – Application, Editing, and Deletion", + "items": [ + { + "title": None, + "details": [ + "Made newly created overrides apply immediately and retroactively to all unreviewed runs by default.", + "Added full support for editing existing overrides and reapplying changes to unreviewed runs.", + "Restricted override deletion to Admin users and ensured proper reprocessing after removal.", + "Fixed datetime handling in override edit flows so unchanged values are preserved and NULL constraint violations are avoided.", + "Ensured Admin users always see delete actions by consistently passing permission flags to the UI.", + ], + } + ], + }, + { + "category": "Overrides – Matching Logic Improvements", + "items": [ + { + "title": None, + "details": [ + "Extended override matching to use persisted run_object_links joined with customer_objects instead of legacy or non-existent relationships.", + "Improved global override matching by resolving backup software and type from MailMessage data when missing on jobs.", + "Added support for matching against object-level error messages as well as run-level remarks.", + "Ensured all override matching remains case-insensitive and consistent across run-level and object-level evaluations.", + ], + } + ], + }, + { + "category": "Overrides – UI Indicators and Reporting", + "items": [ + { + "title": None, + "details": [ + "Introduced a blue status indicator for runs and jobs where overrides are applied.", + "Updated status labels to display “Success 
(override)” for clearer distinction without changing canonical stored statuses.", + "Added persistent override reporting metadata to job runs, including applied override ID, level, and reason.", + "Ensured dashboards, Daily Jobs, Run Checks, and popups correctly propagate and display override-based success states.", + "Fixed multiple UI rendering issues so overridden runs are no longer misclassified as warnings or missed jobs.", + ], + } + ], + }, + { + "category": "Daily Jobs Popups", + "items": [ + { + "title": None, + "details": [ + "Fixed popup loading failures and backend unpacking errors related to override handling.", + "Ensured popup details consistently load correct run data.", + "Aligned popup override detection and status coloring with Run Checks and Daily Jobs overviews.", + ], + } + ], + }, + ], +}, +{ + "version": "0.1.13", + "overview": [ + "This release focuses on improving visibility and consistency of Tickets and Remarks across Run Checks and Job Details, alongside several UI fixes and backend stability improvements." + ], + "categories": [ + { + "category": "Highlights", + "items": [ + { + "title": None, + "details": [ + "Added clear visual indicators for active Tickets and Remarks in the Run Checks overview.", + "Enhanced Job Details and Job History to display actual ticket numbers and related remark messages, both in tables and popups.", + "Improved navigation consistency by adding direct Job page links for Tickets and Remarks." + ] + } + ] + }, + { + "category": "Improvements", + "items": [ + { + "title": None, + "details": [ + "Job History popups now reliably show associated ticket numbers and remark content.", + "Backend job history data is enriched to support consistent UI rendering.", + "Missed-run detection now includes a ±1 hour tolerance window and respects the configured UI timezone.", + "Run Checks UI is simplified by hiding last-reviewed columns (data is still retained in the backend)." + ] + } + ] + }, + { + "category": "Fixes", + "items": [ + { + "title": None, + "details": [ + "Resolved a backend indentation issue that caused Gunicorn startup failures.", + "Made frontend parsing of ticket/remark data more robust against malformed or unexpected payloads.", + "Fixed JSON encoding issues in HTML data attributes to prevent popup rendering errors." + ] + } + ] + }, + { + "category": "Changelog Update", + "items": [ + { + "title": None, + "details": [ + "Simplified the changelog by removing Current Version and In testing sections.", + "The changelog now only shows completed changes." + ] + } + ] + } + ] +}, +{ + "version": "0.1.12", + "overview": [ + "This release focuses on Dashboard accuracy, Inbox handling, parser expansions, and overall system stability." + ], + "categories": [ + { + "category": "Dashboard & UI", + "items": [ + { + "title": None, + "details": [ + "Corrected dashboard counters so Expected, Missed, and Success (override) statuses are shown accurately.", + "Added dedicated counters for Expected and Success (override).", + "Fixed layout issues on the Inbox dashboard tiles and improved label wrapping.", + "Added safe timezone fallbacks to prevent incorrect status aggregation.", + "Restored missing status icons and symbols across Dashboard and Daily Jobs views.", + "Cleaned up Job Details UI by removing redundant columns and clarifying schedule display.", + "Extended Job History with weekday labels and review metadata (Admin-only visibility)." 
+ ] + } + ] + }, + { + "category": "Stability & Reliability", + "items": [ + { + "title": None, + "details": [ + "Fixed a Gunicorn startup crash caused by incorrect Python indentation.", + "Improved migration robustness for soft-delete columns to prevent startup 502 errors on busy databases.", + "Prevented duplicate or unintended regeneration of reviewed Missed runs." + ] + } + ] + }, + { + "category": "Inbox & Mail Handling", + "items": [ + { + "title": None, + "details": [ + "Introduced soft-delete for Inbox messages with full Admin restore capability.", + "Added an Admin-only Deleted mails page with audit details.", + "Added popup previews for deleted mails without requiring restore.", + "Improved HTML mail handling by extracting content from HTML attachments when the body is empty.", + "Added an Admin maintenance action to backfill HTML bodies from existing attachments." + ] + } + ] + }, + { + "category": "Feedback & Settings", + "items": [ + { + "title": None, + "details": [ + "Changed Feedback behavior so resolved items remain visible until explicitly deleted.", + "Restricted feedback deletion to Admin users only.", + "Added a User Settings page allowing users to change their own password securely." + ] + } + ] + }, + { + "category": "Backup Parser Enhancements", + "items": [ + { + "title": None, + "details": [ + "Improved Veeam parsing including Health Check Summary handling, job name normalization, and License Key status detection.", + "Added and expanded Synology support for Active Backup for Business, R-Sync, and Account Protection notifications.", + "Added new parsers for R-Drive Image and Syncovery.", + "Ensured correct handling of objects, statuses, and scheduling exclusions where applicable." + ] + } + ] + }, + { + "category": "Changelog", + "items": [ + { + "title": None, + "details": [ + "Removed the Planned section from the Changelog.", + "Future planning is now handled exclusively via the Feedback page." 
+ ] + } + ] + } + ] +}, +{ + "version": "0.1.11", + "overview": [ + "Overall, this release significantly improves stability, review workflows, visual consistency, timezone correctness, and administrative reliability, while refining the operator experience and access control model.", + ], + "categories": [ + { + "category": "Stability & Bug Fixes", + "items": [ + { + "title": None, + "details": [ + "Fixed multiple page crashes caused by missing imports after refactoring (Jobs, Feedback, Run Checks, Inbox, Daily Jobs).", + "Resolved Jinja2 template errors and SQL/runtime issues related to timezone handling.", + "Improved robustness by explicitly importing shared helpers to prevent NameError exceptions.", + ], + } + ], + }, + { + "category": "Run Checks & Review Workflow", + "items": [ + { + "title": None, + "details": [ + "Introduced a new Run Checks page to review job runs independently from Daily Jobs.", + "Displays all unreviewed runs with no time-based filtering.", + "Supports bulk review actions and per-job review via popups.", + "Added admin-only features: show reviewed runs, unmark reviewed runs, reviewer metadata, and full audit logging.", + "Enhanced popups to group runs per job, include missed runs, and show ticket/remark indicators.", + "Added per-job and per-popup status summaries using visual indicators only.", + ], + } + ], + }, + { + "category": "UI & Visual Consistency", + "items": [ + { + "title": None, + "details": [ + "Unified all job and run status indicators to a single shape differentiated by color.", + "Added a clear status legend to the Dashboard, including the new Expected state.", + "Removed textual status labels across Daily Jobs and Run Checks for a cleaner UI.", + "Improved table layouts and widened content areas for better use of 1080p screens.", + "Ensured consistent indicator rendering across all pages.", + ], + } + ], + }, + { + "category": "Timezone & Display Improvements", + "items": [ + { + "title": None, + "details": [ + "Added a configurable timezone setting in Settings.", + "Updated all frontend date/time rendering to use the configured timezone instead of UTC.", + "Fixed offset issues and restored missing timestamps across multiple pages.", + ], + } + ], + }, + { + "category": "Missed Runs Logic", + "items": [ + { + "title": None, + "details": [ + "Refined missed run detection to rely only on historically received mail reports.", + "Prevented synthetic or never-run schedules from generating false missed runs.", + ], + } + ], + }, + { + "category": "Settings & Maintenance", + "items": [ + { + "title": None, + "details": [ + "Stabilized Delete all jobs by adding schema-tolerant cleanup of all related foreign key references.", + "Refactored the Settings page layout using accordions and cards for improved clarity.", + "Improved alignment and usability of import/export and user management sections.", + ], + } + ], + }, + { + "category": "Roles & Access Control", + "items": [ + { + "title": None, + "details": [ + "Added support for multiple roles per user with an active role switcher.", + "Fixed role-based menu rendering and ensured permissions are evaluated against the active role.", + "Ensured role switching consistently redirects to the Dashboard.", + ], + } + ], + }, + { + "category": "Theme & UX Fixes", + "items": [ + { + "title": None, + "details": [ + "Fixed manual theme switching (Light/Dark/Auto) and ensured user preferences persist.", + "Corrected Inbox EML download functionality by restoring the missing shared import.", + ], + } + ], + }, + ], +}, 
+ + +{ + "version": "0.1.10", + "overview": [ + "This release focuses on performance and stability improvements around re-parse operations, improved job matching and Veeam parsing, UI refinements across key views, and introduces a new Feedback board.", + ], + "categories": [ + { + "category": "Performance & Stability", + "items": [ + { + "title": None, + "details": [ + "Reworked Re-parse all to process inbox messages in controlled batches, preventing gateway and Gunicorn timeouts on large inboxes.", + "Added execution time guards to stop processing before proxy limits are reached.", + "Optimized job-matching queries and disabled session autoflush during batch operations to reduce database load.", + "Ensured auto-approval and persistence logic only finalize after a full, successful re-parse cycle.", + "Restored stable backend startup by fixing decorator ordering issues that caused 502 errors.", + ], + } + ], + }, + { + "category": "Job Matching & Parsing", + "items": [ + { + "title": None, + "details": [ + "Fixed approved job imports to persist from_address, ensuring correct matching during re-parse.", + "Improved Veeam Backup Job parsing:", + "Extracted and stored multi-line warnings/errors and object-level details with preserved line breaks.", + "Ignored VM summary lines (e.g., \"X of X VMs processed\") for overall status detection.", + "Prevented incorrect overall warnings when issues are object-level only.", + "Fixed regressions to ensure backup objects are consistently detected, stored, and displayed across all views.", + ], + } + ], + }, + { + "category": "UI & UX Improvements", + "items": [ + { + "title": None, + "details": [ + "Added EML download support for Job Details and Daily Jobs, with automatic availability handling and proper 404s when missing.", + "Improved rendering to preserve line breaks (pre-wrap) in remarks, overall messages, and object details.", + "Reduced visual clutter by moving overall status/messages out of tables and into context-specific popups.", + "Standardized changelog version display by removing date suffixes.", + "Reordered main navigation for better consistency.", + ], + } + ], + }, + { + "category": "Daily Jobs & Status Accuracy", + "items": [ + { + "title": None, + "details": [ + "Clarified Daily Jobs status logic by introducing Expected for backups not yet due.", + "Reserved Missed only for jobs past their final expected run time.", + "Added last remark excerpts and ensured object details are visible in Daily Jobs popups.", + ], + } + ], + }, + { + "category": "Tickets, Remarks & Overrides", + "items": [ + { + "title": None, + "details": [ + "Introduced run-date scoped ticket activity with active_from_date, ensuring accurate historical and current visibility.", + "Implemented identical scoping for remarks, preserving visibility across runs even after resolution.", + "Fixed resolve actions to redirect properly in the UI while keeping JSON responses for API/AJAX.", + "Improved override handling so changes apply immediately to existing job runs with correct priority resolution.", + ], + } + ], + }, + { + "category": "New Features", + "items": [ + { + "title": None, + "details": [ + "Added a Feedback board with per-user upvoting, admin moderation (resolve/reopen, soft delete), database migrations, and navigation entry.", + ], + } + ], + }, + { + "category": "Navigation", + "items": [ + { + "title": None, + "details": [ + "Updated menu order to: Inbox, Customers, Jobs, Daily Jobs, Tickets, Overrides, Reports, Settings, Logging, Changelog, Feedback.", + ], + 
} + ], + }, + ], +}, + { + "version": "0.1.9", + "overview": [ + "Overall, v0.1.9 focused on hardening the changelog system, improving backend stability, cleaning up technical debt in routing, and ensuring consistent, reliable release tracking across the application.", + ], + "categories": [ + { + "category": "Changelog System Improvements", + "items": [ + { + "title": None, + "details": [ + "Added and maintained multiple Completed changelog entries (v0.1.2 through v0.1.9) with correct release dates.", + "Ensured all existing Completed, Testing, and Planned changelog entries were preserved without loss.", + "Migrated the Completed changelog from markdown-based content to a structured, non-markdown format aligned with the Planned section.", + "Simplified changelog rendering logic to use explicit section titles and bullet handling instead of full markdown parsing.", + "Standardized formatting across all versions for long-term maintainability and consistent UI rendering.", + ], + } + ], + }, + { + "category": "Bug Fixes & Stability", + "items": [ + { + "title": None, + "details": [ + "Fixed multiple backend Python syntax and runtime errors related to changelog definitions (missing commas, indentation issues, invalid list entries).", + "Resolved rendering issues where markdown content was displayed as plain text or collapsed incorrectly.", + "Restored application startup stability by fixing missing imports (re, html) and indentation errors in changelog-related routes.", + ], + } + ], + }, + { + "category": "Refactoring & Maintainability", + "items": [ + { + "title": None, + "details": [ + "Refactored a large routes.py file into multiple smaller route modules.", + "Introduced a shared routes module for common imports, helpers, and access control.", + "Fixed NameError issues after refactoring by explicitly importing underscored helper functions that are not included via wildcard imports.", + "Ensured all split route modules retained full functional parity with the original implementation.", + ], + } + ], + }, + { + "category": "Release Management Updates", + "items": [ + { + "title": None, + "details": [ + "Moved versions through Testing → Completed states correctly:", + "v0.1.7 marked as Completed.", + "v0.1.8 added as Completed.", + "v0.1.9 added as Completed and restored as Current Version.", + "Testing advanced to v0.1.10.", + "Updated v0.1.9 release notes to document consistent job-matching and auto-approval behavior across all mail processing flows.", + "Verified no regressions in changelog structure or rendering after updates.", + ], + } + ], + }, + ], + }, + + + { + "version": "0.1.8", + "overview": [ + "This release focuses on making job matching and auto-approval behavior fully consistent across manual inbox actions, automatic mail imports, and the Re-parse all process. 
It also fixes a critical backend startup issue introduced in the re-parse logic.", + ], + "categories": [ + { + "category": "Key Changes", + "items": [ + { + "title": None, + "details": [ + "Introduced a single, shared job-matching helper based on a full unique key: From address, Backup software, Backup type, Job name.", + "Updated manual inbox approval to reuse existing jobs when the unique key matches, instead of relying on customer-only matching.", + "Aligned inbox Re-parse all auto-approve logic with the same shared matching behavior.", + "Fixed automatic mail import auto-approve so it correctly creates a JobRun, marks the mail as approved, and moves the mail to history when a matching job exists.", + ], + } + ], + }, + { + "category": "Re-parse All Improvements", + "items": [ + { + "title": None, + "details": [ + "Auto-approve is now executed during Re-parse all, not only on initial mail import.", + "After re-parsing, all successfully parsed mails without a linked job are re-evaluated against existing jobs using the full unique key.", + "When a matching active job with auto-approve enabled is found, the mail is automatically approved, linked to the job, moved to history, and a corresponding job run is created and shown in Job History.", + ], + } + ], + }, + { + "category": "Fixes", + "items": [ + { + "title": None, + "details": [ + "Resolved an issue where Re-parse all previously only updated parse metadata and skipped auto-approve logic, causing historical mails not to appear in job history.", + "Fixed a SyntaxError in the re-parse auto-approve logic that caused backend startup failures (Bad Gateway).", + "Corrected try/except structure and indentation to ensure re-parse auto-approve runs safely per mail without breaking the overall process.", + ], + } + ], + }, + { + "category": "Result", + "items": [ + { + "title": None, + "details": [ + "Job matching and auto-approval behavior is now consistent across all mail processing flows.", + "Historical mails are correctly linked to jobs and visible in job history.", + "Backend stability during startup and re-parse operations is restored.", + ], + } + ], + }, + ], + }, + + + { + "version": "0.1.7", + "overview": [ + "This release introduces export and import functionality for approved jobs and significantly improves parser reliability, job approval logic, and overall stability.", + ], + "categories": [ + { + "category": "Key Features", + "items": [ + { + "title": None, + "details": [ + "Introduced export and import functionality for approved jobs using JSON.", + "Import process automatically creates missing customers and updates existing jobs based on a unique job identity to prevent duplicates.", + ], + } + ], + }, + { + "category": "Versioning & Changelog", + "items": [ + { + "title": None, + "details": [ + "Promoted version v0.1.7 from Testing to Completed.", + "Introduced v0.1.8 as the new Testing release.", + "Updated the changelog structure and testing notes to reflect active export and import functionality.", + ], + } + ], + }, + { + "category": "Parser Enhancements", + "items": [ + { + "title": "Boxafe", + "details": [ + "Improved parsing for Shared Drives and Domain Accounts and handling of Warning statuses.", + "Corrected object detection logic to prevent false object creation when no object data is present.", + "Removed object parsing for Shared Drives backups entirely.", + ], + }, + { + "title": "Synology Hyper Backup", + "details": [ + "Added full support for Dutch notification emails.", + "Improved status detection for Dutch 
phrasing.", + "Confirmed that no objects are parsed for Hyper Backup jobs.", + ], + }, + { + "title": "Veeam", + "details": [ + "Added support for Scale-out Backup Repository notifications including storage capacity metrics.", + "Added support for Veeam Health Check reports with correct object filtering.", + ], + }, + ], + }, + { + "category": "Job Approval & Auto-Approval Logic", + "items": [ + { + "title": None, + "details": [ + "Refined approved job matching logic to prevent cross-customer approvals.", + "Improved auto-approve behavior during re-parse, inbox reprocessing, and Graph imports.", + "Enhanced resilience against case, whitespace, unicode, and hidden formatting differences.", + "Simplified matching to rely primarily on a normalized From address while ensuring parser-consistent values.", + "Ensured deterministic behavior during reprocessing by preventing mutation of message data.", + ], + } + ], + }, + { + "category": "Stability Fixes", + "items": [ + { + "title": None, + "details": [ + "Fixed crashes and approval errors caused by undefined or incorrect job name handling.", + "Resolved duplicate job record issues including NULL customer IDs blocking auto-approval.", + "Ensured consistent JobRun creation and mail linking during automatic imports and re-parsing.", + ], + } + ], + }, + { + "category": "Notes", + "items": [ + { + "title": None, + "details": [ + "Previously approved jobs are expected to be recreated due to changes in approval matching logic.", + ], + } + ], + }, + ], + }, + { + "version": "0.1.6", + "categories": [ + { + "category": "Fixed", + "items": [ + { + "title": None, + "details": [ + "Corrected auto-approve logic to ensure it is properly applied during automatic mail imports.", + "Prevented the Re-parse all action from re-processing emails that were already approved.", + "Ensured approved status is always respected and never overwritten during re-parsing or automatic imports.", + "Fixed multiple Jinja2 TemplateSyntaxError issues in the base layout that caused 500 Internal Server Errors.", + "Restored correct rendering of all pages affected by template errors, including Dashboard, Parsers, and Changelog.", + "Resolved Changelog page rendering issues by fixing dictionary access in templates and avoiding conflicts with built-in methods.", + ], + } + ], + }, + { + "category": "Added", + "items": [ + { + "title": None, + "details": [ + "Introduced a centralized Changelog page containing: Active production version, Testing version, Planned / Todo items, and Completed changes.", + "Added the Changelog entry point to the main navigation.", + "Applied a clear versioning convention, e.g. 
v0.1.7 for testing releases.", + "Marked version 0.1.6 as the active production release.", + ], + } + ], + }, + { + "category": "Planned", + "items": [ + { + "title": None, + "details": [ + "Export and import of jobs to allow restoring approved jobs after a clean installation.", + "Always register “New license key is not available” as an error.", + "Support for a scale-out backup repository Cloud Connect Immutable parser.", + "Ability to attach EML files to Daily Jobs and Job Details.", + "Fix for Light/Dark theme switching so users can properly change themes.", + "Restrict ticket creation and editing to Operator and Admin roles only.", + ], + } + ], + }, + { + "category": "Known Bugs", + "items": [ + { + "title": None, + "details": [ + "Emails that were previously approved remain in the Inbox instead of being removed, even though they appear auto-approved and linked to Jobs.", + ], + } + ], + }, + ], + }, + { + "version": "0.1.5", + "overview": [ + "This release focuses on restoring Microsoft Graph functionality, improving application reliability, and introducing a robust reset mechanism to allow a clean restart of the application state.", + ], + "categories": [ + { + "category": "Key Fixes", + "items": [ + { + "title": None, + "details": [ + "Restored Microsoft Graph folder retrieval by fixing an incorrect import that caused a ModuleNotFoundError.", + "Resolved failures in the automatic mail importer caused by signal-based timeout handling by replacing it with a thread-safe mechanism.", + "Fixed backend startup crashes and Bad Gateway errors related to the automatic mail importer.", + "Implemented missing backend logic required for automatic imports to function correctly.", + ], + } + ], + }, + { + "category": "New Features", + "items": [ + { + "title": None, + "details": [ + "Added an Application Reset option in the Settings page.", + "Introduced a confirmation step to prevent accidental resets.", + ], + } + ], + }, + { + "category": "Improvements & Changes", + "items": [ + { + "title": None, + "details": [ + "Implemented full backend support for a complete application reset.", + "Reset now clears all application data, including: approved and pending jobs, imported and processed emails, daily job runs, logs, and user-defined settings (system defaults are preserved).", + "Ensured database cleanup runs in the correct order to respect foreign key constraints.", + "Aligned automatic mail import logic with the existing manual import flow for consistent behavior.", + "Applied the automatic import cutoff date directly via a Microsoft Graph $filter, leaving older emails untouched in the inbox.", + ], + } + ], + }, + { + "category": "Result", + "items": [ + { + "title": None, + "details": [ + "Graph API functionality is fully restored.", + "Automatic mail import runs reliably on its configured schedule.", + "The application can now be safely reset to a clean, fresh-install state when needed.", + ], + } + ], + }, + ], + }, + { + "version": "0.1.4", + "overview": [ + "This release focuses on improving backend stability, database reliability, and consistency in object parsing and mail handling.", + ], + "categories": [ + { + "category": "Key Changes", + "items": [ + { + "title": None, + "details": [ + "Database migrations for tickets and remarks were stabilized by running each migration in its own transaction scope, preventing closed-connection errors during startup.", + "Backend startup issues causing Gunicorn failures and 502 Bad Gateway errors were resolved.", + "The title field was fully 
removed from tickets and remarks, simplifying both backend validation and UI forms to use only descriptive content.", + "Manual mail imports were aligned with the Re-parse all behavior, ensuring immediate and consistent object detection.", + "Object visibility on the Daily Jobs page was corrected for previously approved jobs.", + "Manual imports were hardened against Microsoft Graph timeouts by adding DNS preflight checks and safely skipping message moves when Graph is unreachable.", + ], + } + ], + }, + { + "category": "Improvements", + "items": [ + { + "title": None, + "details": [ + "Eliminated ResourceClosedError exceptions during backend boot.", + "Increased reliability of migrations and overall application startup.", + "Ensured object parsing is consistently re-evaluated on every job run, with correct detection of added or removed objects.", + "Prevented internal server errors and Gunicorn worker timeouts caused by long-running external Graph operations.", + ], + } + ], + }, + { + "category": "Result", + "items": [ + { + "title": None, + "details": [ + "Overall, v0.1.4 significantly improves robustness, consistency, and fault tolerance across database migrations, job parsing, and manual mail imports.", + ], + } + ], + }, + ], + }, + { + "version": "0.1.3", + "categories": [ + { + "category": "Logging & Stability", + "items": [ + { + "title": None, + "details": [ + "Fixed logging persistence so log entries are consistently stored in the database.", + "Resolved cases where certain log events were not stored due to object lifecycle handling.", + "Improved reliability of log creation during background/asynchronous processes.", + "Corrected log retrieval so stored logs are properly fetched and shown in the web UI.", + "Added pagination to the logging overview (20 entries per page).", + "Extended the logging view to show all available log fields and fixed missing columns in the UI.", + ], + } + ], + }, + { + "category": "UI & Table Layout Improvements", + "items": [ + { + "title": None, + "details": [ + "Improved the logging page usability by placing pagination controls at the top and keeping them available at the bottom.", + "Increased logging table width to better fit a 1080p layout.", + "Fixed column layout so all columns remain in consistent positions regardless of content length.", + "Updated status styling to use colored text only within the status column (Success, Warning, Error/Failed, Missed), including clear differentiation for overrides.", + "Fixed JavaScript errors in the Daily Jobs popup that prevented rendering.", + ], + } + ], + }, + { + "category": "Jobs & Daily Jobs Enhancements", + "items": [ + { + "title": None, + "details": [ + "Standardized default sorting for both Jobs and Daily Jobs tables (Customer → Backup → Type → Job name).", + "Persisted the Daily Jobs start date setting in the database and ensured it reloads correctly in the Settings UI.", + "Corrected missed-status calculation to start from the configured Daily Jobs start date.", + "Improved Daily Jobs table readability: moved the number of runs into a dedicated Runs column, prevented layout shifting caused by variable text in the Last result column, restored the original Runs visual representation and adjusted placement for better readability, and reduced the Last result column width so only status text is shown and the Runs column remains visible.", + ], + } + ], + }, + { + "category": "Parsing & Data Normalization", + "items": [ + { + "title": None, + "details": [ + "Stripped retry suffixes like (Retry 1), 
(Retry 2), etc. from job names so retries don’t create separate job identities.", + "Extended the NAKIVO parser to support VMware Replication job emails: detects job type (Backup vs Replication) based on email content, improves replication job name parsing, extracts VM names from the Objects/Virtual Machines section, and maps overall job status correctly for replication reports.", + ], + } + ], + }, + { + "category": "Tickets & Remarks", + "items": [ + { + "title": None, + "details": [ + "Added database schema for globally unique, persistent tickets linked to job runs for long-term reporting (new tables: tickets, ticket_scopes, ticket_job_runs; ticket codes format TYYYYMMDD.NNNN; tickets require at least one customer scope).", + "Added database schema for remarks with scoped attachment and persistent linkage to job runs (new tables: remarks, remark_scopes, remark_job_runs).", + "Implemented a new Tickets page with tabbed navigation (Tickets / Remarks): overviews with filtering, detail views showing scopes and linked job runs, indicators in Daily Jobs for active tickets/remarks, management in the job run popup, consistent icons, and backend API endpoints for listing/creating/updating/resolving/linking tickets and remarks plus an endpoint to retrieve all alerts for a specific job run.", + ], + } + ], + }, + ], + }, + { + "version": "0.1.2", + "overview": [ + "This release focuses on improved parser support, more robust data cleanup, and a fully reworked in-app logging and object persistence system.", + ], + "categories": [ + { + "category": "Parser & Support", + "items": [ + { + "title": None, + "details": [ + "Extended the Synology Hyper Backup mail parser with proper recognition of Strato HiDrive backups.", + "Added support for parsing job names from the “Backup Task:” field for Strato HiDrive.", + "Correct handling of successful runs without listed objects.", + "Added a Strato HiDrive example to the parser templates for validation and reference.", + ], + } + ], + }, + { + "category": "Administration & Cleanup", + "items": [ + { + "title": None, + "details": [ + "Introduced an admin-only action to delete all jobs in a single operation.", + "Ensured related run mails are moved back to the Inbox when jobs are deleted.", + "Fixed foreign key constraint issues by enforcing the correct deletion order: run_object_links first, job_object_links next, then job runs and jobs.", + "Stabilized the Delete all jobs action to fully clean up all related data.", + ], + } + ], + }, + { + "category": "Logging", + "items": [ + { + "title": None, + "details": [ + "Moved logging away from container/stdout logging to in-app logging.", + "Introduced AdminLog-based logging for: mail import, auto-approval, manual job approval, and job deletion.", + "Added detailed logging per imported and auto-approved email.", + "Added summary logging at the end of each mail import run.", + "Ensured all relevant events are logged exclusively via the AdminLog table and visible on the Logging page.", + ], + } + ], + }, + { + "category": "Object Persistence", + "items": [ + { + "title": None, + "details": [ + "Restored persistence of parsed objects after manual approval of inbox mails.", + "Restored persistence of parsed objects during auto-approval (reparse-all).", + "Ensured objects from approved mails are: upserted into customer_objects, linked to jobs via job_object_links (with first/last seen tracking), and linked to runs via run_object_links (with status and error details).", + "Added centralized helper logic to ensure 
consistent object persistence.", + "Added an admin-only maintenance action to backfill missing object links for already approved runs.", + "Object persistence failures no longer block mail approval.", + "Daily Jobs and Run detail views correctly display objects again for both new and historical runs after backfilling.", + ], + } + ], + }, + ], + }, + ], + } + + return render_template("main/changelog.html", changelog=changelog) diff --git a/containers/backupchecks/src/backend/app/main/routes_core.py b/containers/backupchecks/src/backend/app/main/routes_core.py new file mode 100644 index 0000000..5388fae --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_core.py @@ -0,0 +1,310 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _format_datetime, _get_database_size_bytes, _apply_overrides_to_run, _format_bytes, _get_free_disk_bytes, _infer_schedule_map_from_runs + +@main_bp.route("/") +@login_required +def dashboard(): + # Inbox open items + try: + inbox_query = MailMessage.query + if hasattr(MailMessage, "location"): + inbox_query = inbox_query.filter(MailMessage.location == "inbox") + inbox_count = int(inbox_query.count() or 0) + except Exception: + inbox_count = 0 + + # Daily job status counters for today (Europe/Amsterdam) + try: + from zoneinfo import ZoneInfo + + tz = _get_ui_timezone() + except Exception: + tz = None + + # Robust fallback: the dashboard should never degrade into UTC/None because it would + # incorrectly count not-yet-due jobs as Missed. + try: + if tz is None: + tz = ZoneInfo("Europe/Amsterdam") + except Exception: + tz = None + + today_date = datetime.now(tz).date() if tz else datetime.utcnow().date() + + if tz: + local_midnight = datetime( + year=today_date.year, + month=today_date.month, + day=today_date.day, + hour=0, + minute=0, + second=0, + tzinfo=tz, + ) + start_of_day = local_midnight.astimezone(datetime_module.timezone.utc).replace(tzinfo=None) + end_of_day = ( + local_midnight + timedelta(days=1) + ).astimezone(datetime_module.timezone.utc).replace(tzinfo=None) + else: + start_of_day = datetime( + year=today_date.year, + month=today_date.month, + day=today_date.day, + hour=0, + minute=0, + second=0, + ) + end_of_day = start_of_day + timedelta(days=1) + + weekday_idx = today_date.weekday() # 0=Mon..6=Sun + + jobs_success_count = 0 + jobs_success_override_count = 0 + jobs_expected_count = 0 + jobs_warning_count = 0 + jobs_error_count = 0 + jobs_missed_count = 0 + + try: + now_utc = datetime.utcnow().replace(tzinfo=datetime_module.timezone.utc) + now_local = now_utc.astimezone(tz) if tz else now_utc + + jobs = Job.query.join(Customer, isouter=True).all() + for job in jobs: + schedule_map = _infer_schedule_map_from_runs(job.id) + expected_times = schedule_map.get(weekday_idx) or [] + if not expected_times: + continue + + # Build expected datetimes for today in UI local time. 
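    # NOTE: _infer_schedule_map_from_runs is assumed to return a mapping of weekday
    # index (0 = Monday .. 6 = Sunday) to a list of "HH:MM" strings derived from the
    # job's historical runs, e.g. {0: ["02:00"], 5: ["02:00", "14:00"]}. Entries that
    # do not parse as "HH:MM" are skipped below instead of failing the whole dashboard.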
+ expected_dt_local: list[datetime] = [] + if tz: + for tstr in expected_times: + try: + hh, mm = [int(x) for x in (tstr or "").split(":", 1)] + except Exception: + continue + expected_dt_local.append( + datetime( + year=today_date.year, + month=today_date.month, + day=today_date.day, + hour=hh, + minute=mm, + second=0, + tzinfo=tz, + ) + ) + expected_dt_local = sorted(expected_dt_local) + + runs_for_day = ( + JobRun.query.filter( + JobRun.job_id == job.id, + JobRun.run_at >= start_of_day, + JobRun.run_at < end_of_day, + ) + .order_by(JobRun.run_at.asc()) + .all() + ) + + status = "" + override_applied = False + if runs_for_day: + last_run = runs_for_day[-1] + try: + status_display, override_applied, _override_level, _ov_id, _ov_reason = _apply_overrides_to_run(job, last_run) + status_raw = (status_display or last_run.status or "").strip() + status = status_raw.lower() + if override_applied and ("override" in status or status_display): + # Normalize to success for dashboard bucketing when an override is applied. + status = "success" + except Exception: + status = (last_run.status or "").strip().lower() + override_applied = False + else: + status = "" + + # Determine if the job is still expected to run later today. + is_expected = False + if tz and expected_dt_local: + last_run_local = None + if runs_for_day and getattr(runs_for_day[-1], "run_at", None): + try: + dt = runs_for_day[-1].run_at + if dt.tzinfo is None: + dt = dt.replace(tzinfo=datetime_module.timezone.utc) + last_run_local = dt.astimezone(tz) + except Exception: + last_run_local = None + + anchor = last_run_local or datetime( + year=today_date.year, + month=today_date.month, + day=today_date.day, + hour=0, + minute=0, + second=0, + tzinfo=tz, + ) + + next_expected = None + for edt in expected_dt_local: + if edt > anchor: + next_expected = edt + break + + if next_expected is not None and now_local < next_expected: + is_expected = True + + # Status precedence: + # - Warning/Error always reflect the latest run + # - Otherwise show Expected if a next run is still upcoming + # - Otherwise show Success (and Success override) when a run exists + # - Otherwise show Missed once the last expected window has passed + if status == "warning": + jobs_warning_count += 1 + elif status in ("error", "failed", "failure"): + jobs_error_count += 1 + elif is_expected: + jobs_expected_count += 1 + elif override_applied and runs_for_day: + jobs_success_override_count += 1 + elif status in ("success", "ok") and runs_for_day: + jobs_success_count += 1 + else: + # No successful run and no upcoming expected run -> missed + jobs_missed_count += 1 + except Exception: + # Keep zeros on any unexpected failure + pass + + + # System status (same helpers as Settings) + db_size_bytes = _get_database_size_bytes() + free_disk_bytes = _get_free_disk_bytes() + db_size_human = _format_bytes(db_size_bytes) + free_disk_human = _format_bytes(free_disk_bytes) + free_disk_warning = False + try: + free_disk_warning = free_disk_bytes is not None and free_disk_bytes < (2 * 1024 * 1024 * 1024) + except Exception: + free_disk_warning = False + + + # News (unread per user) + news_items = [] + try: + now = datetime.utcnow() + uid = getattr(current_user, "id", None) + if uid: + q = NewsItem.query.filter(NewsItem.active.is_(True)) + q = q.filter((NewsItem.publish_from.is_(None)) | (NewsItem.publish_from <= now)) + q = q.filter((NewsItem.publish_until.is_(None)) | (NewsItem.publish_until >= now)) + q = q.outerjoin( + NewsRead, + (NewsRead.news_item_id == NewsItem.id) & 
(NewsRead.user_id == uid), + ).filter(NewsRead.id.is_(None)) + q = q.order_by(NewsItem.pinned.desc(), NewsItem.publish_from.desc().nullslast(), NewsItem.created_at.desc()) + news_items = q.limit(10).all() + except Exception: + news_items = [] + + + return render_template( + "main/dashboard.html", + inbox_count=inbox_count, + jobs_success_count=jobs_success_count, + jobs_success_override_count=jobs_success_override_count, + jobs_expected_count=jobs_expected_count, + jobs_warning_count=jobs_warning_count, + jobs_error_count=jobs_error_count, + jobs_missed_count=jobs_missed_count, + db_size_human=db_size_human, + free_disk_human=free_disk_human, + free_disk_warning=free_disk_warning, + news_items=news_items, + ) + + +@main_bp.route("/logging") +@login_required +@roles_required("admin", "operator") +def logging_page(): + # Server-side view of AdminLog entries. + try: + page = int(request.args.get("page", "1")) + except ValueError: + page = 1 + if page < 1: + page = 1 + + per_page = 20 + query = AdminLog.query.order_by(AdminLog.created_at.desc().nullslast(), AdminLog.id.desc()) + total_items = query.count() + total_pages = max(1, math.ceil(total_items / per_page)) if total_items else 1 + if page > total_pages: + page = total_pages + + entries = ( + query.offset((page - 1) * per_page) + .limit(per_page) + .all() + ) + + rows = [] + for e in entries: + rows.append( + { + "created_at": _format_datetime(e.created_at), + "user": e.user or "", + "event_type": e.event_type or "", + "message": e.message or "", + "details": e.details or "", + } + ) + + return render_template( + "main/logging.html", + # The template expects `logs`. + logs=rows, + # Keep `rows` for backward-compatibility (if any). + rows=rows, + page=page, + total_pages=total_pages, + has_prev=page > 1, + has_next=page < total_pages, + ) + + +@main_bp.route("/theme", methods=["POST"]) +@login_required +def set_theme_preference(): + # Accept both field names to stay compatible with templates/UI. + # The navbar theme select uses name="theme". 
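    # Only "auto", "light" and "dark" are persisted; anything else falls back to "auto"
    # below. Resolving "auto" to an actual palette is assumed to happen client-side
    # (for example via a prefers-color-scheme check in the base template), not here.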
+ pref = ( + request.form.get("theme_preference") + or request.form.get("theme") + or "auto" + ).strip().lower() + if pref not in ("auto", "light", "dark"): + pref = "auto" + current_user.theme_preference = pref + db.session.commit() + return redirect(request.referrer or url_for("main.dashboard")) + + + +@main_bp.route("/set-active-role", methods=["POST"]) +@login_required +def set_active_role_route(): + role = (request.form.get("active_role") or "").strip() + try: + current_user.set_active_role(role) + except Exception: + # Fallback: store in session directly + role = (role or "").strip() + if role: + session["active_role"] = role + + # Always redirect to dashboard to avoid landing on a page without permissions + return redirect(url_for("main.dashboard")) diff --git a/containers/backupchecks/src/backend/app/main/routes_customers.py b/containers/backupchecks/src/backend/app/main/routes_customers.py new file mode 100644 index 0000000..9e53b7d --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_customers.py @@ -0,0 +1,212 @@ +from .routes_shared import * # noqa: F401,F403 + +@main_bp.route("/customers") +@login_required +@roles_required("admin", "operator", "viewer") +def customers(): + items = Customer.query.order_by(Customer.name.asc()).all() + + rows = [] + for c in items: + # Count jobs linked to this customer + try: + job_count = c.jobs.count() + except Exception: + job_count = 0 + rows.append( + { + "id": c.id, + "name": c.name, + "active": bool(c.active), + "job_count": job_count, + } + ) + + can_manage = current_user.is_authenticated and get_active_role() in ("admin", "operator") + + return render_template( + "main/customers.html", + customers=rows, + can_manage=can_manage, + ) + + +@main_bp.route("/customers/create", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def customers_create(): + name = (request.form.get("name") or "").strip() + active = bool(request.form.get("active")) + + if not name: + flash("Customer name is required.", "danger") + return redirect(url_for("main.customers")) + + existing = Customer.query.filter_by(name=name).first() + if existing: + flash("Customer already exists.", "danger") + return redirect(url_for("main.customers")) + + try: + customer = Customer(name=name, active=active) + db.session.add(customer) + db.session.commit() + flash("Customer created.", "success") + except Exception as exc: + db.session.rollback() + print(f"[customers] Failed to create customer: {exc}") + flash("Failed to create customer.", "danger") + + return redirect(url_for("main.customers")) + + +@main_bp.route("/customers//edit", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def customers_edit(customer_id: int): + customer = Customer.query.get_or_404(customer_id) + + name = (request.form.get("name") or "").strip() + active = bool(request.form.get("active")) + + if not name: + flash("Customer name is required.", "danger") + return redirect(url_for("main.customers")) + + existing = Customer.query.filter( + Customer.id != customer.id, Customer.name == name + ).first() + if existing: + flash("Customer already exists.", "danger") + return redirect(url_for("main.customers")) + + try: + customer.name = name + customer.active = active + db.session.commit() + flash("Customer updated.", "success") + except Exception as exc: + db.session.rollback() + print(f"[customers] Failed to update customer: {exc}") + flash("Failed to update customer.", "danger") + + return redirect(url_for("main.customers")) + + 
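+# The CSV export/import routes further below exchange a two-column layout; an
+# illustrative payload (customer names are made up):
+#   name,active
+#   Acme Corp,1
+#   Example Ltd,0
+# The header row is optional, and "active" also accepts true/false, yes/no, y/n, active/inactive.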
+@main_bp.route("/customers//delete", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def customers_delete(customer_id: int): + customer = Customer.query.get_or_404(customer_id) + + try: + db.session.delete(customer) + db.session.commit() + flash("Customer deleted.", "success") + except Exception as exc: + db.session.rollback() + print(f"[customers] Failed to delete customer: {exc}") + flash("Failed to delete customer.", "danger") + + return redirect(url_for("main.customers")) + + +@main_bp.route("/customers/export.csv") +@login_required +@roles_required("admin", "operator") +def customers_export(): + import csv + from io import StringIO + + items = Customer.query.order_by(Customer.name.asc()).all() + buf = StringIO() + writer = csv.writer(buf) + writer.writerow(["name", "active"]) + for c in items: + writer.writerow([c.name, "1" if c.active else "0"]) + + out = buf.getvalue().encode("utf-8") + return Response( + out, + mimetype="text/csv; charset=utf-8", + headers={"Content-Disposition": "attachment; filename=customers.csv"}, + ) + + +@main_bp.route("/customers/import", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def customers_import(): + file = request.files.get("file") + if not file or not getattr(file, "filename", ""): + flash("No file selected.", "warning") + return redirect(url_for("main.customers")) + + try: + raw = file.read() + text = raw.decode("utf-8-sig", errors="replace") + except Exception: + flash("Failed to read the uploaded file.", "danger") + return redirect(url_for("main.customers")) + + import csv + from io import StringIO + + created = 0 + updated = 0 + skipped = 0 + + try: + reader = csv.reader(StringIO(text)) + rows = list(reader) + except Exception: + flash("Invalid CSV format.", "danger") + return redirect(url_for("main.customers")) + + if not rows: + flash("CSV file is empty.", "warning") + return redirect(url_for("main.customers")) + + # If first row looks like a header, skip it + header = [c.strip().lower() for c in (rows[0] or [])] + start_idx = 1 if ("name" in header or "customer" in header) else 0 + + for r in rows[start_idx:]: + if not r: + continue + name = (r[0] or "").strip() + if not name: + skipped += 1 + continue + + active_val = None + if len(r) >= 2: + a = (r[1] or "").strip().lower() + if a in ("1", "true", "yes", "y", "active"): + active_val = True + elif a in ("0", "false", "no", "n", "inactive"): + active_val = False + + existing = Customer.query.filter_by(name=name).first() + if existing: + if active_val is not None: + existing.active = active_val + updated += 1 + else: + skipped += 1 + else: + c = Customer(name=name, active=True if active_val is None else active_val) + db.session.add(c) + created += 1 + + try: + db.session.commit() + flash(f"Import finished. 
Created: {created}, Updated: {updated}, Skipped: {skipped}.", "success") + except Exception as exc: + db.session.rollback() + current_app.logger.exception(f"Failed to import customers: {exc}") + flash("Failed to import customers.", "danger") + + return redirect(url_for("main.customers")) + + diff --git a/containers/backupchecks/src/backend/app/main/routes_daily_jobs.py b/containers/backupchecks/src/backend/app/main/routes_daily_jobs.py new file mode 100644 index 0000000..189d0b5 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_daily_jobs.py @@ -0,0 +1,469 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _format_datetime, _get_or_create_settings, _apply_overrides_to_run, _infer_schedule_map_from_runs + +# Grace window for today's Expected/Missed transition. +# A job is only marked Missed after the latest expected time plus this grace. +MISSED_GRACE_WINDOW = timedelta(hours=1) + +@main_bp.route("/daily-jobs") +@login_required +@roles_required("admin", "operator", "viewer") +def daily_jobs(): + # Determine target date (default: today) in Europe/Amsterdam + date_str = request.args.get("date") + try: + from zoneinfo import ZoneInfo + tz = _get_ui_timezone() + except Exception: + tz = None + + try: + if date_str: + target_date = datetime.strptime(date_str, "%Y-%m-%d").date() + else: + target_date = datetime.now(tz).date() if tz else datetime.utcnow().date() + except Exception: + target_date = datetime.now(tz).date() if tz else datetime.utcnow().date() + + + settings = _get_or_create_settings() + missed_start_date = getattr(settings, "daily_jobs_start_date", None) + + # Day window: treat run_at as UTC-naive timestamps stored in UTC (existing behavior) + # Note: if your DB stores local-naive timestamps, this still works because the same logic + # is used consistently in schedule inference and details. 
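+ # Illustrative example (assuming the UI timezone is Europe/Amsterdam, UTC+2 in summer):
+ # target_date 2024-06-10 -> start_of_day = 2024-06-09 22:00 (UTC-naive), end_of_day = 2024-06-10 22:00 (UTC-naive).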
+ if tz: + local_midnight = datetime( + year=target_date.year, + month=target_date.month, + day=target_date.day, + hour=0, + minute=0, + second=0, + tzinfo=tz, + ) + start_of_day = local_midnight.astimezone(datetime_module.timezone.utc).replace(tzinfo=None) + end_of_day = (local_midnight + timedelta(days=1)).astimezone(datetime_module.timezone.utc).replace(tzinfo=None) + else: + start_of_day = datetime( + year=target_date.year, + month=target_date.month, + day=target_date.day, + hour=0, + minute=0, + second=0, + ) + end_of_day = start_of_day + timedelta(days=1) + + def _to_local(dt_utc): + if not dt_utc or not tz: + return dt_utc + try: + if dt_utc.tzinfo is None: + dt_utc = dt_utc.replace(tzinfo=datetime_module.timezone.utc) + return dt_utc.astimezone(tz) + except Exception: + return dt_utc + + def _bucket_15min(dt_utc): + d = _to_local(dt_utc) + if not d: + return None + minute_bucket = (d.minute // 15) * 15 + return f"{d.hour:02d}:{minute_bucket:02d}" + + weekday_idx = target_date.weekday() # 0=Mon..6=Sun + + jobs = ( + Job.query.join(Customer, isouter=True) + .order_by(Customer.name.asc().nullslast(), Job.backup_software.asc(), Job.backup_type.asc(), Job.job_name.asc()) + .all() + ) + + rows = [] + for job in jobs: + schedule_map = _infer_schedule_map_from_runs(job.id) + expected_times = schedule_map.get(weekday_idx) or [] + if not expected_times: + continue + + runs_for_day = ( + JobRun.query.filter( + JobRun.job_id == job.id, + JobRun.run_at >= start_of_day, + JobRun.run_at < end_of_day, + ) + .order_by(JobRun.run_at.asc()) + .all() + ) + + run_count = len(runs_for_day) + customer_name = job.customer.name if job.customer else "" + + # Ticket/Remark indicators for this job on this date + # Tickets: active-from date should apply to subsequent runs until resolved. + has_active_ticket = False + has_active_remark = False + try: + t_exists = db.session.execute( + text( + """ + SELECT 1 + FROM tickets t + JOIN ticket_scopes ts ON ts.ticket_id = t.id + WHERE ts.job_id = :job_id + AND t.active_from_date <= :target_date + AND ( + t.resolved_at IS NULL + OR ((t.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) >= :target_date + ) + LIMIT 1 + """ + ), + {"job_id": job.id, "target_date": target_date}, + ).first() + + has_active_ticket = bool(t_exists) + + r_exists = db.session.execute( + text( + """ + SELECT 1 + FROM remarks r + JOIN remark_scopes rs ON rs.remark_id = r.id + WHERE rs.job_id = :job_id + AND COALESCE( + r.active_from_date, + ((r.start_date AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) + ) <= :target_date + AND ( + r.resolved_at IS NULL + OR ((r.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) >= :target_date + ) + LIMIT 1 + """ + ), + {"job_id": job.id, "target_date": target_date}, + ).first() + + has_active_remark = bool(r_exists) + except Exception: + has_active_ticket = False + has_active_remark = False + + # We show a single row per job for today. + last_remark_excerpt = "" + last_override_applied = False + if run_count > 0: + last_run = runs_for_day[-1] + try: + status_display, override_applied, override_level, _ov_id, _ov_reason = _apply_overrides_to_run(job, last_run) + # Expose override information so the status dot can render the blue override indicator. + last_override_applied = bool(override_applied) + # If this run is flagged as missed, ensure we always expose a + # concrete status so the UI can render the missed status dot. 
+ if getattr(last_run, "missed", False): + last_status = status_display or "Missed" + else: + last_status = status_display or (last_run.status or "-") + except Exception: + last_status = last_run.status or "-" + last_override_applied = False + display_time = _bucket_15min(last_run.run_at) or (expected_times[-1] if expected_times else "") + + last_remark = getattr(last_run, "remark", None) or "" + last_remark_excerpt = " | ".join([ln.strip() for ln in str(last_remark).replace("\r\n", "\n").split("\n") if ln.strip()]) + if len(last_remark_excerpt) > 140: + last_remark_excerpt = last_remark_excerpt[:137] + "..." + else: + # Always show the latest expected time for rows without runs. + latest_expected = expected_times[-1] if expected_times else "" + display_time = latest_expected + + # Before the configured start date we do not mark runs as 'Missed', + # but we still show the expected time and (when applicable) an 'Expected' status. + try: + today_local = datetime.now(tz).date() if tz else datetime.utcnow().date() + except Exception: + today_local = datetime.utcnow().date() + + if missed_start_date and target_date < missed_start_date: + if target_date > today_local: + last_status = "Expected" + elif target_date == today_local: + is_expected = False + try: + now_local = datetime.now(tz) if tz else datetime.utcnow() + latest_dt = None + + for tstr in expected_times: + try: + parts = (tstr or "").strip().split(":") + if len(parts) < 2: + continue + hh = int(parts[0]) + mm = int(parts[1]) + dt_local = datetime.combine(target_date, datetime.min.time()).replace(hour=hh, minute=mm) + if tz: + dt_local = dt_local.replace(tzinfo=tz) + if latest_dt is None or dt_local > latest_dt: + latest_dt = dt_local + except Exception: + continue + + # If we cannot parse any expected time, assume 'Expected'. + if latest_dt is None: + is_expected = True + else: + is_expected = now_local <= (latest_dt + MISSED_GRACE_WINDOW) + except Exception: + is_expected = True + + last_status = "Expected" if is_expected else "-" + else: + last_status = "-" + else: + # For today and future dates, show 'Expected' until the latest expected time has passed. 
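+ # Worked example: with expected times 06:00 and 22:00 and MISSED_GRACE_WINDOW = 1 hour, the row stays "Expected" until 23:00 local time and only then flips to "Missed".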
+ if target_date > today_local: + last_status = "Expected" + elif target_date == today_local: + is_expected = False + try: + now_local = datetime.now(tz) if tz else datetime.utcnow() + latest_dt = None + + for tstr in expected_times: + try: + parts = (tstr or "").strip().split(":") + if len(parts) < 2: + continue + hh = int(parts[0]) + mm = int(parts[1]) + dt_local = datetime.combine(target_date, datetime.min.time()).replace(hour=hh, minute=mm) + if tz: + dt_local = dt_local.replace(tzinfo=tz) + if latest_dt is None or dt_local > latest_dt: + latest_dt = dt_local + except Exception: + continue + + # If we cannot parse any expected time, assume 'Expected' (better than incorrectly showing 'Missed') + if latest_dt is None: + is_expected = True + else: + is_expected = now_local <= (latest_dt + MISSED_GRACE_WINDOW) + except Exception: + is_expected = True + + last_status = "Expected" if is_expected else "Missed" + else: + last_status = "Missed" + rows.append( + { + "job_id": job.id, + "customer_name": customer_name, + "backup_software": job.backup_software or "", + "backup_type": job.backup_type or "", + "job_name": job.job_name or "", + "expected_time": display_time, + "last_status": last_status, + "last_override_applied": bool(last_override_applied), + "run_count": run_count, + "has_active_ticket": bool(has_active_ticket), + "has_active_remark": bool(has_active_remark), + "last_remark_excerpt": last_remark_excerpt, + } + ) + + # Sort: Customer -> Backup -> Type -> Job name + # (case-insensitive, NULL/empty last by virtue of empty string) + rows.sort( + key=lambda r: ( + (r.get("customer_name") or "").lower(), + (r.get("backup_software") or "").lower(), + (r.get("backup_type") or "").lower(), + (r.get("job_name") or "").lower(), + ) + ) + + target_date_str = target_date.strftime("%Y-%m-%d") + return render_template("main/daily_jobs.html", rows=rows, target_date_str=target_date_str) + + +@main_bp.route("/daily-jobs/details") +@login_required +@roles_required("admin", "operator", "viewer") +def daily_jobs_details(): + try: + job_id = int(request.args.get("job_id", "0")) + except ValueError: + job_id = 0 + date_str = request.args.get("date") + + if job_id <= 0 or not date_str: + return jsonify({"status": "error", "message": "Invalid parameters."}), 400 + + try: + target_date = datetime.strptime(date_str, "%Y-%m-%d").date() + except Exception: + return jsonify({"status": "error", "message": "Invalid date."}), 400 + + job = Job.query.get_or_404(job_id) + + try: + from zoneinfo import ZoneInfo + tz = _get_ui_timezone() + except Exception: + tz = None + + if tz: + local_midnight = datetime( + year=target_date.year, + month=target_date.month, + day=target_date.day, + hour=0, + minute=0, + second=0, + tzinfo=tz, + ) + start_of_day = local_midnight.astimezone(datetime_module.timezone.utc).replace(tzinfo=None) + end_of_day = (local_midnight + timedelta(days=1)).astimezone(datetime_module.timezone.utc).replace(tzinfo=None) + else: + start_of_day = datetime( + year=target_date.year, + month=target_date.month, + day=target_date.day, + hour=0, + minute=0, + second=0, + ) + end_of_day = start_of_day + timedelta(days=1) + + runs_for_day = ( + JobRun.query.filter( + JobRun.job_id == job.id, + JobRun.run_at >= start_of_day, + JobRun.run_at < end_of_day, + ) + .order_by(JobRun.run_at.desc()) + .all() + ) + + runs_payload = [] + for run in runs_for_day: + msg = MailMessage.query.get(run.mail_message_id) if run.mail_message_id else None + mail_meta = None + has_eml = False + mail_message_id = run.mail_message_id + 
body_html = "" + if msg: + mail_meta = { + "from_address": msg.from_address or "", + "subject": msg.subject or "", + "received_at": _format_datetime(msg.received_at), + } + body_html = msg.html_body or "" + has_eml = bool(getattr(msg, "eml_stored_at", None)) + + objects_payload = [] + # Preferred: read persisted objects for this run from run_object_links/customer_objects (Step 2). + try: + rows = ( + db.session.execute( + text( + """ + SELECT + co.object_name AS name, + rol.status AS status, + rol.error_message AS error_message + FROM run_object_links rol + JOIN customer_objects co ON co.id = rol.customer_object_id + WHERE rol.run_id = :run_id + ORDER BY co.object_name ASC + """ + ), + {"run_id": run.id}, + ) + .mappings() + .all() + ) + for r in rows: + objects_payload.append( + { + "name": r.get("name") or "", + "type": "", + "status": r.get("status") or "", + "error_message": r.get("error_message") or "", + } + ) + except Exception: + # Fallback for older data / during upgrades + try: + objects = run.objects.order_by(JobObject.object_name.asc()).all() + except Exception: + objects = list(run.objects or []) + for obj in objects: + objects_payload.append( + { + "name": obj.object_name, + "type": getattr(obj, "object_type", "") or "", + "status": obj.status or "", + "error_message": obj.error_message or "", + } + ) + + # If no run-linked objects exist yet, fall back to objects parsed/stored on the mail message. + if (not objects_payload) and msg: + try: + for mo in ( + MailObject.query.filter_by(mail_message_id=msg.id) + .order_by(MailObject.object_name.asc()) + .all() + ): + objects_payload.append( + { + "name": mo.object_name or "", + "type": mo.object_type or "", + "status": mo.status or "", + "error_message": mo.error_message or "", + } + ) + except Exception: + pass + + status_display, override_applied, override_level, _ov_id, _ov_reason = _apply_overrides_to_run(job, run) + # Ensure missed runs always expose a concrete status for the UI. + if getattr(run, "missed", False): + status_display = status_display or "Missed" + + # When a run is flagged as missed, it may not have a parser status. + # Normalize it so the UI can always show the missed dot. 
+ if run.missed and not status_display: + status_display = "Missed" + + runs_payload.append( + { + "id": run.id, + "run_at": _format_datetime(run.run_at), + "status": status_display, + "remark": run.remark or "", + "missed": bool(run.missed), + "override_applied": bool(override_applied), + "override_level": override_level, + "mail_message_id": mail_message_id, + "has_eml": bool(has_eml), + "mail": mail_meta, + "body_html": body_html, + "objects": objects_payload, + } + ) + + job_payload = { + "id": job.id, + "customer_name": job.customer.name if job.customer else "", + "backup_software": job.backup_software or "", + "backup_type": job.backup_type or "", + "job_name": job.job_name or "", + } + + return jsonify({"status": "ok", "job": job_payload, "runs": runs_payload}) diff --git a/containers/backupchecks/src/backend/app/main/routes_feedback.py b/containers/backupchecks/src/backend/app/main/routes_feedback.py new file mode 100644 index 0000000..4e3e322 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_feedback.py @@ -0,0 +1,261 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _format_datetime + + +@main_bp.route("/feedback") +@login_required +@roles_required("admin", "operator", "viewer") +def feedback_page(): + item_type = (request.args.get("type") or "").strip().lower() + if item_type not in ("", "bug", "feature"): + item_type = "" + + # Default to showing both open and resolved items. Resolved items should remain + # visible for all users until an admin deletes them. + status = (request.args.get("status") or "all").strip().lower() + if status not in ("open", "resolved", "all"): + status = "all" + + q = (request.args.get("q") or "").strip() + + sort = (request.args.get("sort") or "votes").strip().lower() + if sort not in ("votes", "newest", "updated"): + sort = "votes" + + where = ["fi.deleted_at IS NULL"] + params = {"user_id": int(current_user.id)} + + if item_type: + where.append("fi.item_type = :item_type") + params["item_type"] = item_type + + if status != "all": + where.append("fi.status = :status") + params["status"] = status + + if q: + where.append("(fi.title ILIKE :q OR fi.description ILIKE :q OR COALESCE(fi.component,'') ILIKE :q)") + params["q"] = f"%{q}%" + + where_sql = " AND ".join(where) + + if sort == "newest": + order_sql = "fi.created_at DESC" + elif sort == "updated": + order_sql = "fi.updated_at DESC" + else: + order_sql = "vote_count DESC, fi.created_at DESC" + + sql = text( + f""" + SELECT + fi.id, + fi.item_type, + fi.title, + fi.component, + fi.status, + fi.created_at, + fi.updated_at, + u.username AS created_by, + COALESCE(v.vote_count, 0) AS vote_count, + EXISTS ( + SELECT 1 + FROM feedback_votes fv + WHERE fv.feedback_item_id = fi.id + AND fv.user_id = :user_id + ) AS user_voted + FROM feedback_items fi + JOIN users u ON u.id = fi.created_by_user_id + LEFT JOIN ( + SELECT feedback_item_id, COUNT(*) AS vote_count + FROM feedback_votes + GROUP BY feedback_item_id + ) v ON v.feedback_item_id = fi.id + WHERE {where_sql} + ORDER BY {order_sql} + LIMIT 500 + """ + ) + + rows = db.session.execute(sql, params).mappings().all() + + items = [] + for r in rows: + items.append( + { + "id": int(r["id"]), + "item_type": (r["item_type"] or "").lower(), + "title": r["title"] or "", + "component": r["component"] or "", + "status": (r["status"] or "open").lower(), + "created_at": _format_datetime(r["created_at"]), + "updated_at": _format_datetime(r["updated_at"]), + "created_by": r["created_by"] or "-", + "vote_count": 
int(r["vote_count"] or 0), + "user_voted": bool(r["user_voted"]), + } + ) + + return render_template( + "main/feedback.html", + items=items, + item_type=item_type, + status=status, + q=q, + sort=sort, + ) + + +@main_bp.route("/feedback/new", methods=["GET", "POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def feedback_new(): + if request.method == "POST": + item_type = (request.form.get("item_type") or "").strip().lower() + if item_type not in ("bug", "feature"): + flash("Invalid type.", "danger") + return redirect(url_for("main.feedback_new")) + + title = (request.form.get("title") or "").strip() + description = (request.form.get("description") or "").strip() + component = (request.form.get("component") or "").strip() or None + + if not title or not description: + flash("Title and description are required.", "danger") + return redirect(url_for("main.feedback_new")) + + item = FeedbackItem( + item_type=item_type, + title=title, + description=description, + component=component, + status="open", + created_by_user_id=int(current_user.id), + ) + db.session.add(item) + db.session.commit() + + flash("Feedback item created.", "success") + return redirect(url_for("main.feedback_detail", item_id=item.id)) + + return render_template("main/feedback_new.html") + + +@main_bp.route("/feedback/") +@login_required +@roles_required("admin", "operator", "viewer") +def feedback_detail(item_id: int): + item = FeedbackItem.query.get_or_404(item_id) + if item.deleted_at is not None: + abort(404) + + vote_count = ( + db.session.query(db.func.count(FeedbackVote.id)) + .filter(FeedbackVote.feedback_item_id == item.id) + .scalar() + or 0 + ) + + user_voted = ( + FeedbackVote.query.filter( + FeedbackVote.feedback_item_id == item.id, + FeedbackVote.user_id == int(current_user.id), + ).first() + is not None + ) + + created_by = User.query.get(item.created_by_user_id) + created_by_name = created_by.username if created_by else "-" + + resolved_by_name = "" + if item.resolved_by_user_id: + resolved_by = User.query.get(item.resolved_by_user_id) + resolved_by_name = resolved_by.username if resolved_by else "" + + return render_template( + "main/feedback_detail.html", + item=item, + created_by_name=created_by_name, + resolved_by_name=resolved_by_name, + vote_count=int(vote_count), + user_voted=bool(user_voted), + ) + + +@main_bp.route("/feedback//vote", methods=["POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def feedback_vote(item_id: int): + item = FeedbackItem.query.get_or_404(item_id) + if item.deleted_at is not None: + abort(404) + + existing = FeedbackVote.query.filter( + FeedbackVote.feedback_item_id == item.id, + FeedbackVote.user_id == int(current_user.id), + ).first() + + if existing: + db.session.delete(existing) + db.session.commit() + flash("Vote removed.", "secondary") + else: + vote = FeedbackVote( + feedback_item_id=item.id, + user_id=int(current_user.id), + ) + db.session.add(vote) + try: + db.session.commit() + flash("Voted.", "success") + except Exception: + db.session.rollback() + flash("Could not vote.", "danger") + + ref = request.form.get("ref") or "detail" + if ref == "list": + return redirect(request.referrer or url_for("main.feedback_page")) + return redirect(url_for("main.feedback_detail", item_id=item.id)) + + +@main_bp.route("/feedback//resolve", methods=["POST"]) +@login_required +@roles_required("admin") +def feedback_resolve(item_id: int): + item = FeedbackItem.query.get_or_404(item_id) + if item.deleted_at is not None: + abort(404) + + 
action = (request.form.get("action") or "resolve").strip().lower() + if action not in ("resolve", "reopen"): + action = "resolve" + + if action == "resolve": + item.status = "resolved" + item.resolved_by_user_id = int(current_user.id) + item.resolved_at = datetime.utcnow() + flash("Marked as resolved.", "success") + else: + item.status = "open" + item.resolved_by_user_id = None + item.resolved_at = None + flash("Reopened.", "secondary") + + db.session.commit() + return redirect(url_for("main.feedback_detail", item_id=item.id)) + + +@main_bp.route("/feedback//delete", methods=["POST"]) +@login_required +@roles_required("admin") +def feedback_delete(item_id: int): + item = FeedbackItem.query.get_or_404(item_id) + if item.deleted_at is not None: + abort(404) + + item.deleted_at = datetime.utcnow() + item.deleted_by_user_id = int(current_user.id) + db.session.commit() + + flash("Feedback item deleted.", "success") + return redirect(url_for("main.feedback_page")) diff --git a/containers/backupchecks/src/backend/app/main/routes_inbox.py b/containers/backupchecks/src/backend/app/main/routes_inbox.py new file mode 100644 index 0000000..e3a38b8 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_inbox.py @@ -0,0 +1,675 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _format_datetime, _log_admin_event, _send_mail_message_eml_download + +import time + +@main_bp.route("/inbox") +@login_required +@roles_required("admin", "operator", "viewer") +def inbox(): + try: + page = int(request.args.get("page", "1")) + except ValueError: + page = 1 + if page < 1: + page = 1 + + per_page = 50 + + query = MailMessage.query + # Use location column if available; otherwise just return all + if hasattr(MailMessage, "location"): + query = query.filter(MailMessage.location == "inbox") + + total_items = query.count() + total_pages = max(1, math.ceil(total_items / per_page)) if total_items else 1 + if page > total_pages: + page = total_pages + + messages = ( + query.order_by( + MailMessage.received_at.desc().nullslast(), + MailMessage.id.desc(), + ) + .offset((page - 1) * per_page) + .limit(per_page) + .all() + ) + + rows = [] + for msg in messages: + rows.append( + { + "id": msg.id, + "from_address": msg.from_address or "", + "subject": msg.subject or "", + "received_at": _format_datetime(msg.received_at), + "backup_software": msg.backup_software or "", + "backup_type": msg.backup_type or "", + "job_name": msg.job_name or "", + "parsed_at": _format_datetime(msg.parsed_at), + "overall_status": msg.overall_status or "", + "overall_message": (msg.overall_message or ""), +"has_eml": bool(getattr(msg, "eml_stored_at", None)), + } + ) + + # Customers list for autocomplete in popup + customers = Customer.query.order_by(Customer.name.asc()).all() + customer_rows = [{"id": c.id, "name": c.name} for c in customers] + + has_prev = page > 1 + has_next = page < total_pages + + return render_template( + "main/inbox.html", + rows=rows, + page=page, + total_pages=total_pages, + has_prev=has_prev, + has_next=has_next, + customers=customer_rows, + ) + + +@main_bp.route("/inbox/message/") +@login_required +@roles_required("admin", "operator", "viewer") + +def inbox_message_detail(message_id: int): + msg = MailMessage.query.get_or_404(message_id) + + # Resolve customer name through linked Job, if any + customer_name = "" + if msg.job_id: + job = Job.query.get(msg.job_id) + if job and job.customer: + customer_name = job.customer.name or "" + + meta = { + "id": msg.id, + "from_address": 
msg.from_address or "", + "subject": msg.subject or "", + "received_at": _format_datetime(msg.received_at), + "backup_software": msg.backup_software or "", + "backup_type": msg.backup_type or "", + "job_name": msg.job_name or "", + "parsed_at": _format_datetime(msg.parsed_at), + "has_eml": bool(getattr(msg, "eml_stored_at", None)), + "customer_name": customer_name, + "approved": bool(getattr(msg, "approved", False)), + "overall_status": msg.overall_status or "", + "overall_message": (getattr(msg, "overall_message", None) or ""), + "location": getattr(msg, "location", "") or "", + "deleted_at": _format_datetime(getattr(msg, "deleted_at", None)), + "deleted_by": ( + (getattr(getattr(msg, "deleted_by_user", None), "username", "") or "") + if getattr(msg, "deleted_by_user", None) + else "" + ), + } + + if getattr(msg, "html_body", None): + body_html = msg.html_body + elif getattr(msg, "text_body", None): + escaped = ( + msg.text_body.replace("&", "&") + .replace("<", "<") + .replace(">", ">") + ) + body_html = f"
{escaped}
" + else: + body_html = "

No message content stored.

" + + from ..models import MailObject + objects = [ + { + "name": obj.object_name, + "type": getattr(obj, "object_type", "") or "", + "status": obj.status or "", + "error_message": obj.error_message or "", + } + for obj in MailObject.query.filter_by(mail_message_id=msg.id).order_by(MailObject.object_name.asc()).all() + ] + + return jsonify({"status": "ok", "meta": meta, "body_html": body_html, "objects": objects}) + + +@main_bp.route("/inbox/message//eml") +@login_required +@roles_required("admin", "operator", "viewer") +def inbox_message_eml(message_id: int): + msg = MailMessage.query.get_or_404(message_id) + return _send_mail_message_eml_download(msg) + + +@main_bp.route("/inbox/message//approve", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def inbox_message_approve(message_id: int): + msg = MailMessage.query.get_or_404(message_id) + + # Only allow approval from inbox + if getattr(msg, "location", "inbox") != "inbox": + flash("This message is no longer in the Inbox and cannot be approved here.", "warning") + return redirect(url_for("main.inbox")) + + customer_id_raw = request.form.get("customer_id", "").strip() + if not customer_id_raw: + flash("Please select a customer before approving.", "danger") + return redirect(url_for("main.inbox")) + + try: + customer_id = int(customer_id_raw) + except ValueError: + flash("Invalid customer selection.", "danger") + return redirect(url_for("main.inbox")) + + customer = Customer.query.get(customer_id) + if not customer: + flash("Selected customer not found.", "danger") + return redirect(url_for("main.inbox")) + + # Find existing Job by unique key: From + Backup + Type + Job name + job = find_matching_job(msg) + + if job: + # This key should be globally unique. If the selected customer differs, use the job's customer. + if job.customer_id != customer.id: + customer = Customer.query.get(job.customer_id) or customer + else: + # Create new Job for the selected customer using the same unique key fields + norm_from, store_backup, store_type, store_job = build_job_match_key(msg) + job = Job( + customer_id=customer.id, + from_address=norm_from, + backup_software=store_backup, + backup_type=store_type, + job_name=store_job, + active=True, + auto_approve=True, + ) + db.session.add(job) + db.session.flush() + + if not job: + job = Job( + customer_id=customer.id, + from_address=norm_from, + backup_software=store_backup, + backup_type=store_type, + job_name=norm_job, + ) + db.session.add(job) + db.session.flush() # ensure job.id is available + + # Create JobRun for this mail + run = JobRun( + job_id=job.id, + mail_message_id=msg.id, + # Some sources may not provide received_at; fall back to parsed_at/now. 
+ run_at=(msg.received_at or getattr(msg, "parsed_at", None) or datetime.utcnow()), + status=msg.overall_status or None, + missed=False, + ) + + # Optional remark + if hasattr(run, "remark"): + run.remark = getattr(msg, "overall_message", None) + + # Optional storage metrics (for capacity graphs) + if hasattr(run, 'storage_used_bytes') and hasattr(msg, 'storage_used_bytes'): + run.storage_used_bytes = msg.storage_used_bytes + if hasattr(run, 'storage_capacity_bytes') and hasattr(msg, 'storage_capacity_bytes'): + run.storage_capacity_bytes = msg.storage_capacity_bytes + if hasattr(run, 'storage_free_bytes') and hasattr(msg, 'storage_free_bytes'): + run.storage_free_bytes = msg.storage_free_bytes + if hasattr(run, 'storage_free_percent') and hasattr(msg, 'storage_free_percent'): + run.storage_free_percent = msg.storage_free_percent + db.session.add(run) + + # Update mail message to reflect approval + msg.job_id = job.id + if hasattr(msg, "approved"): + msg.approved = True + if hasattr(msg, "approved_at"): + msg.approved_at = datetime.utcnow() + if hasattr(msg, "approved_by_id"): + msg.approved_by_id = current_user.id + if hasattr(msg, "location"): + msg.location = "history" + + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + flash("Could not approve this job due to a database error.", "danger") + _log_admin_event("inbox_approve_error", f"Failed to approve message {msg.id}: {exc}") + return redirect(url_for("main.inbox")) + + + + # Persist objects for reporting (must not block approval) + try: + persist_objects_for_approved_run(customer.id, job.id, run.id, msg.id) + except Exception as exc: + _log_admin_event( + "object_persist_error", + f"Object persistence failed for approved message {msg.id} (job {job.id}, run {run.id}): {exc}", + ) + _log_admin_event( + "inbox_approve", + f"Approved message {msg.id} for customer '{customer.name}' as job {job.id}", + ) + flash(f"Job approved for customer '{customer.name}'.", "success") + return redirect(url_for("main.inbox")) + + +@main_bp.route("/inbox/message//delete", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def inbox_message_delete(message_id: int): + msg = MailMessage.query.get_or_404(message_id) + + # Only allow delete from inbox + if getattr(msg, "location", "inbox") != "inbox": + flash("This message is no longer in the Inbox and cannot be deleted here.", "warning") + return redirect(url_for("main.inbox")) + + if hasattr(msg, "location"): + msg.location = "deleted" + if hasattr(msg, "deleted_at"): + msg.deleted_at = datetime.utcnow() + if hasattr(msg, "deleted_by_user_id"): + msg.deleted_by_user_id = current_user.id + + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + flash("Could not delete this message due to a database error.", "danger") + _log_admin_event("inbox_delete_error", f"Failed to delete message {msg.id}: {exc}") + return redirect(url_for("main.inbox")) + + _log_admin_event("inbox_delete", f"Deleted inbox message {msg.id}") + flash("Message deleted from Inbox.", "success") + return redirect(url_for("main.inbox")) + + +@main_bp.route("/inbox/deleted") +@login_required +@roles_required("admin") +def inbox_deleted_mails(): + try: + page = int(request.args.get("page", "1")) + except ValueError: + page = 1 + if page < 1: + page = 1 + + per_page = 50 + + query = MailMessage.query + if hasattr(MailMessage, "location"): + query = query.filter(MailMessage.location == "deleted") + + total_items = query.count() + total_pages = max(1, 
math.ceil(total_items / per_page)) if total_items else 1 + if page > total_pages: + page = total_pages + + messages = ( + query.order_by( + MailMessage.deleted_at.desc().nullslast(), + MailMessage.id.desc(), + ) + .offset((page - 1) * per_page) + .limit(per_page) + .all() + ) + + rows = [] + for msg in messages: + deleted_by_name = "" + try: + if getattr(msg, "deleted_by_user", None): + deleted_by_name = msg.deleted_by_user.username or "" + except Exception: + deleted_by_name = "" + + rows.append( + { + "id": msg.id, + "from_address": msg.from_address or "", + "subject": msg.subject or "", + "received_at": _format_datetime(msg.received_at), + "deleted_at": _format_datetime(getattr(msg, "deleted_at", None)), + "deleted_by": deleted_by_name, + "has_eml": bool(getattr(msg, "eml_stored_at", None)), + } + ) + + has_prev = page > 1 + has_next = page < total_pages + + return render_template( + "main/inbox_deleted.html", + rows=rows, + page=page, + total_pages=total_pages, + has_prev=has_prev, + has_next=has_next, + ) + + +@main_bp.route("/inbox/deleted//restore", methods=["POST"]) +@login_required +@roles_required("admin") +def inbox_deleted_restore(message_id: int): + msg = MailMessage.query.get_or_404(message_id) + + if getattr(msg, "location", "") != "deleted": + flash("This message is not marked as deleted.", "warning") + return redirect(url_for("main.inbox_deleted_mails")) + + msg.location = "inbox" + if hasattr(msg, "deleted_at"): + msg.deleted_at = None + if hasattr(msg, "deleted_by_user_id"): + msg.deleted_by_user_id = None + + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + flash("Could not restore this message due to a database error.", "danger") + _log_admin_event("inbox_restore_error", f"Failed to restore message {msg.id}: {exc}") + return redirect(url_for("main.inbox_deleted_mails")) + + _log_admin_event("inbox_restore", f"Restored deleted inbox message {msg.id}") + flash("Message restored to Inbox.", "success") + return redirect(url_for("main.inbox_deleted_mails")) + + +@main_bp.route("/inbox/reparse-all", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def inbox_reparse_all(): + # Re-run parsers for messages currently in the inbox. + # IMPORTANT: This action can be very expensive on a fresh environment with + # many inbox items. To avoid Gunicorn timeouts / gateway errors, we process + # in batches and stop once we approach a safe time budget. The user can run + # the action again to continue. + + base_q = MailMessage.query + if hasattr(MailMessage, "location"): + base_q = base_q.filter(MailMessage.location == "inbox") + + total = base_q.count() + + # Keep batches small enough for a single HTTP request. + batch_size = int(current_app.config.get("INBOX_REPARSE_BATCH_SIZE", 200)) + commit_every = int(current_app.config.get("INBOX_REPARSE_COMMIT_EVERY", 50)) + + # Stop before typical reverse-proxy / Gunicorn timeouts. + time_budget_s = float(current_app.config.get("INBOX_REPARSE_TIME_BUDGET_S", 20.0)) + + started_at = time.monotonic() + processed = 0 + parsed_ok = 0 + auto_approved = 0 + auto_approved_runs = [] + no_match = 0 + errors = 0 + partial = False + + last_id = None + + while True: + q = base_q + if last_id is not None: + q = q.filter(MailMessage.id < last_id) + + # Use keyset pagination on id to avoid large OFFSET scans. + batch = q.order_by(MailMessage.id.desc()).limit(batch_size).all() + if not batch: + break + + for msg in batch: + # If we are about to exceed the time budget, stop and return control. 
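+ # Stopping here is safe: the summary flash reports how many messages remain, so the action can simply be triggered again. With the defaults above (batch_size=200, time_budget_s=20.0) a large backlog drains over a few requests.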
+ if (time.monotonic() - started_at) >= time_budget_s: + partial = True + break + + processed += 1 + last_id = msg.id + + try: + parse_mail_message(msg) + + # Auto-approve if this job was already approved before (unique match across customers) + try: + # During re-parse we want to (re)apply auto-approve as well. + # Only attempt this for inbox messages with a successful parse that are not yet linked. + if ( + getattr(msg, "location", "inbox") == "inbox" + and getattr(msg, "parse_result", None) == "ok" + and getattr(msg, "job_id", None) is None + ): + # Match approved job on: From + Backup + Type + Job name + # Prevent session autoflush for every match lookup while we + # are still updating many messages in a loop. + with db.session.no_autoflush: + job = find_matching_job(msg) + + if job: + # Respect per-job flags. + if hasattr(job, "active") and not bool(job.active): + raise Exception("job not active") + if hasattr(job, "auto_approve") and not bool(job.auto_approve): + raise Exception("job auto_approve disabled") + + # Create a new run for the known job + run = JobRun( + job_id=job.id, + mail_message_id=msg.id, + # Some sources may not provide received_at; fall back to parsed_at/now. + run_at=(msg.received_at or getattr(msg, "parsed_at", None) or datetime.utcnow()), + status=msg.overall_status or None, + missed=False, + ) + + # Optional remark + if hasattr(run, "remark"): + run.remark = getattr(msg, "overall_message", None) + + # Optional storage metrics (for capacity graphs) + if hasattr(run, "storage_used_bytes") and hasattr(msg, "storage_used_bytes"): + run.storage_used_bytes = msg.storage_used_bytes + if hasattr(run, "storage_capacity_bytes") and hasattr(msg, "storage_capacity_bytes"): + run.storage_capacity_bytes = msg.storage_capacity_bytes + if hasattr(run, "storage_free_bytes") and hasattr(msg, "storage_free_bytes"): + run.storage_free_bytes = msg.storage_free_bytes + if hasattr(run, "storage_free_percent") and hasattr(msg, "storage_free_percent"): + run.storage_free_percent = msg.storage_free_percent + + db.session.add(run) + db.session.flush() # ensure run.id is available + auto_approved_runs.append((job.customer_id, job.id, run.id, msg.id)) + + msg.job_id = job.id + if hasattr(msg, "approved"): + msg.approved = True + if hasattr(msg, "approved_at"): + msg.approved_at = datetime.utcnow() + if hasattr(msg, "approved_by_id"): + msg.approved_by_id = None + if hasattr(msg, "location"): + msg.location = "history" + + auto_approved += 1 + + except Exception as _exc: + # Never fail the reparse due to auto-approve; keep message in inbox + current_app.logger.exception( + f"Auto-approve during reparse failed for message {getattr(msg,'id',None)}: {_exc}" + ) + + if msg.parse_result == "ok": + parsed_ok += 1 + elif msg.parse_result == "no_match": + no_match += 1 + else: + errors += 1 + + except Exception as exc: + errors += 1 + msg.parse_result = "error" + msg.parse_error = str(exc)[:500] + + # Commit periodically to keep the transaction small and to avoid + # autoflush overhead in subsequent queries. + if commit_every > 0 and (processed % commit_every) == 0: + try: + db.session.commit() + except Exception: + db.session.rollback() + + if partial: + break + + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + flash("Re-parse failed while saving results. 
See logs for details.", "danger") + _log_admin_event("reparse_inbox_error", f"Re-parse inbox failed to commit: {exc}") + return redirect(url_for("main.inbox")) + + # Safety net: ensure auto-approve is also applied for all successfully parsed + # inbox messages without a job link. This covers cases where the in-loop + # auto-approve did not run due to session/flush issues. + if not partial: + try: + pending_q = MailMessage.query + if hasattr(MailMessage, "location"): + pending_q = pending_q.filter(MailMessage.location == "inbox") + pending_q = pending_q.filter(MailMessage.parse_result == "ok") + pending_q = pending_q.filter(MailMessage.job_id.is_(None)) + pending = pending_q.order_by(MailMessage.received_at.desc()).all() + + for msg in pending: + nested = db.session.begin_nested() + try: + with db.session.no_autoflush: + job = find_matching_job(msg) + if not job: + nested.commit() + continue + + if hasattr(job, "active") and not bool(job.active): + nested.commit() + continue + if hasattr(job, "auto_approve") and not bool(job.auto_approve): + nested.commit() + continue + + run = JobRun( + job_id=job.id, + mail_message_id=msg.id, + run_at=(msg.received_at or getattr(msg, "parsed_at", None) or datetime.utcnow()), + status=msg.overall_status or None, + missed=False, + ) + if hasattr(run, "remark"): + run.remark = getattr(msg, "overall_message", None) + if hasattr(run, "storage_used_bytes") and hasattr(msg, "storage_used_bytes"): + run.storage_used_bytes = msg.storage_used_bytes + if hasattr(run, "storage_capacity_bytes") and hasattr(msg, "storage_capacity_bytes"): + run.storage_capacity_bytes = msg.storage_capacity_bytes + if hasattr(run, "storage_free_bytes") and hasattr(msg, "storage_free_bytes"): + run.storage_free_bytes = msg.storage_free_bytes + if hasattr(run, "storage_free_percent") and hasattr(msg, "storage_free_percent"): + run.storage_free_percent = msg.storage_free_percent + + db.session.add(run) + db.session.flush() + auto_approved_runs.append((job.customer_id, job.id, run.id, msg.id)) + + msg.job_id = job.id + if hasattr(msg, "approved"): + msg.approved = True + if hasattr(msg, "approved_at"): + msg.approved_at = datetime.utcnow() + if hasattr(msg, "approved_by_id"): + msg.approved_by_id = None + if hasattr(msg, "location"): + msg.location = "history" + + auto_approved += 1 + + nested.commit() + + except Exception as _exc: + # Roll back only this message's work (savepoint) and continue. + try: + nested.rollback() + except Exception: + # If rollback itself fails, fall back to a full session rollback. + db.session.rollback() + current_app.logger.exception( + f"Auto-approve safety net during reparse failed for message {getattr(msg,'id',None)}: {_exc}" + ) + continue + + db.session.commit() + + except Exception as exc: + db.session.rollback() + current_app.logger.exception(f"Auto-approve safety net during reparse failed: {exc}") + + # Persist objects for auto-approved runs (must not block the reparse) + # NOTE: On very large batches this can still be expensive, but we only run + # it after DB commits so the UI request does not fail mid-transaction. 
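+ # Persistence is best-effort: each failure is recorded as an 'object_persist_error' admin event and never rolls back the already-committed runs.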
+ if auto_approved_runs and not partial: + persisted_objects = 0 + persisted_errors = 0 + for (customer_id, job_id, run_id, mail_message_id) in auto_approved_runs: + try: + persisted_objects += persist_objects_for_approved_run( + customer_id, job_id, run_id, mail_message_id + ) + except Exception as exc: + persisted_errors += 1 + _log_admin_event( + "object_persist_error", + f"Object persistence failed for auto-approved message {mail_message_id} (job {job_id}, run {run_id}): {exc}", + ) + + _log_admin_event( + "object_persist_auto_approve", + f"Persisted objects for auto-approved runs. runs={len(auto_approved_runs)}, objects={persisted_objects}, errors={persisted_errors}", + ) + + log_msg = ( + f"Re-parse inbox finished. processed={processed}/{total}, ok={parsed_ok}, " + f"auto_approved={auto_approved}, no_match={no_match}, errors={errors}, partial={partial}" + ) + _log_admin_event("reparse_inbox", log_msg) + + if partial: + remaining = max(0, total - processed) + flash("Re-parse started (batch mode).", "warning") + flash( + f"Processed: {processed}/{total}. Remaining: {remaining}. " + f"Parsed: {parsed_ok}, auto-approved: {auto_approved}, no match: {no_match}, errors: {errors}.", + "info", + ) + else: + flash("Re-parse finished.", "success") + flash( + f"Total: {total}, parsed: {parsed_ok}, auto-approved: {auto_approved}, no match: {no_match}, errors: {errors}", + "info", + ) + + return redirect(url_for("main.inbox")) diff --git a/containers/backupchecks/src/backend/app/main/routes_jobs.py b/containers/backupchecks/src/backend/app/main/routes_jobs.py new file mode 100644 index 0000000..8c559f6 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_jobs.py @@ -0,0 +1,384 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import ( + _apply_overrides_to_run, + _describe_schedule, + _format_datetime, + _get_ui_timezone_name, + _infer_schedule_map_from_runs, + _schedule_map_to_desc, + _to_amsterdam_date, +) + +@main_bp.route("/jobs") +@login_required +@roles_required("admin", "operator", "viewer") +def jobs(): + # Join with customers for display + jobs = ( + Job.query + .outerjoin(Customer, Customer.id == Job.customer_id) + .add_columns( + Job.id, + Job.backup_software, + Job.backup_type, + Job.job_name, + Customer.name.label("customer_name"), + ) + .order_by(Customer.name.asc().nullslast(), Job.backup_software.asc(), Job.backup_type.asc(), Job.job_name.asc()) + .all() + ) + + rows = [] + for row in jobs: + # Depending on SQLAlchemy version, row may be tuple-like + job_id = row.id + backup_software = row.backup_software + backup_type = row.backup_type + job_name = row.job_name + customer_name = getattr(row, "customer_name", None) + rows.append( + { + "id": job_id, + "customer_name": customer_name or "", + "backup_software": backup_software or "", + "backup_type": backup_type or "", + "job_name": job_name or "", + } + ) + + can_manage_jobs = current_user.is_authenticated and get_active_role() in ("admin", "operator") + + return render_template( + "main/jobs.html", + jobs=rows, + can_manage_jobs=can_manage_jobs, + ) + + +@main_bp.route("/jobs/") +@login_required +@roles_required("admin", "operator", "viewer") +def job_detail(job_id: int): + job = Job.query.get_or_404(job_id) + + # History pagination + try: + page = int(request.args.get("page", "1")) + except ValueError: + page = 1 + if page < 1: + page = 1 + + per_page = 20 + query = JobRun.query.filter_by(job_id=job.id) + total_items = query.count() + total_pages = max(1, math.ceil(total_items / per_page)) 
if total_items else 1 + if page > total_pages: + page = total_pages + + runs = ( + query.order_by(JobRun.run_at.desc().nullslast(), JobRun.id.desc()) + .offset((page - 1) * per_page) + .limit(per_page) + .all() + ) + + # Tickets: mark runs that fall within the ticket active window + ticket_rows = [] + ticket_open_count = 0 + ticket_total_count = 0 + + remark_rows = [] + remark_open_count = 0 + remark_total_count = 0 + + run_dates = [] + run_date_map = {} + for r in runs: + rd = _to_amsterdam_date(r.run_at) or _to_amsterdam_date(datetime.utcnow()) + run_date_map[r.id] = rd + if rd: + run_dates.append(rd) + + if run_dates: + min_date = min(run_dates) + max_date = max(run_dates) + try: + rows = ( + db.session.execute( + text( + """ + SELECT t.active_from_date, t.resolved_at, t.ticket_code + FROM tickets t + JOIN ticket_scopes ts ON ts.ticket_id = t.id + WHERE ts.job_id = :job_id + AND t.active_from_date <= :max_date + AND ( + t.resolved_at IS NULL + OR ((t.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) >= :min_date + ) + """ + ), + {"job_id": job.id, "min_date": min_date, + "ui_tz": _get_ui_timezone_name(), "max_date": max_date}, + ) + .mappings() + .all() + ) + for rr in rows: + active_from = rr.get("active_from_date") + resolved_at = rr.get("resolved_at") + resolved_date = _to_amsterdam_date(resolved_at) if resolved_at else None + ticket_rows.append({"active_from_date": active_from, "resolved_date": resolved_date, "ticket_code": rr.get("ticket_code")}) + except Exception: + ticket_rows = [] + + if run_dates: + min_date = min(run_dates) + max_date = max(run_dates) + try: + rows = ( + db.session.execute( + text( + """ + SELECT COALESCE( + r.active_from_date, + ((r.start_date AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) + ) AS active_from_date, + r.resolved_at, + r.title, + r.body + FROM remarks r + JOIN remark_scopes rs ON rs.remark_id = r.id + WHERE rs.job_id = :job_id + AND COALESCE( + r.active_from_date, + ((r.start_date AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) + ) <= :max_date + AND ( + r.resolved_at IS NULL + OR ((r.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) >= :min_date + ) + """ + ), + {"job_id": job.id, "min_date": min_date, + "ui_tz": _get_ui_timezone_name(), "max_date": max_date}, + ) + .mappings() + .all() + ) + for rr in rows: + active_from = rr.get("active_from_date") + resolved_at = rr.get("resolved_at") + resolved_date = _to_amsterdam_date(resolved_at) if resolved_at else None + remark_rows.append({"active_from_date": active_from, "resolved_date": resolved_date, "title": rr.get("title"), "body": rr.get("body")}) + except Exception: + remark_rows = [] + + try: + ticket_total_count = ( + db.session.execute( + text( + """ + SELECT COUNT(*) + FROM tickets t + JOIN ticket_scopes ts ON ts.ticket_id = t.id + WHERE ts.job_id = :job_id + """ + ), + {"job_id": job.id}, + ).scalar() or 0 + ) + ticket_open_count = ( + db.session.execute( + text( + """ + SELECT COUNT(*) + FROM tickets t + JOIN ticket_scopes ts ON ts.ticket_id = t.id + WHERE ts.job_id = :job_id + AND t.resolved_at IS NULL + """ + ), + {"job_id": job.id}, + ).scalar() or 0 + ) + except Exception: + ticket_total_count = 0 + ticket_open_count = 0 + + try: + remark_total_count = ( + db.session.execute( + text( + """ + SELECT COUNT(*) + FROM remarks r + JOIN remark_scopes rs ON rs.remark_id = r.id + WHERE rs.job_id = :job_id + """ + ), + {"job_id": job.id}, + ).scalar() or 0 + ) + remark_open_count = ( + db.session.execute( + text( + """ + SELECT COUNT(*) + FROM remarks r + JOIN remark_scopes 
rs ON rs.remark_id = r.id + WHERE rs.job_id = :job_id + AND r.resolved_at IS NULL + """ + ), + {"job_id": job.id}, + ).scalar() or 0 + ) + except Exception: + remark_total_count = 0 + remark_open_count = 0 + + history_rows = [] + for r in runs: + status_display, override_applied, _override_level, _ov_id, _ov_reason = _apply_overrides_to_run(job, r) + rd = run_date_map.get(r.id) + run_day = "" + if rd: + # rd is an Amsterdam-local date + _days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + try: + run_day = _days[int(rd.weekday())] + except Exception: + run_day = "" + has_ticket = False + has_remark = False + ticket_codes = [] + remark_items = [] + + if rd and ticket_rows: + for tr in ticket_rows: + af = tr.get("active_from_date") + resd = tr.get("resolved_date") + if af and af <= rd and (resd is None or resd >= rd): + has_ticket = True + code = (tr.get("ticket_code") or "").strip() + if code and code not in ticket_codes: + ticket_codes.append(code) + + if rd and remark_rows: + for rr in remark_rows: + af = rr.get("active_from_date") + resd = rr.get("resolved_date") + if af and af <= rd and (resd is None or resd >= rd): + has_remark = True + title = (rr.get("title") or "").strip() + body = (rr.get("body") or "").strip() + remark_items.append({"title": title, "body": body}) + + history_rows.append( + { + "id": r.id, + "run_day": run_day, + "run_at": _format_datetime(r.run_at), + "status": status_display or "", + "remark": r.remark or "", + "missed": bool(r.missed), + "override_applied": bool(override_applied), + "has_ticket": bool(has_ticket), + "has_remark": bool(has_remark), + "ticket_codes": ticket_codes, + "remark_items": remark_items, + "mail_message_id": r.mail_message_id, + "reviewed_by": (r.reviewed_by.username if getattr(r, "reviewed_by", None) else ""), + "reviewed_at": _format_datetime(r.reviewed_at) if r.reviewed_at else "", + } + ) + + has_prev = page > 1 + has_next = page < total_pages + + can_manage_jobs = current_user.is_authenticated and get_active_role() in ("admin", "operator") + + schedule_map = None + schedule_desc = _describe_schedule(job) + if schedule_desc.startswith("No schedule configured"): + schedule_map = _infer_schedule_map_from_runs(job.id) + schedule_desc = _schedule_map_to_desc(schedule_map) + else: + schedule_map = _infer_schedule_map_from_runs(job.id) + + # For convenience, also load customer + customer = None + if job.customer_id: + customer = Customer.query.get(job.customer_id) + + return render_template( + "main/job_detail.html", + job=job, + customer=customer, + schedule_desc=schedule_desc, + schedule_map=schedule_map, + history_rows=history_rows, + ticket_open_count=int(ticket_open_count or 0), + ticket_total_count=int(ticket_total_count or 0), + remark_open_count=int(remark_open_count or 0), + remark_total_count=int(remark_total_count or 0), + page=page, + total_pages=total_pages, + has_prev=has_prev, + has_next=has_next, + can_manage_jobs=can_manage_jobs, + ) + + +@main_bp.route("/jobs//delete", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def job_delete(job_id: int): + job = Job.query.get_or_404(job_id) + + try: + # Collect run ids for FK cleanup in auxiliary tables that may not have ON DELETE CASCADE + run_ids = [] + mail_message_ids = [] + + for run in job.runs: + if run.id is not None: + run_ids.append(run.id) + if run.mail_message_id: + mail_message_ids.append(run.mail_message_id) + + # Put related mails back into the inbox and unlink from job + if mail_message_ids: + msgs = 
MailMessage.query.filter(MailMessage.id.in_(mail_message_ids)).all() + for msg in msgs: + if hasattr(msg, "location"): + msg.location = "inbox" + msg.job_id = None + + # Ensure run_object_links doesn't block job_runs deletion (older schemas may miss ON DELETE CASCADE) + if run_ids: + db.session.execute( + text("DELETE FROM run_object_links WHERE run_id IN :run_ids").bindparams( + bindparam("run_ids", expanding=True) + ), + {"run_ids": run_ids}, + ) + + # Ensure job_object_links doesn't block jobs deletion (older schemas may miss ON DELETE CASCADE) + if job.id is not None: + db.session.execute( + text("DELETE FROM job_object_links WHERE job_id = :job_id"), + {"job_id": job.id}, + ) + + db.session.delete(job) + db.session.commit() + flash("Job deleted. Related mails are returned to the inbox.", "success") + except Exception as exc: + db.session.rollback() + print(f"[jobs] Failed to delete job: {exc}") + flash("Failed to delete job.", "danger") + + return redirect(url_for("main.jobs")) \ No newline at end of file diff --git a/containers/backupchecks/src/backend/app/main/routes_news.py b/containers/backupchecks/src/backend/app/main/routes_news.py new file mode 100644 index 0000000..69bcf13 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_news.py @@ -0,0 +1,32 @@ +from .routes_shared import * # noqa: F401,F403 + + +@main_bp.route("/news/read/", methods=["POST"]) +@login_required +def news_mark_read(news_id: int): + try: + item = NewsItem.query.get(news_id) + if not item: + return abort(404) + + # Only mark as read when authenticated user exists. + uid = getattr(current_user, "id", None) + if not uid: + return abort(401) + + existing = NewsRead.query.filter_by(news_item_id=news_id, user_id=uid).first() + if not existing: + db.session.add(NewsRead(news_item_id=news_id, user_id=uid)) + db.session.commit() + except Exception as exc: + try: + db.session.rollback() + except Exception: + pass + print(f"[news] Failed to mark read: {exc}") + + # Return to the previous page (dashboard by default) + ref = (request.headers.get("Referer") or "").strip() + if ref: + return redirect(ref) + return redirect(url_for("main.dashboard")) diff --git a/containers/backupchecks/src/backend/app/main/routes_overrides.py b/containers/backupchecks/src/backend/app/main/routes_overrides.py new file mode 100644 index 0000000..e783d14 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_overrides.py @@ -0,0 +1,371 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _format_datetime + +# When no explicit start date is provided, we treat overrides as retroactive so they +# can be applied to existing (not-yet-reviewed) runs as well. 
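# Illustrative sketch, not part of this patch: how the empty-field fallback is intended to
# behave. The helper name _resolve_override_start is hypothetical; the routes below inline
# this logic (overrides_create falls back to the retroactive default on parse failure).
#
#     from datetime import datetime
#
#     def _resolve_override_start(raw: str) -> datetime:
#         """Empty or unparsable input -> retroactive epoch; otherwise parse datetime-local."""
#         raw = (raw or "").strip()
#         if not raw:
#             return datetime(1970, 1, 1)
#         try:
#             return datetime.strptime(raw, "%Y-%m-%dT%H:%M")
#         except ValueError:
#             return datetime(1970, 1, 1)
#
#     # _resolve_override_start("")                  -> datetime(1970, 1, 1, 0, 0)
#     # _resolve_override_start("2025-03-07T06:30")  -> datetime(2025, 3, 7, 6, 30)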
+_OVERRIDE_DEFAULT_START_AT = datetime(1970, 1, 1) + +@main_bp.route("/overrides") +@login_required +@roles_required("admin", "operator", "viewer") +def overrides(): + can_manage = get_active_role() in ("admin", "operator") + can_delete = get_active_role() == "admin" + + overrides_q = Override.query.order_by(Override.level.asc(), Override.start_at.desc()).all() + + # Preload jobs for selection in the form (for object-level overrides) + jobs_for_select = ( + Job.query.outerjoin(Customer, Job.customer_id == Customer.id) + .order_by( + Customer.name.asc(), + func.lower(func.coalesce(Job.backup_software, "")), + func.lower(func.coalesce(Job.backup_type, "")), + func.lower(func.coalesce(Job.job_name, "")), + ) + .all() + ) + + # Preload configured backup software/types for global override selection + backup_software_options = [ + r[0] + for r in db.session.query(Job.backup_software) + .filter(Job.backup_software.isnot(None), Job.backup_software != "") + .group_by(Job.backup_software) + .order_by(func.lower(Job.backup_software)) + .all() + ] + + backup_type_options = [ + r[0] + for r in db.session.query(Job.backup_type) + .filter(Job.backup_type.isnot(None), Job.backup_type != "") + .group_by(Job.backup_type) + .order_by(func.lower(Job.backup_type)) + .all() + ] + + + def _describe_scope(ov: Override) -> str: + lvl = (ov.level or "").lower() + details = [] + + if lvl == "global": + # Global: backup software / type based + if ov.backup_software: + details.append(ov.backup_software) + if ov.backup_type: + details.append(ov.backup_type) + scope = " / ".join(details) if details else "All jobs" + elif lvl == "object": + # Object-level: specific job (and optionally object name) + job = Job.query.get(ov.job_id) if ov.job_id else None + if job: + customer_name = job.customer.name if job.customer else "" + scope = f"{customer_name} / {job.backup_software or ''} / {job.backup_type or ''} / {job.job_name or ''}" + else: + scope = "(no job)" + if ov.object_name: + scope = scope + f" / object: {ov.object_name}" + else: + scope = "(unknown)" + + # Append match criteria + crit = [] + if ov.match_status: + crit.append(f"status == {ov.match_status}") + if ov.match_error_contains: + crit.append(f"error contains '{ov.match_error_contains}'") + if crit: + scope = scope + " [" + ", ".join(crit) + "]" + + return scope + + rows = [] + for ov in overrides_q: + rows.append( + { + "id": ov.id, + "level": ov.level or "", + "scope": _describe_scope(ov), + "start_at": _format_datetime(ov.start_at), + "end_at": _format_datetime(ov.end_at) if ov.end_at else "", + "active": bool(ov.active), + "treat_as_success": bool(ov.treat_as_success), + "comment": ov.comment or "", + "match_status": ov.match_status or "", + "match_error_contains": ov.match_error_contains or "", + } + ) + + return render_template( + "main/overrides.html", + overrides=rows, + can_manage=can_manage, + can_delete=can_delete, + jobs_for_select=jobs_for_select, + backup_software_options=backup_software_options, + backup_type_options=backup_type_options, + ) + + +@main_bp.route("/overrides/create", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def overrides_create(): + level = (request.form.get("level") or "").lower().strip() + comment = (request.form.get("comment") or "").strip() + treat_as_success = bool(request.form.get("treat_as_success")) + + backup_software = request.form.get("backup_software") or None + backup_type = request.form.get("backup_type") or None + + job_id_val = request.form.get("job_id") or "" + job_id = 
int(job_id_val) if job_id_val.isdigit() else None + object_name = request.form.get("object_name") or None + + match_status = (request.form.get("match_status") or "").strip() or None + match_error_contains = (request.form.get("match_error_contains") or "").strip() or None + + start_at_str = request.form.get("start_at") or "" + end_at_str = request.form.get("end_at") or "" + + now = datetime.utcnow() + try: + if start_at_str: + start_at = datetime.strptime(start_at_str, "%Y-%m-%dT%H:%M") + else: + start_at = _OVERRIDE_DEFAULT_START_AT + except Exception: + start_at = _OVERRIDE_DEFAULT_START_AT + + end_at = None + if end_at_str: + try: + end_at = datetime.strptime(end_at_str, "%Y-%m-%dT%H:%M") + except Exception: + end_at = None + + # Only support global and object level via UI + if level not in ("global", "object"): + flash("Invalid override level.", "danger") + return redirect(url_for("main.overrides")) + + ov = Override( + level=level, + backup_software=backup_software if level == "global" else None, + backup_type=backup_type if level == "global" else None, + job_id=job_id if level == "object" else None, + object_name=object_name if level == "object" else None, + match_status=match_status, + match_error_contains=match_error_contains, + treat_as_success=treat_as_success, + active=True, + comment=comment, + created_by=current_user.username, + start_at=start_at, + end_at=end_at, + ) + + db.session.add(ov) + db.session.commit() + + # Apply the new override to already existing runs so the UI reflects it immediately. + try: + job_ids = None + if ov.level == "object" and ov.job_id: + job_ids = [ov.job_id] + elif ov.level == "global": + q = Job.query + if ov.backup_software: + q = q.filter(func.lower(Job.backup_software) == func.lower(ov.backup_software)) + if ov.backup_type: + q = q.filter(func.lower(Job.backup_type) == func.lower(ov.backup_type)) + job_ids = [j.id for j in q.all()] + + _recompute_override_flags_for_runs(job_ids=job_ids, start_at=ov.start_at, end_at=ov.end_at, only_unreviewed=True) + except Exception: + pass + + flash("Override created.", "success") + return redirect(url_for("main.overrides")) + + +@main_bp.route("/overrides/update/", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def overrides_update(override_id: int): + ov = Override.query.get_or_404(override_id) + + # Keep old scope to recompute affected runs in both old and new scope. + old_level = ov.level + old_backup_software = ov.backup_software + old_backup_type = ov.backup_type + old_job_id = ov.job_id + old_start_at = ov.start_at + old_end_at = ov.end_at + + level = (request.form.get("level") or "").lower().strip() + comment = (request.form.get("comment") or "").strip() + treat_as_success = bool(request.form.get("treat_as_success")) + + backup_software = request.form.get("backup_software") or None + backup_type = request.form.get("backup_type") or None + + job_id_val = request.form.get("job_id") or "" + job_id = int(job_id_val) if job_id_val.isdigit() else None + object_name = request.form.get("object_name") or None + + match_status = (request.form.get("match_status") or "").strip() or None + match_error_contains = (request.form.get("match_error_contains") or "").strip() or None + + start_at_str = request.form.get("start_at") or "" + end_at_str = request.form.get("end_at") or "" + + now = datetime.utcnow() + # If the field is left empty, treat it as retroactive (default start). + # Otherwise parse datetime-local. 
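# Illustrative note: the browser's datetime-local input submits "YYYY-MM-DDTHH:MM", which
# both parsers used below accept, e.g.
#     datetime.strptime("2025-03-07T06:30", "%Y-%m-%dT%H:%M")  -> datetime(2025, 3, 7, 6, 30)
#     datetime.fromisoformat("2025-03-07T06:30")               -> datetime(2025, 3, 7, 6, 30)
# Unlike overrides_create, an unparsable start value here keeps the override's existing start_at.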
+ if not start_at_str: + start_at = _OVERRIDE_DEFAULT_START_AT + else: + try: + start_at = datetime.strptime(start_at_str, "%Y-%m-%dT%H:%M") + except Exception: + start_at = ov.start_at + + try: + if end_at_str: + end_at = datetime.fromisoformat(end_at_str) + else: + end_at = None + except Exception: + end_at = None + + if level not in ("global", "object"): + flash("Invalid override level.", "danger") + return redirect(url_for("main.overrides")) + + ov.level = level + ov.backup_software = backup_software if level == "global" else None + ov.backup_type = backup_type if level == "global" else None + ov.job_id = job_id if level == "object" else None + ov.object_name = object_name if level == "object" else None + ov.match_status = match_status + ov.match_error_contains = match_error_contains + ov.treat_as_success = treat_as_success + ov.comment = comment + ov.start_at = start_at + ov.end_at = end_at + ov.updated_by = current_user.username + ov.updated_at = now + + db.session.commit() + + # Recompute for union of old and new affected jobs, only for unreviewed runs. + try: + job_ids = set() + + def _job_ids_for_scope(level_val, bs, bt, jid): + if level_val == "object" and jid: + return {jid} + if level_val == "global": + q = Job.query + if bs: + q = q.filter(func.lower(Job.backup_software) == func.lower(bs)) + if bt: + q = q.filter(func.lower(Job.backup_type) == func.lower(bt)) + return {j.id for j in q.all()} + return set() + + job_ids |= _job_ids_for_scope(old_level, old_backup_software, old_backup_type, old_job_id) + job_ids |= _job_ids_for_scope(ov.level, ov.backup_software, ov.backup_type, ov.job_id) + + # Combine time windows so both old and new ranges are reprocessed. + combined_start = old_start_at if old_start_at else ov.start_at + if combined_start and ov.start_at and old_start_at: + combined_start = min(old_start_at, ov.start_at) + + combined_end = old_end_at if old_end_at else ov.end_at + if combined_end and ov.end_at and old_end_at: + combined_end = max(old_end_at, ov.end_at) + + _recompute_override_flags_for_runs( + job_ids=list(job_ids) if job_ids else None, + start_at=combined_start, + end_at=combined_end, + only_unreviewed=True, + ) + except Exception: + pass + + flash("Override updated.", "success") + return redirect(url_for("main.overrides")) + + +@main_bp.route("/overrides/delete/", methods=["POST"]) +@login_required +@roles_required("admin") +def overrides_delete(override_id: int): + ov = Override.query.get_or_404(override_id) + + # Capture scope before delete for recompute. 
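# Note: after db.session.delete(ov) is committed the ORM instance is expired (with the default
# session settings its attributes cannot be reloaded), so the scope and time window passed to
# _recompute_override_flags_for_runs below are taken from these plain local copies.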
+ level = ov.level + backup_software = ov.backup_software + backup_type = ov.backup_type + job_id = ov.job_id + start_at = ov.start_at + end_at = ov.end_at + + db.session.delete(ov) + db.session.commit() + + try: + job_ids = None + if level == "object" and job_id: + job_ids = [job_id] + elif level == "global": + q = Job.query + if backup_software: + q = q.filter(func.lower(Job.backup_software) == func.lower(backup_software)) + if backup_type: + q = q.filter(func.lower(Job.backup_type) == func.lower(backup_type)) + job_ids = [j.id for j in q.all()] + + _recompute_override_flags_for_runs(job_ids=job_ids, start_at=start_at, end_at=end_at, only_unreviewed=True) + except Exception: + pass + + flash("Override deleted.", "success") + return redirect(url_for("main.overrides")) + + +@main_bp.route("/overrides//toggle", methods=["POST"]) +@login_required +@roles_required("admin", "operator") +def overrides_toggle(override_id: int): + ov = Override.query.get_or_404(override_id) + ov.active = not bool(ov.active) + ov.updated_by = current_user.username + ov.updated_at = datetime.utcnow() + db.session.commit() + + # Recompute existing runs for the affected scope. + try: + job_ids = None + if ov.level == "object" and ov.job_id: + job_ids = [ov.job_id] + elif ov.level == "global": + q = Job.query + if ov.backup_software: + q = q.filter(func.lower(Job.backup_software) == func.lower(ov.backup_software)) + if ov.backup_type: + q = q.filter(func.lower(Job.backup_type) == func.lower(ov.backup_type)) + job_ids = [j.id for j in q.all()] + + _recompute_override_flags_for_runs(job_ids=job_ids, start_at=ov.start_at, end_at=ov.end_at, only_unreviewed=True) + except Exception: + pass + + flash("Override status updated.", "success") + return redirect(url_for("main.overrides")) + diff --git a/containers/backupchecks/src/backend/app/main/routes_parsers.py b/containers/backupchecks/src/backend/app/main/routes_parsers.py new file mode 100644 index 0000000..42c99c9 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_parsers.py @@ -0,0 +1,98 @@ +from .routes_shared import * # noqa: F401,F403 + +@main_bp.route("/parsers") +@login_required +@roles_required("admin") +def parsers_overview(): + # Only show what is currently implemented in code. + # Currently implemented parsers: + # - 3CX (Backup Complete notifications) + # - Veeam (status mails in multiple variants) + parsers = [ + { + "name": "3CX backup complete", + "backup_software": "3CX", + "backup_types": [], + "order": 10, + "enabled": True, + "match": { + "subject_regex": r"^3CX Notification:\\s*Backup Complete\\s*-\\s*(.+)$", + }, + "description": "Parses 3CX backup notifications (Backup Complete).", + "examples": [ + { + "subject": "3CX Notification: Backup Complete - PBX01", + "from_address": "noreply@3cx.local", + "body_snippet": "Backup name: PBX01_2025-12-17.zip", + "parsed_result": { + "backup_software": "3CX", + "backup_type": "", + "job_name": "PBX01", + "objects": [ + { + "name": "PBX01_2025-12-17.zip", + "status": "Success", + "error_message": "", + } + ], + }, + } + ], + }, + { + "name": "Veeam status mails", + "backup_software": "Veeam", + "backup_types": [ + "Backup Job", + "Backup Copy Job", + "Replica Job", + "Replication job", + "Configuration Backup", + "Agent Backup job", + "Veeam Backup for Microsoft 365", + "Scale Out Back-up Repository", + ], + "order": 20, + "enabled": True, + "match": { + "subject_regex": r"\\[(Success|Warning|Failed)\\]\\s*(.+)$", + }, + "description": "Parses Veeam status mails. 
Job name/type are preferably extracted from the HTML header to avoid subject suffix noise.", + "examples": [ + { + "subject": "[Warning] Daily-VM-Backup (3 objects) 1 warning", + "from_address": "veeam@customer.local", + "body_snippet": "Backup job: Daily-VM-Backup\\n...", + "parsed_result": { + "backup_software": "Veeam", + "backup_type": "Backup job", + "job_name": "Daily-VM-Backup", + "objects": [ + {"name": "VM-APP01", "status": "Success", "error_message": ""}, + {"name": "VM-DB01", "status": "Warning", "error_message": "Low disk space"}, + ], + }, + }, + { + "subject": "[Success] Offsite-Repository", + "from_address": "veeam@customer.local", + "body_snippet": "Backup Copy job: Offsite-Repository\\n...", + "parsed_result": { + "backup_software": "Veeam", + "backup_type": "Backup Copy job", + "job_name": "Offsite-Repository", + "objects": [ + {"name": "Backup Copy Chain", "status": "Success", "error_message": ""} + ], + }, + }, + ], + }, + ] + + return render_template( + "main/parsers.html", + parsers=parsers, + ) + + diff --git a/containers/backupchecks/src/backend/app/main/routes_remarks.py b/containers/backupchecks/src/backend/app/main/routes_remarks.py new file mode 100644 index 0000000..2ceab03 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_remarks.py @@ -0,0 +1,64 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _format_datetime + +@main_bp.route("/remarks/", methods=["GET", "POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def remark_detail(remark_id: int): + remark = Remark.query.get_or_404(remark_id) + + if request.method == "POST": + if get_active_role() not in ("admin", "operator"): + abort(403) + remark.body = (request.form.get("body") or "").strip() or "" + try: + db.session.commit() + flash("Remark updated.", "success") + except Exception as exc: + db.session.rollback() + flash(f"Failed to update remark: {exc}", "danger") + return redirect(url_for("main.remark_detail", remark_id=remark.id)) + + scopes = RemarkScope.query.filter(RemarkScope.remark_id == remark.id).order_by(RemarkScope.id.asc()).all() + + runs = [] + try: + rows = ( + db.session.execute( + text( + """ + SELECT jr.id, jr.run_at, jr.status, j.job_name, c.name AS customer_name + FROM remark_job_runs rjr + JOIN job_runs jr ON jr.id = rjr.job_run_id + JOIN jobs j ON j.id = jr.job_id + LEFT JOIN customers c ON c.id = j.customer_id + WHERE rjr.remark_id = :remark_id + ORDER BY jr.run_at DESC + LIMIT 20 + """ + ), + {"remark_id": remark.id}, + ) + .mappings() + .all() + ) + for r in rows: + runs.append( + { + "id": r.get("id"), + "run_at": _format_datetime(r.get("run_at")), + "status": r.get("status") or "", + "job_name": r.get("job_name") or "", + "customer_name": r.get("customer_name") or "", + } + ) + except Exception: + runs = [] + + return render_template( + "main/remark_detail.html", + remark=remark, + scopes=scopes, + runs=runs, + ) + diff --git a/containers/backupchecks/src/backend/app/main/routes_reporting_api.py b/containers/backupchecks/src/backend/app/main/routes_reporting_api.py new file mode 100644 index 0000000..cfed6f9 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_reporting_api.py @@ -0,0 +1,461 @@ +from .routes_shared import * # noqa: F401,F403 + +from sqlalchemy import text +import json +import csv +import io + + +def _clamp_int(value, default: int, min_v: int, max_v: int) -> int: + try: + v = int(value) + except Exception: + v = default + if v < min_v: + v = min_v + if v > max_v: + v = max_v + 
return v + + +def _parse_iso_datetime(value: str) -> datetime: + value = (value or "").strip() + if not value: + raise ValueError("missing datetime") + # Accept "YYYY-MM-DD" as day start in UTC + if re.fullmatch(r"\d{4}-\d{2}-\d{2}", value): + return datetime.fromisoformat(value + "T00:00:00") + return datetime.fromisoformat(value) + + +def _require_reporting_role(): + # Phase 1: admin/operator/reporter can do the same. + # Viewer is intentionally excluded. + if get_active_role() not in ("admin", "operator", "reporter"): + return abort(403) + return None + + +def _parse_customer_scope(payload: dict) -> tuple[str, list[int]]: + scope = (payload.get("customer_scope") or "all").strip().lower() + if scope not in ("all", "single", "multiple"): + scope = "all" + + raw_ids = payload.get("customer_ids") + ids: list[int] = [] + if isinstance(raw_ids, list): + for v in raw_ids: + try: + ids.append(int(v)) + except Exception: + continue + + if scope == "single": + if len(ids) != 1: + raise ValueError("Single customer scope requires exactly 1 customer.") + elif scope == "multiple": + if len(ids) < 1: + raise ValueError("Multiple customer scope requires at least 1 customer.") + else: + ids = [] + + return scope, ids + + +@main_bp.route("/api/reports/customers", methods=["GET"]) +@login_required +def api_reports_customers(): + err = _require_reporting_role() + if err is not None: + return err + + rows = ( + db.session.query(Customer) + .filter(Customer.active.is_(True)) + .order_by(Customer.name.asc()) + .all() + ) + return { + "items": [ + {"id": int(c.id), "name": c.name or ""} + for c in rows + ] + } + + +@main_bp.route("/api/reports", methods=["GET"]) +@login_required +def api_reports_list(): + err = _require_reporting_role() + if err is not None: + return err + + rows = ( + db.session.query(ReportDefinition) + .order_by(ReportDefinition.created_at.desc()) + .limit(200) + .all() + ) + return { + "items": [ + { + "id": r.id, + "name": r.name, + "description": r.description or "", + "report_type": r.report_type, + "output_format": r.output_format, + "customer_scope": getattr(r, "customer_scope", "all") or "all", + "customer_ids": (json.loads(r.customer_ids) if getattr(r, "customer_ids", None) else []), + "period_start": r.period_start.isoformat() if r.period_start else "", + "period_end": r.period_end.isoformat() if r.period_end else "", + "schedule": r.schedule or "", + "created_at": r.created_at.isoformat() if r.created_at else "", + } + for r in rows + ] + } + + +@main_bp.route("/api/reports", methods=["POST"]) +@login_required +def api_reports_create(): + err = _require_reporting_role() + if err is not None: + return err + + payload = request.get_json(silent=True) or {} + name = (payload.get("name") or "").strip() or "Report" + description = (payload.get("description") or "").strip() or None + report_type = (payload.get("report_type") or "one-time").strip() or "one-time" + output_format = (payload.get("output_format") or "csv").strip() or "csv" + schedule = (payload.get("schedule") or "").strip() or None + + try: + customer_scope, customer_ids = _parse_customer_scope(payload) + except Exception as exc: + return {"error": str(exc)}, 400 + + period_start_raw = payload.get("period_start") or "" + period_end_raw = payload.get("period_end") or "" + try: + period_start = _parse_iso_datetime(period_start_raw) + period_end = _parse_iso_datetime(period_end_raw) + except Exception: + return {"error": "Invalid period_start or period_end. 
Use ISO datetime (YYYY-MM-DDTHH:MM:SS)."}, 400 + + if period_end <= period_start: + return {"error": "period_end must be after period_start."}, 400 + + r = ReportDefinition( + name=name, + description=description, + report_type=report_type, + output_format=output_format, + customer_scope=customer_scope, + customer_ids=json.dumps(customer_ids) if customer_ids else None, + period_start=period_start, + period_end=period_end, + schedule=schedule, + created_by_user_id=getattr(current_user, "id", None), + ) + db.session.add(r) + db.session.commit() + return {"id": r.id} + + +@main_bp.route("/api/reports//generate", methods=["POST"]) +@login_required +def api_reports_generate(report_id: int): + err = _require_reporting_role() + if err is not None: + return err + + report = ReportDefinition.query.get_or_404(report_id) + + scope = (getattr(report, "customer_scope", None) or "all").strip().lower() + raw_customer_ids = getattr(report, "customer_ids", None) or "" + customer_ids: list[int] = [] + if raw_customer_ids: + try: + customer_ids = [int(v) for v in (json.loads(raw_customer_ids) or [])] + except Exception: + customer_ids = [] + + # Clear existing snapshot + summary rows for this report to make generation idempotent. + db.session.execute(text("DELETE FROM report_object_summaries WHERE report_id = :rid"), {"rid": report_id}) + db.session.execute(text("DELETE FROM report_object_snapshots WHERE report_id = :rid"), {"rid": report_id}) + + # Snapshot generation (object-based) + # Join: run_object_links -> customer_objects -> customers + # run_object_links.run_id -> job_runs -> jobs + where_customer = "" + params = {"rid": report_id, "start_ts": report.period_start, "end_ts": report.period_end} + if scope in ("single", "multiple") and customer_ids: + where_customer = " AND c.id = ANY(:customer_ids) " + params["customer_ids"] = customer_ids + + db.session.execute( + text( + f''' + INSERT INTO report_object_snapshots + (report_id, object_name, job_id, job_name, customer_id, customer_name, + backup_software, backup_type, run_id, run_at, status, missed, + override_applied, reviewed_at, ticket_number, remark, created_at) + SELECT + :rid AS report_id, + co.object_name AS object_name, + j.id AS job_id, + j.job_name AS job_name, + c.id AS customer_id, + c.name AS customer_name, + j.backup_software AS backup_software, + j.backup_type AS backup_type, + jr.id AS run_id, + jr.run_at AS run_at, + COALESCE(rol.status, jr.status) AS status, + COALESCE(jr.missed, FALSE) AS missed, + COALESCE(jr.override_applied, FALSE) AS override_applied, + jr.reviewed_at AS reviewed_at, + NULL AS ticket_number, + jr.remark AS remark, + NOW() AS created_at + FROM run_object_links rol + JOIN customer_objects co ON co.id = rol.customer_object_id + JOIN customers c ON c.id = co.customer_id + JOIN job_runs jr ON jr.id = rol.run_id + JOIN jobs j ON j.id = jr.job_id + WHERE jr.run_at IS NOT NULL + AND jr.run_at >= :start_ts + AND jr.run_at < :end_ts + {where_customer} + ''' + ), + params, + ) + + # Summary aggregation per object + db.session.execute( + text( + ''' + INSERT INTO report_object_summaries + (report_id, object_name, total_runs, success_count, success_override_count, + warning_count, failed_count, missed_count, success_rate, created_at) + SELECT + :rid AS report_id, + s.object_name AS object_name, + COUNT(*)::INTEGER AS total_runs, + SUM(CASE WHEN (COALESCE(s.status,'') ILIKE 'success%' AND s.override_applied = FALSE) THEN 1 ELSE 0 END)::INTEGER AS success_count, + SUM(CASE WHEN (s.override_applied = TRUE) THEN 1 ELSE 0 
END)::INTEGER AS success_override_count, + SUM(CASE WHEN (COALESCE(s.status,'') ILIKE 'warning%') THEN 1 ELSE 0 END)::INTEGER AS warning_count, + SUM(CASE WHEN (COALESCE(s.status,'') ILIKE 'fail%') THEN 1 ELSE 0 END)::INTEGER AS failed_count, + SUM(CASE WHEN (s.missed = TRUE) THEN 1 ELSE 0 END)::INTEGER AS missed_count, + CASE + WHEN COUNT(*) = 0 THEN 0.0 + ELSE ( + ( + SUM(CASE WHEN (COALESCE(s.status,'') ILIKE 'success%' AND s.override_applied = FALSE) THEN 1 ELSE 0 END) + + SUM(CASE WHEN (s.override_applied = TRUE) THEN 1 ELSE 0 END) + )::FLOAT / COUNT(*)::FLOAT + ) * 100.0 + END AS success_rate, + NOW() AS created_at + FROM report_object_snapshots s + WHERE s.report_id = :rid + GROUP BY s.object_name + ''' + ), + {"rid": report_id}, + ) + + # Return lightweight stats for UI feedback. + snapshot_count = ( + db.session.query(db.func.count(ReportObjectSnapshot.id)) + .filter(ReportObjectSnapshot.report_id == report_id) + .scalar() + or 0 + ) + summary_count = ( + db.session.query(db.func.count(ReportObjectSummary.id)) + .filter(ReportObjectSummary.report_id == report_id) + .scalar() + or 0 + ) + + db.session.commit() + return {"status": "ok", "snapshot_rows": int(snapshot_count), "summary_rows": int(summary_count)} + + +@main_bp.route("/api/reports//data", methods=["GET"]) +@login_required +def api_reports_data(report_id: int): + err = _require_reporting_role() + if err is not None: + return err + + ReportDefinition.query.get_or_404(report_id) + + view = (request.args.get("view") or "summary").strip().lower() + if view not in ("summary", "snapshot"): + view = "summary" + + limit = _clamp_int(request.args.get("limit"), default=100, min_v=1, max_v=500) + offset = _clamp_int(request.args.get("offset"), default=0, min_v=0, max_v=1_000_000) + + if view == "summary": + q = db.session.query(ReportObjectSummary).filter(ReportObjectSummary.report_id == report_id) + total = q.count() + rows = ( + q.order_by(ReportObjectSummary.object_name.asc()) + .offset(offset) + .limit(limit) + .all() + ) + return { + "view": "summary", + "total": int(total), + "limit": int(limit), + "offset": int(offset), + "items": [ + { + "object_name": r.object_name or "", + "total_runs": int(r.total_runs or 0), + "success_count": int(r.success_count or 0), + "success_override_count": int(r.success_override_count or 0), + "warning_count": int(r.warning_count or 0), + "failed_count": int(r.failed_count or 0), + "missed_count": int(r.missed_count or 0), + "success_rate": float(r.success_rate or 0.0), + } + for r in rows + ], + } + + q = db.session.query(ReportObjectSnapshot).filter(ReportObjectSnapshot.report_id == report_id) + total = q.count() + rows = ( + q.order_by(ReportObjectSnapshot.object_name.asc(), ReportObjectSnapshot.run_at.asc()) + .offset(offset) + .limit(limit) + .all() + ) + return { + "view": "snapshot", + "total": int(total), + "limit": int(limit), + "offset": int(offset), + "items": [ + { + "object_name": r.object_name or "", + "customer_id": int(r.customer_id) if r.customer_id is not None else "", + "customer_name": r.customer_name or "", + "job_id": r.job_id or "", + "job_name": r.job_name or "", + "backup_software": r.backup_software or "", + "backup_type": r.backup_type or "", + "run_id": r.run_id or "", + "run_at": r.run_at.isoformat() if r.run_at else "", + "status": r.status or "", + "missed": bool(r.missed), + "override_applied": bool(r.override_applied), + "reviewed_at": r.reviewed_at.isoformat() if r.reviewed_at else "", + "ticket_number": r.ticket_number or "", + "remark": (r.remark or 
"").replace("\r", " ").replace("\n", " ").strip(), + } + for r in rows + ], + } + + +@main_bp.route("/api/reports//export.csv", methods=["GET"]) +@login_required +def api_reports_export_csv(report_id: int): + err = _require_reporting_role() + if err is not None: + return err + + report = ReportDefinition.query.get_or_404(report_id) + view = (request.args.get("view") or "summary").strip().lower() + + if view not in ("summary", "snapshot"): + view = "summary" + + output = io.StringIO() + writer = csv.writer(output) + + if view == "summary": + writer.writerow([ + "object_name", + "total_runs", + "success_count", + "success_override_count", + "warning_count", + "failed_count", + "missed_count", + "success_rate", + ]) + rows = ( + db.session.query(ReportObjectSummary) + .filter(ReportObjectSummary.report_id == report_id) + .order_by(ReportObjectSummary.object_name.asc()) + .all() + ) + for r in rows: + writer.writerow([ + r.object_name or "", + int(r.total_runs or 0), + int(r.success_count or 0), + int(r.success_override_count or 0), + int(r.warning_count or 0), + int(r.failed_count or 0), + int(r.missed_count or 0), + float(r.success_rate or 0.0), + ]) + filename = f"report-{report_id}-summary.csv" + else: + writer.writerow([ + "object_name", + "customer_id", + "customer_name", + "job_id", + "job_name", + "backup_software", + "backup_type", + "run_id", + "run_at", + "status", + "missed", + "override_applied", + "reviewed_at", + "ticket_number", + "remark", + ]) + rows = ( + db.session.query(ReportObjectSnapshot) + .filter(ReportObjectSnapshot.report_id == report_id) + .order_by(ReportObjectSnapshot.object_name.asc(), ReportObjectSnapshot.run_at.asc()) + .all() + ) + for r in rows: + writer.writerow([ + r.object_name or "", + r.customer_id or "", + r.customer_name or "", + r.job_id or "", + r.job_name or "", + r.backup_software or "", + r.backup_type or "", + r.run_id or "", + r.run_at.isoformat() if r.run_at else "", + r.status or "", + "1" if r.missed else "0", + "1" if r.override_applied else "0", + r.reviewed_at.isoformat() if r.reviewed_at else "", + r.ticket_number or "", + (r.remark or "").replace("\r", " ").replace("\n", " ").strip(), + ]) + filename = f"report-{report_id}-snapshot.csv" + + csv_bytes = output.getvalue().encode("utf-8") + mem = io.BytesIO(csv_bytes) + mem.seek(0) + return send_file(mem, mimetype="text/csv", as_attachment=True, download_name=filename) diff --git a/containers/backupchecks/src/backend/app/main/routes_reports.py b/containers/backupchecks/src/backend/app/main/routes_reports.py new file mode 100644 index 0000000..74343c7 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_reports.py @@ -0,0 +1,23 @@ +from .routes_shared import * # noqa: F401,F403 + + +@main_bp.route("/reports") +@login_required +@roles_required("admin", "operator", "reporter", "viewer") +def reports(): + # Defaults are used by the Reports UI for quick testing. All values are UTC. 
+ period_end = datetime.utcnow().replace(microsecond=0) + period_start = (period_end - timedelta(days=7)).replace(microsecond=0) + + return render_template( + "main/reports.html", + default_period_start=period_start.isoformat(), + default_period_end=period_end.isoformat(), + ) + + +@main_bp.route("/reports/new") +@login_required +@roles_required("admin", "operator", "reporter", "viewer") +def reports_new(): + return render_template("main/reports_new.html") diff --git a/containers/backupchecks/src/backend/app/main/routes_run_checks.py b/containers/backupchecks/src/backend/app/main/routes_run_checks.py new file mode 100644 index 0000000..3571dce --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_run_checks.py @@ -0,0 +1,693 @@ +from __future__ import annotations + +from datetime import date, datetime, time, timedelta, timezone + +from flask import jsonify, render_template, request +from flask_login import current_user, login_required +from sqlalchemy import and_, or_, func, text + +from .routes_shared import ( + _apply_overrides_to_run, + _format_datetime, + _get_ui_timezone, + _get_ui_timezone_name, + _get_or_create_settings, + _infer_schedule_map_from_runs, + _to_amsterdam_date, + main_bp, + roles_required, + get_active_role, +) +from ..database import db +from ..models import Customer, Job, JobRun, JobRunReviewEvent, MailMessage, User + +# Grace window for matching real runs to an expected schedule slot. +# A run within +/- 1 hour of the inferred schedule time counts as fulfilling the slot. +MISSED_GRACE_WINDOW = timedelta(hours=1) + + +def _utc_naive_from_local(dt_local: datetime) -> datetime: + """Convert a timezone-aware local datetime to UTC naive, matching DB convention.""" + if dt_local.tzinfo is None: + return dt_local + return dt_local.astimezone(timezone.utc).replace(tzinfo=None) + + +def _local_from_utc_naive(dt_utc_naive: datetime) -> datetime: + tz = _get_ui_timezone() + if not tz: + return dt_utc_naive + try: + if dt_utc_naive.tzinfo is None: + dt_utc_naive = dt_utc_naive.replace(tzinfo=timezone.utc) + return dt_utc_naive.astimezone(tz) + except Exception: + return dt_utc_naive + + +def _parse_hhmm(hhmm: str) -> tuple[int, int] | None: + try: + parts = (hhmm or "").strip().split(":") + if len(parts) < 2: + return None + hh = int(parts[0]) + mm = int(parts[1]) + if hh < 0 or hh > 23 or mm < 0 or mm > 59: + return None + return hh, mm + except Exception: + return None + + +def _get_default_missed_start_date() -> date: + # Prefer configured Daily Jobs missed start date. + settings = _get_or_create_settings() + if getattr(settings, "daily_jobs_start_date", None): + return settings.daily_jobs_start_date + # Sensible safety guard: do not generate missed runs for unbounded history. + return (_to_amsterdam_date(datetime.utcnow()) or datetime.utcnow().date()) - timedelta(days=90) + + +def _ensure_missed_runs_for_job(job: Job, start_from: date, end_inclusive: date) -> int: + """Generate missed JobRun rows for scheduled slots without a run, so Run Checks can review them. + + Returns number of inserted missed runs. + """ + tz = _get_ui_timezone() + schedule_map = _infer_schedule_map_from_runs(job.id) or {} + if not schedule_map: + return 0 + + today_local = _to_amsterdam_date(datetime.utcnow()) or datetime.utcnow().date() + if end_inclusive > today_local: + end_inclusive = today_local + + now_local_dt = datetime.now(tz) if tz else datetime.utcnow() + + # Remove any previously generated missed runs in this date window. 
+ # Missed runs must be based on learned schedule from real mail-reported runs. + try: + start_local = datetime.combine(start_from, time.min) + end_local_excl = datetime.combine(end_inclusive + timedelta(days=1), time.min) + if tz: + start_local = start_local.replace(tzinfo=tz) + end_local_excl = end_local_excl.replace(tzinfo=tz) + start_utc_naive = _utc_naive_from_local(start_local) + end_utc_naive_excl = _utc_naive_from_local(end_local_excl) + + db.session.query(JobRun).filter( + JobRun.job_id == job.id, + JobRun.missed.is_(True), + JobRun.mail_message_id.is_(None), + JobRun.reviewed_at.is_(None), + JobRun.run_at.isnot(None), + JobRun.run_at >= start_utc_naive, + JobRun.run_at < end_utc_naive_excl, + ).delete(synchronize_session=False) + db.session.commit() + except Exception: + db.session.rollback() + + inserted = 0 + d = start_from + while d <= end_inclusive: + weekday = d.weekday() + times = schedule_map.get(weekday) or [] + if not times: + d = d + timedelta(days=1) + continue + + for hhmm in times: + hm = _parse_hhmm(hhmm) + if not hm: + continue + hh, mm = hm + + local_dt = datetime.combine(d, time(hour=hh, minute=mm)) + if tz: + local_dt = local_dt.replace(tzinfo=tz) + + # Only generate missed runs for past slots. + if local_dt > now_local_dt: + continue + + slot_utc_naive = _utc_naive_from_local(local_dt) + + # Consider any real run near the slot as fulfilling the schedule. + # Also avoid duplicates if a missed run already exists. + window_start = slot_utc_naive - MISSED_GRACE_WINDOW + window_end = slot_utc_naive + MISSED_GRACE_WINDOW + + exists = ( + db.session.query(JobRun.id) + .filter( + JobRun.job_id == job.id, + JobRun.run_at.isnot(None), + or_( + and_(JobRun.missed.is_(False), JobRun.mail_message_id.isnot(None)), + and_(JobRun.missed.is_(True), JobRun.mail_message_id.is_(None)), + ), + JobRun.run_at >= window_start, + JobRun.run_at <= window_end, + ) + .first() + ) + if exists: + continue + + miss = JobRun( + job_id=job.id, + run_at=slot_utc_naive, + status="Missed", + missed=True, + remark=None, + mail_message_id=None, + ) + db.session.add(miss) + inserted += 1 + + d = d + timedelta(days=1) + + if inserted: + db.session.commit() + return inserted + + +@main_bp.route("/run-checks") +@login_required +@roles_required("admin", "operator") +def run_checks_page(): + """Run Checks page: list jobs that have runs to review (including generated missed runs).""" + + include_reviewed = False + if get_active_role() == "admin": + include_reviewed = request.args.get("include_reviewed", "0") in ("1", "true", "yes", "on") + + # Generate missed runs since the last review per job so they show up in Run Checks. + # This is intentionally best-effort; any errors should not block page load. + try: + settings_start = _get_default_missed_start_date() + + last_reviewed_rows = ( + db.session.query(JobRun.job_id, func.max(JobRun.reviewed_at)) + .group_by(JobRun.job_id) + .all() + ) + last_reviewed_map = {int(jid): (dt if dt else None) for jid, dt in last_reviewed_rows} + + jobs = Job.query.all() + today_local = _to_amsterdam_date(datetime.utcnow()) or datetime.utcnow().date() + + for job in jobs: + last_rev = last_reviewed_map.get(int(job.id)) + if last_rev: + start_date = _to_amsterdam_date(last_rev) or settings_start + else: + start_date = settings_start + if start_date and start_date > today_local: + continue + _ensure_missed_runs_for_job(job, start_date, today_local) + except Exception: + # Don't block the page if missed-run generation fails. 
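# Illustrative sketch, kept as a comment so it does not alter the route body: the core of
# _ensure_missed_runs_for_job above is the +/- MISSED_GRACE_WINDOW check around each inferred
# schedule slot. The helper name slot_is_fulfilled is hypothetical.
#
#     from datetime import datetime, timedelta
#
#     GRACE = timedelta(hours=1)
#
#     def slot_is_fulfilled(slot_utc: datetime, run_times_utc: list[datetime]) -> bool:
#         """True if any real run landed within the grace window of the expected slot."""
#         return any(abs(run - slot_utc) <= GRACE for run in run_times_utc)
#
#     slot = datetime(2025, 3, 3, 22, 0)
#     # slot_is_fulfilled(slot, [datetime(2025, 3, 3, 22, 40)])  -> True  (within 1 hour)
#     # slot_is_fulfilled(slot, [datetime(2025, 3, 4, 1, 15)])   -> False (a missed run is generated)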
+ pass + + # Aggregated per-job rows + base = ( + db.session.query( + Job.id.label("job_id"), + Job.job_name.label("job_name"), + Job.backup_software.label("backup_software"), + Job.backup_type.label("backup_type"), + Customer.name.label("customer_name"), + ) + .select_from(Job) + .outerjoin(Customer, Customer.id == Job.customer_id) + ) + + # Runs to show in the overview: unreviewed (or all if admin toggle enabled) + run_filter = [] + if not include_reviewed: + run_filter.append(JobRun.reviewed_at.is_(None)) + + # Last review per job must be derived from reviewed runs (independent of the overview run filter). + # The overview typically shows only unreviewed runs, so using the same filter would always yield NULL. + last_reviewed_ts = ( + db.session.query( + JobRun.job_id.label("job_id"), + func.max(JobRun.reviewed_at).label("last_reviewed_at"), + ) + .filter(JobRun.reviewed_at.isnot(None)) + .group_by(JobRun.job_id) + .subquery() + ) + + last_reviewed_pick = ( + db.session.query( + JobRun.job_id.label("job_id"), + func.max(JobRun.id).label("run_id"), + ) + .join( + last_reviewed_ts, + (JobRun.job_id == last_reviewed_ts.c.job_id) + & (JobRun.reviewed_at == last_reviewed_ts.c.last_reviewed_at), + ) + .group_by(JobRun.job_id) + .subquery() + ) + + last_reviewed = ( + db.session.query( + JobRun.job_id.label("job_id"), + JobRun.reviewed_at.label("last_reviewed_at"), + JobRun.reviewed_by_user_id.label("last_reviewed_by_user_id"), + ) + .join(last_reviewed_pick, JobRun.id == last_reviewed_pick.c.run_id) + .subquery() + ) + + agg = ( + db.session.query( + JobRun.job_id.label("job_id"), + func.count(JobRun.id).label("run_count"), + func.max(func.coalesce(JobRun.run_at, JobRun.created_at)).label("last_run_ts"), + ) + .group_by(JobRun.job_id) + ) + if run_filter: + agg = agg.filter(*run_filter) + + agg = agg.subquery() + + q = ( + base.join(agg, agg.c.job_id == Job.id) + .outerjoin(last_reviewed, last_reviewed.c.job_id == Job.id) + .add_columns( + last_reviewed.c.last_reviewed_at.label("last_reviewed_at"), + last_reviewed.c.last_reviewed_by_user_id.label("last_reviewed_by_user_id"), + ) + ) + # Sort for operational review: Customer > Backup > Type > Job + q = q.order_by( + Customer.name.asc().nullslast(), + Job.backup_software.asc().nullslast(), + Job.backup_type.asc().nullslast(), + Job.job_name.asc().nullslast(), + Job.id.asc(), + ) + + rows = q.limit(2000).all() + + # Ensure override flags are up-to-date for the runs shown in this overview. + # The Run Checks modal computes override status on-the-fly, but the overview + # aggregates by persisted JobRun.override_applied. Keep those flags aligned + # so jobs with overridden runs do not stay orange (Warning). + try: + from .routes_shared import _recompute_override_flags_for_runs + + job_ids_for_recompute = [int(r.job_id) for r in rows] + if job_ids_for_recompute: + _recompute_override_flags_for_runs(job_ids=job_ids_for_recompute, only_unreviewed=True) + except Exception: + pass + + # Per-job status indicators for the overview table (counts per status). 
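# Illustrative shape of the structure built below (job ids and counts are hypothetical):
#     status_map = {12: {"Success": 5, "Warning": 1, "Missed": 2},
#                   17: {"Success (override)": 3}}
# Runs with missed=True are counted under "Missed"; runs with override_applied=True are counted
# under "Success (override)" so the overview labels match the other pages.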
+ job_ids = [int(r.job_id) for r in rows] + status_map: dict[int, dict[str, int]] = {} + if job_ids: + s_q = ( + db.session.query( + JobRun.job_id.label("job_id"), + JobRun.status.label("status"), + JobRun.missed.label("missed"), + JobRun.override_applied.label("override_applied"), + func.count(JobRun.id).label("cnt"), + ) + .filter(JobRun.job_id.in_(job_ids)) + ) + if run_filter: + s_q = s_q.filter(*run_filter) + s_q = s_q.group_by(JobRun.job_id, JobRun.status, JobRun.missed, JobRun.override_applied) + + for jid, status, missed, override_applied, cnt in s_q.all(): + job_id = int(jid) + + label = (status or "").strip() or "Unknown" + if bool(missed) or (label.lower() == "missed"): + label = "Missed" + elif bool(override_applied): + # Keep the label consistent with other pages. + label = "Success (override)" + + status_map.setdefault(job_id, {}) + status_map[job_id][label] = status_map[job_id].get(label, 0) + int(cnt or 0) + + # Map reviewed-by usernames for admins + reviewed_by_map: dict[int, str] = {} + if get_active_role() == "admin": + user_ids = sorted({int(r.last_reviewed_by_user_id) for r in rows if getattr(r, "last_reviewed_by_user_id", None)}) + if user_ids: + users = User.query.filter(User.id.in_(user_ids)).all() + reviewed_by_map = {u.id: u.username for u in users} + + # Ticket/Remark indicators (active today) for faster reviewing. + today_local = _to_amsterdam_date(datetime.utcnow()) or datetime.utcnow().date() + + payload = [] + for r in rows: + job_id = int(r.job_id) + + # Status indicators for the overview (counts per status). + indicators: list[dict[str, object]] = [] + counts = status_map.get(job_id, {}) + if counts: + def _dot_for(label: str) -> str: + s = (label or "").strip().lower() + if s == "success": + return "dot-success" + if s == "warning": + return "dot-warning" + if s in ("failed", "error"): + return "dot-failed" + if s == "missed": + return "dot-missed" + if s == "expected": + return "dot-expected" + if "override" in s: + return "dot-override" + return "" + + # Keep a stable order (actionable first). 
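# Illustrative result: for counts {"Success": 5, "Missed": 2, "Failed": 1} the indicators come
# out as Failed, Missed, Success, so actionable states are listed first; any status not in the
# preferred list is appended afterwards in alphabetical order.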
+ preferred = [ + "Failed", + "Error", + "Warning", + "Missed", + "Success", + "Expected", + "Success (override)", + ] + seen = set() + for k in preferred: + if k in counts: + indicators.append({"status": k, "count": int(counts.get(k, 0) or 0), "dot": _dot_for(k)}) + seen.add(k) + for k in sorted(counts.keys()): + if k not in seen: + indicators.append({"status": k, "count": int(counts.get(k, 0) or 0), "dot": _dot_for(k)}) + + has_active_ticket = False + has_active_remark = False + ui_tz = _get_ui_timezone_name() + try: + t_exists = db.session.execute( + text( + """ + SELECT 1 + FROM tickets t + JOIN ticket_scopes ts ON ts.ticket_id = t.id + WHERE ts.job_id = :job_id + AND t.active_from_date <= :run_date + AND ( + t.resolved_at IS NULL + OR ((t.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) >= :run_date + ) + LIMIT 1 + """ + ), + {"job_id": job_id, "run_date": today_local, "ui_tz": ui_tz}, + ).first() + has_active_ticket = bool(t_exists) + + r_exists = db.session.execute( + text( + """ + SELECT 1 + FROM remarks r + JOIN remark_scopes rs ON rs.remark_id = r.id + WHERE rs.job_id = :job_id + AND COALESCE( + r.active_from_date, + ((r.start_date AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) + ) <= :run_date + AND ( + r.resolved_at IS NULL + OR ((r.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE :ui_tz)::date) >= :run_date + ) + LIMIT 1 + """ + ), + {"job_id": job_id, "run_date": today_local, "ui_tz": ui_tz}, + ).first() + has_active_remark = bool(r_exists) + except Exception: + has_active_ticket = False + has_active_remark = False + + last_run_ts = getattr(r, "last_run_ts", None) + last_run_at = _format_datetime(last_run_ts) if last_run_ts else "" + + last_reviewed_at = getattr(r, "last_reviewed_at", None) + last_reviewed_by = reviewed_by_map.get(getattr(r, "last_reviewed_by_user_id", None), "") + + payload.append( + { + "job_id": job_id, + "customer_name": r.customer_name or "-", + "job_name": r.job_name or "-", + "backup_software": r.backup_software or "-", + "backup_type": r.backup_type or "-", + "run_count": int(getattr(r, "run_count", 0) or 0), + "last_run_at": last_run_at or "-", + "status_counts": status_map.get(job_id, {}), + "status_indicators": indicators, + "has_active_ticket": bool(has_active_ticket), + "has_active_remark": bool(has_active_remark), + "last_reviewed_at": _format_datetime(last_reviewed_at) if (get_active_role() == "admin" and last_reviewed_at) else "", + "last_reviewed_by": last_reviewed_by if get_active_role() == "admin" else "", + } + ) + + return render_template( + "main/run_checks.html", + rows=payload, + is_admin=(get_active_role() == "admin"), + include_reviewed=include_reviewed, + ) + + +@main_bp.route("/api/run-checks/details") +@login_required +@roles_required("admin", "operator") +def run_checks_details(): + """Return runs for a job for the Run Checks modal.""" + try: + job_id = int(request.args.get("job_id", "0")) + except Exception: + job_id = 0 + if job_id <= 0: + return jsonify({"status": "error", "message": "Invalid parameters."}), 400 + + include_reviewed = False + if get_active_role() == "admin": + include_reviewed = request.args.get("include_reviewed", "0") in ("1", "true", "yes", "on") + + job = Job.query.get_or_404(job_id) + + q = JobRun.query.filter(JobRun.job_id == job.id) + if not include_reviewed: + q = q.filter(JobRun.reviewed_at.is_(None)) + + runs = q.order_by(func.coalesce(JobRun.run_at, JobRun.created_at).desc(), JobRun.id.desc()).limit(400).all() + + runs_payload = [] + for run in runs: + msg = 
MailMessage.query.get(run.mail_message_id) if run.mail_message_id else None + mail_meta = None + has_eml = False + body_html = "" + if msg: + mail_meta = { + "from_address": msg.from_address or "", + "subject": msg.subject or "", + "received_at": _format_datetime(msg.received_at), + } + body_html = msg.html_body or "" + has_eml = bool(getattr(msg, "eml_stored_at", None)) + + objects_payload = [] + try: + rows = ( + db.session.execute( + text( + """ + SELECT + co.object_name AS name, + rol.status AS status, + rol.error_message AS error_message + FROM run_object_links rol + JOIN customer_objects co ON co.id = rol.customer_object_id + WHERE rol.run_id = :run_id + ORDER BY co.object_name ASC + """ + ), + {"run_id": run.id}, + ) + .mappings() + .all() + ) + for rr in rows: + objects_payload.append( + { + "name": rr.get("name") or "", + "type": "", + "status": rr.get("status") or "", + "error_message": rr.get("error_message") or "", + } + ) + except Exception: + objects_payload = [] + + status_display = run.status or "-" + try: + status_display, _, _, _ov_id, _ov_reason = _apply_overrides_to_run(job, run) + except Exception: + status_display = run.status or "-" + + runs_payload.append( + { + "id": run.id, + "run_at": _format_datetime(run.run_at) if run.run_at else "-", + "status": status_display, + "remark": run.remark or "", + "missed": bool(run.missed), + "is_reviewed": bool(run.reviewed_at), + "reviewed_at": _format_datetime(run.reviewed_at) if (get_active_role() == "admin" and run.reviewed_at) else "", + "mail_message_id": run.mail_message_id, + "has_eml": bool(has_eml), + "mail": mail_meta, + "body_html": body_html, + "objects": objects_payload, + } + ) + + job_payload = { + "id": job.id, + "customer_name": job.customer.name if job.customer else "", + "backup_software": job.backup_software or "", + "backup_type": job.backup_type or "", + "job_name": job.job_name or "", + } + + if not runs_payload: + return jsonify({"status": "ok", "job": job_payload, "runs": [], "message": "No runs found."}) + + return jsonify({"status": "ok", "job": job_payload, "runs": runs_payload}) + + +@main_bp.post("/api/run-checks/mark-reviewed") +@login_required +@roles_required("admin", "operator") +def api_run_checks_mark_reviewed(): + data = request.get_json(silent=True) or {} + run_ids = data.get("run_ids") or [] + job_ids = data.get("job_ids") or [] + + # Backwards compatible: accept either run_ids or job_ids. 
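# Illustrative request bodies accepted by this endpoint (ids are hypothetical):
#     {"run_ids": [101, 102, 103]}   -> marks those specific runs as reviewed
#     {"job_ids": [7]}               -> marks every run of job 7 as reviewed
# job_ids takes precedence when both are supplied; runs that are already reviewed are counted
# as "skipped" rather than updated again.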
+ ids: list[int] = [] + if job_ids: + try: + ids = [int(x) for x in job_ids] + except Exception: + return jsonify({"status": "error", "message": "Invalid job_ids."}), 400 + if not ids: + return jsonify({"status": "ok", "updated": 0, "skipped": 0}) + + runs = JobRun.query.filter(JobRun.job_id.in_(ids)).all() + else: + try: + run_ids = [int(x) for x in run_ids] + except Exception: + return jsonify({"status": "error", "message": "Invalid run_ids."}), 400 + + if not run_ids: + return jsonify({"status": "ok", "updated": 0, "skipped": 0}) + + runs = JobRun.query.filter(JobRun.id.in_(run_ids)).all() + + now = datetime.utcnow() + updated = 0 + skipped = 0 + + for run in runs: + if run.reviewed_at is not None: + skipped += 1 + continue + run.reviewed_at = now + run.reviewed_by_user_id = current_user.id + db.session.add( + JobRunReviewEvent( + run_id=run.id, + action="REVIEWED", + actor_user_id=current_user.id, + ) + ) + updated += 1 + + db.session.commit() + return jsonify({"status": "ok", "updated": updated, "skipped": skipped}) + + +@main_bp.post("/api/run-checks/unmark-reviewed") +@login_required +@roles_required("admin") +def api_run_checks_unmark_reviewed(): + data = request.get_json(silent=True) or {} + run_ids = data.get("run_ids") or [] + job_ids = data.get("job_ids") or [] + note = data.get("note") + + runs = [] + if job_ids: + try: + job_ids = [int(x) for x in job_ids] + except Exception: + return jsonify({"status": "error", "message": "Invalid job_ids."}), 400 + + if not job_ids: + return jsonify({"status": "ok", "updated": 0, "skipped": 0}) + + runs = JobRun.query.filter(JobRun.job_id.in_(job_ids)).all() + else: + try: + run_ids = [int(x) for x in run_ids] + except Exception: + return jsonify({"status": "error", "message": "Invalid run_ids."}), 400 + + if not run_ids: + return jsonify({"status": "ok", "updated": 0, "skipped": 0}) + + runs = JobRun.query.filter(JobRun.id.in_(run_ids)).all() + + updated = 0 + skipped = 0 + + for run in runs: + if run.reviewed_at is None: + skipped += 1 + continue + + run.reviewed_at = None + run.reviewed_by_user_id = None + + db.session.add( + JobRunReviewEvent( + run_id=run.id, + action="UNREVIEWED", + actor_user_id=current_user.id, + note=(str(note)[:2000] if note else None), + ) + ) + updated += 1 + + db.session.commit() + return jsonify({"status": "ok", "updated": updated, "skipped": skipped}) diff --git a/containers/backupchecks/src/backend/app/main/routes_settings.py b/containers/backupchecks/src/backend/app/main/routes_settings.py new file mode 100644 index 0000000..9f45d26 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_settings.py @@ -0,0 +1,1112 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _get_database_size_bytes, _get_or_create_settings, _format_bytes, _get_free_disk_bytes, _log_admin_event + +@main_bp.route("/settings/jobs/delete-all", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_jobs_delete_all(): + try: + jobs = Job.query.all() + + if not jobs: + flash("No jobs to delete.", "info") + return redirect(url_for("main.settings", section="general")) + + + + # Collect run ids for FK cleanup in auxiliary tables that may not have ON DELETE CASCADE + run_ids = [] + mail_message_ids = [] + + for job in jobs: + for run in job.runs: + if run.id is not None: + run_ids.append(run.id) + if run.mail_message_id: + mail_message_ids.append(run.mail_message_id) + + # Return related mails back to inbox and unlink from job + if mail_message_ids: + msgs = 
MailMessage.query.filter(MailMessage.id.in_(mail_message_ids)).all() + for msg in msgs: + if hasattr(msg, "location"): + msg.location = "inbox" + msg.job_id = None + + def _safe_execute(stmt, params): + try: + db.session.execute(stmt, params) + except Exception as cleanup_exc: + # Best-effort cleanup for differing DB schemas + print(f"[settings-jobs] Cleanup skipped: {cleanup_exc}") + + + # Ensure run_object_links doesn't block job_runs deletion (older schemas may miss ON DELETE CASCADE) + if run_ids: + db.session.execute( + text("DELETE FROM run_object_links WHERE run_id IN :run_ids").bindparams( + bindparam("run_ids", expanding=True) + ), + {"run_ids": run_ids}, + ) + + + + # Ensure job_object_links doesn't block jobs deletion (older schemas may miss ON DELETE CASCADE) + job_ids = [j.id for j in jobs] + if job_ids: + db.session.execute( + text("DELETE FROM job_object_links WHERE job_id IN :job_ids").bindparams( + bindparam("job_ids", expanding=True) + ), + {"job_ids": job_ids}, + ) + + # Clean up auxiliary FK tables that may reference job_runs/jobs without ON DELETE CASCADE (older schemas) + if run_ids: + _safe_execute( + text("DELETE FROM remark_job_runs WHERE job_run_id IN :run_ids").bindparams( + bindparam("run_ids", expanding=True) + ), + {"run_ids": run_ids}, + ) + _safe_execute( + text("DELETE FROM ticket_job_runs WHERE job_run_id IN :run_ids").bindparams( + bindparam("run_ids", expanding=True) + ), + {"run_ids": run_ids}, + ) + # Some schemas use remark_scopes for per-run remarks + _safe_execute( + text("DELETE FROM remark_scopes WHERE job_run_id IN :run_ids").bindparams( + bindparam("run_ids", expanding=True) + ), + {"run_ids": run_ids}, + ) + + if job_ids: + # ticket_scopes.job_id is a FK without ON DELETE CASCADE in some schemas + _safe_execute( + text("DELETE FROM ticket_scopes WHERE job_id IN :job_ids").bindparams( + bindparam("job_ids", expanding=True) + ), + {"job_ids": job_ids}, + ) + + # Some schemas use remark_scopes for per-job remarks + _safe_execute( + text("DELETE FROM remark_scopes WHERE job_id IN :job_ids").bindparams( + bindparam("job_ids", expanding=True) + ), + {"job_ids": job_ids}, + ) + # Overrides may reference jobs directly + _safe_execute( + text("DELETE FROM overrides WHERE job_id IN :job_ids").bindparams( + bindparam("job_ids", expanding=True) + ), + {"job_ids": job_ids}, + ) + + # Delete all jobs (runs/objects are cascaded via ORM relationships) + for job in jobs: + db.session.delete(job) + + db.session.commit() + flash("All jobs deleted. Related mails are returned to the inbox.", "success") + except Exception as exc: + db.session.rollback() + print(f"[settings-jobs] Failed to delete all jobs: {exc}") + flash("Failed to delete all jobs.", "danger") + + return redirect(url_for("main.settings")) + + +@main_bp.route("/settings/objects/backfill", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_objects_backfill(): + """Backfill object persistence tables for existing approved runs. + + This repairs cases where mail_objects exist but run_object_links/job_object_links/customer_objects were not created. 
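    Only runs whose mail still has mail_objects but no run_object_links are selected, and
    persist_objects_for_approved_run is replayed for each of them, so the action is safe to
    repeat: runs repaired by an earlier pass are skipped the next time.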
+ """ + engine = db.get_engine() + + # Select runs that have mail_objects but no run_object_links yet + rows = [] + try: + with engine.begin() as conn: + rows = conn.execute( + text( + """ + SELECT + jr.id AS run_id, + jr.job_id AS job_id, + j.customer_id AS customer_id, + jr.mail_message_id AS mail_message_id + FROM job_runs jr + JOIN jobs j ON j.id = jr.job_id + WHERE jr.mail_message_id IS NOT NULL + AND EXISTS ( + SELECT 1 FROM mail_objects mo WHERE mo.mail_message_id = jr.mail_message_id + ) + AND NOT EXISTS ( + SELECT 1 FROM run_object_links rol WHERE rol.run_id = jr.id + ) + ORDER BY jr.id DESC + """ + ) + ).fetchall() + except Exception as exc: + flash("Backfill failed while selecting runs.", "danger") + _log_admin_event("object_backfill_error", f"Backfill select failed: {exc}") + return redirect(url_for("main.settings", section="general")) + + total = len(rows) + repaired_runs = 0 + repaired_objects = 0 + errors = 0 + + for r in rows: + try: + repaired_objects += persist_objects_for_approved_run( + int(r[2]), int(r[1]), int(r[0]), int(r[3]) + ) + repaired_runs += 1 + except Exception as exc: + errors += 1 + _log_admin_event( + "object_backfill_run_error", + f"Backfill failed for run {r[0]} (job {r[1]}, message {r[3]}): {exc}", + ) + + _log_admin_event( + "object_backfill", + f"Backfill finished. candidates={total}, repaired_runs={repaired_runs}, objects={repaired_objects}, errors={errors}", + ) + + if total == 0: + flash("No runs needed backfill.", "info") + else: + if errors == 0: + flash(f"Backfill complete. Repaired {repaired_runs} runs.", "success") + else: + flash( + f"Backfill complete with errors. Repaired {repaired_runs} runs, errors: {errors}.", + "warning", + ) + + return redirect(url_for("main.settings")) + + +@main_bp.route("/settings/jobs/export", methods=["GET"]) +@login_required +@roles_required("admin") +def settings_jobs_export(): + try: + jobs = Job.query.all() + payload = { + "schema": "approved_jobs_export_v1", + "exported_at": datetime.utcnow().isoformat() + "Z", + "counts": {"customers": 0, "jobs": 0}, + "customers": [], + "jobs": [], + } + + # Collect customers referenced by jobs (and ensure stable name mapping) + customer_by_id = {} + for job in jobs: + if job.customer_id and job.customer and job.customer.name: + customer_by_id[job.customer_id] = job.customer.name + + payload["customers"] = [{"name": name} for _, name in sorted(customer_by_id.items(), key=lambda x: x[1].lower())] + + for job in jobs: + payload["jobs"].append( + { + "customer_name": customer_by_id.get(job.customer_id), + "from_address": getattr(job, "from_address", None), + "backup_software": job.backup_software, + "backup_type": job.backup_type, + "job_name": job.job_name, + "schedule_type": job.schedule_type, + "schedule_days_of_week": job.schedule_days_of_week, + "schedule_day_of_month": job.schedule_day_of_month, + "schedule_times": job.schedule_times, + "auto_approve": bool(job.auto_approve), + "active": bool(job.active), + } + ) + + payload["counts"]["customers"] = len(payload["customers"]) + payload["counts"]["jobs"] = len(payload["jobs"]) + + filename = f"approved-jobs-export-{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}.json" + blob = json.dumps(payload, indent=2, ensure_ascii=False).encode("utf-8") + return send_file( + io.BytesIO(blob), + mimetype="application/json", + as_attachment=True, + download_name=filename, + ) + except Exception as exc: + print(f"[settings-jobs] Export failed: {exc}") + flash("Export failed.", "danger") + return redirect(url_for("main.settings", 
section="general")) + + +@main_bp.route("/settings/jobs/import", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_jobs_import(): + upload = request.files.get("jobs_file") + if not upload or not upload.filename: + flash("No import file was provided.", "danger") + return redirect(url_for("main.settings", section="general")) + + try: + raw = upload.read() + payload = json.loads(raw.decode("utf-8")) + except Exception: + flash("Invalid JSON file.", "danger") + return redirect(url_for("main.settings", section="general")) + + if not isinstance(payload, dict) or payload.get("schema") != "approved_jobs_export_v1": + flash("Unsupported import file schema.", "danger") + return redirect(url_for("main.settings", section="general")) + + jobs = payload.get("jobs") or [] + if not isinstance(jobs, list): + flash("Invalid import file format (jobs).", "danger") + return redirect(url_for("main.settings", section="general")) + + created_customers = 0 + created_jobs = 0 + updated_jobs = 0 + + try: + for item in jobs: + if not isinstance(item, dict): + continue + + customer_name = (item.get("customer_name") or "").strip() + if not customer_name: + # jobs without customer are allowed, but we cannot map them meaningfully + customer = None + else: + customer = Customer.query.filter_by(name=customer_name).first() + if not customer: + customer = Customer(name=customer_name, active=True) + db.session.add(customer) + db.session.flush() + created_customers += 1 + + backup_software = (item.get("backup_software") or "").strip() or None + backup_type = (item.get("backup_type") or "").strip() or None + from_address = normalize_from_address(item.get("from_address")) + job_name = (item.get("job_name") or "").strip() or None + + # Match existing job using the same key we show in the UI + existing = None + q = Job.query + if customer and customer.id: + q = q.filter(Job.customer_id == customer.id) + else: + q = q.filter(Job.customer_id.is_(None)) + + if from_address is None: + q = q.filter(Job.from_address.is_(None)) + else: + q = q.filter(func.lower(Job.from_address) == from_address) + q = q.filter(Job.backup_software == backup_software) + q = q.filter(Job.backup_type == backup_type) + q = q.filter(Job.job_name == job_name) + + existing = q.first() + + def _bool(val, default=False): + if val is None: + return default + if isinstance(val, bool): + return val + if isinstance(val, (int, float)): + return bool(val) + if isinstance(val, str): + v = val.strip().lower() + if v in ("1", "true", "yes", "y", "on"): + return True + if v in ("0", "false", "no", "n", "off"): + return False + return default + + schedule_type = (item.get("schedule_type") or "").strip() or None + schedule_days_of_week = (item.get("schedule_days_of_week") or "").strip() or None + schedule_times = (item.get("schedule_times") or "").strip() or None + schedule_day_of_month = item.get("schedule_day_of_month") + if schedule_day_of_month in ("", None): + schedule_day_of_month = None + else: + try: + schedule_day_of_month = int(schedule_day_of_month) + except Exception: + schedule_day_of_month = None + + auto_approve = _bool(item.get("auto_approve"), default=True) + active = _bool(item.get("active"), default=True) + + if existing: + if hasattr(existing, "from_address"): + existing.from_address = from_address + existing.schedule_type = schedule_type + existing.schedule_days_of_week = schedule_days_of_week + existing.schedule_day_of_month = schedule_day_of_month + existing.schedule_times = schedule_times + existing.auto_approve = auto_approve 
+ existing.active = active + updated_jobs += 1 + else: + job_kwargs = { + "customer_id": (customer.id if customer else None), + "backup_software": backup_software, + "backup_type": backup_type, + "job_name": job_name, + "schedule_type": schedule_type, + "schedule_days_of_week": schedule_days_of_week, + "schedule_day_of_month": schedule_day_of_month, + "schedule_times": schedule_times, + "auto_approve": auto_approve, + "active": active, + } + # Include from_address in the persisted job key so re-parse matching works after import + if hasattr(Job, "from_address"): + job_kwargs["from_address"] = from_address + new_job = Job(**job_kwargs) + db.session.add(new_job) + created_jobs += 1 + + db.session.commit() + flash( + f"Import completed. Customers created: {created_customers}. Jobs created: {created_jobs}. Jobs updated: {updated_jobs}.", + "success", + ) + except Exception as exc: + db.session.rollback() + print(f"[settings-jobs] Import failed: {exc}") + flash("Import failed.", "danger") + + return redirect(url_for("main.settings")) + + +@main_bp.route("/settings", methods=["GET", "POST"]) +@login_required +@roles_required("admin") +def settings(): + settings = _get_or_create_settings() + section = (request.args.get("section") or "general").strip().lower() or "general" + + if request.method == "POST": + # NOTE: The Settings UI has multiple tabs with separate forms. + # Only update values that are present in the submitted form, to avoid + # clearing unrelated settings when saving from another tab. + + if "graph_tenant_id" in request.form: + settings.graph_tenant_id = (request.form.get("graph_tenant_id") or "").strip() or None + if "graph_client_id" in request.form: + settings.graph_client_id = (request.form.get("graph_client_id") or "").strip() or None + if "graph_mailbox" in request.form: + settings.graph_mailbox = (request.form.get("graph_mailbox") or "").strip() or None + + if "graph_client_secret" in request.form: + client_secret = (request.form.get("graph_client_secret") or "").strip() + if client_secret: + settings.graph_client_secret = client_secret + + if "incoming_folder" in request.form: + settings.incoming_folder = (request.form.get("incoming_folder") or "").strip() or None + if "processed_folder" in request.form: + settings.processed_folder = (request.form.get("processed_folder") or "").strip() or None + + # UI display + if "ui_timezone" in request.form: + settings.ui_timezone = (request.form.get("ui_timezone") or "").strip() or "Europe/Amsterdam" + + # Daily Jobs + if "daily_jobs_start_date" in request.form: + daily_jobs_start_date_str = (request.form.get("daily_jobs_start_date") or "").strip() + if daily_jobs_start_date_str: + try: + settings.daily_jobs_start_date = datetime.strptime(daily_jobs_start_date_str, "%Y-%m-%d").date() + except Exception: + settings.daily_jobs_start_date = None + else: + settings.daily_jobs_start_date = None + + # Import configuration + if "auto_import_enabled" in request.form: + settings.auto_import_enabled = bool(request.form.get("auto_import_enabled")) + + if "auto_import_interval_minutes" in request.form: + try: + settings.auto_import_interval_minutes = int( + request.form.get("auto_import_interval_minutes") or settings.auto_import_interval_minutes + ) + except (ValueError, TypeError): + pass + + if "auto_import_cutoff_date" in request.form: + auto_import_cutoff_date_str = (request.form.get("auto_import_cutoff_date") or "").strip() + if auto_import_cutoff_date_str: + try: + settings.auto_import_cutoff_date = 
datetime.strptime(auto_import_cutoff_date_str, "%Y-%m-%d").date() + except Exception: + settings.auto_import_cutoff_date = None + else: + settings.auto_import_cutoff_date = None + + if ( + "auto_import_enabled" in request.form + or "auto_import_interval_minutes" in request.form + or "auto_import_cutoff_date" in request.form + or "manual_import_batch_size" in request.form + or "ingest_eml_retention_days" in request.form + ): + # Automatic importer batch size is fixed at 50 + settings.auto_import_max_items = 50 + + if "ingest_eml_retention_days" in request.form: + try: + settings.ingest_eml_retention_days = int( + request.form.get("ingest_eml_retention_days") or getattr(settings, "ingest_eml_retention_days", 7) + ) + except (ValueError, TypeError): + pass + + if settings.ingest_eml_retention_days not in (0, 7, 14): + settings.ingest_eml_retention_days = 7 + + if "manual_import_batch_size" in request.form: + try: + settings.manual_import_batch_size = int( + request.form.get("manual_import_batch_size") or settings.manual_import_batch_size + ) + except (ValueError, TypeError): + pass + + # Manual import batch size is configurable but limited to 50 + try: + settings.manual_import_batch_size = int(settings.manual_import_batch_size or 50) + except (ValueError, TypeError): + settings.manual_import_batch_size = 50 + if settings.manual_import_batch_size < 1: + settings.manual_import_batch_size = 1 + if settings.manual_import_batch_size > 50: + settings.manual_import_batch_size = 50 + + try: + db.session.commit() + flash("Settings have been saved.", "success") + + # If EML storage has been turned off, clear any stored blobs immediately. + try: + if getattr(settings, "ingest_eml_retention_days", 7) == 0: + MailMessage.query.filter(MailMessage.eml_blob.isnot(None)).update( + {MailMessage.eml_blob: None, MailMessage.eml_stored_at: None}, + synchronize_session=False, + ) + db.session.commit() + except Exception as exc: + db.session.rollback() + print(f"[settings] Failed to clear stored EML blobs: {exc}") + + except Exception as exc: + db.session.rollback() + print(f"[settings] Failed to save settings: {exc}") + flash("Failed to save settings.", "danger") + + return redirect(url_for("main.settings", section=section)) + + db_size_bytes = _get_database_size_bytes() + free_disk_bytes = _get_free_disk_bytes() + + db_size_human = _format_bytes(db_size_bytes) if db_size_bytes is not None else "unknown" + free_disk_human = _format_bytes(free_disk_bytes) if free_disk_bytes is not None else "unknown" + + free_disk_warning = False + if free_disk_bytes is not None: + two_gb = 2 * 1024 * 1024 * 1024 + free_disk_warning = free_disk_bytes < two_gb + + has_client_secret = bool(settings.graph_client_secret) + + # Common UI timezones (IANA names) + tz_options = [ + "Europe/Amsterdam", + "UTC", + "Europe/London", + "Europe/Paris", + "Europe/Berlin", + "Europe/Brussels", + "America/New_York", + "America/Chicago", + "America/Denver", + "America/Los_Angeles", + "Asia/Tokyo", + "Asia/Singapore", + ] + + + # News admin data (only when requested) + news_admin_items = [] + news_admin_stats = {} + total_users = 0 + if section == "news": + try: + total_users = int(User.query.count() or 0) + except Exception: + total_users = 0 + + try: + news_admin_items = NewsItem.query.order_by( + NewsItem.pinned.desc(), + NewsItem.publish_from.desc().nullslast(), + NewsItem.created_at.desc(), + ).all() + except Exception: + news_admin_items = [] + + try: + for item in news_admin_items: + read_count = 
int(NewsRead.query.filter_by(news_item_id=item.id).count() or 0) + news_admin_stats[item.id] = { + "read": read_count, + "unread": max(total_users - read_count, 0), + "total": total_users, + } + except Exception: + news_admin_stats = {} + + + return render_template( + "main/settings.html", + settings=settings, + db_size_human=db_size_human, + free_disk_human=free_disk_human, + free_disk_warning=free_disk_warning, + has_client_secret=has_client_secret, + tz_options=tz_options, + users=User.query.order_by(User.username.asc()).all(), + section=section, + news_admin_items=news_admin_items, + news_admin_stats=news_admin_stats, + ) + + + +@main_bp.route("/settings/news/create", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_news_create(): + title = (request.form.get("title") or "").strip() + body = (request.form.get("body") or "").strip() + link_url = (request.form.get("link_url") or "").strip() or None + severity = (request.form.get("severity") or "info").strip().lower() or "info" + pinned = bool(request.form.get("pinned")) + active = bool(request.form.get("active")) + + publish_from_str = (request.form.get("publish_from") or "").strip() + publish_until_str = (request.form.get("publish_until") or "").strip() + + publish_from = None + publish_until = None + try: + if publish_from_str: + publish_from = datetime.strptime(publish_from_str, "%Y-%m-%dT%H:%M") + except Exception: + publish_from = None + try: + if publish_until_str: + publish_until = datetime.strptime(publish_until_str, "%Y-%m-%dT%H:%M") + except Exception: + publish_until = None + + if not title or not body: + flash("Title and body are required.", "danger") + return redirect(url_for("main.settings", section="news")) + + item = NewsItem( + title=title, + body=body, + link_url=link_url, + severity=severity if severity in ("info", "warning") else "info", + pinned=pinned, + active=active, + publish_from=publish_from, + publish_until=publish_until, + created_by_user_id=getattr(current_user, "id", None), + ) + db.session.add(item) + try: + db.session.commit() + flash("News item created.", "success") + except Exception as exc: + db.session.rollback() + print(f"[settings] Failed to create news item: {exc}") + flash("Failed to create news item.", "danger") + + return redirect(url_for("main.settings", section="news")) + + +@main_bp.route("/settings/news//update", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_news_update(news_id: int): + item = NewsItem.query.get(news_id) + if not item: + return abort(404) + + title = (request.form.get("title") or "").strip() + body = (request.form.get("body") or "").strip() + link_url = (request.form.get("link_url") or "").strip() or None + severity = (request.form.get("severity") or "info").strip().lower() or "info" + pinned = bool(request.form.get("pinned")) + active = bool(request.form.get("active")) + + publish_from_str = (request.form.get("publish_from") or "").strip() + publish_until_str = (request.form.get("publish_until") or "").strip() + + publish_from = None + publish_until = None + try: + if publish_from_str: + publish_from = datetime.strptime(publish_from_str, "%Y-%m-%dT%H:%M") + except Exception: + publish_from = None + try: + if publish_until_str: + publish_until = datetime.strptime(publish_until_str, "%Y-%m-%dT%H:%M") + except Exception: + publish_until = None + + if not title or not body: + flash("Title and body are required.", "danger") + return redirect(url_for("main.settings", section="news")) + + item.title = title + item.body = body + 
item.link_url = link_url + item.severity = severity if severity in ("info", "warning") else "info" + item.pinned = pinned + item.active = active + item.publish_from = publish_from + item.publish_until = publish_until + + try: + db.session.commit() + flash("News item updated.", "success") + except Exception as exc: + db.session.rollback() + print(f"[settings] Failed to update news item: {exc}") + flash("Failed to update news item.", "danger") + + return redirect(url_for("main.settings", section="news")) + + +@main_bp.route("/settings/news//delete", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_news_delete(news_id: int): + item = NewsItem.query.get(news_id) + if not item: + return abort(404) + + try: + db.session.delete(item) + db.session.commit() + flash("News item deleted.", "success") + except Exception as exc: + db.session.rollback() + print(f"[settings] Failed to delete news item: {exc}") + flash("Failed to delete news item.", "danger") + + return redirect(url_for("main.settings", section="news")) + + +@main_bp.route("/settings/news//reset_reads", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_news_reset_reads(news_id: int): + try: + NewsRead.query.filter_by(news_item_id=news_id).delete(synchronize_session=False) + db.session.commit() + flash("Read status reset for this news item.", "success") + except Exception as exc: + db.session.rollback() + print(f"[settings] Failed to reset news reads: {exc}") + flash("Failed to reset read status.", "danger") + + return redirect(url_for("main.settings", section="news")) + + +@main_bp.route("/settings/news//reads") +@login_required +@roles_required("admin") +def settings_news_reads(news_id: int): + item = NewsItem.query.get(news_id) + if not item: + return abort(404) + + reads = [] + try: + reads = ( + db.session.query(NewsRead, User) + .join(User, User.id == NewsRead.user_id) + .filter(NewsRead.news_item_id == news_id) + .order_by(NewsRead.read_at.desc()) + .all() + ) + except Exception: + reads = [] + + return render_template("main/settings_news_reads.html", item=item, reads=reads) + + +@main_bp.route("/settings/users/create", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_users_create(): + username = (request.form.get("new_username") or "").strip() + roles = [r.strip() for r in request.form.getlist("new_roles") if (r or "").strip()] + # Backwards compatible storage: comma-separated roles in the existing "role" column + role = ",".join(dict.fromkeys(roles)) if roles else "viewer" + password = (request.form.get("new_password") or "").strip() + + if not username: + flash("Username is required.", "danger") + return redirect(url_for("main.settings", section="general")) + + existing = User.query.filter_by(username=username).first() + if existing: + flash("Username already exists.", "danger") + return redirect(url_for("main.settings", section="general")) + + if not password: + flash("Password is required.", "danger") + return redirect(url_for("main.settings", section="general")) + + user = User(username=username, role=role) + user.set_password(password) + db.session.add(user) + try: + db.session.commit() + flash(f"User '{username}' has been created.", "success") + _log_admin_event("user_create", f"User '{username}' created with roles '{role}'.") + except Exception as exc: + db.session.rollback() + print(f"[settings-users] Failed to create user: {exc}") + flash("Failed to create user.", "danger") + + return redirect(url_for("main.settings", section="users")) + + 
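A minimal sketch (illustrative only, not part of the patch) of the role-storage convention used by settings_users_create above: multiple roles are persisted as a comma-separated string in the existing "role" column (defaulting to "viewer" when nothing is selected) and are split back out by the _parse_roles() helper added in routes_shared.py later in this diff. The selected role values below are hypothetical.

# Illustration only -- not part of the patch; _parse_roles is copied from routes_shared.py.
def _parse_roles(role_str: str) -> list[str]:
    raw = (role_str or "").strip()
    if not raw:
        return ["viewer"]
    parts = [p.strip() for p in raw.split(",")]
    roles = [p for p in parts if p]
    return roles or ["viewer"]

selected = ["admin", "operator", "operator"]   # hypothetical checkbox selection from the form
stored = ",".join(dict.fromkeys(selected))     # -> "admin,operator" (deduplicated, order preserved)
assert _parse_roles(stored) == ["admin", "operator"]
assert _parse_roles("") == ["viewer"]          # a blank role column falls back to viewer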
+@main_bp.route("/settings/app-reset", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_app_reset(): + # Require explicit confirmation to avoid accidental resets + confirmation = (request.form.get("confirm_reset") or "").strip().upper() + if confirmation != "RESET": + flash("Application reset cancelled. Type RESET to confirm.", "warning") + return redirect(url_for("main.settings", section="general")) + + # Reset all application data (including users). After this, the app will + # return to initial setup (create new admin). + try: + dialect_name = "" + try: + dialect_name = (db.engine.dialect.name or "").lower() + except Exception: + dialect_name = "" + + if dialect_name.startswith("postgres"): + # Postgres: fast and resets identities; CASCADE handles FKs. + db.session.execute( + text( + """TRUNCATE TABLE + ticket_job_runs, + remark_job_runs, + ticket_scopes, + remark_scopes, + tickets, + remarks, + mail_objects, + mail_messages, + job_objects, + job_runs, + jobs, + overrides, + customers, + admin_logs, + system_settings, + users + RESTART IDENTITY CASCADE""" + ) + ) + db.session.commit() + else: + # Fallback (e.g. SQLite): delete in FK-safe order. + for model in ( + TicketJobRun, + RemarkJobRun, + TicketScope, + RemarkScope, + Ticket, + Remark, + MailObject, + MailMessage, + JobObject, + JobRun, + Job, + Override, + Customer, + AdminLog, + SystemSettings, + User, + ): + db.session.query(model).delete() + db.session.commit() + + try: + logout_user() + except Exception: + pass + + flash("Application has been reset. Please create a new admin user.", "success") + return redirect(url_for("auth.initial_setup")) + except Exception as exc: + try: + db.session.rollback() + except Exception: + pass + flash(f"Reset failed: {exc}", "danger") + return redirect(url_for("main.settings", section="general")) + + +@main_bp.route("/settings/users//reset-password", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_users_reset_password(user_id: int): + user = User.query.get_or_404(user_id) + + new_password = (request.form.get("reset_password") or "").strip() + if not new_password: + flash("New password is required.", "danger") + return redirect(url_for("main.settings", section="general")) + + user.set_password(new_password) + try: + db.session.commit() + flash(f"Password for '{user.username}' has been reset.", "success") + _log_admin_event("user_reset_password", f"Password reset for user '{user.username}'.") + except Exception as exc: + db.session.rollback() + print(f"[settings-users] Failed to reset password: {exc}") + flash("Failed to reset password.", "danger") + + return redirect(url_for("main.settings", section="users")) + + +@main_bp.route("/settings/users//delete", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_users_delete(user_id: int): + user = User.query.get_or_404(user_id) + + # Prevent deleting the last admin user + if user.role == "admin": + admin_count = User.query.filter_by(role="admin").count() + if admin_count <= 1: + flash("Cannot delete the last admin account.", "danger") + return redirect(url_for("main.settings", section="general")) + + username = user.username + + try: + db.session.delete(user) + db.session.commit() + flash(f"User '{username}' has been deleted.", "success") + _log_admin_event("user_delete", f"User '{username}' deleted.") + except Exception as exc: + db.session.rollback() + print(f"[settings-users] Failed to delete user: {exc}") + flash("Failed to delete user.", "danger") + + return 
redirect(url_for("main.settings", section="users")) + + +@main_bp.route("/settings/mail-import", methods=["POST"]) +@login_required +@roles_required("admin") +def settings_mail_import(): + settings = _get_or_create_settings() + + # Prevent manual import when free disk is below 2 GB + free_disk_bytes = _get_free_disk_bytes() + if free_disk_bytes is not None: + two_gb = 2 * 1024 * 1024 * 1024 + if free_disk_bytes < two_gb: + flash("Manual mail import is blocked because free disk space is below 2 GB.", "danger") + _log_admin_event("mail_import_manual_blocked", "Manual mail import blocked: free disk space below 2 GB.") + return redirect(url_for("main.settings", section="general")) + + # Determine batch size (max 50) + try: + batch_size = int(request.form.get("manual_import_items") or settings.manual_import_batch_size or 50) + except (ValueError, TypeError): + batch_size = settings.manual_import_batch_size or 50 + + if batch_size <= 0: + batch_size = 1 + if batch_size > 50: + batch_size = 50 + + auto_approved_runs = [] + + try: + total_fetched, new_messages, auto_approved, auto_approved_runs, errors = run_manual_import( + settings, batch_size + ) + except MailImportError as exc: + msg = f"Manual mail import failed: {exc}" + _log_admin_event("mail_import_manual_error", msg) + flash(str(exc), "danger") + return redirect(url_for("main.settings", section="general")) + except Exception as exc: + msg = f"Unexpected error during manual mail import: {exc}" + _log_admin_event("mail_import_manual_error", msg) + flash("Unexpected error during manual mail import. See logs for details.", "danger") + return redirect(url_for("main.settings", section="general")) + + msg = f"Manual mail import finished. fetched={total_fetched}, new={new_messages}, auto_approved={auto_approved}, errors={len(errors)}" + _log_admin_event("mail_import_manual", msg) + + # Persist objects for auto-approved runs (must not block the request) + if auto_approved_runs: + persisted_objects = 0 + persisted_errors = 0 + for (customer_id, job_id, run_id, mail_message_id) in auto_approved_runs: + try: + persisted_objects += persist_objects_for_approved_run( + int(customer_id), int(job_id), int(run_id), int(mail_message_id) + ) + except Exception as exc: + persisted_errors += 1 + _log_admin_event( + "object_persist_error", + f"Object persistence failed for auto-approved message {mail_message_id} (job {job_id}, run {run_id}): {exc}", + ) + + _log_admin_event( + "object_persist_auto_approve", + f"Persisted objects for auto-approved runs (manual import). runs={len(auto_approved_runs)}, objects={persisted_objects}, errors={persisted_errors}", + ) + + if errors: + flash("Manual mail import finished with errors.", "warning") + else: + flash("Manual mail import finished.", "success") + + flash(f"Fetched: {total_fetched}, new: {new_messages}, auto-approved: {auto_approved}.", "info") + + for err in errors[:5]: + flash(f"Import error: {err}", "danger") + + return redirect(url_for("main.settings")) + + +@main_bp.route("/settings/folders") +@login_required +@roles_required("admin") +def settings_folders(): + settings = _get_or_create_settings() + + mailbox = (settings.graph_mailbox or "").strip() + if not mailbox: + return jsonify( + {"status": "error", "message": "Microsoft Graph mailbox is not configured."} + ), 400 + + try: + # Reuse the same token flow used by the mail importer. 
+ from ..mail_importer import _get_access_token, _build_auth_headers, GRAPH_BASE_URL + + access_token = _get_access_token(settings) + headers = _build_auth_headers(access_token) + + def _graph_get_all(url: str): + items = [] + next_url = url + # Safety limit to avoid infinite loops if Graph behaves unexpectedly. + safety_pages = 0 + while next_url and safety_pages < 50: + safety_pages += 1 + resp = requests.get(next_url, headers=headers, timeout=20) + if resp.status_code != 200: + try: + payload = resp.json() + except Exception: + payload = {} + msg = payload.get("error", {}).get("message") or f"HTTP {resp.status_code}" + raise RuntimeError(msg) + + payload = resp.json() or {} + items.extend(payload.get("value", []) or []) + next_url = payload.get("@odata.nextLink") + + return items + + def _build_tree(parent_id: str | None, parent_path: str): + if parent_id is None: + url = f"{GRAPH_BASE_URL}/users/{mailbox}/mailFolders?$top=100" + else: + url = f"{GRAPH_BASE_URL}/users/{mailbox}/mailFolders/{parent_id}/childFolders?$top=100" + + folders = _graph_get_all(url) + + nodes = [] + for f in folders: + fid = f.get("id") + name = (f.get("displayName") or "").strip() + if not fid or not name: + continue + + path = name if not parent_path else f"{parent_path}/{name}" + + node = { + "displayName": name, + "id": fid, + "path": path, + "children": [], + } + + # Recursively load children (bounded by Graph and typical folder depth). + try: + node["children"] = _build_tree(fid, path) + except Exception: + # If child loading fails for a specific folder, keep it as a leaf. + node["children"] = [] + + nodes.append(node) + + # Stable order for UI + nodes.sort(key=lambda n: (n.get("displayName") or "").lower()) + return nodes + + folders_tree = _build_tree(None, "") + return jsonify({"status": "ok", "folders": folders_tree}) + + except Exception as exc: + try: + current_app.logger.exception("Failed to load mailbox folders from Microsoft Graph") + except Exception: + pass + return jsonify({"status": "error", "message": str(exc) or "Failed to load folders."}), 500 diff --git a/containers/backupchecks/src/backend/app/main/routes_shared.py b/containers/backupchecks/src/backend/app/main/routes_shared.py new file mode 100644 index 0000000..9f3dcf4 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_shared.py @@ -0,0 +1,819 @@ +import requests +from flask import current_app, g +import os +import io +import json +import re +import html as _html +import math +import datetime as datetime_module +from functools import wraps + +from datetime import datetime, timedelta +from zoneinfo import ZoneInfo +from flask import ( + Blueprint, + render_template, + abort, + request, + session, + redirect, + url_for, + flash, + jsonify, + Response, + send_file, +) +from flask_login import login_required, current_user, logout_user +from sqlalchemy import text, func, bindparam + +from ..email_utils import normalize_from_address +from ..job_matching import build_job_match_key, find_matching_job + +from ..database import db +from ..models import ( + SystemSettings, + AdminLog, + Customer, + Job, + JobRun, + JobObject, + MailMessage, + MailObject, + Override, + User, + Ticket, + TicketScope, + TicketJobRun, + Remark, + RemarkScope, + RemarkJobRun, + FeedbackItem, + FeedbackVote, + NewsItem, + NewsRead, + ReportDefinition, + ReportObjectSnapshot, + ReportObjectSummary, +) +from ..mail_importer import run_manual_import, MailImportError +from ..parsers import parse_mail_message +from ..object_persistence import 
persist_objects_for_approved_run + + +main_bp = Blueprint("main", __name__) + + +def _parse_roles(role_str: str) -> list[str]: + raw = (role_str or "").strip() + if not raw: + return ["viewer"] + parts = [p.strip() for p in raw.split(",")] + roles = [p for p in parts if p] + return roles or ["viewer"] + + +def get_active_role() -> str: + """Return the active role for the current session/user.""" + if not current_user.is_authenticated: + return "viewer" + try: + return current_user.active_role + except Exception: + roles = _parse_roles(getattr(current_user, "role", "")) + selected = (session.get("active_role") or "").strip() + if selected and selected in roles: + return selected + session["active_role"] = roles[0] + return roles[0] + + +def get_user_roles() -> list[str]: + """Return the list of roles assigned to the current user.""" + if not current_user.is_authenticated: + return [] + try: + roles_attr = getattr(current_user, "roles", None) + if roles_attr is None: + roles_attr = getattr(current_user, "role", "") + if isinstance(roles_attr, (list, tuple, set)): + roles = [str(r).strip() for r in roles_attr if str(r).strip()] + else: + roles = _parse_roles(str(roles_attr)) + return roles + except Exception: + return _parse_roles(getattr(current_user, "role", "")) + + +@main_bp.app_context_processor +def _inject_role_context(): + return {"active_role": get_active_role(), "user_roles": get_user_roles()} + + +def roles_required(*roles): + """Require one of the given roles for the wrapped view.""" + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + if not current_user.is_authenticated: + return abort(401) + if get_active_role() not in roles: + return abort(403) + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def _send_mail_message_eml_download(msg: MailMessage): + """Return a RFC822 (.eml) download response for a MailMessage. + + Raises 404 when no blob is stored. + """ + if not msg: + return abort(404) + + blob = getattr(msg, "eml_blob", None) + if not blob: + return abort(404) + + filename = f"message-{msg.id}.eml" + return send_file( + io.BytesIO(blob), + as_attachment=True, + download_name=filename, + mimetype="message/rfc822", + ) + + +@main_bp.route("/message//eml") +@login_required +@roles_required("admin", "operator", "viewer") +def message_eml(message_id: int): + """Download the stored raw EML for any message (Inbox, History, linked to jobs/runs).""" + msg = MailMessage.query.get_or_404(message_id) + return _send_mail_message_eml_download(msg) + + +def _get_or_create_settings() -> SystemSettings: + settings = SystemSettings.query.first() + if settings is None: + settings = SystemSettings( + auto_import_enabled=False, + auto_import_interval_minutes=15, + auto_import_max_items=50, + manual_import_batch_size=50, + auto_import_cutoff_date=datetime.utcnow().date(), + ingest_eml_retention_days=7, + ) + db.session.add(settings) + db.session.commit() + return settings + + +def _get_ui_timezone_name() -> str: + """Return the configured UI timezone name (IANA), with a safe fallback.""" + try: + settings = _get_or_create_settings() + name = (getattr(settings, "ui_timezone", None) or "").strip() + if name: + return name + except Exception: + pass + try: + return (current_app.config.get("TIMEZONE") or "Europe/Amsterdam").strip() + except Exception: + return "Europe/Amsterdam" + + +def _get_ui_timezone() -> ZoneInfo: + """Return a ZoneInfo instance for UI rendering. 
Falls back to UTC if invalid.""" + tz = getattr(g, "_ui_tz", None) + if tz is not None: + return tz + name = _get_ui_timezone_name() + try: + tz = ZoneInfo(name) + except Exception: + tz = ZoneInfo("UTC") + g._ui_tz = tz + return tz + + + +def _format_bytes(num_bytes: int) -> str: + if num_bytes is None: + return "unknown" + + step = 1024.0 + units = ["B", "KB", "MB", "GB", "TB"] + size = float(num_bytes) + unit = 0 + while size >= step and unit < len(units) - 1: + size /= step + unit += 1 + return f"{size:.2f} {units[unit]}" + + +def _get_database_size_bytes(): + try: + result = db.session.execute(text("SELECT pg_database_size(current_database())")) + return int(result.scalar() or 0) + except Exception as exc: + print(f"[settings] Failed to read database size: {exc}") + return None + + +def _get_free_disk_bytes(): + try: + stat = os.statvfs("/") + return int(stat.f_bavail * stat.f_frsize) + except Exception as exc: + print(f"[settings] Failed to read free disk space: {exc}") + return None + + +def _format_datetime(dt): + if not dt: + return "-" + try: + tz = _get_ui_timezone() + if hasattr(dt, "tzinfo") and dt.tzinfo is None: + dt = dt.replace(tzinfo=datetime_module.timezone.utc) + if hasattr(dt, "astimezone"): + dt = dt.astimezone(tz) + return dt.strftime("%d-%m-%Y %H:%M:%S") + except Exception: + return "-" + +def _apply_overrides_to_run(job: Job, run: JobRun): + """Determine effective status for a run, taking overrides into account. + + Returns a tuple: + (display_status, override_applied, override_level, override_id, override_reason) + + override_level is one of: None, "global", "object". + override_id is the matched overrides.id when applied. + override_reason is a short human-readable reason for reporting. + """ + if not run: + return "", False, None, None, None + + base_status = (run.status or "").strip() or "-" + # Some ingested runs only have created_at populated. Fall back to created_at + # so overrides can still be evaluated and recomputed retroactively. + run_at = getattr(run, "run_at", None) or getattr(run, "created_at", None) + + # No timestamp at all -> do not attempt override matching. + if not run_at: + return base_status, False, None, None, None + + def _reason_for(ov: Override) -> str: + parts = [] + try: + parts.append(f"id={ov.id}") + except Exception: + pass + try: + lvl = (getattr(ov, "level", None) or "").strip() + if lvl: + parts.append(f"level={lvl}") + except Exception: + pass + try: + ms = (getattr(ov, "match_status", None) or "").strip() + if ms: + parts.append(f"status={ms}") + except Exception: + pass + try: + mec = (getattr(ov, "match_error_contains", None) or "").strip() + if mec: + parts.append(f"contains={mec}") + except Exception: + pass + try: + cm = (getattr(ov, "comment", None) or "").strip() + if cm: + parts.append(f"comment={cm}") + except Exception: + pass + return "; ".join(parts) or "override applied" + + def _load_run_object_rows() -> list[dict]: + """Load persisted run objects (run_object_links + customer_objects). + + This is the primary source for object-level status/error reporting. 
+ """ + try: + rows = ( + db.session.execute( + text( + """ + SELECT co.object_name AS object_name, + rol.status AS status, + rol.error_message AS error_message + FROM run_object_links rol + JOIN customer_objects co ON co.id = rol.customer_object_id + WHERE rol.run_id = :run_id + ORDER BY co.object_name + """ + ), + {"run_id": run.id}, + ) + .mappings() + .all() + ) + return [dict(r) for r in rows] + except Exception: + return [] + + def _norm(s: str | None) -> str: + return (s or "").strip() + + def _contains(haystack: str | None, needle: str | None) -> bool: + if not needle: + return True + if not haystack: + return False + return needle.lower() in haystack.lower() + + def _matches_status(candidate: str | None, expected: str | None) -> bool: + if not expected: + return True + return _norm(candidate).lower() == _norm(expected).lower() + + def _matches_object_name(candidate: str | None, expected: str | None) -> bool: + if not expected: + return True + cand = _norm(candidate) + exp = _norm(expected) + if not cand: + return False + # Support '*' wildcard for convenience (glob-style). + if "*" in exp: + try: + import fnmatch + + return fnmatch.fnmatch(cand.lower(), exp.lower()) + except Exception: + return cand.lower() == exp.lower() + return cand.lower() == exp.lower() + + def _is_in_window(ov: Override) -> bool: + if not ov.active: + return False + if ov.start_at and run_at < ov.start_at: + return False + if ov.end_at and run_at > ov.end_at: + return False + return True + + # Load all potentially applicable overrides. Object-level overrides take precedence. + try: + overrides_q = Override.query.filter(Override.active.is_(True)).all() + except Exception: + overrides_q = [] + + applicable_object_overrides: list[Override] = [] + applicable_global_overrides: list[Override] = [] + + for ov in overrides_q: + lvl = (_norm(getattr(ov, "level", "")) or "").lower() + if lvl == "object": + if ov.job_id != job.id: + continue + if not _is_in_window(ov): + continue + applicable_object_overrides.append(ov) + elif lvl == "global": + if ov.backup_software and _norm(job.backup_software).lower() != _norm(ov.backup_software).lower(): + continue + if ov.backup_type and _norm(job.backup_type).lower() != _norm(ov.backup_type).lower(): + continue + if not _is_in_window(ov): + continue + applicable_global_overrides.append(ov) + + # Persisted run-object rows (run_object_links + customer_objects) are the primary + # source for object-level error/status matching. + run_object_rows = _load_run_object_rows() + + # Helper for evaluating a global override against the run itself. + def _matches_global(ov: Override) -> bool: + if not _matches_status(run.status, ov.match_status): + return False + + # Global overrides should match both the run-level remark and any object-level error messages. + if ov.match_error_contains: + if _contains(run.remark, ov.match_error_contains): + return True + + # Check persisted run-object error messages. + for row in run_object_rows or []: + if _contains(row.get("error_message"), ov.match_error_contains): + return True + + objs = [] + try: + objs = list(run.objects) if hasattr(run, "objects") else [] + except Exception: + objs = [] + for obj in objs or []: + if _contains(getattr(obj, "error_message", None), ov.match_error_contains): + return True + return False + + return True + + # Helper for evaluating an object override against objects within the run. + def _matches_object(ov: Override) -> bool: + # Prefer persisted rows. 
+ for row in run_object_rows or []: + if not _matches_object_name(row.get("object_name"), ov.object_name): + continue + if not _matches_status(row.get("status"), ov.match_status): + continue + if not _contains(row.get("error_message"), ov.match_error_contains): + continue + return True + + # Fallback to legacy JobObject relationship (older schemas). + objs = [] + try: + objs = list(run.objects) if hasattr(run, "objects") else [] + except Exception: + objs = [] + for obj in objs or []: + if not _matches_object_name(getattr(obj, "object_name", None), ov.object_name): + continue + if not _matches_status(getattr(obj, "status", None), ov.match_status): + continue + if not _contains(getattr(obj, "error_message", None), ov.match_error_contains): + continue + return True + + return False + + # Evaluate object-level overrides first. + for ov in applicable_object_overrides: + if _matches_object(ov): + if ov.treat_as_success: + return "Success (override)", True, "object", ov.id, _reason_for(ov) + return base_status, True, "object", ov.id, _reason_for(ov) + + # Evaluate global overrides. + for ov in applicable_global_overrides: + if _matches_global(ov): + if ov.treat_as_success: + return "Success (override)", True, "global", ov.id, _reason_for(ov) + return base_status, True, "global", ov.id, _reason_for(ov) + + return base_status, False, None, None, None + + +def _recompute_override_flags_for_runs(job_ids: list[int] | None = None, start_at: datetime | None = None, end_at: datetime | None = None, only_unreviewed: bool = True) -> int: + """Recompute JobRun.override_applied for already existing runs. + + This is used when an override is created/toggled so existing runs immediately reflect the + current override configuration. + + Returns number of updated runs. + """ + q = JobRun.query + if only_unreviewed: + q = q.filter(JobRun.reviewed_at.is_(None)) + if job_ids: + q = q.filter(JobRun.job_id.in_(job_ids)) + if start_at: + q = q.filter(func.coalesce(JobRun.run_at, JobRun.created_at) >= start_at) + if end_at: + q = q.filter(func.coalesce(JobRun.run_at, JobRun.created_at) <= end_at) + + try: + runs = q.all() + except Exception: + runs = [] + + updated = 0 + for run in runs: + job = None + try: + job = Job.query.get(run.job_id) + except Exception: + job = None + if not job: + continue + + _status, applied, lvl, ov_id, ov_reason = _apply_overrides_to_run(job, run) + + applied_bool = bool(applied) + changed = False + + if bool(getattr(run, "override_applied", False)) != applied_bool: + run.override_applied = applied_bool + changed = True + + # Populate reporting metadata (safe for older schemas that might not yet have columns). 
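+ # Each attribute below is set inside its own try/except so a missing column on an older schema never aborts the recompute.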
+ try: + if getattr(run, "override_applied_override_id", None) != (ov_id if applied_bool else None): + run.override_applied_override_id = ov_id if applied_bool else None + changed = True + except Exception: + pass + try: + if getattr(run, "override_applied_level", None) != (lvl if applied_bool else None): + run.override_applied_level = lvl if applied_bool else None + changed = True + except Exception: + pass + try: + if getattr(run, "override_applied_reason", None) != (ov_reason if applied_bool else None): + run.override_applied_reason = ov_reason if applied_bool else None + changed = True + except Exception: + pass + + if changed: + updated += 1 + + if updated: + try: + db.session.commit() + except Exception: + db.session.rollback() + return 0 + + return updated + + +def _log_admin_event(event_type: str, message: str, details: str | None = None) -> None: + """Store an admin-level log entry and enforce a 7-day retention window.""" + try: + username = current_user.username if current_user.is_authenticated else None + except Exception: + username = None + + entry = AdminLog( + user=username, + event_type=event_type, + message=message, + details=details, + ) + db.session.add(entry) + + # Enforce retention: keep only the last 7 days + try: + cutoff = datetime.utcnow() - timedelta(days=7) + AdminLog.query.filter(AdminLog.created_at < cutoff).delete(synchronize_session=False) + except Exception: + # If cleanup fails we still try to commit the new entry + pass + + try: + db.session.commit() + except Exception as exc: + db.session.rollback() + print(f"[admin-log] Failed to write log entry: {exc}") + + + +# ------------------------- +# Inbox +# ------------------------- + + + +# ------------------------- +# Customers +# ------------------------- + + + +# ------------------------- +# Jobs +# ------------ +# ------------------------- +# Customers import / export +# ------------------------- + + + +# ------------------------- +# Jobs +# ------------------------- + + + +def _infer_schedule_map_from_runs(job_id: int): + """Infer weekly schedule blocks (15-min) from historical runs. + + Returns dict weekday->sorted list of 'HH:MM' strings in configured UI local time. + """ + schedule = {i: [] for i in range(7)} # 0=Mon .. 6=Sun + + # Certain job types are informational and should never participate in schedule + # inference or Expected/Missed logic (no schedule is applicable). + try: + job = Job.query.get(job_id) + if job: + bs = (job.backup_software or '').strip().lower() + bt = (job.backup_type or '').strip().lower() + # Informational types that should never participate in schedule inference + # or Expected/Missed generation. + if bs == 'veeam' and bt == 'license key': + return schedule + if bs == 'synology' and bt == 'account protection': + return schedule + if bs == 'syncovery' and bt == 'syncovery': + return schedule + except Exception: + pass + try: + # Only infer schedules from real runs that came from mail reports. + # Synthetic "Missed" rows must never influence schedule inference. 
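+ # Only the most recent 500 runs with run_at set, not flagged as missed, and originating from a mail message are considered.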
+ runs = ( + JobRun.query + .filter( + JobRun.job_id == job_id, + JobRun.run_at.isnot(None), + JobRun.missed.is_(False), + JobRun.mail_message_id.isnot(None), + ) + .order_by(JobRun.run_at.desc()) + .limit(500) + .all() + ) + except Exception: + runs = [] + + if not runs: + return schedule + + # Convert run_at to UI local time and bucket into 15-minute blocks + try: + tz = _get_ui_timezone() + except Exception: + tz = None + + seen = {i: set() for i in range(7)} + for r in runs: + if not r.run_at: + continue + dt = r.run_at + if tz is not None: + try: + if dt.tzinfo is None: + # DB stores UTC naive timestamps. Convert them to configured UI timezone. + dt = dt.replace(tzinfo=datetime_module.timezone.utc).astimezone(tz) + else: + dt = dt.astimezone(tz) + except Exception: + pass + + wd = dt.weekday() + minute_bucket = (dt.minute // 15) * 15 + hh = dt.hour + tstr = f"{hh:02d}:{minute_bucket:02d}" + seen[wd].add(tstr) + + for wd in range(7): + schedule[wd] = sorted(seen[wd]) + + return schedule + + +def _schedule_map_to_desc(schedule_map): + weekday_names = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + any_times = any(schedule_map.get(i) for i in range(7)) + if not any_times: + return "No schedule configured yet (no runs found)." + parts = [] + for i in range(7): + times = schedule_map.get(i) or [] + if times: + parts.append(f"{weekday_names[i]}: " + ", ".join(times)) + else: + parts.append(f"{weekday_names[i]}: —") + return " | ".join(parts) + + +def _describe_schedule(job: Job) -> str: + if not job: + return "No schedule configured." + + if not job.schedule_type: + return "No schedule configured yet." + + times = job.schedule_times or "" + times_display = times + if times: + try: + # try to normalize to comma-separated + if isinstance(times, str): + parts = [p.strip() for p in times.split(",") if p.strip()] + times_display = ", ".join(parts) + except Exception: + pass + + stype = job.schedule_type.lower() + if stype == "daily": + if times_display: + return f"Runs daily at {times_display}." + return "Runs daily." + + if stype == "weekly": + days = job.schedule_days_of_week or "" + if days and times_display: + return f"Runs weekly on {days} at {times_display}." + if days: + return f"Runs weekly on {days}." + return "Runs weekly." + + if stype == "monthly": + dom = job.schedule_day_of_month + if dom and times_display: + return f"Runs monthly on day {dom} at {times_display}." + if dom: + return f"Runs monthly on day {dom}." + return "Runs monthly." + + if stype == "yearly": + dom = job.schedule_day_of_month + if dom and times_display: + return f"Runs yearly on day {dom} at {times_display}." + if dom: + return f"Runs yearly on day {dom}." + return "Runs yearly." + + return "No schedule configured." + + + +# ------------------------- +# Daily jobs / overrides / reports / logging +# ------------------------- + + + +# ------------------------- +# Settings +# ------------------------- + + + +# --------------------------------------------------------------------------- +# Tickets & Remarks +# --------------------------------------------------------------------------- + +def _amsterdam_tz(): + # Backward-compatible helper: keep name but use configured UI timezone. 
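+ # Returns None when the UI timezone cannot be resolved; _to_amsterdam_date() then falls back to the naive date.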
+ try: + return _get_ui_timezone() + except Exception: + return None + + +def _to_amsterdam_date(dt_utc_naive: datetime | None) -> datetime_module.date | None: + if not dt_utc_naive: + return None + tz = _amsterdam_tz() + if not tz: + return dt_utc_naive.date() + try: + if dt_utc_naive.tzinfo is None: + dt_utc = dt_utc_naive.replace(tzinfo=datetime_module.timezone.utc) + else: + dt_utc = dt_utc_naive.astimezone(datetime_module.timezone.utc) + return dt_utc.astimezone(tz).date() + except Exception: + return dt_utc_naive.date() + + +def _next_ticket_code(now_utc: datetime) -> str: + day = _to_amsterdam_date(now_utc) or now_utc.date() + prefix = f"T{day.strftime('%Y%m%d')}." + + # Find max sequence for today + try: + max_code = ( + db.session.query(Ticket.ticket_code) + .filter(Ticket.ticket_code.like(prefix + "%")) + .order_by(Ticket.ticket_code.desc()) + .limit(1) + .scalar() + ) + except Exception: + max_code = None + + seq = 1 + if max_code and max_code.startswith(prefix): + try: + seq = int(max_code.split(".")[-1]) + 1 + except Exception: + seq = 1 + + return f"{prefix}{seq:04d}" + diff --git a/containers/backupchecks/src/backend/app/main/routes_tickets.py b/containers/backupchecks/src/backend/app/main/routes_tickets.py new file mode 100644 index 0000000..a1cc6f1 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_tickets.py @@ -0,0 +1,335 @@ +from .routes_shared import * # noqa: F401,F403 +from .routes_shared import _format_datetime + +@main_bp.route("/tickets") +@login_required +@roles_required("admin", "operator", "viewer") +def tickets_page(): + tab = (request.args.get("tab") or "tickets").strip().lower() + if tab not in ("tickets", "remarks"): + tab = "tickets" + + active = (request.args.get("active") or "1").strip() + active_only = active != "0" + + q = (request.args.get("q") or "").strip() + try: + customer_id = int(request.args.get("customer_id") or 0) + except Exception: + customer_id = 0 + + backup_software = (request.args.get("backup_software") or "").strip() + backup_type = (request.args.get("backup_type") or "").strip() + + customers = Customer.query.order_by(Customer.name.asc()).all() + + tickets = [] + remarks = [] + + if tab == "tickets": + query = Ticket.query + if active_only: + query = query.filter(Ticket.resolved_at.is_(None)) + if q: + like_q = f"%{q}%" + query = query.filter( + (Ticket.ticket_code.ilike(like_q)) + | (Ticket.description.ilike(like_q)) + ) + + if customer_id or backup_software or backup_type: + query = query.join(TicketScope, TicketScope.ticket_id == Ticket.id) + if customer_id: + query = query.filter(TicketScope.customer_id == customer_id) + if backup_software: + query = query.filter(TicketScope.backup_software == backup_software) + if backup_type: + query = query.filter(TicketScope.backup_type == backup_type) + + query = query.order_by(Ticket.resolved_at.isnot(None), Ticket.start_date.desc()) + tickets_raw = query.limit(500).all() + + ticket_ids = [t.id for t in tickets_raw] + customer_map = {} + run_count_map = {} + + if ticket_ids: + try: + rows = ( + db.session.execute( + text( + """ + SELECT ts.ticket_id, c.name + FROM ticket_scopes ts + JOIN customers c ON c.id = ts.customer_id + WHERE ts.ticket_id = ANY(:ids) + AND ts.customer_id IS NOT NULL + """ + ), + {"ids": ticket_ids}, + ) + .fetchall() + ) + for tid, cname in rows: + customer_map.setdefault(int(tid), []) + if cname and cname not in customer_map[int(tid)]: + customer_map[int(tid)].append(cname) + except Exception: + customer_map = {} + + try: + rows = ( + 
db.session.execute( + text( + """ + SELECT ticket_id, COUNT(*) + FROM ticket_job_runs + WHERE ticket_id = ANY(:ids) + GROUP BY ticket_id + """ + ), + {"ids": ticket_ids}, + ) + .fetchall() + ) + for tid, cnt in rows: + run_count_map[int(tid)] = int(cnt or 0) + except Exception: + run_count_map = {} + + for t in tickets_raw: + customers_for_ticket = customer_map.get(t.id) or [] + if customers_for_ticket: + customer_display = customers_for_ticket[0] + if len(customers_for_ticket) > 1: + customer_display += f" +{len(customers_for_ticket)-1}" + else: + customer_display = "-" + + # Scope summary: best-effort from first scope + scope_summary = "-" + first_job_id = None + try: + s = TicketScope.query.filter(TicketScope.ticket_id == t.id).order_by(TicketScope.id.asc()).first() + if s: + parts = [] + if s.backup_software: + parts.append(s.backup_software) + if s.backup_type: + parts.append(s.backup_type) + if s.job_id: + first_job_id = int(s.job_id) + job = Job.query.get(s.job_id) + if job and job.job_name: + parts.append(job.job_name) + scope_summary = " / ".join([p for p in parts if p]) or "-" + except Exception: + scope_summary = "-" + + tickets.append( + { + "id": t.id, + "ticket_code": t.ticket_code, + "description": t.description or "", + "active_from_date": str(getattr(t, "active_from_date", "") or ""), + "start_date": _format_datetime(t.start_date), + "resolved_at": _format_datetime(t.resolved_at) if t.resolved_at else "", + "active": t.resolved_at is None, + "customers": customer_display, + "scope_summary": scope_summary, + "linked_runs": run_count_map.get(t.id, 0), + "job_id": first_job_id, + } + ) + + else: + query = Remark.query + if active_only: + query = query.filter(Remark.resolved_at.is_(None)) + if q: + like_q = f"%{q}%" + query = query.filter(Remark.body.ilike(like_q)) + + if customer_id or backup_software or backup_type: + query = query.join(RemarkScope, RemarkScope.remark_id == Remark.id) + if customer_id: + query = query.filter(RemarkScope.customer_id == customer_id) + if backup_software: + query = query.filter(RemarkScope.backup_software == backup_software) + if backup_type: + query = query.filter(RemarkScope.backup_type == backup_type) + + query = query.order_by(Remark.resolved_at.isnot(None), Remark.start_date.desc()) + remarks_raw = query.limit(500).all() + + remark_ids = [r.id for r in remarks_raw] + customer_map = {} + run_count_map = {} + + if remark_ids: + try: + rows = ( + db.session.execute( + text( + """ + SELECT rs.remark_id, c.name + FROM remark_scopes rs + JOIN customers c ON c.id = rs.customer_id + WHERE rs.remark_id = ANY(:ids) + AND rs.customer_id IS NOT NULL + """ + ), + {"ids": remark_ids}, + ) + .fetchall() + ) + for rid, cname in rows: + customer_map.setdefault(int(rid), []) + if cname and cname not in customer_map[int(rid)]: + customer_map[int(rid)].append(cname) + except Exception: + customer_map = {} + + try: + rows = ( + db.session.execute( + text( + """ + SELECT remark_id, COUNT(*) + FROM remark_job_runs + WHERE remark_id = ANY(:ids) + GROUP BY remark_id + """ + ), + {"ids": remark_ids}, + ) + .fetchall() + ) + for rid, cnt in rows: + run_count_map[int(rid)] = int(cnt or 0) + except Exception: + run_count_map = {} + + for r in remarks_raw: + customers_for_remark = customer_map.get(r.id) or [] + if customers_for_remark: + customer_display = customers_for_remark[0] + if len(customers_for_remark) > 1: + customer_display += f" +{len(customers_for_remark)-1}" + else: + customer_display = "-" + + scope_summary = "-" + first_job_id = None + try: + s = 
RemarkScope.query.filter(RemarkScope.remark_id == r.id).order_by(RemarkScope.id.asc()).first() + if s: + parts = [] + if s.backup_software: + parts.append(s.backup_software) + if s.backup_type: + parts.append(s.backup_type) + if s.job_id: + first_job_id = int(s.job_id) + job = Job.query.get(s.job_id) + if job and job.job_name: + parts.append(job.job_name) + scope_summary = " / ".join([p for p in parts if p]) or "-" + except Exception: + scope_summary = "-" + + preview = (r.body or "") + if len(preview) > 80: + preview = preview[:77] + "..." + + remarks.append( + { + "id": r.id, + "preview": preview, + "start_date": _format_datetime(r.start_date) if r.start_date else "-", + "resolved_at": _format_datetime(r.resolved_at) if r.resolved_at else "", + "active": r.resolved_at is None, + "customers": customer_display, + "scope_summary": scope_summary, + "linked_runs": run_count_map.get(r.id, 0), + "job_id": first_job_id, + } + ) + + return render_template( + "main/tickets.html", + tab=tab, + active_only=active_only, + q=q, + customer_id=customer_id, + backup_software=backup_software, + backup_type=backup_type, + customers=customers, + tickets=tickets, + remarks=remarks, + ) + + +@main_bp.route("/tickets/", methods=["GET", "POST"]) +@login_required +@roles_required("admin", "operator", "viewer") +def ticket_detail(ticket_id: int): + ticket = Ticket.query.get_or_404(ticket_id) + + if request.method == "POST": + if get_active_role() not in ("admin", "operator"): + abort(403) + ticket.description = (request.form.get("description") or "").strip() or None + try: + db.session.commit() + flash("Ticket updated.", "success") + except Exception as exc: + db.session.rollback() + flash(f"Failed to update ticket: {exc}", "danger") + return redirect(url_for("main.ticket_detail", ticket_id=ticket.id)) + + # Scopes + scopes = TicketScope.query.filter(TicketScope.ticket_id == ticket.id).order_by(TicketScope.id.asc()).all() + + # Linked runs + runs = [] + try: + rows = ( + db.session.execute( + text( + """ + SELECT jr.id, jr.run_at, jr.status, j.job_name, c.name AS customer_name + FROM ticket_job_runs tjr + JOIN job_runs jr ON jr.id = tjr.job_run_id + JOIN jobs j ON j.id = jr.job_id + LEFT JOIN customers c ON c.id = j.customer_id + WHERE tjr.ticket_id = :ticket_id + ORDER BY jr.run_at DESC + LIMIT 20 + """ + ), + {"ticket_id": ticket.id}, + ) + .mappings() + .all() + ) + for r in rows: + runs.append( + { + "id": r.get("id"), + "run_at": _format_datetime(r.get("run_at")), + "status": r.get("status") or "", + "job_name": r.get("job_name") or "", + "customer_name": r.get("customer_name") or "", + } + ) + except Exception: + runs = [] + + return render_template( + "main/ticket_detail.html", + ticket=ticket, + scopes=scopes, + runs=runs, + ) + diff --git a/containers/backupchecks/src/backend/app/main/routes_user_settings.py b/containers/backupchecks/src/backend/app/main/routes_user_settings.py new file mode 100644 index 0000000..3ff1b92 --- /dev/null +++ b/containers/backupchecks/src/backend/app/main/routes_user_settings.py @@ -0,0 +1,38 @@ +from flask import render_template, redirect, url_for, flash, request +from flask_login import login_required, current_user + +from ..database import db +from .routes_shared import main_bp + + +@main_bp.route("/user-settings", methods=["GET", "POST"]) +@login_required +def user_settings(): + """User self-service settings. + + Currently allows the logged-in user to change their own password. 
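+
+    Expected POST fields (as read in the handler below): current_password,
+    new_password and confirm_password. The new password is only applied when
+    the current password verifies and both new values match.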
+ """ + + if request.method == "POST": + current_password = request.form.get("current_password") or "" + new_password = (request.form.get("new_password") or "").strip() + confirm_password = (request.form.get("confirm_password") or "").strip() + + if not current_user.check_password(current_password): + flash("Current password is incorrect.", "danger") + return render_template("main/user_settings.html") + + if not new_password: + flash("New password is required.", "danger") + return render_template("main/user_settings.html") + + if new_password != confirm_password: + flash("Passwords do not match.", "danger") + return render_template("main/user_settings.html") + + current_user.set_password(new_password) + db.session.commit() + flash("Password updated.", "success") + return redirect(url_for("main.user_settings")) + + return render_template("main/user_settings.html") diff --git a/containers/backupchecks/src/backend/app/migrations.py b/containers/backupchecks/src/backend/app/migrations.py new file mode 100644 index 0000000..a93baf1 --- /dev/null +++ b/containers/backupchecks/src/backend/app/migrations.py @@ -0,0 +1,1365 @@ +from sqlalchemy import inspect, text + +from .database import db + + +def _column_exists(table_name: str, column_name: str) -> bool: + """Return True if the given column exists on the given table.""" + engine = db.get_engine() + inspector = inspect(engine) + columns = [col["name"] for col in inspector.get_columns(table_name)] + return column_name in columns + + +def _is_column_nullable(table_name: str, column_name: str) -> bool: + """Return True if the given column is nullable on the given table.""" + engine = db.get_engine() + inspector = inspect(engine) + for col in inspector.get_columns(table_name): + if col["name"] == column_name: + # SQLAlchemy reports "nullable" for the column + return bool(col.get("nullable", False)) + return False + + +def migrate_add_username_to_users() -> None: + """Ensure users.username column exists and is NOT NULL and UNIQUE. + + This migration is written to be idempotent: it will only apply changes + when needed and can safely run on every startup. + """ + table = "users" + column = "username" + + print("[migrations] Checking users.username column...") + if _column_exists(table, column): + print("[migrations] users.username already exists, skipping.") + return + + engine = db.get_engine() + with engine.begin() as conn: + print("[migrations] Adding users.username column...") + conn.execute( + text('ALTER TABLE "users" ADD COLUMN username VARCHAR(255)') + ) + print("[migrations] Backfilling users.username from email or id...") + # Use email if available, otherwise fallback to a generated name + conn.execute( + text( + """ + UPDATE "users" + SET username = + CASE + WHEN email IS NOT NULL AND email <> '' THEN email + ELSE 'user_' || id::text + END + WHERE username IS NULL OR username = ''; + """ + ) + ) + print("[migrations] Applying NOT NULL and UNIQUE constraints on users.username...") + conn.execute( + text( + 'ALTER TABLE "users" ALTER COLUMN username SET NOT NULL' + ) + ) + conn.execute( + text( + 'CREATE UNIQUE INDEX IF NOT EXISTS users_username_key ' + 'ON "users" (username);' + ) + ) + + print("[migrations] migrate_add_username_to_users completed.") + + +def migrate_make_email_nullable() -> None: + """Ensure users.email column is nullable. + + Older schemas may have email as NOT NULL. We want to allow NULL so + that username becomes the primary login identifier. 
+ """ + table = "users" + column = "email" + + print("[migrations] Ensuring users.email is nullable...") + if _is_column_nullable(table, column): + print("[migrations] users.email is already nullable, skipping.") + return + + engine = db.get_engine() + with engine.begin() as conn: + print("[migrations] Altering users.email to DROP NOT NULL...") + conn.execute( + text('ALTER TABLE "users" ALTER COLUMN email DROP NOT NULL') + ) + + print("[migrations] migrate_make_email_nullable completed.") + +def migrate_system_settings_ui_timezone() -> None: + """Add UI timezone column to system_settings if missing. + + Column: + - ui_timezone (VARCHAR(64) NOT NULL DEFAULT 'Europe/Amsterdam') + """ + table = "system_settings" + column = "ui_timezone" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for system_settings ui_timezone migration: {exc}") + return + + try: + if _column_exists(table, column): + print("[migrations] system_settings.ui_timezone already exists.") + return + + with engine.begin() as conn: + conn.execute( + text( + f'ALTER TABLE "{table}" ADD COLUMN {column} VARCHAR(64) NOT NULL DEFAULT \'Europe/Amsterdam\'' + ) + ) + + print("[migrations] migrate_system_settings_ui_timezone completed.") + except Exception as exc: + print(f"[migrations] Failed to migrate system_settings.ui_timezone: {exc}") + + + +def migrate_mail_messages_columns() -> None: + """Ensure new columns on mail_messages exist. + + This migration adds the following columns if they do not yet exist: + - job_id (INTEGER, nullable) + - location (VARCHAR, default 'inbox') + - html_body (TEXT, nullable) + - text_body (TEXT, nullable) + """ + table = "mail_messages" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for mail_messages migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + # Table does not exist yet; it will be created by db.create_all() + print(f"[migrations] mail_messages table not found, skipping mail_messages column migration: {exc}") + return + + with engine.begin() as conn: + if "job_id" not in existing_columns: + print('[migrations] Adding mail_messages.job_id column...') + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN job_id INTEGER')) + + if "location" not in existing_columns: + print('[migrations] Adding mail_messages.location column with default "inbox"...') + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN location VARCHAR(64)')) + conn.execute( + text( + 'UPDATE "mail_messages" SET location = \'inbox\' ' + 'WHERE location IS NULL OR location = \'\'' + ) + ) + + if "html_body" not in existing_columns: + print('[migrations] Adding mail_messages.html_body column...') + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN html_body TEXT')) + + if "text_body" not in existing_columns: + print('[migrations] Adding mail_messages.text_body column...') + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN text_body TEXT')) + + print("[migrations] migrate_mail_messages_columns completed.") + +def migrate_mail_messages_parse_columns() -> None: + """Add parse-related columns to mail_messages if missing. 
+ + Columns: + - overall_status (VARCHAR(32), nullable) + - parse_result (VARCHAR(32), nullable) + - parse_error (VARCHAR(512), nullable) + """ + table = "mail_messages" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for mail_messages parse migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] mail_messages table not found for parse migration, skipping: {exc}") + return + + with engine.begin() as conn: + if "overall_status" not in existing_columns: + print("[migrations] Adding mail_messages.overall_status column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN overall_status VARCHAR(32)')) + + if "parse_result" not in existing_columns: + print("[migrations] Adding mail_messages.parse_result column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN parse_result VARCHAR(32)')) + + if "parse_error" not in existing_columns: + print("[migrations] Adding mail_messages.parse_error column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN parse_error VARCHAR(512)')) + + print("[migrations] migrate_mail_messages_parse_columns completed.") + + + +def migrate_mail_messages_approval_columns() -> None: + """Add approval-related columns to mail_messages if missing. + + Columns: + - approved (BOOLEAN NOT NULL DEFAULT FALSE) + - approved_at (TIMESTAMP NULL) + - approved_by_id (INTEGER NULL, FK to users.id) + """ + table = "mail_messages" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for mail_messages approval migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] mail_messages table not found for approval migration, skipping: {exc}") + return + + with engine.begin() as conn: + if "approved" not in existing_columns: + print("[migrations] Adding mail_messages.approved column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN approved BOOLEAN NOT NULL DEFAULT FALSE')) + + if "approved_at" not in existing_columns: + print("[migrations] Adding mail_messages.approved_at column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN approved_at TIMESTAMP')) + + if "approved_by_id" not in existing_columns: + print("[migrations] Adding mail_messages.approved_by_id column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN approved_by_id INTEGER REFERENCES users(id)')) + + print("[migrations] migrate_mail_messages_approval_columns completed.") + + +def migrate_mail_messages_soft_delete_columns() -> None: + """Add soft-delete columns to mail_messages if missing. 
+ + Columns: + - deleted_at (TIMESTAMP) + - deleted_by_user_id (INTEGER REFERENCES users(id)) + """ + table = "mail_messages" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for mail_messages soft-delete migration: {exc}") + return + + try: + _ = inspect(engine).get_columns(table) + except Exception as exc: + print(f"[migrations] mail_messages table not found for soft-delete migration, skipping: {exc}") + return + + def _column_exists_on_conn(conn, table_name: str, column_name: str) -> bool: + dialect = conn.dialect.name + if dialect == "sqlite": + rows = conn.execute(text(f'PRAGMA table_info("{table_name}")')).fetchall() + return any(r[1] == column_name for r in rows) + # Postgres / others + row = conn.execute( + text( + """ + SELECT 1 + FROM information_schema.columns + WHERE table_name = :table_name + AND column_name = :column_name + LIMIT 1 + """ + ), + {"table_name": table_name, "column_name": column_name}, + ).fetchone() + return row is not None + + with engine.begin() as conn: + # Avoid startup hangs if the table is busy (Postgres requires an ACCESS EXCLUSIVE lock for ADD COLUMN) + if conn.dialect.name == "postgresql": + try: + conn.execute(text("SET LOCAL lock_timeout = '5s'")) + except Exception as exc: + print(f"[migrations] Could not set lock_timeout for soft-delete migration: {exc}") + + try: + if not _column_exists_on_conn(conn, table, "deleted_at"): + print("[migrations] Adding mail_messages.deleted_at column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN deleted_at TIMESTAMP')) + + if not _column_exists_on_conn(conn, table, "deleted_by_user_id"): + print("[migrations] Adding mail_messages.deleted_by_user_id column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN deleted_by_user_id INTEGER REFERENCES users(id)')) + except Exception as exc: + # Do not fail app startup; admin can retry during a quiet moment. + print(f"[migrations] Soft-delete migration skipped due to error (likely lock/duplicate): {exc}") + return + + print("[migrations] migrate_mail_messages_soft_delete_columns completed.") + + + +def migrate_remarks_active_from_date() -> None: + """Ensure remarks.active_from_date exists and is populated. + + Remarks were historically only linked to a specific JobRun. We now store the + intended Daily Jobs run date (Europe/Amsterdam) from which the remark becomes + active as a DATE column (similar to tickets.active_from_date). + + This migration is idempotent. + """ + table = "remarks" + column = "active_from_date" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for remarks migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] remarks table not found for active_from_date migration, skipping: {exc}") + return + + if column in existing_columns: + print("[migrations] remarks.active_from_date already exists, skipping.") + return + + print("[migrations] Adding remarks.active_from_date column...") + with engine.begin() as conn: + conn.execute(text('ALTER TABLE "remarks" ADD COLUMN active_from_date DATE')) + + # Backfill from start_date (stored UTC naive) converted to Europe/Amsterdam date. + # If start_date is NULL, fall back to created_at. 
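+        # Illustrative example: a start_date of 2024-06-30 22:30 UTC lands on
+        # 2024-07-01 in Europe/Amsterdam (CEST, UTC+2), so the backfilled
+        # active_from_date becomes 2024-07-01 rather than the UTC calendar date.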
+ conn.execute( + text( + """ + UPDATE remarks + SET active_from_date = ( + COALESCE( + (start_date AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date, + (created_at AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date + ) + ) + WHERE active_from_date IS NULL + """ + ) + ) + + print("[migrations] remarks.active_from_date added and backfilled.") + + +def migrate_overrides_match_columns() -> None: + """Add match_status and match_error_contains columns to overrides table if missing.""" + engine = db.get_engine() + inspector = inspect(engine) + try: + existing_columns = [col["name"] for col in inspector.get_columns("overrides")] + except Exception: + # If the table does not exist yet, db.create_all will create it with the new columns. + print("[migrations] overrides table not found; skipping migrate_overrides_match_columns.") + return + + with engine.begin() as conn: + if "match_status" not in existing_columns: + print("[migrations] Adding overrides.match_status column...") + conn.execute(text('ALTER TABLE "overrides" ADD COLUMN match_status VARCHAR(32)')) + + if "match_error_contains" not in existing_columns: + print("[migrations] Adding overrides.match_error_contains column...") + conn.execute(text('ALTER TABLE "overrides" ADD COLUMN match_error_contains VARCHAR(255)')) + + print("[migrations] migrate_overrides_match_columns completed.") + + +def migrate_users_theme_preference() -> None: + """Add users.theme_preference column if missing. + + Values: + - 'auto' (default) + - 'light' + - 'dark' + + This migration is idempotent and safe to run on every startup. + """ + table = "users" + column = "theme_preference" + + print("[migrations] Checking users.theme_preference column...") + if _column_exists(table, column): + print("[migrations] users.theme_preference already exists, skipping.") + return + + engine = db.get_engine() + with engine.begin() as conn: + print("[migrations] Adding users.theme_preference column...") + conn.execute( + text( + 'ALTER TABLE "users" ' + 'ADD COLUMN IF NOT EXISTS theme_preference VARCHAR(16) ' + "DEFAULT 'auto'" + ) + ) + print("[migrations] Backfilling users.theme_preference to 'auto' where NULL/empty...") + conn.execute( + text( + """ + UPDATE "users" + SET theme_preference = 'auto' + WHERE theme_preference IS NULL OR theme_preference = ''; + """ + ) + ) + + print("[migrations] migrate_users_theme_preference completed.") + + +def migrate_system_settings_eml_retention() -> None: + """Add ingest_eml_retention_days to system_settings if missing. 
+ + Values: + - 0 = disabled + - 7 / 14 = retention days + """ + table = "system_settings" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for system_settings EML retention migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] system_settings table not found for EML retention migration, skipping: {exc}") + return + + if "ingest_eml_retention_days" in existing_columns: + print("[migrations] system_settings.ingest_eml_retention_days already exists, skipping.") + return + + print("[migrations] Adding system_settings.ingest_eml_retention_days column...") + with engine.begin() as conn: + conn.execute(text('ALTER TABLE "system_settings" ADD COLUMN ingest_eml_retention_days INTEGER')) + conn.execute(text('UPDATE "system_settings" SET ingest_eml_retention_days = 7 WHERE ingest_eml_retention_days IS NULL')) + conn.execute(text('ALTER TABLE "system_settings" ALTER COLUMN ingest_eml_retention_days SET DEFAULT 7')) + conn.execute(text('ALTER TABLE "system_settings" ALTER COLUMN ingest_eml_retention_days SET NOT NULL')) + print("[migrations] migrate_system_settings_eml_retention completed.") + + + +def migrate_system_settings_auto_import_cutoff_date() -> None: + """Add auto_import_cutoff_date to system_settings if missing. + + This date is used by the automatic importer to only fetch messages with + receivedDateTime >= cutoff. Older messages remain in the inbox untouched. + """ + table = "system_settings" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for system_settings auto_import_cutoff_date migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] system_settings table not found for auto_import_cutoff_date migration, skipping: {exc}") + return + + if "auto_import_cutoff_date" in existing_columns: + print("[migrations] system_settings.auto_import_cutoff_date already exists, skipping.") + return + + print("[migrations] Adding system_settings.auto_import_cutoff_date column...") + with engine.begin() as conn: + conn.execute(text('ALTER TABLE "system_settings" ADD COLUMN auto_import_cutoff_date DATE')) + + print("[migrations] migrate_system_settings_auto_import_cutoff_date completed.") + + +def migrate_system_settings_daily_jobs_start_date() -> None: + """Add daily_jobs_start_date to system_settings if missing. + + This value is used by the Daily Jobs view to decide from which date missing + expected runs should start being marked as 'Missed'. 
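+
+    Illustrative example: with daily_jobs_start_date = 2025-01-15, an expected
+    run that has no report on 2025-01-10 is left untouched, while a missing
+    expected run on 2025-01-16 is shown as 'Missed'.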
+ """ + table = "system_settings" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for system_settings daily_jobs_start_date migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] system_settings table not found for daily_jobs_start_date migration, skipping: {exc}") + return + + if "daily_jobs_start_date" in existing_columns: + print("[migrations] system_settings.daily_jobs_start_date already exists, skipping.") + return + + print("[migrations] Adding system_settings.daily_jobs_start_date column...") + with engine.begin() as conn: + conn.execute(text('ALTER TABLE "system_settings" ADD COLUMN daily_jobs_start_date DATE')) + + print("[migrations] migrate_system_settings_daily_jobs_start_date completed.") + + + +def migrate_mail_messages_eml_columns() -> None: + """Add optional raw EML columns to mail_messages if missing. + + Columns: + - eml_blob (BYTEA, nullable) + - eml_stored_at (TIMESTAMP, nullable) + """ + table = "mail_messages" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for mail_messages EML migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] mail_messages table not found for EML migration, skipping: {exc}") + return + + with engine.begin() as conn: + if "eml_blob" not in existing_columns: + print("[migrations] Adding mail_messages.eml_blob column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN eml_blob BYTEA')) + + if "eml_stored_at" not in existing_columns: + print("[migrations] Adding mail_messages.eml_stored_at column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN eml_stored_at TIMESTAMP')) + + print("[migrations] migrate_mail_messages_eml_columns completed.") + + +def migrate_mail_messages_storage_metrics() -> None: + """Add storage metric columns to mail_messages if missing. + + These columns are used for storing normalized capacity usage values (bytes/percent) + extracted from certain report types (e.g. Veeam SOBR). 
+ """ + table = "mail_messages" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for mail_messages storage metrics migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] mail_messages table not found for storage metrics migration, skipping: {exc}") + return + + with engine.begin() as conn: + if "storage_used_bytes" not in existing_columns: + print("[migrations] Adding mail_messages.storage_used_bytes column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN storage_used_bytes BIGINT')) + if "storage_capacity_bytes" not in existing_columns: + print("[migrations] Adding mail_messages.storage_capacity_bytes column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN storage_capacity_bytes BIGINT')) + if "storage_free_bytes" not in existing_columns: + print("[migrations] Adding mail_messages.storage_free_bytes column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN storage_free_bytes BIGINT')) + if "storage_free_percent" not in existing_columns: + print("[migrations] Adding mail_messages.storage_free_percent column...") + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN storage_free_percent FLOAT')) + + print("[migrations] migrate_mail_messages_storage_metrics completed.") + + +def migrate_job_runs_storage_metrics() -> None: + """Add storage metric columns to job_runs if missing. + + These columns allow capacity monitoring over time per approved job run. + """ + table = "job_runs" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for job_runs storage metrics migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] job_runs table not found for storage metrics migration, skipping: {exc}") + return + + with engine.begin() as conn: + if "storage_used_bytes" not in existing_columns: + print("[migrations] Adding job_runs.storage_used_bytes column...") + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN storage_used_bytes BIGINT')) + if "storage_capacity_bytes" not in existing_columns: + print("[migrations] Adding job_runs.storage_capacity_bytes column...") + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN storage_capacity_bytes BIGINT')) + if "storage_free_bytes" not in existing_columns: + print("[migrations] Adding job_runs.storage_free_bytes column...") + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN storage_free_bytes BIGINT')) + if "storage_free_percent" not in existing_columns: + print("[migrations] Adding job_runs.storage_free_percent column...") + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN storage_free_percent FLOAT')) + + print("[migrations] migrate_job_runs_storage_metrics completed.") + + + +def migrate_jobs_from_address() -> None: + """Add from_address column to jobs table if missing.""" + engine = db.get_engine() + inspector = inspect(engine) + try: + existing_columns = [col["name"] for col in inspector.get_columns("jobs")] + except Exception: + # If the table does not exist yet, db.create_all will create it with the new columns. 
+ print("[migrations] jobs table not found; skipping migrate_jobs_from_address.") + return + + if "from_address" not in existing_columns: + with engine.begin() as conn: + print("[migrations] Adding jobs.from_address column...") + conn.execute(text('ALTER TABLE "jobs" ADD COLUMN from_address VARCHAR(512);')) + + +def migrate_news_tables() -> None: + """Create news tables for dashboard announcements. + + Tables: + - news_items + - news_reads (per-user read tracking) + + This migration is idempotent. + """ + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for news tables migration: {exc}") + return + + inspector = inspect(engine) + + try: + tables = set(inspector.get_table_names()) + except Exception as exc: + print(f"[migrations] Could not inspect tables for news migration: {exc}") + return + + with engine.begin() as conn: + if "news_items" not in tables: + print('[migrations] Creating table "news_items"...') + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS "news_items" ( + id SERIAL PRIMARY KEY, + title VARCHAR(255) NOT NULL, + body TEXT NOT NULL, + link_url VARCHAR(2048), + severity VARCHAR(32) NOT NULL DEFAULT 'info', + pinned BOOLEAN NOT NULL DEFAULT FALSE, + active BOOLEAN NOT NULL DEFAULT TRUE, + publish_from TIMESTAMP NULL, + publish_until TIMESTAMP NULL, + created_by_user_id INTEGER NULL REFERENCES "users"(id), + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() + ); + ''' + ) + ) + + if "news_reads" not in tables: + print('[migrations] Creating table "news_reads"...') + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS "news_reads" ( + id SERIAL PRIMARY KEY, + news_item_id INTEGER NOT NULL REFERENCES "news_items"(id) ON DELETE CASCADE, + user_id INTEGER NOT NULL REFERENCES "users"(id) ON DELETE CASCADE, + read_at TIMESTAMP NOT NULL DEFAULT NOW() + ); + ''' + ) + ) + conn.execute( + text( + 'CREATE UNIQUE INDEX IF NOT EXISTS news_reads_unique ON "news_reads" (news_item_id, user_id);' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS news_reads_user_idx ON "news_reads" (user_id);' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS news_reads_item_idx ON "news_reads" (news_item_id);' + ) + ) + + print("[migrations] migrate_news_tables completed.") + + +def run_migrations() -> None: + print("[migrations] Starting migrations...") + migrate_add_username_to_users() + migrate_make_email_nullable() + migrate_users_theme_preference() + migrate_system_settings_eml_retention() + migrate_system_settings_auto_import_cutoff_date() + migrate_system_settings_daily_jobs_start_date() + migrate_system_settings_ui_timezone() + migrate_mail_messages_columns() + migrate_mail_messages_parse_columns() + migrate_mail_messages_approval_columns() + migrate_mail_messages_soft_delete_columns() + migrate_mail_messages_eml_columns() + migrate_mail_messages_overall_message() + migrate_mail_messages_storage_metrics() + migrate_job_runs_storage_metrics() + migrate_jobs_from_address() + migrate_mail_objects_table() + migrate_object_persistence_tables() + migrate_feedback_tables() + migrate_tickets_active_from_date() + migrate_remarks_active_from_date() + migrate_overrides_match_columns() + migrate_job_runs_review_tracking() + migrate_job_runs_override_metadata() + migrate_news_tables() + migrate_reporting_tables() + print("[migrations] All migrations completed.") + + +def migrate_job_runs_override_metadata() -> None: + """Add override metadata columns to job_runs for reporting. 
+ + - job_runs.override_applied_override_id (INTEGER NULL) + - job_runs.override_applied_level (VARCHAR(16) NULL) + - job_runs.override_applied_reason (TEXT NULL) + + This migration is idempotent. + """ + + table = "job_runs" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for job_runs override metadata: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] job_runs table not found; skipping migrate_job_runs_override_metadata: {exc}") + return + + with engine.begin() as conn: + if "override_applied_override_id" not in existing_columns: + print('[migrations] Adding job_runs.override_applied_override_id column...') + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN override_applied_override_id INTEGER')) + + if "override_applied_level" not in existing_columns: + print('[migrations] Adding job_runs.override_applied_level column...') + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN override_applied_level VARCHAR(16)')) + + if "override_applied_reason" not in existing_columns: + print('[migrations] Adding job_runs.override_applied_reason column...') + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN override_applied_reason TEXT')) + + print("[migrations] migrate_job_runs_override_metadata completed.") + + +def migrate_job_runs_review_tracking() -> None: + """Add review tracking for job runs and create the review events audit table. + + - job_runs.reviewed_at (TIMESTAMP NULL) + - job_runs.reviewed_by_user_id (INTEGER NULL, FK users.id) + - job_run_review_events (append-only audit trail) + + This migration is idempotent. + """ + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for job run review tracking: {exc}") + return + + inspector = inspect(engine) + + # job_runs columns + table = "job_runs" + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] job_runs table not found for review tracking migration, skipping: {exc}") + return + + with engine.begin() as conn: + if "reviewed_at" not in existing_columns: + print('[migrations] Adding job_runs.reviewed_at column...') + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN reviewed_at TIMESTAMP')) + + if "reviewed_by_user_id" not in existing_columns: + print('[migrations] Adding job_runs.reviewed_by_user_id column...') + conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN reviewed_by_user_id INTEGER')) + # FK is optional; don't fail on existing schemas. 
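+            # For example, the constraint may already exist from an earlier run,
+            # or legacy rows may reference users that were removed; in either
+            # case we keep the plain integer column and only log the failure.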
+ try: + conn.execute( + text( + 'ALTER TABLE "job_runs" ' + 'ADD CONSTRAINT job_runs_reviewed_by_user_id_fkey ' + 'FOREIGN KEY (reviewed_by_user_id) REFERENCES users(id) ' + 'ON DELETE SET NULL' + ) + ) + except Exception as exc: + print(f"[migrations] Could not add FK job_runs.reviewed_by_user_id -> users.id (continuing): {exc}") + + conn.execute( + text('CREATE INDEX IF NOT EXISTS idx_job_runs_reviewed_at ON "job_runs" (reviewed_at)') + ) + + # Audit table + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS job_run_review_events ( + id SERIAL PRIMARY KEY, + run_id INTEGER NOT NULL REFERENCES job_runs(id) ON DELETE CASCADE, + action VARCHAR(32) NOT NULL, + actor_user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE RESTRICT, + note TEXT NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW() + ) + ''' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS idx_job_run_review_events_run_id ON "job_run_review_events" (run_id)' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS idx_job_run_review_events_created_at ON "job_run_review_events" (created_at)' + ) + ) + + print("[migrations] migrate_job_runs_review_tracking completed.") + + +def migrate_tickets_active_from_date() -> None: + """Ensure tickets.active_from_date exists and is populated. + + Tickets were historically "active" from tickets.start_date. We now store the + intended Daily Jobs run date (Europe/Amsterdam) from which the ticket becomes + active as a DATE column. + + This migration is idempotent. + """ + table = "tickets" + column = "active_from_date" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for tickets migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] tickets table not found for active_from_date migration, skipping: {exc}") + return + + if column in existing_columns: + print("[migrations] tickets.active_from_date already exists, skipping.") + return + + print("[migrations] Adding tickets.active_from_date column...") + with engine.begin() as conn: + conn.execute(text('ALTER TABLE "tickets" ADD COLUMN active_from_date DATE')) + + # Backfill from start_date (stored UTC naive) converted to Europe/Amsterdam date. + # If conversion fails for any row, it falls back to start_date::date. 
+ conn.execute( + text( + """ + UPDATE "tickets" + SET active_from_date = ( + (start_date AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date + ) + WHERE active_from_date IS NULL; + """ + ) + ) + + conn.execute(text('ALTER TABLE "tickets" ALTER COLUMN active_from_date SET NOT NULL')) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS idx_tickets_active_from_date ON "tickets" (active_from_date)' + ) + ) + + print("[migrations] migrate_tickets_active_from_date completed.") + + + +def migrate_mail_messages_overall_message() -> None: + """Add overall_message column to mail_messages if missing.""" + table = "mail_messages" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for mail_messages overall_message migration: {exc}") + return + + inspector = inspect(engine) + try: + existing_columns = {col["name"] for col in inspector.get_columns(table)} + except Exception as exc: + print(f"[migrations] mail_messages table not found for overall_message migration, skipping: {exc}") + return + + if "overall_message" in existing_columns: + print("[migrations] mail_messages.overall_message already exists, skipping.") + return + + print("[migrations] Adding mail_messages.overall_message column...") + with engine.begin() as conn: + conn.execute(text('ALTER TABLE "mail_messages" ADD COLUMN overall_message TEXT')) + print("[migrations] migrate_mail_messages_overall_message completed.") + + +def migrate_mail_objects_table() -> None: + """Ensure the mail_objects table exists. + + This table stores the individual objects detected in a mail message + (for example VMs in a Veeam report) together with their status and + optional error message. + """ + engine = db.get_engine() + inspector = inspect(engine) + + try: + tables = inspector.get_table_names() + except Exception as exc: + print(f"[migrations] Could not inspect tables for mail_objects: {exc}") + return + + if "mail_objects" in tables: + # Ensure new columns exist on existing installations + try: + cols = [c.get("name") for c in inspector.get_columns("mail_objects")] + except Exception as exc: + print(f"[migrations] Could not inspect columns for mail_objects: {exc}") + return + + if "object_type" not in cols: + print("[migrations] Adding mail_objects.object_type column...") + with engine.begin() as conn: + conn.execute(text('ALTER TABLE "mail_objects" ADD COLUMN object_type VARCHAR(128)')) + print("[migrations] mail_objects.object_type column added.") + else: + print("[migrations] mail_objects table already exists, skipping.") + return + + print("[migrations] Creating mail_objects table...") + with engine.begin() as conn: + conn.execute( + text( + ''' + CREATE TABLE mail_objects ( + id SERIAL PRIMARY KEY, + mail_message_id INTEGER NOT NULL REFERENCES mail_messages(id) ON DELETE CASCADE, + object_name VARCHAR(512) NOT NULL, + object_type VARCHAR(128), + status VARCHAR(64), + error_message TEXT, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP + ) + ''' + ) + ) + print("[migrations] mail_objects table created.") +def migrate_object_persistence_tables() -> None: + """Create tables for object persistence used for reporting. + + This migration is idempotent and safe to run on every startup. 
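+
+    Illustrative example: an object named 'web01' that appears in runs of two
+    different jobs for the same customer yields one customer_objects row, two
+    job_object_links rows, and one run_object_links row per run it shows up in.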
+ """ + engine = db.get_engine() + with engine.begin() as conn: + # 1) customer_objects + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS customer_objects ( + id SERIAL PRIMARY KEY, + customer_id INTEGER NOT NULL REFERENCES customers(id) ON DELETE CASCADE, + object_name TEXT NOT NULL, + object_type TEXT NULL, + first_seen_at TIMESTAMP NOT NULL DEFAULT NOW(), + last_seen_at TIMESTAMP NOT NULL DEFAULT NOW(), + UNIQUE(customer_id, object_name) + ) + ''' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS idx_customer_objects_customer_name ON customer_objects (customer_id, object_name)' + ) + ) + + # 2) job_object_links + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS job_object_links ( + id SERIAL PRIMARY KEY, + job_id INTEGER NOT NULL REFERENCES jobs(id) ON DELETE CASCADE, + customer_object_id INTEGER NOT NULL REFERENCES customer_objects(id) ON DELETE CASCADE, + first_seen_at TIMESTAMP NOT NULL DEFAULT NOW(), + last_seen_at TIMESTAMP NOT NULL DEFAULT NOW(), + UNIQUE(job_id, customer_object_id) + ) + ''' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS idx_job_object_links_job_id ON job_object_links (job_id)' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS idx_job_object_links_customer_object_id ON job_object_links (customer_object_id)' + ) + ) + + # 3) run_object_links + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS run_object_links ( + id SERIAL PRIMARY KEY, + run_id INTEGER NOT NULL REFERENCES job_runs(id) ON DELETE CASCADE, + customer_object_id INTEGER NOT NULL REFERENCES customer_objects(id) ON DELETE CASCADE, + status TEXT NULL, + error_message TEXT NULL, + observed_at TIMESTAMP NOT NULL DEFAULT NOW(), + UNIQUE(run_id, customer_object_id) + ) + ''' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS idx_run_object_links_run_id ON run_object_links (run_id)' + ) + ) + conn.execute( + text( + 'CREATE INDEX IF NOT EXISTS idx_run_object_links_customer_object_id ON run_object_links (customer_object_id)' + ) + ) + + print("[migrations] object persistence tables ensured.") + + print("[migrations] Ensuring ticket and remark tables...") + conn.execute(text(""" + CREATE TABLE IF NOT EXISTS tickets ( + id SERIAL PRIMARY KEY, + ticket_code VARCHAR(32) UNIQUE NOT NULL, + title VARCHAR(255), + description TEXT, + start_date TIMESTAMP NOT NULL, + resolved_at TIMESTAMP, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL + ); + """)) + conn.execute(text(""" + CREATE TABLE IF NOT EXISTS ticket_scopes ( + id SERIAL PRIMARY KEY, + ticket_id INTEGER NOT NULL REFERENCES tickets(id) ON DELETE CASCADE, + scope_type VARCHAR(32) NOT NULL, + customer_id INTEGER REFERENCES customers(id), + backup_software VARCHAR(128), + backup_type VARCHAR(128), + job_id INTEGER REFERENCES jobs(id), + job_name_match VARCHAR(255), + job_name_match_mode VARCHAR(32), + created_at TIMESTAMP NOT NULL + ); + """)) + conn.execute(text(""" + CREATE TABLE IF NOT EXISTS ticket_job_runs ( + id SERIAL PRIMARY KEY, + ticket_id INTEGER NOT NULL REFERENCES tickets(id) ON DELETE CASCADE, + job_run_id INTEGER NOT NULL REFERENCES job_runs(id) ON DELETE CASCADE, + linked_at TIMESTAMP NOT NULL, + link_source VARCHAR(64) NOT NULL, + UNIQUE(ticket_id, job_run_id) + ); + """)) + conn.execute(text(""" + CREATE TABLE IF NOT EXISTS remarks ( + id SERIAL PRIMARY KEY, + title VARCHAR(255), + body TEXT NOT NULL, + start_date TIMESTAMP, + resolved_at TIMESTAMP, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL + ); + """)) + conn.execute(text(""" + CREATE 
TABLE IF NOT EXISTS remark_scopes ( + id SERIAL PRIMARY KEY, + remark_id INTEGER NOT NULL REFERENCES remarks(id) ON DELETE CASCADE, + scope_type VARCHAR(32) NOT NULL, + customer_id INTEGER REFERENCES customers(id), + backup_software VARCHAR(128), + backup_type VARCHAR(128), + job_id INTEGER REFERENCES jobs(id), + job_name_match VARCHAR(255), + job_name_match_mode VARCHAR(32), + job_run_id INTEGER REFERENCES job_runs(id), + created_at TIMESTAMP NOT NULL + ); + """)) + conn.execute(text(""" + CREATE TABLE IF NOT EXISTS remark_job_runs ( + id SERIAL PRIMARY KEY, + remark_id INTEGER NOT NULL REFERENCES remarks(id) ON DELETE CASCADE, + job_run_id INTEGER NOT NULL REFERENCES job_runs(id) ON DELETE CASCADE, + linked_at TIMESTAMP NOT NULL, + link_source VARCHAR(64) NOT NULL, + UNIQUE(remark_id, job_run_id) + ); + """)) + print("[migrations] Ticket and remark tables ensured.") + + +def migrate_feedback_tables() -> None: + """Ensure feedback board tables exist. + + Tables: + - feedback_items + - feedback_votes (unique per item/user) + + This migration is idempotent and safe to run on every startup. + """ + engine = db.get_engine() + with engine.begin() as conn: + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS feedback_items ( + id SERIAL PRIMARY KEY, + item_type VARCHAR(16) NOT NULL, + title VARCHAR(255) NOT NULL, + description TEXT NOT NULL, + component VARCHAR(255), + status VARCHAR(16) NOT NULL DEFAULT 'open', + created_by_user_id INTEGER NOT NULL REFERENCES users(id), + resolved_by_user_id INTEGER REFERENCES users(id), + resolved_at TIMESTAMP, + deleted_by_user_id INTEGER REFERENCES users(id), + deleted_at TIMESTAMP, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() + ); + """ + ) + ) + + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS feedback_votes ( + id SERIAL PRIMARY KEY, + feedback_item_id INTEGER NOT NULL REFERENCES feedback_items(id) ON DELETE CASCADE, + user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + UNIQUE(feedback_item_id, user_id) + ); + """ + ) + ) + + conn.execute( + text( + """ + CREATE INDEX IF NOT EXISTS idx_feedback_items_status_type + ON feedback_items (status, item_type); + """ + ) + ) + conn.execute( + text( + """ + CREATE INDEX IF NOT EXISTS idx_feedback_votes_item + ON feedback_votes (feedback_item_id); + """ + ) + ) + conn.execute( + text( + """ + CREATE INDEX IF NOT EXISTS idx_feedback_items_deleted_at + ON feedback_items (deleted_at); + """ + ) + ) + + print("[migrations] Feedback tables ensured.") + + +def migrate_reporting_tables() -> None: + """Create reporting tables used for snapshot-based exports. + + This migration is idempotent and safe to run on every startup. 
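+
+    Tables (created below): report_definitions (what to report on and for which
+    period/customers), report_object_snapshots (one row per object/run captured
+    for a report), and report_object_summaries (per-object aggregated counts and
+    success rate).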
+ """ + engine = db.get_engine() + with engine.begin() as conn: + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS report_definitions ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL, + description TEXT NULL, + report_type VARCHAR(32) NOT NULL DEFAULT 'one-time', + output_format VARCHAR(16) NOT NULL DEFAULT 'csv', + customer_scope VARCHAR(16) NOT NULL DEFAULT 'all', + customer_ids TEXT NULL, + period_start TIMESTAMP NOT NULL, + period_end TIMESTAMP NOT NULL, + schedule VARCHAR(255) NULL, + created_by_user_id INTEGER NULL REFERENCES users(id) ON DELETE SET NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() + ) + ''' + ) + ) + + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS report_object_snapshots ( + id SERIAL PRIMARY KEY, + report_id INTEGER NOT NULL REFERENCES report_definitions(id) ON DELETE CASCADE, + object_name TEXT NOT NULL, + job_id INTEGER NULL, + job_name TEXT NULL, + customer_id INTEGER NULL, + customer_name TEXT NULL, + backup_software TEXT NULL, + backup_type TEXT NULL, + run_id INTEGER NULL, + run_at TIMESTAMP NULL, + status TEXT NULL, + missed BOOLEAN NOT NULL DEFAULT FALSE, + override_applied BOOLEAN NOT NULL DEFAULT FALSE, + reviewed_at TIMESTAMP NULL, + ticket_number TEXT NULL, + remark TEXT NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW() + ) + ''' + ) + ) + + conn.execute( + text( + ''' + CREATE TABLE IF NOT EXISTS report_object_summaries ( + id SERIAL PRIMARY KEY, + report_id INTEGER NOT NULL REFERENCES report_definitions(id) ON DELETE CASCADE, + object_name TEXT NOT NULL, + total_runs INTEGER NOT NULL DEFAULT 0, + success_count INTEGER NOT NULL DEFAULT 0, + success_override_count INTEGER NOT NULL DEFAULT 0, + warning_count INTEGER NOT NULL DEFAULT 0, + failed_count INTEGER NOT NULL DEFAULT 0, + missed_count INTEGER NOT NULL DEFAULT 0, + success_rate DOUBLE PRECISION NOT NULL DEFAULT 0.0, + created_at TIMESTAMP NOT NULL DEFAULT NOW() + ) + ''' + ) + ) + + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_report_definitions_created_at ON report_definitions (created_at)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_report_snapshots_report_id ON report_object_snapshots (report_id)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_report_snapshots_object_name ON report_object_snapshots (object_name)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_report_snapshots_run_at ON report_object_snapshots (run_at)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_report_summaries_report_id ON report_object_summaries (report_id)")) + conn.execute(text("CREATE INDEX IF NOT EXISTS idx_report_summaries_object_name ON report_object_summaries (object_name)")) + + # Ensure new columns exist when upgrading older schemas. 
+ conn.execute(text("ALTER TABLE report_definitions ADD COLUMN IF NOT EXISTS customer_scope VARCHAR(16) NOT NULL DEFAULT 'all'")) + conn.execute(text("ALTER TABLE report_definitions ADD COLUMN IF NOT EXISTS customer_ids TEXT NULL")) + conn.execute(text("ALTER TABLE report_object_snapshots ADD COLUMN IF NOT EXISTS customer_id INTEGER NULL")) + + print("[migrations] reporting tables created/verified.") diff --git a/containers/backupchecks/src/backend/app/models.py b/containers/backupchecks/src/backend/app/models.py new file mode 100644 index 0000000..a4aa009 --- /dev/null +++ b/containers/backupchecks/src/backend/app/models.py @@ -0,0 +1,625 @@ +from datetime import datetime +from flask_login import UserMixin +from flask import session, has_request_context +from werkzeug.security import generate_password_hash, check_password_hash + +from .database import db + + +class User(db.Model, UserMixin): + __tablename__ = "users" + + id = db.Column(db.Integer, primary_key=True) + + # username is the primary login identifier + username = db.Column(db.String(255), unique=True, nullable=False) + + # email is kept for future use and may be NULL + email = db.Column(db.String(255), nullable=True) + + password_hash = db.Column(db.String(255), nullable=False) + role = db.Column(db.String(50), nullable=False, default="viewer") + # UI theme preference: 'auto' (follow OS), 'light', 'dark' + theme_preference = db.Column(db.String(16), nullable=False, default="auto") + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + def set_password(self, password: str) -> None: + self.password_hash = generate_password_hash(password) + + def check_password(self, password: str) -> bool: + return check_password_hash(self.password_hash, password) + + @property + def roles(self) -> list[str]: + """Return all assigned roles. + + The database stores roles as a comma-separated string for backwards + compatibility with older schemas. + """ + raw = (self.role or "").strip() + if not raw: + return ["viewer"] + parts = [p.strip() for p in raw.split(",")] + roles = [p for p in parts if p] + return roles or ["viewer"] + + @property + def active_role(self) -> str: + """Return the currently active role for this user. + + When a request context exists, the active role is stored in the session. + If the stored role is not assigned to the user, it falls back to the + first assigned role. 
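+
+        Illustrative example: a user with role = "admin,operator" exposes
+        roles == ["admin", "operator"]; if the session holds "viewer" (a role
+        this user does not have), active_role falls back to "admin" and resets
+        the session value.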
+ """ + default_role = self.roles[0] + if not has_request_context(): + return default_role + selected = (session.get("active_role") or "").strip() + if selected and selected in self.roles: + return selected + session["active_role"] = default_role + return default_role + + def set_active_role(self, role: str) -> None: + """Set the active role in the current session (if possible).""" + if not has_request_context(): + return + role = (role or "").strip() + if role and role in self.roles: + session["active_role"] = role + else: + session["active_role"] = self.roles[0] + + + @property + def is_admin(self) -> bool: + return self.active_role == "admin" + + +class SystemSettings(db.Model): + __tablename__ = "system_settings" + + id = db.Column(db.Integer, primary_key=True) + + # Graph / mail settings + graph_tenant_id = db.Column(db.String(255), nullable=True) + graph_client_id = db.Column(db.String(255), nullable=True) + graph_client_secret = db.Column(db.String(255), nullable=True) + graph_mailbox = db.Column(db.String(255), nullable=True) + + incoming_folder = db.Column(db.String(255), nullable=True) + processed_folder = db.Column(db.String(255), nullable=True) + + # Import configuration + auto_import_enabled = db.Column(db.Boolean, nullable=False, default=False) + auto_import_interval_minutes = db.Column(db.Integer, nullable=False, default=15) + auto_import_max_items = db.Column(db.Integer, nullable=False, default=50) + manual_import_batch_size = db.Column(db.Integer, nullable=False, default=50) + auto_import_cutoff_date = db.Column(db.Date, nullable=True) + + # Debug storage: store raw EML in database for a limited retention window. + # 0 = disabled, 7/14 = retention days. + ingest_eml_retention_days = db.Column(db.Integer, nullable=False, default=7) + + # Daily Jobs: from which date 'Missed' status should start to be applied. + daily_jobs_start_date = db.Column(db.Date, nullable=True) + + # UI display timezone (IANA name). Used for rendering times in the web interface. 
+ ui_timezone = db.Column(db.String(64), nullable=False, default="Europe/Amsterdam") + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + + + +class AdminLog(db.Model): + __tablename__ = "admin_logs" + + id = db.Column(db.Integer, primary_key=True) + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + user = db.Column(db.String(255), nullable=True) + event_type = db.Column(db.String(64), nullable=False) + message = db.Column(db.Text, nullable=False) + details = db.Column(db.Text, nullable=True) + +class Customer(db.Model): + __tablename__ = "customers" + + id = db.Column(db.Integer, primary_key=True) + name = db.Column(db.String(255), unique=True, nullable=False) + active = db.Column(db.Boolean, nullable=False, default=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + +class Override(db.Model): + __tablename__ = "overrides" + + id = db.Column(db.Integer, primary_key=True) + + # Level of the override: global or object (job-level is no longer used) + level = db.Column(db.String(20), nullable=False) + + # Scope for global overrides (optional wildcard fields) + backup_software = db.Column(db.String(255), nullable=True) + backup_type = db.Column(db.String(255), nullable=True) + + # Scope for object overrides + job_id = db.Column(db.Integer, db.ForeignKey("jobs.id"), nullable=True) + object_name = db.Column(db.String(255), nullable=True) + + # Matching criteria on object status / error message + match_status = db.Column(db.String(32), nullable=True) + match_error_contains = db.Column(db.String(255), nullable=True) + + # Behaviour flags + treat_as_success = db.Column(db.Boolean, nullable=False, default=True) + active = db.Column(db.Boolean, nullable=False, default=True) + + # Validity window + start_at = db.Column(db.DateTime, nullable=False) + end_at = db.Column(db.DateTime, nullable=True) + + # Management metadata + comment = db.Column(db.Text, nullable=True) + created_by = db.Column(db.String(255), nullable=True) + updated_by = db.Column(db.String(255), nullable=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + +class Job(db.Model): + __tablename__ = "jobs" + + id = db.Column(db.Integer, primary_key=True) + + customer_id = db.Column(db.Integer, db.ForeignKey("customers.id"), nullable=True) + + backup_software = db.Column(db.String(128), nullable=True) + backup_type = db.Column(db.String(128), nullable=True) + job_name = db.Column(db.String(512), nullable=True) + + from_address = db.Column(db.String(512), nullable=True) + + schedule_type = db.Column(db.String(32), nullable=True) # daily, weekly, monthly, yearly + schedule_days_of_week = db.Column(db.String(64), nullable=True) # e.g. "Mon,Tue,Wed" + schedule_day_of_month = db.Column(db.Integer, nullable=True) # 1-31 + schedule_times = db.Column(db.String(255), nullable=True) # e.g. 
"01:00,13:15" + + auto_approve = db.Column(db.Boolean, nullable=False, default=True) + active = db.Column(db.Boolean, nullable=False, default=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + + customer = db.relationship( + "Customer", + backref=db.backref("jobs", lazy="dynamic"), + lazy="joined", + ) + + +class JobRun(db.Model): + __tablename__ = "job_runs" + + id = db.Column(db.Integer, primary_key=True) + + job_id = db.Column(db.Integer, db.ForeignKey("jobs.id"), nullable=False) + mail_message_id = db.Column(db.Integer, db.ForeignKey("mail_messages.id"), nullable=True) + + run_at = db.Column(db.DateTime, nullable=True) + status = db.Column(db.String(64), nullable=True) + remark = db.Column(db.Text, nullable=True) + missed = db.Column(db.Boolean, nullable=False, default=False) + override_applied = db.Column(db.Boolean, nullable=False, default=False) + + # Override metadata for reporting/auditing. + # These are populated when override flags are recomputed. + override_applied_override_id = db.Column(db.Integer, nullable=True) + override_applied_level = db.Column(db.String(16), nullable=True) + override_applied_reason = db.Column(db.Text, nullable=True) + + # Optional storage metrics (e.g. for repository capacity monitoring) + storage_used_bytes = db.Column(db.BigInteger, nullable=True) + storage_capacity_bytes = db.Column(db.BigInteger, nullable=True) + storage_free_bytes = db.Column(db.BigInteger, nullable=True) + storage_free_percent = db.Column(db.Float, nullable=True) + + # Run review (Run Checks) + reviewed_at = db.Column(db.DateTime, nullable=True) + reviewed_by_user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True) + + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + + job = db.relationship( + "Job", + backref=db.backref("runs", lazy="dynamic", cascade="all, delete-orphan"), + ) + + reviewed_by = db.relationship("User", foreign_keys=[reviewed_by_user_id]) + + +class JobRunReviewEvent(db.Model): + __tablename__ = "job_run_review_events" + + id = db.Column(db.Integer, primary_key=True) + run_id = db.Column(db.Integer, db.ForeignKey("job_runs.id"), nullable=False) + action = db.Column(db.String(32), nullable=False) # REVIEWED | UNREVIEWED + actor_user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False) + note = db.Column(db.Text, nullable=True) + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + run = db.relationship( + "JobRun", + backref=db.backref("review_events", lazy="dynamic", cascade="all, delete-orphan"), + ) + actor = db.relationship("User", foreign_keys=[actor_user_id]) + + +class JobObject(db.Model): + __tablename__ = "job_objects" + + id = db.Column(db.Integer, primary_key=True) + + job_run_id = db.Column(db.Integer, db.ForeignKey("job_runs.id"), nullable=False) + object_name = db.Column(db.String(512), nullable=False) + object_type = db.Column(db.String(128), nullable=True) + status = db.Column(db.String(64), nullable=True) + error_message = db.Column(db.Text, nullable=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + job_run = db.relationship( + "JobRun", + backref=db.backref("objects", lazy="dynamic", cascade="all, delete-orphan"), + ) + + +class MailMessage(db.Model): + 
__tablename__ = "mail_messages" + + id = db.Column(db.Integer, primary_key=True) + + # Basic mail metadata + message_id = db.Column(db.String(512), unique=True, nullable=True) + from_address = db.Column(db.String(512), nullable=True) + subject = db.Column(db.String(1024), nullable=True) + received_at = db.Column(db.DateTime, nullable=True) + + # Parsed backup metadata + backup_software = db.Column(db.String(128), nullable=True) + backup_type = db.Column(db.String(128), nullable=True) + job_name = db.Column(db.String(512), nullable=True) + + from_address = db.Column(db.String(512), nullable=True) + overall_status = db.Column(db.String(32), nullable=True) + overall_message = db.Column(db.Text, nullable=True) + parse_result = db.Column(db.String(32), nullable=True) + parse_error = db.Column(db.String(512), nullable=True) + + parsed_at = db.Column(db.DateTime, nullable=True) + + # Optional storage metrics (e.g. repository capacity monitoring) + storage_used_bytes = db.Column(db.BigInteger, nullable=True) + storage_capacity_bytes = db.Column(db.BigInteger, nullable=True) + storage_free_bytes = db.Column(db.BigInteger, nullable=True) + storage_free_percent = db.Column(db.Float, nullable=True) + + + # Link back to Job and location (inbox/history) + job_id = db.Column(db.Integer, db.ForeignKey("jobs.id"), nullable=True) + location = db.Column(db.String(32), nullable=False, default="inbox") + + # Raw / rendered content storage (for inline popup) + html_body = db.Column(db.Text, nullable=True) + text_body = db.Column(db.Text, nullable=True) + + # Optional raw RFC822 message storage (debug) - controlled by SystemSettings.ingest_eml_retention_days + eml_blob = db.Column(db.LargeBinary, nullable=True) + eml_stored_at = db.Column(db.DateTime, nullable=True) + + # Approval metadata + approved = db.Column(db.Boolean, nullable=False, default=False) + approved_at = db.Column(db.DateTime, nullable=True) + approved_by_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True) + + # Soft-delete metadata (Inbox delete -> Admin restore) + deleted_at = db.Column(db.DateTime, nullable=True) + deleted_by_user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + + deleted_by_user = db.relationship("User", foreign_keys=[deleted_by_user_id]) + +class MailObject(db.Model): + __tablename__ = "mail_objects" + + id = db.Column(db.Integer, primary_key=True) + mail_message_id = db.Column(db.Integer, db.ForeignKey("mail_messages.id"), nullable=False) + object_name = db.Column(db.String(512), nullable=False) + object_type = db.Column(db.String(128), nullable=True) + status = db.Column(db.String(64), nullable=True) + error_message = db.Column(db.Text, nullable=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + + +class Ticket(db.Model): + __tablename__ = "tickets" + id = db.Column(db.Integer, primary_key=True) + ticket_code = db.Column(db.String(32), unique=True, nullable=False) + title = db.Column(db.String(255)) + description = db.Column(db.Text) + + # Date (Europe/Amsterdam) from which this ticket should be considered active + # for the scoped job(s) in Daily Jobs / Job Details views. 
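+ # This is a plain calendar date (Europe/Amsterdam), separate from the UTC
+ # audit timestamp start_date below.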
+ active_from_date = db.Column(db.Date, nullable=False) + + # Audit timestamp: when the ticket was created (UTC, naive) + start_date = db.Column(db.DateTime, nullable=False) + resolved_at = db.Column(db.DateTime) + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False) + + +class TicketScope(db.Model): + __tablename__ = "ticket_scopes" + id = db.Column(db.Integer, primary_key=True) + ticket_id = db.Column(db.Integer, db.ForeignKey("tickets.id"), nullable=False) + scope_type = db.Column(db.String(32), nullable=False) + customer_id = db.Column(db.Integer, db.ForeignKey("customers.id")) + backup_software = db.Column(db.String(128)) + backup_type = db.Column(db.String(128)) + job_id = db.Column(db.Integer, db.ForeignKey("jobs.id")) + job_name_match = db.Column(db.String(255)) + job_name_match_mode = db.Column(db.String(32)) + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + +class TicketJobRun(db.Model): + __tablename__ = "ticket_job_runs" + id = db.Column(db.Integer, primary_key=True) + ticket_id = db.Column(db.Integer, db.ForeignKey("tickets.id"), nullable=False) + job_run_id = db.Column(db.Integer, db.ForeignKey("job_runs.id"), nullable=False) + linked_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + link_source = db.Column(db.String(64), nullable=False) + __table_args__ = (db.UniqueConstraint("ticket_id", "job_run_id", name="uq_ticket_job_run"),) + + +class Remark(db.Model): + __tablename__ = "remarks" + id = db.Column(db.Integer, primary_key=True) + title = db.Column(db.String(255)) + body = db.Column(db.Text, nullable=False) + + # Date (Europe/Amsterdam) from which this remark should be considered active + # for the scoped job(s) in Daily Jobs / Job Details views. 
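+ # Unlike Ticket.active_from_date, this column is optional (nullable).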
+ active_from_date = db.Column(db.Date) + + start_date = db.Column(db.DateTime) + resolved_at = db.Column(db.DateTime) + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False) + + +class RemarkScope(db.Model): + __tablename__ = "remark_scopes" + id = db.Column(db.Integer, primary_key=True) + remark_id = db.Column(db.Integer, db.ForeignKey("remarks.id"), nullable=False) + scope_type = db.Column(db.String(32), nullable=False) + customer_id = db.Column(db.Integer, db.ForeignKey("customers.id")) + backup_software = db.Column(db.String(128)) + backup_type = db.Column(db.String(128)) + job_id = db.Column(db.Integer, db.ForeignKey("jobs.id")) + job_name_match = db.Column(db.String(255)) + job_name_match_mode = db.Column(db.String(32)) + job_run_id = db.Column(db.Integer, db.ForeignKey("job_runs.id")) + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + +class RemarkJobRun(db.Model): + __tablename__ = "remark_job_runs" + id = db.Column(db.Integer, primary_key=True) + remark_id = db.Column(db.Integer, db.ForeignKey("remarks.id"), nullable=False) + job_run_id = db.Column(db.Integer, db.ForeignKey("job_runs.id"), nullable=False) + linked_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + link_source = db.Column(db.String(64), nullable=False) + __table_args__ = (db.UniqueConstraint("remark_id", "job_run_id", name="uq_remark_job_run"),) + + +class FeedbackItem(db.Model): + __tablename__ = "feedback_items" + + id = db.Column(db.Integer, primary_key=True) + + # bug | feature + item_type = db.Column(db.String(16), nullable=False) + title = db.Column(db.String(255), nullable=False) + description = db.Column(db.Text, nullable=False) + component = db.Column(db.String(255), nullable=True) + + # open | resolved + status = db.Column(db.String(16), nullable=False, default="open") + + created_by_user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False) + resolved_by_user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True) + resolved_at = db.Column(db.DateTime, nullable=True) + + deleted_by_user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True) + deleted_at = db.Column(db.DateTime, nullable=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + + +class FeedbackVote(db.Model): + __tablename__ = "feedback_votes" + + id = db.Column(db.Integer, primary_key=True) + feedback_item_id = db.Column( + db.Integer, db.ForeignKey("feedback_items.id"), nullable=False + ) + user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False) + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + __table_args__ = ( + db.UniqueConstraint( + "feedback_item_id", "user_id", name="uq_feedback_vote_item_user" + ), + ) + + +class NewsItem(db.Model): + __tablename__ = "news_items" + + id = db.Column(db.Integer, primary_key=True) + + title = db.Column(db.String(255), nullable=False) + body = db.Column(db.Text, nullable=False) + + link_url = db.Column(db.String(2048), nullable=True) + + severity = db.Column(db.String(32), nullable=False, default="info") # info, warning + pinned = db.Column(db.Boolean, nullable=False, default=False) + active = db.Column(db.Boolean, nullable=False, default=True) + + publish_from = db.Column(db.DateTime, 
nullable=True) + publish_until = db.Column(db.DateTime, nullable=True) + + created_by_user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + + +class NewsRead(db.Model): + __tablename__ = "news_reads" + + id = db.Column(db.Integer, primary_key=True) + + news_item_id = db.Column(db.Integer, db.ForeignKey("news_items.id"), nullable=False) + user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False) + + read_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + +# --- Reporting (phase 1: raw data foundation) --------------------------------- + +class ReportDefinition(db.Model): + __tablename__ = "report_definitions" + + id = db.Column(db.Integer, primary_key=True) + + name = db.Column(db.String(255), nullable=False) + description = db.Column(db.Text, nullable=True) + + # one-time | scheduled + report_type = db.Column(db.String(32), nullable=False, default="one-time") + + # csv | pdf (pdf is future) + output_format = db.Column(db.String(16), nullable=False, default="csv") + + # customer scope for report generation + # all | single | multiple + customer_scope = db.Column(db.String(16), nullable=False, default="all") + # JSON encoded list of customer ids. NULL/empty when scope=all. + customer_ids = db.Column(db.Text, nullable=True) + + period_start = db.Column(db.DateTime, nullable=False) + period_end = db.Column(db.DateTime, nullable=False) + + # For scheduled reports in later phases (cron / RRULE style string) + schedule = db.Column(db.String(255), nullable=True) + + created_by_user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + updated_at = db.Column( + db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False + ) + + created_by = db.relationship("User", foreign_keys=[created_by_user_id]) + + +class ReportObjectSnapshot(db.Model): + __tablename__ = "report_object_snapshots" + + id = db.Column(db.Integer, primary_key=True) + + report_id = db.Column(db.Integer, db.ForeignKey("report_definitions.id"), nullable=False) + + # Object identity (from customer_objects.object_name) + object_name = db.Column(db.Text, nullable=False) + + # Job identity + job_id = db.Column(db.Integer, nullable=True) + job_name = db.Column(db.Text, nullable=True) + customer_id = db.Column(db.Integer, nullable=True) + customer_name = db.Column(db.Text, nullable=True) + + backup_software = db.Column(db.Text, nullable=True) + backup_type = db.Column(db.Text, nullable=True) + + # Run identity + run_id = db.Column(db.Integer, nullable=True) + run_at = db.Column(db.DateTime, nullable=True) + + status = db.Column(db.Text, nullable=True) + missed = db.Column(db.Boolean, nullable=False, default=False) + override_applied = db.Column(db.Boolean, nullable=False, default=False) + + reviewed_at = db.Column(db.DateTime, nullable=True) + ticket_number = db.Column(db.Text, nullable=True) + remark = db.Column(db.Text, nullable=True) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + report = db.relationship( + "ReportDefinition", + backref=db.backref("object_snapshots", lazy="dynamic", cascade="all, delete-orphan"), + ) + + +class ReportObjectSummary(db.Model): + __tablename__ = "report_object_summaries" + + id = db.Column(db.Integer, 
primary_key=True) + + report_id = db.Column(db.Integer, db.ForeignKey("report_definitions.id"), nullable=False) + object_name = db.Column(db.Text, nullable=False) + + total_runs = db.Column(db.Integer, nullable=False, default=0) + success_count = db.Column(db.Integer, nullable=False, default=0) + success_override_count = db.Column(db.Integer, nullable=False, default=0) + warning_count = db.Column(db.Integer, nullable=False, default=0) + failed_count = db.Column(db.Integer, nullable=False, default=0) + missed_count = db.Column(db.Integer, nullable=False, default=0) + + success_rate = db.Column(db.Float, nullable=False, default=0.0) + + created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) + + report = db.relationship( + "ReportDefinition", + backref=db.backref("object_summaries", lazy="dynamic", cascade="all, delete-orphan"), + ) diff --git a/containers/backupchecks/src/backend/app/object_persistence.py b/containers/backupchecks/src/backend/app/object_persistence.py new file mode 100644 index 0000000..0113992 --- /dev/null +++ b/containers/backupchecks/src/backend/app/object_persistence.py @@ -0,0 +1,132 @@ +from __future__ import annotations + +from sqlalchemy import text + +from .database import db + + + +def _update_override_applied_for_run(job_id: int, run_id: int) -> None: + """Update JobRun.override_applied after run_object_links has been persisted. + + This ensures overview counters (dashboard/run checks) reflect override state even when + the relevant error message only becomes available after object persistence. + """ + try: + # Lazy imports to avoid heavy imports / circular references at module import time. + from .models import Job, JobRun # noqa + from .main.routes_shared import _apply_overrides_to_run # noqa + + job = Job.query.get(int(job_id)) + run = JobRun.query.get(int(run_id)) + if not job or not run: + return + + _status, applied, _lvl = _apply_overrides_to_run(job, run) + applied_bool = bool(applied) + if bool(getattr(run, "override_applied", False)) != applied_bool: + run.override_applied = applied_bool + db.session.commit() + except Exception: + try: + db.session.rollback() + except Exception: + pass + + +def persist_objects_for_approved_run(customer_id: int, job_id: int, run_id: int, mail_message_id: int) -> int: + """Persist parsed objects for reporting. + + Copies entries from mail_objects into: + - customer_objects (unique per customer + object_name) + - job_object_links (job <-> customer_object) + - run_object_links (run <-> customer_object with status/error) + + Returns number of objects processed. 
+ """ + engine = db.get_engine() + processed = 0 + + with engine.begin() as conn: + rows = conn.execute( + text( + """ + SELECT object_name, object_type, status, error_message + FROM mail_objects + WHERE mail_message_id = :mail_message_id + ORDER BY id + """ + ), + {"mail_message_id": mail_message_id}, + ).fetchall() + + for r in rows: + object_name = (r[0] or "").strip() + if not object_name: + continue + + object_type = r[1] + status = r[2] + error_message = r[3] + + # 1) Upsert customer_objects and get id + customer_object_id = conn.execute( + text( + """ + INSERT INTO customer_objects (customer_id, object_name, object_type, first_seen_at, last_seen_at) + VALUES (:customer_id, :object_name, :object_type, NOW(), NOW()) + ON CONFLICT (customer_id, object_name) + DO UPDATE SET + last_seen_at = NOW(), + object_type = COALESCE(EXCLUDED.object_type, customer_objects.object_type) + RETURNING id + """ + ), + { + "customer_id": customer_id, + "object_name": object_name, + "object_type": object_type, + }, + ).scalar() + + # 2) Upsert job_object_links + conn.execute( + text( + """ + INSERT INTO job_object_links (job_id, customer_object_id, first_seen_at, last_seen_at) + VALUES (:job_id, :customer_object_id, NOW(), NOW()) + ON CONFLICT (job_id, customer_object_id) + DO UPDATE SET last_seen_at = NOW() + """ + ), + { + "job_id": job_id, + "customer_object_id": customer_object_id, + }, + ) + + # 3) Upsert run_object_links + conn.execute( + text( + """ + INSERT INTO run_object_links (run_id, customer_object_id, status, error_message, observed_at) + VALUES (:run_id, :customer_object_id, :status, :error_message, NOW()) + ON CONFLICT (run_id, customer_object_id) + DO UPDATE SET + status = EXCLUDED.status, + error_message = EXCLUDED.error_message, + observed_at = NOW() + """ + ), + { + "run_id": run_id, + "customer_object_id": customer_object_id, + "status": status, + "error_message": error_message, + }, + ) + + processed += 1 + + return processed + diff --git a/containers/backupchecks/src/backend/app/parsers/__init__.py b/containers/backupchecks/src/backend/app/parsers/__init__.py new file mode 100644 index 0000000..d6df393 --- /dev/null +++ b/containers/backupchecks/src/backend/app/parsers/__init__.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +from datetime import datetime +import html +from typing import Tuple, Optional, List, Dict + +from ..database import db +from ..models import MailMessage, MailObject +from .threecx import try_parse_3cx +from .synology import try_parse_synology +from .boxafe import try_parse_boxafe +from .nakivo import try_parse_nakivo +from .veeam import try_parse_veeam +from .rdrive import try_parse_rdrive +from .syncovery import try_parse_syncovery + + +def _sanitize_text(value: object) -> object: + """Normalize parsed text fields. + + Some senders include HTML entities (e.g. "&") in parsed header fields. + We decode those so values are stored/displayed consistently. + """ + if not isinstance(value, str): + return value + # html.unescape also handles numeric entities. + v = html.unescape(value) + # Keep normalization minimal and safe. + return v.strip() + + +def _clear_mail_objects(msg: MailMessage) -> None: + """Delete existing MailObject rows for this message.""" + MailObject.query.filter_by(mail_message_id=msg.id).delete() + + +def _store_mail_objects(msg: MailMessage, objects: List[Dict]) -> None: + """Persist parsed objects for this mail message. 
+ + Each item in ``objects`` is expected to be a dict with keys: + - name (required) + - status (optional) + - error_message (optional) + """ + for item in objects or []: + name = (item.get("name") or "").strip() + if not name: + continue + object_type = (item.get("type") or item.get("object_type") or None) + if isinstance(object_type, str): + object_type = object_type.strip() or None + status = (item.get("status") or None) or None + error_message = item.get("error_message") or None + db.session.add( + MailObject( + mail_message_id=msg.id, + object_name=name, + object_type=object_type, + status=status, + error_message=error_message, + ) + ) + + +def parse_mail_message(msg: MailMessage) -> None: + """Parse a single MailMessage and update its parsed fields. + + This function is intentionally conservative: if no parser matches the + message, it simply sets parse_result to "no_match". Any unexpected errors + are caught and stored on the message so they are visible in the UI. + """ + # Reset parse metadata first + msg.backup_software = None + msg.backup_type = None + msg.job_name = None + msg.overall_status = None + msg.overall_message = None + + # Optional parsed storage metrics (for graphing capacity usage) + if hasattr(msg, 'storage_used_bytes'): + msg.storage_used_bytes = None + if hasattr(msg, 'storage_capacity_bytes'): + msg.storage_capacity_bytes = None + if hasattr(msg, 'storage_free_bytes'): + msg.storage_free_bytes = None + if hasattr(msg, 'storage_free_percent'): + msg.storage_free_percent = None + msg.parse_error = None + msg.parsed_at = datetime.utcnow() + + # Clear existing objects for this mail (if it has an id already) + if msg.id is not None: + _clear_mail_objects(msg) + + try: + handled, result, objects = try_parse_3cx(msg) + if not handled: + handled, result, objects = try_parse_synology(msg) + if not handled: + handled, result, objects = try_parse_boxafe(msg) + if not handled: + handled, result, objects = try_parse_rdrive(msg) + if not handled: + handled, result, objects = try_parse_nakivo(msg) + if not handled: + handled, result, objects = try_parse_veeam(msg) + if not handled: + handled, result, objects = try_parse_syncovery(msg) + except Exception as exc: + msg.parse_result = "error" + msg.parse_error = str(exc)[:500] + return + + if not handled: + # No parser recognised this message + msg.parse_result = "no_match" + return + + # Apply parsed result + msg.backup_software = _sanitize_text(result.get("backup_software")) + msg.backup_type = _sanitize_text(result.get("backup_type")) + msg.job_name = _sanitize_text(result.get("job_name")) + msg.overall_status = result.get("overall_status") + msg.overall_message = _sanitize_text(result.get("overall_message")) + + # Apply optional storage metrics if supported by the DB model + if hasattr(msg, 'storage_used_bytes'): + msg.storage_used_bytes = result.get('storage_used_bytes') + if hasattr(msg, 'storage_capacity_bytes'): + msg.storage_capacity_bytes = result.get('storage_capacity_bytes') + if hasattr(msg, 'storage_free_bytes'): + msg.storage_free_bytes = result.get('storage_free_bytes') + if hasattr(msg, 'storage_free_percent'): + msg.storage_free_percent = result.get('storage_free_percent') + msg.parse_result = "ok" + + # Store parsed objects (they will be committed by the caller) + if msg.id is not None: + _store_mail_objects(msg, objects or []) diff --git a/containers/backupchecks/src/backend/app/parsers/boxafe.py b/containers/backupchecks/src/backend/app/parsers/boxafe.py new file mode 100644 index 0000000..76cdf4f --- 
/dev/null +++ b/containers/backupchecks/src/backend/app/parsers/boxafe.py @@ -0,0 +1,109 @@ +from __future__ import annotations + +import re +from typing import Dict, Tuple, List, Optional + +from ..models import MailMessage + + +_SUBJECT_RE = re.compile( + r"^\[(?P[^\]]+)\]\[(?PBoxafe)\]\s+Notification\s+from\s+your\s+device:\s*(?P.+?)\s*$", + flags=re.IGNORECASE, +) + + +def _normalize_html(value: str) -> str: + """Normalize HTML content.""" + if not value: + return "" + if isinstance(value, bytes): + try: + value = value.decode("utf-8", errors="ignore") + except Exception: + value = str(value) + value = value.replace("\x00", "").replace("\ufeff", "") + return value + + +def _strip_html_tags(value: str) -> str: + if not value: + return "" + value = re.sub(r"<\s*br\s*/?>", "\n", value, flags=re.IGNORECASE) + value = re.sub(r"

", "\n", value, flags=re.IGNORECASE) + value = re.sub(r"", "\n", value, flags=re.IGNORECASE) + value = re.sub(r"<[^>]+>", "", value) + value = re.sub(r"[ \t\r\f\v]+", " ", value) + value = re.sub(r"\n\s*\n+", "\n", value) + return value.strip() + + +def _map_status(raw: str) -> Optional[str]: + if not raw: + return None + r = raw.strip().lower() + if r in {"warning", "warn"}: + return "Warning" + if r in {"success", "succeeded", "ok", "information", "info"}: + return "Success" + if r in {"failed", "failure", "error", "critical"}: + return "Failed" + # fallback: capitalize first letter + return raw.strip().capitalize() or None + + +def _extract_field(text: str, label: str) -> Optional[str]: + if not text: + return None + m = re.search(rf"(?im)^\s*{re.escape(label)}\s*:\s*(.+?)\s*$", text) + if not m: + return None + val = (m.group(1) or "").strip() + return val or None + + +def try_parse_boxafe(msg: MailMessage) -> Tuple[bool, Dict, List[Dict]]: + """Parse Boxafe notification mails.""" + subject = (getattr(msg, "subject", None) or "").strip() + m = _SUBJECT_RE.match(subject) + if not m: + return False, {}, [] + + severity = (m.group("severity") or "").strip() + device = (m.group("device") or "").strip() + + html = _normalize_html(getattr(msg, "html_body", None) or "") + text = _strip_html_tags(html) + + category = _extract_field(text, "Category") + body_severity = _extract_field(text, "Severity") + message = _extract_field(text, "Message") + + overall_status = _map_status(body_severity or severity) + + backup_type = category or "Shared Drives" + + # Boxafe "Domain Accounts" notifications are not job/object based. + # Example: "[Boxafe] Backed up Email for 0 user(s)." + if (category or "").strip().lower() == "domain accounts": + if message and re.search(r"\bbacked\s+up\s+email\b", message, flags=re.IGNORECASE): + backup_type = "Domain Accounts Email" + elif message and re.search(r"\bbacked\s+up\s+contact\b", message, flags=re.IGNORECASE): + backup_type = "Domain Accounts Contact" + elif message and re.search(r"\bbacked\s+up\s+drive\b", message, flags=re.IGNORECASE): + backup_type = "Domain Accounts Drive" + elif message and re.search(r"\bbacked\s+up\s+calendar\b", message, flags=re.IGNORECASE): + backup_type = "Domain Accounts Calendar" + else: + backup_type = "Domain Accounts" + + result: Dict[str, object] = { + "backup_software": "Boxafe", + "backup_type": backup_type, + "job_name": device, + "overall_status": overall_status, + "overall_message": message or (body_severity or severity), + } + + objects: List[Dict] = [] + + return True, result, objects diff --git a/containers/backupchecks/src/backend/app/parsers/nakivo.py b/containers/backupchecks/src/backend/app/parsers/nakivo.py new file mode 100644 index 0000000..1539255 --- /dev/null +++ b/containers/backupchecks/src/backend/app/parsers/nakivo.py @@ -0,0 +1,198 @@ +from __future__ import annotations + +import re +import html as htmllib +from typing import Dict, Tuple, List + +from ..models import MailMessage + + +_STATUS_MAP = { + "successful": "Success", + "success": "Success", + "failed": "Error", + "failure": "Error", + "warning": "Warning", + "warnings": "Warning", + "partially successful": "Warning", + "partial": "Warning", +} + + +def _normalize_html(html: str) -> str: + if not html: + return "" + if isinstance(html, bytes): + try: + html = html.decode("utf-8", errors="ignore") + except Exception: + html = str(html) + html = html.replace("\x00", "") + html = html.replace("\ufeff", "") + return html + + +def _html_to_text(html: str) 
-> str: + """Lightweight HTML to text conversion suitable for regex parsing.""" + html = _normalize_html(html) + if not html: + return "" + # Remove script/style blocks + html = re.sub(r"(?is)<(script|style).*?>.*?", " ", html) + # Insert newlines / tabs for table-ish structures + html = re.sub(r"(?i)", "\n", html) + html = re.sub(r"(?i)", "\n", html) + html = re.sub(r"(?i)", "\n", html) + html = re.sub(r"(?i)", "\t", html) + # Strip remaining tags + html = re.sub(r"(?s)<.*?>", " ", html) + text = htmllib.unescape(html) + # Normalize whitespace but keep tabs/newlines + text = text.replace("\r", "") + text = re.sub(r"[ ]+", " ", text) + text = re.sub(r"\t+", "\t", text) + text = re.sub(r"\n+", "\n", text) + text = re.sub(r" *\t *", "\t", text) + text = re.sub(r" *\n *", "\n", text) + return text.strip() + + +_SUBJECT_RE = re.compile(r'^"(?P.+?)"\s+job:\s+(?P.+?)\s*$', re.IGNORECASE) + + +def _is_nakivo_vmware_job(subject: str, text: str) -> bool: + subj = (subject or "").lower() + if " job:" in subj and subj.startswith('"') and "nakivo" in (text or "").lower(): + return True + # Many NAKIVO mails contain the backup type in the body navigation: + t = (text or "").lower() + has_type = ("backup job for vmware" in t) or ("replication job for vmware" in t) + return has_type and ("job run report" in t or "nakivo" in t) + + +def _extract_backup_type(text: str) -> str: + if not text: + return "Backup job for VMware" + m = re.search( + r"\b(?P(backup job for vmware|replication job for vmware))\b", + text, + re.IGNORECASE, + ) + if not m: + return "Backup job for VMware" + tt = (m.group("t") or "").strip().lower() + if "replication" in tt: + return "Replication job for VMware" + return "Backup job for VMware" + + +def _extract_vm_objects(text: str) -> List[str]: + """Extract VM/object names from the 'Virtual Machines' section.""" + if not text: + return [] + + # Keep order and avoid duplicates. + objects: List[str] = [] + seen = set() + + # Work line-based because the HTML is heavily table driven. + lines = [ln.strip() for ln in text.split("\n")] + # Find the *last* 'Virtual Machines' section header. The report has a 'Contents' navigation + # near the top that also contains 'Virtual Machines', which must be ignored. + start_idx = None + for i, ln in enumerate(lines): + if re.fullmatch(r"virtual machines:?", ln, re.IGNORECASE): + start_idx = i + 1 + if start_idx is None: + return [] + + stop_re = re.compile( + r"^(summary|target storage|alarms\s*&\s*notifications|last run:?|data\s*&\s*speed:?|duration:?|status:?|priority:?|started:?|finished:?|contents)$", + re.IGNORECASE, + ) + + name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9._-]{1,}$") + for ln in lines[start_idx:]: + if not ln: + continue + if stop_re.match(ln): + break + # VM/object names are typically on their own line. 
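+ # name_re accepts hostname-like tokens (letters, digits, '.', '_', '-', at least
+ # two characters), e.g. "exchange01.kuiperbv.nl"; leftover header words
+ # ("virtual", "machines") are excluded explicitly below.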
+ if name_re.match(ln) and ln.lower() not in {"virtual", "machines"}: + if ln not in seen: + seen.add(ln) + objects.append(ln) + + return objects + + +def try_parse_nakivo(msg: MailMessage) -> Tuple[bool, Dict, List[Dict]]: + """Parse NAKIVO VMware run reports (Backup and Replication).""" + subject = getattr(msg, "subject", None) or "" + html_body = getattr(msg, "html_body", None) or "" + text = _html_to_text(html_body) + + if not _is_nakivo_vmware_job(subject, text): + return False, {}, [] + + job_name = None + status_raw = None + + m = _SUBJECT_RE.match(subject.strip()) + if m: + job_name = (m.group("job") or "").strip() + status_raw = (m.group("status") or "").strip() + + # Fallback: first occurrence of the backup type is followed by the job name in many templates. + if not job_name: + # Try to find a line with the backup type and then a nearby token that looks like a hostname/job name. + # This is intentionally conservative. + mt = re.search( + r"(backup|replication) job for vmware\b[\s\S]{0,400}?(?P[A-Za-z0-9._-]{3,})\t", + text, + re.IGNORECASE, + ) + if mt: + job_name = mt.group("job").strip() + + if not status_raw: + # Look for a standalone status token near the top of the report + ms = re.search(r"\b(successful|failed|warning|warnings|partially successful)\b", text, re.IGNORECASE) + if ms: + status_raw = ms.group(1) + + overall_status = "Unknown" + if status_raw: + overall_status = _STATUS_MAP.get(status_raw.strip().lower(), "Unknown") + + backup_type = _extract_backup_type(text) + + result: Dict = { + "backup_software": "NAKIVO", + "backup_type": backup_type, + "job_name": job_name or "", + "overall_status": overall_status, + "overall_message": overall_status, + } + + objects: List[Dict] = [] + for obj in _extract_vm_objects(text): + objects.append( + { + "name": obj, + "status": overall_status, + "error_message": "" if overall_status == "Success" else overall_status, + } + ) + + # Fallback: if we couldn't extract objects, use the job name. 
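+ # This keeps reporting consistent when no 'Virtual Machines' section could be
+ # extracted: the run still gets a single object row carrying the overall job status.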
+ if not objects and job_name: + objects.append( + { + "name": job_name, + "status": overall_status, + "error_message": "" if overall_status == "Success" else overall_status, + } + ) + + return True, result, objects diff --git a/containers/backupchecks/src/backend/app/parsers/rdrive.py b/containers/backupchecks/src/backend/app/parsers/rdrive.py new file mode 100644 index 0000000..0f575b7 --- /dev/null +++ b/containers/backupchecks/src/backend/app/parsers/rdrive.py @@ -0,0 +1,115 @@ +from __future__ import annotations + +import re +from typing import Dict, Tuple, List + +from ..models import MailMessage + + +def _strip_html_tags(value: str) -> str: + """Minimal HTML-to-text conversion for parser fallback.""" + if not value: + return "" + v = re.sub(r"<\s*br\s*/?>", "\n", value, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"<[^>]+>", "", v) + return v + + +def _get_body_text(msg: MailMessage) -> str: + if msg.text_body: + return msg.text_body + if msg.html_body: + return _strip_html_tags(msg.html_body) + return "" + + +def _parse_overall_status(subject: str, body: str) -> str: + s = (subject or "").lower() + b = (body or "").lower() + + if "completed successfully" in s or re.search(r"\bsuccess\b", b): + return "Success" + if "completed with errors" in s or "failed" in s or re.search(r"\berror\b", b): + return "Error" + if "completed with warnings" in s or re.search(r"\bwarning\b", b): + return "Warning" + return "Unknown" + + +def _parse_job_name(subject: str) -> str: + """Extract job name from subject. + + Example subject: + "Process completed successfully. RDrive SRV001 Autobedrijf Eric Boom" + + We take the first two tokens after the first dot as the job name. + """ + s = (subject or "").strip() + if "." in s: + s = s.split(".", 1)[1].strip() + # Take first two words, which in practice yields e.g. "RDrive SRV001". + tokens = [t for t in re.split(r"\s+", s) if t] + if len(tokens) >= 2: + return f"{tokens[0]} {tokens[1]}".strip() + return (s or "").strip() or "R-Drive Image" + + +def _parse_primary_object(job_name: str, subject: str, body: str) -> str: + """Best-effort extraction of the primary machine/object name. + + Prefer the second token of the parsed job name (e.g. SRV001), otherwise + attempt to find a host-like token in the subject/body. + """ + parts = [p for p in re.split(r"\s+", (job_name or "").strip()) if p] + if len(parts) >= 2: + return parts[1] + + # Fallback: look for common host tokens like SRV001. + hay = "\n".join([(subject or ""), (body or "")]) + m = re.search(r"\b([A-Z]{2,6}\d{1,5})\b", hay) + if m: + return m.group(1) + return "" + + +def try_parse_rdrive(msg: MailMessage) -> Tuple[bool, Dict, List[Dict]]: + """Parse R-Drive Image report mails.""" + subject = (msg.subject or "").strip() + from_addr = (msg.from_address or "").lower() + body = _get_body_text(msg) + + # Identification: subject pattern and/or body signature. 
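+ # Example subject that matches: "Process completed successfully. RDrive SRV001 ..."
+ # (see the rdrive_image entry in parsers/registry.py).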
+ is_rdrive = ( + "process completed" in subject.lower() + or "r-drive image" in body.lower() + or "rtt mailer" in body.lower() + or "r-drive" in from_addr + ) + + if not is_rdrive: + return False, {}, [] + + overall_status = _parse_overall_status(subject, body) + job_name = _parse_job_name(subject) + + obj_name = _parse_primary_object(job_name, subject, body) + objects: List[Dict] = [] + if obj_name: + objects.append({"name": obj_name, "status": overall_status, "error_message": ""}) + + overall_message = "" + # Keep overall message compact; use the first sentence of the subject if present. + if subject: + overall_message = subject.split(".", 1)[0].strip().rstrip(".") + + result: Dict = { + "backup_software": "R-Drive Image", + "backup_type": "R-Drive Image", + "job_name": job_name, + "overall_status": overall_status, + "overall_message": overall_message, + } + return True, result, objects diff --git a/containers/backupchecks/src/backend/app/parsers/registry.py b/containers/backupchecks/src/backend/app/parsers/registry.py new file mode 100644 index 0000000..89ebaf6 --- /dev/null +++ b/containers/backupchecks/src/backend/app/parsers/registry.py @@ -0,0 +1,445 @@ +PARSER_DEFINITIONS = [ + { + "name": "veeam_backup_job", + "backup_software": "Veeam", + "backup_types": ["Backup Job"], + "order": 10, + "enabled": True, + "match": { + "from_contains": "veeam", + "subject_contains": "Backup Job", + }, + "description": "Parses standard Veeam Backup Job reports.", + "example": { + "subject": 'Veeam Backup Job \"Daily-VM-Backup\" finished with WARNING', + "from_address": "veeam@customer.local", + "body_snippet": "Job 'Daily-VM-Backup' finished with warnings. See details for each VM below...", + "parsed_result": { + "backup_software": "Veeam", + "backup_type": "Backup Job", + "job_name": "Daily-VM-Backup", + "objects": [ + { + "name": "VM-APP01", + "status": "Success", + "error_message": "", + }, + { + "name": "VM-DB01", + "status": "Warning", + "error_message": "Low disk space warning", + }, + { + "name": "VM-FS01", + "status": "Failed", + "error_message": "VSS error 0x800423f4", + }, + ], + }, + }, + }, + { + "name": "veeam_replication_job", + "backup_software": "Veeam", + "backup_types": ["Replication job", "Replica Job"], + "order": 15, + "enabled": True, + "match": { + "from_contains": "veeam", + "subject_contains": "Replication", + }, + "description": "Parses Veeam Replication/Replica job reports.", + "example": { + "subject": 'Veeam Replication job "DR-Replicate" finished successfully', + "from_address": "veeam@customer.local", + "body_snippet": "Replication job: DR-Replicate finished successfully.", + "parsed_result": { + "backup_software": "Veeam", + "backup_type": "Replication job", + "job_name": "DR-Replicate", + "objects": [ + { + "name": "VM-APP01", + "status": "Success", + "error_message": "", + } + ], + }, + }, + }, + { + "name": "veeam_agent_backup", + "backup_software": "Veeam", + "backup_types": ["Agent Backup job"], + "order": 16, + "enabled": True, + "match": { + "from_contains": "veeam", + "subject_contains": "Agent Backup", + }, + "description": "Parses Veeam Agent Backup job reports.", + "example": { + "subject": 'Veeam Agent Backup job "SERVER01" finished with WARNING', + "from_address": "veeam@customer.local", + "body_snippet": "Agent Backup job: SERVER01 finished with warnings.", + "parsed_result": { + "backup_software": "Veeam", + "backup_type": "Agent Backup job", + "job_name": "SERVER01", + "objects": [ + { + "name": "SERVER01", + "status": "Warning", + "error_message": 
"", + } + ], + }, + }, + }, + { + "name": "veeam_m365_backup", + "backup_software": "Veeam", + "backup_types": ["Veeam Backup for Microsoft 365"], + "order": 17, + "enabled": True, + "match": { + "from_contains": "veeam", + "subject_contains": "Microsoft 365", + }, + "description": "Parses Veeam Backup for Microsoft 365 job reports.", + "example": { + "subject": 'Veeam Backup for Microsoft 365 "Tenant - Daily M365" finished successfully', + "from_address": "veeam@customer.local", + "body_snippet": "Veeam Backup for Microsoft 365 report for job 'Daily M365'...", + "parsed_result": { + "backup_software": "Veeam", + "backup_type": "Veeam Backup for Microsoft 365", + "job_name": "Daily M365", + "objects": [ + { + "name": "https://tenant.sharepoint.com", + "status": "Success", + "error_message": "", + } + ], + }, + }, + }, + { + "name": "veeam_backup_copy", + "backup_software": "Veeam", + "backup_types": ["Backup Copy Job"], + "order": 20, + "enabled": True, + "match": { + "from_contains": "veeam", + "subject_contains": "Backup Copy Job", + }, + "description": "Parses Veeam Backup Copy Job status mails.", + "example": { + "subject": 'Veeam Backup Copy Job \"Offsite-Repository\" finished successfully', + "from_address": "veeam@customer.local", + "body_snippet": "Job 'Offsite-Repository' finished successfully.", + "parsed_result": { + "backup_software": "Veeam", + "backup_type": "Backup Copy Job", + "job_name": "Offsite-Repository", + "objects": [ + { + "name": "Backup Copy Chain", + "status": "Success", + "error_message": "", + } + ], + }, + }, + }, + { + "name": "nakivo_vmware_backup", + "backup_software": "NAKIVO", + "backup_types": ["Backup job for VMware"], + "order": 30, + "enabled": True, + "match": { + "from_contains": "nakivo", + "subject_contains": "Backup job", + }, + "description": "Parses NAKIVO VMware backup job notifications.", + "example": { + "subject": "Backup job for VMware 'Daily-Infra-Backup' completed with warnings", + "from_address": "backup@nakivo.local", + "body_snippet": "Job 'Daily-Infra-Backup' has completed with warnings.", + "parsed_result": { + "backup_software": "NAKIVO", + "backup_type": "Backup job for VMware", + "job_name": "Daily-Infra-Backup", + "objects": [ + { + "name": "ESX-Host01", + "status": "Success", + "error_message": "", + }, + { + "name": "VM-LOG01", + "status": "Warning", + "error_message": "Low free space on datastore", + }, + ], + }, + }, + }, + { + "name": "nakivo_vmware_replication", + "backup_software": "NAKIVO", + "backup_types": ["Replication job for VMware"], + "order": 40, + "enabled": True, + "match": { + "from_contains": "nakivo", + "subject_contains": "Replication job", + }, + "description": "Parses NAKIVO VMware replication job notifications.", + "example": { + "subject": "Replication job for VMware 'DR-Replicate' completed successfully", + "from_address": "backup@nakivo.local", + "body_snippet": "Job 'DR-Replicate' has completed successfully.", + "parsed_result": { + "backup_software": "NAKIVO", + "backup_type": "Replication job for VMware", + "job_name": "DR-Replicate", + "objects": [ + { + "name": "VM-APP01", + "status": "Success", + "error_message": "", + }, + ], + }, + }, + }, + { + "name": "panel3_failure", + "backup_software": "Panel3", + "backup_types": ["Backup Job"], + "order": 50, + "enabled": True, + "match": { + "from_contains": "panel3", + "subject_contains": "Backup failed", + }, + "description": "Parses Panel3 backup failure notifications. 
No mail = success.", + "example": { + "subject": "Panel3 backup failed on 'Webserver01'", + "from_address": "backup@panel3.local", + "body_snippet": "The backup on server 'Webserver01' has failed.", + "parsed_result": { + "backup_software": "Panel3", + "backup_type": "Backup Job", + "job_name": "Webserver01-backup", + "objects": [ + { + "name": "Webserver01", + "status": "Failed", + "error_message": "Backup process exited with error code 1", + } + ], + }, + }, + }, + { + "name": "synology_active_backup", + "backup_software": "Synology", + "backup_types": ["Active Backup"], + "order": 25, + "enabled": True, + "match": { + "subject_contains": "Active Backup", + }, + "description": "Parses Synology Active Backup notifications (e.g. Active Backup for Google Workspace).", + "example": { + "subject": "NAS - Active Backup for Google Workspace - back-uptaak [Google D-Two] is gedeeltelijk voltooid", + "from_address": "nas@customer.local", + "body_snippet": "Back-up [Google D-Two] is voltooid, maar van sommige items kon geen back-up worden gemaakt.\n- Mijn schijf: succes: 0; waarschuwing: 11; fout: 0\n...", + "parsed_result": { + "backup_software": "Synology", + "backup_type": "Active Backup", + "job_name": "Google D-Two", + "overall_status": "Warning", + "overall_message": "11 warning(s)", + "objects": [], + }, + }, + }, + { + "name": "synology_hyperbackup", + "backup_software": "Synology", + "backup_types": ["Hyperbackup"], + "order": 27, + "enabled": True, + "match": { + "subject_contains_any": ["Hyper Backup", "Gegevensback-uptaak", "Data backup task"], + "body_contains_any": ["Hyper Backup", "Taaknaam:", "Task name:"], + }, + "description": "Parses Synology Hyper Backup task notifications.", + "example": { + "subject": "Gegevensback-uptaak op NAS01 is mislukt", + "from_address": "nas01@yourdomain", + "body_snippet": "Gegevensback-uptaak op NAS01 is mislukt\nTaaknaam: Data backup - NAS thuis\nGa naar Hyper Backup > Logboek voor meer informatie.", + "parsed_result": { + "backup_software": "Synology", + "backup_type": "Hyperbackup", + "job_name": "Data backup - NAS thuis", + "overall_status": "Failed", + "overall_message": "Failed", + "objects": [], + }, + }, + }, + + { + "name": "synology_rsync", + "backup_software": "Synology", + "backup_types": ["R-Sync"], + "order": 28, + "enabled": True, + "match": { + "subject_contains_any": ["R-Sync", "Rsync"], + "body_contains_any": ["Back-uptaak:", "Backup task:"], + }, + "description": "Parses Synology Network Backup / R-Sync task notifications.", + "example": { + "subject": "Synology NAS Network backup - R-Sync ASP-NAS02 completed", + "from_address": "nas@customer.local", + "body_snippet": "Uw back-uptaak R-Sync ASP-NAS02 is nu voltooid.\n\nBack-uptaak: R-Sync ASP-NAS02\n...", + "parsed_result": { + "backup_software": "Synology", + "backup_type": "R-Sync", + "job_name": "R-Sync ASP-NAS02", + "overall_status": "Success", + "overall_message": "Success", + "objects": [], + }, + }, + }, + + { + "name": "nakivo_vmware_backup_job", + "backup_software": "NAKIVO", + "backup_types": ["Backup job for VMware"], + "order": 30, + "enabled": True, + "match": { + "from_contains": "nakivo", + "subject_contains": "job:", + }, + "description": "Parses NAKIVO Backup & Replication reports for VMware backup jobs.", + "example": { + "subject": '"exchange01.kuiperbv.nl" job: Successful', + "from_address": "NAKIVO Backup & Replication ", + "body_snippet": "Job Run Report... Backup job for VMware ... 
Successful", + "parsed_result": { + "backup_software": "NAKIVO", + "backup_type": "Backup job for VMware", + "job_name": "exchange01.kuiperbv.nl", + "objects": [ + { + "name": "exchange01.kuiperbv.nl", + "status": "Success", + "error_message": "", + } + ], + }, + }, + }, + + + { + "name": "syncovery_backup", + "backup_software": "Syncovery", + "backup_types": ["Backup Job"], + "order": 60, + "enabled": True, + "match": { + "from_contains": "syncovery", + "subject_contains": "Profile", + }, + "description": "Parses Syncovery profile result mails.", + "example": { + "subject": "Syncovery Profile 'Fileserver01' completed successfully", + "from_address": "syncovery@backup.local", + "body_snippet": "Profile 'Fileserver01' completed successfully.", + "parsed_result": { + "backup_software": "Syncovery", + "backup_type": "Backup Job", + "job_name": "Fileserver01", + "objects": [ + { + "name": "Fileserver01", + "status": "Success", + "error_message": "", + } + ], + }, + }, + }, + + { + "name": "rdrive_image", + "backup_software": "R-Drive Image", + "backup_types": ["R-Drive Image"], + "order": 65, + "enabled": True, + "match": { + "from_contains": "rdrive", + "subject_contains": "Process completed", + }, + "description": "Parses R-Drive Image completion mails.", + "example": { + "subject": "Process completed successfully. RDrive SRV001 Autobedrijf Eric Boom", + "from_address": "rdrive@customer.local", + "body_snippet": "R-Drive Image 6.0 ... Operation completed successfully", + "parsed_result": { + "backup_software": "R-Drive Image", + "backup_type": "R-Drive Image", + "job_name": "RDrive SRV001", + "objects": [ + { + "name": "SRV001", + "status": "Success", + "error_message": "", + } + ], + }, + }, + }, + + { + "name": "syncovery_daily_summary", + "backup_software": "Syncovery", + "backup_types": ["Syncovery"], + "order": 70, + "enabled": True, + "match": { + "from_contains": "syncovery", + "subject_contains": "Daily Summary", + }, + "description": "Parses Syncovery Daily Summary reports.", + "example": { + "subject": "[Syncovery v10.16.15] Daily Summary - ASP-Management.asp.scalahosting.solutions", + "from_address": "syncovery@scalahosting.solutions", + "body_snippet": "Syncovery v10... Daily Summary ... Jobs Run Fine: 1 ...", + "parsed_result": { + "backup_software": "Syncovery", + "backup_type": "Syncovery", + "job_name": "ASP-Management.asp.scalahosting.solutions", + "objects": [ + { + "name": "Xelion SCP Backup JvG", + "status": "Success", + "error_message": "", + } + ], + }, + }, + }, +] diff --git a/containers/backupchecks/src/backend/app/parsers/syncovery.py b/containers/backupchecks/src/backend/app/parsers/syncovery.py new file mode 100644 index 0000000..1d2c17e --- /dev/null +++ b/containers/backupchecks/src/backend/app/parsers/syncovery.py @@ -0,0 +1,208 @@ +from __future__ import annotations + +import re +import html +from typing import Dict, Tuple, List + +from ..models import MailMessage + + +def _strip_html_tags(value: str) -> str: + """Minimal HTML-to-text conversion for Syncovery daily summary mails.""" + if not value: + return "" + v = re.sub(r"<\s*br\s*/?>", "\n", value, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"<[^>]+>", "", v) + + # Decode entities (  etc.) and normalize non-breaking spaces. 
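+ # html.unescape also decodes numeric entities, which can yield "\xa0"
+ # (non-breaking space); the explicit replace then maps those to regular spaces.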
+ v = html.unescape(v) + v = v.replace("\xa0", " ") + return v + + +def _get_body_text(msg: MailMessage) -> str: + if msg.text_body: + return msg.text_body + if msg.html_body: + return _strip_html_tags(msg.html_body) + return "" + + +def _normalize_ws(value: str) -> str: + return re.sub(r"\s+", " ", (value or "").strip()) + + +def _parse_job_name(subject: str, body: str) -> str: + """Extract the Syncovery instance/host name from subject or body.""" + s = _normalize_ws(subject) + m = re.search(r"Daily\s+Summary\s*-\s*(.+)$", s, flags=re.IGNORECASE) + if m: + return m.group(1).strip() + + # Fallback: body often contains a 'Running on :' line. + b = _normalize_ws(body) + m = re.search(r"Running\s+on\s*:\s*([^\s]+)", b, flags=re.IGNORECASE) + if m: + return m.group(1).strip() + + return "Syncovery" + + +def _parse_int_stat(body: str, label: str) -> int: + b = body or "" + # e.g. "Jobs Run Fine: 1" or "Jobs Run Fine:   1" + m = re.search(rf"{re.escape(label)}\s*:\s*(\d+)\b", b, flags=re.IGNORECASE) + if m: + try: + return int(m.group(1)) + except Exception: + return 0 + # Fallback: allow label without colon when HTML-to-text conversion got messy + m = re.search(rf"{re.escape(label)}\s+(\d+)\b", b, flags=re.IGNORECASE) + if m: + try: + return int(m.group(1)) + except Exception: + return 0 + return 0 + + +def _parse_objects(body: str) -> List[Dict]: + """Extract per-job lines listed in the Daily Summary.""" + objects: List[Dict] = [] + lines = [l.strip() for l in (body or "").splitlines() if l.strip()] + + # Syncovery daily summary has a "Jobs Run Fine" section with lines like: + # "27-12-2025 23:00:01 Xelion SCP Backup JvG 1 copied ..." + # Also a "Jobs Run With Errors" section in similar format. + current_status: str | None = None + i = 0 + while i < len(lines): + line = lines[i] + low = line.lower() + if "jobs run fine" in low: + current_status = "Success" + i += 1 + continue + if "jobs run with errors" in low: + current_status = "Error" + i += 1 + continue + if current_status is None: + i += 1 + continue + + # Common HTML-to-text output splits table cells across lines: + # 1) timestamp + # 2) job name + # 3) details + ts = re.match(r"^(\d{2}[-/.]\d{2}[-/.]\d{4})\s+(\d{2}:\d{2}:\d{2})\b", line) + if ts: + job = "" + details = "" + if i + 1 < len(lines): + job = lines[i + 1].strip() + if i + 2 < len(lines): + details = lines[i + 2].strip() + + if job: + objects.append( + { + "name": job, + "status": current_status, + "error_message": "" if current_status == "Success" else details, + } + ) + i += 3 + continue + + m = re.match( + r"^(\d{2}[-/.]\d{2}[-/.]\d{4})\s+(\d{2}:\d{2}:\d{2})\s+(.+?)\s{2,}(.+)$", + line, + ) + if m: + job = m.group(3).strip() + details = m.group(4).strip() + if job: + objects.append( + { + "name": job, + "status": current_status, + "error_message": "" if current_status == "Success" else details, + } + ) + continue + + # Looser match when spacing collapses: timestamp + job + "Duration:" marker + m = re.match( + r"^(\d{2}[-/.]\d{2}[-/.]\d{4})\s+(\d{2}:\d{2}:\d{2})\s+(.+?)\s+Duration\s*:\s*(.+)$", + line, + flags=re.IGNORECASE, + ) + if m: + job = m.group(3).strip() + if job: + objects.append({"name": job, "status": current_status, "error_message": ""}) + + i += 1 + + # De-duplicate while preserving order + seen = set() + uniq: List[Dict] = [] + for o in objects: + key = (o.get("name") or "").strip().lower() + if not key or key in seen: + continue + seen.add(key) + uniq.append(o) + return uniq + + +def try_parse_syncovery(msg: MailMessage) -> Tuple[bool, Dict, List[Dict]]: + 
"""Parse Syncovery Daily Summary mails.""" + subject_raw = (msg.subject or "").strip() + subject = _normalize_ws(subject_raw) + from_addr = (msg.from_address or "").lower() + body = _get_body_text(msg) + + is_syncovery = ( + "syncovery" in subject.lower() + or "daily summary" in subject.lower() + or "syncovery" in from_addr + or "syncovery" in (body or "").lower() + ) + if not is_syncovery: + return False, {}, [] + + job_name = _parse_job_name(subject_raw, body) + + jobs_fine = _parse_int_stat(body, "Jobs Run Fine") + jobs_errors = _parse_int_stat(body, "Jobs Run With Errors") + scheduled_not_run = _parse_int_stat(body, "Scheduled Jobs Not Run") + + if jobs_errors > 0: + overall_status = "Error" + elif scheduled_not_run > 0: + overall_status = "Warning" + elif jobs_fine > 0: + overall_status = "Success" + else: + overall_status = "Unknown" + + overall_message = ( + f"Jobs fine: {jobs_fine}, errors: {jobs_errors}, scheduled not run: {scheduled_not_run}" + ) + + objects = _parse_objects(body) + + result: Dict = { + "backup_software": "Syncovery", + "backup_type": "Syncovery", + "job_name": job_name, + "overall_status": overall_status, + "overall_message": overall_message, + } + return True, result, objects diff --git a/containers/backupchecks/src/backend/app/parsers/synology.py b/containers/backupchecks/src/backend/app/parsers/synology.py new file mode 100644 index 0000000..64cfcff --- /dev/null +++ b/containers/backupchecks/src/backend/app/parsers/synology.py @@ -0,0 +1,417 @@ +from __future__ import annotations + +import re +from typing import Dict, Tuple, List, Optional + +from ..models import MailMessage + +# Synology notification parsers +# +# Supported: +# - Active Backup (Synology): notifications from Active Backup (e.g. Google Workspace) +# - Hyper Backup (Synology): task notifications from Hyper Backup +# - Account Protection (Synology): DSM Account Protection lockout notifications + +_BR_RE = re.compile(r"<\s*br\s*/?\s*>", re.I) +_TAG_RE = re.compile(r"<[^>]+>") +_WS_RE = re.compile(r"[\t\r\f\v ]+") + + +# Synology DSM Account Protection (NL/EN) +# Subject often contains "account [user]" and body contains "Accountbeveiliging" (NL) +# or "Account Protection" (EN). +_ACCOUNT_PROTECTION_MARKER_RE = re.compile(r"account\s*(?:protection|beveiliging)", re.I) +_ACCOUNT_NAME_RE = re.compile(r"\baccount\s*\[(?P[^\]]+)\]", re.I) + + + +def _html_to_text(value: str) -> str: + """Convert a small HTML mail body to plain text (best-effort).""" + if not value: + return "" + s = value + s = _BR_RE.sub("\n", s) + s = _TAG_RE.sub("", s) + s = s.replace(" ", " ") + s = s.replace("\u00a0", " ") + return _WS_RE.sub(" ", s).strip() + + +def _extract_job_name_active_backup(subject: str, text: str) -> Optional[str]: + # Prefer [Job Name] pattern in subject + m = re.search(r"\[([^\]]+)\]", subject or "") + if m: + return m.group(1).strip() + + # Fallback: body sometimes contains "Back-up [Job] is ..." + m = re.search(r"Back-?up\s*\[([^\]]+)\]", text or "", re.I) + if m: + return m.group(1).strip() + + return None + + +def _is_synology_account_protection(subject: str, text: str) -> bool: + subj = subject or "" + t = text or "" + + # Marker can appear in subject or body. + if _ACCOUNT_PROTECTION_MARKER_RE.search(subj) or _ACCOUNT_PROTECTION_MARKER_RE.search(t): + return True + + # Some NL subjects don't mention "Accountbeveiliging" but do include "account [user]". 
+ return _ACCOUNT_NAME_RE.search(subj) is not None and ( + "aanmeldingspogingen" in (t.lower()) + or "login" in (t.lower()) + or "sign-in" in (t.lower()) + ) + + +def _parse_account_protection(subject: str, text: str) -> Tuple[bool, Dict, List[Dict]]: + # Extract account name from subject or body. + m = _ACCOUNT_NAME_RE.search(subject or "") or _ACCOUNT_NAME_RE.search(text or "") + if not m: + return False, {}, [] + + job_name = (m.group("name") or "").strip() + if not job_name: + return False, {}, [] + + # Account protection notifications are informational and do not have schedules. + overall_status = "Warning" + overall_message = "Account Protection triggered" + + # Provide a clearer message when we can detect the common lockout phrasing. + haystack = (text or "").lower() + if "mislukte" in haystack or "failed" in haystack: + overall_message = "Too many failed login attempts" + + result: Dict = { + "backup_software": "Synology", + "backup_type": "Account Protection", + "job_name": job_name, + "overall_status": overall_status, + "overall_message": overall_message, + } + + # No objects for this notification type. + return True, result, [] + + +# Example (NL): +# "- Mijn schijf: succes: 0; waarschuwing: 11; fout: 0" +# Example (EN): +# "- My Drive: success: 0; warning: 11; error: 0" +_SERVICE_LINE_RE = re.compile( + r"^\s*[-•]\s*(?P[^:]+):\s*" + r"(?:succes|success):\s*(?P\d+)\s*;\s*" + r"(?:waarschuwing|warning):\s*(?P\d+)\s*;\s*" + r"(?:fout|error):\s*(?P\d+)\s*$", + re.I, +) + + +def _extract_totals(text: str) -> Tuple[int, int, int]: + success = warning = error = 0 + for line in (text or "").splitlines(): + m = _SERVICE_LINE_RE.match(line.strip()) + if not m: + continue + success += int(m.group("success")) + warning += int(m.group("warning")) + error += int(m.group("error")) + return success, warning, error + + +_ABB_SUBJECT_RE = re.compile(r"\bactive\s+backup\s+for\s+business\b", re.I) + +# Example (NL): +# "De back-uptaak vSphere-Task-1 op KANTOOR-NEW is voltooid." +# Example (EN): +# "The backup task vSphere-Task-1 on KANTOOR-NEW has completed." 
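+# The pattern below covers both variants: the NL branch captures (job, host) and
+# the EN branch captures (job_en, host_en); the parser merges them with "or".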
+_ABB_COMPLETED_RE = re.compile( + r"\b(?:de\s+)?back-?up\s*taak\s+(?P.+?)\s+op\s+(?P.+?)\s+is\s+voltooid\b" + r"|\b(?:the\s+)?back-?up\s+task\s+(?P.+?)\s+on\s+(?P.+?)\s+(?:is\s+)?(?:completed|finished|has\s+completed)\b", + re.I, +) + +_ABB_FAILED_RE = re.compile( + r"\b(?:de\s+)?back-?up\s*taak\s+.+?\s+op\s+.+?\s+is\s+mislukt\b" + r"|\b(?:the\s+)?back-?up\s+task\s+.+?\s+on\s+.+?\s+(?:has\s+)?failed\b", + re.I, +) + +_ABB_DEVICE_LIST_RE = re.compile(r"^\s*(?:Apparaatlijst|Device\s+list)\s*:\s*(?P.+?)\s*$", re.I) + + +def _is_synology_active_backup_for_business(subject: str, text: str) -> bool: + subj = subject or "" + if _ABB_SUBJECT_RE.search(subj): + return True + t = text or "" + return _ABB_SUBJECT_RE.search(t) is not None + + +def _parse_active_backup_for_business(subject: str, text: str) -> Tuple[bool, Dict, List[Dict]]: + haystack = f"{subject}\n{text}" + m = _ABB_COMPLETED_RE.search(haystack) + if not m: + # Not our ABB format + return False, {}, [] + + job_name = (m.group("job") or m.group("job_en") or "").strip() + host = (m.group("host") or m.group("host_en") or "").strip() + + overall_status = "Success" + overall_message = "Success" + if _ABB_FAILED_RE.search(haystack): + overall_status = "Error" + overall_message = "Failed" + + objects: List[Dict] = [] + for line in (text or "").splitlines(): + mm = _ABB_DEVICE_LIST_RE.match(line.strip()) + if not mm: + continue + raw_list = (mm.group("list") or "").strip() + # "DC01, SQL01" + for name in [p.strip() for p in raw_list.split(",")]: + if name: + objects.append({"name": name, "status": overall_status}) + + result = { + "backup_software": "Synology", + "backup_type": "Active Backup for Business", + "job_name": job_name, + "overall_status": overall_status, + "overall_message": overall_message, + } + + # Provide a slightly nicer overall message when host is available + if host and overall_message in ("Success", "Failed"): + result["overall_message"] = f"{overall_message} ({host})" + + return True, result, objects + +def _is_synology_active_backup(subject: str, text: str) -> bool: + # Keep matching conservative to avoid false positives. + subj = (subject or "").lower() + if "active backup" in subj: + return True + + # Fallback for senders that don't include it in the subject + t = (text or "").lower() + return "active backup" in t and ("adminconsole" in t or "back-up" in t or "backup" in t) + + +def _is_synology_hyper_backup(subject: str, text: str) -> bool: + # Subject often does not mention Hyper Backup; body typically contains it. + s = (subject or "").lower() + t = (text or "").lower() + + if "hyper backup" in s: + return True + + # Dutch/English variants that appear in Hyper Backup task notifications. + if ("hyper backup" in t) and ( + ("taaknaam:" in t) + or ("task name:" in t) + or ("gegevensback-uptaak" in t) + or ("data backup task" in t) + ): + return True + + # Newer task notification variant (often used for cloud destinations like HiDrive) + # does not always include "Hyper Backup" in the subject/body but contains these fields. + if ("backup task:" in t) and ("backup destination:" in t): + return True + + # Dutch task notification variant (e.g. "Uw back-uptaak ... 
is nu voltooid") + return ("back-uptaak:" in t) and ("back-updoel:" in t) + + +_HB_TASKNAME_RE = re.compile( + r"^(?:Taaknaam|Task name|Back-uptaak|Backup Task|Backup task)\s*:\s*(?P.+)$", + re.I | re.M, +) +_HB_BACKUP_TASK_RE = re.compile(r"^Backup Task\s*:\s*(?P.+)$", re.I | re.M) +_HB_FAILED_RE = re.compile(r"\bis\s+mislukt\b|\bhas\s+failed\b|\bfailed\b", re.I) +_HB_SUCCESS_RE = re.compile( + r"\bis\s+(?:nu\s+)?voltooid\b|\bhas\s+completed\b|\bsuccessful\b|\bgeslaagd\b", + re.I, +) +_HB_WARNING_RE = re.compile(r"\bgedeeltelijk\s+voltooid\b|\bpartially\s+completed\b|\bwarning\b|\bwaarschuwing\b", re.I) + + +# Synology Network Backup / R-Sync task notifications +# Example (NL): +# "Uw back-uptaak R-Sync ASP-NAS02 is nu voltooid." +# "Back-uptaak: R-Sync ASP-NAS02" +# Example (EN): +# "Your backup task R-Sync ASP-NAS02 has completed." +# "Backup task: R-Sync ASP-NAS02" +_RSYNC_MARKER_RE = re.compile(r"\br-?sync\b", re.I) +_RSYNC_TASK_RE = re.compile(r"^(?:Back-uptaak|Backup\s+task)\s*:\s*(?P.+)$", re.I | re.M) +_RSYNC_FAILED_RE = re.compile(r"\bis\s+mislukt\b|\bhas\s+failed\b|\bfailed\b", re.I) +_RSYNC_WARNING_RE = re.compile(r"\bgedeeltelijk\s+voltooid\b|\bpartially\s+completed\b|\bwarning\b|\bwaarschuwing\b", re.I) +_RSYNC_SUCCESS_RE = re.compile(r"\bis\s+(?:nu\s+)?voltooid\b|\bhas\s+completed\b|\bcompleted\b|\bsuccessful\b|\bgeslaagd\b", re.I) + + +def _is_synology_rsync(subject: str, text: str) -> bool: + s = subject or "" + t = text or "" + if _RSYNC_MARKER_RE.search(s) or _RSYNC_MARKER_RE.search(t): + # Require the task field to reduce false positives + return _RSYNC_TASK_RE.search(t) is not None + return False + + +def _parse_rsync(subject: str, text: str) -> Tuple[bool, Dict, List[Dict]]: + m = _RSYNC_TASK_RE.search(text or "") + if not m: + return False, {}, [] + + job_name = (m.group("name") or "").strip() + if not job_name: + return False, {}, [] + + overall_status = "Success" + overall_message = "Success" + + haystack = f"{subject}\n{text}" + if _RSYNC_FAILED_RE.search(haystack): + overall_status = "Failed" + overall_message = "Failed" + elif _RSYNC_WARNING_RE.search(haystack): + overall_status = "Warning" + overall_message = "Warning" + elif _RSYNC_SUCCESS_RE.search(haystack): + overall_status = "Success" + overall_message = "Success" + + result: Dict = { + "backup_software": "Synology", + "backup_type": "R-Sync", + "job_name": job_name, + "overall_status": overall_status, + "overall_message": overall_message, + } + + # R-Sync notifications do not provide an object list. + return True, result, [] + + +def _parse_hyper_backup(subject: str, text: str) -> Tuple[bool, Dict, List[Dict]]: + m = _HB_TASKNAME_RE.search(text or "") or _HB_BACKUP_TASK_RE.search(text or "") + if not m: + return False, {}, [] + + job_name = (m.group("name") or "").strip() + if not job_name: + return False, {}, [] + + overall_status = "Success" + overall_message = "Success" + + # Determine status. Prefer explicit failure first. + haystack = f"{subject}\n{text}" + if _HB_FAILED_RE.search(haystack): + overall_status = "Failed" + overall_message = "Failed" + elif _HB_WARNING_RE.search(haystack): + overall_status = "Warning" + overall_message = "Warning" + elif _HB_SUCCESS_RE.search(haystack): + overall_status = "Success" + overall_message = "Success" + + backup_type = "Hyperbackup" + # If this is the "Backup Task:" variant and it references HiDrive/Strato, classify as Strato HiDrive. 
+ if _HB_BACKUP_TASK_RE.search(text or "") and re.search(r"\bhidrive\b|\bstrato\b", (text or ""), re.I): + backup_type = "Strato HiDrive" + + result: Dict = { + "backup_software": "Synology", + "backup_type": backup_type, + "job_name": job_name, + "overall_status": overall_status, + "overall_message": overall_message, + } + + # Hyper Backup task notifications do not include an object list. + return True, result, [] + + +def _parse_active_backup(subject: str, text: str) -> Tuple[bool, Dict, List[Dict]]: + job_name = _extract_job_name_active_backup(subject, text) + if not job_name: + # Not our Synology Active Backup format + return False, {}, [] + + success, warning, error = _extract_totals(text) + + if error > 0: + overall_status = "Error" + overall_message = f"{error} error(s), {warning} warning(s)" + elif warning > 0: + overall_status = "Warning" + overall_message = f"{warning} warning(s)" + else: + overall_status = "Success" + overall_message = "Success" + + result: Dict = { + "backup_software": "Synology", + "backup_type": "Active Backup", + "job_name": job_name, + "overall_status": overall_status, + "overall_message": overall_message, + } + + # This mail type typically summarizes per-service counts only (no per-item list), + # so we intentionally return an empty object list. + return True, result, [] + + +def try_parse_synology(msg: MailMessage) -> Tuple[bool, Dict, List[Dict]]: + """Parse Synology notifications (Active Backup / Hyper Backup).""" + subject = getattr(msg, "subject", None) or "" + + # Microsoft Graph stores either html_body (contentType=html) or text_body. + # Some senders only provide text/plain, so parsers must fall back to text_body. + html_body = getattr(msg, "html_body", None) or "" + text_body = getattr(msg, "text_body", None) or "" + + # If html_body is empty, treat text_body as already-normalized text. + text = _html_to_text(html_body) if html_body else (text_body or "") + + # DSM Account Protection (informational; no schedule) + if _is_synology_account_protection(subject, text): + ok, result, objects = _parse_account_protection(subject, text) + if ok: + return True, result, objects + + # R-Sync first (otherwise it may be misclassified as Hyper Backup due to shared fields) + if _is_synology_rsync(subject, text): + ok, result, objects = _parse_rsync(subject, text) + if ok: + return True, result, objects + + # Hyper Backup next (subject often doesn't include it) + if _is_synology_hyper_backup(subject, text): + ok, result, objects = _parse_hyper_backup(subject, text) + if ok: + return True, result, objects + + + # Active Backup for Business (ABB) + if _is_synology_active_backup_for_business(subject, text): + ok, result, objects = _parse_active_backup_for_business(subject, text) + if ok: + return True, result, objects + + # Active Backup + if not _is_synology_active_backup(subject, text): + return False, {}, [] + + return _parse_active_backup(subject, text) \ No newline at end of file diff --git a/containers/backupchecks/src/backend/app/parsers/threecx.py b/containers/backupchecks/src/backend/app/parsers/threecx.py new file mode 100644 index 0000000..bfd7c1b --- /dev/null +++ b/containers/backupchecks/src/backend/app/parsers/threecx.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import re +from typing import Dict, Tuple, List + +from ..models import MailMessage + + +def try_parse_3cx(msg: MailMessage) -> Tuple[bool, Dict, List[Dict]]: + """Parse 3CX backup notification e-mails. 
+ + Expected: + Subject: '3CX Notification: Backup Complete - ' + Body contains: 'Backup name: ' + """ + subject = (msg.subject or "").strip() + if not subject: + return False, {}, [] + + m = re.match(r"^3CX Notification:\s*Backup Complete\s*-\s*(.+)$", subject, flags=re.IGNORECASE) + if not m: + return False, {}, [] + + job_name = m.group(1).strip() + + body = (getattr(msg, "text_body", None) or getattr(msg, "body", None) or "") + # Some sources store plain text in html_body; fall back if needed. + if not body: + body = getattr(msg, "html_body", None) or "" + + backup_file = None + m_file = re.search(r"^\s*Backup\s+name\s*:\s*(.+?)\s*$", body, flags=re.IGNORECASE | re.MULTILINE) + if m_file: + backup_file = m_file.group(1).strip() + + objects: List[Dict] = [] + if backup_file: + objects.append( + { + "name": backup_file, + "status": "Success", + "error_message": None, + } + ) + + result = { + "backup_software": "3CX", + "backup_type": None, + "job_name": job_name, + "overall_status": "Success", + "overall_message": None, + } + + return True, result, objects diff --git a/containers/backupchecks/src/backend/app/parsers/veeam.py b/containers/backupchecks/src/backend/app/parsers/veeam.py new file mode 100644 index 0000000..be9955b --- /dev/null +++ b/containers/backupchecks/src/backend/app/parsers/veeam.py @@ -0,0 +1,900 @@ +from __future__ import annotations + +import re +from typing import Dict, Tuple, List, Optional + +from ..models import MailMessage + + +# Supported backup types we recognise in the subject +VEEAM_BACKUP_TYPES = [ + "Backup Job", + "Backup Copy Job", + "Replica Job", + "Replication job", + "Configuration Backup", + "Agent Backup job", + "Veeam Backup for Microsoft 365", + "Scale-out Backup Repository", + "Health Check", +] + + +def _strip_html_tags(value: str) -> str: + """Very small helper to strip HTML tags from a string.""" + if not value: + return "" + value = re.sub(r"<\s*br\s*/?>", "\n", value, flags=re.IGNORECASE) + value = re.sub(r"

", "\n", value, flags=re.IGNORECASE) + value = re.sub(r"<[^>]+>", "", value) + return re.sub(r"\s+", " ", value).strip() + + +def _html_to_text_preserve_lines(html: str) -> str: + """Convert HTML to text while preserving meaningful line breaks. + + We intentionally keep
<br> and a few other line-break-like tags
as line breaks so that + multi-line warning/error blocks remain readable and can be stored as-is + for overrides. + """ + html = _normalize_html(html) + if not html: + return "" + + # Normalize common line-break-ish tags to newlines. + v = html + v = re.sub(r"<\s*br\s*/?>", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + v = re.sub(r"", "\n", v, flags=re.IGNORECASE) + + # Strip remaining tags. + v = re.sub(r"<[^>]+>", "", v) + + # Basic entity cleanup. + v = v.replace(" ", " ") + + # Normalize whitespace per line but keep line boundaries. + lines: List[str] = [] + for line in re.split(r"\r?\n", v): + line = re.sub(r"\s+", " ", (line or "")).strip() + if line: + lines.append(line) + return "\n".join(lines).strip() + + +def _extract_configuration_job_overall_message(html: str) -> Optional[str]: + """Extract a detailed overall message for Veeam Configuration Job mails. + + These mails often contain the real problem statement as a multi-line + warning/error block inside a table cell (not as objects and not in + blocks). + """ + + text = _html_to_text_preserve_lines(html) + if not text: + return None + + wanted_lines: List[str] = [] + for line in text.split("\n"): + # Example: + # 26-12-2025 10:00:23 Warning Skipping server certificate backup because encryption is disabled + if re.match(r"^\d{2}-\d{2}-\d{4}\s+\d{2}:\d{2}:\d{2}\s+(Warning|Failed|Error)\b", line): + wanted_lines.append(line) + + if not wanted_lines: + return None + + return "\n".join(wanted_lines).strip() or None + + +def _normalize_html(html: str) -> str: + """Normalize HTML content from mail bodies. + + Some sources can yield HTML strings with embedded NUL characters (e.g. + after a UTF-16 conversion). Those NULs break regex-based parsing. + """ + if not html: + return "" + # Be defensive: ensure we have a string. + if isinstance(html, bytes): + try: + html = html.decode("utf-8", errors="ignore") + except Exception: + html = str(html) + # Remove NULs and BOMs that may be present in some HTML payloads. + html = html.replace("\x00", "") + html = html.replace("\ufeff", "") + return html + + + +def _parse_storage_size_to_bytes(value: str) -> Optional[int]: + """Parse a human-readable storage size into bytes. + + Supports decimal comma (e.g. "107,6 TB") and units: + B, KB, MB, GB, TB, PB. + + Returns None when parsing fails. 
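+
+    Example (illustrative): "1,5 GB" parses to int(1.5 * 1024**3) = 1610612736 bytes,
+    since units are treated as binary (1024-based) multiples here.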
+ """ + if not value: + return None + + v = value.strip() + v = v.replace("\xa0", " ") + + m = re.search(r"(?i)([0-9]+(?:[\.,][0-9]+)?)\s*(B|KB|MB|GB|TB|PB)\b", v) + if not m: + return None + + num_raw = (m.group(1) or "").replace(",", ".") + try: + num = float(num_raw) + except Exception: + return None + + unit = (m.group(2) or "B").upper() + mult = { + "B": 1, + "KB": 1024, + "MB": 1024**2, + "GB": 1024**3, + "TB": 1024**4, + "PB": 1024**5, + }.get(unit, 1) + + try: + return int(num * mult) + except Exception: + return None + + +def _parse_free_percent(value: str) -> Optional[float]: + if not value: + return None + m = re.search(r"(?i)([0-9]+(?:[\.,][0-9]+)?)\s*%\s*free", value) + if not m: + return None + raw = (m.group(1) or "").replace(",", ".") + try: + return float(raw) + except Exception: + return None + + +def _parse_sobr_capacity_metrics_from_html(html: str) -> Dict: + """Extract repository-level capacity metrics for Veeam SOBR mails.""" + html = _normalize_html(html) + if not html: + return {} + + def _cell_value(label: str) -> Optional[str]: + # Matches: LABELVALUE + m = re.search( + rf"(?is)\s*{re.escape(label)}\s*\s*\s*]*>(.*?)", + html, + ) + if not m: + return None + return _strip_html_tags(m.group(1)).strip() or None + + used_raw = _cell_value("Used Space") + cap_raw = _cell_value("Capacity") + + used_b = _parse_storage_size_to_bytes(used_raw or "") + cap_b = _parse_storage_size_to_bytes(cap_raw or "") + + free_b = None + if used_b is not None and cap_b is not None: + free_b = max(cap_b - used_b, 0) + + free_pct = _parse_free_percent(cap_raw or "") + if free_pct is None and used_b is not None and cap_b: + try: + free_pct = float(round(((cap_b - used_b) / cap_b) * 100.0, 2)) + except Exception: + free_pct = None + + metrics = { + "storage_used_bytes": used_b, + "storage_capacity_bytes": cap_b, + "storage_free_bytes": free_b, + "storage_free_percent": free_pct, + "storage_used_raw": used_raw, + "storage_capacity_raw": cap_raw, + } + + # Only return when at least one numeric metric was parsed. + if used_b is None and cap_b is None: + return {} + return metrics + + +def _parse_sobr_extents_from_html(html: str) -> List[Dict]: + """Extract SOBR extents as objects. + + Rows contain columns: Extent | Capacity | Used Space | Status. + Status is usually "Online". + """ + html = _normalize_html(html) + if not html: + return [] + + snippet = re.sub(r"\s+", " ", html) + + row_pattern = re.compile(r"]*>(.*?)", re.IGNORECASE) + cell_pattern = re.compile(r"]*>(.*?)", re.IGNORECASE) + + objects: List[Dict] = [] + for row_match in row_pattern.finditer(snippet): + row_html = row_match.group(1) + cells = cell_pattern.findall(row_html) + if len(cells) < 3: + continue + + plain = [_strip_html_tags(c).strip() for c in cells] + if not plain or not plain[0]: + continue + + # Skip header rows + if plain[0].lower() in ["extent", "type", "name", "object"]: + continue + + extent_name = plain[0] + cap_raw = plain[1] if len(plain) > 1 else "" + used_raw = plain[2] if len(plain) > 2 else "" + status_raw = plain[3] if len(plain) > 3 else None + + # Heuristic: extent rows contain at least one size value. + if not re.search(r"(?i)\b(TB|GB|MB|KB|PB)\b", cap_raw + " " + used_raw): + continue + + obj = { + "name": extent_name, + "type": "Extent", + "status": (status_raw or "").strip() or None, + "error_message": None, + } + + # Attach metrics in error_message as a compact string for visibility (optional). 
+ used_b = _parse_storage_size_to_bytes(used_raw) + cap_b = _parse_storage_size_to_bytes(cap_raw) + free_pct = _parse_free_percent(cap_raw) + + # Keep numeric values in the object dict as well (ignored by DB today but useful for future). + obj["used_bytes"] = used_b + obj["capacity_bytes"] = cap_b + obj["free_percent"] = free_pct + + objects.append(obj) + + # Deduplicate by name while preserving order + seen=set() + unique=[] + for o in objects: + n=o.get('name') + if not n or n in seen: + continue + seen.add(n) + unique.append(o) + + return unique + +# ---- Scale-out Backup Repository (SOBR) wrapper ---- + +def _parse_sobr_from_html(html: str) -> Tuple[List[Dict], Dict]: + """Parse Veeam Scale-out Backup Repository report HTML. + + Returns (objects, metrics_dict). Objects are extents. + """ + objects = _parse_sobr_extents_from_html(html) + metrics = _parse_sobr_capacity_metrics_from_html(html) + return objects, metrics + +def _extract_overall_error_block(html: str) -> Optional[str]: + """Extract a detailed error block (if any) from Veeam HTML reports. + + Some Veeam mails (e.g. Backup Copy jobs) contain a message block like: + Cannot connect ... + """ + if not html: + return None + + m = re.search(r']*class="small_label"[^>]*>(.*?)', html, flags=re.IGNORECASE | re.DOTALL) + if not m: + return None + + text = _strip_html_tags(m.group(1)) + text = text.strip() + return text or None + + +def _extract_processing_overall_message(html: str) -> Optional[str]: + """Extract the Veeam "Processing " overall message when present. + + Some Veeam Backup Job templates include a "Processing " marker + in the session details header. We store that marker as overall_message so + that it is available for override rules. + """ + if not html: + return None + + html = _normalize_html(html) + # Look for a small_label span containing "Processing ". + m = re.search( + r'(?is)]*class="small_label"[^>]*>\s*(Processing\s+[^<\r\n]+?)\s*', + html, + ) + if not m: + return None + + text = _strip_html_tags(m.group(1) or "").strip() + if not text.lower().startswith("processing "): + return None + return text or None + + +def _extract_m365_overall_details_message(html: str) -> Optional[str]: + """Extract overall Details message from Veeam Backup for Microsoft 365 summary. + + Veeam Backup for Microsoft 365 emails often place an important warning/info + in the summary header table under the "Details" column (rowspan cell). + This message is not part of the object list and should be stored as the + overall message for the job run. + """ + if not html: + return None + + # Look for the summary "Details" cell (typically a header_td with rowspan). + candidates = re.findall( + r']*rowspan\s*=\s*["\']?\s*2\s*["\']?[^>]*>(.*?)', + html, + flags=re.IGNORECASE | re.DOTALL, + ) + if not candidates: + return None + + texts: List[str] = [] + for raw in candidates: + text = _strip_html_tags(raw) + text = re.sub(r"\s+", " ", (text or "")).strip() + if not text: + continue + + # Skip obvious non-messages + if text.lower() in {"details"}: + continue + + # Ignore tiny fragments + if len(text) < 20: + continue + + texts.append(text) + + if not texts: + return None + + # Prefer specific permission/role warnings over generic processed-count warnings. 
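+    # Illustrative: a cell reading "Missing application permissions: Sites.Read.All"
+    # wins over a generic "Warning 90 of 90 objects processed" banner (the specific
+    # permission name here is a made-up example).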
+ for t in texts: + if "Missing application permissions" in t or "Missing application roles" in t: + return t + + # If we have multiple candidates, avoid selecting the generic "Warning X of X objects processed" + generic_re = re.compile(r"^Warning\s+\d+\s+of\s+\d+\s+objects\s+processed\s*$", re.IGNORECASE) + filtered = [t for t in texts if not generic_re.match(t)] + if filtered: + return filtered[0] + + return texts[0] + + + + +def _parse_objects_from_html(html: str) -> Tuple[Optional[str], List[Dict]]: + """Try to extract objects from a Veeam HTML body. + + Returns (overall_message, objects). + Objects are returned as dicts: {name, type?, status?, error_message?} + """ + html = _normalize_html(html) + if not html: + return None, [] + + overall_message: Optional[str] = None + objects: List[Dict] = [] + + snippet = re.sub(r"\s+", " ", html) + + row_pattern = re.compile(r"]*>(.*?)", re.IGNORECASE) + cell_pattern = re.compile(r"]*>(.*?)", re.IGNORECASE) + + for row_match in row_pattern.finditer(snippet): + row_html = row_match.group(1) + cells = cell_pattern.findall(row_html) + if len(cells) < 2: + continue + + # Convert cells to plain text + plain = [_strip_html_tags(c) for c in cells] + plain = [p.strip() for p in plain] + + # Skip header rows + if plain and plain[0].lower() in ["type", "name", "object"]: + continue + + # Health Check reports contain a banner row "Health Check Summary". + # It often has a second column with the overall status (e.g. "Error") + # and can be incorrectly treated as an object row. + if plain and (plain[0] or "").strip().lower() == "health check summary": + continue + + # Veeam header lines like "Backup job:", "Replication job:", "Backup Copy job:", "Agent Backup job:", etc. + # These are overall banners and must never become an object. + if plain: + first = plain[0].strip() + first_l = first.lower() + if re.match(r"^(backup|replication|replica|backup\s+copy|configuration|agent\s+backup)\s+job\b", first_l): + # Keep a short overall message if present (mainly useful for failures), + # but never store it as an object. + if len(plain) >= 2: + msg_parts = [p for p in plain[1:] if p] + if msg_parts: + filtered_parts: List[str] = [] + for part in msg_parts: + p = (part or "").strip() + pl = p.lower() + # Ignore banner-only status markers and VM count summaries. + if pl in ["success", "succeeded", "warning", "failed", "error"]: + continue + if "vms processed" in pl or re.match(r"^\d+\s+of\s+\d+\s+vms\s+processed$", pl): + continue + # Microsoft 365 reports can contain a generic processed-count warning + # in the banner (e.g. "Warning 90 of 90 objects processed"). That + # is not the real issue description and must not block extraction + # of the summary "Details" message. + if "objects processed" in pl and re.match( + r"^warning\s+\d+\s+of\s+\d+\s+objects\s+processed$", pl + ): + continue + filtered_parts.append(p) + if filtered_parts: + overall_message = " ".join(filtered_parts).strip() + continue + + # Microsoft 365 layout: first col Type, second col Name, third col Status + # (name is not always a URL; can be e.g. "Discovery Search Mailbox"). + if len(plain) >= 3: + t = plain[0] + name = plain[1] + status = plain[2] + + lowered_status = (status or "").lower() + if t and name and any(k in lowered_status for k in ["success", "succeeded", "warning", "failed", "error"]): + # Best-effort details: only keep details when not-success (to avoid storing + # e.g. sizes/durations as "error_message" for successful objects). 
+ details_text = "" + if not any(k in lowered_status for k in ["success", "succeeded"]): + if len(plain) > 3: + for v in reversed(plain[3:]): + if v and v.strip(): + details_text = v.strip() + break + + objects.append( + { + "name": name, + "type": t, + "status": status or None, + "error_message": details_text or None, + } + ) + continue + + # Generic job object layout (common for Backup Job / Backup Copy Job): + # Name | Status | Start time | End time | Size | Read | Transferred | Duration | Details + # We treat column 1 as the object name and column 2 as the status. + if len(plain) >= 2: + name_candidate = (plain[0] or "").strip() + status_candidate = (plain[1] or "").strip() + lowered_status_candidate = status_candidate.lower() + + # Do not treat the Health Check Summary banner as an object. + if name_candidate.lower() == "health check summary": + continue + + if name_candidate and any( + key in lowered_status_candidate for key in ["success", "succeeded", "warning", "failed", "error"] + ): + details_text: Optional[str] = None + + # Only keep details when the object is not successful. + if not any(k in lowered_status_candidate for k in ["success", "succeeded"]): + # Prefer the last non-empty cell beyond the status column. + for idx in range(len(cells) - 1, 1, -1): + raw_cell_html = cells[idx] or "" + t = _html_to_text_preserve_lines(raw_cell_html) + if t: + details_text = t + break + + objects.append( + { + "name": name_candidate, + "type": None, + "status": status_candidate or None, + "error_message": details_text or None, + } + ) + continue + + # Fallback layout: first cell object name, second cell status + name_raw = plain[0] if plain else "" + status_raw = plain[1] if len(plain) > 1 else "" + + if not name_raw: + continue + + # Health Check reports contain a header row ("Health Check Summary") that looks + # like an object row but must never be treated as an object. + if name_raw.strip().lower() == "health check summary": + continue + + lowered_status = status_raw.lower() + if not any(key in lowered_status for key in ["success", "succeeded", "warning", "failed", "error"]): + continue + + objects.append( + { + "name": name_raw, + "type": None, + "status": status_raw.strip() or None, + "error_message": None, + } + ) + + # Prefer a "Processing " marker when present (used by Backup Job templates). + processing_msg = _extract_processing_overall_message(html) + if processing_msg: + overall_message = processing_msg + + # Veeam Backup for Microsoft 365 often places the overall warning/info in the + # summary header "Details" cell instead of an error span or object row. + m365_details_msg = _extract_m365_overall_details_message(html) + if m365_details_msg: + # Always prefer the M365 summary "Details" message over the generic + # processed-count warning that can appear in banner/header rows. + generic_processed_re = re.compile( + r"^Warning\s+\d+\s+of\s+\d+\s+objects\s+processed\s*$", re.IGNORECASE + ) + if (not overall_message) or generic_processed_re.match(overall_message or ""): + overall_message = m365_details_msg + else: + # If we already have a banner message, still override when it is a + # generic processed-count warning. 
+ if "objects processed" in (overall_message or "").lower() and generic_processed_re.match( + (overall_message or "").strip() + ): + overall_message = m365_details_msg + elif not overall_message: + overall_message = None + + # If we still did not get an overall header message, try to extract the error block + if not overall_message: + overall_message = _extract_overall_error_block(html) + + # Deduplicate by (type, name) while keeping order. In Veeam M365 reports the + # same "name" can appear under multiple object types (e.g. Mailbox and OneDrive). + seen = set() + unique_objects: List[Dict] = [] + for obj in objects: + n = obj.get("name") + t = obj.get("type") or "" + key = (t, n) + if not n or key in seen: + continue + seen.add(key) + unique_objects.append(obj) + + return overall_message, unique_objects + + +def _detect_backup_type_from_html(msg: MailMessage) -> Optional[str]: + html = _normalize_html(getattr(msg, "html_body", None) or "") + if "veeam backup for microsoft 365" in html.lower(): + return "Veeam Backup for Microsoft 365" + return None + + +def _detect_status_from_html(html: str) -> Optional[str]: + """Best-effort overall status detection from Veeam HTML. + + Some Veeam templates do not include a [Success]/[Warning]/[Failed] marker + in the subject (notably some Veeam Backup for Microsoft 365 mails). In + those cases the HTML header usually contains a status cell with class + reportSuccess/reportWarning/reportFailed. + """ + html = _normalize_html(html) + if not html: + return None + + m = re.search(r'(?i)class="report\s*(Success|Warning|Failed)"', html) + if m: + return m.group(1).capitalize() + + # Fallback: look for a standalone status word in the header. + m2 = re.search(r'(?i)\b(Success|Succeeded|Warning|Failed|Error)\b', html) + if not m2: + return None + + word = (m2.group(1) or "").lower() + if word in ["success", "succeeded"]: + return "Success" + if word in ["warning"]: + return "Warning" + return "Failed" + + +def _extract_job_name_and_type_from_html(html: str) -> Tuple[Optional[str], Optional[str]]: + """Extract job name and type from the Veeam HTML banner/header. 
+ + The Veeam HTML report usually contains a colored header like: + Backup job: + This is more reliable than the subject which may include counters like + (1 objects) 1 warning + """ + html = _normalize_html(html) + if not html: + return None, None + + m = re.search( + r"(?i)\b(backup\s+copy\s+job|backup\s+job|replica\s+job|replication\s+job|agent\s+backup\s+job|configuration\s+backup|scale-out\s+backup\s+repository)\s*:\s*([^<\r\n]+)", + html, + ) + if not m: + return None, None + + raw_type = re.sub(r"\s+", " ", (m.group(1) or "")).strip() + raw_name = re.sub(r"\s+", " ", (m.group(2) or "")).strip() + raw_name = raw_name.rstrip("-–—:").strip() + + type_norm_map = { + "backup job": "Backup job", + "backup copy job": "Backup Copy job", + "replica job": "Replica job", + "replication job": "Replication job", + "agent backup job": "Agent Backup job", + "configuration backup": "Configuration Backup", + "scale-out backup repository": "Scale-out Backup Repository", + } + backup_type = type_norm_map.get(raw_type.lower(), raw_type) + + return raw_name or None, backup_type or None + + +def _clean_job_name_from_rest(rest: str) -> str: + """Strip common suffixes like '(3 objects)' '(2 machines)' '(1 VMs)' and issues counter.""" + cleaned = rest.strip() + + # Remove trailing issues suffix like ", 0 issues" + cleaned = re.sub(r"\s*,\s*\d+\s+issues\s*$", "", cleaned, flags=re.IGNORECASE).strip() + + # Remove trailing count in parentheses + cleaned = re.sub(r"\(\d+\s+(objects?|machines?|vms?)\)\s*$", "", cleaned, flags=re.IGNORECASE).strip() + + return cleaned + + +def _strip_retry_suffix(job_name: Optional[str]) -> Optional[str]: + """Remove a trailing "(Retry)" / "(Retry N)" suffix from the job name. + + Some backup products append retry counters to the job name. We do not + want retries to create distinct jobs in the UI/database. + """ + if not job_name: + return job_name + + cleaned = re.sub(r"\s*\(\s*Retry(?:\s*\d+)?\s*\)\s*$", "", job_name, flags=re.IGNORECASE).strip() + return cleaned or None + + +def try_parse_veeam(msg: MailMessage) -> Tuple[bool, Dict, List[Dict]]: + """Try to parse a Veeam backup report mail. + + Returns: + (handled, result_dict, objects_list) + """ + subject = (msg.subject or "").strip() + if not subject: + return False, {}, [] + + # Some subjects can be prefixed (e.g. "SUPPORT EXPIRED") before the [Success]/[Warning] marker. + # Prefer the bracketed marker, but also support subjects like: + # Veeam Backup Job "X" finished with WARNING + # License / compliance mails can use [Error] instead of [Failed]. + m_status = re.search(r"(?i)\[(Success|Warning|Failed|Error)\]\s*(.*)$", subject) + m_finished = re.search(r"(?i)\bfinished\s+with\s+(Success|Warning|Failed|Error)\b", subject) + + html_body = _normalize_html(getattr(msg, "html_body", None) or "") + html_lower = html_body.lower() + + # Special-case: Veeam Backup for Microsoft 365 mails can come without a + # subject marker. Detect via HTML and extract status from the banner. + is_m365 = "veeam backup for microsoft 365" in html_lower + + # If we cannot detect a status marker and this is not an M365 report, + # we still try to parse when the subject strongly indicates a Veeam report. 
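+    # Illustrative: a subject such as "Veeam Health check (3 objects)" carries no
+    # [Success]/[Warning]/[Failed] marker but still matches the keyword list below.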
+ if not m_status and not m_finished and not is_m365: + lowered = subject.lower() + if not any(k in lowered for k in ["veeam", "backup job", "backup copy job", "replica job", "configuration backup", "health check"]): + return False, {}, [] + + if m_status: + status_word = m_status.group(1) + rest = m_status.group(2) + elif m_finished: + status_word = m_finished.group(1) + rest = subject + else: + status_word = _detect_status_from_html(html_body) or "Success" + rest = subject + + + # License Key mails: these are compliance/renewal notifications and should never + # participate in schedule inference (no Expected/Missed). They also have no objects. + subject_lower = subject.lower() + is_license_key = ( + 'license key' in subject_lower + or 'new license key is not available' in subject_lower + or 'license support has expired' in html_lower + ) + if is_license_key: + # Normalize common variants to keep the rest of the app consistent. + status_norm = (status_word or '').strip().lower() + if status_norm == 'succeeded': + status_norm = 'success' + if status_norm == 'failed': + status_norm = 'failed' + if status_norm == 'error': + status_word = 'Error' + elif status_norm == 'warning': + status_word = 'Warning' + elif status_norm == 'success': + status_word = 'Success' + elif status_norm == 'failed': + status_word = 'Failed' + result = { + 'backup_software': 'Veeam', + 'backup_type': 'License Key', + 'job_name': 'License Key', + 'overall_status': status_word or 'Error', + 'overall_message': 'Current license support has expired, but new license key is not available', + } + return True, result, [] + + # Configuration Job detection (may not have object details) + subj_lower = subject.lower() + is_config_job = ('backup configuration job' in subj_lower) or ('configuration backup for' in html_lower) + + # Extract job name/type from the HTML banner if present (more reliable than subject). + html_job_name, html_header_type = _extract_job_name_and_type_from_html(html_body) + + # Prefer HTML-driven backup type detection for special layouts (M365). 
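+    # Descriptive note: _detect_backup_type_from_html() currently only recognises
+    # "Veeam Backup for Microsoft 365"; every other type still comes from the HTML
+    # banner or the subject handling below.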
+ html_backup_type = _detect_backup_type_from_html(msg) + backup_type = html_backup_type or html_header_type + + # Otherwise determine backup type by looking for one of the known phrases in the subject + if not backup_type: + for candidate in VEEAM_BACKUP_TYPES: + if candidate.lower() in rest.lower(): + backup_type = candidate + break + + if not backup_type: + lowered = rest.lower() + if "replica" in lowered: + backup_type = "Replication job" + else: + backup_type = "Backup Job" + if is_config_job: + backup_type = "Configuration Job" + + # Normalize known backup type variants + if (backup_type or '').lower() == 'scale-out backup repository': + backup_type = 'Scale-out Backup Repository' + + # Normalize Health Check casing + if (backup_type or '').lower() == 'health check': + backup_type = 'Health Check' + + # Job name + job_name: Optional[str] = None + if not is_config_job and html_job_name: + job_name = html_job_name + elif is_config_job: + m_cfg = re.search( + r"Configuration\s+Backup\s+for\s+([^<\n\r]+)", + _normalize_html(getattr(msg, "html_body", None) or ""), + flags=re.IGNORECASE, + ) + if m_cfg: + job_name = m_cfg.group(1).strip() + else: + job_name = _clean_job_name_from_rest(rest) + else: + # Job name in quotes + job_match = re.search(r'\"([^\"]+)\"', rest) + if job_match: + job_name = job_match.group(1).strip() + else: + # M365 subject often: " - (N objects), X issues" + if backup_type == "Veeam Backup for Microsoft 365" and " - " in rest: + job_part = rest.split(" - ", 1)[1] + job_name = _clean_job_name_from_rest(job_part) + else: + job_name = _clean_job_name_from_rest(rest) + + # Health Check mails should always yield a stable job name. + if (backup_type or '').lower() == 'health check': + job_name = 'Health Check' + + # Health Check subjects usually look like: "Health check (N objects)". + # We normalize casing to keep job names stable. + if (backup_type or "").lower() == "health check": + job_name = "Health Check" + + + # SOBR subjects look like: "Scale-out Backup Repository: ". + if (backup_type or '').lower() == 'scale-out backup repository': + m_sobr = re.search(r"(?i)scale-?out\s+backup\s+repository\s*:\s*(.+)$", rest) + if m_sobr: + job_name = (m_sobr.group(1) or '').strip().rstrip('-–—:').strip() or job_name + + # Do not let retry counters create distinct job names. + job_name = _strip_retry_suffix(job_name) + + # Health Check reports should always map to a stable job name. + if (backup_type or '').lower() == 'health check': + job_name = 'Health Check' + + # Scale-out Backup Repository job name is the repository name after ':' + if (backup_type or '').lower() == 'scale-out backup repository': + m_repo = re.search(r'(?i)scale-?out\s+backup\s+repository\s*:\s*(.+)$', rest) + if m_repo: + job_name = (m_repo.group(1) or '').strip() + + + result: Dict = { + "backup_software": "Veeam", + "backup_type": backup_type, + "job_name": _strip_retry_suffix(job_name), + "overall_status": status_word, + } + + overall_message, objects = _parse_objects_from_html(html_body) + + # Scale-out Backup Repository parsing: extents + storage metrics + if (backup_type or '').lower() == 'scale-out backup repository': + sobr_objects, sobr_metrics = _parse_sobr_from_html(html_body) + if sobr_objects: + objects = sobr_objects + for k, v in (sobr_metrics or {}).items(): + result[k] = v + + if is_config_job: + objects = [] + + # Configuration Job mails usually store the meaningful warning/error + # text in a table cell rather than in an error span or object list. 
+ # Extract and store it so it becomes visible in details and can be + # used for overrides. + if status_word != "Success" and not overall_message: + overall_message = _extract_configuration_job_overall_message(html_body) + + + # Keep detailed overall message for non-success states, and always keep + # the "Processing " marker when present (used for overrides/rules). + if overall_message: + if status_word != "Success" or overall_message.lower().startswith("processing "): + result["overall_message"] = overall_message + + return True, result, objects diff --git a/containers/backupchecks/src/frontend/package.json b/containers/backupchecks/src/frontend/package.json new file mode 100644 index 0000000..e69de29 diff --git a/containers/backupchecks/src/static/css/layout.css b/containers/backupchecks/src/static/css/layout.css new file mode 100644 index 0000000..a1655a7 --- /dev/null +++ b/containers/backupchecks/src/static/css/layout.css @@ -0,0 +1,16 @@ +/* Global layout constraints + - Consistent content width across all pages + - Optimized for 1080p while preventing further widening on higher resolutions +*/ + +/* Default pages: use more horizontal space on 1920x1080 */ +main.content-container { + width: min(96vw, 1840px); + max-width: 1840px; +} + +/* Dashboard: keep the original width */ +main.dashboard-container { + width: min(90vw, 1728px); + max-width: 1728px; +} diff --git a/containers/backupchecks/src/static/css/status-text.css b/containers/backupchecks/src/static/css/status-text.css new file mode 100644 index 0000000..6533c22 --- /dev/null +++ b/containers/backupchecks/src/static/css/status-text.css @@ -0,0 +1,40 @@ +/* Status text coloring (only the text inside the status column/cell) */ +.status-text { font-weight: 600; } + +/* Core statuses */ +.status-success { color: var(--bs-success); } +.status-warning { color: var(--bs-warning); } +.status-error { color: var(--bs-danger); } +.status-failed { color: var(--bs-danger); } + +/* Missed: neutral */ +.status-missed { color: var(--bs-secondary); } + +/* Expected: neutral */ +.status-expected { color: var(--bs-secondary); } + +/* Override applied indicator */ +.status-override { color: var(--bs-primary); } + +/* Status icons: same shape, only color differs */ +.status-dot { + display: inline-block; + width: 0.7rem; + height: 0.7rem; + border-radius: 999px; + vertical-align: middle; +} + +.dot-success { background: var(--bs-success); } +.dot-warning { background: var(--bs-warning); } +.dot-failed { background: var(--bs-danger); } +.dot-missed { background: var(--bs-secondary); } +.dot-expected { + background: #fff; + border: 2px solid var(--bs-secondary); + box-sizing: border-box; +} +.dot-override { background: var(--bs-primary); } + +/* Optional: make empty status look normal */ +.status-text:empty { font-weight: inherit; } diff --git a/containers/backupchecks/src/static/favicon.ico b/containers/backupchecks/src/static/favicon.ico new file mode 100644 index 0000000..bc952f0 Binary files /dev/null and b/containers/backupchecks/src/static/favicon.ico differ diff --git a/containers/backupchecks/src/templates/auth/initial_setup.html b/containers/backupchecks/src/templates/auth/initial_setup.html new file mode 100644 index 0000000..c244707 --- /dev/null +++ b/containers/backupchecks/src/templates/auth/initial_setup.html @@ -0,0 +1,43 @@ +{% extends "layout/base.html" %} +{% block content %} +
+
+

Initial admin setup

+

Create the first admin user for this Backupchecks installation.

+
+
+ + +
+
+ + +
+
+ + +
+ +
+
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/auth/login.html b/containers/backupchecks/src/templates/auth/login.html new file mode 100644 index 0000000..0980662 --- /dev/null +++ b/containers/backupchecks/src/templates/auth/login.html @@ -0,0 +1,44 @@ +{% extends "layout/base.html" %} +{% block content %} +
+
+

Login

+
+
+ + +
+
+ + +
+
+ + +
+ + +
+
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/auth/password_reset_request.html b/containers/backupchecks/src/templates/auth/password_reset_request.html new file mode 100644 index 0000000..9a56f4c --- /dev/null +++ b/containers/backupchecks/src/templates/auth/password_reset_request.html @@ -0,0 +1,31 @@ +{% extends "layout/base.html" %} +{% block content %} +
+
+

Password reset

+

This is a placeholder page. Password reset is not fully implemented yet.

+
+
+ + +
+
+ + +
+ +
+
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/layout/base.html b/containers/backupchecks/src/templates/layout/base.html new file mode 100644 index 0000000..5ae1890 --- /dev/null +++ b/containers/backupchecks/src/templates/layout/base.html @@ -0,0 +1,179 @@ + +{% set _theme_pref = (current_user.theme_preference if current_user.is_authenticated else 'auto') %} + + + + Backupchecks + + + + + + + + + + + +
+ {% with messages = get_flashed_messages(with_categories=true) %} + {% if messages %} +
+ {% for category, message in messages %} + + {% endfor %} +
+ {% endif %} + {% endwith %} + + {% block content %}{% endblock %} +
+ + + + \ No newline at end of file diff --git a/containers/backupchecks/src/templates/main/changelog.html b/containers/backupchecks/src/templates/main/changelog.html new file mode 100644 index 0000000..1bfcad1 --- /dev/null +++ b/containers/backupchecks/src/templates/main/changelog.html @@ -0,0 +1,78 @@ +{% extends 'layout/base.html' %} + +{% block content %} +
+
+

Changelog

+
Product versions and changes.
+
+
+ +{# Completed (summary) #} +
+
+
Completed
+ History +
+
+ {% if changelog.completed_summary and changelog.completed_summary|length > 0 %} +
+ {% for item in changelog.completed_summary %} +
+

+ +

+
+
+ {% if item.overview and item.overview|length > 0 %} + {% for p in item.overview %} +

{{ p }}

+ {% endfor %} + {% endif %} + + {% if item.categories and item.categories|length > 0 %} + {% for cat in item.categories %} +
{{ cat.category }}
+ + {# NOTE: 'items' is a dict key; use bracket notation to avoid calling dict.items() #} + {% if cat['items'] and cat['items']|length > 0 %} + {% for it in cat['items'] %} +
+ {% if it.title %} +
{{ it.title }}
+ {% endif %} + {% if it.details and it.details|length > 0 %} +
    + {% for d in it.details %} +
  • {{ d }}
+ {% endfor %} +
+ {% endif %} +
+ {% endfor %} + {% else %} +
No items in this section.
+ {% endif %} + {% endfor %} + {% elif item.highlights and item.highlights|length > 0 %} +
    + {% for h in item.highlights %} +
  • {{ h }}
+ {% endfor %} +
+ {% else %} +
No details.
+ {% endif %} +
+
+
+ {% endfor %} +
+ {% else %} +
No completed items.
+ {% endif %} +
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/main/customers.html b/containers/backupchecks/src/templates/main/customers.html new file mode 100644 index 0000000..153a546 --- /dev/null +++ b/containers/backupchecks/src/templates/main/customers.html @@ -0,0 +1,174 @@ +{% extends "layout/base.html" %} +{% block content %} +

Customers

+ +{% if can_manage %} +
+
+ +
+ + +
+ +
+ +
+ + +
+ + Export CSV +
+{% endif %} + +
+ + + + + + + {% if can_manage %} + + {% endif %} + + + + {% if customers %} + {% for c in customers %} + + + + + {% if can_manage %} + + {% endif %} + + {% endfor %} + {% else %} + + + + {% endif %} + +
Customer | Active | Number of jobs | Actions
{{ c.name }} + {% if c.active %} + Active + {% else %} + Inactive + {% endif %} + + {% if c.job_count > 0 %} + {{ c.job_count }} + {% else %} + 0 + {% endif %} + + +
+ +
+
+ No customers found. +
+
+ +{% if can_manage %} + + + + +{% endif %} +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/daily_jobs.html b/containers/backupchecks/src/templates/main/daily_jobs.html new file mode 100644 index 0000000..8a42b15 --- /dev/null +++ b/containers/backupchecks/src/templates/main/daily_jobs.html @@ -0,0 +1,819 @@ +{% extends "layout/base.html" %} + +{% block content %} +

Daily Jobs

+ +
+
+ + +
+
+ +
+
+ +
+ + + + + + + + + + + + + {% if rows %} + {% for row in rows %} + + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
Customer | Backup | Type | Job name | Time | Last result
{{ row.customer_name or "-" }}{{ row.backup_software or "-" }}{{ row.backup_type or "-" }}{{ row.job_name or "-" }}{{ row.expected_time }} + {% set _s = (row.last_status or "")|lower %} + {% set _dot = "" %} + {% if row.last_override_applied %}{% set _dot = "dot-override" %} + {% elif _s == "success" %}{% set _dot = "dot-success" %} + {% elif _s == "warning" %}{% set _dot = "dot-warning" %} + {% elif _s == "error" %}{% set _dot = "dot-failed" %} + {% elif _s == "failed" %}{% set _dot = "dot-failed" %} + {% elif _s == "missed" %}{% set _dot = "dot-missed" %} + {% elif _s == "expected" %}{% set _dot = "dot-expected" %} + {% endif %} + {% if _dot %}{% endif %} + {% if row.has_active_ticket or row.has_active_remark %} + + {% if row.has_active_ticket %}🎫{% endif %} + {% if row.has_active_remark %}💬{% endif %} + + {% endif %} + {% if row.run_count > 1 %} + {{ row.run_count }} runs + {% elif row.run_count == 1 %} + 1 run + {% endif %} +
+ No jobs are scheduled for this date. +
+
+ + + + + + + + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/dashboard.html b/containers/backupchecks/src/templates/main/dashboard.html new file mode 100644 index 0000000..e244619 --- /dev/null +++ b/containers/backupchecks/src/templates/main/dashboard.html @@ -0,0 +1,173 @@ +{% extends "layout/base.html" %} +{% block main_class %}container dashboard-container{% endblock %} +{% block content %} +

Dashboard

+ +
+
+
+
+
Inbox
+
{{ inbox_count }}
+
Open items
+
+
+
+ +
+
+
+
+
+
Success
+
{{ jobs_success_count }}
+
+
+
+
+
+
+
Success (override)
+
{{ jobs_success_override_count }}
+
+
+
+
+
+
+
Expected
+
{{ jobs_expected_count }}
+
+
+
+
+
+
+
Warning
+
{{ jobs_warning_count }}
+
+
+
+
+
+
+
Failed
+
{{ jobs_error_count }}
+
+
+
+
+
+
+
Missed
+
{{ jobs_missed_count }}
+
+
+
+
+
+
+ + + +
+
Legend
+
+
+
Success — job run completed successfully
+
Failed — job run failed, action required
+
Warning — job run completed with a warning
+
Missed — job run expected but did not execute
+
Expected — job run not yet due
+
Success (override) — marked as successful via override
+
+
+
+{% if news_items %} +
+
+ News + {% if active_role == 'admin' %} + Manage + {% endif %} +
+
+
+ {% for item in news_items %} +
+
+
{{ item.title }}
+
+ {% if item.pinned %} + Pinned + {% endif %} + {% if item.severity == 'warning' %} + Warning + {% else %} + Info + {% endif %} +
+
+
{{ item.body }}
+
+ {% if item.link_url %} + Open link + {% endif %} +
+ +
+
+
+ {% endfor %} +
+
+
+{% endif %} +
+

Backupchecks provides a centralized and consistent overview of the health and reliability of all backups within your environment. The platform collects backup results from multiple backup solutions and normalizes them into a single, clear status model. This lets teams monitor backup quality across different vendors and environments in a predictable and uniform way.

+

Backup results are imported and evaluated automatically. Each backup run is analyzed and assigned a status such as Success, Warning, Failed, or Success (override). These statuses are determined by interpreting exit codes, detected error messages, log content, and configured rules, ensuring that the reported outcome reflects the real operational impact rather than raw technical output alone.

+

The dashboard provides an at-a-glance overview of the current backup situation:

+
    +
  • A consolidated summary of all monitored backup jobs and their latest known results.
+
  • Clear counters for successful, warning, failed, and overridden runs.
+
  • Immediate visibility into environments that require attention or follow-up.
+
+

The Daily Jobs view shows the most recent run per backup job, grouped by customer, backup software, and backup type. This view is intended for high-level monitoring and trend awareness. It reflects the latest state of each job, but it does not replace the daily operational review process.

+

Daily operational validation is performed from the Run Checks page, which acts as the primary workspace for reviewing backup runs. All runs that require attention are listed here, allowing operators to systematically review results and decide on the appropriate next step. The objective of this process is to actively work through these runs and keep the Run Checks page clear.

+

When reviewing a run, operators assess whether the result is acceptable, requires follow-up, or can be treated as successful. A run can be marked as reviewed once it has been checked, even if additional actions are required. Marking a run as reviewed confirms that the result has been acknowledged and assessed, and prevents it from repeatedly appearing as unprocessed.

+

If a backup run requires further investigation or corrective action, operators can add a remark or reference an external ticket number. After adding this information, the run can still be marked as reviewed, ensuring that it no longer blocks daily checks.

+

Reviewed runs that require follow-up retain their status until they are explicitly marked as resolved. The reviewed state remains in place to indicate that the run has been handled operationally, while the resolved state confirms that the underlying issue has been fully addressed.

+

Overrides can be applied during this process when a warning or error is known, accepted, or considered non-critical. Overrides allow such runs to be treated as successful for reporting and dashboard purposes, while preserving the original messages and maintaining a full audit trail.

+

The ultimate goal of the Run Checks workflow is to maintain an empty or near-empty Run Checks page.

+

Backupchecks is designed as a monitoring, validation, and control platform. It does not replace your backup software, but enhances it by adding structured review workflows, consistent reporting, and operational clarity across all backup solutions.

+ + + + +
+ +{% if active_role == 'admin' %} +
+
+ System status +
+
+
+
+ Database size: {{ db_size_human }} +
+
+ Free disk space: + {% if free_disk_warning %} + {{ free_disk_human }} + (mail import will be blocked below 2 GB) + {% else %} + {{ free_disk_human }} + {% endif %} +
+
+
+
+{% endif %} + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/feedback.html b/containers/backupchecks/src/templates/main/feedback.html new file mode 100644 index 0000000..cd59203 --- /dev/null +++ b/containers/backupchecks/src/templates/main/feedback.html @@ -0,0 +1,98 @@ +{% extends "layout/base.html" %} + +{% block content %} +
+

Feedback

+ New +
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+
+ +
+ + + + + + + + + + + + + {% if not items %} + + + + {% endif %} + + {% for i in items %} + + + + + + + + + {% endfor %} + +
Votes | Title | Type | Component | Status | Created
No items found.
+
+ + +
+
+ {{ i.title }} + {% if i.created_by %} +
by {{ i.created_by }}
+ {% endif %} +
+ {% if i.item_type == 'bug' %} + Bug + {% else %} + Feature + {% endif %} + {{ i.component or '-' }} + {% if i.status == 'resolved' %} + Resolved + {% else %} + Open + {% endif %} + +
{{ i.created_at }}
+
Updated {{ i.updated_at }}
+
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/main/feedback_detail.html b/containers/backupchecks/src/templates/main/feedback_detail.html new file mode 100644 index 0000000..729e2d7 --- /dev/null +++ b/containers/backupchecks/src/templates/main/feedback_detail.html @@ -0,0 +1,80 @@ +{% extends "layout/base.html" %} + +{% block content %} +
+
+

{{ item.title }}

+
+ {% if item.item_type == 'bug' %} + Bug + {% else %} + Feature + {% endif %} + {% if item.status == 'resolved' %} + Resolved + {% else %} + Open + {% endif %} + by {{ created_by_name }} +
+
+ Back +
+ +
+
+
+
+ {% if item.component %} +
Component: {{ item.component }}
+ {% endif %} +
{{ item.description }}
+
+ +
+
+ +
+
+
+

Actions

+ + {% if active_role == 'admin' %} + {% if item.status == 'resolved' %} +
+ + +
+ {% else %} +
+ + +
+ {% endif %} + +
+ +
+ {% else %} +
Only administrators can resolve or delete items.
+ {% endif %} +
+
+
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/main/feedback_new.html b/containers/backupchecks/src/templates/main/feedback_new.html new file mode 100644 index 0000000..ebc6b05 --- /dev/null +++ b/containers/backupchecks/src/templates/main/feedback_new.html @@ -0,0 +1,37 @@ +{% extends "layout/base.html" %} + +{% block content %} +
+

New Feedback

+ Back +
+ +
+
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+ +
+{% endblock %} diff --git a/containers/backupchecks/src/templates/main/inbox.html b/containers/backupchecks/src/templates/main/inbox.html new file mode 100644 index 0000000..95e8c78 --- /dev/null +++ b/containers/backupchecks/src/templates/main/inbox.html @@ -0,0 +1,331 @@ +{% extends "layout/base.html" %} + + + +{# Pager macro must be defined before it is used #} +{% macro pager(position, page, total_pages, has_prev, has_next) -%} +
+
+ {% if has_prev %} + Previous + {% else %} + + {% endif %} + {% if has_next %} + Next + {% else %} + + {% endif %} +
+ + {% if current_user.is_authenticated and active_role in ["admin", "operator"] %} +
+ +
+ {% endif %} + +
+ Page {{ page }} of {{ total_pages }} +
+ + + +
+
+
+{%- endmacro %} + +{% block content %} +

Inbox

+ +{{ pager("top", page, total_pages, has_prev, has_next) }} + +
+ + + + + + + + + + + + + + + + {% if rows %} + {% for row in rows %} + + + + + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
From | Subject | Date / time | Backup | Type | Job name | Overall | Parsed | EML
{{ row.from_address }}{{ row.subject }}{{ row.received_at }}{{ row.backup_software }}{{ row.backup_type }}{{ row.job_name }}{{ row.overall_status }}{{ row.parsed_at }} + {% if row.has_eml %} + EML + {% endif %} +
+ No messages found. +
+
+ +{{ pager("bottom", page, total_pages, has_prev, has_next) }} + + + + + + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/inbox_deleted.html b/containers/backupchecks/src/templates/main/inbox_deleted.html new file mode 100644 index 0000000..94d7ab6 --- /dev/null +++ b/containers/backupchecks/src/templates/main/inbox_deleted.html @@ -0,0 +1,200 @@ +{% extends "layout/base.html" %} + +{# Pager macro must be defined before it is used #} +{% macro pager(position, page, total_pages, has_prev, has_next) -%} +
+
+ {% if has_prev %} + Previous + {% else %} + + {% endif %} + {% if has_next %} + Next + {% else %} + + {% endif %} +
+ +
+ Page {{ page }} of {{ total_pages }} +
+ + + +
+
+
+{%- endmacro %} + +{% block content %} +

Deleted mails

+ +{{ pager("top", page, total_pages, has_prev, has_next) }} + +
+ + + + + + + + + + + + + + {% if rows %} + {% for row in rows %} + + + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
FromSubjectReceivedDeleted byDeleted atEMLActions
{{ row.from_address }}{{ row.subject }}{{ row.received_at }}{{ row.deleted_by }}{{ row.deleted_at }} + {% if row.has_eml %} + EML + {% endif %} + +
+ +
+
+ No deleted messages found. +
+
+ +{{ pager("bottom", page, total_pages, has_prev, has_next) }} + + + + + + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/job_detail.html b/containers/backupchecks/src/templates/main/job_detail.html new file mode 100644 index 0000000..382a7bc --- /dev/null +++ b/containers/backupchecks/src/templates/main/job_detail.html @@ -0,0 +1,386 @@ +{% extends "layout/base.html" %} +{% block content %} +

Job details

+ +
+
+
+
Customer
+
{{ job.customer.name if job.customer else "" }}
+ +
Backup
+
{{ job.backup_software }}
+ +
Type
+
{{ job.backup_type }}
+ +
Job name
+
{{ job.job_name }}
+ +
Tickets
+
{{ ticket_open_count }} open / {{ ticket_total_count }} total
+ +
Remarks
+
{{ remark_open_count }} open / {{ remark_total_count }} total
+ {% if schedule_map %} +
Schedule (inferred)
+
+
+ + + + + + + + + + + + + +
DayTimes (15 min blocks)
Mon{{ ', '.join(schedule_map[0]) if schedule_map[0] else '—' }}
Tue{{ ', '.join(schedule_map[1]) if schedule_map[1] else '—' }}
Wed{{ ', '.join(schedule_map[2]) if schedule_map[2] else '—' }}
Thu{{ ', '.join(schedule_map[3]) if schedule_map[3] else '—' }}
Fri{{ ', '.join(schedule_map[4]) if schedule_map[4] else '—' }}
Sat{{ ', '.join(schedule_map[5]) if schedule_map[5] else '—' }}
Sun{{ ', '.join(schedule_map[6]) if schedule_map[6] else '—' }}
+
+
+ {% endif %} +
+
+
+ +{% if can_manage_jobs %} +
+ +
+{% endif %} + +

Job history

+ +
+ + + + + + + + + + {% if active_role == 'admin' %} + + + {% endif %} + + + + {% if history_rows %} + {% for r in history_rows %} + + + + {% set _s = (r.status or "")|lower %} + {% set _is_override = (r.override_applied is defined and r.override_applied) or ('override' in _s) %} + {% set _dot = '' %} + {% if _is_override %}{% set _dot = 'dot-override' %} + {% elif _s == 'success' %}{% set _dot = 'dot-success' %} + {% elif _s == 'warning' %}{% set _dot = 'dot-warning' %} + {% elif _s == 'error' %}{% set _dot = 'dot-failed' %} + {% elif _s == 'failed' %}{% set _dot = 'dot-failed' %} + {% elif _s == 'missed' %}{% set _dot = 'dot-missed' %} + {% endif %} + + + + + {% if active_role == 'admin' %} + + + {% endif %} + + {% endfor %} + {% else %} + + + + {% endif %} + +
Day runRun timeStatusTicketsRemarksOverrideReviewed byReviewed at
{{ r.run_day }}{{ r.run_at }}{% if _dot %}{% endif %}{{ r.status }}{% if r.ticket_codes %}🎫 {{ r.ticket_codes|join(", ") }}{% endif %}{% if r.has_remark %}💬{% endif %}{% if r.override_applied %}Override{% endif %}{{ r.reviewed_by }}{{ r.reviewed_at }}
+ No runs found. +
+
+ +{% if total_pages > 1 %} +
+
+ {% if has_prev %} + Previous + {% else %} + + {% endif %} + {% if has_next %} + Next + {% else %} + + {% endif %} +
+
+ Page {{ page }} of {{ total_pages }} +
+ + + +
+
+
+{% endif %} + + + + + + + + + +{% endblock %} \ No newline at end of file diff --git a/containers/backupchecks/src/templates/main/jobs.html b/containers/backupchecks/src/templates/main/jobs.html new file mode 100644 index 0000000..1d7ddde --- /dev/null +++ b/containers/backupchecks/src/templates/main/jobs.html @@ -0,0 +1,35 @@ +{% extends "layout/base.html" %} +{% block content %} +

Jobs

+ +
+ + + + + + + + + + + {% if jobs %} + {% for j in jobs %} + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
CustomerBackupTypeJob name
{{ j.customer_name }}{{ j.backup_software }}{{ j.backup_type }}{{ j.job_name }}
+ No jobs found. +
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/main/logging.html b/containers/backupchecks/src/templates/main/logging.html new file mode 100644 index 0000000..f1ce5c3 --- /dev/null +++ b/containers/backupchecks/src/templates/main/logging.html @@ -0,0 +1,90 @@ +{% extends "layout/base.html" %} +{% block content %} +

Logging

+ + + +
+
+ Admin activity (last 7 days) +
+
+
+ + + + + + + + + + + + + + + + + + + {% if logs %} + {% for log in logs %} + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
TimeUserEventMessageDetails
{{ log.created_at }}{{ log.user or "-" }}{{ log.event_type }}{{ log.message }}{{ log.details }}
+ No log entries available. +
+
+ + {% if total_pages and total_pages > 1 %} + + {% endif %} +
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/main/overrides.html b/containers/backupchecks/src/templates/main/overrides.html new file mode 100644 index 0000000..0af9056 --- /dev/null +++ b/containers/backupchecks/src/templates/main/overrides.html @@ -0,0 +1,249 @@ +{% extends "layout/base.html" %} + +{% block content %} +

Overrides

+ +{% if can_manage %} +
+
+ Add override +
+
+
+ + +
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ +
+ + +
+
+
+ + +
+
+ + +
+
+
+
+{% endif %} + +
+
+ Existing overrides +
+
+
+ + + + + + + + + + {% if can_manage %} + + {% endif %} + + + + {% if overrides %} + {% for ov in overrides %} + + + + + + + + {% if can_manage %} + + {% endif %} + + {% endfor %} + {% else %} + + + + {% endif %} + +
LevelScopeFromUntilActiveCommentActions
{{ ov.level }}{{ ov.scope }}{{ ov.start_at }}{{ ov.end_at or "-" }} + {% if ov.active %} + Active + {% else %} + Inactive + {% endif %} + {{ ov.comment }} + +
+ +
+ {% if can_delete %} +
+ +
+ {% endif %} +
+ No overrides defined. +
+
+
+
+ + + + +{% endblock %} \ No newline at end of file diff --git a/containers/backupchecks/src/templates/main/overrides_edit.html b/containers/backupchecks/src/templates/main/overrides_edit.html new file mode 100644 index 0000000..a752f3a --- /dev/null +++ b/containers/backupchecks/src/templates/main/overrides_edit.html @@ -0,0 +1,107 @@ +{% extends "layout/base.html" %} + +{% block content %} +

Edit override

+ + + +
+
+ Override #{{ ov.id }} +
+
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ +
+ + +
+
+
+ +
+ + +
+
+
+ + +
+
+ + Cancel +
+
+
+
+ +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/parsers.html b/containers/backupchecks/src/templates/main/parsers.html new file mode 100644 index 0000000..fca4506 --- /dev/null +++ b/containers/backupchecks/src/templates/main/parsers.html @@ -0,0 +1,358 @@ +{% extends "layout/base.html" %} + +{% block content %} +

Parsers

+ +
+
+ Mail processing flow +
+
+
    +
  1. + Retrieval
    + Mail is retrieved from Microsoft Graph (manual or automatic import) and stored in the database as a MailMessage record + including sender, subject, received time and message body (HTML and plain text). +
  2. +
  3. + Preprocessing
    + The message body is normalized (line endings, character set) so that the parsers can work with a consistent format. +
  4. +
  5. + Parser selection
    + All active parsers are evaluated in a fixed order. For each parser, match criteria like sender address, subject text and body + snippets are checked. +
  6. +
  7. + Parsing
    + As soon as a parser matches, it extracts: +
      +
    • Backup software (for example: Veeam, NAKIVO, Panel3, Syncovery)
    • +
    • Backup type (for example: Backup Job, Backup Copy Job, Replication job for VMware)
    • +
    • Job name
    • +
    • Objects within the job (for example: VMs, servers, repositories) including their status and any error message
    • +
    +
  8. +
  9. + Storage and linkage
    + The parsed result is stored in the database: +
      +
    • Job – one record per backup job
    • +
    • JobRun – one record per run of a job
    • +
    • JobObject – one record per object inside a run
    • +
    + The mail itself remains linked to the run via mail_message_id. +
  10. +
  11. + Inbox and approval
    + If a job for this mail has not been approved yet, the mail appears in the Inbox with the parsed result. After approval the run + is shown on the Jobs and Daily Jobs pages. +
  12. +
+
+
+ +
+
+ Available parsers +
+
+
+ + + + + + + + + + + + + {% if parsers %} + {% for p in parsers %} + + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
NameBackup softwareBackup type(s)Match criteriaOrderEnabled
{{ p.name }}{{ p.backup_software }} + {% if p.backup_types %} + {{ p.backup_types | join(", ") }} + {% else %} + - + {% endif %} + + {% set parts = [] %} + {% if p.match.from_contains %} + {% set _ = parts.append("from contains '" ~ p.match.from_contains ~ "'") %} + {% endif %} + {% if p.match.subject_contains %} + {% set _ = parts.append("subject contains '" ~ p.match.subject_contains ~ "'") %} + {% endif %} + {% if p.match.subject_regex %} + {% set _ = parts.append("subject matches /" ~ p.match.subject_regex ~ "/") %} + {% endif %} + {% if parts %} + {{ parts | join(", ") }} + {% else %} + - + {% endif %} + {{ p.order }} + {% if p.enabled %} + Enabled + {% else %} + Disabled + {% endif %} +
+ No parsers defined. +
+
+
+
+ +{% if parsers %} + {% for p in parsers %} +
+
+ {{ p.name }} – {{ p.backup_software }}{% if p.backup_types %} ({{ p.backup_types | join(", ") }}){% endif %} +
+
+

{{ p.description }}

+ + {% set ex_list = p.examples if p.examples is defined else ([p.example] if p.example is defined else []) %} + + {# Add supported Veeam backup-type examples that are configured/used in this installation. #} + {% if p.backup_software == 'Veeam' %} + {% set veeam_examples = [ + { + 'from_address': 'Veeam Backup & Replication ', + 'subject': '[Success] Replication Job: VMware - DR Copy', + 'body_snippet': 'Replication job "VMware - DR Copy" finished successfully\nObjects processed: 2\nVM01 - Success\nVM02 - Success', + 'parsed_result': { + 'backup_software': 'Veeam', + 'backup_type': 'Replication Job', + 'job_name': 'VMware - DR Copy', + 'objects': [ + {'name': 'VM01', 'status': 'success', 'error_message': ''}, + {'name': 'VM02', 'status': 'success', 'error_message': ''} + ] + } + }, + { + 'from_address': 'Veeam Agent ', + 'subject': '[Warning] Agent Backup Job: LAPTOP-123', + 'body_snippet': 'Agent backup job "LAPTOP-123" finished with warnings\n1 of 1 objects processed\nLAPTOP-123 - Warning', + 'parsed_result': { + 'backup_software': 'Veeam', + 'backup_type': 'Agent Backup', + 'job_name': 'LAPTOP-123', + 'objects': [ + {'name': 'LAPTOP-123', 'status': 'warning', 'error_message': 'Finished with warnings'} + ] + } + }, + { + 'from_address': 'Veeam Backup for Microsoft 365 ', + 'subject': '[Success] Microsoft 365 Backup Job: M365 Daily', + 'body_snippet': 'Microsoft 365 backup job "M365 Daily" finished successfully\nObjects processed: 3\nExchange - Success\nSharePoint - Success\nOneDrive - Success', + 'parsed_result': { + 'backup_software': 'Veeam', + 'backup_type': 'Microsoft 365 Backup', + 'job_name': 'M365 Daily', + 'objects': [ + {'type': 'Exchange', 'name': 'Exchange', 'status': 'success', 'error_message': ''}, + {'type': 'SharePoint', 'name': 'SharePoint', 'status': 'success', 'error_message': ''}, + {'type': 'OneDrive', 'name': 'OneDrive', 'status': 'success', 'error_message': ''} + ] + } + } + + , + { + 'from_address': 'Veeam Backup & Replication ', + 'subject': '[Success] Scale-out Backup Repository: CEPH RBD', + 'body_snippet': 'Scale-out Backup Repository: CEPH RBD +Extents: +- CEPH RBD1 +- CEPH RBD2 +Used Space: 107,6 TB +Capacity: 200 TB (66% free)', + 'parsed_result': { + 'backup_software': 'Veeam', + 'backup_type': 'Scale-out Backup Repository', + 'job_name': 'CEPH RBD', + 'overall_status': 'Success', + 'objects': [ + {'type': 'Extent', 'name': 'CEPH RBD1', 'status': 'Online', 'error_message': ''}, + {'type': 'Extent', 'name': 'CEPH RBD2', 'status': 'Online', 'error_message': ''} + ] + } + }, + { + 'from_address': 'Veeam Backup & Replication ', + 'subject': '[Success] Health check (1 objects)', + 'body_snippet': 'Health Check Summary\nSuccess: 1\nWarnings: 0\nErrors: 0\n\nObjects:\nHealthCheck Blygold DC01 - Success', + 'parsed_result': { + 'backup_software': 'Veeam', + 'backup_type': 'Health Check', + 'job_name': 'Health Check', + 'overall_status': 'Success', + 'objects': [ + {'name': 'HealthCheck Blygold DC01', 'status': 'Success', 'error_message': ''} + ] + } + } + ] %} + {% set ex_list = ex_list + veeam_examples %} + {% endif %} + + {# Add Synology Active Backup examples. 
#} + {% if p.backup_software == 'Synology' %} + {% set synology_examples = [ + { + 'from_address': 'Synology Active Backup ', + 'subject': 'NAS - Active Backup for Google Workspace - back-uptaak [Google D-Two] is gedeeltelijk voltooid', + 'body_snippet': 'Back-up [Google D-Two] is voltooid, maar van sommige items kon geen back-up worden gemaakt.\n- Mijn schijf: succes: 0; waarschuwing: 11; fout: 0\n- Mail: succes: 0; waarschuwing: 11; fout: 0\nStarttijd: 2025-12-18 00:00:02\nEindtijd: 2025-12-18 00:01:38', + 'parsed_result': { + 'backup_software': 'Synology', + 'backup_type': 'Active Backup', + 'job_name': 'Google D-Two', + 'overall_status': 'Warning', + 'objects': [] + } + } + + , + { + 'from_address': 'SCHWARZNAS ', + 'subject': 'Gegevensback-uptaak op SCHWARZNAS is mislukt', + 'body_snippet': 'Gegevensback-uptaak op SCHWARZNAS is mislukt.\nTaaknaam: Data backup - NAS thuis\nGa naar Hyper Backup > Logboek voor meer informatie.', + 'parsed_result': { + 'backup_software': 'Synology', + 'backup_type': 'Hyperbackup', + 'job_name': 'Data backup - NAS thuis', + 'overall_status': 'Failed', + 'objects': [] + } + } + + + , + { + 'from_address': 'DiskStation ', + 'subject': '[Golfpark NAS] HiDrive cloud backup - NAS Backup - Strato HiDrive successful on DiskStation', + 'body_snippet': 'Your backup task NAS Backup - Strato HiDrive is now complete.\n\nBackup Task: NAS Backup - Strato HiDrive\nBackup Destination: /users/.../DiskStation_1.hbk\nStart Time: Wed, Dec 17 2025 23:00:01\nDuration: 17 Minute 9 Second', + 'parsed_result': { + 'backup_software': 'Synology', + 'backup_type': 'Strato HiDrive', + 'job_name': 'NAS Backup - Strato HiDrive', + 'overall_status': 'Success', + 'objects': [] + } + }] %} + {% set ex_list = ex_list + synology_examples %} + {% endif %} + + + {# Add NAKIVO VMware examples. #} + {% if p.backup_software == 'NAKIVO' %} + {% set nakivo_examples = [ + { + 'from_address': 'NAKIVO Backup & Replication ', + 'subject': '"exchange01.kuiperbv.nl" job: Successful', + 'body_snippet': 'Job Run Report\nBackup job for VMware\nexchange01.kuiperbv.nl\nSuccessful', + 'parsed_result': { + 'backup_software': 'NAKIVO', + 'backup_type': 'Backup job for VMware', + 'job_name': 'exchange01.kuiperbv.nl', + 'overall_status': 'Success', + 'objects': [ + {'name': 'exchange01.kuiperbv.nl', 'status': 'success', 'error_message': ''} + ] + } + } + ] %} + {% set ex_list = ex_list + nakivo_examples %} + {% endif %} + + {% for ex in ex_list %} +
Example {{ loop.index }}
+
+
From
+
{{ ex.from_address }}
+ +
Subject
+
{{ ex.subject }}
+ +
Body snippet
+
+
{{ ex.body_snippet }}
+
+
+ +
Parsed result
+
+
Backup software
+
{{ ex.parsed_result.backup_software }}
+ +
Backup type
+
{{ ex.parsed_result.backup_type }}
+ +
Job name
+
{{ ex.parsed_result.job_name }}
+
+ +
+ + + + + + + + + + + {% for obj in ex.parsed_result.objects %} + + + + {% set _s = (obj.status or "")|lower %} + {% set _dot = '' %} + {% if _s == 'success' %}{% set _dot = 'dot-success' %} + {% elif _s == 'warning' %}{% set _dot = 'dot-warning' %} + {% elif _s == 'error' %}{% set _dot = 'dot-failed' %} + {% elif _s == 'failed' %}{% set _dot = 'dot-failed' %} + {% elif _s == 'missed' %}{% set _dot = 'dot-missed' %} + {% endif %} + + + + {% endfor %} + +
TypeObjectStatusError message
{{ obj.type or '' }}{{ obj.name }}{% if _dot %}{% endif %}{{ obj.status }}{{ obj.error_message }}
+
+ + {% if not loop.last %} +
+ {% endif %} + {% endfor %} + +
+
+ {% endfor %} +{% endif %} + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/remark_detail.html b/containers/backupchecks/src/templates/main/remark_detail.html new file mode 100644 index 0000000..3509fbf --- /dev/null +++ b/containers/backupchecks/src/templates/main/remark_detail.html @@ -0,0 +1,96 @@ +{% extends "layout/base.html" %} + +{% block content %} +
+

Remark

+ Back +
+ +
+
+
+ {% if remark.resolved_at %}✅ Resolved{% else %}💬 Active{% endif %} + Start: {{ remark.start_date.strftime('%d-%m-%Y %H:%M:%S') if remark.start_date else '-' }} + {% if remark.resolved_at %} + Resolved: {{ remark.resolved_at.strftime('%d-%m-%Y %H:%M:%S') }} + {% endif %} +
+ +
+ + +
+ {% if active_role in ['admin','operator'] %} +
+ + {% if not remark.resolved_at %} + + {% endif %} +
+ {% endif %} +
+
+
+ +
+
+
+
+
Scopes
+ {% if scopes %} +
    + {% for s in scopes %} +
  • +
    Type: {{ s.scope_type }}
    +
    Customer: {{ s.customer_id or '-' }}
    +
    Backup: {{ s.backup_software or '-' }}
    +
    Type: {{ s.backup_type or '-' }}
    +
    Job: {{ s.job_id or '-' }}
    +
    Job run: {{ s.job_run_id or '-' }}
    +
    Job name match: {{ s.job_name_match or '-' }}
    +
  • + {% endfor %} +
+ {% else %} +
No scopes.
+ {% endif %} +
+
+
+ +
+
+
+
Linked runs (last 20)
+ {% if runs %} +
+ + + + + + + + + + + {% for r in runs %} + + + + + + + {% endfor %} + +
Run atCustomerJobStatus
{{ r.run_at }}{{ r.customer_name }}{{ r.job_name }}{{ r.status }}
+
+ {% else %} +
No linked runs.
+ {% endif %} +
+
+
+
+ +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/reports.html b/containers/backupchecks/src/templates/main/reports.html new file mode 100644 index 0000000..a24da4c --- /dev/null +++ b/containers/backupchecks/src/templates/main/reports.html @@ -0,0 +1,438 @@ +{% extends "layout/base.html" %} +{% block content %} + +
+
+

Reports

+
Create report definitions and generate raw output for testing.
+
+ +
+ +
+
+
+
+
+
Report definitions
+
One-time reports are supported. Scheduling is a placeholder for now.
+
+
+ +
+
+
+
+ + + + + + + + + + + + + + + +
NameTypePeriodFormatActions
Loading…
+
+
+ +
+
+ +
+
+
+
Scheduling (placeholder)
+
This is a preview of the future scheduling UI.
+
+
+
+ + +
Coming soon.
+
+ +
+ + +
+ +
+ + +
+ +
+ +
+ + UTC +
+
+ +
+ Scheduling is not active yet. These controls are disabled on purpose. +
+
+
+
+
+ + + + + + + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/reports_new.html b/containers/backupchecks/src/templates/main/reports_new.html new file mode 100644 index 0000000..ac5b88f --- /dev/null +++ b/containers/backupchecks/src/templates/main/reports_new.html @@ -0,0 +1,357 @@ +{% extends "layout/base.html" %} +{% block content %} + +
+
+

New report

+
Create a one-time report definition. Generate output from the Reports overview.
+
+
+ Back +
+
+ +
+
+
+
+
Report basics
+
Fields below are designed to scale as reporting grows.
+
+
+
+ +
+
+ + +
+ +
+ + +
+ +
+ + +
+
+ +
+ +
Reporting period (UTC)
+
Pick a day from the month calendar and set the time next to it.
+ +
+
+ +
+ + + UTC +
+
+
+ +
+ + + UTC +
+
+ +
+
+ + + +
+
+
+ +
+ +
Scope selection
+
Generate for a single customer, multiple customers, or all customers.
+ +
+
+
+ + + + + + + + +
+
+ +
+ + +
Search will be added later. For MVP this is a simple dropdown.
+
+ +
+ + +
Hold Ctrl/Cmd to select multiple customers.
+
+ +
+
+ Jobs selection is set to all jobs for each selected customer in this iteration. +
+
+
+ +
+ +
+ + Cancel +
+
+
+
+ +
+
+
+
Scheduling (placeholder)
+
Disabled for now. Will be enabled in a future iteration.
+
+
+
+ + +
+ +
+ + +
+ +
+
Scheduling is not active yet.
+
+
+
+
+
+ + + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/run_checks.html b/containers/backupchecks/src/templates/main/run_checks.html new file mode 100644 index 0000000..541a854 --- /dev/null +++ b/containers/backupchecks/src/templates/main/run_checks.html @@ -0,0 +1,942 @@ +{% extends "layout/base.html" %} + +{% block content %} +
+

Run Checks

+
+ + {% if is_admin %} + + {% endif %} +
+
+ +
+ +
+ + + + + + + + + + + + + + {% if rows %} + {% for r in rows %} + + + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
+ + CustomerBackupTypeJobIndicatorsAction
+ + {{ r.customer_name }}{{ r.backup_software }}{{ r.backup_type }}{{ r.job_name }} + {% if r.status_indicators %} + + {% for si in r.status_indicators %} + + {% if si.dot %}{% endif %} + {{ si.count }} + + {% endfor %} + + {% endif %} + {% if r.has_active_ticket or r.has_active_remark %} + + {% if r.has_active_ticket %}🎫{% endif %} + {% if r.has_active_remark %}💬{% endif %} + + {% endif %} + + + Job page +
+ No runs to check. +
+
+ + + + + + + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/settings.html b/containers/backupchecks/src/templates/main/settings.html new file mode 100644 index 0000000..d033ed5 --- /dev/null +++ b/containers/backupchecks/src/templates/main/settings.html @@ -0,0 +1,655 @@ +{% extends "layout/base.html" %} +{% block content %} + +
+
+

Settings

+
Configure mail import, display options and maintenance actions.
+
+
+ + + + +
+
System status
+
+
+
+
+
Database size
+
{{ db_size_human }}
+
+
+
+
+
Free disk space
+
+ {% if free_disk_warning %} + {{ free_disk_human }} + (mail import will be blocked below 2 GB) + {% else %} + {{ free_disk_human }} + {% endif %} +
+
+
+
+
+
+ + +{% if section == 'general' %} +
+
+
Mail (Microsoft Graph)
+
+
+
+ + +
+
+ + +
+ +
+ + +
Leave empty to keep the existing secret.
+
+
+ + +
+ +
+ +
+ + +
+
Select the folder where backup report e-mails are fetched from.
+
+
+ +
+ + +
+
Select the folder where processed e-mails are moved to.
+
+
+
+
+ +
+
Daily Jobs
+
+
+
+ + +
Missed checks start after this date. Older runs are used to learn schedules.
+
+
+
+
+ +
+
Display
+
+
+
+ + +
Controls how timestamps are shown in the web interface (Logging, Jobs, Daily Jobs, Run Checks, etc.).
+
+
+
+
+ +
+ +
+
+{% endif %} + + +{% if section == 'users' %} +
+
User management
+
+
+ + + + + + + + + + {% if users %} + {% for user in users %} + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
UsernameRolesActions
{{ user.username }}{{ (user.role or '')|replace(',', ', ') }} +
+
+
+ + +
+
+ {% set is_last_admin = (user.role == 'admin' and (users | selectattr('role', 'equalto', 'admin') | list | length) <= 1) %} +
+ +
+
+
No users found.
+
+ +
Create new user
+
+
+ + +
+
+ +
+
+
+ + +
+
+ + +
+
+
+
+ + +
+
+ + +
+
+
+
+
+ + +
+
+ +
+
+
+
+{% endif %} + + +{% if section == 'imports' %} +
+
+
Import configuration
+
+
+ + +
+ +
+
+ + +
+
+ + +
Messages older than this date are ignored and remain in the inbox.
+
+
+ + +
Default is 50 items per manual import.
+
+ +
+ + +
When enabled, the raw .eml is stored in the database and can be downloaded from Inbox. Older EML data is removed automatically.
+
+
+
+
+ +
+ +
+
+ + +
+
Manual mail import
+
+

Trigger a one-time mail import using the Microsoft Graph settings in General. The number of items is limited to 50.

+
+
+ + +
+
+ +
+
+

Results (counts and any errors) are shown as notifications and recorded on the Logging page.

+
+
+{% endif %} + + +{% if section == 'maintenance' %} +
+
+
+
Approved jobs export / import
+
+

Export and import previously approved jobs (customers and job definitions). Useful when starting with a clean installation and restoring your job list.

+ + + +
+ +
+
+
+ + +
+
+ +
+
+
Use a JSON export created by this application.
+
+
+
+
+
+
+ +
+
+
Object maintenance
+
+

Rebuild object links for existing approved runs (repairs missing reporting links).

+
+ +
+
+
+
+ +
+
+
Jobs maintenance
+
+

Delete all jobs and job runs. Related mails will be returned to the Inbox.

+
+ +
+
+
+
+
+ + +
+
Danger zone
+
+

+ Resetting will permanently delete all application data (customers, jobs, runs, logs, tickets, remarks and users). + After reset you will be redirected to the initial setup to create a new admin account. +

+ +
+
+ + +
+
+ +
+
+
+
+ + +{% endif %} + +{% if section == 'general' %} + + + + + + + +{% endif %} + +{% if section == 'news' %} + +
+
News
+
+
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+
+ + +
+
+ + +
+
+
+
+
+ +
+
+ + {% if news_admin_items %} +
+ {% for item in news_admin_items %} +
+

+ +

+
+
+
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+
+ + +
+
+ + +
+
+
+
+
+ +
+
+ +
+
+ View reads +
+ +
+
+
+ +
+
+
+
+
+ {% endfor %} +
+ {% else %} +
No news items yet.
+ {% endif %} +
+
+ +{% endif %} + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/settings_news_reads.html b/containers/backupchecks/src/templates/main/settings_news_reads.html new file mode 100644 index 0000000..aa7dcea --- /dev/null +++ b/containers/backupchecks/src/templates/main/settings_news_reads.html @@ -0,0 +1,40 @@ +{% extends "layout/base.html" %} +{% block content %} +
+
+

News reads

+
{{ item.title }}
+
+ +
+ +
+
Read by
+
+ {% if reads %} +
+ + + + + + + + + {% for read, user in reads %} + + + + + {% endfor %} + +
UserRead at
{{ user.username }}{{ read.read_at }}
+
+ {% else %} +
No reads yet.
+ {% endif %} +
+
+{% endblock %} diff --git a/containers/backupchecks/src/templates/main/ticket_detail.html b/containers/backupchecks/src/templates/main/ticket_detail.html new file mode 100644 index 0000000..74b3ae5 --- /dev/null +++ b/containers/backupchecks/src/templates/main/ticket_detail.html @@ -0,0 +1,96 @@ +{% extends "layout/base.html" %} + +{% block content %} +
+

Ticket {{ ticket.ticket_code }}

+ Back +
+ +
+
+
+ {% if ticket.resolved_at %}✅ Resolved{% else %}🎫 Active{% endif %} + Active from: {{ ticket.active_from_date.strftime('%d-%m-%Y') if ticket.active_from_date else '-' }} + Start: {{ ticket.start_date.strftime('%d-%m-%Y %H:%M:%S') if ticket.start_date else '-' }} + {% if ticket.resolved_at %} + Resolved: {{ ticket.resolved_at.strftime('%d-%m-%Y %H:%M:%S') }} + {% endif %} +
+ +
+ + +
+ {% if active_role in ['admin','operator'] %} +
+ + {% if not ticket.resolved_at %} + + {% endif %} +
+ {% endif %} +
+
+
+ +
+
+
+
+
Scopes
+ {% if scopes %} +
    + {% for s in scopes %} +
  • +
    Type: {{ s.scope_type }}
    +
    Customer: {{ s.customer_id or '-' }}
    +
    Backup: {{ s.backup_software or '-' }}
    +
    Type: {{ s.backup_type or '-' }}
    +
    Job: {{ s.job_id or '-' }}
    +
    Job name match: {{ s.job_name_match or '-' }}
    +
  • + {% endfor %} +
+ {% else %} +
No scopes.
+ {% endif %} +
+
+
+ +
+
+
+
Linked runs (last 20)
+ {% if runs %} +
+ + + + + + + + + + + {% for r in runs %} + + + + + + + {% endfor %} + +
Run atCustomerJobStatus
{{ r.run_at }}{{ r.customer_name }}{{ r.job_name }}{{ r.status }}
+
+ {% else %} +
No linked runs.
+ {% endif %} +
+
+
+
+ +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/tickets.html b/containers/backupchecks/src/templates/main/tickets.html new file mode 100644 index 0000000..61d6b4e --- /dev/null +++ b/containers/backupchecks/src/templates/main/tickets.html @@ -0,0 +1,169 @@ +{% extends "layout/base.html" %} + +{% block content %} +

Tickets & Remarks

+ + + +
+ + +
+ + +
+ +
+ + +
+ +
+ + +
+ +
+ + +
+ +
+ + +
+ +
+ + Reset +
+
+ +{% if tab == 'tickets' %} +
+ + + + + + + + + + + + + + + + {% if tickets %} + {% for t in tickets %} + + + + + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
StatusTicket codeCustomersScopeLinked runsActive fromStart dateResolved atActions
+ {% if t.active %} + 🎫 Active + {% else %} + ✅ Resolved + {% endif %} + {{ t.ticket_code }}{{ t.customers }}{{ t.scope_summary }}{{ t.linked_runs }}{{ t.active_from_date }}{{ t.start_date }}{{ t.resolved_at }} + View / Edit + {% if t.active and t.job_id %} + Job page + {% endif %} + {% if t.active and (active_role in ['admin','operator']) %} +
+ +
+ {% endif %} +
No tickets found.
+
+{% else %} +
+ + + + + + + + + + + + + + + {% if remarks %} + {% for r in remarks %} + + + + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
StatusRemarkCustomersScopeLinked runsStart dateResolved atActions
+ {% if r.active %} + 💬 Active + {% else %} + ✅ Resolved + {% endif %} + +
{{ r.preview }}
+
{{ r.customers }}{{ r.scope_summary }}{{ r.linked_runs }}{{ r.start_date }}{{ r.resolved_at }} + View / Edit + {% if r.active and r.job_id %} + Job page + {% endif %} + {% if r.active and (active_role in ['admin','operator']) %} +
+ +
+ {% endif %} +
No remarks found.
+
+{% endif %} + +{% endblock %} diff --git a/containers/backupchecks/src/templates/main/user_settings.html b/containers/backupchecks/src/templates/main/user_settings.html new file mode 100644 index 0000000..afe3203 --- /dev/null +++ b/containers/backupchecks/src/templates/main/user_settings.html @@ -0,0 +1,33 @@ +{% extends "layout/base.html" %} + +{% block content %} +
+

User Settings

+
+ +
+
+

Change password

+
+
+ + +
+ +
+ + +
+ +
+ + +
+ +
+ +
+
+
+
+{% endblock %} diff --git a/deploy/backupchecks-stack.yml b/deploy/backupchecks-stack.yml new file mode 100644 index 0000000..1043794 --- /dev/null +++ b/deploy/backupchecks-stack.yml @@ -0,0 +1,54 @@ +version: "3.8" + +services: + backupchecks: + image: gitea.oskamp.info/ivooskamp/backupchecks:dev + container_name: backupchecks + restart: unless-stopped + depends_on: + - postgres + environment: + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_USER: ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + DB_HOST: backupchecks-postgres + DB_PORT: 5432 + ports: + - "8080:8080" + networks: + - backupnet + + postgres: + image: postgres:16 + container_name: backupchecks-postgres + restart: unless-stopped + environment: + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_USER: ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + volumes: + - /docker/appdata/backupchecks/backupchecks-postgres:/var/lib/postgresql/data + networks: + - backupnet + + adminer: + image: adminer:latest + container_name: backupchecks-adminer + restart: unless-stopped + ports: + - "8081:8080" + networks: + - backupnet + +networks: + backupnet: + driver: bridge + +POSTGRES_DB=backup +POSTGRES_USER=backup +POSTGRES_PASSWORD=Changeme +DB_HOST=backupchecks-postgres +DB_PORT=5432 +APP_PORT=8080 +APP_ENV=production +APP_SECRET_KEY=Changeme \ No newline at end of file diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 0000000..e69de29 diff --git a/docs/changelog.md b/docs/changelog.md new file mode 100644 index 0000000..a95cec6 --- /dev/null +++ b/docs/changelog.md @@ -0,0 +1,620 @@ +## v20251231-01-dashboard-description + +- Added a comprehensive explanatory section to the Dashboard, positioned directly below the legend. +- Introduced detailed documentation describing how Backupchecks collects, normalizes, and evaluates backup results across multiple backup solutions. +- Clarified the purpose and behavior of dashboard counters, job statuses, and override indicators. +- Expanded the explanation of the Daily Jobs and Run Checks workflows, including the review, follow-up, resolution, and override processes. +- Documented the operational goal of maintaining an empty or near-empty Run Checks page to ensure full review and accountability. +- Clearly positioned Backupchecks as a monitoring, validation, and control platform that enhances, but does not replace, existing backup software. + +--- + +## v20251231-02-daily-first-open-dashboard + +- Added logic to automatically redirect users to the Dashboard on their first website visit of each day. +- The redirect is applied per browser session after authentication, ensuring normal navigation afterwards. +- Excluded API endpoints and static asset requests from the redirect to prevent unintended side effects. +- Ensured the behavior is transparent to users while improving daily visibility of overall system status. + +--- + +## v20251231-03-dashboard-news-settings-sections + +- Added a Dashboard News section displaying active announcements per user. +- Implemented per-user read tracking, allowing users to mark news items as read so they disappear only for that user. +- Added admin management for News items in Settings (create, edit, delete, pin, severity, active state, publish window). +- Added admin visibility into read status per news item, including read counts and user timestamps. +- Implemented admin action to reset read status for a news item. +- Refactored the Settings page into separate sections to improve usability and prevent overly long pages. 
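To illustrate the per-user read tracking added in v20251231-03 above, here is a minimal, hypothetical sketch (assumed model and column names, SQLAlchemy-style; not the application's actual schema). Each read is stored once per user and item, so marking an item as read hides it only for that user.

```python
# Hypothetical sketch of per-user news read tracking (assumed names, not the real schema).
from datetime import datetime, timezone

from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class NewsItem(Base):
    __tablename__ = "news_items"
    id = Column(Integer, primary_key=True)
    title = Column(String, nullable=False)
    is_active = Column(Boolean, nullable=False, default=True)


class NewsRead(Base):
    """One row per (news item, user): marking as read hides the item for that user only."""
    __tablename__ = "news_reads"
    __table_args__ = (UniqueConstraint("news_item_id", "user_id"),)
    id = Column(Integer, primary_key=True)
    news_item_id = Column(Integer, ForeignKey("news_items.id"), nullable=False)
    user_id = Column(Integer, nullable=False)
    read_at = Column(DateTime, nullable=False, default=lambda: datetime.now(timezone.utc))


def unread_news(session: Session, user_id: int) -> list:
    """Return active news items the given user has not yet marked as read."""
    read_ids = [row.news_item_id for row in
                session.query(NewsRead.news_item_id).filter(NewsRead.user_id == user_id)]
    query = session.query(NewsItem).filter(NewsItem.is_active.is_(True))
    if read_ids:
        query = query.filter(~NewsItem.id.in_(read_ids))
    return query.all()
```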
+ +--- + +## v20251231-04-settings-tabs-restructure + +### Settings +- Removed the collapse-based layout from **Settings → General** and converted it to a fixed section view for improved clarity. +- Moved **Import configuration** from **Settings → General** to the dedicated **Imports** section. +- Moved **User Management** from **Settings → Maintenance** to the dedicated **Users** section. +- Moved **Manual mail import** from **Settings → Maintenance** to the **Imports** section. +- Simplified and clarified the overall Settings structure by grouping related functionality into dedicated sections. + +--- + +## v20251231-05-reporting-raw-data-foundation + +### Reporting – Foundation +- Introduced the initial reporting foundation focused on raw data collection. +- Added support for object-based reporting across multiple jobs. +- Implemented immutable report snapshot data to ensure auditability. +- Added aggregated summary data to calculate success, warning, failed, missed, and override metrics per object. +- Prepared reporting structures to support CSV output and future PDF generation. +- Enabled both one-time and scheduled report concepts at the data and API level. +- Ensured report data is reusable for downloads and email delivery. +- Aligned reporting permissions so admin, operator, and reporter roles have equal access in phase 1. +- Designed the reporting model to be extensible for future RBAC, UI, and visualization enhancements. + +--- + +## v20251231-06-reports-ui-placeholders + +- Added an initial Reports UI to create, view, and manage report definitions for testing purposes. +- Implemented UI components to generate and download raw report data as CSV. +- Added scheduling placeholders (non-functional) to preview future report scheduling options. +- Prepared the Reports page layout for future extensions such as automated delivery and scheduling. + +--- + +## v20251231-07-reports-raw-data-ui + +- Added raw data preview functionality to the Reports page, allowing users to view report data directly in the UI without downloading a CSV. +- Introduced a modal-based raw data viewer with pagination support for large datasets. +- Implemented backend support to generate report data on demand for UI preview usage. +- Added API endpoints to retrieve report raw data with configurable limits and offsets. +- Prepared the reporting flow to support future export options (CSV/PDF) using the same data foundation. + +--- + +## v20251231-08-reports-generation-enabled + +- Enabled actual report generation from the Reports page. +- Connected the “Generate report” action to the backend generation logic. +- Ensured raw data can be used to build a report without requiring a schedule. +- Kept scheduling functionality as a placeholder only, as intended. +- Improved UI state handling after report generation (status and available actions). + +--- + +## v20260101-01-reports-new-report-button-fix + +- Fixed the “New report” button on the Reports page so it correctly opens the report creation modal. +- Ensured report-related JavaScript is initialized after DOMContentLoaded to avoid issues with unloaded dependencies. +- Restored the ability to start creating a new report from the UI. + +--- + +## v20260101-02-reports-new-report-page-multicustomer + +- Replaced the “New report” modal with a dedicated page for creating reports. +- Added a full Create Report page with support for future expansion of report options. +- Implemented month-based date selection with clickable days and separate time inputs. 
+- Added quick-select options for: + - First day of the current month + - First day of the last month +- Added the ability to select a customer for which the report is generated. +- Extended reporting to support generating reports for multiple customers in a single report. +- Updated report generation logic to handle single-customer, multi-customer, and all-customer scopes. +- Prepared reporting output for use cases such as account management and operations-wide overviews. + +================================================================================================================================================ +## v0.1.14 + +### Daily Jobs +- Introduced a consistent, case-insensitive multi-level sort order for the Daily Jobs overview: Customer → Backup Software → Backup Type → Job Name. +- Fixed backend ordering issues to ensure server-side data no longer overrides the intended sort logic. +- Ensured sorting is applied before serialization so the UI always reflects the correct order. +- Improved predictability and readability of job listings across environments. + +### Veeam Backup for Microsoft 365 +- Improved parsing of overall warning messages to correctly extract and display permission- and role-related issues. +- Added support for combined permission and role warnings in M365 reports. +- Ensured detailed permission warnings take precedence over generic “X of X objects processed” messages. +- Fixed incorrect overall message selection and filtered out misleading banner fragments. +- Resolved an indentation error in the parser that caused backend startup failures, restoring stability. + +### Overrides – Configuration and Matching +- Replaced free-text inputs with dropdowns for Backup Software and Backup Type in Overrides, including: + - Alphabetical sorting + - Preselection of existing values + - A global option at the top of each dropdown +- Fixed PostgreSQL compatibility issues by replacing DISTINCT queries with GROUP BY while preserving case-insensitive sorting. +- Ensured Overrides endpoints no longer crash due to invalid query constructions. + +### Overrides – Application, Editing, and Deletion +- Made newly created overrides apply immediately and retroactively to all unreviewed runs by default. +- Added full support for editing existing overrides and reapplying changes to unreviewed runs. +- Restricted override deletion to Admin users and ensured proper reprocessing after removal. +- Fixed datetime handling in override edit flows so unchanged values are preserved and NULL constraint violations are avoided. +- Ensured Admin users always see delete actions by consistently passing permission flags to the UI. + +### Overrides – Matching Logic Improvements +- Extended override matching to use persisted run_object_links joined with customer_objects instead of legacy or non-existent relationships. +- Improved global override matching by resolving backup software and type from MailMessage data when missing on jobs. +- Added support for matching against object-level error messages as well as run-level remarks. +- Ensured all override matching remains case-insensitive and consistent across run-level and object-level evaluations. + +### Overrides – UI Indicators and Reporting +- Introduced a blue status indicator for runs and jobs where overrides are applied. +- Updated status labels to display “Success (override)” for clearer distinction without changing canonical stored statuses. 
+- Added persistent override reporting metadata to job runs, including applied override ID, level, and reason. +- Ensured dashboards, Daily Jobs, Run Checks, and popups correctly propagate and display override-based success states. +- Fixed multiple UI rendering issues so overridden runs are no longer misclassified as warnings or missed jobs. + +### Daily Jobs Popups +- Fixed popup loading failures and backend unpacking errors related to override handling. +- Ensured popup details consistently load correct run data. +- Aligned popup override detection and status coloring with Run Checks and Daily Jobs overviews. + +--- + +## v0.1.13 + +This release focuses on improving visibility and consistency of Tickets and Remarks across Run Checks and Job Details, alongside several UI fixes and backend stability improvements. + +### Highlights +- Added clear visual indicators for active Tickets and Remarks in the Run Checks overview. +- Enhanced Job Details and Job History to display actual ticket numbers and related remark messages, both in tables and popups. +- Improved navigation consistency by adding direct “Job page” links for Tickets and Remarks. + +### Improvements +- Job History popups now reliably show associated ticket numbers and remark content. +- Backend job history data is enriched to support consistent UI rendering. +- Missed-run detection now includes a ±1 hour tolerance window and respects the configured UI timezone. +- Run Checks UI is simplified by hiding last-reviewed columns (data is still retained in the backend). + +### Fixes +- Resolved a backend indentation issue that caused Gunicorn startup failures. +- Made frontend parsing of ticket/remark data more robust against malformed or unexpected payloads. +- Fixed JSON encoding issues in HTML data attributes to prevent popup rendering errors. + +### Changelog Update +- Simplified the changelog by removing “Current Version” and “In testing” sections. +- The changelog now only shows completed changes. + +--- + +## v0.1.12 + +### Dashboard & UI +- Corrected dashboard counters so **Expected**, **Missed**, and **Success (override)** statuses are shown accurately. +- Added dedicated counters for Expected and Success (override). +- Fixed layout issues on the Inbox dashboard tiles and improved label wrapping. +- Added safe timezone fallbacks to prevent incorrect status aggregation. +- Restored missing status icons and symbols across Dashboard and Daily Jobs views. +- Cleaned up Job Details UI by removing redundant columns and clarifying schedule display. +- Extended Job History with weekday labels and review metadata (Admin-only visibility). + +### Stability & Reliability +- Fixed a Gunicorn startup crash caused by incorrect Python indentation. +- Improved migration robustness for soft-delete columns to prevent startup 502 errors on busy databases. +- Prevented duplicate or unintended regeneration of reviewed “Missed” runs. + +### Inbox & Mail Handling +- Introduced soft-delete for Inbox messages with full Admin restore capability. +- Added an Admin-only “Deleted mails” page with audit details (who/when). +- Added popup previews for deleted mails without requiring restore. +- Improved HTML mail handling by extracting content from HTML attachments when the body is empty. +- Added an Admin maintenance action to backfill HTML bodies from existing attachments. + +### Feedback & Settings +- Changed Feedback behavior so resolved items remain visible until explicitly deleted. +- Restricted feedback deletion to Admin users only. 
+- Added a User Settings page allowing users to change their own password securely. + +### Backup Parser Enhancements +- Improved Veeam parsing: + - Fixed Health Check Summary parsing. + - Normalized job names by stripping “(Retry)”. + - Added and refined License Key parsing with correct status detection and exclusions from scheduling logic. +- Added and expanded Synology support: + - Active Backup for Business (NL/EN). + - R-Sync (NL/EN). + - Account Protection notifications (informational, no scheduling). +- Added new parsers: + - R-Drive Image. + - Syncovery. +- Ensured correct handling of objects, statuses, and scheduling exclusions where applicable. + +### Changelog +- Removed the “Planned” section from the Changelog. +- Future planning is now handled exclusively via the Feedback page. + +--- + +## v0.1.11 + +### Stability & Bug Fixes +- Fixed multiple page crashes caused by missing imports after refactoring (Jobs, Feedback, Run Checks, Inbox, Daily Jobs). +- Resolved Jinja2 template errors and SQL/runtime issues related to timezone handling. +- Improved robustness by explicitly importing shared helpers to prevent NameError exceptions. + +### Run Checks & Review Workflow +- Introduced a new **Run Checks** page to review job runs independently from Daily Jobs. +- Displays all unreviewed runs with no time-based filtering. +- Supports bulk review actions and per-job review via popups. +- Added admin-only features: show reviewed runs, unmark reviewed runs, reviewer metadata, and full audit logging. +- Enhanced popups to group runs per job, include missed runs, and show ticket/remark indicators. +- Added per-job and per-popup status summaries using visual indicators only. + +### UI & Visual Consistency +- Unified all job and run status indicators to a single shape differentiated by color. +- Added a clear status legend to the Dashboard, including the new **Expected** state. +- Removed textual status labels across Daily Jobs and Run Checks for a cleaner UI. +- Improved table layouts and widened content areas for better use of 1080p screens. +- Ensured consistent indicator rendering across all pages. + +### Timezone & Display Improvements +- Added a configurable timezone setting in Settings. +- Updated all frontend date/time rendering to use the configured timezone instead of UTC. +- Fixed offset issues and restored missing timestamps across multiple pages. + +### Missed Runs Logic +- Refined missed run detection to rely only on historically received mail reports. +- Prevented synthetic or never-run schedules from generating false missed runs. + +### Settings & Maintenance +- Stabilized **Delete all jobs** by adding schema-tolerant cleanup of all related foreign key references. +- Refactored the Settings page layout using accordions and cards for improved clarity. +- Improved alignment and usability of import/export and user management sections. + +### Roles & Access Control +- Added support for multiple roles per user with an active role switcher. +- Fixed role-based menu rendering and ensured permissions are evaluated against the active role. +- Ensured role switching consistently redirects to the Dashboard. + +### Theme & UX Fixes +- Fixed manual theme switching (Light/Dark/Auto) and ensured user preferences persist. +- Corrected Inbox EML download functionality by restoring the missing shared import. 
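As a sketch of the configurable display timezone described under Timezone & Display Improvements above (hypothetical helper name and default zone; the real implementation may differ), timestamps stored in UTC can be converted for rendering like this:

```python
# Hypothetical helper: render a UTC timestamp in the timezone configured in Settings.
from datetime import datetime, timezone
from zoneinfo import ZoneInfo


def format_local(dt_utc: datetime, ui_timezone: str = "Europe/Amsterdam") -> str:
    """Convert a stored UTC datetime to the configured display timezone for the UI."""
    if dt_utc.tzinfo is None:
        # Stored values are assumed to be naive UTC.
        dt_utc = dt_utc.replace(tzinfo=timezone.utc)
    return dt_utc.astimezone(ZoneInfo(ui_timezone)).strftime("%d-%m-%Y %H:%M:%S")


# Example: a run received at 23:00 UTC on 18-12-2025 renders as "19-12-2025 00:00:00"
# when the configured timezone is Europe/Amsterdam (UTC+1 in winter).
print(format_local(datetime(2025, 12, 18, 23, 0)))
```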
+ +Overall, this release significantly improves stability, review workflows, visual consistency, timezone correctness, and administrative reliability, while refining the operator experience and access control model. + +--- + +## v0.1.10 + +### Performance & Stability +- Reworked **Re-parse all** to process inbox messages in controlled batches, preventing gateway and Gunicorn timeouts on large inboxes. +- Added execution time guards to stop processing before proxy limits are reached. +- Optimized job-matching queries and disabled session autoflush during batch operations to reduce database load. +- Ensured auto-approval and persistence logic only finalize after a full, successful re-parse cycle. +- Restored stable backend startup by fixing decorator ordering issues that caused 502 errors. + +### Job Matching & Parsing +- Fixed approved job imports to persist `from_address`, ensuring correct matching during re-parse. +- Improved Veeam Backup Job parsing: + - Extracted and stored multi-line warnings/errors and object-level details with preserved line breaks. + - Ignored VM summary lines (e.g., “X of X VMs processed”) for overall status detection. + - Prevented incorrect overall warnings when issues are object-level only. + - Fixed regressions to ensure backup objects are consistently detected, stored, and displayed across all views. + +### UI & UX Improvements +- Added EML download support for Job Details and Daily Jobs, with automatic availability handling and proper 404s when missing. +- Improved rendering to preserve line breaks (pre-wrap) in remarks, overall messages, and object details. +- Reduced visual clutter by moving overall status/messages out of tables and into context-specific popups. +- Standardized changelog version display by removing date suffixes. +- Reordered main navigation for better consistency. + +### Daily Jobs & Status Accuracy +- Clarified Daily Jobs status logic: + - Introduced **Expected** for backups not yet due. + - Reserved **Missed** only for jobs past their final expected run time. +- Added last remark excerpts and ensured object details are visible in Daily Jobs popups. + +### Tickets, Remarks & Overrides +- Introduced run-date–scoped ticket activity with `active_from_date`, ensuring accurate historical and current visibility. +- Implemented identical scoping for remarks, preserving visibility across runs even after resolution. +- Fixed resolve actions to redirect properly in the UI while keeping JSON responses for API/AJAX. +- Improved override handling so changes apply immediately to existing job runs with correct priority resolution. + +### New Features +- Added a **Feedback** board with per-user upvoting, admin moderation (resolve/reopen, soft delete), database migrations, and navigation entry. + +### Navigation +- Updated menu order to: Inbox, Customers, Jobs, Daily Jobs, Tickets, Overrides, Reports, Settings, Logging, Changelog, Feedback. + +--- + +## v0.1.9 + +### Changelog System Improvements +- Added and maintained multiple **Completed** changelog entries (v0.1.2 through v0.1.8) with correct release dates. +- Ensured all existing Completed, Testing, and Planned changelog entries were preserved without loss. +- Migrated the Completed changelog from markdown-based content to a **structured, non-markdown format** aligned with the Planned section. +- Simplified changelog rendering logic to use explicit section titles and bullet handling instead of full markdown parsing. 
+- Standardized formatting across all versions for long-term maintainability and consistent UI rendering. + +### Bug Fixes & Stability +- Fixed multiple backend **Python syntax and runtime errors** related to changelog definitions (missing commas, indentation issues, invalid list entries). +- Resolved rendering issues where markdown content was displayed as plain text or collapsed incorrectly. +- Restored application startup stability by fixing missing imports (`re`, `html`) and indentation errors in changelog-related routes. + +### Refactoring & Maintainability +- Refactored a large `routes.py` file into multiple smaller route modules. +- Introduced a shared routes module for common imports, helpers, and access control. +- Fixed NameError issues after refactoring by explicitly importing underscored helper functions that are not included via wildcard imports. +- Ensured all split route modules retained full functional parity with the original implementation. + +### Release Management Updates +- Moved versions through **Testing → Completed** states correctly: + - v0.1.7 marked as Completed (2025-12-23). + - v0.1.8 added as Completed (2025-12-24) and restored as Current Version. + - Testing advanced to v0.1.9. +- Updated v0.1.8 release notes to document consistent job-matching and auto-approval behavior across all mail processing flows. +- Verified no regressions in changelog structure or rendering after updates. + +Overall, this release focused on **hardening the changelog system**, improving backend stability, cleaning up technical debt in routing, and ensuring consistent, reliable release tracking across the application. + +--- + +## v0.1.8 + +### Overview +This release focuses on making job matching and auto-approval behavior fully consistent across manual inbox actions, automatic mail imports, and the “Re-parse all” process. It also fixes a critical backend startup issue introduced in the re-parse logic. + +### Key Changes +- Introduced a single, shared job-matching helper based on a full unique key (see the sketch below): + - From address + - Backup software + - Backup type + - Job name +- Updated manual inbox approval to reuse existing jobs when the unique key matches, instead of relying on customer-only matching. +- Aligned inbox “Re-parse all” auto-approve logic with the same shared matching behavior. +- Fixed automatic mail import auto-approve so it correctly: + - Creates a JobRun + - Marks the mail as approved + - Moves the mail to history when a matching job exists + +### Re-parse All Improvements +- Auto-approve is now executed during “Re-parse all”, not only on initial mail import. +- After re-parsing, all successfully parsed mails without a linked job are re-evaluated against existing jobs using the full unique key. +- When a matching active job with auto-approve enabled is found: + - The mail is automatically approved + - The mail is linked to the job + - The mail is moved to history + - A corresponding job run is created and shown in Job History + +### Fixes +- Resolved an issue where “Re-parse all” previously only updated parse metadata and skipped auto-approve logic, causing historical mails not to appear in job history. +- Fixed a SyntaxError in the re-parse auto-approve logic that caused backend startup failures (Bad Gateway). +- Corrected try/except structure and indentation to ensure re-parse auto-approve runs safely per mail without breaking the overall process.
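To make the shared unique-key matching described under Key Changes concrete, here is a minimal, hypothetical sketch (assumed model and field names, SQLAlchemy-style; not the application's actual helper):

```python
# Hypothetical sketch of a shared job-matching helper (assumed model/field names).
from sqlalchemy import func
from sqlalchemy.orm import Session


def normalize(value) -> str:
    """Trim and lowercase so case and whitespace differences do not break matching."""
    return (value or "").strip().lower()


def find_matching_job(session: Session, job_model, parsed_mail):
    """Match on the full unique key: from address, backup software, backup type, job name."""
    return (
        session.query(job_model)
        .filter(job_model.is_active.is_(True))
        .filter(func.lower(func.trim(job_model.from_address)) == normalize(parsed_mail.from_address))
        .filter(func.lower(job_model.backup_software) == normalize(parsed_mail.backup_software))
        .filter(func.lower(job_model.backup_type) == normalize(parsed_mail.backup_type))
        .filter(func.lower(job_model.job_name) == normalize(parsed_mail.job_name))
        .first()
    )
```

When a matching active job with auto-approve enabled is found, the mail can then be approved, linked to the job, moved to history, and a corresponding job run created, as listed above.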
+ +### Result +- Job matching and auto-approval behavior is now consistent across all mail processing flows. +- Historical mails are correctly linked to jobs and visible in job history. +- Backend stability during startup and re-parse operations is restored. + +--- + +## v0.1.7 + +### Key Features +- Introduced **export and import functionality for approved jobs**, allowing approved job definitions to be migrated between clean installations via JSON. +- Import process automatically creates missing customers and updates existing jobs based on a unique job identity to prevent duplicates. + +### Versioning & Changelog +- Promoted version **v0.1.7** from *Testing* to *Completed*. +- Introduced **v0.1.8** as the new *Testing* release. +- Updated the changelog structure and testing notes to reflect active export/import functionality. + +### Parser Enhancements +- **Boxafe** + - Improved parsing for Shared Drives, Domain Accounts (Email, Contact, Drive, Calendar), and handling of *Warning* statuses. + - Corrected object detection logic, ensuring no false objects are created when jobs contain no object data. + - Removed object parsing for Shared Drives backups entirely. +- **Synology Hyper Backup** + - Added full support for Dutch notification emails. + - Improved status detection for Dutch phrasing. + - Confirmed that no objects are parsed for Hyper Backup jobs. +- **Veeam** + - Added support for **Scale-out Backup Repository (SOBR)** notifications, including storage capacity metrics suitable for logging and graphing. + - Added support for **Veeam Health Check** reports with correct object filtering to exclude summary-only entries. + +### Job Approval & Auto-Approval Logic +- Significantly refined approved job matching logic to prevent cross-customer approvals. +- Improved auto-approve behavior during **Re-parse all**, Inbox reprocessing, and Graph imports. +- Enhanced resilience against case differences, whitespace, unicode characters, and hidden formatting issues. +- Iteratively refined normalization strategy: + - Ultimately simplified matching to rely primarily on a normalized **From address**, while ensuring parser-consistent values for Backup, Type, and Job Name. +- Ensured deterministic behavior during reprocessing by preventing mutation of message data. + +### Stability Fixes +- Fixed multiple crashes and approval errors caused by undefined or incorrect job name handling. +- Resolved issues with duplicate job records (including `NULL` customer IDs) that previously blocked auto-approval. +- Ensured consistent JobRun creation and mail linking during automatic imports and re-parsing. + +### Notes +- Due to changes in approval matching logic, **previously approved jobs are expected to be recreated** after these updates. + +--- + +## v0.1.6 + +### Fixed +- Corrected auto-approve logic to ensure it is properly applied during automatic mail imports. +- Prevented the **Re-parse all** action from re-processing emails that were already approved. +- Ensured approved status is always respected and never overwritten during re-parsing or automatic imports. +- Fixed multiple Jinja2 `TemplateSyntaxError` issues in the base layout that caused 500 Internal Server Errors. +- Restored correct rendering of all pages affected by template errors, including Dashboard, Parsers, and Changelog. +- Resolved Changelog page rendering issues by fixing dictionary access in templates and avoiding conflicts with built-in methods. 
+ +### Added +- Introduced a centralized **Changelog** page containing: + - Active production version + - Testing version + - Planned / Todo items + - Completed changes +- Added the Changelog entry point to the main navigation. +- Applied a clear versioning convention, e.g. `v0.1.7 (v20251222-03)` for testing releases. +- Marked version `0.1.6` as the active production release. + +### Planned +- Export and import of jobs to allow restoring approved jobs after a clean installation. +- Always register “New license key is not available” as an error. +- Support for a scale-out backup repository Cloud Connect Immutable parser. +- Ability to attach EML files to Daily Jobs and Job Details. +- Fix for Light/Dark theme switching so users can properly change themes. +- Restrict ticket creation and editing to Operator and Admin roles only. + +### Known Bugs +- Emails that were previously approved remain in the Inbox instead of being removed, even though they appear auto-approved and linked to Jobs. + +--- + +## v0.1.5 + +### Overview +This release focuses on restoring Microsoft Graph functionality, improving application reliability, and introducing a robust reset mechanism to allow a clean restart of the application state. + +### Key Fixes +- Restored Microsoft Graph folder retrieval by fixing an incorrect import that caused a `ModuleNotFoundError`. +- Resolved failures in the automatic mail importer caused by `signal`-based timeout handling by replacing it with a thread-safe mechanism. +- Fixed backend startup crashes and Bad Gateway errors related to the automatic mail importer. +- Implemented missing backend logic required for automatic imports to function correctly. + +### New Features +- Added an **Application Reset** option in the Settings page. +- Introduced a confirmation step to prevent accidental resets. + +### Improvements & Changes +- Implemented full backend support for a complete application reset. +- Reset now clears all application data, including: + - Approved and pending jobs + - Imported and processed emails + - Daily job runs + - Logs + - User-defined settings (system defaults are preserved) +- Ensured database cleanup runs in the correct order to respect foreign key constraints. +- Aligned automatic mail import logic with the existing manual import flow for consistent behavior. +- Applied the automatic import cutoff date directly via a Microsoft Graph `$filter`, leaving older emails untouched in the inbox. + +### Result +- Graph API functionality is fully restored. +- Automatic mail import runs reliably on its configured schedule. +- The application can now be safely reset to a clean, fresh-install state when needed. + +--- + +## v0.1.4 + +This release focuses on improving backend stability, database reliability, and consistency in object parsing and mail handling. + +### Key Changes +- Database migrations for tickets and remarks were stabilized by running each migration in its own transaction scope, preventing closed-connection errors during startup. +- Backend startup issues causing Gunicorn failures and 502 Bad Gateway errors were resolved. +- The title field was fully removed from tickets and remarks, simplifying both backend validation and UI forms to use only descriptive content. +- Manual mail imports were aligned with the **Re-parse all** behavior, ensuring immediate and consistent object detection. +- Object visibility on the Daily Jobs page was corrected for previously approved jobs. 
+- Manual imports were hardened against Microsoft Graph timeouts by adding DNS preflight checks and safely skipping message moves when Graph is unreachable. + +### Improvements +- Eliminated `ResourceClosedError` exceptions during backend boot. +- Increased reliability of migrations and overall application startup. +- Ensured object parsing is consistently re-evaluated on every job run, with correct detection of added or removed objects. +- Prevented internal server errors and Gunicorn worker timeouts caused by long-running external Graph operations. + +Overall, v0.1.4 significantly improves robustness, consistency, and fault tolerance across database migrations, job parsing, and manual mail imports. + +--- + +## v0.1.3 + +### Logging & Stability +- Fixed logging persistence so log entries are consistently stored in the database. +- Resolved cases where certain log events were not stored due to object lifecycle handling. +- Improved reliability of log creation during background/asynchronous processes. +- Corrected log retrieval so stored logs are properly fetched and shown in the web UI. +- Added pagination to the logging overview (20 entries per page). +- Extended the logging view to show all available log fields and fixed missing columns in the UI. + +### UI & Table Layout Improvements +- Improved the logging page usability by placing pagination controls at the top and keeping them available at the bottom. +- Increased logging table width to better fit a 1080p layout. +- Fixed column layout so all columns remain in consistent positions regardless of content length. +- Updated status styling to use colored text only within the status column (Success, Warning, Error/Failed, Missed), including clear differentiation for overrides. +- Fixed JavaScript errors in the Daily Jobs popup that prevented rendering. + +### Jobs & Daily Jobs Enhancements +- Standardized default sorting for both Jobs and Daily Jobs tables (Customer → Backup → Type → Job name). +- Persisted the Daily Jobs start date setting in the database and ensured it reloads correctly in the Settings UI. +- Corrected missed-status calculation to start from the configured Daily Jobs start date. +- Improved Daily Jobs table readability: + - Moved the number of runs into a dedicated Runs column. + - Prevented layout shifting caused by variable text in the Last result column. + - Restored the original Runs visual representation and adjusted placement for better readability. + - Reduced the Last result column width so only status text is shown and the Runs column remains visible. + +### Parsing & Data Normalization +- Stripped retry suffixes like “(Retry 1)”, “(Retry 2)”, etc. from job names so retries don’t create separate job identities. +- Extended the NAKIVO parser to support VMware Replication job emails: + - Detects job type (Backup vs Replication) based on email content. + - Improves replication job name parsing. + - Extracts VM names from the Objects/Virtual Machines section. + - Maps overall job status correctly for replication reports. + +### Tickets & Remarks (New Feature Area) +- Added database schema for globally unique, persistent tickets linked to job runs for long-term reporting: + - New tables: tickets, ticket_scopes, ticket_job_runs. + - Ticket codes are globally unique using format TYYYYMMDD.NNNN. + - Tickets require at least one customer scope. +- Added database schema for remarks with scoped attachment and persistent linkage to job runs: + - New tables: remarks, remark_scopes, remark_job_runs. 
+- Implemented a new Tickets page with tabbed navigation (Tickets / Remarks): + - Overviews with filtering. + - Detail views showing scopes, linked job runs, and actions. + - Added indicators in Daily Jobs to show active tickets and/or remarks. + - Added ticket/remark management in the job run popup (create, edit, resolve). + - Introduced consistent icons for tickets, remarks, actions, and status indicators. +- Added backend API endpoints for listing/creating/updating/resolving/linking tickets and remarks, plus an endpoint to retrieve all alerts for a specific job run. + +--- + +## v0.1.2 + +This release focuses on improved parser support, more robust data cleanup, and a fully reworked in-app logging and object persistence system. + +### Parser & support +- Extended the Synology Hyper Backup mail parser with proper recognition of Strato HiDrive backups. +- Added support for parsing job names from the “Backup Task:” field for Strato HiDrive. +- Correct handling of successful runs without listed objects. +- Added a Strato HiDrive example to the parser templates for validation and reference. + +### Administration & cleanup +- Introduced an admin-only action to delete all jobs in a single operation. +- Ensured related run mails are moved back to the Inbox when jobs are deleted. +- Fixed foreign key constraint issues by enforcing the correct deletion order: + - run_object_links first + - job_object_links next + - then job runs and jobs +- Stabilized the “Delete all jobs” action to fully clean up all related data. + +### Logging (reworked) +- Moved logging away from container/stdout logging to in-app logging. +- Introduced AdminLog-based logging for: + - Mail import + - Auto-approval + - Manual job approval + - Job deletion +- Added detailed logging per imported and auto-approved email. +- Added summary logging at the end of each mail import run. +- Ensured all relevant events are logged exclusively via the AdminLog table and visible on the Logging page. + +### Object persistence +- Restored persistence of parsed objects after manual approval of inbox mails. +- Restored persistence of parsed objects during auto-approval (reparse-all). +- Ensured objects from approved mails are: + - Upserted into `customer_objects` + - Linked to jobs via `job_object_links` (with first/last seen tracking) + - Linked to runs via `run_object_links` (with status and error details) +- Added centralized helper logic to ensure consistent object persistence. +- Added an admin-only maintenance action to backfill missing object links for already approved runs. +- Object persistence failures no longer block mail approval. +- Daily Jobs and Run detail views correctly display objects again for both new and historical runs after backfilling. \ No newline at end of file diff --git a/docs/manifest.md b/docs/manifest.md new file mode 100644 index 0000000..2e6d117 --- /dev/null +++ b/docs/manifest.md @@ -0,0 +1,69 @@ +# Backupchecks repository manifest + +This document describes the main directories and important files in the Backupchecks repository. + +## Top-level + +- `containers/backupchecks/` + Docker build context for the application image. Everything under this directory is copied into the image. + +- `docs/` + Documentation that is not baked into the image (design notes, migration policy, etc.). + +- `deploy/` + Portainer stack files and deployment notes. These files are NOT copied into the image. + +## containers/backupchecks + +- `Dockerfile` + Builds the Backupchecks application image. 
+ - Installs Python dependencies from `requirements.txt`. + - Copies the `src/` tree into `/app/src`. + - Starts the app using Gunicorn and the `create_app()` factory. + +- `requirements.txt` + Python dependencies for the application. + +- `src/backend/app/` + Backend application package. + + - `__init__.py` + Application factory (`create_app()`), database initialization and migrations bootstrap. + + - `config.py` + Reads configuration from environment variables (database connection, app env, timezone). + + - `database.py` + SQLAlchemy database instance (`db`). + + - `models.py` + ORM models, including: + - `User` – login user with `username`, optional `email`, `password_hash`, `role`. + - `SystemSettings` – persisted configuration for Graph, folders and import behaviour. + + - `auth/` + Authentication and authorization: + - `__init__.py` – login manager setup. + - `routes.py` – login, logout, initial admin setup, password reset placeholder. + + - `main/` + Main UI and navigation: + - `routes.py` – dashboard, Inbox, Customers, Jobs, Daily Jobs, Overrides, Reports, Settings, Logging. + + - `migrations.py` + In-image SQL migrations. See `docs/migrations.md` for details. + +- `src/templates/` + Jinja2 templates used by the Flask application. + + - `layout/base.html` + Base layout, navbar, flash messages. + + - `auth/*.html` + Authentication templates (login, initial admin setup, password reset request). + + - `main/*.html` + Page templates for dashboard, Inbox, Customers, Jobs, Daily Jobs, Overrides, Reports, Settings, Logging. + +- `src/static/` + Static assets (CSS, JS, images). Currently minimal, can grow as the UI is developed. diff --git a/docs/migrations.md b/docs/migrations.md new file mode 100644 index 0000000..7b50ca7 --- /dev/null +++ b/docs/migrations.md @@ -0,0 +1,67 @@ +# Database migrations policy + +This document describes how database schema changes are handled for Backupchecks . + +## Overview + +- The baseline schema is defined in `backend/app/models.py`. +- On application startup, the following is executed inside `create_app()`: + 1. `db.create_all()` – creates any missing tables and columns defined in the models. + 2. `run_migrations()` – executes in-image SQL migrations from `backend/app/migrations.py`. + +This approach allows: +- Clean databases to be created automatically. +- Existing databases to be upgraded in-place without manual SQL. +- Safe repeated restarts: migrations are idempotent and can be run multiple times. + +## Adding migrations + +When you change the schema in a way that is not automatically covered by `db.create_all()` (for example, +altering column nullability, adding constraints, backfilling data), follow these steps: + +1. Add or adjust the corresponding model(s) in `models.py`. +2. In `migrations.py`: + - Add a new function, for example: + + - `def migrate_xyz():` + - Perform the required SQL using `db.get_engine()` and `sqlalchemy.text`. + - Always check the current state first (e.g. whether a column or constraint already exists). + - Call this function from `run_migrations()` in the correct order. + +3. Do NOT remove older migration functions. They must remain so that: + - Existing databases can still be upgraded from older versions. + - New installations run all migrations but older ones become no-ops because their checks see that the + changes are already applied. + +4. Each migration must be **idempotent**: + - It should detect whether its change is already in place and then exit without error. 
+ - This allows `run_migrations()` to be executed on every startup. + +## Current migrations (initial set) + +Implemented in `backend/app/migrations.py`: + +- `migrate_add_username_to_users()` + - Adds a `username` column to the `users` table if it does not exist. + - Backfills `username` from `email` where possible. + - Sets `username` to `NOT NULL`. + - Adds a UNIQUE constraint on `users.username`. + +- `migrate_make_email_nullable()` + - Ensures the `email` column on `users` is nullable. + - If the column is currently `NOT NULL`, the migration executes: + `ALTER TABLE "users" ALTER COLUMN email DROP NOT NULL`. + +- `run_migrations()` + - Calls the above migrations in order. + - Logs progress to stdout so changes are visible in container / Portainer logs. + +## Future changes + +- Every time you introduce a non-trivial schema change, update: + - `backend/app/models.py` + - `backend/app/migrations.py` + - This document (`docs/migrations.md`) – add a short description of the new migration. + +- When sharing the repository state (for example in a ZIP), always include the current `migrations.py` + and this document so the migration history and policy are clear. diff --git a/version.txt b/version.txt new file mode 100644 index 0000000..600c007 --- /dev/null +++ b/version.txt @@ -0,0 +1 @@ +v0.1.14
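To make the idempotency requirement in `docs/migrations.md` concrete, here is a minimal sketch of what such a migration function could look like. It assumes Flask-SQLAlchemy with a PostgreSQL-style `information_schema`; the `jobs` table and `notes` column are invented for the example, and this is not the project's actual migration code.

```python
from sqlalchemy import text

from .database import db  # the shared SQLAlchemy instance described in the manifest


def migrate_example_add_notes_column():
    """Idempotent example: add a 'notes' column to 'jobs' only if it is missing."""
    engine = db.get_engine()
    with engine.begin() as conn:
        exists = conn.execute(
            text(
                "SELECT 1 FROM information_schema.columns "
                "WHERE table_name = 'jobs' AND column_name = 'notes'"
            )
        ).first()
        if exists:
            print("[migrations] jobs.notes already present, skipping")  # safe no-op on re-run
            return
        conn.execute(text('ALTER TABLE "jobs" ADD COLUMN notes TEXT'))
        print("[migrations] added jobs.notes")
```

A function written this way can be added to `run_migrations()` and executed on every startup without side effects once its change is already in place.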