From 58268a49067c6a224d1aed4eda2222f1dd338550 Mon Sep 17 00:00:00 2001 From: Ivo Oskamp Date: Sun, 22 Mar 2026 19:34:40 +0100 Subject: [PATCH] Implement library/home/stats/backup updates and refresh docs --- containers/novela/main.py | 3 + containers/novela/migrations.py | 41 +- containers/novela/routers/backup.py | 712 +++++++++++++++++++++-- containers/novela/routers/common.py | 21 +- containers/novela/routers/library.py | 352 ++++++++++- containers/novela/static/library.css | 183 ++++++ containers/novela/static/library.js | 378 +++++++++++- containers/novela/templates/backup.html | 147 ++++- containers/novela/templates/home.html | 351 +++++++++-- containers/novela/templates/library.html | 5 +- docs/BLUEPRINT.md | 420 ------------- docs/TECHNICAL.md | 246 +++++--- docs/changelog-develop.md | 47 ++ stack/novela.env | 3 - 14 files changed, 2235 insertions(+), 674 deletions(-) delete mode 100644 docs/BLUEPRINT.md create mode 100644 docs/changelog-develop.md diff --git a/containers/novela/main.py b/containers/novela/main.py index d32781a..9aba2f4 100644 --- a/containers/novela/main.py +++ b/containers/novela/main.py @@ -6,6 +6,7 @@ from fastapi.staticfiles import StaticFiles from db import close_pool, init_pool from migrations import run_migrations +from routers.backup import start_backup_scheduler, stop_backup_scheduler from routers import ( backup_router, editor_router, @@ -20,9 +21,11 @@ from routers import ( async def lifespan(app: FastAPI): init_pool() run_migrations() + await start_backup_scheduler() try: yield finally: + await stop_backup_scheduler() close_pool() diff --git a/containers/novela/migrations.py b/containers/novela/migrations.py index b1b02d1..ef42d19 100644 --- a/containers/novela/migrations.py +++ b/containers/novela/migrations.py @@ -123,12 +123,14 @@ def migrate_create_credentials() -> None: CREATE TABLE IF NOT EXISTS credentials ( id SERIAL PRIMARY KEY, site VARCHAR(255) UNIQUE NOT NULL, - username VARCHAR(255) NOT NULL, - password VARCHAR(255) NOT NULL, + username TEXT NOT NULL, + password TEXT NOT NULL, updated_at TIMESTAMP DEFAULT NOW() ) """ ) + _exec("ALTER TABLE credentials ALTER COLUMN username TYPE TEXT") + _exec("ALTER TABLE credentials ALTER COLUMN password TYPE TEXT") def migrate_create_break_patterns() -> None: @@ -191,6 +193,40 @@ def migrate_create_backup_log() -> None: ) +def migrate_create_perf_indexes() -> None: + # Match library list sorting and common filters. + _exec( + """ + CREATE INDEX IF NOT EXISTS idx_library_sort_coalesce + ON library ( + (COALESCE(publisher, '')), + (COALESCE(author, '')), + (COALESCE(series, '')), + series_index, + (COALESCE(title, '')) + ) + """ + ) + _exec("CREATE INDEX IF NOT EXISTS idx_library_needs_review ON library (needs_review)") + _exec("CREATE INDEX IF NOT EXISTS idx_library_archived ON library (archived)") + + # Speeds grouped reads + recent-read lookups. + _exec( + """ + CREATE INDEX IF NOT EXISTS idx_reading_sessions_filename_readat + ON reading_sessions (filename, read_at DESC) + """ + ) + + # Helps ORDER BY filename, tag fetch for tag-map construction. 
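+    # A btree on (filename, tag) can also hand rows back already in
+    # ORDER BY filename, tag order, so the planner may skip the sort entirely.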
+ _exec( + """ + CREATE INDEX IF NOT EXISTS idx_book_tags_filename_tag + ON book_tags (filename, tag) + """ + ) + + def run_migrations() -> None: migrate_create_library() migrate_create_book_tags() @@ -200,4 +236,5 @@ def run_migrations() -> None: migrate_create_credentials() migrate_create_break_patterns() migrate_create_backup_log() + migrate_create_perf_indexes() migrate_seed_break_patterns() diff --git a/containers/novela/routers/backup.py b/containers/novela/routers/backup.py index 6f71447..0ce7ee9 100644 --- a/containers/novela/routers/backup.py +++ b/containers/novela/routers/backup.py @@ -1,3 +1,5 @@ +import asyncio +import hashlib import json import os import shutil @@ -22,14 +24,21 @@ LIBRARY_DIR = Path(os.environ.get("LIBRARY_DIR", "library")) CONFIG_DIR = Path(os.environ.get("CONFIG_DIR", "config")) CONFIG_DIR.mkdir(parents=True, exist_ok=True) MANIFEST_PATH = CONFIG_DIR / "backup_manifest.json" -DROPBOX_ROOT = (os.environ.get("DROPBOX_BACKUP_ROOT", "/novela") or "/novela").rstrip("/") +DEFAULT_DROPBOX_ROOT = "/novela" +DEFAULT_RETENTION_COUNT = 14 +DEFAULT_SCHEDULE_ENABLED = False +DEFAULT_SCHEDULE_INTERVAL_HOURS = 24 + + +BACKUP_TASKS: dict[int, asyncio.Task] = {} +SCHEDULER_TASK: asyncio.Task | None = None def _now_iso() -> str: return datetime.now(timezone.utc).isoformat() -def _load_manifest() -> dict[str, dict[str, float | int]]: +def _load_manifest() -> dict[str, dict[str, float | int | str]]: if not MANIFEST_PATH.exists(): return {} try: @@ -41,22 +50,25 @@ def _load_manifest() -> dict[str, dict[str, float | int]]: return {} -def _save_manifest(manifest: dict[str, dict[str, float | int]]) -> None: +def _save_manifest(manifest: dict[str, dict[str, float | int | str]]) -> None: MANIFEST_PATH.write_text(json.dumps(manifest, indent=2, sort_keys=True), encoding="utf-8") -def _load_dropbox_token() -> str: +def _dropbox_credential_details() -> dict: with get_db_conn() as conn: with conn: with conn.cursor() as cur: - cur.execute("SELECT username, password FROM credentials WHERE site = 'dropbox' LIMIT 1") + cur.execute( + "SELECT username, password, updated_at FROM credentials WHERE site = 'dropbox' LIMIT 1" + ) row = cur.fetchone() if not row: - return "" + return {"configured": False, "token": "", "updated_at": None} - username_raw, password_raw = row + username_raw, password_raw, updated_at = row username = decrypt_value(username_raw) password = decrypt_value(password_raw) + token = (password or username or "").strip() if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw): cur.execute( @@ -64,11 +76,252 @@ def _load_dropbox_token() -> str: UPDATE credentials SET username = %s, password = %s, updated_at = NOW() WHERE site = 'dropbox' + RETURNING updated_at """, (encrypt_value(username), encrypt_value(password)), ) + upd = cur.fetchone() + if upd: + updated_at = upd[0] - return (password or username or "").strip() + return { + "configured": bool(token), + "token": token, + "updated_at": updated_at.isoformat() if updated_at else None, + } + + +def _load_dropbox_token() -> str: + return _dropbox_credential_details().get("token", "") + + +def _normalize_dropbox_root(value: str | None) -> str: + root = (value or "").strip() or DEFAULT_DROPBOX_ROOT + if not root.startswith("/"): + root = "/" + root + root = "/" + "/".join(part for part in root.split("/") if part) + return root or DEFAULT_DROPBOX_ROOT + + +def _dropbox_root_details() -> dict: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + "SELECT username, password, 
updated_at FROM credentials WHERE site = 'dropbox_backup_root' LIMIT 1" + ) + row = cur.fetchone() + if not row: + env_val = os.environ.get("DROPBOX_BACKUP_ROOT", DEFAULT_DROPBOX_ROOT) + return { + "root": _normalize_dropbox_root(env_val), + "updated_at": None, + } + + username_raw, password_raw, updated_at = row + username = decrypt_value(username_raw) + password = decrypt_value(password_raw) + root = _normalize_dropbox_root(password or username or DEFAULT_DROPBOX_ROOT) + + if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw): + cur.execute( + """ + UPDATE credentials + SET username = %s, password = %s, updated_at = NOW() + WHERE site = 'dropbox_backup_root' + RETURNING updated_at + """, + (encrypt_value(""), encrypt_value(root)), + ) + upd = cur.fetchone() + if upd: + updated_at = upd[0] + + return { + "root": root, + "updated_at": updated_at.isoformat() if updated_at else None, + } + + +def _load_dropbox_root() -> str: + return _dropbox_root_details().get("root", DEFAULT_DROPBOX_ROOT) + + +def _dropbox_retention_details() -> dict: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + "SELECT username, password, updated_at FROM credentials WHERE site = 'dropbox_backup_retention' LIMIT 1" + ) + row = cur.fetchone() + if not row: + return {"retention_count": DEFAULT_RETENTION_COUNT, "updated_at": None} + + username_raw, password_raw, updated_at = row + username = decrypt_value(username_raw) + password = decrypt_value(password_raw) + raw = (password or username or "").strip() + try: + retention_count = max(1, int(raw)) + except Exception: + retention_count = DEFAULT_RETENTION_COUNT + + if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw): + cur.execute( + """ + UPDATE credentials + SET username = %s, password = %s, updated_at = NOW() + WHERE site = 'dropbox_backup_retention' + RETURNING updated_at + """, + (encrypt_value(""), encrypt_value(str(retention_count))), + ) + upd = cur.fetchone() + if upd: + updated_at = upd[0] + + return { + "retention_count": retention_count, + "updated_at": updated_at.isoformat() if updated_at else None, + } + + +def _load_dropbox_retention_count() -> int: + return int(_dropbox_retention_details().get("retention_count", DEFAULT_RETENTION_COUNT)) + + +def _dropbox_schedule_details() -> dict: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + "SELECT username, password, updated_at FROM credentials WHERE site = 'dropbox_backup_schedule' LIMIT 1" + ) + row = cur.fetchone() + if not row: + return { + "enabled": DEFAULT_SCHEDULE_ENABLED, + "interval_hours": DEFAULT_SCHEDULE_INTERVAL_HOURS, + "updated_at": None, + } + + username_raw, password_raw, updated_at = row + username = decrypt_value(username_raw) + password = decrypt_value(password_raw) + raw = (password or username or "").strip().lower() + + enabled = False + interval_hours = DEFAULT_SCHEDULE_INTERVAL_HOURS + try: + obj = json.loads(raw) if raw.startswith("{") else None + except Exception: + obj = None + + if isinstance(obj, dict): + enabled = bool(obj.get("enabled", DEFAULT_SCHEDULE_ENABLED)) + try: + interval_hours = max(1, int(obj.get("interval_hours", DEFAULT_SCHEDULE_INTERVAL_HOURS))) + except Exception: + interval_hours = DEFAULT_SCHEDULE_INTERVAL_HOURS + else: + parts = raw.split(":") + if len(parts) == 2: + enabled = parts[0] in {"1", "true", "yes", "on"} + try: + interval_hours = max(1, int(parts[1])) + except Exception: + interval_hours = DEFAULT_SCHEDULE_INTERVAL_HOURS + + 
norm = json.dumps({"enabled": enabled, "interval_hours": interval_hours}, separators=(",", ":")) + if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw): + cur.execute( + """ + UPDATE credentials + SET username = %s, password = %s, updated_at = NOW() + WHERE site = 'dropbox_backup_schedule' + RETURNING updated_at + """, + (encrypt_value(""), encrypt_value(norm)), + ) + upd = cur.fetchone() + if upd: + updated_at = upd[0] + + return { + "enabled": enabled, + "interval_hours": interval_hours, + "updated_at": updated_at.isoformat() if updated_at else None, + } + + +def _load_backup_schedule() -> tuple[bool, int]: + d = _dropbox_schedule_details() + return bool(d.get("enabled", DEFAULT_SCHEDULE_ENABLED)), int(d.get("interval_hours", DEFAULT_SCHEDULE_INTERVAL_HOURS)) + + +def _save_backup_schedule(enabled: bool, interval_hours: int) -> None: + interval = max(1, int(interval_hours)) + payload = json.dumps({"enabled": bool(enabled), "interval_hours": interval}, separators=(",", ":")) + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO credentials (site, username, password, updated_at) + VALUES ('dropbox_backup_schedule', %s, %s, NOW()) + ON CONFLICT (site) DO UPDATE + SET username = EXCLUDED.username, + password = EXCLUDED.password, + updated_at = NOW() + """, + (encrypt_value(""), encrypt_value(payload)), + ) + + +def _dropbox_join(root: str, *parts: str) -> str: + clean_root = _normalize_dropbox_root(root) + segs = [p.strip("/") for p in parts if p and p.strip("/")] + if clean_root == "/": + return "/" + "/".join(segs) if segs else "/" + if not segs: + return clean_root + return clean_root + "/" + "/".join(segs) + + +def _save_dropbox_root(root: str) -> None: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO credentials (site, username, password, updated_at) + VALUES ('dropbox_backup_root', %s, %s, NOW()) + ON CONFLICT (site) DO UPDATE + SET username = EXCLUDED.username, + password = EXCLUDED.password, + updated_at = NOW() + """, + (encrypt_value(""), encrypt_value(_normalize_dropbox_root(root))), + ) + + +def _save_dropbox_retention_count(retention_count: int) -> None: + val = max(1, int(retention_count)) + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO credentials (site, username, password, updated_at) + VALUES ('dropbox_backup_retention', %s, %s, NOW()) + ON CONFLICT (site) DO UPDATE + SET username = EXCLUDED.username, + password = EXCLUDED.password, + updated_at = NOW() + """, + (encrypt_value(""), encrypt_value(str(val))), + ) def _dbx() -> dropbox.Dropbox: @@ -105,6 +358,48 @@ def _dropbox_upload_bytes(client: dropbox.Dropbox, target_path: str, data: bytes return len(data) +def _dropbox_exists(client: dropbox.Dropbox, path: str) -> bool: + try: + client.files_get_metadata(path) + return True + except ApiError as e: + text = str(e).lower() + if "not_found" in text or "path/not_found" in text: + return False + raise + + +def _dropbox_list_files_recursive(client: dropbox.Dropbox, root: str) -> list[str]: + paths: list[str] = [] + try: + res = client.files_list_folder(root, recursive=True) + except ApiError as e: + text = str(e).lower() + if "not_found" in text or "path/not_found" in text: + return [] + raise + + while True: + for entry in res.entries: + if isinstance(entry, dropbox.files.FileMetadata): + paths.append(entry.path_lower or entry.path_display or "") + if not res.has_more: + break + res 
= client.files_list_folder_continue(res.cursor) + return [p for p in paths if p] + + +def _dropbox_delete_paths(client: dropbox.Dropbox, paths: list[str]) -> int: + deleted = 0 + for p in paths: + try: + client.files_delete_v2(p) + deleted += 1 + except ApiError: + pass + return deleted + + def _iter_library_files() -> list[Path]: if not LIBRARY_DIR.exists(): return [] @@ -116,6 +411,23 @@ def _current_file_state(path: Path) -> dict[str, float | int]: return {"mtime": st.st_mtime, "size": st.st_size} +def _sha256_file(path: Path) -> str: + h = hashlib.sha256() + with path.open("rb") as f: + for chunk in iter(lambda: f.read(1024 * 1024), b""): + h.update(chunk) + return h.hexdigest() + + +def _snapshot_name() -> str: + stamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S") + return f"snapshot-{stamp}.json" + + +def _object_path(objects_root: str, sha256: str) -> str: + return _dropbox_join(objects_root, sha256[:2], sha256) + + def _pg_dump_cmd(tmp_path: Path) -> list[str]: return [ "pg_dump", @@ -153,6 +465,39 @@ def _run_pg_dump() -> tuple[bytes, str]: tmp_path.unlink(missing_ok=True) +def _has_running_backup() -> bool: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT id + FROM backup_log + WHERE status = 'running' AND finished_at IS NULL + ORDER BY started_at DESC + """ + ) + rows = [int(r[0]) for r in cur.fetchall()] + if not rows: + return False + + active_ids = set(BACKUP_TASKS.keys()) + stale_ids = [rid for rid in rows if rid not in active_ids] + if stale_ids: + cur.execute( + """ + UPDATE backup_log + SET status = 'error', + error_msg = COALESCE(error_msg, 'Interrupted: service restart or crash'), + finished_at = NOW() + WHERE id = ANY(%s) + """, + (stale_ids,), + ) + + return any(rid in active_ids for rid in rows) + + def _insert_backup_log_running() -> int: with get_db_conn() as conn: with conn: @@ -185,6 +530,62 @@ def _finish_backup_log(log_id: int, *, status: str, files_count: int | None, siz ) +def _list_snapshot_paths(client: dropbox.Dropbox, snapshots_root: str) -> list[str]: + files = _dropbox_list_files_recursive(client, snapshots_root) + return sorted([p for p in files if p.endswith(".json")], reverse=True) + + +def _load_snapshot_data(client: dropbox.Dropbox, snapshot_path: str) -> dict: + _meta, res = client.files_download(snapshot_path) + raw = res.content + parsed = json.loads(raw.decode("utf-8", errors="replace")) + return parsed if isinstance(parsed, dict) else {} + + +def _enforce_snapshot_retention( + client: dropbox.Dropbox, + snapshots_root: str, + keep_count: int, +) -> tuple[list[str], list[str]]: + all_snapshots = _list_snapshot_paths(client, snapshots_root) + keep = max(1, int(keep_count)) + kept = all_snapshots[:keep] + to_delete = all_snapshots[keep:] + if to_delete: + _dropbox_delete_paths(client, to_delete) + return kept, to_delete + + +def _collect_hashes_from_snapshots(client: dropbox.Dropbox, snapshot_paths: list[str]) -> set[str]: + used: set[str] = set() + for path in snapshot_paths: + try: + snap = _load_snapshot_data(client, path) + except Exception: + continue + files = snap.get("files", {}) if isinstance(snap, dict) else {} + if not isinstance(files, dict): + continue + for item in files.values(): + if not isinstance(item, dict): + continue + sha = str(item.get("sha256") or "").lower() + if len(sha) == 64 and all(c in "0123456789abcdef" for c in sha): + used.add(sha) + return used + + +def _prune_orphan_objects(client: dropbox.Dropbox, objects_root: str, referenced_hashes: set[str]) -> 
int: + object_files = _dropbox_list_files_recursive(client, objects_root) + to_delete: list[str] = [] + for p in object_files: + name = Path(p).name.lower() + if len(name) == 64 and all(c in "0123456789abcdef" for c in name): + if name not in referenced_hashes: + to_delete.append(p) + return _dropbox_delete_paths(client, to_delete) + + def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]: client = None if dry_run else _dbx() manifest = _load_manifest() @@ -192,30 +593,76 @@ def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]: uploaded_count = 0 uploaded_size = 0 - new_manifest: dict[str, dict[str, float | int]] = {} + new_manifest: dict[str, dict[str, float | int | str]] = {} + + dropbox_root = _load_dropbox_root() + retention_count = _load_dropbox_retention_count() + + objects_root = _dropbox_join(dropbox_root, "library_objects") + snapshots_root = _dropbox_join(dropbox_root, "library_snapshots") - library_root = f"{DROPBOX_ROOT}/library" if client is not None: - _ensure_dropbox_dir(client, library_root) + _ensure_dropbox_dir(client, objects_root) + _ensure_dropbox_dir(client, snapshots_root) + + snapshot_files: dict[str, dict[str, float | int | str]] = {} for path in files: rel = path.relative_to(LIBRARY_DIR).as_posix() state = _current_file_state(path) - new_manifest[rel] = state + prev = manifest.get(rel, {}) if isinstance(manifest.get(rel), dict) else {} - if manifest.get(rel) == state: - continue - - data = path.read_bytes() - target = f"{library_root}/{rel}" - if client is not None: - uploaded_size += _dropbox_upload_bytes(client, target, data) + sha256 = "" + if ( + prev + and prev.get("mtime") == state["mtime"] + and prev.get("size") == state["size"] + and isinstance(prev.get("sha256"), str) + ): + sha256 = str(prev.get("sha256")) else: - uploaded_size += len(data) + sha256 = _sha256_file(path) + + entry = {"mtime": state["mtime"], "size": state["size"], "sha256": sha256} + new_manifest[rel] = entry + snapshot_files[rel] = entry + + object_target = _object_path(objects_root, sha256) + if client is not None: + if not _dropbox_exists(client, object_target): + data = path.read_bytes() + uploaded_size += _dropbox_upload_bytes(client, object_target, data) + uploaded_count += 1 + else: + # Dry run reports potential upload work for changed objects. 
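+                # No Dropbox calls happen on this path; a file is counted
+                # only when its hash is new or differs from the manifest.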
+ if not prev or prev.get("sha256") != sha256: + uploaded_size += int(state["size"]) + uploaded_count += 1 + + snapshot = { + "created_at": _now_iso(), + "retention_count": retention_count, + "files": snapshot_files, + } + snapshot_data = json.dumps(snapshot, sort_keys=True, separators=(",", ":")).encode("utf-8") + snapshot_name = _snapshot_name() + snapshot_target = _dropbox_join(snapshots_root, snapshot_name) + + if client is not None: + uploaded_size += _dropbox_upload_bytes(client, snapshot_target, snapshot_data) + uploaded_count += 1 + + kept_snapshots, _deleted_snapshots = _enforce_snapshot_retention( + client, snapshots_root, retention_count + ) + referenced_hashes = _collect_hashes_from_snapshots(client, kept_snapshots) + _prune_orphan_objects(client, objects_root, referenced_hashes) + else: + uploaded_size += len(snapshot_data) uploaded_count += 1 dump_data, dump_name = _run_pg_dump() - dump_target = f"{DROPBOX_ROOT}/postgres/{dump_name}" + dump_target = _dropbox_join(dropbox_root, "postgres", dump_name) if client is not None: uploaded_size += _dropbox_upload_bytes(client, dump_target, dump_data) else: @@ -235,6 +682,97 @@ async def backup_page(request: Request): return templates.TemplateResponse(request, template, {"active": "backup"}) +@router.get("/api/backup/credentials") +async def backup_dropbox_credentials(): + details = _dropbox_credential_details() + root_details = _dropbox_root_details() + retention_details = _dropbox_retention_details() + token = details.get("token", "") + preview = "" + if token: + preview = f"{token[:4]}...{token[-4:]}" if len(token) >= 10 else "(configured)" + return { + "configured": bool(token), + "token_preview": preview, + "updated_at": details.get("updated_at"), + "dropbox_root": root_details.get("root", DEFAULT_DROPBOX_ROOT), + "root_updated_at": root_details.get("updated_at"), + "retention_count": int(retention_details.get("retention_count", DEFAULT_RETENTION_COUNT)), + "retention_updated_at": retention_details.get("updated_at"), + "schedule_enabled": _dropbox_schedule_details().get("enabled", DEFAULT_SCHEDULE_ENABLED), + "schedule_interval_hours": _dropbox_schedule_details().get("interval_hours", DEFAULT_SCHEDULE_INTERVAL_HOURS), + "schedule_updated_at": _dropbox_schedule_details().get("updated_at"), + } + + +@router.post("/api/backup/credentials") +async def backup_dropbox_credentials_save(request: Request): + body = {} + try: + body = await request.json() + except Exception: + pass + + try: + existing_token = _load_dropbox_token() + token = (body.get("token") or "").strip() or existing_token + if not token: + return {"ok": False, "error": "Dropbox token is required."} + + dropbox_root = _normalize_dropbox_root(body.get("dropbox_root") or _load_dropbox_root()) + raw_retention = body.get("retention_count", _load_dropbox_retention_count()) + try: + retention_count = max(1, int(raw_retention)) + except Exception: + retention_count = DEFAULT_RETENTION_COUNT + + schedule_enabled = bool(body.get("schedule_enabled", _load_backup_schedule()[0])) + raw_interval = body.get("schedule_interval_hours", _load_backup_schedule()[1]) + try: + schedule_interval_hours = max(1, int(raw_interval)) + except Exception: + schedule_interval_hours = DEFAULT_SCHEDULE_INTERVAL_HOURS + + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO credentials (site, username, password, updated_at) + VALUES ('dropbox', %s, %s, NOW()) + ON CONFLICT (site) DO UPDATE + SET username = EXCLUDED.username, + password = 
EXCLUDED.password, + updated_at = NOW() + """, + (encrypt_value(""), encrypt_value(token)), + ) + + _save_dropbox_root(dropbox_root) + _save_dropbox_retention_count(retention_count) + _save_backup_schedule(schedule_enabled, schedule_interval_hours) + return { + "ok": True, + "dropbox_root": dropbox_root, + "retention_count": retention_count, + "schedule_enabled": schedule_enabled, + "schedule_interval_hours": schedule_interval_hours, + } + except Exception as e: + return {"ok": False, "error": str(e)} + + +@router.delete("/api/backup/credentials") +async def backup_dropbox_credentials_delete(): + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + "DELETE FROM credentials WHERE site IN ('dropbox', 'dropbox_backup_root', 'dropbox_backup_retention', 'dropbox_backup_schedule')" + ) + return {"ok": True} + + @router.get("/api/backup/health") async def backup_health(): token_present = bool(_load_dropbox_token()) @@ -249,10 +787,16 @@ async def backup_health(): except Exception as e: dropbox_error = str(e) + dropbox_root = _load_dropbox_root() + retention_count = _load_dropbox_retention_count() + schedule_enabled, schedule_interval_hours = _load_backup_schedule() + return { "token_present": token_present, "dropbox_ok": dropbox_ok, "dropbox_error": dropbox_error, + "dropbox_root": dropbox_root, + "retention_count": retention_count, "pg_dump_available": bool(pg_dump_path), "pg_dump_path": pg_dump_path, "library_exists": LIBRARY_DIR.exists(), @@ -313,6 +857,87 @@ async def backup_history(): ] +def _start_backup_task(*, dry_run: bool) -> int: + log_id = _insert_backup_log_running() + task = asyncio.create_task(_run_backup_job(log_id, dry_run)) + BACKUP_TASKS[log_id] = task + return log_id + + +def _is_scheduled_backup_due(interval_hours: int) -> bool: + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT finished_at + FROM backup_log + WHERE status = 'success' AND finished_at IS NOT NULL + ORDER BY finished_at DESC + LIMIT 1 + """ + ) + row = cur.fetchone() + if not row or not row[0]: + return True + + last = row[0] + if last.tzinfo is None: + last = last.replace(tzinfo=timezone.utc) + now = datetime.now(timezone.utc) + return (now - last).total_seconds() >= max(1, int(interval_hours)) * 3600 + + +async def _scheduler_loop() -> None: + while True: + try: + enabled, interval_hours = _load_backup_schedule() + if enabled and not _has_running_backup() and _is_scheduled_backup_due(interval_hours): + _start_backup_task(dry_run=False) + except Exception: + # Keep scheduler alive; errors are visible in backup history when runs fail. 
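+            # The broad except is deliberate: a transient DB or Dropbox error
+            # must not kill the 60-second poll loop.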
+ pass + await asyncio.sleep(60) + + +async def start_backup_scheduler() -> None: + global SCHEDULER_TASK + if SCHEDULER_TASK is None or SCHEDULER_TASK.done(): + SCHEDULER_TASK = asyncio.create_task(_scheduler_loop()) + + +async def stop_backup_scheduler() -> None: + global SCHEDULER_TASK + if SCHEDULER_TASK is not None: + SCHEDULER_TASK.cancel() + try: + await SCHEDULER_TASK + except asyncio.CancelledError: + pass + SCHEDULER_TASK = None + + +async def _run_backup_job(log_id: int, dry_run: bool) -> None: + try: + files_count, size_bytes = await asyncio.to_thread(_run_backup_internal, dry_run=dry_run) + _finish_backup_log( + log_id, + status="success", + files_count=files_count, + size_bytes=size_bytes, + error_msg=None, + ) + except Exception as e: + _finish_backup_log( + log_id, + status="error", + files_count=None, + size_bytes=None, + error_msg=str(e), + ) + finally: + BACKUP_TASKS.pop(log_id, None) + + @router.post("/api/backup/run") async def run_backup(request: Request): body = {} @@ -322,38 +947,21 @@ async def run_backup(request: Request): pass dry_run = bool(body.get("dry_run", False)) - log_id = _insert_backup_log_running() - try: - files_count, size_bytes = _run_backup_internal(dry_run=dry_run) - _finish_backup_log( - log_id, - status="success", - files_count=files_count, - size_bytes=size_bytes, - error_msg=None, - ) - return { - "ok": True, - "backup_id": log_id, - "status": "success", - "dry_run": dry_run, - "files_count": files_count, - "size_bytes": size_bytes, - "finished_at": _now_iso(), - } - except Exception as e: - _finish_backup_log( - log_id, - status="error", - files_count=None, - size_bytes=None, - error_msg=str(e), - ) + if _has_running_backup(): return { "ok": False, - "backup_id": log_id, - "status": "error", - "dry_run": dry_run, - "error": str(e), + "status": "running", + "error": "A backup is already running.", "finished_at": _now_iso(), } + + log_id = _start_backup_task(dry_run=dry_run) + + return { + "ok": True, + "backup_id": log_id, + "status": "running", + "dry_run": dry_run, + "message": "Backup started in background.", + "started_at": _now_iso(), + } diff --git a/containers/novela/routers/common.py b/containers/novela/routers/common.py index f71ace1..493ddb6 100644 --- a/containers/novela/routers/common.py +++ b/containers/novela/routers/common.py @@ -346,22 +346,23 @@ def list_library_json() -> list[dict]: l.series, l.series_index, l.publication_status, l.want_to_read, l.archived, l.needs_review, l.updated_at, rp.progress, rp.cfi, rp.page, - COUNT(rs.id)::int AS read_count, - MAX(rs.read_at) AS last_read + COALESCE(rs.read_count, 0)::int AS read_count, + rs.last_read, + (cc.filename IS NOT NULL) AS has_cached_cover FROM library l LEFT JOIN reading_progress rp ON rp.filename = l.filename - LEFT JOIN reading_sessions rs ON rs.filename = l.filename - GROUP BY l.filename, l.media_type, l.title, l.author, l.publisher, l.has_cover, - l.series, l.series_index, l.publication_status, l.want_to_read, - l.archived, l.needs_review, l.updated_at, rp.progress, rp.cfi, rp.page + LEFT JOIN ( + SELECT filename, COUNT(*)::int AS read_count, MAX(read_at) AS last_read + FROM reading_sessions + GROUP BY filename + ) rs ON rs.filename = l.filename + LEFT JOIN library_cover_cache cc ON cc.filename = l.filename ORDER BY COALESCE(l.publisher, ''), COALESCE(l.author, ''), COALESCE(l.series, ''), l.series_index, COALESCE(l.title, '') """ ) rows = cur.fetchall() - cur.execute("SELECT filename, tag, tag_type FROM book_tags ORDER BY tag") + cur.execute("SELECT filename, tag, 
tag_type FROM book_tags ORDER BY filename, tag") tags = cur.fetchall() - cur.execute("SELECT filename FROM library_cover_cache") - cached = {r[0] for r in cur.fetchall()} tag_map: dict[str, list[dict]] = {} for filename, tag, tag_type in tags: @@ -377,7 +378,7 @@ def list_library_json() -> list[dict]: "author": r[3] or "", "publisher": r[4] or "", "has_cover": bool(r[5]), - "has_cached_cover": r[0] in cached, + "has_cached_cover": bool(r[18]), "series": r[6] or "", "series_index": r[7] or 0, "publication_status": r[8] or "", diff --git a/containers/novela/routers/library.py b/containers/novela/routers/library.py index 2e441e4..0e4ed23 100644 --- a/containers/novela/routers/library.py +++ b/containers/novela/routers/library.py @@ -71,13 +71,18 @@ async def library_page(request: Request): @router.get("/api/library") -async def api_library(): - _sync_disk_to_db() +async def api_library(rescan: bool = False, include_file_info: bool = False): + # Fast path: avoid expensive full disk scan on every library page load. + # Use /library/rescan (or ?rescan=true) when a full sync is needed. + if rescan: + _sync_disk_to_db() + books = list_library_json() - for b in books: - p = resolve_library_path(b["filename"]) - if p and p.exists(): - b.update(relative_file_info(p)) + if include_file_info: + for b in books: + p = resolve_library_path(b["filename"]) + if p and p.exists(): + b.update(relative_file_info(p)) return books @@ -308,6 +313,46 @@ async def library_archive(filename: str): return {"ok": True, "archived": val} +@router.post("/library/new/mark-reviewed") +async def library_mark_new_reviewed(request: Request): + body = await request.json() + filenames = body.get("filenames", []) + if not isinstance(filenames, list): + return {"error": "filenames must be a list"} + + cleaned: list[str] = [] + seen: set[str] = set() + for raw in filenames: + if not isinstance(raw, str): + continue + name = raw.strip() + if not name or name in seen: + continue + full = resolve_library_path(name) + if full is None: + continue + cleaned.append(name) + seen.add(name) + + if not cleaned: + return {"ok": True, "updated": 0} + + placeholders = ", ".join(["%s"] * len(cleaned)) + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + f""" + UPDATE library + SET needs_review = FALSE, updated_at = NOW() + WHERE filename IN ({placeholders}) + """, + tuple(cleaned), + ) + updated = cur.rowcount or 0 + return {"ok": True, "updated": updated} + + @router.get("/home", response_class=HTMLResponse) async def home_page(request: Request): return templates.TemplateResponse(request, "home.html", {"active": "home"}) @@ -319,30 +364,165 @@ async def api_home(): with conn.cursor() as cur: cur.execute( """ - SELECT l.filename, l.title, l.author, l.media_type, + SELECT l.filename, l.title, l.author, l.has_cover, + l.series, l.series_index, l.publication_status, + l.media_type, COALESCE(rp.progress, 0) AS progress, - MAX(rs.read_at) AS last_read - FROM library l - LEFT JOIN reading_progress rp ON rp.filename = l.filename - LEFT JOIN reading_sessions rs ON rs.filename = l.filename - GROUP BY l.filename, l.title, l.author, l.media_type, rp.progress - ORDER BY last_read DESC NULLS LAST, l.updated_at DESC - LIMIT 30 + rp.cfi + FROM reading_progress rp + JOIN library l ON l.filename = rp.filename + WHERE rp.progress > 0 + AND l.archived = FALSE + ORDER BY rp.updated_at DESC """ ) - rows = cur.fetchall() + cr_rows = cur.fetchall() + + cur.execute( + """ + SELECT l.filename, l.title, l.author, l.has_cover, 
l.publication_status, l.media_type + FROM library l + LEFT JOIN reading_sessions rs ON rs.filename = l.filename + LEFT JOIN reading_progress rp ON rp.filename = l.filename + WHERE COALESCE(l.series, '') = '' + AND l.filename NOT LIKE '%/Series/%' + AND l.archived = FALSE + AND rs.id IS NULL + AND COALESCE(rp.progress, 0) = 0 + AND EXISTS ( + SELECT 1 + FROM book_tags bt + WHERE bt.filename = l.filename + AND bt.tag = 'Shorts' + AND bt.tag_type IN ('tag', 'subject') + ) + GROUP BY l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type + ORDER BY RANDOM() + """ + ) + shorts_rows = cur.fetchall() + + cur.execute( + """ + SELECT l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type + FROM library l + LEFT JOIN reading_sessions rs ON rs.filename = l.filename + LEFT JOIN reading_progress rp ON rp.filename = l.filename + WHERE COALESCE(l.series, '') = '' + AND l.filename NOT LIKE '%/Series/%' + AND l.archived = FALSE + AND rs.id IS NULL + AND COALESCE(rp.progress, 0) = 0 + AND NOT EXISTS ( + SELECT 1 + FROM book_tags bt + WHERE bt.filename = l.filename + AND bt.tag = 'Shorts' + AND bt.tag_type IN ('tag', 'subject') + ) + GROUP BY l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type + ORDER BY RANDOM() + """ + ) + novels_rows = cur.fetchall() + + cur.execute( + """ + SELECT l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type, + MAX(rs.read_at) AS last_read + FROM library l + JOIN reading_sessions rs ON rs.filename = l.filename + WHERE COALESCE(l.series, '') = '' + AND l.filename NOT LIKE '%/Series/%' + AND l.archived = FALSE + AND EXISTS ( + SELECT 1 + FROM book_tags bt + WHERE bt.filename = l.filename + AND bt.tag = 'Shorts' + AND bt.tag_type IN ('tag', 'subject') + ) + GROUP BY l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type + ORDER BY MAX(rs.read_at) ASC + """ + ) + shorts_read_rows = cur.fetchall() + + cur.execute( + """ + SELECT l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type, + MAX(rs.read_at) AS last_read + FROM library l + JOIN reading_sessions rs ON rs.filename = l.filename + WHERE COALESCE(l.series, '') = '' + AND l.filename NOT LIKE '%/Series/%' + AND l.archived = FALSE + AND NOT EXISTS ( + SELECT 1 + FROM book_tags bt + WHERE bt.filename = l.filename + AND bt.tag = 'Shorts' + AND bt.tag_type IN ('tag', 'subject') + ) + GROUP BY l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type + ORDER BY MAX(rs.read_at) ASC + """ + ) + novels_read_rows = cur.fetchall() + + def simple(rows): + return [ + { + "filename": r[0], + "title": r[1] or "", + "author": r[2] or "", + "has_cover": bool(r[3]), + "publication_status": r[4] or "", + "media_type": r[5] or "epub", + "progress": 0, + "series": "", + "series_index": 0, + } + for r in rows + ] + + def simple_read(rows): + return [ + { + "filename": r[0], + "title": r[1] or "", + "author": r[2] or "", + "has_cover": bool(r[3]), + "publication_status": r[4] or "", + "media_type": r[5] or "epub", + "last_read": r[6].isoformat() if r[6] else None, + "progress": 0, + "series": "", + "series_index": 0, + } + for r in rows + ] + return { "continue_reading": [ { "filename": r[0], "title": r[1] or "", "author": r[2] or "", - "media_type": r[3], - "progress": r[4] or 0, - "last_read": r[5].isoformat() if r[5] else None, + "has_cover": bool(r[3]), + "series": r[4] or "", + "series_index": r[5] or 0, + "publication_status": r[6] or "", + "media_type": r[7] or "epub", + 
"progress": r[8] or 0, + "progress_cfi": r[9], } - for r in rows - ] + for r in cr_rows + ], + "shorts_unread": simple(shorts_rows), + "novels_unread": simple(novels_rows), + "shorts_read": simple_read(shorts_read_rows), + "novels_read": simple_read(novels_read_rows), } @@ -357,10 +537,13 @@ async def api_stats(): with conn.cursor() as cur: cur.execute("SELECT COUNT(*)::int FROM library") total_books = cur.fetchone()[0] + cur.execute("SELECT COUNT(*)::int FROM reading_sessions") total_reads = cur.fetchone()[0] + cur.execute("SELECT COUNT(DISTINCT filename)::int FROM reading_sessions") unique_books_read = cur.fetchone()[0] + cur.execute( """ SELECT media_type, COUNT(*)::int @@ -370,14 +553,143 @@ async def api_stats(): """ ) by_type = [{"media_type": r[0], "count": r[1]} for r in cur.fetchall()] + + cur.execute( + """ + WITH months AS ( + SELECT date_trunc('month', CURRENT_DATE) - (n * interval '1 month') AS month_start + FROM generate_series(11, 0, -1) AS n + ), counts AS ( + SELECT date_trunc('month', read_at) AS month_start, COUNT(*)::int AS cnt + FROM reading_sessions + WHERE read_at >= date_trunc('month', CURRENT_DATE) - interval '11 months' + GROUP BY 1 + ) + SELECT to_char(m.month_start, 'YYYY-MM') AS month, COALESCE(c.cnt, 0)::int AS count + FROM months m + LEFT JOIN counts c ON c.month_start = m.month_start + ORDER BY m.month_start + """ + ) + reads_by_month = [{"month": r[0], "count": r[1]} for r in cur.fetchall()] + + cur.execute( + """ + SELECT EXTRACT(DOW FROM read_at)::int AS dow, COUNT(*)::int + FROM reading_sessions + GROUP BY 1 + """ + ) + reads_by_dow = [0] * 7 + for dow, count in cur.fetchall(): + idx = (int(dow) + 6) % 7 + reads_by_dow[idx] = int(count) + + cur.execute( + """ + SELECT EXTRACT(HOUR FROM read_at)::int AS hour, COUNT(*)::int + FROM reading_sessions + GROUP BY 1 + """ + ) + reads_by_hour = [0] * 24 + for hour, count in cur.fetchall(): + h = int(hour) + if 0 <= h <= 23: + reads_by_hour[h] = int(count) + + cur.execute( + """ + SELECT bt.tag AS name, COUNT(DISTINCT bt.filename)::int AS count + FROM book_tags bt + JOIN library l ON l.filename = bt.filename + WHERE bt.tag_type IN ('genre', 'subgenre') + GROUP BY bt.tag + ORDER BY count DESC, name ASC + """ + ) + genre_counts = [{"name": r[0], "count": r[1]} for r in cur.fetchall()] + + cur.execute( + """ + SELECT publisher AS name, COUNT(*)::int AS count + FROM library + WHERE COALESCE(TRIM(publisher), '') <> '' + GROUP BY publisher + ORDER BY count DESC, name ASC + """ + ) + publisher_counts = [{"name": r[0], "count": r[1]} for r in cur.fetchall()] + + cur.execute( + """ + SELECT + COALESCE(NULLIF(TRIM(l.title), ''), l.filename) AS title, + COALESCE(l.author, '') AS author, + COUNT(*)::int AS count + FROM reading_sessions rs + JOIN library l ON l.filename = rs.filename + GROUP BY l.filename, l.title, l.author + ORDER BY count DESC, MAX(rs.read_at) DESC + LIMIT 10 + """ + ) + top_books = [{"title": r[0], "author": r[1], "count": r[2]} for r in cur.fetchall()] + + cur.execute( + """ + SELECT + COALESCE(NULLIF(TRIM(l.title), ''), l.filename) AS title, + COALESCE(l.author, '') AS author, + COALESCE(l.publisher, '') AS publisher, + rs.read_at, + COALESCE( + array_remove( + array_agg(DISTINCT CASE WHEN bt.tag_type IN ('genre', 'subgenre') THEN bt.tag END), + NULL + ), + ARRAY[]::text[] + ) AS genres + FROM reading_sessions rs + JOIN library l ON l.filename = rs.filename + LEFT JOIN book_tags bt ON bt.filename = l.filename + GROUP BY rs.id, l.filename, l.title, l.author, l.publisher, rs.read_at + ORDER BY rs.read_at 
DESC + LIMIT 50 + """ + ) + history = [ + { + "title": r[0], + "author": r[1], + "publisher": r[2], + "read_at": r[3].isoformat() if r[3] else None, + "genres": list(r[4] or []), + } + for r in cur.fetchall() + ] + + fav_genre = genre_counts[0]["name"] if genre_counts else None + fav_publisher = publisher_counts[0]["name"] if publisher_counts else None + return { "total_books": total_books, "total_reads": total_reads, "unique_books_read": unique_books_read, "by_media_type": by_type, + "reads_by_month": reads_by_month, + "reads_by_dow": reads_by_dow, + "reads_by_hour": reads_by_hour, + "genre_counts": genre_counts, + "publisher_counts": publisher_counts, + "fav_genre": fav_genre, + "fav_publisher": fav_publisher, + "top_books": top_books, + "history": history, "generated_at": datetime.now(timezone.utc).isoformat(), } + @router.get("/library/list") async def library_list_compat(): return await api_library() diff --git a/containers/novela/static/library.css b/containers/novela/static/library.css index 7dd7a60..5edce11 100644 --- a/containers/novela/static/library.css +++ b/containers/novela/static/library.css @@ -454,3 +454,186 @@ html, body { border-top: 1px solid var(--border); padding-top: 0.8rem; } + +/* ── New view controls + list mode ─────────────────────────────────────── */ + +.new-controls { + margin-bottom: 1rem; +} + +.new-controls-bar { + display: flex; + flex-wrap: wrap; + align-items: center; + justify-content: space-between; + gap: 0.6rem; + border: 1px solid var(--border); + background: rgba(34, 31, 27, 0.5); + border-radius: var(--radius); + padding: 0.55rem 0.6rem; +} + +.new-view-toggle { + display: inline-flex; + gap: 0.3rem; +} + +.new-actions { + position: relative; + display: flex; + flex-wrap: wrap; + align-items: center; + justify-content: flex-end; + gap: 0.45rem; +} + +.btn.btn-view, +.btn.btn-light, +.btn.btn-mark-reviewed { + border: 1px solid var(--border); + background: var(--surface2); + color: var(--text-dim); +} + +.btn.btn-view.active { + border-color: rgba(200, 120, 58, 0.45); + background: rgba(200, 120, 58, 0.16); + color: var(--accent2); +} + +.btn.btn-light:hover, +.btn.btn-view:hover { + color: var(--text); +} + +.btn.btn-mark-reviewed { + border-color: rgba(107, 170, 107, 0.35); + background: rgba(107, 170, 107, 0.14); + color: var(--success); +} + +.btn.btn-mark-reviewed:hover { + background: rgba(107, 170, 107, 0.24); +} + +.btn.btn-mark-reviewed:disabled { + opacity: 0.45; + cursor: not-allowed; +} + +.new-selection-count { + font-family: var(--mono); + font-size: 0.68rem; + color: var(--text-dim); + padding: 0 0.1rem; +} + +.new-columns-menu { + display: none; + position: absolute; + top: calc(100% + 0.4rem); + right: 0; + z-index: 20; + border: 1px solid var(--border); + border-radius: var(--radius); + background: var(--surface); + min-width: 190px; + max-height: 260px; + overflow: auto; + padding: 0.35rem; + box-shadow: 0 8px 24px rgba(0, 0, 0, 0.35); +} + +.new-columns-menu.visible { + display: block; +} + +.new-col-item { + display: flex; + align-items: center; + gap: 0.45rem; + padding: 0.3rem 0.35rem; + border-radius: 4px; + font-family: var(--mono); + font-size: 0.68rem; + color: var(--text-dim); +} + +.new-col-item:hover { + background: var(--surface2); + color: var(--text); +} + +.new-list-wrap { + overflow: auto; + border: 1px solid var(--border); + border-radius: var(--radius); + background: rgba(34, 31, 27, 0.48); +} + +.new-list-table { + width: 100%; + min-width: 980px; + border-collapse: collapse; +} + +.new-list-table thead th { + 
text-align: left; + padding: 0.55rem 0.5rem; + font-family: var(--mono); + font-size: 0.63rem; + letter-spacing: 0.06em; + text-transform: uppercase; + color: var(--accent2); + border-bottom: 1px solid var(--border); + background: rgba(15, 14, 12, 0.35); +} + +.new-list-table tbody td { + padding: 0.52rem 0.5rem; + border-bottom: 1px solid rgba(46, 42, 36, 0.55); + font-size: 0.74rem; + color: var(--text); + vertical-align: top; +} + +.new-list-table tbody tr { + cursor: pointer; +} + +.new-list-table tbody tr:hover { + background: rgba(200, 120, 58, 0.08); +} + +.new-col-select { + width: 34px; + min-width: 34px; + text-align: center; +} + +.new-list-table .col-title { + font-weight: 700; +} + +.new-list-table .col-center { + text-align: center; +} + +@media (max-width: 900px) { + .main { + padding: 1.2rem 1rem 2rem; + } + + .new-controls-bar { + align-items: stretch; + } + + .new-actions { + justify-content: flex-start; + } + + .new-columns-menu { + left: 0; + right: auto; + } +} diff --git a/containers/novela/static/library.js b/containers/novela/static/library.js index aff079a..5ec49a8 100644 --- a/containers/novela/static/library.js +++ b/containers/novela/static/library.js @@ -9,6 +9,28 @@ let coverB64 = null; let importInProgress = false; const MISSING_PUBLISHER_KEY = '__missing__'; const MISSING_PUBLISHER_LABEL = 'No publisher'; +const IMPORT_EXTENSIONS = ['.epub', '.pdf', '.cbr', '.cbz']; +const NEW_VIEW_MODE_KEY = 'novela.new.viewMode'; +const NEW_VISIBLE_COLUMNS_KEY = 'novela.new.visibleColumns'; +const NEW_DEFAULT_COLUMNS = ['publisher', 'author', 'series', 'volume', 'title', 'has_cover', 'updated', 'genres', 'subgenres', 'tags', 'status']; +const NEW_COLUMN_DEFS = [ + { id: 'publisher', label: 'Publisher' }, + { id: 'author', label: 'Author' }, + { id: 'series', label: 'Series' }, + { id: 'volume', label: 'Volume' }, + { id: 'title', label: 'Title' }, + { id: 'has_cover', label: 'Has cover' }, + { id: 'updated', label: 'Updated' }, + { id: 'genres', label: 'Genres' }, + { id: 'subgenres', label: 'Sub-genres' }, + { id: 'tags', label: 'Tags' }, + { id: 'status', label: 'Status' }, +]; + +let newViewMode = loadNewViewMode(); +let newVisibleColumns = loadNewVisibleColumns(); +let newSelectedFilenames = new Set(); +let newLastToggledIndex = null; // ── Placeholder cover generation ─────────────────────────────────────────── @@ -108,14 +130,19 @@ function updateCounts() { if (archEl) archEl.textContent = archCount || ''; } +function _filenameBase(filename) { + const leaf = String(filename || '').split('/').pop() || ''; + return leaf.replace(/\.[^.]+$/, ''); +} + function bookAuthor(b) { if (b.author) return b.author; - const parts = b.filename.replace(/\.epub$/, '').split('-'); + const parts = _filenameBase(b.filename).split('-'); return (parts[1] ?? '').replace(/_/g, ' '); } function bookTitle(b) { - return b.title || (b.filename.replace(/\.epub$/, '').split('-')[2] ?? '').replace(/_/g, ' '); + return b.title || (_filenameBase(b.filename).split('-')[2] ?? '').replace(/_/g, ' '); } function normalizePublisherName(value) { @@ -189,6 +216,11 @@ function _applyView(view, param) { view === 'genre' ? `Genre: ${param || ''}` : view === 'search' ? `Search: "${param || ''}"` : ''; + if (view !== 'new') { + newSelectedFilenames.clear(); + newLastToggledIndex = null; + } + const showBack = view === 'series-detail' || view === 'author-detail' || view === 'publisher-detail'; document.getElementById('back-btn').style.display = showBack ? 
'' : 'none'; @@ -211,6 +243,7 @@ window.addEventListener('popstate', e => { function renderGrid() { const active = activeBooks(); + if (currentView !== 'new') hideNewControls(); if (currentView === 'all') renderBooksGrid(active); else if (currentView === 'wtr') renderBooksGrid(active.filter(b => b.want_to_read)); else if (currentView === 'series') renderSeriesGrid(); @@ -220,11 +253,326 @@ function renderGrid() { else if (currentView === 'publishers') renderPublishersView(); else if (currentView === 'publisher-detail') renderPublisherDetail(currentParam); else if (currentView === 'archived') renderBooksGrid(archivedBooks()); - else if (currentView === 'new') renderBooksGrid(active.filter(b => b.needs_review)); + else if (currentView === 'new') renderNewBooksView(active.filter(b => b.needs_review)); else if (currentView === 'genre') renderGenreView(currentParam); else if (currentView === 'search') renderSearchResults(currentParam); } +// ── New view (bulk review + list/grid toggle) ───────────────────────────── + +function loadNewViewMode() { + try { + const raw = localStorage.getItem(NEW_VIEW_MODE_KEY); + return raw === 'list' ? 'list' : 'grid'; + } catch { + return 'grid'; + } +} + +function loadNewVisibleColumns() { + try { + const raw = localStorage.getItem(NEW_VISIBLE_COLUMNS_KEY); + if (!raw) return [...NEW_DEFAULT_COLUMNS]; + const parsed = JSON.parse(raw); + if (!Array.isArray(parsed)) return [...NEW_DEFAULT_COLUMNS]; + const allowed = new Set(NEW_COLUMN_DEFS.map(c => c.id)); + const saved = new Set(parsed.filter(v => typeof v === 'string' && allowed.has(v))); + const normalized = NEW_COLUMN_DEFS.map(c => c.id).filter(id => saved.has(id)); + if (!normalized.length) return [...NEW_DEFAULT_COLUMNS]; + return normalized; + } catch { + return [...NEW_DEFAULT_COLUMNS]; + } +} + +function persistNewColumns() { + try { + localStorage.setItem(NEW_VISIBLE_COLUMNS_KEY, JSON.stringify(newVisibleColumns)); + } catch { + // ignore storage failures + } +} + +function persistNewViewMode() { + try { + localStorage.setItem(NEW_VIEW_MODE_KEY, newViewMode); + } catch { + // ignore storage failures + } +} + +function hideNewControls() { + const controls = document.getElementById('new-controls'); + if (!controls) return; + controls.style.display = 'none'; + controls.innerHTML = ''; +} + +function setNewViewMode(mode) { + if (mode !== 'grid' && mode !== 'list') return; + newViewMode = mode; + if (mode === 'grid') { + newSelectedFilenames.clear(); + newLastToggledIndex = null; + } + persistNewViewMode(); + renderGrid(); +} + +function toggleNewColumnsMenu(ev) { + ev?.stopPropagation(); + const menu = document.getElementById('new-columns-menu'); + if (!menu) return; + menu.classList.toggle('visible'); +} + +function toggleNewColumn(columnId) { + const set = new Set(newVisibleColumns); + if (set.has(columnId)) set.delete(columnId); + else set.add(columnId); + + const ordered = NEW_COLUMN_DEFS.map(c => c.id).filter(id => set.has(id)); + newVisibleColumns = ordered.length ? ordered : ['title']; + persistNewColumns(); + renderGrid(); +} + +function toggleSelectAllNewRows(checked, books) { + if (checked) { + books.forEach(b => newSelectedFilenames.add(b.filename)); + newLastToggledIndex = books.length ? 
books.length - 1 : null; + } else { + books.forEach(b => newSelectedFilenames.delete(b.filename)); + newLastToggledIndex = null; + } + renderNewControls(books); + if (newViewMode === 'list') { + const rowChecks = document.querySelectorAll('.new-row-select'); + rowChecks.forEach(cb => { cb.checked = checked; }); + } +} + +function toggleNewRowWithShift(filename, checked, shiftPressed) { + const books = activeBooks().filter(b => b.needs_review); + const filenames = books.map(b => b.filename); + const idx = filenames.indexOf(filename); + if (idx === -1) return; + + const doRange = !!(shiftPressed && newLastToggledIndex !== null); + if (doRange) { + const start = Math.min(newLastToggledIndex, idx); + const end = Math.max(newLastToggledIndex, idx); + for (let i = start; i <= end; i++) { + const name = filenames[i]; + if (checked) newSelectedFilenames.add(name); + else newSelectedFilenames.delete(name); + } + } else { + if (checked) newSelectedFilenames.add(filename); + else newSelectedFilenames.delete(filename); + } + + newLastToggledIndex = idx; + renderNewControls(books); + renderNewBooksList(books); +} + +function handleNewRowCheckboxClick(filename, checkboxEl, ev) { + ev?.stopPropagation(); + const shiftPressed = !!(ev && ev.shiftKey); + toggleNewRowWithShift(filename, !!checkboxEl?.checked, shiftPressed); +} + +function clearNewSelection(books) { + books.forEach(b => newSelectedFilenames.delete(b.filename)); + newLastToggledIndex = null; + renderGrid(); +} + +async function markSelectedNewAsReviewed(books) { + const selected = books.filter(b => newSelectedFilenames.has(b.filename)).map(b => b.filename); + if (!selected.length) return; + + const btn = document.getElementById('btn-mark-reviewed'); + if (btn) btn.disabled = true; + + try { + const resp = await fetch('/library/new/mark-reviewed', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ filenames: selected }), + }); + const result = await resp.json(); + if (!resp.ok || result.error) { + alert(result.error || 'Could not mark books as reviewed.'); + return; + } + + const selectedSet = new Set(selected); + allBooks.forEach(b => { + if (selectedSet.has(b.filename)) b.needs_review = false; + }); + selected.forEach(f => newSelectedFilenames.delete(f)); + updateCounts(); + renderGrid(); + } catch { + alert('Could not mark books as reviewed.'); + } finally { + if (btn) btn.disabled = false; + } +} + +function tagValuesByType(book, type) { + return (book.tags || []) + .filter(t => t && t.tag_type === type && t.tag) + .map(t => t.tag); +} + +function bookGenres(book) { + const explicit = tagValuesByType(book, 'genre'); + if (explicit.length) return explicit; + return (book.tags || []) + .filter(t => t && t.tag_type === 'subject' && t.tag) + .map(t => t.tag); +} + +function bookSubgenres(book) { + return tagValuesByType(book, 'subgenre'); +} + +function bookPlainTags(book) { + return tagValuesByType(book, 'tag'); +} + +function formatUpdated(iso) { + if (!iso) return ''; + const d = new Date(iso); + if (Number.isNaN(d.getTime())) return ''; + const y = d.getFullYear(); + const m = String(d.getMonth() + 1).padStart(2, '0'); + const day = String(d.getDate()).padStart(2, '0'); + return `${y}-${m}-${day}`; +} + +function newCellText(book, colId) { + if (colId === 'publisher') return publisherDisplayName(bookPublisherKey(book)); + if (colId === 'author') return bookAuthor(book); + if (colId === 'series') return book.series || ''; + if (colId === 'title') return bookTitle(book); + if (colId === 
'has_cover') return book.has_cover ? 'Yes' : 'No'; + if (colId === 'updated') return formatUpdated(book.updated_at); + if (colId === 'genres') return bookGenres(book).join(', '); + if (colId === 'subgenres') return bookSubgenres(book).join(', '); + if (colId === 'tags') return bookPlainTags(book).join(', '); + if (colId === 'volume') return book.series_index > 0 ? String(book.series_index) : ''; + if (colId === 'status') return book.publication_status || ''; + return ''; +} + +function renderNewControls(books) { + const controls = document.getElementById('new-controls'); + if (!controls) return; + if (currentView !== 'new') { + hideNewControls(); + return; + } + + const validFilenames = new Set(books.map(b => b.filename)); + newSelectedFilenames.forEach(filename => { + if (!validFilenames.has(filename)) newSelectedFilenames.delete(filename); + }); + + const listMode = newViewMode === 'list'; + const selectedCount = listMode + ? books.filter(b => newSelectedFilenames.has(b.filename)).length + : 0; + const allSelected = listMode && !!books.length && selectedCount === books.length; + + controls.style.display = ''; + controls.innerHTML = ` +
+    <div class="new-controls-bar">
+      <div class="new-view-toggle">
+        <button class="btn btn-view${newViewMode === 'grid' ? ' active' : ''}" onclick="setNewViewMode('grid')">Grid</button>
+        <button class="btn btn-view${newViewMode === 'list' ? ' active' : ''}" onclick="setNewViewMode('list')">List</button>
+      </div>
+      <div class="new-actions">
+        ${listMode ? `
+          <button class="btn btn-light" onclick="toggleNewColumnsMenu(event)">Columns</button>
+          <div id="new-columns-menu" class="new-columns-menu">
+            ${NEW_COLUMN_DEFS.map(col => `
+              <label class="new-col-item">
+                <input type="checkbox" ${newVisibleColumns.includes(col.id) ? 'checked' : ''} onchange="toggleNewColumn('${col.id}')">
+                ${esc(col.label)}
+              </label>
+            `).join('')}
+          </div>
+          <span class="new-selection-count">${selectedCount} selected</span>
+          <label class="new-selection-count"><input type="checkbox" ${allSelected ? 'checked' : ''} onclick="toggleSelectAllNewRows(this.checked, activeBooks().filter(b => b.needs_review))"> All</label>
+          <button class="btn btn-light" onclick="clearNewSelection(activeBooks().filter(b => b.needs_review))">Clear</button>
+          <button id="btn-mark-reviewed" class="btn btn-mark-reviewed" ${selectedCount ? '' : 'disabled'} onclick="markSelectedNewAsReviewed(activeBooks().filter(b => b.needs_review))">Mark reviewed</button>
+        ` : `
+          <span class="new-selection-count">Switch to List to select multiple books</span>
+        `}
+      </div>
+    </div>
+  `;
+}
+
+function renderNewBooksList(books) {
+  const container = document.getElementById('grid-container');
+  if (!books.length) {
+    container.innerHTML = 'No newly imported books waiting for metadata review.';
+    return;
+  }
+
+  const cols = NEW_COLUMN_DEFS.filter(c => newVisibleColumns.includes(c.id));
+  const selectedCount = books.filter(b => newSelectedFilenames.has(b.filename)).length;
+  const allSelected = selectedCount === books.length;
+
+  container.innerHTML = `
+    <div class="new-list-wrap">
+      <table class="new-list-table">
+        <thead>
+          <tr>
+            <th class="new-col-select"><input type="checkbox" ${allSelected ? 'checked' : ''} onclick="toggleSelectAllNewRows(this.checked, activeBooks().filter(b => b.needs_review))"></th>
+            ${cols.map(c => `<th>${esc(c.label)}</th>`).join('')}
+          </tr>
+        </thead>
+        <tbody>
+          ${books.map(b => `
+            <tr class="new-list-row" data-filename="${esc(b.filename)}">
+              <td class="new-col-select"><input type="checkbox" class="new-row-select" ${newSelectedFilenames.has(b.filename) ? 'checked' : ''} onclick="handleNewRowCheckboxClick('${esc(b.filename)}', this, event)"></td>
+              ${cols.map(c => {
+                const value = newCellText(b, c.id);
+                if (c.id === 'title') return `<td class="col-title">${esc(value)}</td>`;
+                if (c.id === 'has_cover') return `<td class="col-center">${esc(value)}</td>`;
+                return `<td>${esc(value)}</td>`;
+              }).join('')}
+            </tr>
+          `).join('')}
+        </tbody>
+      </table>
+    </div>
`; + + container.querySelectorAll('.new-list-row').forEach(row => { + row.addEventListener('click', () => { + const filename = row.getAttribute('data-filename') || ''; + if (!filename) return; + location.href = `/library/book/${encodeURIComponent(filename)}`; + }); + }); +} + +function renderNewBooksView(books) { + renderNewControls(books); + if (newViewMode === 'list') { + renderNewBooksList(books); + return; + } + renderBooksGrid(books); +} + // ── Book grid (All / WTR / Author detail) ───────────────────────────────── function renderBooksGrid(books) { @@ -237,7 +585,7 @@ function renderBooksGrid(books) { currentView === 'new' ? 'No newly imported books waiting for metadata review.' : currentView === 'genre' ? `No books tagged "${esc(currentParam || '')}".` : currentView === 'search' ? `No results for "${esc(currentParam || '')}".` : - 'No EPUBs yet. Convert a story first.' + 'No books yet. Import EPUB, PDF or CBR/CBZ to get started.' }`; return; } @@ -855,7 +1203,9 @@ function openImportPicker() { function onImportFilesSelected(fileList) { if (!fileList || !fileList.length) return; - uploadImportedFiles(Array.from(fileList)); + const files = Array.from(fileList).filter(f => IMPORT_EXTENSIONS.some(ext => f.name.toLowerCase().endsWith(ext))); + if (!files.length) return; + uploadImportedFiles(files); const input = document.getElementById('import-file-input'); if (input) input.value = ''; } @@ -868,7 +1218,7 @@ async function uploadImportedFiles(files) { importInProgress = true; zone?.classList.add('uploading'); - if (title) title.textContent = 'Importing EPUBs…'; + if (title) title.textContent = 'Importing files…'; if (sub) sub.textContent = `${files.length} file(s) selected`; const form = new FormData(); @@ -883,8 +1233,8 @@ async function uploadImportedFiles(files) { const importedCount = (data.imported || []).length; const skippedCount = (data.skipped || []).length; if (title) title.textContent = importedCount - ? `Imported ${importedCount} EPUB(s)` - : 'No EPUBs imported'; + ? `Imported ${importedCount} file(s)` + : 'No files imported'; if (sub) sub.textContent = skippedCount ? `${skippedCount} skipped` : 'Ready for next import'; @@ -896,7 +1246,7 @@ async function uploadImportedFiles(files) { importInProgress = false; zone?.classList.remove('uploading'); setTimeout(() => { - if (title) title.textContent = 'Drop EPUB files here'; + if (title) title.textContent = 'Drop EPUB, PDF or CBR/CBZ files here'; if (sub) sub.textContent = 'or click to choose files'; }, 1200); } @@ -954,12 +1304,20 @@ if (importZone) { }); importZone.addEventListener('drop', e => { if (importInProgress) return; - const files = Array.from(e.dataTransfer?.files || []).filter(f => f.name.toLowerCase().endsWith('.epub')); + const files = Array.from(e.dataTransfer?.files || []).filter(f => IMPORT_EXTENSIONS.some(ext => f.name.toLowerCase().endsWith(ext))); if (!files.length) return; uploadImportedFiles(files); }); } +document.addEventListener('click', e => { + const menu = document.getElementById('new-columns-menu'); + if (!menu) return; + const toggleBtn = e.target && e.target.closest ? 
e.target.closest('.new-actions .btn-light') : null; + if (menu.contains(e.target) || toggleBtn) return; + menu.classList.remove('visible'); +}); + loadLibrary().then(() => { const hash = window.location.hash.slice(1); let view = 'all', param = null; diff --git a/containers/novela/templates/backup.html b/containers/novela/templates/backup.html index a777d52..403ae08 100644 --- a/containers/novela/templates/backup.html +++ b/containers/novela/templates/backup.html @@ -90,6 +90,25 @@ .btn.primary { border-color: rgba(200,120,58,0.45); background: rgba(200,120,58,0.12); } .btn:disabled { opacity: 0.5; cursor: not-allowed; } + .field-label { + display: block; + font-family: var(--mono); + font-size: 0.72rem; + color: var(--text-dim); + margin-bottom: 0.4rem; + } + .field-input { + width: 100%; + border: 1px solid var(--border); + background: var(--surface2); + color: var(--text); + border-radius: 6px; + padding: 0.55rem 0.7rem; + font-family: var(--mono); + font-size: 0.78rem; + margin-bottom: 0.7rem; + } + .status-line { margin-top: 0.7rem; font-family: var(--mono); font-size: 0.74rem; } .ok { color: var(--ok); } .warn { color: var(--warn); } @@ -114,6 +133,29 @@
Backup
+
+
Dropbox Settings
+ + + + + + + + + + +
+ + + +
+
+
+
Run

@@ -182,6 +224,9 @@ rowHtml('Dropbox token', d.token_present ? 'present' : 'missing'), rowHtml('Dropbox auth', fmtStatus(d.dropbox_ok)), rowHtml('Dropbox error', d.dropbox_error || '-'), + rowHtml('Dropbox root', d.dropbox_root || '/novela'), + rowHtml('Snapshots keep', d.retention_count ?? 14), + rowHtml('Schedule', d.schedule_enabled ? `enabled (${d.schedule_interval_hours || 24}h)` : 'disabled'), rowHtml('pg_dump', d.pg_dump_available ? (d.pg_dump_path || 'available') : 'missing'), rowHtml('Library exists', fmtStatus(d.library_exists)), rowHtml('Library path', d.library_path || '-'), @@ -230,6 +275,100 @@ `).join(''); } + async function loadDropboxSettings() { + const out = document.getElementById('dropbox-status'); + const tokenEl = document.getElementById('dropbox-token'); + const rootEl = document.getElementById('dropbox-root'); + const retentionEl = document.getElementById('retention-count'); + const scheduleEnabledEl = document.getElementById('schedule-enabled'); + const scheduleHoursEl = document.getElementById('schedule-hours'); + out.className = 'status-line'; + out.textContent = 'Loading Dropbox settings...'; + try { + const r = await fetch('/api/backup/credentials'); + const d = await r.json(); + tokenEl.value = ''; + rootEl.value = d.dropbox_root || '/novela'; + retentionEl.value = d.retention_count ?? 14; + scheduleEnabledEl.value = String(!!d.schedule_enabled); + scheduleHoursEl.value = d.schedule_interval_hours ?? 24; + if (d.configured) { + out.className = 'status-line ok'; + out.textContent = `Configured (${d.token_preview || 'token set'})${d.updated_at ? ` • updated ${d.updated_at}` : ''}`; + } else { + out.className = 'status-line warn'; + out.textContent = 'No Dropbox token configured.'; + } + } catch (e) { + out.className = 'status-line err'; + out.textContent = `Failed to load settings: ${e}`; + } + } + + async function saveDropboxToken() { + const out = document.getElementById('dropbox-status'); + const token = (document.getElementById('dropbox-token').value || '').trim(); + const dropboxRoot = (document.getElementById('dropbox-root').value || '').trim(); + const retentionCount = Math.max(1, parseInt((document.getElementById('retention-count').value || '14').trim(), 10) || 14); + const scheduleEnabled = document.getElementById('schedule-enabled').value === 'true'; + const scheduleIntervalHours = Math.max(1, parseInt((document.getElementById('schedule-hours').value || '24').trim(), 10) || 24); + out.className = 'status-line warn'; + out.textContent = 'Saving backup settings...'; + try { + const r = await fetch('/api/backup/credentials', { + method: 'POST', + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({ + token, + dropbox_root: dropboxRoot, + retention_count: retentionCount, + schedule_enabled: scheduleEnabled, + schedule_interval_hours: scheduleIntervalHours + }), + }); + const raw = await r.text(); + let d; + try { + d = JSON.parse(raw); + } catch (_) { + throw new Error(`HTTP ${r.status}: ${raw.slice(0, 180) || 'non-JSON response'}`); + } + if (!d.ok) throw new Error(d.error || 'save failed'); + out.className = 'status-line ok'; + out.textContent = `Backup settings saved. Root: ${d.dropbox_root || dropboxRoot || '/novela'} • keep: ${d.retention_count || retentionCount} • schedule: ${(d.schedule_enabled ? 
'on' : 'off')} (${d.schedule_interval_hours || scheduleIntervalHours}h)`; + await Promise.all([loadDropboxSettings(), loadHealth()]); + } catch (e) { + out.className = 'status-line err'; + out.textContent = `Save failed: ${e}`; + } + } + + async function clearDropboxToken() { + if (!confirm('Remove Dropbox token for backup?')) return; + const out = document.getElementById('dropbox-status'); + out.className = 'status-line warn'; + out.textContent = 'Removing token...'; + try { + await fetch('/api/backup/credentials', {method: 'DELETE'}); + out.className = 'status-line ok'; + out.textContent = 'Dropbox token removed.'; + document.getElementById('dropbox-token').value = ''; + document.getElementById('dropbox-root').value = '/novela'; + document.getElementById('retention-count').value = 14; + document.getElementById('schedule-enabled').value = 'false'; + document.getElementById('schedule-hours').value = 24; + await Promise.all([loadDropboxSettings(), loadHealth()]); + } catch (e) { + out.className = 'status-line err'; + out.textContent = `Remove failed: ${e}`; + } + } + + function toggleDropboxToken() { + const el = document.getElementById('dropbox-token'); + el.type = el.type === 'password' ? 'text' : 'password'; + } + async function runBackup(dryRun) { const btnDry = document.getElementById('btn-dry'); const btnLive = document.getElementById('btn-live'); @@ -249,7 +388,11 @@ const d = await r.json(); if (d.ok) { out.className = 'status-line ok'; - out.textContent = `Backup ${d.status}. id=${d.backup_id}, files=${d.files_count}, bytes=${d.size_bytes}, dry_run=${d.dry_run}`; + if (d.status === 'running') { + out.textContent = `Backup started in background. id=${d.backup_id}, dry_run=${d.dry_run}`; + } else { + out.textContent = `Backup ${d.status}. id=${d.backup_id}, files=${d.files_count}, bytes=${d.size_bytes}, dry_run=${d.dry_run}`; + } } else { out.className = 'status-line err'; out.textContent = `Backup failed: ${d.error || 'unknown error'}`; @@ -265,7 +408,7 @@ } async function refreshAll() { - await Promise.all([loadHealth(), loadStatus(), loadHistory()]); + await Promise.all([loadDropboxSettings(), loadHealth(), loadStatus(), loadHistory()]); } refreshAll(); diff --git a/containers/novela/templates/home.html b/containers/novela/templates/home.html index fff06a5..1490d2e 100644 --- a/containers/novela/templates/home.html +++ b/containers/novela/templates/home.html @@ -21,7 +21,71 @@ .main { margin-left: var(--sidebar); min-height: 100vh; padding: 2rem 2.5rem 4rem; } - /* ── Section header ──────────────────────────────────────────────── */ + .main-header { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 1.75rem; + } + .main-title { + font-family: var(--mono); + font-size: 0.7rem; + letter-spacing: 0.12em; + text-transform: uppercase; + color: var(--accent); + } + + .search-wrap { position: relative; display: flex; align-items: center; } + .search-icon { position: absolute; left: 0.5rem; color: var(--text-faint); pointer-events: none; } + .search-input { + background: var(--surface); border: 1px solid var(--border); + border-radius: var(--radius); color: var(--text); + font-family: var(--mono); font-size: 0.78rem; + padding: 0.4rem 1.8rem 0.4rem 2rem; + outline: none; width: 220px; + transition: border-color 0.15s, width 0.2s; + } + .search-input:focus { border-color: var(--accent); width: 280px; } + .search-input::placeholder { color: var(--text-faint); } + .search-clear { + position: absolute; right: 0.4rem; + background: none; border: none; 
color: var(--text-faint); + cursor: pointer; font-size: 1rem; line-height: 1; padding: 0 0.1rem; + } + .search-clear:hover { color: var(--text-dim); } + + .import-dropzone { + border: 1px dashed var(--border); + background: rgba(34, 31, 27, 0.45); + border-radius: var(--radius); + padding: 0.9rem 1rem; + margin-bottom: 1.1rem; + cursor: pointer; + transition: border-color 0.15s, background 0.15s; + } + .import-dropzone:hover { border-color: var(--accent); } + .import-dropzone.dragover { + border-color: var(--accent2); + background: rgba(200, 120, 58, 0.12); + } + .import-dropzone.uploading { + opacity: 0.8; + cursor: progress; + } + .import-title { + font-family: var(--mono); + font-size: 0.72rem; + text-transform: uppercase; + letter-spacing: 0.08em; + color: var(--accent2); + } + .import-sub { + margin-top: 0.25rem; + font-family: var(--mono); + font-size: 0.68rem; + color: var(--text-dim); + } + .section-block { margin-bottom: 2.5rem; } .section-header { display: flex; align-items: baseline; justify-content: space-between; @@ -40,7 +104,6 @@ } .section-more:hover { color: var(--accent); } - /* ── Horizontal scroll row ───────────────────────────────────────── */ .h-row { display: flex; gap: 1rem; overflow-x: auto; padding-bottom: 0.75rem; } .h-row::-webkit-scrollbar { height: 4px; } .h-row::-webkit-scrollbar-thumb { background: var(--border); border-radius: 4px; } @@ -71,7 +134,6 @@ .h-progress-fill { height: 100%; background: var(--accent); border-radius: 2px; } .h-pct { font-family: var(--mono); font-size: 0.6rem; color: var(--text-dim); } - /* ── Full grid ───────────────────────────────────────────────────── */ .grid-header { display: flex; align-items: center; gap: 0.75rem; margin-bottom: 1.75rem; } @@ -119,10 +181,26 @@ font-size: 0.82rem; padding: 4rem 2rem; } - /* ── Responsive ────────────────────────────────────────────── */ @media (max-width: 768px) { - .main { margin-left: 0; padding: 4rem 1rem 4rem; } - .cover-grid { grid-template-columns: repeat(auto-fill, minmax(130px, 1fr)); gap: 1rem; } + .main { + margin-left: 0; + padding: 4rem 1rem 4rem; + } + + .main-header { + flex-wrap: wrap; + gap: 0.75rem; + margin-bottom: 1.25rem; + } + + .cover-grid { + grid-template-columns: repeat(auto-fill, minmax(130px, 1fr)); + gap: 1rem; + } + + .search-input { width: 100%; } + .search-input:focus { width: 100%; } + .search-wrap { flex: 1; min-width: 0; } } @@ -131,8 +209,23 @@ {% include "_sidebar.html" %}

+
+
Home
+
+ + + + + +
+
+ +
+ +
Drop EPUB, PDF or CBR/CBZ files here
+
or click to choose files
+
-
- -
diff --git a/containers/novela/templates/library.html b/containers/novela/templates/library.html index 9995902..1041403 100644 --- a/containers/novela/templates/library.html +++ b/containers/novela/templates/library.html @@ -32,10 +32,11 @@
- -
Drop EPUB files here
+ +
Drop EPUB, PDF or CBR/CBZ files here
or click to choose files
+
Loading…

diff --git a/docs/BLUEPRINT.md b/docs/BLUEPRINT.md
deleted file mode 100644
index 5917940..0000000
--- a/docs/BLUEPRINT.md
+++ /dev/null
@@ -1,420 +0,0 @@
-# Novela 2.0 - Blueprint
-
-> Replaces the `story-grabber` repository. New repo: **Novela**.
-> Stack: FastAPI · Jinja2 · plain JS · PostgreSQL 16 · Docker / Portainer
-
----
-
-## 1. Goal
-
-Novela 2.0 is a fully self-hosted media library and e-reader for epub, pdf and cbr/cbz.
-It replaces Kavita (library), Calibre (metadata) and Sigil (epub editor) in one web application.
-
-Core principle: **the database is the fast index, the file is the source of truth.**
-Every write touches both, always in this order: first the file, then the database. Reads always go through the database.
-
----
-
-## 2. What is kept from v1
-
-| Module | File | Notes |
-|---|---|---|
-| EPUB build | `epub.py` | `make_epub`, `make_chapter_xhtml`, `add_cover_to_epub` |
-| EPUB read/write | `epub.py` | `read_epub_file`, `write_epub_file` |
-| XHTML conversion | `xhtml.py` | `element_to_xhtml`, `is_break_element`, `configure_break_patterns` |
-| Scrapers | `scrapers/` | base, awesomedude, gayauthors; the plugin pattern stays |
-| SSE job streaming | `main.py` | `JOBS` dict + `/events/{job_id}` `StreamingResponse` |
-| Migrations pattern | `migrations.py` | idempotent `CREATE IF NOT EXISTS`, `run_migrations()` at startup |
-| Cover cache | DB table | `library_cover_cache`, WebP thumbnails 300x450 |
-| Reading progress | DB table | CFI for epub, page number for pdf/cbr |
-| Reading sessions | DB table | reading history per book |
-| Break patterns | DB table | regex + css_class patterns for scene breaks |
-
----
-
-## 3. Project structure
-
-```text
-novela/
-├── containers/
-│   └── novela/
-│       ├── main.py
-│       ├── migrations.py
-│       ├── db.py
-│       ├── epub.py
-│       ├── xhtml.py
-│       ├── pdf.py
-│       ├── cbr.py
-│       ├── routers/
-│       │   ├── __init__.py
-│       │   ├── library.py
-│       │   ├── reader.py
-│       │   ├── editor.py
-│       │   ├── grabber.py
-│       │   ├── backup.py
-│       │   └── settings.py
-│       ├── scrapers/
-│       ├── static/
-│       ├── templates/
-│       ├── requirements.txt
-│       └── Dockerfile
-├── stack/
-│   ├── stack.yml
-│   └── novela.env
-└── docs/
-    ├── BLUEPRINT.md
-    └── TECHNICAL.md
-```
-
----
-
-## 4. Library on disk
-
-`output/` becomes `library/`.
-
-```text
-library/
-├── epub/
-│   └── {Publisher}/
-│       └── {Author}/
-│           ├── Stories/
-│           │   └── {Title}.epub
-│           └── Series/
-│               └── {Series name}/
-│                   └── {001 - Title}.epub
-├── pdf/
-│   └── {Author}/
-│       └── {Title}.pdf
-├── comics/
-│   └── {Author or Series name}/
-│       └── {001 - Title}.cbr
-└── covers/
-```
-
-Naming rules:
-- Strip invalid characters: `< > : " / \\ | ? *` and control chars
-- Max 80 characters per directory segment, 140 for a file name
-- On conflict: `Title (2).epub`, `Title (3).epub`, and so on
-
-Renaming after a metadata edit:
-- Move the file on disk
-- Update the DB references: `library`, `book_tags`, `reading_progress`, `reading_sessions`, `library_cover_cache`
-- Prune empty directories
-
----
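The naming rules above map to a small sanitizer plus a conflict resolver. A minimal sketch, assuming illustrative helper names (`sanitize_segment` and `resolve_conflict` are not functions from this repo):

```python
import re
from pathlib import Path

# Characters the naming rules forbid, plus ASCII control characters.
_INVALID = re.compile(r'[<>:"/\\|?*\x00-\x1f]')


def sanitize_segment(name: str, max_len: int = 80) -> str:
    """Strip forbidden characters and cap the length (80 for dirs, 140 for files)."""
    cleaned = _INVALID.sub("", name).strip().rstrip(".")
    return cleaned[:max_len] or "Untitled"  # fallback name is an assumption


def resolve_conflict(directory: Path, stem: str, ext: str) -> Path:
    """Return 'Title.epub', then 'Title (2).epub', 'Title (3).epub', ..."""
    candidate = directory / f"{stem}{ext}"
    counter = 2
    while candidate.exists():
        candidate = directory / f"{stem} ({counter}){ext}"
        counter += 1
    return candidate
```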
-## 5. Database schema
-
-### 5.1 `library`
-
-```sql
-CREATE TABLE library (
-    id SERIAL PRIMARY KEY,
-    filename VARCHAR(600) UNIQUE NOT NULL,
-    media_type VARCHAR(10) NOT NULL DEFAULT 'epub',
-    title VARCHAR(500),
-    author VARCHAR(255),
-    publisher VARCHAR(255),
-    series VARCHAR(500),
-    series_index INTEGER DEFAULT 0,
-    publication_status VARCHAR(100),
-    has_cover BOOLEAN DEFAULT FALSE,
-    description TEXT DEFAULT '',
-    source_url VARCHAR(1000),
-    publish_date DATE,
-    archived BOOLEAN DEFAULT FALSE,
-    want_to_read BOOLEAN DEFAULT FALSE,
-    needs_review BOOLEAN DEFAULT FALSE,
-    created_at TIMESTAMP DEFAULT NOW(),
-    updated_at TIMESTAMP DEFAULT NOW()
-);
-```
-
-### 5.2 `book_tags`
-
-```sql
-CREATE TABLE book_tags (
-    id SERIAL PRIMARY KEY,
-    filename VARCHAR(600) NOT NULL REFERENCES library(filename) ON DELETE CASCADE,
-    tag VARCHAR(255) NOT NULL,
-    tag_type VARCHAR(20) NOT NULL,
-    UNIQUE (filename, tag, tag_type)
-);
-CREATE INDEX idx_book_tags_filename ON book_tags (filename);
-```
-
-`tag_type`:
-- `genre`
-- `subgenre`
-- `tag`
-- `subject`
-
-### 5.3 `reading_progress`
-
-```sql
-CREATE TABLE reading_progress (
-    id SERIAL PRIMARY KEY,
-    filename VARCHAR(600) UNIQUE NOT NULL REFERENCES library(filename) ON DELETE CASCADE,
-    cfi TEXT,
-    page INTEGER,
-    progress INTEGER DEFAULT 0,
-    updated_at TIMESTAMP DEFAULT NOW()
-);
-```
-
-### 5.4 `reading_sessions`
-
-```sql
-CREATE TABLE reading_sessions (
-    id SERIAL PRIMARY KEY,
-    filename VARCHAR(600) NOT NULL REFERENCES library(filename) ON DELETE CASCADE,
-    read_at TIMESTAMP DEFAULT NOW()
-);
-CREATE INDEX idx_reading_sessions_filename ON reading_sessions (filename);
-```
-
-### 5.5 `library_cover_cache`
-
-```sql
-CREATE TABLE library_cover_cache (
-    filename VARCHAR(600) PRIMARY KEY REFERENCES library(filename) ON DELETE CASCADE,
-    mime_type VARCHAR(100) NOT NULL,
-    thumb_webp BYTEA NOT NULL,
-    updated_at TIMESTAMP DEFAULT NOW()
-);
-```
-
-### 5.6 `credentials`
-
-```sql
-CREATE TABLE credentials (
-    id SERIAL PRIMARY KEY,
-    site VARCHAR(255) UNIQUE NOT NULL,
-    username VARCHAR(255) NOT NULL,
-    password VARCHAR(255) NOT NULL,
-    updated_at TIMESTAMP DEFAULT NOW()
-);
-```
-
-### 5.7 `break_patterns`
-
-```sql
-CREATE TABLE break_patterns (
-    id SERIAL PRIMARY KEY,
-    pattern_type VARCHAR(20) NOT NULL,
-    pattern TEXT NOT NULL,
-    enabled BOOLEAN DEFAULT TRUE,
-    is_default BOOLEAN DEFAULT FALSE,
-    created_at TIMESTAMP DEFAULT NOW(),
-    UNIQUE (pattern_type, pattern)
-);
-```
-
-### 5.8 `backup_log`
-
-```sql
-CREATE TABLE backup_log (
-    id SERIAL PRIMARY KEY,
-    status VARCHAR(20) NOT NULL,
-    files_count INTEGER,
-    size_bytes BIGINT,
-    error_msg TEXT,
-    started_at TIMESTAMP DEFAULT NOW(),
-    finished_at TIMESTAMP
-);
-```
-
----
-
-## 6. Write principle: file and database in sync
-
-Order per edit:
-1. Edit the file on disk
-2. Update the database
-3. Return success
-
-Never update only the DB without touching the file.
-
----
-
-## 7. Cover strategy
-
-Storage:
-- EPUB cover inside the file (`OEBPS/Images/cover.{ext}`)
-- Thumbnail as `300x450` WebP in `library_cover_cache`
-
-Missing cover:
-- If there is no cover: add the tag `Cover Missing`
-- A UI upload writes the cover into the EPUB and the cache
-
-Retrieval:
-- Primary: `/library/cover-cached/{filename}`
-- Fallback: `/library/cover/{filename}`
-
-PDF and CBR:
-- PDF: first page as thumbnail
-- CBR/CBZ: first image as thumbnail
-
----
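The `300x450` WebP rule from §7 comes down to a few lines of Pillow. A minimal sketch; the helper name, the crop-to-fill choice, and the quality setting are assumptions, not the repo's actual implementation:

```python
import io

from PIL import Image, ImageOps  # Pillow


def make_thumb_webp(cover_bytes: bytes) -> bytes:
    """Fit a source cover into a 300x450 box and encode it as WebP."""
    img = Image.open(io.BytesIO(cover_bytes)).convert("RGB")
    thumb = ImageOps.fit(img, (300, 450))  # crop to fill, preserves aspect ratio
    buf = io.BytesIO()
    thumb.save(buf, format="WEBP", quality=80)  # quality value is an assumption
    return buf.getvalue()
```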
-## 8. Delete flow
-
-`DELETE /library/file/{filename}`:
-1. Delete the file
-2. Prune empty directories
-3. Delete from `library` (the cascade removes rows in related tables)
-
----
-
-## 9. Router overview
-
-### 9.1 `routers/library.py`
-- `GET /library`
-- `GET /api/library`
-- `POST /library/rescan`
-- `POST /library/import`
-- `DELETE /library/file/{filename}`
-- `GET /library/cover/{filename}`
-- `GET /library/cover-cached/{filename}`
-- `POST /library/cover/{filename}`
-- `POST /library/want-to-read/{filename}`
-- `POST /library/archive/{filename}`
-- `GET /home`
-- `GET /api/home`
-- `GET /stats`
-- `GET /api/stats`
-
-### 9.2 `routers/reader.py`
-- `GET /library/read/{filename}`
-- `GET /library/book/{filename}`
-- `PATCH /library/book/{filename}`
-- `GET /library/epub/{filename}`
-- `GET /library/chapters/{filename}`
-- `GET /library/chapter/{index}/{filename}`
-- `GET /library/chapter-img/{path}`
-- `GET /library/pdf/{filename}`
-- `GET /library/cbr/{filename}/{page}`
-- `GET /library/progress/{filename}`
-- `POST /library/progress/{filename}`
-- `DELETE /library/progress/{filename}`
-- `POST /library/mark-read/{filename}`
-- `GET /api/genres`
-
-### 9.3 `routers/editor.py`
-- `GET /library/editor/{filename}`
-- `GET /api/edit/chapter/{index}/{filename}`
-- `POST /api/edit/chapter/{index}/{filename}`
-- `POST /api/edit/chapter/add/{filename}`
-- `DELETE /api/edit/chapter/{index}/{filename}`
-
-### 9.4 `routers/grabber.py`
-- `GET /grabber`
-- `POST /preload`
-- `POST /convert`
-- `GET /events/{job_id}`
-- `GET /debug`
-- `POST /debug/run`
-- `GET /credentials`
-- `POST /credentials`
-- `DELETE /credentials/{site}`
-
-### 9.5 `routers/backup.py`
-- `GET /backup`
-- `GET /api/backup/status`
-- `POST /api/backup/run`
-- `GET /api/backup/history`
-
-### 9.6 `routers/settings.py`
-- `GET /settings`
-- `GET /api/break-patterns`
-- `POST /api/break-patterns`
-- `PATCH /api/break-patterns/{id}`
-- `DELETE /api/break-patterns/{id}`
-- `DELETE /api/reading-history`
-
----
-
-## 10. New modules
-
-### 10.1 `db.py`
-Shared psycopg2 connection pool (`init_pool`, `get_conn`, `release_conn`).
-
-### 10.2 `pdf.py`
-PyMuPDF rendering (`pdf_render_page`), page count and cover thumb.
-
-### 10.3 `cbr.py`
-RAR/ZIP page listing, page extraction and cover thumb.
-
----
-
-## 11. Cover flow per media type
-
-| Action | EPUB | PDF | CBR/CBZ |
-|---|---|---|---|
-| Cover import | From OPF/Images | Render of first page | First image in the archive |
-| Thumbnail | Pillow -> WebP | PyMuPDF + Pillow -> WebP | Pillow -> WebP |
-| Storage | EPUB + cache | cache | cache |
-| Replace cover | Yes | No | No |
-| No cover | `Cover Missing` tag | `Cover Missing` tag | `Cover Missing` tag |
-
----
-
-## 12. Database setup
-
-- Start with a clean v2 database
-- No migration path from v1 data
-- `run_migrations()` at startup
-- `CREATE TABLE IF NOT EXISTS` everywhere, idempotent
-
----
-
-## 13. Docker stack
-
-See [`stack/stack.yml`](../stack/stack.yml).
-
-Key points:
-- App container exposes `8099 -> 8000`
-- PostgreSQL 16
-- Adminer on `8098`
-- `NOVELA_MASTER_KEY` in `stack/novela.env`, passed through in `stack/stack.yml` for encrypted credentials
-
----
-
-## 14. Requirements
-
-See [`containers/novela/requirements.txt`](../containers/novela/requirements.txt).
-
----
-
-## 15. Staging the files
-
-Source: `/docker/develop/story-grabber/containers/story-grabber`.
-Target: `/docker/develop/novela/containers/novela`.
-
-Carry over:
-- `epub.py`
-- `xhtml.py`
-- `scrapers/*`
-- `static/*`
-- `templates/*`
-
-Write from scratch:
-- `main.py`, `db.py`, `pdf.py`, `cbr.py`, `migrations.py`
-- `routers/*`
-
----
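For reference, the pool helpers that §10.1 names for `db.py` could look roughly like this; pool sizing, environment variable names, and the host default are assumptions:

```python
import os

from psycopg2.pool import ThreadedConnectionPool

_pool: ThreadedConnectionPool | None = None


def init_pool() -> None:
    """Create the shared pool once at startup."""
    global _pool
    _pool = ThreadedConnectionPool(
        minconn=1,
        maxconn=10,
        dbname=os.environ["POSTGRES_DB"],
        user=os.environ["POSTGRES_USER"],
        password=os.environ["POSTGRES_PASSWORD"],
        host=os.environ.get("POSTGRES_HOST", "db"),  # assumed service name
    )


def get_conn():
    assert _pool is not None, "init_pool() must run first"
    return _pool.getconn()


def release_conn(conn) -> None:
    assert _pool is not None, "init_pool() must run first"
    _pool.putconn(conn)
```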
-## 16. Build order
-
-1. `db.py`
-2. `migrations.py`
-3. `main.py`
-4. `routers/library.py`
-5. `routers/reader.py`
-6. `routers/editor.py`
-7. `routers/grabber.py`
-8. `routers/settings.py`
-9. `pdf.py` + reader extension
-10. `cbr.py` + reader extension
-11. `routers/backup.py`
-12. Extend `routers/library.py` for pdf/cbr import
diff --git a/docs/TECHNICAL.md b/docs/TECHNICAL.md
index 6e09903..6281a15 100644
--- a/docs/TECHNICAL.md
+++ b/docs/TECHNICAL.md
@@ -1,100 +1,168 @@
-# Novela 2.0 - Technical Plan
+# Novela 2.0 - Technical Status (Develop)
 
 ## Scope
-This document describes the technical execution of the blueprint in implementable steps.
+This document describes the current technical status of the `develop` codebase.
+It is the primary technical documentation for the current codebase.
 
-## Architectural Rules
-- The file is the source of truth.
-- The database is the fast index.
-- Writes: file first, then DB.
-- Reads: primarily from the DB, with scan/rescan for recovery.
-
-## Data Integrity Rules
-- All child tables reference `library(filename)` with `ON DELETE CASCADE`.
-- Deleting a book is a single `DELETE FROM library` after the file delete.
-- The rename flow must update `filename` in sync across:
-  - `library`
-  - `book_tags`
-  - `reading_progress`
-  - `reading_sessions`
-  - `library_cover_cache`
-
-## Runtime Lifecycle
-- Startup:
+## Architecture
+- Stack: FastAPI, Jinja2 templates, plain JS, PostgreSQL 16, Docker.
+- Startup lifecycle (`main.py`):
   1. `init_pool()`
   2. `run_migrations()`
-  3. mount routers
-- Shutdown:
-  1. `close_pool()`
+  3. `start_backup_scheduler()`
+  4. mount routers
+- Shutdown lifecycle:
+  1. `stop_backup_scheduler()`
+  2. `close_pool()`
+- Source-of-truth rule: the file on disk leads, the database is index/cache.
 
-## Module Responsibilities
-- `db.py`: pool ownership + connection helpers.
-- `migrations.py`: schema + seeds.
-- `routers/library.py`: import/scan/delete/cover/home/stats.
-- `routers/reader.py`: reading + progress + metadata patch + epub editor endpoints.
-- `routers/editor.py`: eventual dedicated editor routes (may initially delegate).
-- `routers/grabber.py`: scraper orchestration + credentials + SSE.
-- `routers/backup.py`: Dropbox sync + pg dump + logging.
-- `routers/settings.py`: break patterns + cleaning endpoints.
+## Router Status
 
-## Endpoint Contract Notes
-- All file routes use safe path resolution to block traversal.
-- Cover endpoint behaviour:
-  - cached first
-  - fallback to raw extract
-  - otherwise 404
-- Progress payload:
-  - EPUB: `{ cfi, progress }`
-  - PDF/CBR: `{ page, progress }`
+### `routers/library.py`
+- `GET /library`
+- `GET /api/library`
+- `POST /library/rescan`
+- `POST /library/import` (EPUB/PDF/CBR/CBZ)
+- `DELETE /library/file/{filename}`
+- `GET /library/cover/{filename}`
+- `GET /library/cover-cached/{filename}`
+- `POST /library/cover/{filename}` (EPUB)
+- `POST /library/want-to-read/{filename}`
+- `POST /library/archive/{filename}`
+- `POST /library/new/mark-reviewed` (bulk `needs_review=false`)
+- `GET /home`
+- `GET /api/home`
+- `GET /stats`
+- `GET /api/stats`
+- `GET /library/list` (compat)
 
-## Backup Plan
-- `POST /api/backup/run`:
-  - insert `running` into `backup_log`
-  - sync files to Dropbox (incremental on mtime+size)
-  - run `pg_dump` and upload the `.sql`
-  - update `backup_log` to `success`/`error`
-- OAuth token stored via `credentials` (`site='dropbox'`), encrypted at rest (Fernet) in the database.
-- Managed through the web interface at `/credentials-manager` (site: `dropbox`, token in the password field).
-- Legacy plaintext credentials are migrated to encrypted automatically on read.
+`GET /api/library` runs in fast-path mode by default (DB-only, no full disk rescan).
+For a forced sync: `GET /api/library?rescan=true` or `POST /library/rescan`.
+`include_file_info=true` optionally enriches the response with file size/mtime.
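A quick illustration of those query parameters as an `httpx` sketch; the host/port follows the `8099 -> 8000` mapping mentioned in the retired blueprint and may differ per deployment:

```python
import httpx

BASE = "http://localhost:8099"  # assumed from the stack's port mapping

# Fast path: answer straight from the database index.
books = httpx.get(f"{BASE}/api/library").json()

# Forced sync: rescan the library directory before answering.
books_synced = httpx.get(f"{BASE}/api/library", params={"rescan": "true"}).json()

# Fast path plus file size/mtime enrichment.
books_with_files = httpx.get(
    f"{BASE}/api/library",
    params={"include_file_info": "true"},
).json()
```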
 
-## Migration Plan from Current State
-1. Keep the stable v1 modules (`epub.py`, `xhtml.py`, scrapers, templates/static).
-2. Introduce the new routers without breaking the existing frontend (compat routes where needed).
-3. Switch the library root to `library/`.
-4. Enable the PDF/CBR scan and reader paths.
-5. Split the editor routes out of the reader into a dedicated `editor.py`.
-6. Migrate the full scrape->epub flow to `grabber.py`.
-7. Finish backup completely (Dropbox + pg_dump).
+`/api/home` returns:
+- `continue_reading`
+- `shorts_unread`
+- `novels_unread`
+- `shorts_read`
+- `novels_read`
 
-## Test Matrix
-- Import:
-  - EPUB with/without cover
-  - PDF with 1+ pages
-  - CBR/CBZ with images
-- Reader:
-  - EPUB CFI save/load
-  - PDF page render + page progress
-  - CBR page render + page progress
-- Metadata edit:
-  - rename path
-  - db references updated
-  - old row cleanup
-- Delete:
-  - file gone
-  - empty dirs pruned
-  - cascade records gone
-- Break patterns:
-  - create/update/delete/enable
-- Grabber:
-  - preload/debug
-  - convert job events
-- Backup:
-  - status/history
-  - success/error logging
+`/api/stats` returns chart and history data for `stats.html` on top of the totals:
+- `reads_by_month`, `reads_by_dow`, `reads_by_hour`
+- `genre_counts`, `publisher_counts`, `fav_genre`, `fav_publisher`
+- `top_books`, `history`
 
-## Deployment Notes
-- Docker image builds from `containers/novela`.
-- Stack from `stack/stack.yml` with env from `stack/novela.env`.
-- `NOVELA_MASTER_KEY` is required to encrypt/decrypt credentials in the database and must remain stable after initial use.
-- Postgres volume persistent.
-- Library mount persistent.
+Home sections filter out series books with:
+- `COALESCE(series, '') = ''`
+- `filename NOT LIKE '%/Series/%'`
+
+Read sections on Home are sorted oldest first:
+- `shorts_read`: `ORDER BY MAX(read_at) ASC`
+- `novels_read`: `ORDER BY MAX(read_at) ASC`
+
+### `routers/reader.py`
+- Epub serving/chapters/images
+- Reader page + book detail
+- Metadata patch (`PATCH /library/book/{filename}`)
+- Progress read/write/delete
+- Mark-as-read
+- PDF render endpoint
+- CBR/CBZ page endpoint
+- Genres endpoint
+
+### `routers/editor.py`
+- Editor page
+- Chapter get/save
+- Chapter add
+- Chapter delete
+
+### `routers/grabber.py`
+- Grabber page + convert/debug flows
+- SSE events
+- Credentials management for scraper sites
+- Credentials manager UI (`/credentials-manager`)
+
+### `routers/backup.py`
+- `GET /backup`
+- `GET/POST/DELETE /api/backup/credentials`
+- `GET /api/backup/health`
+- `GET /api/backup/status`
+- `GET /api/backup/history`
+- `POST /api/backup/run`
+
+## Backup & Security
+- Dropbox token encrypted at rest in `credentials` (`site='dropbox'`).
+- Dropbox backup root stored encrypted in `credentials` (`site='dropbox_backup_root'`).
+- Retention (`snapshots to keep`) stored encrypted in `credentials` (`site='dropbox_backup_retention'`).
+- Backup schedule (`enabled` + `interval_hours`) stored encrypted in `credentials` (`site='dropbox_backup_schedule'`).
+- Encryption via `NOVELA_MASTER_KEY` (Fernet).
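A minimal sketch of what Fernet encryption at rest keyed by `NOVELA_MASTER_KEY` can look like; the key-derivation step shown here is an assumption, not necessarily how the repo's `encrypt_value`/`decrypt_value` helpers derive it:

```python
import base64
import hashlib
import os

from cryptography.fernet import Fernet

# Assumption: derive a 32-byte urlsafe-base64 Fernet key from the master
# secret via SHA-256. The actual codebase may use a salted KDF instead.
_key = base64.urlsafe_b64encode(
    hashlib.sha256(os.environ["NOVELA_MASTER_KEY"].encode()).digest()
)
_fernet = Fernet(_key)


def encrypt_value(plain: str) -> str:
    return _fernet.encrypt(plain.encode()).decode()


def decrypt_value(token: str) -> str:
    return _fernet.decrypt(token.encode()).decode()
```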
+
+Implementation:
+- Version-based backups with deduplication:
+  - file objects in Dropbox: `library_objects/{sha256_prefix}/{sha256}`
+  - snapshots in Dropbox: `library_snapshots/snapshot-YYYYMMDD-HHMMSS.json`
+- Every run creates a new snapshot (version) and uploads only the missing objects.
+- Retention removes old snapshots above the configured limit.
+- Orphan object pruning removes objects no longer referenced by any retained snapshot.
+- A local manifest cache (`config/backup_manifest.json`) speeds up change detection.
+- Database backup via `pg_dump` to Dropbox `postgres/`.
+- `POST /api/backup/run` always starts as a background task and returns a status immediately.
+- The scheduler runs in the background (`start_backup_scheduler`) and triggers on the interval when backups are enabled.
+- Concurrency restriction: only one backup at a time.
+- On container restart/crash, stale `running` logs are automatically marked as interrupted/error.
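To make the snapshot/object layout above concrete, a minimal sketch; the two-character hash prefix, the manifest shape, and the helper names are assumptions:

```python
import hashlib
import json
from datetime import datetime, timezone
from pathlib import Path


def object_path(file: Path) -> tuple[str, str]:
    """Content-address a file: sha256 -> library_objects/{prefix}/{sha}."""
    sha = hashlib.sha256(file.read_bytes()).hexdigest()  # chunked hashing in practice
    return sha, f"library_objects/{sha[:2]}/{sha}"


def build_snapshot(library_dir: Path) -> tuple[str, str]:
    """One snapshot version = a JSON map of relative path -> object hash."""
    entries = {}
    for f in sorted(library_dir.rglob("*")):
        if f.is_file():
            sha, _ = object_path(f)
            entries[str(f.relative_to(library_dir))] = sha
    stamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
    return f"library_snapshots/snapshot-{stamp}.json", json.dumps(entries, indent=2)
```

Objects already present in Dropbox are skipped, so a run only uploads hashes that no retained snapshot has referenced before.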
+
+## Environment
+`stack/novela.env` now contains at minimum:
+- `POSTGRES_DB`
+- `POSTGRES_USER`
+- `POSTGRES_PASSWORD`
+- `NOVELA_MASTER_KEY`
+- `CONFIG_DIR`
+
+Dropbox settings are managed through the web interface at `/backup`.
+
+## UI Notes
+- Library import accepts: EPUB/PDF/CBR/CBZ.
+- Home offers the same import options.
+- Home has search functionality.
+- Home header/dropzone alignment matches Library (search top right, dropzone below it).
+- The `New` view supports `Grid` and `List` mode.
+- Bulk selection + `Remove from New` works only in `List` mode.
+- `List` mode has a column filter (on/off) with the columns:
+  - Publisher
+  - Author
+  - Series
+  - Volume
+  - Title
+  - Has cover
+  - Updated
+  - Genres
+  - Sub-genres
+  - Tags
+  - Status
+- `List` mode supports multi-select with `Shift+click` range select on the checkboxes.
+- `Grid` mode shows no selection checkboxes or bulk actions.
+- The backup page supports:
+  - manual run and dry run
+  - settings for the Dropbox root
+  - snapshot retention count
+  - scheduled backup (on/off + interval in hours)
+  - status + history overview
+
+## Known Conventions
+- Deleting a book: remove the file, prune empty directories, then `DELETE FROM library` (cascade over the child tables).
+- Cover strategy:
+  - EPUB: cover from the file + cache
+  - PDF/CBR: thumbnail via the cover cache
+
+## Performance Notes
+- Library load optimized for large datasets:
+  - `list_library_json()` uses pre-aggregation for `reading_sessions`.
+  - `has_cached_cover` comes straight from a SQL join instead of a separate full cache fetch.
+- New migration indexes:
+  - `idx_library_sort_coalesce`
+  - `idx_library_needs_review`
+  - `idx_library_archived`
+  - `idx_reading_sessions_filename_readat`
+  - `idx_book_tags_filename_tag`
diff --git a/docs/changelog-develop.md b/docs/changelog-develop.md
new file mode 100644
index 0000000..08680a0
--- /dev/null
+++ b/docs/changelog-develop.md
@@ -0,0 +1,47 @@
+# Changelog Develop
+
+This file tracks changes on the `develop` line.
+`changelog.md` will later be used for release summaries.
+
+## 2026-03-22
+- Blueprint and technical documentation added under `docs/`.
+- Router split and bootstrap structure completed (`main.py`, routers, migrations, db pool).
+- Media support extended to EPUB/PDF/CBR/CBZ in the import and scan flow.
+- Home UI extended with:
+  - import dropzone for EPUB/PDF/CBR/CBZ
+  - search
+  - alignment matching Library (search top right, dropzone below it)
+- Library UI import copy and drag/drop filtering updated for multi-format.
+- Library `New` view extended:
+  - `Grid`/`List` toggle
+  - column filter in `List`
+  - multi-select + bulk `Remove from New`
+  - selection only in `List` mode
+  - `Shift+click` range select on checkboxes
+- New route added: `POST /library/new/mark-reviewed` (bulk `needs_review=false`).
+- Library performance improved:
+  - `/api/library` fast path (no full rescan per page load)
+  - optional `rescan=true`/`include_file_info=true`
+  - SQL optimization in `list_library_json()`
+  - extra DB indexes for scale
+- `/api/home` restored to full dataset output:
+  - `continue_reading`
+  - `shorts_unread`
+  - `novels_unread`
+  - `shorts_read`
+  - `novels_read`
+- Home section filters now explicitly exclude series books.
+- Home read order corrected: in `shorts_read` and `novels_read` the oldest entry is on top (`ORDER BY MAX(read_at) ASC`).
+- Statistics page restored: `/api/stats` again returns the full payload for charts, favourites, top books and reading history.
+- Backup improved:
+  - Dropbox token stored encrypted in the DB
+  - Dropbox backup root configurable via the web interface and encrypted in the DB
+  - version-based snapshots + object-store deduplication in Dropbox (`library_snapshots` / `library_objects`)
+  - configurable snapshot retention (`snapshots to keep`) via the backup settings
+  - object pruning based on retained snapshots
+  - scheduled backup (enable + interval in hours)
+  - backup runs as a background process so site navigation keeps working
+  - recovery from a stale running state after restart/crash (old running logs marked as interrupted/error)
+  - dry-run support on the new flow
+- Docker image updated with `postgresql-client` for `pg_dump`.
+- Multiple test builds executed and pushed to `gitea.oskamp.info/ivooskamp/novela:dev`.
diff --git a/stack/novela.env b/stack/novela.env
index e35d906..ccf0412 100644
--- a/stack/novela.env
+++ b/stack/novela.env
@@ -6,8 +6,5 @@ POSTGRES_PASSWORD=change-me
 # Keep this stable after first use; changing it breaks decrypt of existing credentials.
 NOVELA_MASTER_KEY=change-me-long-random-secret
 
-# Dropbox root folder for backup uploads (default: /novela)
-DROPBOX_BACKUP_ROOT=/novela
-
 # Directory for the backup manifest/config inside the container (default: config)
 CONFIG_DIR=config