diff --git a/containers/novela/epub.py b/containers/novela/epub.py
index 986d430..59c9a96 100644
--- a/containers/novela/epub.py
+++ b/containers/novela/epub.py
@@ -21,48 +21,125 @@ def detect_image_format(data: bytes, base: str) -> tuple[str, str]:
 def add_cover_to_epub(epub_path, cover_data: bytes) -> None:
-    """Add a cover image to an existing EPUB and remove the Cover Missing tag."""
+    """Replace (or add) the cover image in an existing EPUB."""
     cover_filename, cover_media_type = detect_image_format(cover_data, "cover")

-    # Read existing zip into memory
     with open(epub_path, "rb") as f:
         original = f.read()

+    with zipfile.ZipFile(io.BytesIO(original), "r") as zin:
+        names = zin.namelist()
+
+        # Locate the OPF via META-INF/container.xml
+        opf_path = "OEBPS/content.opf"
+        try:
+            container = zin.read("META-INF/container.xml").decode("utf-8", errors="replace")
+            m = re.search(r'full-path\s*=\s*["\']([^"\']+)["\']', container)
+            if m:
+                opf_path = m.group(1)
+        except Exception:
+            pass
+        opf_dir = opf_path.rsplit("/", 1)[0] if "/" in opf_path else ""
+
+        # Parse OPF to find the existing cover image path
+        old_cover_zip_path: str | None = None
+        try:
+            opf_text = zin.read(opf_path).decode("utf-8", errors="replace")
+            # Find item with id="cover*" that is an image
+            for m in re.finditer(
+                r'<item[^>]+id=["\']cover[^"\']*["\'][^>]*/?>',
+                opf_text,
+            ):
+                href_m = re.search(r'href=["\']([^"\']+)["\']', m.group(0))
+                if href_m:
+                    href = href_m.group(1)
+                    zip_path = (opf_dir + "/" + href).lstrip("/") if opf_dir else href
+                    # Normalise ../ segments
+                    parts, resolved = zip_path.split("/"), []
+                    for p in parts:
+                        if p == ".." and resolved:
+                            resolved.pop()
+                        else:
+                            resolved.append(p)
+                    old_cover_zip_path = "/".join(resolved)
+                    break
+        except Exception:
+            pass
+
+    # Decide where to write the new cover (same folder as old, or Images/ next to OPF)
+    if old_cover_zip_path:
+        cover_dir = old_cover_zip_path.rsplit("/", 1)[0] if "/" in old_cover_zip_path else ""
+    else:
+        cover_dir = (opf_dir + "/Images").lstrip("/") if opf_dir else "OEBPS/Images"
+    new_cover_zip_path = (cover_dir + "/" + cover_filename).lstrip("/")
+
+    # Rebuild the ZIP
     buf = io.BytesIO()
     with zipfile.ZipFile(io.BytesIO(original), "r") as zin, \
          zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zout:
-        # Copy mimetype uncompressed first
-        info = zin.getinfo("mimetype")
         zout.writestr(zipfile.ZipInfo("mimetype"), zin.read("mimetype"), compress_type=zipfile.ZIP_STORED)

         for item in zin.infolist():
             if item.filename == "mimetype":
                 continue
+            # Drop the old cover image (will be replaced below)
+            if old_cover_zip_path and item.filename == old_cover_zip_path:
+                continue
             data = zin.read(item.filename)
-
-            if item.filename == "OEBPS/content.opf":
-                data = _patch_opf(data.decode("utf-8"), cover_filename, cover_media_type).encode("utf-8")
-
+            if item.filename == opf_path:
+                data = _patch_opf(
+                    data.decode("utf-8"),
+                    cover_filename,
+                    cover_media_type,
+                    old_cover_zip_path,
+                    opf_dir,
+                ).encode("utf-8")
             zout.writestr(item, data)

-        # Add the cover image
-        zout.writestr(f"OEBPS/Images/{cover_filename}", cover_data)
+        # Write the new cover image
+        zout.writestr(new_cover_zip_path, cover_data)

     with open(epub_path, "wb") as f:
         f.write(buf.getvalue())


-def _patch_opf(opf: str, cover_filename: str, cover_media_type: str) -> str:
-    """Insert cover into OPF manifest/metadata and remove Cover Missing dc:subject."""
+def _patch_opf(
+    opf: str,
+    cover_filename: str,
+    cover_media_type: str,
+    old_cover_zip_path: str | None,
+    opf_dir: str,
+) -> str:
+    """Replace or insert the cover manifest item and cover meta in an OPF."""
     # Remove "Cover Missing" dc:subject
     opf = re.sub(r'\s*<dc:subject>Cover Missing</dc:subject>', '', opf)

-    # Add cover manifest item before </manifest>
-    cover_item = f'<item id="cover-image" href="Images/{cover_filename}" media-type="{cover_media_type}"/>'
+    # Remove existing cover manifest item(s) with id starting with "cover"
+    opf = re.sub(r'\s*<item[^>]+id=["\']cover[^"\']*["\'][^>]*/>', '', opf)
+    opf = re.sub(r'\s*<item[^>]+id=["\']cover[^"\']*["\'][^>]*>', '', opf)
+    # Remove existing <meta name="cover"/>
+    opf = re.sub(r'\s*<meta[^>]+name=["\']cover["\'][^>]*/>', '', opf)
+
+    # Compute relative href from OPF dir to the new cover
+    # new cover is placed in the same folder as the old one, relative to OPF
+    cover_href = cover_filename  # same dir as OPF → just the filename
+    if old_cover_zip_path:
+        old_dir = old_cover_zip_path.rsplit("/", 1)[0] if "/" in old_cover_zip_path else ""
+        if old_dir != opf_dir:
+            # Make relative: e.g. opf_dir=EPUB, old_dir=EPUB/images → href=images/cover.jpg
+            if opf_dir and old_dir.startswith(opf_dir + "/"):
+                cover_href = old_dir[len(opf_dir) + 1:] + "/" + cover_filename
+            else:
+                cover_href = cover_filename
+        else:
+            cover_href = cover_filename
+    else:
+        cover_href = "Images/" + cover_filename
+
+    cover_item = f'<item id="cover-image" href="{cover_href}" media-type="{cover_media_type}"/>'
     opf = opf.replace("</manifest>", f'    {cover_item}\n  </manifest>')

-    # Add cover meta before </metadata>
     cover_meta = '<meta name="cover" content="cover-image"/>'
     opf = opf.replace("</metadata>", f'    {cover_meta}\n  </metadata>')
diff --git a/containers/novela/migrate_paths.py b/containers/novela/migrate_paths.py
new file mode 100644
index 0000000..04fc322
--- /dev/null
+++ b/containers/novela/migrate_paths.py
@@ -0,0 +1,259 @@
+"""
+One-time migration: move all library files to the correct path structure
+and update all database references.
+
+Target structure:
+    epub/{publisher}/{author}/Stories/{title}.epub
+    epub/{publisher}/{author}/Series/{series}/{idx:03d} - {title}.epub
+    pdf/{publisher}/{author}/{title}.pdf
+    comics/{publisher}/{author}/{title}.cbr|cbz
+
+Run inside the novela container:
+    python migrate_paths.py [--execute]
+    Without --execute: dry-run only (no files moved, no DB changes).
+""" + +import os +import re +import sys +from pathlib import Path + +import psycopg2 + +LIBRARY_DIR = Path("library") +LIBRARY_ROOT = LIBRARY_DIR.resolve() +DRY_RUN = "--execute" not in sys.argv + + +# --------------------------------------------------------------------------- +# Path helpers (mirrors common.py / reader.py logic) +# --------------------------------------------------------------------------- + +def _clean(value: str, fallback: str, max_len: int) -> str: + txt = re.sub(r"\s+", " ", (value or "").strip()) + txt = re.sub(r'[<>:"/\\|?*\x00-\x1f]', "", txt) + txt = re.sub(r"\.+$", "", txt).strip() + if not txt: + txt = fallback + return txt[:max_len] + + +def _coerce_index(value) -> int: + try: + return max(1, min(999, int(value or 1))) + except Exception: + return 1 + + +def correct_rel_path(filename: str, title: str, author: str, publisher: str, + series: str, series_index: int) -> Path: + """Compute the correct relative path for a book based on current metadata.""" + ext = Path(filename).suffix.lower() + + pub = _clean(publisher, "Unknown Publisher", 80) + auth = _clean(author, "Unknown Author", 80) + ttl = _clean(title or Path(filename).stem, "Untitled", 140) + + if ext == ".epub": + series_name = _clean(series or "", "", 80) + if series_name: + idx = _coerce_index(series_index) + return Path("epub") / pub / auth / "Series" / series_name / f"{idx:03d} - {ttl}.epub" + return Path("epub") / pub / auth / "Stories" / f"{ttl}.epub" + + if ext == ".pdf": + return Path("pdf") / pub / auth / f"{ttl}.pdf" + + # .cbr / .cbz + comics_ext = ext if ext in {".cbr", ".cbz"} else ".cbr" + return Path("comics") / pub / auth / f"{ttl}{comics_ext}" + + +def ensure_unique(rel_path: Path, exclude_current: Path) -> Path: + """Add (2), (3), … suffix if target already exists (and isn't the current file).""" + candidate = rel_path + counter = 2 + while True: + full = (LIBRARY_DIR / candidate).resolve() + if full == exclude_current.resolve(): + return candidate + if not full.exists(): + return candidate + candidate = rel_path.with_name( + f"{rel_path.stem} ({counter}){rel_path.suffix}" + ) + counter += 1 + + +def prune_empty_dirs(start: Path) -> None: + cur = start.resolve() + while cur != LIBRARY_ROOT: + try: + cur.rmdir() + except OSError: + return + cur = cur.parent + + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + +def main(): + db_url = ( + f"host=novela-db " + f"dbname={os.environ['POSTGRES_DB']} " + f"user={os.environ['POSTGRES_USER']} " + f"password={os.environ['POSTGRES_PASSWORD']}" + ) + conn = psycopg2.connect(db_url) + + with conn.cursor() as cur: + cur.execute(""" + SELECT filename, title, author, publisher, series, series_index + FROM library + ORDER BY filename + """) + books = cur.fetchall() + + print(f"Total books in DB: {len(books)}") + print(f"Mode: {'DRY RUN' if DRY_RUN else '*** EXECUTE ***'}") + print() + + moves = [] + skipped_missing = [] + skipped_same = [] + conflicts = [] + + for (filename, title, author, publisher, series, series_index) in books: + old_path = (LIBRARY_DIR / filename).resolve() + + new_rel = correct_rel_path(filename, title or "", author or "", + publisher or "", series or "", series_index or 0) + new_rel = ensure_unique(new_rel, old_path) + new_path = (LIBRARY_DIR / new_rel).resolve() + + if not old_path.exists(): + skipped_missing.append(filename) + continue + + if old_path == new_path: + skipped_same.append(filename) + continue + + # 
Sanity: target already exists and is a different file + if new_path.exists() and new_path != old_path: + conflicts.append((filename, new_rel.as_posix())) + continue + + moves.append((filename, old_path, new_rel.as_posix(), new_path)) + + # Report + print(f"Already correct: {len(skipped_same)}") + print(f"File missing: {len(skipped_missing)}") + print(f"Conflicts: {len(conflicts)}") + print(f"To move: {len(moves)}") + print() + + if skipped_missing: + print("=== MISSING FILES (skipped) ===") + for f in skipped_missing: + print(f" {f}") + print() + + if conflicts: + print("=== CONFLICTS (skipped) ===") + for old, new in conflicts: + print(f" {old}") + print(f" → {new} (target exists!)") + print() + + if not moves: + print("Nothing to do.") + conn.close() + return + + print("=== MOVES ===") + for old_fn, old_path, new_fn, new_path in moves: + print(f" {old_fn}") + print(f" → {new_fn}") + print() + + if DRY_RUN: + print("Dry run complete. Run with --execute to apply changes.") + conn.close() + return + + # Execute + print("Applying changes...") + moved = 0 + errors = [] + prunable = set() + + for old_fn, old_path, new_fn, new_path in moves: + try: + # Move file + new_path.parent.mkdir(parents=True, exist_ok=True) + old_path.rename(new_path) + prunable.add(old_path.parent) + + # Update DB in a transaction + with conn: + with conn.cursor() as cur: + # Copy library row with new filename + cur.execute(""" + INSERT INTO library ( + filename, title, author, publisher, has_cover, media_type, + series, series_index, publication_status, want_to_read, + source_url, archived, needs_review, updated_at, + publish_date, description, rating + ) + SELECT %s, title, author, publisher, has_cover, media_type, + series, series_index, publication_status, want_to_read, + source_url, archived, needs_review, updated_at, + publish_date, description, rating + FROM library WHERE filename = %s + """, (new_fn, old_fn)) + + # Update child tables + for table in ("book_tags", "reading_progress", + "reading_sessions", "library_cover_cache"): + cur.execute( + f"UPDATE {table} SET filename = %s WHERE filename = %s", + (new_fn, old_fn) + ) + + # Delete old library row (cascade removes any remaining child rows) + cur.execute("DELETE FROM library WHERE filename = %s", (old_fn,)) + + moved += 1 + print(f" [{moved}/{len(moves)}] {old_fn} → {new_fn}") + + except Exception as e: + errors.append((old_fn, str(e))) + # Try to move file back if DB failed + if new_path.exists() and not old_path.exists(): + try: + old_path.parent.mkdir(parents=True, exist_ok=True) + new_path.rename(old_path) + except Exception: + pass + print(f" ERROR: {old_fn}: {e}") + + # Prune empty directories + print("\nPruning empty directories...") + for d in prunable: + prune_empty_dirs(d) + + print() + print(f"Done. 
Moved: {moved}, Errors: {len(errors)}, Skipped (conflict): {len(conflicts)}, Missing: {len(skipped_missing)}") + if errors: + print("\nErrors:") + for fn, err in errors: + print(f" {fn}: {err}") + + conn.close() + + +if __name__ == "__main__": + main() diff --git a/containers/novela/migrations.py b/containers/novela/migrations.py index b716753..7a95446 100644 --- a/containers/novela/migrations.py +++ b/containers/novela/migrations.py @@ -197,6 +197,23 @@ def migrate_add_rating() -> None: _exec("ALTER TABLE library ADD COLUMN IF NOT EXISTS rating SMALLINT NOT NULL DEFAULT 0") +def migrate_create_bookmarks() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS bookmarks ( + id SERIAL PRIMARY KEY, + filename VARCHAR(600) NOT NULL REFERENCES library(filename) ON DELETE CASCADE, + chapter_index INTEGER NOT NULL DEFAULT 0, + scroll_frac REAL NOT NULL DEFAULT 0, + chapter_title VARCHAR(500) NOT NULL DEFAULT '', + note TEXT NOT NULL DEFAULT '', + created_at TIMESTAMPTZ DEFAULT NOW() + ) + """ + ) + _exec("CREATE INDEX IF NOT EXISTS idx_bookmarks_filename ON bookmarks (filename)") + + def migrate_remove_cover_missing_tag() -> None: _exec("DELETE FROM book_tags WHERE tag = 'Cover Missing' AND tag_type = 'tag'") @@ -235,6 +252,15 @@ def migrate_create_perf_indexes() -> None: ) +def migrate_series_suffix() -> None: + _exec( + """ + ALTER TABLE library + ADD COLUMN IF NOT EXISTS series_suffix VARCHAR(10) NOT NULL DEFAULT '' + """ + ) + + def run_migrations() -> None: migrate_create_library() migrate_create_book_tags() @@ -248,3 +274,5 @@ def run_migrations() -> None: migrate_seed_break_patterns() migrate_add_rating() migrate_remove_cover_missing_tag() + migrate_create_bookmarks() + migrate_series_suffix() diff --git a/containers/novela/recover_decock049.py b/containers/novela/recover_decock049.py new file mode 100644 index 0000000..9479f35 --- /dev/null +++ b/containers/novela/recover_decock049.py @@ -0,0 +1,256 @@ +""" +One-time recovery: retrieve 049 - De Cock en het lijk op drift.epub from +Dropbox backup, place it at the correct library path, and re-insert the DB row. + +Run inside the novela container: + python recover_decock049.py [--execute] + Without --execute: dry-run only (shows what would be restored). +""" + +import json +import os +import sys +from pathlib import Path + +import dropbox +import psycopg2 +from security import decrypt_value + +DRY_RUN = "--execute" not in sys.argv +LIBRARY_DIR = Path("library") + +TARGET_REL = "epub/Unknown Publisher/A.C. 
Baantjer/Series/De Cock (Series)/049 - De Cock en het lijk op drift.epub" +SEARCH_KEYWORDS = ["de cock", "049", "lijk op drift"] + + +def _db_conn(): + return psycopg2.connect( + f"host=novela-db " + f"dbname={os.environ['POSTGRES_DB']} " + f"user={os.environ['POSTGRES_USER']} " + f"password={os.environ['POSTGRES_PASSWORD']}" + ) + + +def _load_dropbox_token(conn) -> str: + with conn.cursor() as cur: + cur.execute( + "SELECT username, password FROM credentials WHERE site = 'dropbox' LIMIT 1" + ) + row = cur.fetchone() + if not row: + raise RuntimeError("No Dropbox token in credentials table.") + username_raw, password_raw = row + username = decrypt_value(username_raw) + password = decrypt_value(password_raw) + token = (password or username or "").strip() + if not token: + raise RuntimeError("Dropbox token is empty.") + return token + + +def _load_dropbox_root(conn) -> str: + with conn.cursor() as cur: + cur.execute( + "SELECT username, password FROM credentials WHERE site = 'dropbox_backup_root' LIMIT 1" + ) + row = cur.fetchone() + if not row: + return "/novela" + _, password_raw = row + root = decrypt_value(password_raw).strip() or "/novela" + if not root.startswith("/"): + root = "/" + root + return root + + +def _dropbox_join(root: str, *parts: str) -> str: + segs = [p.strip("/") for p in parts if p and p.strip("/")] + base = root.rstrip("/") + return base + "/" + "/".join(segs) if segs else base + + +def _list_snapshots(client, snapshots_root: str) -> list[str]: + paths = [] + try: + res = client.files_list_folder(snapshots_root, recursive=False) + except Exception as e: + raise RuntimeError(f"Cannot list snapshots folder '{snapshots_root}': {e}") + while True: + for entry in res.entries: + if isinstance(entry, dropbox.files.FileMetadata): + if entry.name.endswith(".json"): + paths.append(entry.path_display) + if not res.has_more: + break + res = client.files_list_folder_continue(res.cursor) + return sorted(paths, reverse=True) # newest first + + +def _load_snapshot(client, path: str) -> dict: + _meta, resp = client.files_download(path) + return json.loads(resp.content.decode("utf-8", errors="replace")) + + +def _find_file_in_snapshot(snap: dict) -> tuple[str, str] | None: + """Return (rel_path, sha256) for De Cock 049, or None.""" + files = snap.get("files", {}) + for rel, info in files.items(): + rel_lower = rel.lower() + if all(kw in rel_lower for kw in SEARCH_KEYWORDS): + sha256 = info.get("sha256", "") + return rel, sha256 + return None + + +def _download_object(client, objects_root: str, sha256: str) -> bytes: + obj_path = _dropbox_join(objects_root, sha256[:2], sha256) + print(f" Downloading object: {obj_path}") + _meta, resp = client.files_download(obj_path) + return resp.content + + +def _insert_db_row(conn, filename: str, snap_entry: dict, orig_filename: str) -> None: + """Copy library row from orig_filename if it exists, else insert minimal row.""" + with conn.cursor() as cur: + # Check if orig row exists in DB + cur.execute("SELECT * FROM library WHERE filename = %s LIMIT 1", (orig_filename,)) + orig = cur.fetchone() + + if orig: + cols = [desc.name for desc in conn.cursor().description] if False else None + # Fetch column names separately + with conn.cursor() as cur2: + cur2.execute( + "SELECT column_name FROM information_schema.columns " + "WHERE table_name='library' ORDER BY ordinal_position" + ) + cols = [r[0] for r in cur2.fetchall()] + + with conn.cursor() as cur3: + cur3.execute( + f"SELECT {', '.join(cols)} FROM library WHERE filename = %s LIMIT 1", + 
(orig_filename,), + ) + row = cur3.fetchone() + if row: + data = dict(zip(cols, row)) + data["filename"] = filename + col_list = ", ".join(data.keys()) + placeholders = ", ".join(["%s"] * len(data)) + cur3.execute( + f"INSERT INTO library ({col_list}) VALUES ({placeholders}) " + f"ON CONFLICT (filename) DO NOTHING", + list(data.values()), + ) + print(f" DB row copied from '{orig_filename}' → '{filename}'") + return + + # No orig row: insert minimal + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO library (filename, title, author, publisher, series, series_index, + media_type, has_cover, needs_review) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) + ON CONFLICT (filename) DO NOTHING + """, + ( + filename, + "De Cock en het lijk op drift", + "A.C. Baantjer", + "Unknown Publisher", + "De Cock (Series)", + 49, + "epub", + False, + True, + ), + ) + print(f" DB row inserted (minimal) for '{filename}'") + + +def main(): + print(f"Mode: {'DRY RUN' if DRY_RUN else '*** EXECUTE ***'}") + print() + + conn = _db_conn() + + token = _load_dropbox_token(conn) + dropbox_root = _load_dropbox_root(conn) + print(f"Dropbox root: {dropbox_root}") + + client = dropbox.Dropbox(token, timeout=120) + try: + acct = client.users_get_current_account() + print(f"Dropbox account: {acct.email}") + except Exception as e: + print(f"ERROR: Dropbox auth failed: {e}") + conn.close() + return + + objects_root = _dropbox_join(dropbox_root, "library_objects") + snapshots_root = _dropbox_join(dropbox_root, "library_snapshots") + + print(f"\nListing snapshots in: {snapshots_root}") + snapshots = _list_snapshots(client, snapshots_root) + print(f"Found {len(snapshots)} snapshots.") + for s in snapshots: + print(f" {s}") + + found_rel = None + found_sha256 = None + found_snapshot = None + + for snap_path in snapshots: + print(f"\nSearching snapshot: {snap_path}") + snap = _load_snapshot(client, snap_path) + result = _find_file_in_snapshot(snap) + if result: + found_rel, found_sha256 = result + found_snapshot = snap_path + print(f" FOUND: {found_rel}") + print(f" sha256: {found_sha256}") + break + else: + print(" Not found in this snapshot.") + + if not found_rel: + print("\nERROR: File not found in any snapshot. Cannot recover.") + conn.close() + return + + target_path = LIBRARY_DIR / TARGET_REL + print(f"\nTarget path: {target_path}") + + if target_path.exists(): + print("File already exists at target path. Nothing to do.") + conn.close() + return + + if DRY_RUN: + print(f"\nDry run: would download sha256={found_sha256}") + print(f" and write to: {target_path}") + print("\nRun with --execute to apply.") + conn.close() + return + + # Download + data = _download_object(client, objects_root, found_sha256) + print(f" Downloaded {len(data):,} bytes.") + + # Write file + target_path.parent.mkdir(parents=True, exist_ok=True) + target_path.write_bytes(data) + print(f" Written to: {target_path}") + + # DB + with conn: + _insert_db_row(conn, TARGET_REL, {}, found_rel) + + print(f"\nDone. 
File recovered to: {target_path}") + conn.close() + + +if __name__ == "__main__": + main() diff --git a/containers/novela/routers/backup.py b/containers/novela/routers/backup.py index 0ce7ee9..9b218a2 100644 --- a/containers/novela/routers/backup.py +++ b/containers/novela/routers/backup.py @@ -7,8 +7,10 @@ import subprocess from datetime import datetime, timezone from pathlib import Path from tempfile import NamedTemporaryFile +from urllib.parse import urlencode import dropbox +import httpx from dropbox.exceptions import ApiError, AuthError from fastapi import APIRouter, Request from fastapi.responses import HTMLResponse @@ -31,6 +33,7 @@ DEFAULT_SCHEDULE_INTERVAL_HOURS = 24 BACKUP_TASKS: dict[int, asyncio.Task] = {} +BACKUP_PROGRESS: dict[int, dict] = {} # log_id → {done, total, phase} SCHEDULER_TASK: asyncio.Task | None = None @@ -95,6 +98,66 @@ def _load_dropbox_token() -> str: return _dropbox_credential_details().get("token", "") +def _load_dropbox_app_key() -> str: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + "SELECT password FROM credentials WHERE site = 'dropbox_app_key' LIMIT 1" + ) + row = cur.fetchone() + if not row: + return "" + return decrypt_value(row[0]).strip() + + +def _load_dropbox_app_secret() -> str: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + "SELECT password FROM credentials WHERE site = 'dropbox_app_secret' LIMIT 1" + ) + row = cur.fetchone() + if not row: + return "" + return decrypt_value(row[0]).strip() + + +def _save_dropbox_app_key(app_key: str) -> None: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO credentials (site, username, password, updated_at) + VALUES ('dropbox_app_key', %s, %s, NOW()) + ON CONFLICT (site) DO UPDATE + SET username = EXCLUDED.username, + password = EXCLUDED.password, + updated_at = NOW() + """, + (encrypt_value(""), encrypt_value(app_key.strip())), + ) + + +def _save_dropbox_app_secret(app_secret: str) -> None: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO credentials (site, username, password, updated_at) + VALUES ('dropbox_app_secret', %s, %s, NOW()) + ON CONFLICT (site) DO UPDATE + SET username = EXCLUDED.username, + password = EXCLUDED.password, + updated_at = NOW() + """, + (encrypt_value(""), encrypt_value(app_secret.strip())), + ) + + def _normalize_dropbox_root(value: str | None) -> str: root = (value or "").strip() or DEFAULT_DROPBOX_ROOT if not root.startswith("/"): @@ -325,14 +388,36 @@ def _save_dropbox_retention_count(retention_count: int) -> None: def _dbx() -> dropbox.Dropbox: + """ + Maak een Dropbox client aan. + + Voorkeursvolgorde: + 1. App key + app secret + refresh token -> automatische token refresh + 2. 
Legacy access token (achterwaartse compatibiliteit) + """ token = _load_dropbox_token() if not token: raise RuntimeError("Dropbox token not found in credentials (site='dropbox').") - client = dropbox.Dropbox(token, timeout=120) + + app_key = _load_dropbox_app_key() + app_secret = _load_dropbox_app_secret() + try: + if app_key and app_secret: + client = dropbox.Dropbox( + oauth2_refresh_token=token, + app_key=app_key, + app_secret=app_secret, + timeout=120, + ) + else: + # Fallback: legacy access token + client = dropbox.Dropbox(token, timeout=120) + client.users_get_current_account() except AuthError as e: raise RuntimeError(f"Dropbox auth failed: {e}") + return client @@ -586,10 +671,17 @@ def _prune_orphan_objects(client: dropbox.Dropbox, objects_root: str, referenced return _dropbox_delete_paths(client, to_delete) -def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]: +def _run_backup_internal(*, dry_run: bool, progress_key: int | None = None) -> tuple[int, int]: + def _prog(done: int, total: int, phase: str) -> None: + if progress_key is not None: + BACKUP_PROGRESS[progress_key] = {"done": done, "total": total, "phase": phase} + client = None if dry_run else _dbx() manifest = _load_manifest() files = _iter_library_files() + total_files = len(files) + + _prog(0, total_files, "scanning") uploaded_count = 0 uploaded_size = 0 @@ -607,7 +699,8 @@ def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]: snapshot_files: dict[str, dict[str, float | int | str]] = {} - for path in files: + for idx, path in enumerate(files): + _prog(idx, total_files, "uploading") rel = path.relative_to(LIBRARY_DIR).as_posix() state = _current_file_state(path) prev = manifest.get(rel, {}) if isinstance(manifest.get(rel), dict) else {} @@ -639,6 +732,8 @@ def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]: uploaded_size += int(state["size"]) uploaded_count += 1 + _prog(total_files, total_files, "snapshot") + snapshot = { "created_at": _now_iso(), "retention_count": retention_count, @@ -661,6 +756,8 @@ def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]: uploaded_size += len(snapshot_data) uploaded_count += 1 + _prog(total_files, total_files, "pg_dump") + dump_data, dump_name = _run_pg_dump() dump_target = _dropbox_join(dropbox_root, "postgres", dump_name) if client is not None: @@ -691,10 +788,15 @@ async def backup_dropbox_credentials(): preview = "" if token: preview = f"{token[:4]}...{token[-4:]}" if len(token) >= 10 else "(configured)" + + app_key = _load_dropbox_app_key() + app_secret = _load_dropbox_app_secret() + return { "configured": bool(token), "token_preview": preview, "updated_at": details.get("updated_at"), + "app_key_configured": bool(app_key and app_secret), "dropbox_root": root_details.get("root", DEFAULT_DROPBOX_ROOT), "root_updated_at": root_details.get("updated_at"), "retention_count": int(retention_details.get("retention_count", DEFAULT_RETENTION_COUNT)), @@ -719,6 +821,9 @@ async def backup_dropbox_credentials_save(request: Request): if not token: return {"ok": False, "error": "Dropbox token is required."} + app_key = (body.get("app_key") or "").strip() + app_secret = (body.get("app_secret") or "").strip() + dropbox_root = _normalize_dropbox_root(body.get("dropbox_root") or _load_dropbox_root()) raw_retention = body.get("retention_count", _load_dropbox_retention_count()) try: @@ -748,6 +853,11 @@ async def backup_dropbox_credentials_save(request: Request): (encrypt_value(""), encrypt_value(token)), ) + if app_key: + _save_dropbox_app_key(app_key) + if 
app_secret:
+        _save_dropbox_app_secret(app_secret)
+
     _save_dropbox_root(dropbox_root)
     _save_dropbox_retention_count(retention_count)
     _save_backup_schedule(schedule_enabled, schedule_interval_hours)
@@ -768,7 +878,14 @@ async def backup_dropbox_credentials_delete():
         with conn:
             with conn.cursor() as cur:
                 cur.execute(
-                    "DELETE FROM credentials WHERE site IN ('dropbox', 'dropbox_backup_root', 'dropbox_backup_retention', 'dropbox_backup_schedule')"
+                    """DELETE FROM credentials WHERE site IN (
+                        'dropbox',
+                        'dropbox_app_key',
+                        'dropbox_app_secret',
+                        'dropbox_backup_root',
+                        'dropbox_backup_retention',
+                        'dropbox_backup_schedule'
+                    )"""
                 )
     return {"ok": True}
@@ -797,6 +914,8 @@ async def backup_health():
         "dropbox_error": dropbox_error,
         "dropbox_root": dropbox_root,
         "retention_count": retention_count,
+        "schedule_enabled": schedule_enabled,
+        "schedule_interval_hours": schedule_interval_hours,
         "pg_dump_available": bool(pg_dump_path),
         "pg_dump_path": pg_dump_path,
         "library_exists": LIBRARY_DIR.exists(),
@@ -917,8 +1036,11 @@ async def stop_backup_scheduler() -> None:


 async def _run_backup_job(log_id: int, dry_run: bool) -> None:
+    BACKUP_PROGRESS[log_id] = {"done": 0, "total": 0, "phase": "starting"}
     try:
-        files_count, size_bytes = await asyncio.to_thread(_run_backup_internal, dry_run=dry_run)
+        files_count, size_bytes = await asyncio.to_thread(
+            _run_backup_internal, dry_run=dry_run, progress_key=log_id
+        )
         _finish_backup_log(
             log_id,
             status="success",
@@ -936,6 +1058,115 @@
         )
     finally:
         BACKUP_TASKS.pop(log_id, None)
+        BACKUP_PROGRESS.pop(log_id, None)
+
+
+@router.post("/api/backup/oauth/prepare")
+async def oauth_prepare(request: Request):
+    """
+    Save the app key + secret and return the Dropbox authorization URL.
+    The user opens this URL in the browser and is shown an authorization code.
+    Uses token_access_type=offline for a refresh token that does not expire.
+    """
+    body = {}
+    try:
+        body = await request.json()
+    except Exception:
+        pass
+
+    app_key = (body.get("app_key") or "").strip()
+    app_secret = (body.get("app_secret") or "").strip()
+
+    if not app_key or not app_secret:
+        return {"ok": False, "error": "app_key and app_secret are required."}
+
+    _save_dropbox_app_key(app_key)
+    _save_dropbox_app_secret(app_secret)
+
+    params = urlencode({
+        "client_id": app_key,
+        "response_type": "code",
+        "token_access_type": "offline",
+    })
+    auth_url = f"https://www.dropbox.com/oauth2/authorize?{params}"
+
+    return {"ok": True, "auth_url": auth_url}
+
+
+@router.post("/api/backup/oauth/exchange")
+async def oauth_exchange(request: Request):
+    """
+    Exchange the authorization code entered by the user for a refresh token.
+    Stores the refresh token as the Dropbox token.
+    """
+    body = {}
+    try:
+        body = await request.json()
+    except Exception:
+        pass
+
+    code = (body.get("code") or "").strip()
+    if not code:
+        return {"ok": False, "error": "Authorization code is required."}
+
+    app_key = _load_dropbox_app_key()
+    app_secret = _load_dropbox_app_secret()
+
+    if not app_key or not app_secret:
+        return {"ok": False, "error": "App key and secret not found. 
Run prepare step first."} + + try: + async with httpx.AsyncClient(timeout=30) as client: + resp = await client.post( + "https://api.dropbox.com/oauth2/token", + data={ + "code": code, + "grant_type": "authorization_code", + }, + auth=(app_key, app_secret), + ) + resp.raise_for_status() + data = resp.json() + except httpx.HTTPStatusError as e: + return {"ok": False, "error": f"Dropbox API error: {e.response.status_code} {e.response.text[:200]}"} + except Exception as e: + return {"ok": False, "error": str(e)} + + refresh_token = data.get("refresh_token", "").strip() + if not refresh_token: + return {"ok": False, "error": "No refresh token in Dropbox response. Make sure token_access_type=offline was used."} + + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO credentials (site, username, password, updated_at) + VALUES ('dropbox', %s, %s, NOW()) + ON CONFLICT (site) DO UPDATE + SET username = EXCLUDED.username, + password = EXCLUDED.password, + updated_at = NOW() + """, + (encrypt_value(""), encrypt_value(refresh_token)), + ) + + return {"ok": True, "message": "Refresh token saved. Dropbox is now connected."} + + +@router.get("/api/backup/progress") +async def backup_progress(): + if not BACKUP_PROGRESS: + return {"running": False} + log_id = max(BACKUP_PROGRESS.keys()) + p = BACKUP_PROGRESS[log_id] + return { + "running": True, + "log_id": log_id, + "done": p.get("done", 0), + "total": p.get("total", 0), + "phase": p.get("phase", ""), + } @router.post("/api/backup/run") diff --git a/containers/novela/routers/common.py b/containers/novela/routers/common.py index 13e38ae..ab5c36f 100644 --- a/containers/novela/routers/common.py +++ b/containers/novela/routers/common.py @@ -52,21 +52,41 @@ def media_type_from_suffix(path: Path) -> str: return "" +def parse_volume_str(value: int | str | None) -> tuple[int, str]: + """Parse a volume string like '21a' or '0' into (index, suffix). + + Returns (0, '') for anything unparseable. + index is clamped to 0–999; suffix is lowercased alpha only, max 5 chars. 
+ """ + s = str(value or "").strip() + m = re.match(r"^(\d+)([a-zA-Z]*)$", s) + if m: + idx = max(0, min(999, int(m.group(1)))) + suffix = m.group(2).lower()[:5] + return idx, suffix + try: + return max(0, min(999, int(float(s)))), "" + except Exception: + return 0, "" + + def coerce_series_index(value: int | str | None) -> int: try: - return max(1, min(999, int(value or 1))) + return max(0, min(999, int(value or 0))) except Exception: - return 1 + return 0 -def make_rel_path(*, media_type: str, publisher: str, author: str, title: str, series: str, series_index: int | str | None, ext: str = "") -> Path: +def make_rel_path(*, media_type: str, publisher: str, author: str, title: str, series: str, series_index: int | str | None, series_suffix: str = "", ext: str = "") -> Path: if media_type == "epub": pub = clean_segment(publisher, "Unknown Publisher", 80) auth = clean_segment(author, "Unknown Author", 80) ttl = clean_segment(title, "Untitled", 140) series_name = clean_segment(series, "", 80) if series_name: - return Path("epub") / pub / auth / "Series" / series_name / f"{coerce_series_index(series_index):03d} - {ttl}.epub" + idx = coerce_series_index(series_index) + sfx = re.sub(r"[^a-z]", "", (series_suffix or "").lower())[:5] + return Path("epub") / pub / auth / "Series" / series_name / f"{idx:03d}{sfx} - {ttl}.epub" return Path("epub") / pub / auth / "Stories" / f"{ttl}.epub" if media_type == "pdf": @@ -195,6 +215,7 @@ def scan_epub(path: Path) -> dict: "has_cover": False, "series": "", "series_index": 0, + "series_suffix": "", "title": "", "publication_status": "", "author": "", @@ -233,6 +254,9 @@ def scan_epub(path: Path) -> dict: out["series_index"] = int(float(m.group(1))) except Exception: out["series_index"] = 0 + m = re.search(r']*name="novela:series_suffix"[^>]*content="([^"]+)"', opf, re.IGNORECASE) + if m: + out["series_suffix"] = re.sub(r"[^a-z]", "", m.group(1).lower())[:5] m = re.search(r']*name="publication_status"[^>]*content="([^"]+)"', opf, re.IGNORECASE) if m: out["publication_status"] = _html.unescape(m.group(1).strip()) @@ -311,9 +335,9 @@ def upsert_book(conn, filename: str, meta: dict, tags: list[tuple[str, str]] | N cur.execute( """ INSERT INTO library (filename, media_type, title, author, publisher, has_cover, - series, series_index, publication_status, source_url, + series, series_index, series_suffix, publication_status, source_url, publish_date, description, needs_review, want_to_read, rating, updated_at) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FALSE, %s, NOW()) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FALSE, %s, NOW()) ON CONFLICT (filename) DO UPDATE SET media_type = EXCLUDED.media_type, title = COALESCE(NULLIF(EXCLUDED.title, ''), library.title), @@ -322,6 +346,7 @@ def upsert_book(conn, filename: str, meta: dict, tags: list[tuple[str, str]] | N has_cover = (library.has_cover OR EXCLUDED.has_cover), series = COALESCE(NULLIF(EXCLUDED.series, ''), library.series), series_index = CASE WHEN COALESCE(EXCLUDED.series_index, 0) > 0 THEN EXCLUDED.series_index ELSE library.series_index END, + series_suffix = COALESCE(NULLIF(EXCLUDED.series_suffix, ''), library.series_suffix), publication_status = COALESCE(NULLIF(EXCLUDED.publication_status, ''), library.publication_status), source_url = COALESCE(NULLIF(EXCLUDED.source_url, ''), library.source_url), publish_date = COALESCE(EXCLUDED.publish_date, library.publish_date), @@ -338,6 +363,7 @@ def upsert_book(conn, filename: str, meta: dict, tags: list[tuple[str, str]] | N 
bool(meta.get("has_cover", False)), meta.get("series", ""), meta.get("series_index", 0), + meta.get("series_suffix", ""), meta.get("publication_status", ""), meta.get("source_url", ""), meta.get("publish_date") or None, @@ -380,7 +406,8 @@ def list_library_json() -> list[dict]: COALESCE(rs.read_count, 0)::int AS read_count, rs.last_read, (cc.filename IS NOT NULL) AS has_cached_cover, - l.rating + l.rating, + COALESCE(l.series_suffix, '') AS series_suffix FROM library l LEFT JOIN reading_progress rp ON rp.filename = l.filename LEFT JOIN ( @@ -413,6 +440,7 @@ def list_library_json() -> list[dict]: "has_cached_cover": bool(r[18]), "series": r[6] or "", "series_index": r[7] or 0, + "series_suffix": r[20] or "", "publication_status": r[8] or "", "want_to_read": bool(r[9]), "archived": bool(r[10]), diff --git a/containers/novela/routers/library.py b/containers/novela/routers/library.py index 68291d2..3c3c8b9 100644 --- a/containers/novela/routers/library.py +++ b/containers/novela/routers/library.py @@ -219,10 +219,30 @@ async def library_cover(filename: str): if mt == "epub": from routers.common import extract_cover_from_epub + # Serve from cache when available (e.g. after a cover upload) + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + "SELECT thumb_webp, mime_type FROM library_cover_cache WHERE filename = %s", + (filename,), + ) + row = cur.fetchone() + if row and row[0]: + return Response(content=bytes(row[0]), media_type=row[1] or "image/webp") + + # Fall back to extracting directly from the EPUB file extracted = extract_cover_from_epub(full) if not extracted: return Response(status_code=404) raw, mime = extracted + # Warm the cache for next time + try: + thumb = make_cover_thumb_webp(raw) + with get_db_conn() as conn: + with conn: + upsert_cover_cache(conn, filename, "image/webp", thumb) + except Exception: + pass return Response(content=raw, media_type=mime) if mt in {"pdf", "cbr"}: diff --git a/containers/novela/routers/reader.py b/containers/novela/routers/reader.py index e722b69..d3c17df 100644 --- a/containers/novela/routers/reader.py +++ b/containers/novela/routers/reader.py @@ -14,7 +14,7 @@ from fastapi import APIRouter, Request from fastapi.responses import FileResponse, HTMLResponse, JSONResponse, Response from fastapi.templating import Jinja2Templates -from cbr import cbr_get_page +from cbr import cbr_get_page, cbr_page_count from db import get_db_conn from epub import read_epub_file, write_epub_file from pdf import pdf_page_count, pdf_render_page @@ -267,6 +267,7 @@ def _sync_epub_metadata( description: str, series: str, series_index: int | str | None, + series_suffix: str = "", subjects: list[str], ) -> None: """Write edited metadata back into OPF so DB and EPUB stay aligned.""" @@ -344,8 +345,11 @@ def _sync_epub_metadata( set_named_meta('calibre:series', series_val) if series_val: set_named_meta('calibre:series_index', str(_coerce_series_index(series_index))) + sfx = re.sub(r"[^a-z]", "", (series_suffix or "").lower())[:5] + set_named_meta('novela:series_suffix', sfx) else: set_named_meta('calibre:series_index', '') + set_named_meta('novela:series_suffix', '') _rewrite_epub_entries(epub_path, {opf_path: str(opf).encode('utf-8')}) @@ -391,9 +395,9 @@ def _clean_segment(value: str, fallback: str, max_len: int = 100) -> str: def _coerce_series_index(value: int | str | None) -> int: try: - return max(1, min(999, int(value or 1))) + return max(0, min(999, int(value or 0))) except (TypeError, ValueError): - return 1 + return 0 def _make_rel_path( @@ 
-403,6 +407,7 @@ def _make_rel_path( title: str, series: str, series_index: int | str | None, + series_suffix: str = "", ext: str = ".epub", ) -> Path: auth = _clean_segment(author, "Unknown Author", 80) @@ -413,7 +418,8 @@ def _make_rel_path( series_name = _clean_segment(series, "", 120) if series_name: idx = _coerce_series_index(series_index) - return Path("epub") / pub / auth / "Series" / series_name / f"{idx:03d} - {ttl}.epub" + sfx = re.sub(r"[^a-z]", "", (series_suffix or "").lower())[:5] + return Path("epub") / pub / auth / "Series" / series_name / f"{idx:03d}{sfx} - {ttl}.epub" return Path("epub") / pub / auth / "Stories" / f"{ttl}.epub" if ext == ".pdf": @@ -500,7 +506,7 @@ async def get_chapter_html(filename: str, index: int): resolved.pop() else: resolved.append(p) - img["src"] = f"/library/chapter-img/{'/'.join(resolved[1:])}?filename={filename}" + img["src"] = f"/library/chapter-img/{'/'.join(resolved)}?filename={filename}" return Response(str(body), media_type="text/html") @@ -514,7 +520,16 @@ async def get_chapter_image(path: str, filename: str): return Response(status_code=404) try: with zf.ZipFile(epub_path, "r") as z: - data = z.read("OEBPS/" + path) + names = z.namelist() + if path in names: + data = z.read(path) + else: + # Case-insensitive fallback + target = path.lower() + match = next((n for n in names if n.lower() == target), None) + if match is None: + return Response(status_code=404) + data = z.read(match) except KeyError: return Response(status_code=404) ext = path.rsplit(".", 1)[-1].lower() @@ -626,7 +641,7 @@ async def book_detail_page(filename: str, request: Request): """ SELECT title, author, publisher, has_cover, series, series_index, publication_status, want_to_read, source_url, archived, publish_date, description, - rating + rating, COALESCE(series_suffix, '') AS series_suffix FROM library WHERE filename = %s """, (filename,), @@ -640,6 +655,7 @@ async def book_detail_page(filename: str, request: Request): "has_cover": lib_row[3] or False, "series": lib_row[4] or "", "series_index": lib_row[5] or 0, + "series_suffix": lib_row[13] or "", "publication_status": lib_row[6] or "", "want_to_read": lib_row[7] or False, "source_url": lib_row[8] or "", @@ -703,6 +719,15 @@ async def book_detail_page(filename: str, request: Request): row = cur.fetchone() progress = row[1] or 0 if row else 0 cfi = row[0] if row else None + + series_is_indexed = False + if entry.get("series"): + cur.execute( + "SELECT COUNT(*) FROM library WHERE series = %s AND series_index > 0", + (entry["series"],), + ) + series_is_indexed = (cur.fetchone()[0] or 0) > 0 + return templates.TemplateResponse(request, "book.html", { "active": "book", "filename": filename, @@ -710,6 +735,7 @@ async def book_detail_page(filename: str, request: Request): "author": entry["author"], "series": entry["series"], "series_index": entry["series_index"], + "series_suffix": entry["series_suffix"], "genres": genres, "subgenres": subgenres, "tags": tags_list, @@ -726,6 +752,7 @@ async def book_detail_page(filename: str, request: Request): "progress": progress, "cfi": cfi, "rating": entry.get("rating", 0), + "series_is_indexed": series_is_indexed, }) @@ -752,6 +779,21 @@ async def api_genres(type: str | None = None): return JSONResponse(result) +@router.get("/api/suggestions") +async def api_suggestions(type: str | None = None): + """Return distinct non-empty values for author, publisher, or series, sorted alphabetically.""" + col_map = {"author": "author", "publisher": "publisher", "series": "series"} + col = 
col_map.get(type or "") + if not col: + return JSONResponse([]) + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + f"SELECT DISTINCT {col} FROM library WHERE {col} IS NOT NULL AND {col} <> '' ORDER BY {col}" + ) + return JSONResponse([r[0] for r in cur.fetchall()]) + + @router.patch("/library/book/{filename:path}") async def book_update(filename: str, request: Request): """Update book metadata and tags, and rename/move the file when needed.""" @@ -764,7 +806,8 @@ async def book_update(filename: str, request: Request): author = body.get("author", "") publisher = body.get("publisher", "") series = body.get("series", "") - series_index = _coerce_series_index(body.get("series_index", 1)) + from routers.common import parse_volume_str + series_index, series_suffix = parse_volume_str(body.get("series_index", "")) ext = old_path.suffix.lower() target_rel = _make_rel_path( @@ -773,6 +816,7 @@ async def book_update(filename: str, request: Request): title=title, series=series, series_index=series_index, + series_suffix=series_suffix, ext=ext, ) target_rel = _ensure_unique_rel_path(target_rel, exclude=old_path) @@ -798,6 +842,7 @@ async def book_update(filename: str, request: Request): description=body.get("description", ""), series=series, series_index=series_index if series else 0, + series_suffix=series_suffix if series else "", subjects=(body.get("genres", []) + body.get("subgenres", []) + body.get("tags", [])), ) @@ -812,17 +857,18 @@ async def book_update(filename: str, request: Request): """ INSERT INTO library ( filename, title, author, publisher, has_cover, - series, series_index, publication_status, + series, series_index, series_suffix, publication_status, source_url, publish_date, description, archived, needs_review, updated_at ) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FALSE, FALSE, NOW()) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FALSE, FALSE, NOW()) ON CONFLICT (filename) DO UPDATE SET title = EXCLUDED.title, author = EXCLUDED.author, publisher = EXCLUDED.publisher, series = EXCLUDED.series, series_index = EXCLUDED.series_index, + series_suffix = EXCLUDED.series_suffix, publication_status = EXCLUDED.publication_status, source_url = EXCLUDED.source_url, publish_date = EXCLUDED.publish_date, @@ -838,6 +884,7 @@ async def book_update(filename: str, request: Request): has_cover, series, series_index if series else 0, + series_suffix if series else "", body.get("publication_status", ""), body.get("source_url", ""), body.get("publish_date") or None, @@ -959,8 +1006,20 @@ async def library_pdf_page(filename: str, page: int = 0, dpi: int = 150): return JSONResponse({"error": str(e)}, status_code=500) -@router.get("/library/cbr/{filename:path}/{page:int}") -async def library_cbr_page(filename: str, page: int): +@router.get("/api/cbr/info/{filename:path}") +async def cbr_info(filename: str): + path = resolve_library_path(filename) + if path is None or not path.exists(): + return JSONResponse({"error": "not found"}, status_code=404) + try: + count = cbr_page_count(path) + return JSONResponse({"page_count": count}) + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=500) + + +@router.get("/library/cbr/{filename:path}") +async def library_cbr_page(filename: str, page: int = 0): path = resolve_library_path(filename) if path is None: return JSONResponse({"error": "Invalid filename"}, status_code=400) @@ -975,3 +1034,123 @@ async def library_cbr_page(filename: str, page: int): return JSONResponse({"error": "Page out of 
range"}, status_code=416) except Exception as e: return JSONResponse({"error": str(e)}, status_code=500) + + +# --------------------------------------------------------------------------- +# Bookmark routes +# --------------------------------------------------------------------------- + +@router.get("/library/bookmarks/{filename:path}") +async def get_bookmarks(filename: str): + if resolve_library_path(filename) is None: + return JSONResponse({"error": "Invalid filename"}, status_code=400) + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT id, chapter_index, scroll_frac, chapter_title, note, + created_at AT TIME ZONE 'UTC' + FROM bookmarks WHERE filename = %s ORDER BY created_at DESC + """, + (filename,), + ) + rows = cur.fetchall() + return JSONResponse([ + { + "id": r[0], + "chapter_index": r[1], + "scroll_frac": r[2], + "chapter_title": r[3], + "note": r[4], + "created_at": r[5].isoformat() + "Z" if r[5] else None, + } + for r in rows + ]) + + +@router.post("/library/bookmarks/{filename:path}") +async def add_bookmark(filename: str, request: Request): + if resolve_library_path(filename) is None: + return JSONResponse({"error": "Invalid filename"}, status_code=400) + body = await request.json() + chapter_index = int(body.get("chapter_index", 0)) + scroll_frac = float(body.get("scroll_frac", 0.0)) + chapter_title = str(body.get("chapter_title", ""))[:500] + note = str(body.get("note", "")) + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO bookmarks (filename, chapter_index, scroll_frac, chapter_title, note) + VALUES (%s, %s, %s, %s, %s) + RETURNING id, created_at AT TIME ZONE 'UTC' + """, + (filename, chapter_index, scroll_frac, chapter_title, note), + ) + row = cur.fetchone() + return JSONResponse({ + "ok": True, + "id": row[0], + "created_at": row[1].isoformat() + "Z" if row[1] else None, + }) + + +@router.patch("/library/bookmarks/{bookmark_id}") +async def update_bookmark(bookmark_id: int, request: Request): + body = await request.json() + note = str(body.get("note", "")) + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + "UPDATE bookmarks SET note = %s WHERE id = %s", + (note, bookmark_id), + ) + if cur.rowcount == 0: + return JSONResponse({"error": "not found"}, status_code=404) + return JSONResponse({"ok": True}) + + +@router.delete("/library/bookmarks/{bookmark_id}") +async def delete_bookmark(bookmark_id: int): + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("DELETE FROM bookmarks WHERE id = %s", (bookmark_id,)) + if cur.rowcount == 0: + return JSONResponse({"error": "not found"}, status_code=404) + return JSONResponse({"ok": True}) + + +@router.get("/api/bookmarks") +async def api_all_bookmarks(): + """Return all bookmarks across all books, enriched with book title/author.""" + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT b.id, b.filename, b.chapter_index, b.scroll_frac, + b.chapter_title, b.note, + b.created_at AT TIME ZONE 'UTC', + l.title, l.author + FROM bookmarks b + LEFT JOIN library l ON l.filename = b.filename + ORDER BY b.created_at DESC + """, + ) + rows = cur.fetchall() + return JSONResponse([ + { + "id": r[0], + "filename": r[1], + "chapter_index": r[2], + "scroll_frac": r[3], + "chapter_title": r[4], + "note": r[5], + "created_at": r[6].isoformat() + "Z" if r[6] else None, + "book_title": r[7] or r[1], + "book_author": r[8] or "", + } + for r in rows + ]) 
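The bookmark endpoints added above (GET/POST /library/bookmarks/{filename}, PATCH/DELETE /library/bookmarks/{bookmark_id}, GET /api/bookmarks) are what the bookmarks view added to the frontend below is expected to call. As a rough usage sketch only — it is not part of this change set, the function name is made up, and the filename escaping is assumed to follow whatever the existing /library/... calls in the frontend do — a client-side round trip could look like this:

    // Hypothetical sketch of a bookmark round trip against the routes above; values are illustrative.
    async function bookmarkDemo(filename) {
      const base = '/library/bookmarks/' + filename;  // escaping assumed to match other /library/ calls
      // Create a bookmark at 40% of chapter 3
      const created = await fetch(base, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ chapter_index: 3, scroll_frac: 0.4, chapter_title: 'Chapter 3', note: 'resume here' }),
      }).then(r => r.json());
      // List this book's bookmarks (newest first)
      const list = await fetch(base).then(r => r.json());
      // Update the note, then delete the bookmark by id
      await fetch('/library/bookmarks/' + created.id, {
        method: 'PATCH',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ note: 'finished' }),
      });
      await fetch('/library/bookmarks/' + created.id, { method: 'DELETE' });
      return list;
    }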
diff --git a/containers/novela/static/book.js b/containers/novela/static/book.js index 6723aeb..fb19d92 100644 --- a/containers/novela/static/book.js +++ b/containers/novela/static/book.js @@ -261,24 +261,89 @@ const genreInput = new PillInput('genre-box', 'genre-input', 'genre-dro const subgenreInput = new PillInput('subgenre-box', 'subgenre-input', 'subgenre-dropdown'); const tagInput = new PillInput('tag-box', 'tag-input', 'tag-dropdown'); +// ── TextSuggest — single-value autocomplete for plain text inputs ─────────── + +class TextSuggest { + constructor(inputId, dropdownId) { + this.input = document.getElementById(inputId); + this.dropdown = document.getElementById(dropdownId); + this.all = []; + this.ddIndex = -1; + this.input.addEventListener('input', () => this._onInput()); + this.input.addEventListener('keydown', (e) => this._onKeydown(e)); + this.input.addEventListener('blur', () => setTimeout(() => this._hide(), 150)); + } + + setSuggestions(all) { this.all = all; } + + _show(items) { + if (!items.length) { this._hide(); return; } + this.dropdown.innerHTML = items.map(v => + `
      <div class="genre-option" data-val="${v}">${v}</div>
` + ).join(''); + this.dropdown.querySelectorAll('.genre-option').forEach(el => { + el.onmousedown = (e) => { e.preventDefault(); this.input.value = el.dataset.val; this._hide(); }; + }); + this.dropdown.style.display = 'block'; + this.ddIndex = -1; + } + + _hide() { this.dropdown.style.display = 'none'; this.ddIndex = -1; } + + _onInput() { + const q = this.input.value.trim().toLowerCase(); + if (!q) { this._hide(); return; } + this._show(this.all.filter(v => v.toLowerCase().includes(q))); + } + + _onKeydown(e) { + const opts = this.dropdown.querySelectorAll('.genre-option'); + if (e.key === 'ArrowDown') { + e.preventDefault(); + this.ddIndex = Math.min(this.ddIndex + 1, opts.length - 1); + opts.forEach((o, i) => o.classList.toggle('active', i === this.ddIndex)); + } else if (e.key === 'ArrowUp') { + e.preventDefault(); + this.ddIndex = Math.max(this.ddIndex - 1, -1); + opts.forEach((o, i) => o.classList.toggle('active', i === this.ddIndex)); + } else if (e.key === 'Enter' && this.ddIndex >= 0 && opts[this.ddIndex]) { + e.preventDefault(); + this.input.value = opts[this.ddIndex].dataset.val; + this._hide(); + } else if (e.key === 'Escape') { + this._hide(); + } + } +} + +const authorSuggest = new TextSuggest('ed-author', 'author-dropdown'); +const publisherSuggest = new TextSuggest('ed-publisher', 'publisher-dropdown'); +const seriesSuggest = new TextSuggest('ed-series', 'series-dropdown'); + // ── Edit panel ───────────────────────────────────────────────────────────── async function openEdit() { - const [allGenres, allSubgenres, allTags] = await Promise.all([ + const [allGenres, allSubgenres, allTags, allAuthors, allPublishers, allSeries] = await Promise.all([ fetch('/api/genres?type=genre').then(r => r.json()), fetch('/api/genres?type=subgenre').then(r => r.json()), fetch('/api/genres?type=tag').then(r => r.json()), + fetch('/api/suggestions?type=author').then(r => r.json()), + fetch('/api/suggestions?type=publisher').then(r => r.json()), + fetch('/api/suggestions?type=series').then(r => r.json()), ]); genreInput.setSuggestions(allGenres); subgenreInput.setSuggestions(allSubgenres); tagInput.setSuggestions(allTags); + authorSuggest.setSuggestions(allAuthors); + publisherSuggest.setSuggestions(allPublishers); + seriesSuggest.setSuggestions(allSeries); document.getElementById('ed-title').value = BOOK.title; document.getElementById('ed-author').value = BOOK.author; document.getElementById('ed-publisher').value = BOOK.publisher; document.getElementById('ed-series').value = BOOK.series; - document.getElementById('ed-series-index').value = BOOK.series_index; - document.getElementById('ed-status').value = BOOK.publication_status; + document.getElementById('ed-series-index').value = BOOK.series_index + (BOOK.series_suffix || ''); + document.getElementById('ed-status').value = BOOK.publication_status || 'Complete'; document.getElementById('ed-url').value = BOOK.source_url; document.getElementById('ed-publish-date').value = BOOK.publish_date; document.getElementById('ed-description').value = BOOK.description; diff --git a/containers/novela/static/library.css b/containers/novela/static/library.css index 22d19c4..826ce23 100644 --- a/containers/novela/static/library.css +++ b/containers/novela/static/library.css @@ -551,6 +551,21 @@ html, body { cursor: not-allowed; } +.btn.btn-bulk-delete { + border: 1px solid rgba(200, 90, 58, 0.35); + background: rgba(200, 90, 58, 0.14); + color: var(--error); +} + +.btn.btn-bulk-delete:hover { + background: rgba(200, 90, 58, 0.24); +} + 
+.btn.btn-bulk-delete:disabled { + opacity: 0.45; + cursor: not-allowed; +} + .new-selection-count { font-family: var(--mono); font-size: 0.68rem; @@ -667,3 +682,114 @@ html, body { right: auto; } } + +/* ── Bookmark cards ─────────────────────────────────────────────────────── */ + +.bm-card { + display: flex; + gap: 1rem; + background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + padding: 0.9rem 1rem; + margin-bottom: 0.75rem; + max-width: 720px; +} + +.bm-card-cover { + flex-shrink: 0; + width: 60px; height: 90px; + border-radius: 3px; + overflow: hidden; + display: block; + background: var(--surface2); +} + +.bm-card-cover img { + width: 100%; height: 100%; + object-fit: cover; +} + +.bm-card-body { + flex: 1; + min-width: 0; + display: flex; + flex-direction: column; + gap: 0.2rem; +} + +.bm-card-book { + font-family: var(--serif); + font-size: 0.9rem; + color: var(--text); + font-weight: 700; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.bm-card-author { + font-family: var(--mono); + font-size: 0.7rem; + color: var(--text-dim); +} + +.bm-card-chapter { + font-family: var(--mono); + font-size: 0.72rem; + color: var(--accent); + margin-top: 0.15rem; +} + +.bm-card-note { + font-family: var(--mono); + font-size: 0.75rem; + color: var(--text-dim); + margin-top: 0.3rem; + white-space: pre-wrap; + line-height: 1.5; +} + +.bm-card-meta { + font-family: var(--mono); + font-size: 0.65rem; + color: var(--text-faint); + margin-top: auto; + padding-top: 0.4rem; +} + +.bm-card-actions { + display: flex; + gap: 0.5rem; + margin-top: 0.5rem; +} + +.btn-small { + display: inline-flex; + align-items: center; + padding: 0.25rem 0.65rem; + border-radius: var(--radius); + font-family: var(--mono); + font-size: 0.68rem; + cursor: pointer; + border: 1px solid var(--border); + background: none; + color: var(--text-dim); + text-decoration: none; + transition: color 0.12s, border-color 0.12s; +} + +.btn-small:hover { + color: var(--text); + border-color: var(--text-faint); +} + +.btn-small.btn-danger { + color: var(--error); + border-color: rgba(200,90,58,0.3); +} + +.btn-small.btn-danger:hover { + background: rgba(200,90,58,0.1); + border-color: var(--error); +} diff --git a/containers/novela/static/library.js b/containers/novela/static/library.js index a9e4b30..5b936a8 100644 --- a/containers/novela/static/library.js +++ b/containers/novela/static/library.js @@ -36,6 +36,8 @@ let newSelectedFilenames = new Set(); let newLastToggledIndex = null; let allViewMode = loadAllViewMode(); let allVisibleColumns = loadAllVisibleColumns(); +let allSelectedFilenames = new Set(); +let allLastToggledIndex = null; // ── Placeholder cover generation ─────────────────────────────────────────── @@ -133,6 +135,9 @@ function updateCounts() { if (newEl) newEl.textContent = newCount || ''; const archEl = document.getElementById('count-archived'); if (archEl) archEl.textContent = archCount || ''; + const ratedCount = active.filter(b => b.rating > 0).length; + const ratedEl = document.getElementById('count-rated'); + if (ratedEl) ratedEl.textContent = ratedCount || ''; } function _filenameBase(filename) { @@ -177,6 +182,8 @@ function _viewUrl(view, param) { if (view === 'publishers') return '/library#publishers'; if (view === 'publisher-detail') return '/library#publishers/' + encodeURIComponent(param || ''); if (view === 'archived') return '/library#archived'; + if (view === 'bookmarks') return '/library#bookmarks'; + if (view === 'rated') return 
   if (view === 'new') return '/library#new';
   if (view === 'genre') return '/library#genre/' + encodeURIComponent(param || '');
   return '/library';
@@ -192,7 +199,7 @@ function _applyView(view, param) {
     if (si) { si.value = ''; document.getElementById('search-clear').style.display = 'none'; }
   }
 
-  ['nav-all','nav-wtr','nav-new','nav-series','nav-authors','nav-publishers','nav-archived'].forEach(id => {
+  ['nav-all','nav-wtr','nav-new','nav-series','nav-authors','nav-publishers','nav-archived','nav-bookmarks','nav-rated'].forEach(id => {
     const el = document.getElementById(id);
     if (el) el.classList.remove('active');
   });
@@ -203,6 +210,8 @@
     'publishers': 'nav-publishers', 'publisher-detail': 'nav-publishers',
     'new': 'nav-new',
     'archived': 'nav-archived',
+    'bookmarks': 'nav-bookmarks',
+    'rated': 'nav-rated',
   };
   const el = document.getElementById(activeMap[view]);
   if (el) el.classList.add('active');
@@ -218,6 +227,8 @@
     view === 'publisher-detail' ? publisherDisplayName(param || '') :
     view === 'new' ? 'New' :
     view === 'archived' ? 'Archived' :
+    view === 'bookmarks' ? 'Bookmarks' :
+    view === 'rated' ? 'Rated' :
     view === 'genre' ? `Genre: ${param || ''}` :
     view === 'search' ? `Search: "${param || ''}"` :
     '';
@@ -225,6 +236,10 @@
     newSelectedFilenames.clear();
     newLastToggledIndex = null;
   }
+  if (view !== 'all') {
+    allSelectedFilenames.clear();
+    allLastToggledIndex = null;
+  }
 
   const showBack = view === 'series-detail' || view === 'author-detail' || view === 'publisher-detail';
   document.getElementById('back-btn').style.display = showBack ? '' : 'none';
@@ -262,6 +277,8 @@ function renderGrid() {
   else if (currentView === 'new') renderNewBooksView(active.filter(b => b.needs_review));
   else if (currentView === 'genre') renderGenreView(currentParam);
   else if (currentView === 'search') renderSearchResults(currentParam);
+  else if (currentView === 'bookmarks') renderBookmarksView();
+  else if (currentView === 'rated') renderRatedView();
 }
 
 // ── New view (bulk review + list/grid toggle) ─────────────────────────────
@@ -498,6 +515,22 @@ async function rateBook(filename, rating) {
   } catch {}
 }
 
+// Returns the set of series names where at least one book has series_index > 0.
+// Used to decide whether to show [0] labels for index-0 books in indexed series.
+function indexedSeriesSet() {
+  const set = new Set();
+  for (const b of allBooks) {
+    if (b.series && b.series_index > 0) set.add(b.series);
+  }
+  return set;
+}
+
+function seriesVolLabel(book, indexedSeries) {
+  if (book.series_index > 0 || book.series_suffix) return String(book.series_index) + (book.series_suffix || '');
+  if (book.series && indexedSeries.has(book.series)) return '0';
+  return '';
+}
+
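// Worked example (illustrative only, not part of the change above): if allBooks holds
// { series: 'Dune', series_index: 1 } and { series: 'Dune', series_index: 0 },
// indexedSeriesSet() returns Set { 'Dune' }, so the index-0 book is labelled '0'.
// A book with series_index 21 and series_suffix 'a' is labelled '21a'; a book whose
// series has no positive index anywhere gets an empty label.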
 function newCellText(book, colId) {
   if (colId === 'publisher') return publisherDisplayName(bookPublisherKey(book));
   if (colId === 'author') return bookAuthor(book);
@@ -508,7 +541,7 @@
   if (colId === 'genres') return bookGenres(book).join(', ');
   if (colId === 'subgenres') return bookSubgenres(book).join(', ');
   if (colId === 'tags') return bookPlainTags(book).join(', ');
-  if (colId === 'volume') return book.series_index > 0 ? String(book.series_index) : '';
+  if (colId === 'volume') return seriesVolLabel(book, indexedSeriesSet());
   if (colId === 'status') return book.publication_status || '';
   if (colId === 'rating') return starsText(book.rating);
   return '';
@@ -663,6 +696,10 @@ function hideAllControls() {
 function setAllViewMode(mode) {
   if (mode !== 'grid' && mode !== 'list') return;
   allViewMode = mode;
+  if (mode === 'grid') {
+    allSelectedFilenames.clear();
+    allLastToggledIndex = null;
+  }
   persistAllViewMode();
   renderGrid();
 }
@@ -687,6 +724,14 @@ function toggleAllColumn(columnId) {
 
 function renderAllControls() {
   const controls = document.getElementById('all-controls');
   if (!controls) return;
+
+  const books = activeBooks();
+  const listMode = allViewMode === 'list';
+  const selectedCount = listMode
+    ? books.filter(b => allSelectedFilenames.has(b.filename)).length
+    : 0;
+  const allSelected = listMode && !!books.length && selectedCount === books.length;
+
   controls.style.display = '';
   controls.innerHTML = `
@@ -694,7 +739,7 @@ function renderAllControls() {
-      ${allViewMode === 'list' ? `
+      ${listMode ? `
@@ -705,6 +750,12 @@
        `).join('')}
+        ${selectedCount} selected
+
+
+
      ` : ''}
   `;
@@ -717,17 +768,22 @@ function renderAllBooksList(books) {
     return;
   }
   const cols = NEW_COLUMN_DEFS.filter(c => allVisibleColumns.includes(c.id));
+  const selectedCount = books.filter(b => allSelectedFilenames.has(b.filename)).length;
+  const allSelected = !!books.length && selectedCount === books.length;
+
   container.innerHTML = `
+        ${cols.map(c => ``).join('')}
       ${books.map(b => `
+
         ${cols.map(c => {
           const value = newCellText(b, c.id);
           if (c.id === 'title') return ``;
@@ -741,7 +797,8 @@ function renderAllBooksList(books) {
   `;
   container.querySelectorAll('.all-list-row').forEach(row => {
-    row.addEventListener('click', () => {
+    row.addEventListener('click', e => {
+      if (e.target.type === 'checkbox') return;
       const filename = row.getAttribute('data-filename') || '';
       if (!filename) return;
       location.href = `/library/book/${encodeURIComponent(filename)}`;
@@ -758,16 +815,94 @@ function renderAllView(books) {
   }
 }
 
+function toggleSelectAllAllRows(checked, books) {
+  if (checked) {
+    books.forEach(b => allSelectedFilenames.add(b.filename));
+    allLastToggledIndex = books.length ? books.length - 1 : null;
+  } else {
+    books.forEach(b => allSelectedFilenames.delete(b.filename));
+    allLastToggledIndex = null;
+  }
+  renderAllControls();
+  const rowChecks = document.querySelectorAll('.all-row-select');
+  rowChecks.forEach(cb => { cb.checked = checked; });
+}
+
+function toggleAllRowWithShift(filename, checked, shiftPressed) {
+  const books = activeBooks();
+  const filenames = books.map(b => b.filename);
+  const idx = filenames.indexOf(filename);
+  if (idx === -1) return;
+
+  const doRange = !!(shiftPressed && allLastToggledIndex !== null);
+  if (doRange) {
+    const start = Math.min(allLastToggledIndex, idx);
+    const end = Math.max(allLastToggledIndex, idx);
+    for (let i = start; i <= end; i++) {
+      const name = filenames[i];
+      if (checked) allSelectedFilenames.add(name);
+      else allSelectedFilenames.delete(name);
+    }
+  } else {
+    if (checked) allSelectedFilenames.add(filename);
+    else allSelectedFilenames.delete(filename);
+  }
+
+  allLastToggledIndex = idx;
+  renderAllControls();
+  renderAllBooksList(books);
+}
+
+function handleAllRowCheckboxClick(filename, checkboxEl, ev) {
+  ev?.stopPropagation();
+  toggleAllRowWithShift(filename, !!checkboxEl?.checked, !!(ev && ev.shiftKey));
+}
+
+function clearAllSelection() {
+  activeBooks().forEach(b => allSelectedFilenames.delete(b.filename));
+  allLastToggledIndex = null;
+  renderGrid();
+}
+
+function deleteSelectedBooks() {
+  const count = allSelectedFilenames.size;
+  if (!count) return;
+  document.getElementById('bulk-delete-count').textContent = count;
+  document.getElementById('bulk-delete-overlay').classList.add('visible');
+}
+
+function closeBulkDeleteDialog() {
+  document.getElementById('bulk-delete-overlay').classList.remove('visible');
+}
+
+async function confirmBulkDelete() {
+  const filenames = [...allSelectedFilenames];
+  if (!filenames.length) return;
+  const btn = document.getElementById('bulk-delete-btn');
+  if (btn) btn.disabled = true;
+  for (const filename of filenames) {
+    try {
+      await fetch(`/library/file/${encodeURIComponent(filename)}`, { method: 'DELETE' });
+    } catch {}
+  }
+  closeBulkDeleteDialog();
+  allSelectedFilenames.clear();
+  allLastToggledIndex = null;
+  await loadLibrary();
+}
+
 // ── Book grid (All / WTR / Author detail) ─────────────────────────────────
 
 function renderBooksGrid(books) {
-  const container = document.getElementById('grid-container');
+  const container = document.getElementById('grid-container');
+  const idxSeries = indexedSeriesSet();
   if (!books.length) {
     container.innerHTML = `
       ${
         currentView === 'wtr' ? 'No books marked as Want to Read. Star a book to add it here.' :
         currentView === 'archived' ? 'No archived books. Archive a book from its detail page.' :
         currentView === 'new' ? 'No newly imported books waiting for metadata review.' :
+        currentView === 'rated' ? 'No rated books yet. Rate a book from its detail page.' :
         currentView === 'genre' ? `No books tagged "${esc(currentParam || '')}".` :
         currentView === 'search' ? `No results for "${esc(currentParam || '')}".` :
         'No books yet. Import EPUB, PDF or CBR/CBZ to get started.'
@@ -803,8 +938,9 @@ function renderBooksGrid(books) {
   }
 
   const starClass = b.want_to_read ? 'btn-star starred' : 'btn-star';
+  const seriesVol = seriesVolLabel(b, idxSeries);
   const seriesText = b.series
-    ? `${esc(b.series)}${b.series_index ? ' [' + b.series_index + ']' : ''}`
+    ? `${esc(b.series)}${seriesVol ? ' [' + esc(String(seriesVol)) + ']' : ''}`
     : '';
 
   card.innerHTML = `
@@ -870,7 +1006,10 @@ function groupBySeries() {
     if (!map[b.series]) map[b.series] = [];
     map[b.series].push(b);
   }
-  for (const s of Object.values(map)) s.sort((a, b) => a.series_index - b.series_index);
+  for (const s of Object.values(map)) s.sort((a, b) => {
+    if (a.series_index !== b.series_index) return a.series_index - b.series_index;
+    return (a.series_suffix || '').localeCompare(b.series_suffix || '');
+  });
   return map;
 }
 
@@ -963,10 +1102,19 @@ function renderSeriesGrid() {
 
 // ── Series detail ──────────────────────────────────────────────────────────
 
 function getSeriesSlots(books) {
-  const indexed = books.filter(b => b.series_index > 0);
-  const unindexed = books.filter(b => b.series_index === 0 || !b.series_index);
-  if (indexed.length === 0) return books;
+  // Treat books as indexed (including index 0) only when at least one book
+  // has series_index > 0 — this preserves the "unindexed flat list" behaviour
+  // for series where no indices were ever assigned.
+  const hasPositiveIndex = books.some(b => b.series_index > 0);
+  if (!hasPositiveIndex) return books;
+
+  // Sort indexed books by (series_index, series_suffix) so 21 < 21a < 21b < 22.
+  const indexed = [...books].sort((a, b) => {
+    if (a.series_index !== b.series_index) return a.series_index - b.series_index;
+    return (a.series_suffix || '').localeCompare(b.series_suffix || '');
+  });
+
+  // Build slot map keyed by numeric index only (for gap detection).
   const byIndex = {};
   for (const b of indexed) {
     if (!byIndex[b.series_index]) byIndex[b.series_index] = [];
@@ -980,13 +1128,14 @@
     if (byIndex[i]) for (const b of byIndex[i]) slots.push(b);
     else slots.push({ missing: true, series_index: i });
   }
-  return [...unindexed, ...slots];
+  return slots;
 }
 
 function renderSeriesDetail(seriesName) {
-  const map = groupBySeries();
-  const books = map[seriesName] || [];
-  const slots = getSeriesSlots(books);
+  const map = groupBySeries();
+  const books = map[seriesName] || [];
+  const hasPositiveIndex = books.some(b => b.series_index > 0);
+  const slots = getSeriesSlots(books);
 
   const container = document.getElementById('grid-container');
   if (!slots.length) {
@@ -1001,10 +1150,10 @@
     const wrapper = document.createElement('div');
     wrapper.className = 'series-slot' + (slot.missing ? ' slot-missing' : '');
-    if (slot.series_index) {
+    if (hasPositiveIndex || slot.series_index > 0 || slot.series_suffix) {
       const lbl = document.createElement('div');
       lbl.className = 'slot-index-label';
-      lbl.textContent = `#${slot.series_index}`;
+      lbl.textContent = `#${slot.series_index}${slot.series_suffix || ''}`;
       wrapper.appendChild(lbl);
     }
@@ -1237,6 +1386,93 @@ function clearSearch() {
   switchView('all');
 }
 
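// Shape assumed from the fields read below, not defined in this file: each entry of the
// /api/bookmarks response appears to look roughly like
//   { id: 12, filename: 'dune.epub', book_title: 'Dune', book_author: 'Frank Herbert',
//     chapter_title: 'Chapter 3', note: 'optional text', created_at: '2024-05-01T09:30:00Z' }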
+// ── Bookmarks view ─────────────────────────────────────────────────────────
+
+async function renderBookmarksView() {
+  const container = document.getElementById('grid-container');
+  container.innerHTML = 'Loading bookmarks…';
+
+  let bookmarks;
+  try {
+    const resp = await fetch('/api/bookmarks');
+    bookmarks = await resp.json();
+  } catch {
+    container.innerHTML = 'Failed to load bookmarks.';
+    return;
+  }
+
+  if (!bookmarks.length) {
+    container.innerHTML = 'No bookmarks yet. Tap the Bookmark button in the reader to save your place.';
+    const el = document.getElementById('count-bookmarks');
+    if (el) el.textContent = '';
+    return;
+  }
+
+  const el = document.getElementById('count-bookmarks');
+  if (el) el.textContent = bookmarks.length || '';
+
+  container.innerHTML = bookmarks.map(bm => {
+    const date = bm.created_at ? _fmtDate(bm.created_at) : '';
+    const note = bm.note ? `${esc(bm.note)}` : '';
+    const coverUrl = `/library/cover/${encodeURIComponent(bm.filename)}`;
+    return `
+
+
+
+        ${esc(bm.book_title)}
+        ${esc(bm.book_author)}
+        ${esc(bm.chapter_title)}
+        ${note}
+        ${date}
+
+
+    `;
+  }).join('');
+}
+
+async function deleteBookmark(id) {
+  if (!confirm('Delete this bookmark?')) return;
+  const resp = await fetch(`/library/bookmarks/${id}`, { method: 'DELETE' });
+  const data = await resp.json();
+  if (data.ok) {
+    const card = document.getElementById(`bmc-${id}`);
+    if (card) card.remove();
+    // Update count
+    const remaining = document.querySelectorAll('.bm-card').length;
+    const el = document.getElementById('count-bookmarks');
+    if (el) el.textContent = remaining || '';
+    if (!remaining) {
+      document.getElementById('grid-container').innerHTML =
+        'No bookmarks yet. Tap the Bookmark button in the reader to save your place.';
+    }
+  } else {
+    alert('Could not delete bookmark.');
+  }
+}
+
+function _fmtDate(isoStr) {
+  try {
+    const s = /[Zz+\-]\d*$/.test(isoStr.trim()) ? isoStr : isoStr + 'Z';
+    return new Date(s).toLocaleDateString(undefined, { year: 'numeric', month: 'short', day: 'numeric' });
+  } catch { return ''; }
+}
+
+// ── Rated view ─────────────────────────────────────────────────────────────
+
+function renderRatedView() {
+  const books = activeBooks()
+    .filter(b => b.rating > 0)
+    .sort((a, b) => (b.rating - a.rating) || bookTitle(a).localeCompare(bookTitle(b)));
+  renderBooksGrid(books);
+}
+
 // ── Author detail ──────────────────────────────────────────────────────────
 
 function renderAuthorDetail(authorName) {
@@ -1247,6 +1483,7 @@ function renderAuthorDetail(authorName) {
     const sb = b.series || '\uffff';
     if (sa !== sb) return sa.localeCompare(sb);
     if (a.series_index !== b.series_index) return a.series_index - b.series_index;
+    if ((a.series_suffix || '') !== (b.series_suffix || '')) return (a.series_suffix || '').localeCompare(b.series_suffix || '');
     return bookTitle(a).localeCompare(bookTitle(b));
   });
   renderBooksGrid(books);
@@ -1261,6 +1498,7 @@ function renderPublisherDetail(publisherName) {
     const sb = b.series || '\uffff';
     if (sa !== sb) return sa.localeCompare(sb);
     if (a.series_index !== b.series_index) return a.series_index - b.series_index;
+    if ((a.series_suffix || '') !== (b.series_suffix || '')) return (a.series_suffix || '').localeCompare(b.series_suffix || '');
     return bookTitle(a).localeCompare(bookTitle(b));
   });
   renderBooksGrid(books);
@@ -1464,7 +1702,7 @@ document.getElementById('search-input').addEventListener('input', function() {
   if (q) {
     currentView = 'search';
     currentParam = q;
-    ['nav-all','nav-wtr','nav-new','nav-series','nav-authors','nav-publishers','nav-archived'].forEach(id => {
+    ['nav-all','nav-wtr','nav-new','nav-series','nav-authors','nav-publishers','nav-archived','nav-bookmarks','nav-rated'].forEach(id => {
       const el = document.getElementById(id);
       if (el) el.classList.remove('active');
     });
@@ -1524,6 +1762,8 @@ loadLibrary().then(() => {
   else if (hash.startsWith('publisher/')) { view = 'publisher-detail'; param = decodeURIComponent(hash.slice(10)); }
   else if (hash === 'archived') view = 'archived';
   else if (hash === 'new') view = 'new';
+  else if (hash === 'bookmarks') view = 'bookmarks';
+  else if (hash === 'rated') view = 'rated';
   else if (hash.startsWith('genre/')) { view = 'genre'; param = decodeURIComponent(hash.slice(6)); }
   history.replaceState({ view, param }, '', _viewUrl(view, param));
   _applyView(view, param);
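_fmtDate() above treats a timestamp with no trailing zone designator as UTC by appending 'Z' before parsing. A quick sketch of the resulting behaviour (values are illustrative):

    _fmtDate('2024-05-01T09:30:00')   // no zone marker, 'Z' appended, parsed as UTC
    _fmtDate('2024-05-01T09:30:00Z')  // already has a zone marker, used as-is
    _fmtDate(null)                    // .trim() throws, caught, returns ''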
diff --git a/containers/novela/static/sidebar.css b/containers/novela/static/sidebar.css
index b2fdab1..e726e08 100644
--- a/containers/novela/static/sidebar.css
+++ b/containers/novela/static/sidebar.css
@@ -109,6 +109,43 @@ html {
 .btn-rescan:hover { background: var(--surface2); color: var(--text); }
 .btn-rescan:disabled { opacity: 0.5; cursor: not-allowed; }
 
+.backup-status-bar {
+  display: flex;
+  align-items: center;
+  gap: 0.45rem;
+  width: 100%;
+  padding: 0.35rem 0.6rem;
+  border-radius: var(--radius);
+  font-family: var(--mono);
+  font-size: 0.68rem;
+  color: var(--accent);
+  text-decoration: none;
+  transition: background 0.12s, opacity 0.12s;
+  margin-bottom: 0.25rem;
+}
+.backup-status-bar:hover { background: var(--surface2); opacity: 0.85; }
+
+.backup-dot {
+  flex-shrink: 0;
+  width: 7px;
+  height: 7px;
+  border-radius: 50%;
+  background: var(--border);
+}
+.backup-dot.dot-ok { background: var(--ok, #7fbe7f); }
+.backup-dot.dot-err { background: var(--err, #d0674c); }
+.backup-dot.dot-dim { background: var(--text-dim); opacity: 0.5; }
+.backup-dot.dot-running {
+  background: var(--warn, #d2b063);
+  animation: backup-pulse 1.2s ease-in-out infinite;
+}
+@keyframes backup-pulse {
+  0%, 100% { opacity: 1; }
+  50% { opacity: 0.3; }
+}
+
+.backup-status-text { overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
+
 /* ── Mobile hamburger ──────────────────────────────────────────────────── */
 
 .sidebar-toggle {
diff --git a/containers/novela/templates/_sidebar.html b/containers/novela/templates/_sidebar.html
index 786feef..5c3cbd4 100644
--- a/containers/novela/templates/_sidebar.html
+++ b/containers/novela/templates/_sidebar.html
@@ -109,6 +109,26 @@
+
+
+
+
+
+        Bookmarks
+
+
+
+
+
+
+        Rated
+
+
+
@@ -171,6 +191,10 @@
+
+
+
+
+      Delete books
+
+      Delete selected book(s)?
+      Files will be permanently removed from disk. This cannot be undone.
+
+
+
+
+
+
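The bulk-delete confirmation flow in library.js relies on three element IDs in this template: bulk-delete-overlay, bulk-delete-count and bulk-delete-btn, with deleteSelectedBooks() opening the overlay and confirmBulkDelete() / closeBulkDeleteDialog() dismissing it. A hypothetical wiring sketch, assuming script-bound handlers (the template may instead use inline onclick attributes; the selectors below are assumptions):

    // Illustrative only; selector names are not taken from this change.
    document.getElementById('bulk-delete-btn')?.addEventListener('click', confirmBulkDelete);
    document.querySelector('#bulk-delete-overlay [data-action="cancel"]')?.addEventListener('click', closeBulkDeleteDialog);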
diff --git a/containers/novela/templates/reader.html b/containers/novela/templates/reader.html
index 3bbd193..7287fa2 100644
--- a/containers/novela/templates/reader.html
+++ b/containers/novela/templates/reader.html
@@ -67,6 +67,58 @@
     .btn-header:hover { color: var(--text); border-color: var(--text-faint); }
     .btn-header-read { color: var(--success); border-color: rgba(107,170,107,0.3); }
     .btn-header-read:hover { background: rgba(107,170,107,0.08); border-color: var(--success); }
+    .btn-header-bm { color: var(--accent); border-color: rgba(200,120,58,0.3); }
+    .btn-header-bm:hover { background: rgba(200,120,58,0.08); border-color: var(--accent); }
+
+    /* ── Bookmark modal ── */
+    .bm-overlay {
+      display: none; position: fixed; inset: 0;
+      background: rgba(0,0,0,0.55); z-index: 300;
+      align-items: center; justify-content: center;
+    }
+    .bm-overlay.open { display: flex; }
+    .bm-modal {
+      background: var(--surface);
+      border: 1px solid var(--border);
+      border-radius: var(--radius);
+      padding: 1.4rem 1.5rem;
+      width: min(420px, 92vw);
+    }
+    .bm-title {
+      font-family: var(--mono); font-size: 0.7rem;
+      letter-spacing: 0.1em; text-transform: uppercase;
+      color: var(--accent); margin-bottom: 1rem;
+    }
+    .bm-chapter {
+      font-family: var(--mono); font-size: 0.72rem;
+      color: var(--text-dim); margin-bottom: 1rem;
+      white-space: nowrap; overflow: hidden; text-overflow: ellipsis;
+    }
+    .bm-label {
+      font-family: var(--mono); font-size: 0.7rem;
+      color: var(--text-dim); margin-bottom: 0.4rem; display: block;
+    }
+    .bm-textarea {
+      width: 100%; min-height: 90px;
+      background: var(--surface2); border: 1px solid var(--border);
+      border-radius: var(--radius);
+      font-family: var(--mono); font-size: 0.78rem;
+      color: var(--text); padding: 0.55rem 0.75rem;
+      resize: vertical; line-height: 1.5;
+      margin-bottom: 1rem;
+    }
+    .bm-textarea:focus { outline: none; border-color: var(--accent); }
+    .bm-actions { display: flex; gap: 0.6rem; justify-content: flex-end; }
+    .bm-btn {
+      padding: 0.4rem 1rem;
+      border-radius: var(--radius);
+      font-family: var(--mono); font-size: 0.72rem;
+      cursor: pointer; border: 1px solid var(--border);
+    }
+    .bm-btn-cancel { background: none; color: var(--text-dim); }
+    .bm-btn-cancel:hover { border-color: var(--text-faint); color: var(--text); }
+    .bm-btn-save { background: var(--accent); color: #fff; border-color: var(--accent); }
+    .bm-btn-save:hover { filter: brightness(1.1); }
 
     /* ── Settings drawer ── */
     .settings-overlay {
@@ -226,6 +278,20 @@
+
+
+      Add bookmark
+
+
+
+
+
+
+
+
+
@@ -273,6 +339,12 @@
+
       ${esc(c.label)}
       ${esc(value)}