commit 0ae181706d92ea2dc7068725f6b32e9957003019 Author: Ivo Oskamp Date: Sun Mar 22 16:13:45 2026 +0100 Bootstrap Novela 2.0 implementation and docs diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2620661 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +.files/ + +.last-branch diff --git a/build-and-push.sh b/build-and-push.sh new file mode 100755 index 0000000..b024899 --- /dev/null +++ b/build-and-push.sh @@ -0,0 +1,269 @@ +#!/usr/bin/env bash +set -euo pipefail + +# ============================================================================ +# build-and-push.sh +# Location: repo root (e.g. /docker/develop/novela) +# +# Purpose: +# - Automatic version bump: +# 1 = patch, 2 = minor, 3 = major, t = test +# - Test builds: only update :dev (no commit/tag) +# - Release builds: update version.txt, commit, tag, push (to the current branch) +# - Build & push Docker images for each service under ./containers/* +# - Preflight checks: Docker daemon up, logged in to registry, valid names/tags +# - Summary: show all images + tags built and pushed +# - Branch visibility: +# - Shows currently checked out branch (authoritative) +# - Reads .last-branch for info (if present) when BRANCH is not set +# - Writes the current branch back to .last-branch at the end +# +# Usage: +# BRANCH= ./build-and-push.sh [bump] # BRANCH is optional; informative only +# ./build-and-push.sh [bump] +# If [bump] is omitted, you will be prompted (default = t). +# ============================================================================ + +DOCKER_REGISTRY="gitea.oskamp.info" +DOCKER_NAMESPACE="ivooskamp" + +VERSION_FILE="version.txt" +START_VERSION="v0.1.0" +COMPOSE_DIR="containers" +LAST_BRANCH_FILE=".last-branch" # stored in repo root + +# --- Input: prompt if missing ------------------------------------------------ +BUMP="${1:-}" +if [[ -z "${BUMP}" ]]; then + echo "Select bump type: [1] patch, [2] minor, [3] major, [t] test (default: t)" + read -r BUMP + BUMP="${BUMP:-t}" +fi + +if [[ "$BUMP" != "1" && "$BUMP" != "2" && "$BUMP" != "3" && "$BUMP" != "t" ]]; then + echo "[ERROR] Unknown bump type '$BUMP' (use 1, 2, 3, or t)." + exit 1 +fi + +# --- Helpers ----------------------------------------------------------------- +read_version() { + if [[ -f "$VERSION_FILE" ]]; then + tr -d ' \t\n\r' < "$VERSION_FILE" + else + echo "$START_VERSION" + fi +} + +write_version() { + echo "$1" > "$VERSION_FILE" +} + +bump_version() { + local cur="$1" + local kind="$2" + local core="${cur#v}" + IFS='.' read -r MA MI PA <<< "$core" + case "$kind" in + 1) PA=$((PA + 1));; + 2) MI=$((MI + 1)); PA=0;; + 3) MA=$((MA + 1)); MI=0; PA=0;; + *) echo "[ERROR] Unknown bump kind"; exit 1;; + esac + echo "v${MA}.${MI}.${PA}" +} + +check_docker_ready() { + if ! docker info >/dev/null 2>&1; then + echo "[ERROR] Docker daemon not reachable. Is Docker running and do you have permission to use it?" + exit 1 + fi +} + +ensure_registry_login() { + local cfg="${HOME}/.docker/config.json" + if [[ ! -f "$cfg" ]]; then + echo "[ERROR] Docker config not found at $cfg. Please login: docker login ${DOCKER_REGISTRY}" + exit 1 + fi + if ! grep -q "\"${DOCKER_REGISTRY}\"" "$cfg"; then + echo "[ERROR] No registry auth found for ${DOCKER_REGISTRY}. Please run: docker login ${DOCKER_REGISTRY}" + exit 1 + fi +} + +validate_repo_component() { + local comp="$1" + if [[ ! "$comp" =~ ^[a-z0-9]+([._-][a-z0-9]+)*$ ]]; then + echo "[ERROR] Invalid repository component '$comp'." 
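+    # Illustrative examples (not taken from this script): "novela" and "my_service-2"
+    # satisfy this pattern, while "MyService" (uppercase) or "-svc" (leading separator) do not.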
+ echo " Must match: ^[a-z0-9]+([._-][a-z0-9]+)*$ (lowercase, digits, ., _, - as separators)." + return 1 + fi +} + +validate_tag() { + local tag="$1" + local len="${#tag}" + if (( len < 1 || len > 128 )); then + echo "[ERROR] Invalid tag length ($len). Must be between 1 and 128 characters." + return 1 + fi + if [[ ! "$tag" =~ ^[A-Za-z0-9_][A-Za-z0-9_.-]*$ ]]; then + echo "[ERROR] Invalid tag '$tag'. Allowed: [A-Za-z0-9_.-], must start with alphanumeric or underscore." + return 1 + fi +} + +# --- Preflight --------------------------------------------------------------- +if [[ ! -d ".git" ]]; then + echo "[ERROR] Not a git repository (.git missing)." + exit 1 +fi + +if [[ ! -d "$COMPOSE_DIR" ]]; then + echo "[ERROR] '$COMPOSE_DIR' directory missing. Expected ./containers// with a Dockerfile." + exit 1 +fi + +check_docker_ready +ensure_registry_login +validate_repo_component "$DOCKER_NAMESPACE" + +# Detect currently checked out branch (authoritative for this script) +DETECTED_BRANCH="$(git branch --show-current 2>/dev/null || true)" +if [[ -z "$DETECTED_BRANCH" ]]; then + DETECTED_BRANCH="$(git symbolic-ref --quiet --short HEAD 2>/dev/null || true)" +fi +if [[ -z "$DETECTED_BRANCH" ]]; then + # Try to derive from upstream + UPSTREAM_REF_DERIVED="$(git rev-parse --abbrev-ref --symbolic-full-name @{u} 2>/dev/null || true)" + if [[ -n "$UPSTREAM_REF_DERIVED" ]]; then + DETECTED_BRANCH="${UPSTREAM_REF_DERIVED#origin/}" + fi +fi +if [[ -z "$DETECTED_BRANCH" ]]; then + DETECTED_BRANCH="main" +fi + +# Optional signals: BRANCH env and .last-branch (informational only) +ENV_BRANCH="${BRANCH:-}" +LAST_BRANCH_FILE_PATH="$(pwd)/$LAST_BRANCH_FILE" +LAST_BRANCH_VALUE="" +if [[ -z "$ENV_BRANCH" && -f "$LAST_BRANCH_FILE_PATH" ]]; then + LAST_BRANCH_VALUE="$(tr -d ' \t\n\r' < "$LAST_BRANCH_FILE_PATH")" +fi + +UPSTREAM_REF="$(git rev-parse --abbrev-ref --symbolic-full-name @{u} 2>/dev/null || echo "origin/$DETECTED_BRANCH")" +HEAD_SHA="$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")" + +echo "[INFO] Repo: $(pwd)" +echo "[INFO] Current branch: $DETECTED_BRANCH" +echo "[INFO] Upstream: $UPSTREAM_REF" +echo "[INFO] HEAD (sha): $HEAD_SHA" + +if [[ -n "$ENV_BRANCH" && "$ENV_BRANCH" != "$DETECTED_BRANCH" ]]; then + echo "[WARNING] BRANCH='$ENV_BRANCH' differs from checked out branch '$DETECTED_BRANCH'." + echo "[WARNING] This script does not switch branches; continuing on '$DETECTED_BRANCH'." +fi + +if [[ -n "$LAST_BRANCH_VALUE" && "$LAST_BRANCH_VALUE" != "$DETECTED_BRANCH" && -z "$ENV_BRANCH" ]]; then + echo "[INFO] .last-branch suggests '$LAST_BRANCH_VALUE', but current checkout is '$DETECTED_BRANCH'." + echo "[INFO] If you intended to build '$LAST_BRANCH_VALUE', switch branches first (use update-and-build.sh)." +fi + +# --- Versioning -------------------------------------------------------------- +CURRENT_VERSION="$(read_version)" +NEW_VERSION="$CURRENT_VERSION" +DO_TAG_AND_BUMP=true + +if [[ "$BUMP" == "t" ]]; then + echo "[INFO] Test build: keeping version $CURRENT_VERSION; will only update :dev." 
+ DO_TAG_AND_BUMP=false +else + NEW_VERSION="$(bump_version "$CURRENT_VERSION" "$BUMP")" + echo "[INFO] New version: $NEW_VERSION" +fi + +if $DO_TAG_AND_BUMP; then + validate_tag "$NEW_VERSION" +fi +validate_tag "latest" + +# --- Version update + VCS ops (release builds only) -------------------------- +if $DO_TAG_AND_BUMP; then + echo "[INFO] Writing $NEW_VERSION to $VERSION_FILE" + write_version "$NEW_VERSION" + + echo "[INFO] Git add + commit (branch: $DETECTED_BRANCH)" + git add "$VERSION_FILE" + git commit -m "Release $NEW_VERSION on branch $DETECTED_BRANCH (bump type $BUMP)" + + echo "[INFO] Git tag $NEW_VERSION" + git tag -a "$NEW_VERSION" -m "Release $NEW_VERSION" + + echo "[INFO] Git push + tags" + git push origin "$DETECTED_BRANCH" + git push --tags +else + echo "[INFO] Skipping commit/tagging (test build)." +fi + +# --- Build & push per service ------------------------------------------------ +shopt -s nullglob +services=( "$COMPOSE_DIR"/* ) +if [[ ${#services[@]} -eq 0 ]]; then + echo "[ERROR] No services found under $COMPOSE_DIR" + exit 1 +fi + +BUILT_IMAGES=() + +for svc_path in "${services[@]}"; do + [[ -d "$svc_path" ]] || continue + svc="$(basename "$svc_path")" + dockerfile="$svc_path/Dockerfile" + + validate_repo_component "$svc" + + if [[ ! -f "$dockerfile" ]]; then + echo "[WARNING] Skipping '${svc}': Dockerfile not found in ${svc_path}" + continue + fi + + IMAGE_BASE="${DOCKER_REGISTRY}/${DOCKER_NAMESPACE}/${svc}" + + if $DO_TAG_AND_BUMP; then + echo "============================================================" + echo "[INFO] Building ${svc} -> tags: ${NEW_VERSION}, latest" + echo "============================================================" + docker build -t "${IMAGE_BASE}:${NEW_VERSION}" -t "${IMAGE_BASE}:dev" "$svc_path" + docker push "${IMAGE_BASE}:${NEW_VERSION}" + docker push "${IMAGE_BASE}:dev" + BUILT_IMAGES+=("${IMAGE_BASE}:${NEW_VERSION}" "${IMAGE_BASE}:dev") + else + echo "============================================================" + echo "[INFO] Test build ${svc} -> tag: latest" + echo "============================================================" + docker build -t "${IMAGE_BASE}:dev" "$svc_path" + docker push "${IMAGE_BASE}:dev" + BUILT_IMAGES+=("${IMAGE_BASE}:dev") + fi +done + +# --- Persist current branch to .last-branch ---------------------------------- +# (This helps script 1 to preselect next time, and is informative if you run script 2 standalone) +echo "$DETECTED_BRANCH" > "$LAST_BRANCH_FILE_PATH" + +# --- Summary ----------------------------------------------------------------- +echo "" +echo "============================================================" +echo "[SUMMARY] Build & push complete (branch: $DETECTED_BRANCH)" +if $DO_TAG_AND_BUMP; then + echo "[INFO] Release version: $NEW_VERSION" +else + echo "[INFO] Test build (no version bump)" +fi +echo "[INFO] Images pushed:" +for img in "${BUILT_IMAGES[@]}"; do + echo " - $img" +done +echo "============================================================" diff --git a/containers/novela/Dockerfile b/containers/novela/Dockerfile new file mode 100644 index 0000000..7477bfb --- /dev/null +++ b/containers/novela/Dockerfile @@ -0,0 +1,17 @@ +FROM python:3.12-slim + +WORKDIR /app + +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + libmagic1 \ + unrar-free \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt /app/requirements.txt +RUN pip install --no-cache-dir -r /app/requirements.txt + +COPY . 
/app + +EXPOSE 8000 +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/containers/novela/__pycache__/cbr.cpython-311.pyc b/containers/novela/__pycache__/cbr.cpython-311.pyc new file mode 100644 index 0000000..bd13a81 Binary files /dev/null and b/containers/novela/__pycache__/cbr.cpython-311.pyc differ diff --git a/containers/novela/__pycache__/db.cpython-311.pyc b/containers/novela/__pycache__/db.cpython-311.pyc new file mode 100644 index 0000000..646338e Binary files /dev/null and b/containers/novela/__pycache__/db.cpython-311.pyc differ diff --git a/containers/novela/__pycache__/epub.cpython-311.pyc b/containers/novela/__pycache__/epub.cpython-311.pyc new file mode 100644 index 0000000..58ad153 Binary files /dev/null and b/containers/novela/__pycache__/epub.cpython-311.pyc differ diff --git a/containers/novela/__pycache__/main.cpython-311.pyc b/containers/novela/__pycache__/main.cpython-311.pyc new file mode 100644 index 0000000..cd8b800 Binary files /dev/null and b/containers/novela/__pycache__/main.cpython-311.pyc differ diff --git a/containers/novela/__pycache__/migrations.cpython-311.pyc b/containers/novela/__pycache__/migrations.cpython-311.pyc new file mode 100644 index 0000000..c45f876 Binary files /dev/null and b/containers/novela/__pycache__/migrations.cpython-311.pyc differ diff --git a/containers/novela/__pycache__/pdf.cpython-311.pyc b/containers/novela/__pycache__/pdf.cpython-311.pyc new file mode 100644 index 0000000..7f0c80c Binary files /dev/null and b/containers/novela/__pycache__/pdf.cpython-311.pyc differ diff --git a/containers/novela/__pycache__/security.cpython-311.pyc b/containers/novela/__pycache__/security.cpython-311.pyc new file mode 100644 index 0000000..89aed21 Binary files /dev/null and b/containers/novela/__pycache__/security.cpython-311.pyc differ diff --git a/containers/novela/__pycache__/xhtml.cpython-311.pyc b/containers/novela/__pycache__/xhtml.cpython-311.pyc new file mode 100644 index 0000000..52319ba Binary files /dev/null and b/containers/novela/__pycache__/xhtml.cpython-311.pyc differ diff --git a/containers/novela/cbr.py b/containers/novela/cbr.py new file mode 100644 index 0000000..621e7a6 --- /dev/null +++ b/containers/novela/cbr.py @@ -0,0 +1,61 @@ +from io import BytesIO +from pathlib import Path +import zipfile + +import rarfile +from PIL import Image, ImageOps + +SUPPORTED_IMG = {".jpg", ".jpeg", ".png", ".webp", ".gif", ".bmp"} + + + +def _is_cbz(path: Path) -> bool: + return path.suffix.lower() == ".cbz" + + +def cbr_page_list(path: Path) -> list[str]: + if _is_cbz(path): + with zipfile.ZipFile(path) as zf: + names = [n for n in zf.namelist() if Path(n).suffix.lower() in SUPPORTED_IMG] + else: + with rarfile.RarFile(path) as rf: + names = [n for n in rf.namelist() if Path(n).suffix.lower() in SUPPORTED_IMG] + return sorted(names) + + +def cbr_page_count(path: Path) -> int: + return len(cbr_page_list(path)) + + +def cbr_get_page(path: Path, page_num: int) -> tuple[bytes, str]: + pages = cbr_page_list(path) + if page_num < 0 or page_num >= len(pages): + raise IndexError("Page out of range") + name = pages[page_num] + ext = Path(name).suffix.lower().lstrip(".") + mime = { + "jpg": "image/jpeg", + "jpeg": "image/jpeg", + "png": "image/png", + "webp": "image/webp", + "gif": "image/gif", + "bmp": "image/bmp", + }.get(ext, "image/jpeg") + + if _is_cbz(path): + with zipfile.ZipFile(path) as zf: + return zf.read(name), mime + with rarfile.RarFile(path) as rf: + return rf.read(name), mime + + +def cbr_cover_thumb(path: 
Path) -> bytes: + data, _ = cbr_get_page(path, 0) + with Image.open(BytesIO(data)) as im: + im = ImageOps.exif_transpose(im) + if im.mode not in ("RGB", "RGBA"): + im = im.convert("RGB") + thumb = ImageOps.fit(im, (300, 450), method=Image.Resampling.LANCZOS) + out = BytesIO() + thumb.save(out, format="WEBP", quality=82, method=6) + return out.getvalue() diff --git a/containers/novela/db.py b/containers/novela/db.py new file mode 100644 index 0000000..97a6d8c --- /dev/null +++ b/containers/novela/db.py @@ -0,0 +1,55 @@ +import os +from contextlib import contextmanager + +import psycopg2 +from psycopg2 import pool + +_pool: pool.ThreadedConnectionPool | None = None + + +def _db_config() -> dict: + return { + "host": os.environ.get("POSTGRES_HOST", "postgres"), + "port": int(os.environ.get("POSTGRES_PORT", 5432)), + "dbname": os.environ.get("POSTGRES_DB", "novela"), + "user": os.environ.get("POSTGRES_USER", "novela"), + "password": os.environ.get("POSTGRES_PASSWORD", ""), + } + + +def init_pool(minconn: int = 2, maxconn: int = 10) -> None: + global _pool + if _pool is None: + _pool = pool.ThreadedConnectionPool(minconn=minconn, maxconn=maxconn, **_db_config()) + + +def close_pool() -> None: + global _pool + if _pool is not None: + _pool.closeall() + _pool = None + + +def get_conn(): + global _pool + if _pool is None: + init_pool() + return _pool.getconn() # type: ignore[union-attr] + + +def release_conn(conn) -> None: + if _pool is not None and conn is not None: + _pool.putconn(conn) + + +@contextmanager +def get_db_conn(): + conn = get_conn() + try: + yield conn + finally: + release_conn(conn) + + +def direct_connect(): + return psycopg2.connect(**_db_config()) diff --git a/containers/novela/epub.py b/containers/novela/epub.py new file mode 100644 index 0000000..986d430 --- /dev/null +++ b/containers/novela/epub.py @@ -0,0 +1,355 @@ +import io +import re +import zipfile +from html import escape as he + + +def detect_image_format(data: bytes, base: str) -> tuple[str, str]: + """Return (filename_with_ext, media_type) detected from image magic bytes. + + base -- filename stem without extension, e.g. 
'cover' or 'ch001_img002' + """ + if data[:2] == b'\xff\xd8': + return f"{base}.jpg", "image/jpeg" + if data[:8] == b'\x89PNG\r\n\x1a\n': + return f"{base}.png", "image/png" + if data[:4] == b'RIFF' and data[8:12] == b'WEBP': + return f"{base}.webp", "image/webp" + if data[:3] == b'GIF': + return f"{base}.gif", "image/gif" + return f"{base}.jpg", "image/jpeg" # fallback + + +def add_cover_to_epub(epub_path, cover_data: bytes) -> None: + """Add a cover image to an existing EPUB and remove the Cover Missing tag.""" + cover_filename, cover_media_type = detect_image_format(cover_data, "cover") + + # Read existing zip into memory + with open(epub_path, "rb") as f: + original = f.read() + + buf = io.BytesIO() + with zipfile.ZipFile(io.BytesIO(original), "r") as zin, \ + zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zout: + + # Copy mimetype uncompressed first + info = zin.getinfo("mimetype") + zout.writestr(zipfile.ZipInfo("mimetype"), zin.read("mimetype"), compress_type=zipfile.ZIP_STORED) + + for item in zin.infolist(): + if item.filename == "mimetype": + continue + data = zin.read(item.filename) + + if item.filename == "OEBPS/content.opf": + data = _patch_opf(data.decode("utf-8"), cover_filename, cover_media_type).encode("utf-8") + + zout.writestr(item, data) + + # Add the cover image + zout.writestr(f"OEBPS/Images/{cover_filename}", cover_data) + + with open(epub_path, "wb") as f: + f.write(buf.getvalue()) + + +def _patch_opf(opf: str, cover_filename: str, cover_media_type: str) -> str: + """Insert cover into OPF manifest/metadata and remove Cover Missing dc:subject.""" + # Remove "Cover Missing" dc:subject + opf = re.sub(r'\s*Cover Missing', '', opf) + + # Add cover manifest item before + cover_item = f'' + opf = opf.replace("", f' {cover_item}\n ') + + # Add cover meta before + cover_meta = '' + opf = opf.replace("", f' {cover_meta}\n ') + + return opf + + +def make_chapter_xhtml(title: str, content_html: str, chapter_num: int) -> str: + t = he(title) + return f""" + + + + + {t} + + + +

{t}

+{content_html} + + +""" + + +def make_intro_xhtml(book_title: str, author: str, book_info: dict) -> str: + """Generate the intro page XHTML with genres, description, source and date.""" + parts = [] + # Optional illustration from the story index page (e.g. awesomedude.org) + if book_info.get("index_image_name"): + img = he(book_info["index_image_name"]) + parts.append(f'
') + if book_info.get("genres"): + parts.append(f'

Genres: {he(", ".join(book_info["genres"]))}

') + if book_info.get("subgenres"): + parts.append(f'

Sub-genres: {he(", ".join(book_info["subgenres"]))}

') + if book_info.get("tags"): + parts.append(f'

Tags: {he(", ".join(book_info["tags"]))}

') + if book_info.get("description"): + parts.append("
") + for para in book_info["description"].split("\n\n"): + if para.strip(): + parts.append(f"

{he(para.strip())}

") + parts.append("
") + if book_info.get("source_url"): + parts.append(f'

Source: {he(book_info["source_url"])}

') + if book_info.get("updated_date"): + parts.append(f'

Updated: {he(book_info["updated_date"])}

') + content = "\n".join(parts) + t = he(book_title) + a = he(author) + return f""" + + + + + {t} + + + +

{t}

+

by {a}

+{content} + + +""" + + +def make_epub( + book_title: str, + author: str, + chapters: list[dict], + cover_data: bytes | None, + break_img_data: bytes, + book_id: str, + book_info: dict | None = None, +) -> bytes: + """Build a complete EPUB 2.0 in-memory and return the bytes.""" + buf = io.BytesIO() + with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf: + # mimetype must be first and uncompressed + zf.writestr( + zipfile.ZipInfo("mimetype"), + "application/epub+zip", + compress_type=zipfile.ZIP_STORED, + ) + + zf.writestr( + "META-INF/container.xml", + """ + + + + +""", + ) + + css = open("static/epub-style.css", "r", encoding="utf-8").read() + zf.writestr("OEBPS/Styles/style.css", css) + zf.writestr("OEBPS/Images/break.png", break_img_data) + + info = book_info or {} + + # Optional intro illustration (e.g. index page image from awesomedude.org) + if info.get("index_image_data"): + zf.writestr(f"OEBPS/Images/{info['index_image_name']}", info["index_image_data"]) + + has_cover = cover_data is not None + cover_filename = "" + cover_media_type = "" + if has_cover: + cover_filename, cover_media_type = detect_image_format(cover_data, "cover") + zf.writestr(f"OEBPS/Images/{cover_filename}", cover_data) + + zf.writestr("OEBPS/Text/intro.xhtml", make_intro_xhtml(book_title, author, info)) + + # Chapter images + for ch in chapters: + for img in ch.get("images", []): + zf.writestr(img["epub_path"], img["data"]) + + chapter_files = [] + for i, ch in enumerate(chapters, 1): + fname = f"chapter{i:03d}.xhtml" + zf.writestr(f"OEBPS/Text/{fname}", ch["xhtml"]) + chapter_files.append((fname, ch["title"])) + + # Manifest + manifest_items = [] + if has_cover: + manifest_items.append( + f'' + ) + # Chapter images + for ch in chapters: + for img in ch.get("images", []): + img_id = img["epub_path"].split("/")[-1].replace(".", "_") + manifest_items.append( + f'' + ) + if info.get("index_image_name"): + manifest_items.append( + f'' + ) + manifest_items.append('') + manifest_items.append('') + manifest_items.append('') + for i, (fname, _) in enumerate(chapter_files, 1): + manifest_items.append(f'') + manifest_items.append('') + + spine_items = [''] + [ + f'' for i in range(1, len(chapter_files) + 1) + ] + + cover_meta = f'' if has_cover else "" + + subject_items = "".join( + f"\n {he(g)}" + for g in info.get("genres", []) + info.get("subgenres", []) + info.get("tags", []) + ) + desc_item = ( + f"\n {he(info['description'].replace(chr(10), ' '))}" + if info.get("description") else "" + ) + date_item = ( + f"\n {he(info['updated_date'])}" + if info.get("updated_date") else "" + ) + source_item = ( + f"\n {he(info['source_url'])}" + if info.get("source_url") else "" + ) + publisher_item = ( + f"\n {he(info['publisher'])}" + if info.get("publisher") else "" + ) + series_items = "" + if info.get("series"): + s = he(info["series"]) + idx = int(info.get("series_index", 1)) + series_items = ( + f'\n ' + f'\n ' + ) + status_item = ( + f'\n ' + if info.get("publication_status") else "" + ) + + opf = f""" + + + {he(book_title)} + {he(author)} + en + {book_id} + {cover_meta}{subject_items}{desc_item}{date_item}{source_item}{publisher_item}{series_items}{status_item} + + + {"".join(manifest_items)} + + + {"".join(spine_items)} + +""" + zf.writestr("OEBPS/content.opf", opf) + + # TOC NCX + nav_points = [ + """ + Book Info + + """ + ] + for i, (fname, title) in enumerate(chapter_files, 1): + nav_points.append( + f""" + {he(title)} + + """ + ) + + ncx = f""" + + + + + + + + + {he(book_title)} + +{"".join(nav_points)} + +""" 
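+        # toc.ncx is the EPUB 2 navigation map; reading systems use it for the table of
+        # contents alongside the spine order declared in content.opf.
+        # Illustrative call of this builder (example values, not part of this commit):
+        #   ch = {"title": "Chapter 1", "xhtml": make_chapter_xhtml("Chapter 1", "<p>Hello.</p>", 1), "images": []}
+        #   data = make_epub("Demo Book", "Anon", [ch], cover_data=None,
+        #                    break_img_data=open("static/break.png", "rb").read(), book_id="demo-0001")
+        #   open("demo.epub", "wb").write(data)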
+ zf.writestr("OEBPS/toc.ncx", ncx) + + return buf.getvalue() + + +def read_epub_file(epub_path, internal_path: str) -> str: + """Read a single file from the EPUB zip and return it as a UTF-8 string.""" + with zipfile.ZipFile(epub_path, "r") as z: + return z.read(internal_path).decode("utf-8", errors="replace") + + +def write_epub_file(epub_path, internal_path: str, content: str) -> None: + """Replace a single file inside the EPUB zip (full zip rewrite). + + If OEBPS/Images/break.png is missing from the zip it is added automatically, + so break-image inserts made in the editor render correctly in older EPUBs. + """ + with open(epub_path, "rb") as f: + original = f.read() + + break_img_path = "OEBPS/Images/break.png" + buf = io.BytesIO() + with zipfile.ZipFile(io.BytesIO(original), "r") as zin, \ + zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zout: + + zout.writestr( + zipfile.ZipInfo("mimetype"), zin.read("mimetype"), + compress_type=zipfile.ZIP_STORED, + ) + + names = zin.namelist() + has_break = break_img_path in names + + for item in zin.infolist(): + if item.filename == "mimetype": + continue + if item.filename == internal_path: + zout.writestr(item, content.encode("utf-8")) + else: + zout.writestr(item, zin.read(item.filename)) + + if not has_break: + try: + zout.writestr(break_img_path, open("static/break.png", "rb").read()) + except Exception: + pass + + with open(epub_path, "wb") as f: + f.write(buf.getvalue()) diff --git a/containers/novela/main.py b/containers/novela/main.py new file mode 100644 index 0000000..d32781a --- /dev/null +++ b/containers/novela/main.py @@ -0,0 +1,42 @@ +from contextlib import asynccontextmanager + +from fastapi import FastAPI +from fastapi.responses import RedirectResponse +from fastapi.staticfiles import StaticFiles + +from db import close_pool, init_pool +from migrations import run_migrations +from routers import ( + backup_router, + editor_router, + grabber_router, + library_router, + reader_router, + settings_router, +) + + +@asynccontextmanager +async def lifespan(app: FastAPI): + init_pool() + run_migrations() + try: + yield + finally: + close_pool() + + +app = FastAPI(lifespan=lifespan) +app.mount("/static", StaticFiles(directory="static"), name="static") + +app.include_router(library_router) +app.include_router(reader_router) +app.include_router(editor_router) +app.include_router(grabber_router) +app.include_router(settings_router) +app.include_router(backup_router) + + +@app.get("/") +async def index_redirect(): + return RedirectResponse(url="/home", status_code=302) diff --git a/containers/novela/migrations.py b/containers/novela/migrations.py new file mode 100644 index 0000000..b1b02d1 --- /dev/null +++ b/containers/novela/migrations.py @@ -0,0 +1,203 @@ +import re + +from db import direct_connect + +_DEFAULT_REGEX = [ + r"^\s*[\*\-]{3,}\s*$", + r"^\s*[·•◦‣⁃]\s*[·•◦‣⁃]\s*[·•◦‣⁃]\s*$", + r"^\s*~{2,}\s*$", + r"^\s*={3,}\s*$", + r"^\s*#{3,}\s*$", + r"^\s*[oO0]{1,3}\s*$", + r"^\s*[-–—]\s*[oO0]\s*[-–—]\s*$", + r"^\s*[<>]+\s*[·•*]\s*[<>]+\s*$", +] +_DEFAULT_CSS = [ + "hr", + "separator", + "section-break", + "divider", + "break", + "chapterbreak", + "scene-break", + "scenebreak", +] + + +def _exec(sql: str) -> None: + conn = direct_connect() + try: + with conn: + with conn.cursor() as cur: + cur.execute(sql) + finally: + conn.close() + + +def migrate_create_library() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS library ( + id SERIAL PRIMARY KEY, + filename VARCHAR(600) UNIQUE NOT NULL, + media_type VARCHAR(10) NOT NULL DEFAULT 
'epub', + title VARCHAR(500), + author VARCHAR(255), + publisher VARCHAR(255), + series VARCHAR(500), + series_index INTEGER DEFAULT 0, + publication_status VARCHAR(100), + has_cover BOOLEAN DEFAULT FALSE, + description TEXT DEFAULT '', + source_url VARCHAR(1000), + publish_date DATE, + archived BOOLEAN DEFAULT FALSE, + want_to_read BOOLEAN DEFAULT FALSE, + needs_review BOOLEAN DEFAULT FALSE, + created_at TIMESTAMP DEFAULT NOW(), + updated_at TIMESTAMP DEFAULT NOW() + ) + """ + ) + + +def migrate_create_book_tags() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS book_tags ( + id SERIAL PRIMARY KEY, + filename VARCHAR(600) NOT NULL REFERENCES library(filename) ON DELETE CASCADE, + tag VARCHAR(255) NOT NULL, + tag_type VARCHAR(20) NOT NULL, + UNIQUE (filename, tag, tag_type) + ) + """ + ) + _exec("CREATE INDEX IF NOT EXISTS idx_book_tags_filename ON book_tags (filename)") + + +def migrate_create_reading_progress() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS reading_progress ( + id SERIAL PRIMARY KEY, + filename VARCHAR(600) UNIQUE NOT NULL REFERENCES library(filename) ON DELETE CASCADE, + cfi TEXT, + page INTEGER, + progress INTEGER DEFAULT 0, + updated_at TIMESTAMP DEFAULT NOW() + ) + """ + ) + + +def migrate_create_reading_sessions() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS reading_sessions ( + id SERIAL PRIMARY KEY, + filename VARCHAR(600) NOT NULL REFERENCES library(filename) ON DELETE CASCADE, + read_at TIMESTAMP DEFAULT NOW() + ) + """ + ) + _exec("CREATE INDEX IF NOT EXISTS idx_reading_sessions_filename ON reading_sessions (filename)") + + +def migrate_create_library_cover_cache() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS library_cover_cache ( + filename VARCHAR(600) PRIMARY KEY REFERENCES library(filename) ON DELETE CASCADE, + mime_type VARCHAR(100) NOT NULL, + thumb_webp BYTEA NOT NULL, + updated_at TIMESTAMP DEFAULT NOW() + ) + """ + ) + + +def migrate_create_credentials() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS credentials ( + id SERIAL PRIMARY KEY, + site VARCHAR(255) UNIQUE NOT NULL, + username VARCHAR(255) NOT NULL, + password VARCHAR(255) NOT NULL, + updated_at TIMESTAMP DEFAULT NOW() + ) + """ + ) + + +def migrate_create_break_patterns() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS break_patterns ( + id SERIAL PRIMARY KEY, + pattern_type VARCHAR(20) NOT NULL, + pattern TEXT NOT NULL, + enabled BOOLEAN DEFAULT TRUE, + is_default BOOLEAN DEFAULT FALSE, + created_at TIMESTAMP DEFAULT NOW(), + UNIQUE (pattern_type, pattern) + ) + """ + ) + + +def migrate_seed_break_patterns() -> None: + conn = direct_connect() + try: + with conn: + with conn.cursor() as cur: + for pat in _DEFAULT_REGEX: + re.compile(pat) + cur.execute( + """ + INSERT INTO break_patterns (pattern_type, pattern, is_default) + VALUES ('regex', %s, TRUE) + ON CONFLICT (pattern_type, pattern) DO NOTHING + """, + (pat,), + ) + for pat in _DEFAULT_CSS: + cur.execute( + """ + INSERT INTO break_patterns (pattern_type, pattern, is_default) + VALUES ('css_class', %s, TRUE) + ON CONFLICT (pattern_type, pattern) DO NOTHING + """, + (pat,), + ) + finally: + conn.close() + + +def migrate_create_backup_log() -> None: + _exec( + """ + CREATE TABLE IF NOT EXISTS backup_log ( + id SERIAL PRIMARY KEY, + status VARCHAR(20) NOT NULL, + files_count INTEGER, + size_bytes BIGINT, + error_msg TEXT, + started_at TIMESTAMP DEFAULT NOW(), + finished_at TIMESTAMP + ) + """ + ) + + +def run_migrations() -> None: + migrate_create_library() + migrate_create_book_tags() + 
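+    # Ordering note: book_tags, reading_progress, reading_sessions and library_cover_cache
+    # all have foreign keys to library(filename), so migrate_create_library() runs first;
+    # migrate_seed_break_patterns() runs last, after its table exists. Every step uses
+    # CREATE ... IF NOT EXISTS or ON CONFLICT DO NOTHING, so run_migrations() is safe to
+    # re-run on every startup.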
migrate_create_reading_progress() + migrate_create_reading_sessions() + migrate_create_library_cover_cache() + migrate_create_credentials() + migrate_create_break_patterns() + migrate_create_backup_log() + migrate_seed_break_patterns() diff --git a/containers/novela/pdf.py b/containers/novela/pdf.py new file mode 100644 index 0000000..d6c2e79 --- /dev/null +++ b/containers/novela/pdf.py @@ -0,0 +1,68 @@ +from pathlib import Path + +import fitz +from PIL import Image, ImageOps + +COVER_W = 300 +COVER_H = 450 + + +def pdf_page_count(path: Path) -> int: + with fitz.open(path) as doc: + return doc.page_count + + +def pdf_render_page(path: Path, page_num: int, dpi: int = 150) -> bytes: + with fitz.open(path) as doc: + if page_num < 0 or page_num >= doc.page_count: + raise IndexError("Page out of range") + page = doc.load_page(page_num) + mat = fitz.Matrix(dpi / 72.0, dpi / 72.0) + pix = page.get_pixmap(matrix=mat, alpha=False) + return pix.tobytes("png") + + +def _webp_thumb_from_image(path: Path) -> bytes: + with Image.open(path) as im: + im = ImageOps.exif_transpose(im) + if im.mode not in ("RGB", "RGBA"): + im = im.convert("RGB") + thumb = ImageOps.fit(im, (COVER_W, COVER_H), method=Image.Resampling.LANCZOS) + from io import BytesIO + + out = BytesIO() + thumb.save(out, format="WEBP", quality=82, method=6) + return out.getvalue() + + +def pdf_cover_thumb(path: Path) -> bytes: + with fitz.open(path) as doc: + if doc.page_count == 0: + raise ValueError("PDF has no pages") + page = doc.load_page(0) + pix = page.get_pixmap(matrix=fitz.Matrix(1.5, 1.5), alpha=False) + tmp = path.with_suffix(".cover.tmp.png") + try: + pix.save(tmp) + return _webp_thumb_from_image(tmp) + finally: + if tmp.exists(): + tmp.unlink(missing_ok=True) + + +def pdf_scan_metadata(path: Path) -> dict: + with fitz.open(path) as doc: + meta = doc.metadata or {} + return { + "title": (meta.get("title") or path.stem or "").strip(), + "author": (meta.get("author") or "").strip(), + "publisher": (meta.get("producer") or "").strip(), + "description": (meta.get("subject") or "").strip(), + "source_url": "", + "series": "", + "series_index": 0, + "publication_status": "", + "has_cover": doc.page_count > 0, + "subjects": [], + "publish_date": "", + } diff --git a/containers/novela/requirements.txt b/containers/novela/requirements.txt new file mode 100644 index 0000000..85697da --- /dev/null +++ b/containers/novela/requirements.txt @@ -0,0 +1,14 @@ +fastapi==0.115.5 +uvicorn[standard]==0.32.1 +httpx==0.27.2 +beautifulsoup4==4.12.3 +lxml==5.3.0 +python-multipart==0.0.12 +psycopg2-binary==2.9.10 +jinja2==3.1.4 +Pillow==11.0.0 +pymupdf==1.24.0 +rarfile==4.2 +dropbox==12.0.2 +apscheduler==3.10.4 +cryptography==44.0.1 diff --git a/containers/novela/routers/__init__.py b/containers/novela/routers/__init__.py new file mode 100644 index 0000000..4c1b8cb --- /dev/null +++ b/containers/novela/routers/__init__.py @@ -0,0 +1,15 @@ +from routers.backup import router as backup_router +from routers.editor import router as editor_router +from routers.grabber import router as grabber_router +from routers.library import router as library_router +from routers.reader import router as reader_router +from routers.settings import router as settings_router + +__all__ = [ + "library_router", + "reader_router", + "editor_router", + "grabber_router", + "backup_router", + "settings_router", +] diff --git a/containers/novela/routers/__pycache__/__init__.cpython-311.pyc b/containers/novela/routers/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 
0000000..99f671b Binary files /dev/null and b/containers/novela/routers/__pycache__/__init__.cpython-311.pyc differ diff --git a/containers/novela/routers/__pycache__/backup.cpython-311.pyc b/containers/novela/routers/__pycache__/backup.cpython-311.pyc new file mode 100644 index 0000000..6398ec4 Binary files /dev/null and b/containers/novela/routers/__pycache__/backup.cpython-311.pyc differ diff --git a/containers/novela/routers/__pycache__/common.cpython-311.pyc b/containers/novela/routers/__pycache__/common.cpython-311.pyc new file mode 100644 index 0000000..41df148 Binary files /dev/null and b/containers/novela/routers/__pycache__/common.cpython-311.pyc differ diff --git a/containers/novela/routers/__pycache__/editor.cpython-311.pyc b/containers/novela/routers/__pycache__/editor.cpython-311.pyc new file mode 100644 index 0000000..e217624 Binary files /dev/null and b/containers/novela/routers/__pycache__/editor.cpython-311.pyc differ diff --git a/containers/novela/routers/__pycache__/grabber.cpython-311.pyc b/containers/novela/routers/__pycache__/grabber.cpython-311.pyc new file mode 100644 index 0000000..3992117 Binary files /dev/null and b/containers/novela/routers/__pycache__/grabber.cpython-311.pyc differ diff --git a/containers/novela/routers/__pycache__/library.cpython-311.pyc b/containers/novela/routers/__pycache__/library.cpython-311.pyc new file mode 100644 index 0000000..a0a082f Binary files /dev/null and b/containers/novela/routers/__pycache__/library.cpython-311.pyc differ diff --git a/containers/novela/routers/__pycache__/reader.cpython-311.pyc b/containers/novela/routers/__pycache__/reader.cpython-311.pyc new file mode 100644 index 0000000..bfe8708 Binary files /dev/null and b/containers/novela/routers/__pycache__/reader.cpython-311.pyc differ diff --git a/containers/novela/routers/__pycache__/settings.cpython-311.pyc b/containers/novela/routers/__pycache__/settings.cpython-311.pyc new file mode 100644 index 0000000..96b8fa1 Binary files /dev/null and b/containers/novela/routers/__pycache__/settings.cpython-311.pyc differ diff --git a/containers/novela/routers/backup.py b/containers/novela/routers/backup.py new file mode 100644 index 0000000..6f71447 --- /dev/null +++ b/containers/novela/routers/backup.py @@ -0,0 +1,359 @@ +import json +import os +import shutil +import subprocess +from datetime import datetime, timezone +from pathlib import Path +from tempfile import NamedTemporaryFile + +import dropbox +from dropbox.exceptions import ApiError, AuthError +from fastapi import APIRouter, Request +from fastapi.responses import HTMLResponse +from fastapi.templating import Jinja2Templates + +from db import get_db_conn +from security import decrypt_value, encrypt_value, is_encrypted_value + +templates = Jinja2Templates(directory="templates") +router = APIRouter() + +LIBRARY_DIR = Path(os.environ.get("LIBRARY_DIR", "library")) +CONFIG_DIR = Path(os.environ.get("CONFIG_DIR", "config")) +CONFIG_DIR.mkdir(parents=True, exist_ok=True) +MANIFEST_PATH = CONFIG_DIR / "backup_manifest.json" +DROPBOX_ROOT = (os.environ.get("DROPBOX_BACKUP_ROOT", "/novela") or "/novela").rstrip("/") + + +def _now_iso() -> str: + return datetime.now(timezone.utc).isoformat() + + +def _load_manifest() -> dict[str, dict[str, float | int]]: + if not MANIFEST_PATH.exists(): + return {} + try: + data = json.loads(MANIFEST_PATH.read_text(encoding="utf-8")) + if isinstance(data, dict): + return data + except Exception: + pass + return {} + + +def _save_manifest(manifest: dict[str, dict[str, float | int]]) -> None: 
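+    # The manifest maps library-relative paths to their last-seen mtime/size, e.g.
+    # {"epub/Pub/Author/Stories/Title.epub": {"mtime": 1711111111.0, "size": 52341}}
+    # (values illustrative). _run_backup_internal skips any file whose current state
+    # still matches, so only new or changed files are re-uploaded.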
+ MANIFEST_PATH.write_text(json.dumps(manifest, indent=2, sort_keys=True), encoding="utf-8") + + +def _load_dropbox_token() -> str: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("SELECT username, password FROM credentials WHERE site = 'dropbox' LIMIT 1") + row = cur.fetchone() + if not row: + return "" + + username_raw, password_raw = row + username = decrypt_value(username_raw) + password = decrypt_value(password_raw) + + if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw): + cur.execute( + """ + UPDATE credentials + SET username = %s, password = %s, updated_at = NOW() + WHERE site = 'dropbox' + """, + (encrypt_value(username), encrypt_value(password)), + ) + + return (password or username or "").strip() + + +def _dbx() -> dropbox.Dropbox: + token = _load_dropbox_token() + if not token: + raise RuntimeError("Dropbox token not found in credentials (site='dropbox').") + client = dropbox.Dropbox(token, timeout=120) + try: + client.users_get_current_account() + except AuthError as e: + raise RuntimeError(f"Dropbox auth failed: {e}") + return client + + +def _ensure_dropbox_dir(client: dropbox.Dropbox, path: str) -> None: + if not path or path == "/": + return + parts = [p for p in path.split("/") if p] + cur = "" + for p in parts: + cur += "/" + p + try: + client.files_create_folder_v2(cur) + except ApiError: + pass + + +def _dropbox_upload_bytes(client: dropbox.Dropbox, target_path: str, data: bytes) -> int: + parent = str(Path(target_path).parent).replace("\\", "/") + if not parent.startswith("/"): + parent = "/" + parent + _ensure_dropbox_dir(client, parent) + client.files_upload(data, target_path, mode=dropbox.files.WriteMode.overwrite, mute=True) + return len(data) + + +def _iter_library_files() -> list[Path]: + if not LIBRARY_DIR.exists(): + return [] + return [p for p in LIBRARY_DIR.rglob("*") if p.is_file()] + + +def _current_file_state(path: Path) -> dict[str, float | int]: + st = path.stat() + return {"mtime": st.st_mtime, "size": st.st_size} + + +def _pg_dump_cmd(tmp_path: Path) -> list[str]: + return [ + "pg_dump", + "-h", + os.environ.get("POSTGRES_HOST", "postgres"), + "-p", + str(os.environ.get("POSTGRES_PORT", "5432")), + "-U", + os.environ.get("POSTGRES_USER", "novela"), + "-d", + os.environ.get("POSTGRES_DB", "novela"), + "-f", + str(tmp_path), + ] + + +def _run_pg_dump() -> tuple[bytes, str]: + db = os.environ.get("POSTGRES_DB", "novela") + env = os.environ.copy() + env["PGPASSWORD"] = os.environ.get("POSTGRES_PASSWORD", "") + + with NamedTemporaryFile(suffix=".sql", delete=False) as tmp: + tmp_path = Path(tmp.name) + + try: + cmd = _pg_dump_cmd(tmp_path) + proc = subprocess.run(cmd, env=env, capture_output=True, text=True) + if proc.returncode != 0: + stderr = (proc.stderr or "").strip() + raise RuntimeError(f"pg_dump failed: {stderr or 'unknown error'}") + data = tmp_path.read_bytes() + stamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S") + return data, f"{db}-{stamp}.sql" + finally: + tmp_path.unlink(missing_ok=True) + + +def _insert_backup_log_running() -> int: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO backup_log (status, started_at) + VALUES ('running', NOW()) + RETURNING id + """ + ) + return int(cur.fetchone()[0]) + + +def _finish_backup_log(log_id: int, *, status: str, files_count: int | None, size_bytes: int | None, error_msg: str | None) -> None: + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + 
cur.execute( + """ + UPDATE backup_log + SET status = %s, + files_count = %s, + size_bytes = %s, + error_msg = %s, + finished_at = NOW() + WHERE id = %s + """, + (status, files_count, size_bytes, error_msg, log_id), + ) + + +def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]: + client = None if dry_run else _dbx() + manifest = _load_manifest() + files = _iter_library_files() + + uploaded_count = 0 + uploaded_size = 0 + new_manifest: dict[str, dict[str, float | int]] = {} + + library_root = f"{DROPBOX_ROOT}/library" + if client is not None: + _ensure_dropbox_dir(client, library_root) + + for path in files: + rel = path.relative_to(LIBRARY_DIR).as_posix() + state = _current_file_state(path) + new_manifest[rel] = state + + if manifest.get(rel) == state: + continue + + data = path.read_bytes() + target = f"{library_root}/{rel}" + if client is not None: + uploaded_size += _dropbox_upload_bytes(client, target, data) + else: + uploaded_size += len(data) + uploaded_count += 1 + + dump_data, dump_name = _run_pg_dump() + dump_target = f"{DROPBOX_ROOT}/postgres/{dump_name}" + if client is not None: + uploaded_size += _dropbox_upload_bytes(client, dump_target, dump_data) + else: + uploaded_size += len(dump_data) + uploaded_count += 1 + + if not dry_run: + _save_manifest(new_manifest) + return uploaded_count, uploaded_size + + +@router.get("/backup", response_class=HTMLResponse) +async def backup_page(request: Request): + template = "backup.html" + if not Path("templates/backup.html").exists(): + template = "settings.html" + return templates.TemplateResponse(request, template, {"active": "backup"}) + + +@router.get("/api/backup/health") +async def backup_health(): + token_present = bool(_load_dropbox_token()) + pg_dump_path = shutil.which("pg_dump") + + dropbox_ok = False + dropbox_error = None + if token_present: + try: + _dbx() + dropbox_ok = True + except Exception as e: + dropbox_error = str(e) + + return { + "token_present": token_present, + "dropbox_ok": dropbox_ok, + "dropbox_error": dropbox_error, + "pg_dump_available": bool(pg_dump_path), + "pg_dump_path": pg_dump_path, + "library_exists": LIBRARY_DIR.exists(), + "library_path": str(LIBRARY_DIR.resolve()), + } + + +@router.get("/api/backup/status") +async def backup_status(): + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT id, status, files_count, size_bytes, error_msg, started_at, finished_at + FROM backup_log + ORDER BY started_at DESC + LIMIT 1 + """ + ) + row = cur.fetchone() + if not row: + return {"status": "never"} + return { + "id": row[0], + "status": row[1], + "files_count": row[2], + "size_bytes": row[3], + "error_msg": row[4], + "started_at": row[5].isoformat() if row[5] else None, + "finished_at": row[6].isoformat() if row[6] else None, + } + + +@router.get("/api/backup/history") +async def backup_history(): + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT id, status, files_count, size_bytes, error_msg, started_at, finished_at + FROM backup_log + ORDER BY started_at DESC + LIMIT 20 + """ + ) + rows = cur.fetchall() + return [ + { + "id": r[0], + "status": r[1], + "files_count": r[2], + "size_bytes": r[3], + "error_msg": r[4], + "started_at": r[5].isoformat() if r[5] else None, + "finished_at": r[6].isoformat() if r[6] else None, + } + for r in rows + ] + + +@router.post("/api/backup/run") +async def run_backup(request: Request): + body = {} + try: + body = await request.json() + except Exception: + pass + dry_run = bool(body.get("dry_run", 
False)) + + log_id = _insert_backup_log_running() + try: + files_count, size_bytes = _run_backup_internal(dry_run=dry_run) + _finish_backup_log( + log_id, + status="success", + files_count=files_count, + size_bytes=size_bytes, + error_msg=None, + ) + return { + "ok": True, + "backup_id": log_id, + "status": "success", + "dry_run": dry_run, + "files_count": files_count, + "size_bytes": size_bytes, + "finished_at": _now_iso(), + } + except Exception as e: + _finish_backup_log( + log_id, + status="error", + files_count=None, + size_bytes=None, + error_msg=str(e), + ) + return { + "ok": False, + "backup_id": log_id, + "status": "error", + "dry_run": dry_run, + "error": str(e), + "finished_at": _now_iso(), + } diff --git a/containers/novela/routers/common.py b/containers/novela/routers/common.py new file mode 100644 index 0000000..f71ace1 --- /dev/null +++ b/containers/novela/routers/common.py @@ -0,0 +1,431 @@ +import base64 +import html as _html +import io +import posixpath +import re +import zipfile as zf +from datetime import datetime, timezone +from pathlib import Path + +import psycopg2 +from bs4 import BeautifulSoup +from PIL import Image, ImageOps, UnidentifiedImageError + +from cbr import cbr_cover_thumb, cbr_page_count +from db import get_db_conn +from pdf import pdf_cover_thumb, pdf_page_count, pdf_scan_metadata + +LIBRARY_DIR = Path("library") +LIBRARY_DIR.mkdir(exist_ok=True) +LIBRARY_ROOT = LIBRARY_DIR.resolve() +COVER_W = 300 +COVER_H = 450 + + +def clean_segment(value: str, fallback: str, max_len: int) -> str: + txt = re.sub(r"\s+", " ", (value or "").strip()) + txt = re.sub(r'[<>:"/\\|?*\x00-\x1f]', "", txt) + txt = re.sub(r"\.+$", "", txt).strip() + return (txt or fallback)[:max_len] + + +def resolve_library_path(filename: str) -> Path | None: + rel = Path(filename) + if rel.is_absolute() or any(part in {"", ".", ".."} for part in rel.parts): + return None + candidate = (LIBRARY_DIR / rel).resolve() + try: + candidate.relative_to(LIBRARY_ROOT) + except ValueError: + return None + return candidate + + +def media_type_from_suffix(path: Path) -> str: + ext = path.suffix.lower() + if ext == ".epub": + return "epub" + if ext == ".pdf": + return "pdf" + if ext in {".cbr", ".cbz"}: + return "cbr" + return "" + + +def coerce_series_index(value: int | str | None) -> int: + try: + return max(1, min(999, int(value or 1))) + except Exception: + return 1 + + +def make_rel_path(*, media_type: str, publisher: str, author: str, title: str, series: str, series_index: int | str | None) -> Path: + if media_type == "epub": + pub = clean_segment(publisher, "Unknown Publisher", 80) + auth = clean_segment(author, "Unknown Author", 80) + ttl = clean_segment(title, "Untitled", 140) + series_name = clean_segment(series, "", 80) + if series_name: + return Path("epub") / pub / auth / "Series" / series_name / f"{coerce_series_index(series_index):03d} - {ttl}.epub" + return Path("epub") / pub / auth / "Stories" / f"{ttl}.epub" + + if media_type == "pdf": + auth = clean_segment(author, "Unknown Author", 80) + ttl = clean_segment(title, "Untitled", 140) + return Path("pdf") / auth / f"{ttl}.pdf" + + auth = clean_segment(author, "Unknown", 80) + ttl = clean_segment(title, "Untitled", 140) + return Path("comics") / auth / f"{ttl}.cbr" + + +def ensure_unique_rel_path(rel_path: Path) -> Path: + candidate = rel_path + suffix = candidate.suffix + stem = candidate.stem + counter = 2 + while (LIBRARY_DIR / candidate).exists(): + candidate = rel_path.with_name(f"{stem} ({counter}){suffix}") + counter += 1 + return 
candidate + + +def extract_cover_from_epub(epub_path: Path) -> tuple[bytes, str] | None: + try: + with zf.ZipFile(epub_path, "r") as z: + names = z.namelist() + cover = next((n for n in names if "/Images/cover." in n or n.lower().endswith("/cover.jpg")), "") + if not cover: + return None + data = z.read(cover) + ext = Path(cover).suffix.lower() + mt = { + ".jpg": "image/jpeg", + ".jpeg": "image/jpeg", + ".png": "image/png", + ".webp": "image/webp", + ".gif": "image/gif", + }.get(ext, "image/jpeg") + return data, mt + except Exception: + return None + + +def make_cover_thumb_webp(image_bytes: bytes) -> bytes: + with Image.open(io.BytesIO(image_bytes)) as im: + im = ImageOps.exif_transpose(im) + if im.mode not in ("RGB", "RGBA"): + im = im.convert("RGB") + thumb = ImageOps.fit(im, (COVER_W, COVER_H), method=Image.Resampling.LANCZOS, centering=(0.5, 0.5)) + out = io.BytesIO() + thumb.save(out, format="WEBP", quality=82, method=6) + return out.getvalue() + + +def upsert_cover_cache(conn, filename: str, mime_type: str, thumb_webp: bytes) -> None: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO library_cover_cache (filename, mime_type, thumb_webp, updated_at) + VALUES (%s, %s, %s, NOW()) + ON CONFLICT (filename) DO UPDATE SET + mime_type = EXCLUDED.mime_type, + thumb_webp = EXCLUDED.thumb_webp, + updated_at = NOW() + """, + (filename, mime_type, psycopg2.Binary(thumb_webp)), + ) + + +def ensure_cover_cache_for_book(conn, filename: str, full_path: Path, media_type: str) -> bool: + try: + if media_type == "epub": + raw = extract_cover_from_epub(full_path) + if not raw: + return False + data, mt = raw + thumb = make_cover_thumb_webp(data) + upsert_cover_cache(conn, filename, mt, thumb) + return True + if media_type == "pdf": + thumb = pdf_cover_thumb(full_path) + upsert_cover_cache(conn, filename, "image/webp", thumb) + return True + if media_type == "cbr": + thumb = cbr_cover_thumb(full_path) + upsert_cover_cache(conn, filename, "image/webp", thumb) + return True + except (UnidentifiedImageError, OSError, ValueError, RuntimeError): + return False + return False + + +def prune_empty_dirs(start_dir: Path) -> None: + cur = start_dir.resolve() + try: + cur.relative_to(LIBRARY_ROOT) + except Exception: + return + while cur != LIBRARY_ROOT: + try: + cur.rmdir() + except OSError: + return + cur = cur.parent + + +def _find_opf_path(names: set[str], container_xml: str | None) -> str | None: + opf_path = "OEBPS/content.opf" + if container_xml: + m = re.search(r"full-path\s*=\s*['\"]([^'\"]+)['\"]", container_xml) + if m: + opf_path = m.group(1) + if opf_path in names: + return opf_path + candidates = sorted(n for n in names if n.lower().endswith(".opf")) + return candidates[0] if candidates else None + + +def scan_epub(path: Path) -> dict: + out = { + "has_cover": False, + "series": "", + "series_index": 0, + "title": "", + "publication_status": "", + "author": "", + "publisher": "", + "source_url": "", + "publish_date": "", + "subjects": [], + "description": "", + } + try: + with zf.ZipFile(path, "r") as z: + names = set(z.namelist()) + out["has_cover"] = extract_cover_from_epub(path) is not None + container_xml = z.read("META-INF/container.xml").decode("utf-8", errors="replace") if "META-INF/container.xml" in names else None + opf_path = _find_opf_path(names, container_xml) + if not opf_path or opf_path not in names: + return out + opf = z.read(opf_path).decode("utf-8", errors="replace") + + def _find(pat: str) -> str: + m = re.search(pat, opf, re.DOTALL | re.IGNORECASE) + return 
_html.unescape(m.group(1).strip()) if m else "" + + out["title"] = _find(r"<(?:dc:)?title[^>]*>(.*?)") + out["author"] = _find(r"<(?:dc:)?creator[^>]*>(.*?)") + out["publisher"] = _find(r"<(?:dc:)?publisher[^>]*>(.*?)") + out["source_url"] = _find(r"<(?:dc:)?source[^>]*>(.*?)") + out["description"] = _find(r"<(?:dc:)?description[^>]*>(.*?)") + + m = re.search(r']*name="calibre:series"[^>]*content="([^"]+)"', opf, re.IGNORECASE) + if m: + out["series"] = _html.unescape(m.group(1).strip()) + m = re.search(r']*name="calibre:series_index"[^>]*content="([^"]+)"', opf, re.IGNORECASE) + if m: + try: + out["series_index"] = int(float(m.group(1))) + except Exception: + out["series_index"] = 0 + m = re.search(r']*name="publication_status"[^>]*content="([^"]+)"', opf, re.IGNORECASE) + if m: + out["publication_status"] = _html.unescape(m.group(1).strip()) + + pd = _find(r"<(?:dc:)?date[^>]*>(.*?)") + if pd: + date_candidate = pd.split("T", 1)[0] + try: + out["publish_date"] = datetime.fromisoformat(date_candidate).date().isoformat() + except Exception: + out["publish_date"] = "" + + out["subjects"] = [ + _html.unescape(s.strip()) + for s in re.findall(r"<(?:dc:)?subject[^>]*>(.*?)", opf, re.DOTALL | re.IGNORECASE) + if s.strip() + ] + except Exception: + pass + return out + + +def scan_media(path: Path) -> dict: + mt = media_type_from_suffix(path) + if mt == "epub": + meta = scan_epub(path) + elif mt == "pdf": + meta = pdf_scan_metadata(path) + elif mt == "cbr": + meta = { + "title": path.stem, + "author": "", + "publisher": "", + "series": "", + "series_index": 0, + "publication_status": "", + "has_cover": cbr_page_count(path) > 0, + "description": "", + "source_url": "", + "publish_date": "", + "subjects": [], + } + else: + meta = {} + meta["media_type"] = mt + return meta + + +def upsert_book(conn, filename: str, meta: dict, tags: list[tuple[str, str]] | None = None) -> None: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO library (filename, media_type, title, author, publisher, has_cover, + series, series_index, publication_status, source_url, + publish_date, description, needs_review, want_to_read, updated_at) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FALSE, NOW()) + ON CONFLICT (filename) DO UPDATE SET + media_type = EXCLUDED.media_type, + title = COALESCE(NULLIF(EXCLUDED.title, ''), library.title), + author = COALESCE(NULLIF(EXCLUDED.author, ''), library.author), + publisher = COALESCE(NULLIF(EXCLUDED.publisher, ''), library.publisher), + has_cover = (library.has_cover OR EXCLUDED.has_cover), + series = COALESCE(NULLIF(EXCLUDED.series, ''), library.series), + series_index = CASE WHEN COALESCE(EXCLUDED.series_index, 0) > 0 THEN EXCLUDED.series_index ELSE library.series_index END, + publication_status = COALESCE(NULLIF(EXCLUDED.publication_status, ''), library.publication_status), + source_url = COALESCE(NULLIF(EXCLUDED.source_url, ''), library.source_url), + publish_date = COALESCE(EXCLUDED.publish_date, library.publish_date), + description = COALESCE(NULLIF(EXCLUDED.description, ''), library.description), + updated_at = NOW() + """, + ( + filename, + meta.get("media_type", "epub"), + meta.get("title", ""), + meta.get("author", ""), + meta.get("publisher", ""), + bool(meta.get("has_cover", False)), + meta.get("series", ""), + meta.get("series_index", 0), + meta.get("publication_status", ""), + meta.get("source_url", ""), + meta.get("publish_date") or None, + meta.get("description", ""), + bool(meta.get("needs_review", False)), + ), + ) + + if tags is not None: 
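+            # Summary: passing tags replaces the book's tag set wholesale: existing rows are
+            # deleted, then the deduplicated (tag, tag_type) pairs are re-inserted.
+            # Passing tags=None leaves the existing tags untouched.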
+ cur.execute("DELETE FROM book_tags WHERE filename = %s", (filename,)) + rows = [] + seen: set[tuple[str, str]] = set() + for tag, ttype in tags: + t = (tag or "").strip() + tp = (ttype or "").strip() + if not t or not tp: + continue + key = (t.casefold(), tp) + if key in seen: + continue + seen.add(key) + rows.append((filename, t, tp)) + if rows: + cur.executemany( + "INSERT INTO book_tags (filename, tag, tag_type) VALUES (%s, %s, %s) ON CONFLICT (filename, tag, tag_type) DO NOTHING", + rows, + ) + + +def list_library_json() -> list[dict]: + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT l.filename, l.media_type, l.title, l.author, l.publisher, l.has_cover, + l.series, l.series_index, l.publication_status, l.want_to_read, + l.archived, l.needs_review, l.updated_at, + rp.progress, rp.cfi, rp.page, + COUNT(rs.id)::int AS read_count, + MAX(rs.read_at) AS last_read + FROM library l + LEFT JOIN reading_progress rp ON rp.filename = l.filename + LEFT JOIN reading_sessions rs ON rs.filename = l.filename + GROUP BY l.filename, l.media_type, l.title, l.author, l.publisher, l.has_cover, + l.series, l.series_index, l.publication_status, l.want_to_read, + l.archived, l.needs_review, l.updated_at, rp.progress, rp.cfi, rp.page + ORDER BY COALESCE(l.publisher, ''), COALESCE(l.author, ''), COALESCE(l.series, ''), l.series_index, COALESCE(l.title, '') + """ + ) + rows = cur.fetchall() + cur.execute("SELECT filename, tag, tag_type FROM book_tags ORDER BY tag") + tags = cur.fetchall() + cur.execute("SELECT filename FROM library_cover_cache") + cached = {r[0] for r in cur.fetchall()} + + tag_map: dict[str, list[dict]] = {} + for filename, tag, tag_type in tags: + tag_map.setdefault(filename, []).append({"tag": tag, "tag_type": tag_type}) + + out = [] + for r in rows: + out.append( + { + "filename": r[0], + "media_type": r[1], + "title": r[2] or "", + "author": r[3] or "", + "publisher": r[4] or "", + "has_cover": bool(r[5]), + "has_cached_cover": r[0] in cached, + "series": r[6] or "", + "series_index": r[7] or 0, + "publication_status": r[8] or "", + "want_to_read": bool(r[9]), + "archived": bool(r[10]), + "needs_review": bool(r[11]), + "updated_at": r[12].isoformat() if r[12] else None, + "progress": r[13] or 0, + "progress_cfi": r[14], + "page": r[15], + "read_count": r[16] or 0, + "last_read": r[17].isoformat() if r[17] else None, + "tags": tag_map.get(r[0], []), + } + ) + return out + + +def ensure_cover_missing_tag(conn, filename: str, has_cover: bool) -> None: + with conn.cursor() as cur: + if has_cover: + cur.execute( + "DELETE FROM book_tags WHERE filename = %s AND tag = 'Cover Missing' AND tag_type = 'tag'", + (filename,), + ) + return + cur.execute( + """ + INSERT INTO book_tags (filename, tag, tag_type) + VALUES (%s, 'Cover Missing', 'tag') + ON CONFLICT (filename, tag, tag_type) DO NOTHING + """, + (filename,), + ) + + +def normalize_site(raw: str) -> str: + raw = (raw or "").strip() + if "://" in raw: + from urllib.parse import urlparse + + raw = urlparse(raw).netloc + return re.sub(r"^www\.", "", raw).lower() + + +def relative_file_info(path: Path) -> dict: + stat = path.stat() + return { + "size": stat.st_size, + "modified": datetime.fromtimestamp(stat.st_mtime, tz=timezone.utc).isoformat(), + } diff --git a/containers/novela/routers/editor.py b/containers/novela/routers/editor.py new file mode 100644 index 0000000..0d704e6 --- /dev/null +++ b/containers/novela/routers/editor.py @@ -0,0 +1,434 @@ +import html as _html +import posixpath +import re 
+import uuid +import zipfile as zf +from pathlib import Path + +from bs4 import BeautifulSoup +from fastapi import APIRouter, Request +from fastapi.responses import HTMLResponse, JSONResponse, Response +from fastapi.templating import Jinja2Templates + +from db import get_db_conn +from epub import read_epub_file, write_epub_file + +router = APIRouter() +templates = Jinja2Templates(directory="templates") + +OUTPUT_DIR = Path("library") +OUTPUT_ROOT = OUTPUT_DIR.resolve() + + +def _resolve_output_path(filename: str) -> Path | None: + rel = Path(filename) + if rel.is_absolute() or any(part in {"", ".", ".."} for part in rel.parts): + return None + candidate = (OUTPUT_DIR / rel).resolve() + try: + candidate.relative_to(OUTPUT_ROOT) + except ValueError: + return None + return candidate + + +def _norm(base_dir: str, rel: str) -> str: + rel = (rel or "").split("#", 1)[0].strip() + if not rel: + return "" + joined = posixpath.normpath(posixpath.join(base_dir, rel)) + return joined.lstrip("./") + + +def _epub_spine(path: Path) -> list[dict]: + with zf.ZipFile(path, "r") as z: + names = set(z.namelist()) + + opf_path = "OEBPS/content.opf" + try: + container_xml = z.read("META-INF/container.xml").decode("utf-8", errors="replace") + m = re.search(r"full-path\\s*=\\s*['\"]([^'\"]+)['\"]", container_xml) + if m: + opf_path = m.group(1) + except Exception: + pass + + if opf_path not in names: + candidates = [n for n in names if n.lower().endswith(".opf")] + if not candidates: + return [] + opf_path = sorted(candidates)[0] + + opf_xml = z.read(opf_path).decode("utf-8", errors="replace") + opf = BeautifulSoup(opf_xml, "xml") + opf_dir = posixpath.dirname(opf_path) + + manifest: dict[str, str] = {} + for item in opf.find_all("item"): + iid = item.get("id") + href = item.get("href") + if iid and href: + manifest[iid] = _norm(opf_dir, href) + + spine_idrefs: list[str] = [] + spine_tag = opf.find("spine") + toc_id = spine_tag.get("toc") if spine_tag else None + if spine_tag: + for ir in spine_tag.find_all("itemref"): + rid = ir.get("idref") + if rid: + spine_idrefs.append(rid) + + hrefs = [manifest[rid] for rid in spine_idrefs if rid in manifest] + href_to_title: dict[str, str] = {} + + ncx_path = "" + if toc_id and toc_id in manifest: + ncx_path = manifest[toc_id] + elif "toc.ncx" in names: + ncx_path = "toc.ncx" + elif "OEBPS/toc.ncx" in names: + ncx_path = "OEBPS/toc.ncx" + + if ncx_path and ncx_path in names: + try: + ncx_xml = z.read(ncx_path).decode("utf-8", errors="replace") + ncx = BeautifulSoup(ncx_xml, "xml") + ncx_dir = posixpath.dirname(ncx_path) + for np in ncx.find_all("navPoint"): + content = np.find("content") + label_tag = np.find("text") + src = content.get("src") if content else "" + label = label_tag.get_text(strip=True) if label_tag else "" + if src and label: + href_to_title[_norm(ncx_dir, src)] = _html.unescape(label) + except Exception: + pass + + chapters = [] + for i, href in enumerate(hrefs): + base = posixpath.basename(href) + title = href_to_title.get(href, re.sub(r"\.(xhtml|html|htm)$", "", base, flags=re.I)) + chapters.append({"index": i, "title": title or f"Chapter {i+1}", "href": href}) + return chapters + + +def _norm_href(base_dir: str, rel: str) -> str: + rel = (rel or "").split("#", 1)[0].strip() + if not rel: + return "" + return posixpath.normpath(posixpath.join(base_dir, rel)).lstrip("./") + + +def _find_opf_path(names: set[str], container_xml: str | None) -> str | None: + opf_path = "OEBPS/content.opf" + if container_xml: + m = 
re.search(r"full-path\s*=\s*['\"]([^'\"]+)['\"]", container_xml) + if m: + opf_path = m.group(1) + if opf_path in names: + return opf_path + candidates = sorted(n for n in names if n.lower().endswith(".opf")) + return candidates[0] if candidates else None + + +def _make_new_chapter_xhtml(title: str) -> str: + safe_title = _html.escape((title or "New chapter").strip() or "New chapter") + return ( + "\n" + "\n" + "\n" + "\n" + " \n" + f" {safe_title}\n" + " \n" + "\n" + "\n" + f"

<h1>{safe_title}</h1>\n"
+        "  <p></p>
\n" + "\n" + "\n" + ) + + +def _rewrite_epub_entries(epub_path: Path, updates: dict[str, bytes], remove_paths: set[str] | None = None) -> None: + remove_paths = set(remove_paths or set()) + tmp = epub_path.with_suffix(".tmp.epub") + with zf.ZipFile(epub_path, "r") as zin, zf.ZipFile(tmp, "w", compression=zf.ZIP_DEFLATED) as zout: + names = zin.namelist() + for name in names: + if name in remove_paths: + continue + if name in updates: + zout.writestr(name, updates[name]) + else: + zout.writestr(name, zin.read(name)) + for name, data in updates.items(): + if name not in names: + zout.writestr(name, data) + tmp.replace(epub_path) + + +@router.get("/library/editor/{filename:path}", response_class=HTMLResponse) +async def editor_page(filename: str, request: Request): + path = _resolve_output_path(filename) + if path is None or not path.exists(): + return HTMLResponse("Not found", status_code=404) + + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute("SELECT title FROM library WHERE filename = %s", (filename,)) + row = cur.fetchone() + title = row[0] if row and row[0] else filename + + return templates.TemplateResponse(request, "editor.html", {"filename": filename, "title": title}) + + +@router.get("/api/edit/chapter/{index:int}/{filename:path}") +async def get_edit_chapter(filename: str, index: int): + path = _resolve_output_path(filename) + if path is None or not path.exists(): + return Response(status_code=404) + spine = _epub_spine(path) + if index < 0 or index >= len(spine): + return Response(status_code=404) + ch = spine[index] + content = read_epub_file(path, ch["href"]) + return JSONResponse({"index": index, "href": ch["href"], "title": ch["title"], "content": content}) + + +@router.post("/api/edit/chapter/{index:int}/{filename:path}") +async def save_edit_chapter(filename: str, index: int, request: Request): + path = _resolve_output_path(filename) + if path is None: + return JSONResponse({"error": "not found"}, status_code=404) + if not path.exists(): + return JSONResponse({"error": "File not found"}, status_code=404) + body = await request.json() + content = body.get("content", "") + if not content: + return JSONResponse({"error": "No content"}, status_code=400) + spine = _epub_spine(path) + if index < 0 or index >= len(spine): + return JSONResponse({"error": "Chapter not found"}, status_code=404) + href = spine[index]["href"] + try: + write_epub_file(path, href, content) + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=500) + return JSONResponse({"ok": True}) + + +@router.post("/api/edit/chapter/add/{filename:path}") +async def add_edit_chapter(filename: str, request: Request): + path = _resolve_output_path(filename) + if path is None: + return JSONResponse({"error": "not found"}, status_code=404) + if not path.exists(): + return JSONResponse({"error": "File not found"}, status_code=404) + + body = await request.json() + title = (body.get("title") or "New chapter").strip() or "New chapter" + after_index = body.get("after_index", -1) + try: + after_index = int(after_index) + except Exception: + after_index = -1 + + with zf.ZipFile(path, "r") as z: + names = set(z.namelist()) + container_xml = z.read("META-INF/container.xml").decode("utf-8", errors="replace") if "META-INF/container.xml" in names else None + opf_path = _find_opf_path(names, container_xml) + if not opf_path: + return JSONResponse({"error": "OPF not found"}, status_code=400) + + opf_xml = z.read(opf_path).decode("utf-8", errors="replace") + opf = BeautifulSoup(opf_xml, 
"xml") + opf_dir = posixpath.dirname(opf_path) + + manifest = {} + for item in opf.find_all("item"): + iid = item.get("id") + href = item.get("href") + if iid and href: + manifest[iid] = _norm_href(opf_dir, href) + + spine_tag = opf.find("spine") + if not spine_tag: + return JSONResponse({"error": "Invalid OPF spine"}, status_code=400) + itemrefs = spine_tag.find_all("itemref") + current_len = len(itemrefs) + + if after_index < -1: + after_index = -1 + if after_index >= current_len: + after_index = current_len - 1 + + ref_dir_rel = "Text" + if current_len > 0 and after_index >= 0: + ref_idref = itemrefs[after_index].get("idref", "") + ref_abs = manifest.get(ref_idref, "") + if ref_abs: + ref_rel = posixpath.relpath(ref_abs, opf_dir) + ref_dir_rel = posixpath.dirname(ref_rel) or "" + + while True: + stem = f"chapter_added_{uuid.uuid4().hex[:8]}" + rel = posixpath.join(ref_dir_rel, f"{stem}.xhtml") if ref_dir_rel else f"{stem}.xhtml" + abs_path = _norm_href(opf_dir, rel) + if abs_path not in names: + break + + existing_ids = {item.get("id") for item in opf.find_all("item") if item.get("id")} + i = 1 + new_id = f"ch_add_{i:03d}" + while new_id in existing_ids: + i += 1 + new_id = f"ch_add_{i:03d}" + + manifest_tag = opf.find("manifest") + if not manifest_tag: + return JSONResponse({"error": "Invalid OPF manifest"}, status_code=400) + + new_item = opf.new_tag("item") + new_item["id"] = new_id + new_item["href"] = rel + new_item["media-type"] = "application/xhtml+xml" + manifest_tag.append(new_item) + + new_itemref = opf.new_tag("itemref") + new_itemref["idref"] = new_id + if after_index >= 0 and after_index + 1 < len(itemrefs): + itemrefs[after_index + 1].insert_before(new_itemref) + else: + spine_tag.append(new_itemref) + + toc_id = spine_tag.get("toc") + ncx_path = manifest.get(toc_id, "") if toc_id else "" + if not ncx_path: + for item in opf.find_all("item"): + mt = (item.get("media-type") or "").lower() + if mt == "application/x-dtbncx+xml" and item.get("href"): + ncx_path = _norm_href(opf_dir, item.get("href")) + break + + updates: dict[str, bytes] = {opf_path: str(opf).encode("utf-8")} + if ncx_path and ncx_path in names: + ncx_xml = z.read(ncx_path).decode("utf-8", errors="replace") + ncx = BeautifulSoup(ncx_xml, "xml") + nav_map = ncx.find("navMap") + if nav_map: + nav_points = nav_map.find_all("navPoint") + np = ncx.new_tag("navPoint") + np["id"] = f"{new_id}_nav" + label = ncx.new_tag("navLabel") + text = ncx.new_tag("text") + text.string = title + label.append(text) + content = ncx.new_tag("content") + ncx_dir = posixpath.dirname(ncx_path) + content["src"] = posixpath.relpath(abs_path, ncx_dir) + np.append(label) + np.append(content) + + insert_pos = after_index + 1 + if 0 <= insert_pos < len(nav_points): + nav_points[insert_pos].insert_before(np) + else: + nav_map.append(np) + + for idx, node in enumerate(nav_map.find_all("navPoint"), 1): + node["playOrder"] = str(idx) + + updates[ncx_path] = str(ncx).encode("utf-8") + + updates[abs_path] = _make_new_chapter_xhtml(title).encode("utf-8") + _rewrite_epub_entries(path, updates) + + new_spine = _epub_spine(path) + new_index = min(max(after_index + 1, 0), max(len(new_spine) - 1, 0)) + return JSONResponse({"ok": True, "index": new_index, "count": len(new_spine)}) + + +@router.delete("/api/edit/chapter/{index:int}/{filename:path}") +async def delete_edit_chapter(filename: str, index: int): + path = _resolve_output_path(filename) + if path is None: + return JSONResponse({"error": "not found"}, status_code=404) + if not path.exists(): + 
return JSONResponse({"error": "File not found"}, status_code=404) + + with zf.ZipFile(path, "r") as z: + names = set(z.namelist()) + container_xml = z.read("META-INF/container.xml").decode("utf-8", errors="replace") if "META-INF/container.xml" in names else None + opf_path = _find_opf_path(names, container_xml) + if not opf_path: + return JSONResponse({"error": "OPF not found"}, status_code=400) + + opf_xml = z.read(opf_path).decode("utf-8", errors="replace") + opf = BeautifulSoup(opf_xml, "xml") + opf_dir = posixpath.dirname(opf_path) + + manifest = {} + for item in opf.find_all("item"): + iid = item.get("id") + href = item.get("href") + if iid and href: + manifest[iid] = _norm_href(opf_dir, href) + + spine_tag = opf.find("spine") + if not spine_tag: + return JSONResponse({"error": "Invalid OPF spine"}, status_code=400) + itemrefs = spine_tag.find_all("itemref") + + if index < 0 or index >= len(itemrefs): + return JSONResponse({"error": "Chapter not found"}, status_code=404) + if len(itemrefs) <= 1: + return JSONResponse({"error": "Cannot delete the last chapter"}, status_code=400) + + target_idref = itemrefs[index].get("idref", "") + target_href = manifest.get(target_idref, "") + if not target_href: + return JSONResponse({"error": "Chapter target missing in manifest"}, status_code=400) + + itemrefs[index].decompose() + + manifest_tag = opf.find("manifest") + if manifest_tag: + for item in manifest_tag.find_all("item"): + if item.get("id") == target_idref: + item.decompose() + break + + toc_id = spine_tag.get("toc") + ncx_path = manifest.get(toc_id, "") if toc_id else "" + if not ncx_path: + for item in opf.find_all("item"): + mt = (item.get("media-type") or "").lower() + if mt == "application/x-dtbncx+xml" and item.get("href"): + ncx_path = _norm_href(opf_dir, item.get("href")) + break + + updates: dict[str, bytes] = {opf_path: str(opf).encode("utf-8")} + remove_paths: set[str] = {target_href} + + if ncx_path and ncx_path in names: + ncx_xml = z.read(ncx_path).decode("utf-8", errors="replace") + ncx = BeautifulSoup(ncx_xml, "xml") + nav_map = ncx.find("navMap") + if nav_map: + ncx_dir = posixpath.dirname(ncx_path) + for np in nav_map.find_all("navPoint"): + content = np.find("content") + src = content.get("src") if content else "" + if src and _norm_href(ncx_dir, src) == target_href: + np.decompose() + for idx, node in enumerate(nav_map.find_all("navPoint"), 1): + node["playOrder"] = str(idx) + updates[ncx_path] = str(ncx).encode("utf-8") + + _rewrite_epub_entries(path, updates, remove_paths) + new_spine = _epub_spine(path) + new_index = min(index, max(len(new_spine) - 1, 0)) + return JSONResponse({"ok": True, "index": new_index, "count": len(new_spine)}) diff --git a/containers/novela/routers/grabber.py b/containers/novela/routers/grabber.py new file mode 100644 index 0000000..c7f540a --- /dev/null +++ b/containers/novela/routers/grabber.py @@ -0,0 +1,473 @@ +import asyncio +import base64 +import json +import traceback +import uuid +from datetime import datetime, timezone +from typing import AsyncGenerator +from urllib.parse import urljoin, urlparse + +import httpx +from bs4 import Tag +from fastapi import APIRouter, Request +from fastapi.responses import HTMLResponse, StreamingResponse +from fastapi.templating import Jinja2Templates + +from db import get_db_conn +from epub import detect_image_format, make_chapter_xhtml, make_epub +from routers.common import ( + LIBRARY_DIR, + ensure_cover_cache_for_book, + ensure_cover_missing_tag, + ensure_unique_rel_path, + make_rel_path, + 
normalize_site, + upsert_book, +) +from scrapers import get_scraper +from scrapers.base import HEADERS +from security import decrypt_value, encrypt_value, is_encrypted_value +from xhtml import configure_break_patterns, element_to_xhtml, is_break_element + +templates = Jinja2Templates(directory="templates") +router = APIRouter() + +JOBS: dict[str, dict] = {} + + +def _load_all_credentials() -> dict: + out = {} + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("SELECT site, username, password FROM credentials ORDER BY site") + rows = cur.fetchall() + for site, username_raw, password_raw in rows: + username = decrypt_value(username_raw) + password = decrypt_value(password_raw) + out[site] = {"username": username, "password": password} + + if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw): + cur.execute( + """ + UPDATE credentials + SET username = %s, password = %s, updated_at = NOW() + WHERE site = %s + """, + (encrypt_value(username), encrypt_value(password), site), + ) + return out + + +def _domain(url: str) -> str: + raw = (url or "").strip() + if "://" in raw: + raw = urlparse(raw).netloc + return normalize_site(raw) + + +def _load_break_patterns() -> None: + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + "SELECT pattern_type, pattern FROM break_patterns WHERE enabled = TRUE ORDER BY id" + ) + rows = cur.fetchall() + configure_break_patterns( + regex_strings=[r[1] for r in rows if r[0] == "regex"], + css_classes=[r[1] for r in rows if r[0] == "css_class"], + ) + + +def _next_series_index(series: str) -> int: + if not series: + return 1 + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + "SELECT COALESCE(MAX(series_index), 0) FROM library WHERE series = %s", + (series,), + ) + return (cur.fetchone()[0] or 0) + 1 + + +@router.get("/grabber", response_class=HTMLResponse) +async def grabber_page(request: Request): + from pathlib import Path + + tmpl = "grabber.html" if Path("templates/grabber.html").exists() else "index.html" + return templates.TemplateResponse(request, tmpl, {"active": "grabber"}) + + +@router.get("/convert", response_class=HTMLResponse) +async def convert_page(request: Request): + from pathlib import Path + + tmpl = "grabber.html" if Path("templates/grabber.html").exists() else "index.html" + return templates.TemplateResponse(request, tmpl, {"active": "grabber"}) + + +@router.get("/credentials-manager", response_class=HTMLResponse) +async def credentials_manager_page(request: Request): + return templates.TemplateResponse(request, "credentials.html", {"active": "credentials"}) + + +@router.get("/debug", response_class=HTMLResponse) +async def debug_page(request: Request): + return templates.TemplateResponse(request, "debug.html", {"active": "debug"}) + + +@router.post("/debug/run") +async def debug_run(request: Request): + body = await request.json() + url = (body.get("url") or "").strip() + if not url: + return {"error": "No URL provided"} + + creds = _load_all_credentials().get(_domain(url), {}) + username = creds.get("username", "") + password = creds.get("password", "") + + try: + scraper = get_scraper(url) + except ValueError as e: + return {"error": str(e)} + + result: dict = {} + try: + async with httpx.AsyncClient(headers=HEADERS, follow_redirects=True, timeout=30) as client: + if username: + await scraper.login(client, username, password) + book = await scraper.fetch_book_info(client, url) + result = { + "title": book.get("title", ""), + "author": 
book.get("author", ""), + "publisher": book.get("publisher", ""), + "series": book.get("series", ""), + "chapter_count": len(book.get("chapters", [])), + "chapter_method": book.get("chapter_method", ""), + "genres": book.get("genres", []), + "subgenres": book.get("subgenres", []), + "tags": book.get("tags", []), + "description": book.get("description", ""), + "publication_status": book.get("publication_status", ""), + } + except Exception: + result["error"] = traceback.format_exc() + return result + + +@router.get("/credentials") +async def get_credentials(): + return _load_all_credentials() + + +@router.post("/credentials") +async def save_credential(request: Request): + body = await request.json() + site = normalize_site(body.get("site", "")) + if not site: + return {"error": "No site provided"} + + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO credentials (site, username, password, updated_at) + VALUES (%s, %s, %s, NOW()) + ON CONFLICT (site) DO UPDATE + SET username = EXCLUDED.username, + password = EXCLUDED.password, + updated_at = NOW() + """, + (site, encrypt_value(body.get("username", "")), encrypt_value(body.get("password", ""))), + ) + return {"ok": True} + + +@router.delete("/credentials/{site:path}") +async def delete_credential(site: str): + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("DELETE FROM credentials WHERE site = %s", (normalize_site(site),)) + return {"ok": True} + + +@router.post("/preload") +async def preload(request: Request): + body = await request.json() + url = (body.get("url") or "").strip() + if not url: + return {"error": "No URL provided"} + + creds = _load_all_credentials().get(_domain(url), {}) + username = creds.get("username", "") + password = creds.get("password", "") + + try: + scraper = get_scraper(url) + except ValueError as e: + return {"error": str(e)} + + async with httpx.AsyncClient(headers=HEADERS, follow_redirects=True, timeout=30) as client: + if username: + await scraper.login(client, username, password) + book = await scraper.fetch_book_info(client, url) + series = book.get("series", "") + hint = int(book.get("series_index_hint", 0) or 0) + return { + "title": book.get("title", ""), + "author": book.get("author", ""), + "publisher": book.get("publisher", ""), + "series": series, + "series_index_next": hint if hint else _next_series_index(series), + "genres": book.get("genres", []), + "subgenres": book.get("subgenres", []), + "tags": book.get("tags", []), + "description": book.get("description", ""), + "updated_date": book.get("updated_date", ""), + "publication_status": book.get("publication_status", ""), + } + + +async def scrape_book(job_id: str, url: str, username: str, password: str) -> None: + job = JOBS[job_id] + + def send(event: str, data: dict): + job["events"].append({"event": event, "data": data}) + + try: + await _run_scrape(job_id, url, username, password, send) + except Exception as e: + send("error", {"message": f"Unexpected error: {e}"}) + job["done"] = True + + +async def _run_scrape(job_id: str, url: str, username: str, password: str, send) -> None: + job = JOBS[job_id] + send("status", {"message": "Connecting..."}) + + scraper = get_scraper(url) + + async with httpx.AsyncClient(headers=HEADERS, follow_redirects=True, timeout=30) as client: + if username: + send("status", {"message": "Logging in..."}) + await scraper.login(client, username, password) + + book = await scraper.fetch_book_info(client, url) + book_title = 
book["title"] + author = book["author"] + send("meta", {"title": book_title, "author": author}) + + if not book.get("chapters"): + send("error", {"message": "No chapters found. Check the URL or credentials."}) + job["done"] = True + return + + send("chapters", {"chapters": [c["title"] for c in book["chapters"]]}) + send("status", {"message": f"Found {len(book['chapters'])} chapters. Downloading..."}) + + cover_data: bytes | None = job.pop("cover_upload", None) + + tags = list(book.get("tags", [])) + if len(book["chapters"]) < 4 and "Shorts" not in tags: + tags.append("Shorts") + if cover_data is None and "Cover Missing" not in tags: + tags.append("Cover Missing") + + status_map = {"Long-Term Hold": "Hiatus"} + pub_status = status_map.get(book.get("publication_status", ""), book.get("publication_status", "")) + + series = book.get("series", "") + series_index = int(job.get("series_index", 1) or 1) + + updated_date_override = (job.pop("updated_date_override", "") or "").strip() + final_updated_date = ( + updated_date_override + or book.get("updated_date", "") + or datetime.now(timezone.utc).strftime("%Y-%m-%d") + ) + + book_info = { + "genres": book.get("genres", []), + "subgenres": book.get("subgenres", []), + "tags": tags, + "description": book.get("description", ""), + "updated_date": final_updated_date, + "source_url": book.get("source_url", ""), + "publisher": book.get("publisher", ""), + "series": series, + "series_index": series_index, + "publication_status": pub_status, + } + + _load_break_patterns() + break_img_data = open("static/break.png", "rb").read() + + chapters = [] + for i, ch in enumerate(book["chapters"], 1): + send("progress", {"current": i, "total": len(book["chapters"]), "title": ch["title"]}) + try: + ch_data = await scraper.fetch_chapter(client, ch) + content_el = ch_data["content_el"] + + chapter_images = [] + if content_el: + img_counter = 1 + for img_tag in content_el.find_all("img"): + if is_break_element(img_tag): + continue + src = img_tag.get("src", "") + if not src or src.startswith("data:"): + img_tag.decompose() + continue + try: + img_resp = await client.get(urljoin(ch["url"], src)) + if img_resp.status_code == 200: + img_name, img_mime = detect_image_format( + img_resp.content, f"ch{i:03d}_img{img_counter:03d}" + ) + img_tag["src"] = f"../Images/{img_name}" + img_tag["alt"] = img_tag.get("alt", "") + chapter_images.append( + { + "epub_path": f"OEBPS/Images/{img_name}", + "data": img_resp.content, + "media_type": img_mime, + } + ) + img_counter += 1 + else: + img_tag.decompose() + except Exception: + img_tag.decompose() + + xhtml_parts = [] + if content_el: + all_p = content_el.find_all("p") + empty_p = sum( + 1 + for p in all_p + if not [c for c in p.children if isinstance(c, Tag)] + and not p.get_text().replace("\xa0", "").strip() + ) + filled_p = len(all_p) - empty_p + empty_p_is_spacer = filled_p > 0 and empty_p >= filled_p * 0.5 + for child in content_el.children: + part = element_to_xhtml(child, empty_p_is_spacer=empty_p_is_spacer) + if part.strip(): + xhtml_parts.append(part) + + content_xhtml = "\n".join(xhtml_parts) + chapter_xhtml = make_chapter_xhtml(ch_data["title"], content_xhtml, i) + chapters.append({"title": ch_data["title"], "xhtml": chapter_xhtml, "images": chapter_images}) + await asyncio.sleep(0.2) + except Exception as e: + send("warning", {"message": f"Chapter {i} skipped: {e}"}) + + if not chapters: + send("error", {"message": "No chapters could be processed."}) + job["done"] = True + return + + send("status", {"message": "Building 
EPUB..."}) + book_id = str(uuid.uuid4()) + epub_bytes = make_epub(book_title, author, chapters, cover_data, break_img_data, book_id, book_info) + + rel = ensure_unique_rel_path( + make_rel_path( + media_type="epub", + publisher=book_info.get("publisher", ""), + author=author, + title=book_title, + series=series, + series_index=series_index, + ) + ) + out_path = LIBRARY_DIR / rel + out_path.parent.mkdir(parents=True, exist_ok=True) + out_path.write_bytes(epub_bytes) + + rel_filename = rel.as_posix() + job["filename"] = rel_filename + + book_meta = { + "media_type": "epub", + "has_cover": cover_data is not None, + "series": book_info.get("series", ""), + "series_index": series_index if book_info.get("series") else 0, + "title": book_title, + "publication_status": book_info.get("publication_status", ""), + "author": author, + "publisher": book_info.get("publisher", ""), + "source_url": book_info.get("source_url", ""), + "description": book_info.get("description", ""), + "publish_date": final_updated_date, + "needs_review": False, + } + book_tags = ( + [(g, "genre") for g in book_info.get("genres", [])] + + [(g, "subgenre") for g in book_info.get("subgenres", [])] + + [(g, "tag") for g in book_info.get("tags", [])] + ) + + with get_db_conn() as conn: + with conn: + upsert_book(conn, rel_filename, book_meta, book_tags) + ensure_cover_missing_tag(conn, rel_filename, bool(book_meta["has_cover"])) + ensure_cover_cache_for_book(conn, rel_filename, out_path, "epub") + + send("done", {"filename": rel_filename, "title": book_title, "chapters": len(chapters)}) + job["done"] = True + + +@router.post("/convert") +async def convert(request: Request): + body = await request.json() + url = (body.get("url") or "").strip() + if not url: + return {"error": "No URL provided"} + + creds = _load_all_credentials().get(_domain(url), {}) + username = creds.get("username", "") + password = creds.get("password", "") + + job_id = str(uuid.uuid4()) + job: dict = {"events": [], "done": False, "filename": None} + + cover_b64 = body.get("cover_b64") + if cover_b64: + try: + job["cover_upload"] = base64.b64decode(cover_b64) + except Exception: + pass + + job["series_index"] = int(body.get("series_index", 1) or 1) + job["updated_date_override"] = (body.get("updated_date") or "").strip() + + JOBS[job_id] = job + asyncio.create_task(scrape_book(job_id, url, username, password)) + return {"job_id": job_id, "using_credentials": bool(username)} + + +@router.get("/events/{job_id}") +async def events(job_id: str): + if job_id not in JOBS: + return StreamingResponse(iter([]), media_type="text/event-stream") + + async def stream() -> AsyncGenerator[str, None]: + sent = 0 + while True: + job = JOBS.get(job_id, {}) + evts = job.get("events", []) + while sent < len(evts): + evt = evts[sent] + yield f"event: {evt['event']}\ndata: {json.dumps(evt['data'])}\n\n" + sent += 1 + if job.get("done") and sent >= len(evts): + break + await asyncio.sleep(0.2) + + return StreamingResponse(stream(), media_type="text/event-stream") diff --git a/containers/novela/routers/library.py b/containers/novela/routers/library.py new file mode 100644 index 0000000..2e441e4 --- /dev/null +++ b/containers/novela/routers/library.py @@ -0,0 +1,383 @@ +import base64 +import uuid +from datetime import datetime, timezone +from pathlib import Path + +from fastapi import APIRouter, File, Request, UploadFile +from fastapi.responses import HTMLResponse, Response +from fastapi.templating import Jinja2Templates +from PIL import UnidentifiedImageError + +from db import 
get_db_conn +from epub import add_cover_to_epub +from routers.common import ( + LIBRARY_DIR, + ensure_cover_cache_for_book, + ensure_cover_missing_tag, + ensure_unique_rel_path, + list_library_json, + make_cover_thumb_webp, + make_rel_path, + media_type_from_suffix, + prune_empty_dirs, + relative_file_info, + resolve_library_path, + scan_media, + upsert_book, + upsert_cover_cache, +) + +templates = Jinja2Templates(directory="templates") +router = APIRouter() + + +def _collect_files() -> list[Path]: + files: list[Path] = [] + for ext in ("*.epub", "*.pdf", "*.cbr", "*.cbz"): + files.extend(LIBRARY_DIR.rglob(ext)) + return files + + +def _sync_disk_to_db() -> int: + files = _collect_files() + synced = 0 + with get_db_conn() as conn: + with conn: + for p in files: + rel = p.relative_to(LIBRARY_DIR).as_posix() + meta = scan_media(p) + if not meta.get("media_type"): + continue + tags = [(s, "subject") for s in meta.get("subjects", [])] + upsert_book(conn, rel, meta, tags) + ensure_cover_missing_tag(conn, rel, bool(meta.get("has_cover"))) + if bool(meta.get("has_cover")): + ensure_cover_cache_for_book(conn, rel, p, meta["media_type"]) + synced += 1 + + with conn.cursor() as cur: + cur.execute("SELECT filename FROM library") + db_files = {r[0] for r in cur.fetchall()} + disk_files = {p.relative_to(LIBRARY_DIR).as_posix() for p in files} + for missing in db_files - disk_files: + with conn.cursor() as cur: + cur.execute("DELETE FROM library WHERE filename = %s", (missing,)) + return synced + + +@router.get("/library", response_class=HTMLResponse) +async def library_page(request: Request): + return templates.TemplateResponse(request, "library.html", {"active": "library"}) + + +@router.get("/api/library") +async def api_library(): + _sync_disk_to_db() + books = list_library_json() + for b in books: + p = resolve_library_path(b["filename"]) + if p and p.exists(): + b.update(relative_file_info(p)) + return books + + +@router.post("/library/rescan") +async def library_rescan(): + scanned = _sync_disk_to_db() + return {"ok": True, "scanned": scanned} + + +@router.post("/library/import") +async def library_import(files: list[UploadFile] = File(...)): + imported: list[str] = [] + skipped: list[dict[str, str]] = [] + with get_db_conn() as conn: + with conn: + for upload in files: + try: + name = upload.filename or "upload.bin" + suffix = Path(name).suffix.lower() + if suffix not in {".epub", ".pdf", ".cbr", ".cbz"}: + skipped.append({"file": name, "reason": "Unsupported file type"}) + continue + + data = await upload.read() + if not data: + skipped.append({"file": name, "reason": "Empty upload"}) + continue + + tmp = LIBRARY_DIR / f".import-{uuid.uuid4().hex}{suffix}" + tmp.parent.mkdir(parents=True, exist_ok=True) + tmp.write_bytes(data) + + meta = scan_media(tmp) + media_type = meta.get("media_type") + if not media_type: + tmp.unlink(missing_ok=True) + skipped.append({"file": name, "reason": "Could not detect media type"}) + continue + + rel = ensure_unique_rel_path( + make_rel_path( + media_type=media_type, + publisher=meta.get("publisher", ""), + author=meta.get("author", ""), + title=meta.get("title") or Path(name).stem, + series=meta.get("series", ""), + series_index=meta.get("series_index", 0), + ) + ) + dest = LIBRARY_DIR / rel + dest.parent.mkdir(parents=True, exist_ok=True) + tmp.replace(dest) + + rel_name = rel.as_posix() + meta["needs_review"] = True + tags = [(s, "subject") for s in meta.get("subjects", [])] + upsert_book(conn, rel_name, meta, tags) + ensure_cover_missing_tag(conn, rel_name, 
bool(meta.get("has_cover"))) + ensure_cover_cache_for_book(conn, rel_name, dest, media_type) + imported.append(rel_name) + except Exception as e: + skipped.append({"file": upload.filename or "upload", "reason": str(e)}) + finally: + await upload.close() + + return {"ok": True, "imported": imported, "skipped": skipped} + + +@router.delete("/library/file/{filename:path}") +async def library_delete(filename: str): + full = resolve_library_path(filename) + if full is None: + return {"error": "Invalid filename"} + if not full.exists(): + return {"error": "File not found"} + + parent = full.parent + full.unlink() + prune_empty_dirs(parent) + + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("DELETE FROM library WHERE filename = %s", (filename,)) + return {"ok": True} + + +@router.get("/library/cover-cached/{filename:path}") +async def library_cover_cached(filename: str): + full = resolve_library_path(filename) + if full is None or not full.exists(): + return Response(status_code=404) + + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + "SELECT thumb_webp FROM library_cover_cache WHERE filename = %s", + (filename,), + ) + row = cur.fetchone() + if row and row[0]: + return Response(content=bytes(row[0]), media_type="image/webp") + + cur.execute("SELECT media_type FROM library WHERE filename = %s", (filename,)) + row = cur.fetchone() + mt = row[0] if row else media_type_from_suffix(full) + if not ensure_cover_cache_for_book(conn, filename, full, mt): + return Response(status_code=404) + + cur.execute( + "SELECT thumb_webp FROM library_cover_cache WHERE filename = %s", + (filename,), + ) + row = cur.fetchone() + if row and row[0]: + return Response(content=bytes(row[0]), media_type="image/webp") + return Response(status_code=404) + + +@router.get("/library/cover/{filename:path}") +async def library_cover(filename: str): + full = resolve_library_path(filename) + if full is None or not full.exists(): + return Response(status_code=404) + + mt = media_type_from_suffix(full) + if mt == "epub": + from routers.common import extract_cover_from_epub + + extracted = extract_cover_from_epub(full) + if not extracted: + return Response(status_code=404) + raw, mime = extracted + return Response(content=raw, media_type=mime) + + if mt in {"pdf", "cbr"}: + with get_db_conn() as conn: + with conn: + if ensure_cover_cache_for_book(conn, filename, full, mt): + with conn.cursor() as cur: + cur.execute( + "SELECT thumb_webp FROM library_cover_cache WHERE filename = %s", + (filename,), + ) + row = cur.fetchone() + if row and row[0]: + return Response(content=bytes(row[0]), media_type="image/webp") + return Response(status_code=404) + + +@router.post("/library/cover/{filename:path}") +async def library_add_cover(filename: str, request: Request): + full = resolve_library_path(filename) + if full is None or not full.exists(): + return {"error": "File not found"} + if media_type_from_suffix(full) != "epub": + return {"error": "Cover upload is only supported for EPUB"} + + body = await request.json() + cover_b64 = body.get("cover_b64", "") + if not cover_b64: + return {"error": "No image provided"} + + try: + cover_data = base64.b64decode(cover_b64) + add_cover_to_epub(full, cover_data) + except Exception as e: + return {"error": str(e)} + + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO library (filename, media_type, has_cover, updated_at) + VALUES (%s, 'epub', TRUE, NOW()) + ON CONFLICT 
(filename) DO UPDATE SET has_cover = TRUE, updated_at = NOW() + """, + (filename,), + ) + try: + thumb = make_cover_thumb_webp(cover_data) + upsert_cover_cache(conn, filename, "image/webp", thumb) + except (UnidentifiedImageError, OSError, ValueError): + pass + ensure_cover_missing_tag(conn, filename, True) + return {"ok": True} + + +@router.post("/library/want-to-read/{filename:path}") +async def library_want_to_read(filename: str): + full = resolve_library_path(filename) + if full is None: + return {"error": "Invalid filename"} + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("SELECT want_to_read FROM library WHERE filename = %s", (filename,)) + row = cur.fetchone() + if not row: + return {"error": "Not found"} + val = not bool(row[0]) + cur.execute( + "UPDATE library SET want_to_read = %s, updated_at = NOW() WHERE filename = %s", + (val, filename), + ) + return {"ok": True, "want_to_read": val} + + +@router.post("/library/archive/{filename:path}") +async def library_archive(filename: str): + full = resolve_library_path(filename) + if full is None: + return {"error": "Invalid filename"} + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("SELECT archived FROM library WHERE filename = %s", (filename,)) + row = cur.fetchone() + if not row: + return {"error": "Not found"} + val = not bool(row[0]) + cur.execute( + "UPDATE library SET archived = %s, updated_at = NOW() WHERE filename = %s", + (val, filename), + ) + return {"ok": True, "archived": val} + + +@router.get("/home", response_class=HTMLResponse) +async def home_page(request: Request): + return templates.TemplateResponse(request, "home.html", {"active": "home"}) + + +@router.get("/api/home") +async def api_home(): + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + """ + SELECT l.filename, l.title, l.author, l.media_type, + COALESCE(rp.progress, 0) AS progress, + MAX(rs.read_at) AS last_read + FROM library l + LEFT JOIN reading_progress rp ON rp.filename = l.filename + LEFT JOIN reading_sessions rs ON rs.filename = l.filename + GROUP BY l.filename, l.title, l.author, l.media_type, rp.progress + ORDER BY last_read DESC NULLS LAST, l.updated_at DESC + LIMIT 30 + """ + ) + rows = cur.fetchall() + return { + "continue_reading": [ + { + "filename": r[0], + "title": r[1] or "", + "author": r[2] or "", + "media_type": r[3], + "progress": r[4] or 0, + "last_read": r[5].isoformat() if r[5] else None, + } + for r in rows + ] + } + + +@router.get("/stats", response_class=HTMLResponse) +async def stats_page(request: Request): + return templates.TemplateResponse(request, "stats.html", {"active": "stats"}) + + +@router.get("/api/stats") +async def api_stats(): + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute("SELECT COUNT(*)::int FROM library") + total_books = cur.fetchone()[0] + cur.execute("SELECT COUNT(*)::int FROM reading_sessions") + total_reads = cur.fetchone()[0] + cur.execute("SELECT COUNT(DISTINCT filename)::int FROM reading_sessions") + unique_books_read = cur.fetchone()[0] + cur.execute( + """ + SELECT media_type, COUNT(*)::int + FROM library + GROUP BY media_type + ORDER BY media_type + """ + ) + by_type = [{"media_type": r[0], "count": r[1]} for r in cur.fetchall()] + return { + "total_books": total_books, + "total_reads": total_reads, + "unique_books_read": unique_books_read, + "by_media_type": by_type, + "generated_at": datetime.now(timezone.utc).isoformat(), + } + +@router.get("/library/list") +async def 
library_list_compat(): + return await api_library() diff --git a/containers/novela/routers/reader.py b/containers/novela/routers/reader.py new file mode 100644 index 0000000..57af742 --- /dev/null +++ b/containers/novela/routers/reader.py @@ -0,0 +1,993 @@ +""" +reader.py — In-browser EPUB reader routes. + +Registered in main.py via app.include_router(reader.router). +Shared low-level helpers (_db_conn, _scan_epub) are defined locally to +avoid circular imports with main.py. +""" + +import html as _html +import io +import os +import posixpath +import re +import uuid +import zipfile as zf +from datetime import datetime +from pathlib import Path + +import psycopg2 +from bs4 import BeautifulSoup +from fastapi import APIRouter, Request +from fastapi.responses import FileResponse, HTMLResponse, JSONResponse, Response +from fastapi.templating import Jinja2Templates + +from cbr import cbr_get_page +from epub import read_epub_file, write_epub_file +from pdf import pdf_render_page + +router = APIRouter() +templates = Jinja2Templates(directory="templates") + +OUTPUT_DIR = Path("library") +OUTPUT_ROOT = OUTPUT_DIR.resolve() + + +# --------------------------------------------------------------------------- +# Shared helpers (local copies — avoids circular imports with main.py) +# --------------------------------------------------------------------------- + +def _db_conn(): + return psycopg2.connect( + host=os.environ.get("POSTGRES_HOST", "postgres"), + port=int(os.environ.get("POSTGRES_PORT", 5432)), + dbname=os.environ.get("POSTGRES_DB", "novela"), + user=os.environ.get("POSTGRES_USER", "novela"), + password=os.environ.get("POSTGRES_PASSWORD", ""), + ) + + +def _scan_epub(path: Path) -> dict: + """Inspect an EPUB zip and return metadata dict.""" + has_cover = False + series = "" + series_index = 0 + title = "" + publication_status = "" + author = "" + publisher = "" + source_url = "" + publish_date = "" + subjects: list[str] = [] + description = "" + try: + with zf.ZipFile(path, "r") as z: + names = set(z.namelist()) + has_cover = any(n.lower().endswith((".jpg", ".jpeg", ".png", ".webp", ".gif")) and "cover" in n.lower() for n in names) + container_xml = z.read("META-INF/container.xml").decode("utf-8", errors="replace") if "META-INF/container.xml" in names else None + opf_path = _find_opf_path(names, container_xml) + if opf_path and opf_path in names: + opf = z.read(opf_path).decode("utf-8", errors="replace") + m = re.search(r'<(?:dc:)?title[^>]*>(.*?)', opf, re.DOTALL | re.IGNORECASE) + if m: + title = _html.unescape(m.group(1).strip()) + m = re.search(r'<(?:dc:)?creator[^>]*>(.*?)', opf, re.DOTALL | re.IGNORECASE) + if m: + author = _html.unescape(m.group(1).strip()) + m = re.search(r'<(?:dc:)?publisher[^>]*>(.*?)', opf, re.DOTALL | re.IGNORECASE) + if m: + publisher = _html.unescape(m.group(1).strip()) + m = re.search(r']*name="calibre:series"[^>]*content="([^"]+)"', opf, re.IGNORECASE) + if m: + series = _html.unescape(m.group(1).strip()) + mi = re.search(r']*name="calibre:series_index"[^>]*content="([^"]+)"', opf, re.IGNORECASE) + if mi: + try: + series_index = int(float(mi.group(1))) + except Exception: + series_index = 0 + ms = re.search(r']*name="publication_status"[^>]*content="([^"]+)"', opf, re.IGNORECASE) + if ms: + publication_status = _html.unescape(ms.group(1).strip()) + m = re.search(r'<(?:dc:)?source[^>]*>(.*?)', opf, re.DOTALL | re.IGNORECASE) + if m: + source_url = _html.unescape(m.group(1).strip()) + m = re.search(r'<(?:dc:)?date[^>]*>(.*?)', opf, re.DOTALL | re.IGNORECASE) + if 
m: + publish_date = _html.unescape(m.group(1).strip()) + date_candidate = publish_date.split('T', 1)[0] + try: + parsed_date = datetime.fromisoformat(date_candidate).date() + publish_date = parsed_date.isoformat() if parsed_date.year >= 1900 else '' + except Exception: + publish_date = '' + subjects = [ + _html.unescape(s.strip()) + for s in re.findall(r'<(?:dc:)?subject[^>]*>(.*?)', opf, re.DOTALL | re.IGNORECASE) + if s.strip() + ] + m = re.search(r'<(?:dc:)?description[^>]*>(.*?)', opf, re.DOTALL | re.IGNORECASE) + if m: + description = _html.unescape(m.group(1).strip()) + except Exception: + pass + return { + "has_cover": has_cover, + "series": series, + "series_index": series_index, + "title": title, + "publication_status": publication_status, + "author": author, + "publisher": publisher, + "source_url": source_url, + "publish_date": publish_date, + "subjects": subjects, + "description": description, + } + + +# --------------------------------------------------------------------------- +# EPUB helpers +# --------------------------------------------------------------------------- + +def _epub_spine(path: Path) -> list[dict]: + """Return ordered list of {index, title, href} for all spine items. + + Supports both EPUB2 (toc.ncx) and EPUB3 (nav.xhtml), and respects + the OPF location declared in META-INF/container.xml. + """ + def _norm(base_dir: str, rel: str) -> str: + rel = (rel or '').split('#', 1)[0].strip() + if not rel: + return '' + joined = posixpath.normpath(posixpath.join(base_dir, rel)) + return joined.lstrip('./') + + with zf.ZipFile(path, 'r') as z: + names = set(z.namelist()) + + opf_path = 'OEBPS/content.opf' + try: + container_xml = z.read('META-INF/container.xml').decode('utf-8', errors='replace') + m = re.search(r"full-path\\s*=\\s*['\"]([^'\"]+)['\"]", container_xml) + if m: + opf_path = m.group(1) + except Exception: + pass + if opf_path not in names: + # fallback for malformed books + candidates = [n for n in names if n.lower().endswith('.opf')] + if not candidates: + return [] + opf_path = sorted(candidates)[0] + + opf_xml = z.read(opf_path).decode('utf-8', errors='replace') + opf = BeautifulSoup(opf_xml, 'xml') + opf_dir = posixpath.dirname(opf_path) + + manifest: dict[str, str] = {} + for item in opf.find_all('item'): + iid = item.get('id') + href = item.get('href') + if iid and href: + manifest[iid] = _norm(opf_dir, href) + + spine_idrefs: list[str] = [] + spine_tag = opf.find('spine') + toc_id = spine_tag.get('toc') if spine_tag else None + if spine_tag: + for ir in spine_tag.find_all('itemref'): + rid = ir.get('idref') + if rid: + spine_idrefs.append(rid) + + hrefs = [manifest[rid] for rid in spine_idrefs if rid in manifest] + + href_to_title: dict[str, str] = {} + + # EPUB2: NCX titles + ncx_path = '' + if toc_id and toc_id in manifest: + ncx_path = manifest[toc_id] + elif 'toc.ncx' in names: + ncx_path = 'toc.ncx' + elif 'OEBPS/toc.ncx' in names: + ncx_path = 'OEBPS/toc.ncx' + + if ncx_path and ncx_path in names: + try: + ncx_xml = z.read(ncx_path).decode('utf-8', errors='replace') + ncx = BeautifulSoup(ncx_xml, 'xml') + ncx_dir = posixpath.dirname(ncx_path) + for np in ncx.find_all('navPoint'): + content = np.find('content') + label_tag = np.find('text') + src = content.get('src') if content else '' + label = label_tag.get_text(strip=True) if label_tag else '' + if src and label: + href_to_title[_norm(ncx_dir, src)] = _html.unescape(label) + except Exception: + pass + + # EPUB3: nav.xhtml titles (fallback) + if not href_to_title: + nav_item = None + for item 
in opf.find_all('item'): + props = (item.get('properties') or '').split() + if 'nav' in props: + nav_item = item + break + if nav_item and nav_item.get('href'): + nav_path = _norm(opf_dir, nav_item.get('href')) + if nav_path in names: + try: + nav_xml = z.read(nav_path).decode('utf-8', errors='replace') + nav = BeautifulSoup(nav_xml, 'lxml') + nav_dir = posixpath.dirname(nav_path) + for a in nav.select('nav a[href]'): + src = a.get('href', '') + label = a.get_text(' ', strip=True) + if src and label: + href_to_title[_norm(nav_dir, src)] = _html.unescape(label) + except Exception: + pass + + chapters = [] + for i, href in enumerate(hrefs): + base = posixpath.basename(href) + title = href_to_title.get(href, re.sub(r'\.(xhtml|html|htm)$', '', base, flags=re.I)) + chapters.append({'index': i, 'title': title or f'Chapter {i+1}', 'href': href}) + return chapters + + + +def _norm_href(base_dir: str, rel: str) -> str: + rel = (rel or '').split('#', 1)[0].strip() + if not rel: + return '' + return posixpath.normpath(posixpath.join(base_dir, rel)).lstrip('./') + + +def _find_opf_path(names: set[str], container_xml: str | None) -> str | None: + opf_path = 'OEBPS/content.opf' + if container_xml: + m = re.search(r'full-path\s*=\s*[\'"]([^\'"]+)[\'"]', container_xml) + if m: + opf_path = m.group(1) + if opf_path in names: + return opf_path + candidates = sorted(n for n in names if n.lower().endswith('.opf')) + return candidates[0] if candidates else None + + +def _make_new_chapter_xhtml(title: str) -> str: + safe_title = _html.escape((title or 'New chapter').strip() or 'New chapter') + return ( + '\n' + '\n' + '\n' + '\n' + ' \n' + f' {safe_title}\n' + ' \n' + '\n' + '\n' + f'

<h1>{safe_title}</h1>\n'
+        '  <p></p>
\n' + '\n' + '\n' + ) + + +def _tag_local(name: str | None) -> str: + if not name: + return '' + return name.split(':', 1)[-1].lower() + + +def _sync_epub_metadata( + epub_path: Path, + *, + title: str, + author: str, + publisher: str, + publication_status: str, + source_url: str, + publish_date: str, + description: str, + series: str, + series_index: int | str | None, + subjects: list[str], +) -> None: + """Write edited metadata back into OPF so DB and EPUB stay aligned.""" + with zf.ZipFile(epub_path, 'r') as z: + names = set(z.namelist()) + container_xml = z.read('META-INF/container.xml').decode('utf-8', errors='replace') if 'META-INF/container.xml' in names else None + opf_path = _find_opf_path(names, container_xml) + if not opf_path or opf_path not in names: + return + opf_xml = z.read(opf_path).decode('utf-8', errors='replace') + + opf = BeautifulSoup(opf_xml, 'xml') + metadata = opf.find(lambda t: _tag_local(getattr(t, 'name', None)) == 'metadata') + if not metadata: + return + + def set_dc(local_name: str, value: str) -> None: + existing = [t for t in metadata.find_all(lambda t: _tag_local(getattr(t, 'name', None)) == local_name)] + tag_name = existing[0].name if existing else f'dc:{local_name}' + for t in existing: + t.decompose() + if value: + nt = opf.new_tag(tag_name) + nt.string = value + metadata.append(nt) + + def set_named_meta(key: str, value: str) -> None: + existing = [ + t for t in metadata.find_all(lambda t: _tag_local(getattr(t, 'name', None)) == 'meta') + if (t.get('name') or '').strip() == key + ] + tag_name = existing[0].name if existing else 'meta' + for t in existing: + t.decompose() + if value: + nt = opf.new_tag(tag_name) + nt['name'] = key + nt['content'] = value + metadata.append(nt) + + set_dc('title', (title or '').strip()) + set_dc('creator', (author or '').strip()) + set_dc('publisher', (publisher or '').strip()) + set_dc('source', (source_url or '').strip()) + date_value = (publish_date or '').strip() + if date_value: + date_candidate = date_value.split('T', 1)[0] + try: + parsed_date = datetime.fromisoformat(date_candidate).date() + date_value = parsed_date.isoformat() if parsed_date.year >= 1900 else '' + except Exception: + date_value = '' + set_dc('date', date_value) + set_dc('description', (description or '').strip()) + + # Replace subjects from editor tags (genres + subgenres + tags). 
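+    # Illustrative example (assumed data, not taken from the original source):
+    # subjects=["Fantasy", "fantasy", "Slice of Life"] removes every existing
+    # <dc:subject> and writes exactly two back, "Fantasy" and "Slice of Life",
+    # because duplicates are dropped case-insensitively via the casefold() key
+    # below while the first-seen casing is kept.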
+ for t in [t for t in metadata.find_all(lambda t: _tag_local(getattr(t, 'name', None)) == 'subject')]: + t.decompose() + seen: set[str] = set() + for raw in subjects: + val = (raw or '').strip() + if not val: + continue + key = val.casefold() + if key in seen: + continue + seen.add(key) + nt = opf.new_tag('dc:subject') + nt.string = val + metadata.append(nt) + + set_named_meta('publication_status', (publication_status or '').strip()) + + series_val = (series or '').strip() + set_named_meta('calibre:series', series_val) + if series_val: + set_named_meta('calibre:series_index', str(_coerce_series_index(series_index))) + else: + set_named_meta('calibre:series_index', '') + + _rewrite_epub_entries(epub_path, {opf_path: str(opf).encode('utf-8')}) + + +def _rewrite_epub_entries(epub_path: Path, updates: dict[str, bytes], remove_paths: set[str] | None = None) -> None: + remove_paths = remove_paths or set() + with open(epub_path, 'rb') as f: + original = f.read() + + out = io.BytesIO() + with zf.ZipFile(io.BytesIO(original), 'r') as zin, zf.ZipFile(out, 'w', zf.ZIP_DEFLATED) as zout: + existing = set() + for item in zin.infolist(): + name = item.filename + existing.add(name) + if name in remove_paths: + continue + data = updates.get(name) + if data is None: + data = zin.read(name) + ctype = zf.ZIP_STORED if name == 'mimetype' else zf.ZIP_DEFLATED + zout.writestr(name, data, compress_type=ctype) + + for name, data in updates.items(): + if name in existing or name in remove_paths: + continue + ctype = zf.ZIP_STORED if name == 'mimetype' else zf.ZIP_DEFLATED + zout.writestr(name, data, compress_type=ctype) + + with open(epub_path, 'wb') as f: + f.write(out.getvalue()) + + +def _resolve_output_path(filename: str) -> Path | None: + rel = Path(filename) + if rel.is_absolute() or any(part in {"", ".", ".."} for part in rel.parts): + return None + candidate = (OUTPUT_DIR / rel).resolve() + try: + candidate.relative_to(OUTPUT_ROOT) + except ValueError: + return None + return candidate + + +def _prune_empty_output_dirs(start_dir: Path) -> None: + """Remove empty parent directories under OUTPUT_DIR, but never OUTPUT_DIR itself.""" + try: + cur = start_dir.resolve() + cur.relative_to(OUTPUT_ROOT) + except Exception: + return + + while cur != OUTPUT_ROOT: + try: + cur.rmdir() + except OSError: + break + cur = cur.parent + + +def _clean_segment(value: str, fallback: str, max_len: int = 100) -> str: + txt = re.sub(r"\s+", " ", (value or "").strip()) + txt = re.sub(r'[<>:"/\\|?*\x00-\x1f]', "", txt) + txt = re.sub(r"\.+$", "", txt).strip() + if not txt: + txt = fallback + return txt[:max_len] + + +def _coerce_series_index(value: int | str | None) -> int: + try: + return max(1, min(999, int(value or 1))) + except (TypeError, ValueError): + return 1 + + +def _make_rel_path( + *, + publisher: str, + author: str, + title: str, + series: str, + series_index: int | str | None, +) -> Path: + pub_dir = _clean_segment(publisher, "Unknown Publisher", 80) + author_dir = _clean_segment(author, "Unknown Author", 80) + clean_title = _clean_segment(title, "Untitled", 140) + clean_series = _clean_segment(series, "", 120) + if clean_series: + idx = _coerce_series_index(series_index) + filename = f"{idx:03d} - {clean_title}.epub" + return Path(pub_dir) / author_dir / "Series" / clean_series / filename + return Path(pub_dir) / author_dir / "Stories" / f"{clean_title}.epub" + + +def _ensure_unique_rel_path(rel_path: Path, *, exclude: Path | None = None) -> Path: + base = rel_path.with_suffix(".epub") + candidate = base + counter = 
2 + while True: + full = (OUTPUT_DIR / candidate).resolve() + if exclude is not None and full == exclude.resolve(): + return candidate + if not full.exists(): + return candidate + candidate = base.with_name(f"{base.stem} ({counter}){base.suffix}") + counter += 1 + + +def _guard(filename: str) -> bool: + """Return True if the filename contains path-traversal characters.""" + return "/" in filename or "\\" in filename or ".." in filename + + +# --------------------------------------------------------------------------- +# Routes +# --------------------------------------------------------------------------- + +@router.get("/library/epub/{filename:path}") +async def library_epub(filename: str): + """Serve EPUB inline (no Content-Disposition: attachment) for the reader.""" + path = _resolve_output_path(filename) + if path is None: + return Response(status_code=404) + if not path.exists(): + return Response(status_code=404) + return FileResponse(path, media_type="application/epub+zip") + + +@router.get("/library/chapters/{filename:path}") +async def get_chapter_list(filename: str): + path = _resolve_output_path(filename) + if path is None: + return Response(status_code=404) + if not path.exists(): + return Response(status_code=404) + return _epub_spine(path) + + +@router.get("/library/chapter/{index}/{filename:path}") +async def get_chapter_html(filename: str, index: int): + """Extract a single chapter from the EPUB and return it as an HTML fragment.""" + path = _resolve_output_path(filename) + if path is None: + return Response(status_code=404) + if not path.exists(): + return Response(status_code=404) + spine = _epub_spine(path) + if index < 0 or index >= len(spine): + return Response(status_code=404) + href = spine[index]["href"] + with zf.ZipFile(path, "r") as z: + xhtml = z.read(href).decode("utf-8", errors="replace") + soup = BeautifulSoup(xhtml, "lxml") + body = soup.find("body") + if not body: + return Response("

<p>No content.</p>
", media_type="text/html") + # Rewrite relative image paths to the chapter-image API endpoint + href_dir = href.rsplit("/", 1)[0] # e.g. "OEBPS/Text" + for img in body.find_all("img"): + src = img.get("src", "") + if src and not src.startswith("http") and not src.startswith("data:"): + parts = href_dir.split("/") + src.split("/") + resolved: list[str] = [] + for p in parts: + if p == "..": + if resolved: + resolved.pop() + else: + resolved.append(p) + img["src"] = f"/library/chapter-img/{'/'.join(resolved[1:])}?filename={filename}" + return Response(str(body), media_type="text/html") + + +@router.get("/library/chapter-img/{path:path}") +async def get_chapter_image(path: str, filename: str): + """Serve an image extracted from the EPUB zip.""" + epub_path = _resolve_output_path(filename) + if epub_path is None: + return Response(status_code=404) + if not epub_path.exists(): + return Response(status_code=404) + try: + with zf.ZipFile(epub_path, "r") as z: + data = z.read("OEBPS/" + path) + except KeyError: + return Response(status_code=404) + ext = path.rsplit(".", 1)[-1].lower() + mt = {"jpg": "image/jpeg", "jpeg": "image/jpeg", "png": "image/png", + "webp": "image/webp", "gif": "image/gif"}.get(ext, "image/octet-stream") + return Response(content=data, media_type=mt) + + +@router.get("/library/progress/{filename:path}") +async def get_progress(filename: str): + if _resolve_output_path(filename) is None: + return {"error": "Invalid filename"} + conn = _db_conn() + try: + with conn.cursor() as cur: + cur.execute( + "SELECT cfi, page, progress FROM reading_progress WHERE filename = %s", + (filename,), + ) + row = cur.fetchone() + return {"cfi": row[0], "progress": row[1] or 0} if row else {"cfi": None, "progress": 0} + finally: + conn.close() + + +@router.delete("/library/progress/{filename:path}") +async def clear_progress(filename: str): + """Remove reading progress so the book returns to unread state. + + Reading sessions (mark-as-read history) are intentionally left intact. 
+ """ + if _resolve_output_path(filename) is None: + return {"error": "Invalid filename"} + conn = _db_conn() + try: + with conn: + with conn.cursor() as cur: + cur.execute("DELETE FROM reading_progress WHERE filename = %s", (filename,)) + finally: + conn.close() + return {"ok": True} + + +@router.post("/library/progress/{filename:path}") +async def save_progress(filename: str, request: Request): + if _resolve_output_path(filename) is None: + return {"error": "Invalid filename"} + body = await request.json() + cfi = body.get("cfi", "") + page = body.get("page") + if page is not None: + try: + page = int(page) + except Exception: + page = None + progress = max(0, min(100, int(body.get("progress", 0)))) + conn = _db_conn() + try: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO reading_progress (filename, cfi, page, progress, updated_at) + VALUES (%s, %s, %s, %s, NOW()) + ON CONFLICT (filename) DO UPDATE + SET cfi = EXCLUDED.cfi, + page = EXCLUDED.page, + progress = EXCLUDED.progress, + updated_at = NOW() + """, + (filename, cfi, page, progress), + ) + finally: + conn.close() + return {"ok": True} + + +@router.post("/library/mark-read/{filename:path}") +async def library_mark_read(filename: str, request: Request): + if _resolve_output_path(filename) is None: + return {"error": "Invalid filename"} + path = _resolve_output_path(filename) + if path is None or not path.exists(): + return {"error": "File not found"} + body = {} + try: + body = await request.json() + except Exception: + pass + read_at = body.get("read_at") # ISO datetime string, or None for now + conn = _db_conn() + try: + with conn: + with conn.cursor() as cur: + if read_at: + cur.execute( + "INSERT INTO reading_sessions (filename, read_at) VALUES (%s, %s)", + (filename, read_at), + ) + else: + cur.execute( + "INSERT INTO reading_sessions (filename) VALUES (%s)", + (filename,), + ) + cur.execute("DELETE FROM reading_progress WHERE filename = %s", (filename,)) + finally: + conn.close() + return {"ok": True} + + +@router.get("/library/book/{filename:path}", response_class=HTMLResponse) +async def book_detail_page(filename: str, request: Request): + path = _resolve_output_path(filename) + if path is None: + return HTMLResponse("Not found", status_code=404) + if not path.exists(): + return HTMLResponse("Not found", status_code=404) + conn = _db_conn() + try: + with conn.cursor() as cur: + cur.execute( + """ + SELECT title, author, publisher, has_cover, series, series_index, + publication_status, want_to_read, source_url, archived, publish_date, description + FROM library WHERE filename = %s + """, + (filename,), + ) + lib_row = cur.fetchone() + if lib_row: + entry = { + "title": lib_row[0] or "", + "author": lib_row[1] or "", + "publisher": lib_row[2] or "", + "has_cover": lib_row[3] or False, + "series": lib_row[4] or "", + "series_index": lib_row[5] or 0, + "publication_status": lib_row[6] or "", + "want_to_read": lib_row[7] or False, + "source_url": lib_row[8] or "", + "archived": lib_row[9] or False, + "publish_date": lib_row[10].isoformat() if lib_row[10] else "", + "description": lib_row[11] or "", + } + # Supplement empty fields from EPUB metadata + if not entry["source_url"] or not entry["publish_date"] or not entry["description"]: + epub_meta = _scan_epub(path) + if not entry["source_url"]: + entry["source_url"] = epub_meta.get("source_url", "") + if not entry["publish_date"]: + entry["publish_date"] = epub_meta.get("publish_date", "") + if not entry["description"]: + entry["description"] = 
epub_meta.get("description", "") + else: + entry = _scan_epub(path) + entry.setdefault("want_to_read", False) + entry.setdefault("archived", False) + entry.setdefault("publish_date", "") + entry.setdefault("description", "") + + cur.execute( + "SELECT tag, tag_type FROM book_tags WHERE filename = %s ORDER BY tag_type, tag", + (filename,), + ) + genres: list[str] = [] + subgenres: list[str] = [] + tags_list: list[str] = [] + rows = cur.fetchall() + for tag, tag_type in rows: + if tag_type == "genre": + genres.append(tag) + elif tag_type == "subgenre": + subgenres.append(tag) + else: + tags_list.append(tag) + + if not rows: + # Fallback for books where tags only exist in OPF after DB loss/rebuild. + epub_meta = _scan_epub(path) + for subject in epub_meta.get("subjects", []): + if subject not in tags_list: + tags_list.append(subject) + + cur.execute( + "SELECT COUNT(*)::int, MAX(read_at) FROM reading_sessions WHERE filename = %s", + (filename,), + ) + row = cur.fetchone() + read_count = row[0] or 0 + last_read = row[1].isoformat() if row[1] else None + + cur.execute( + "SELECT cfi, progress FROM reading_progress WHERE filename = %s", + (filename,), + ) + row = cur.fetchone() + progress = row[1] or 0 if row else 0 + cfi = row[0] if row else None + finally: + conn.close() + return templates.TemplateResponse(request, "book.html", { + "active": "book", + "filename": filename, + "title": entry["title"], + "author": entry["author"], + "series": entry["series"], + "series_index": entry["series_index"], + "genres": genres, + "subgenres": subgenres, + "tags": tags_list, + "publisher": entry["publisher"], + "publication_status": entry["publication_status"], + "publish_date": entry.get("publish_date", ""), + "has_cover": entry["has_cover"], + "want_to_read": entry["want_to_read"], + "archived": entry["archived"], + "source_url": entry.get("source_url", ""), + "description": entry.get("description", ""), + "read_count": read_count, + "last_read": last_read, + "progress": progress, + "cfi": cfi, + }) + + +@router.get("/api/genres") +async def api_genres(type: str | None = None): + """Return all distinct tags from book_tags, sorted alphabetically. + + Optional ``type`` query parameter filters by tag_type (genre, subgenre, tag). 
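+    Example (illustrative values): ``GET /api/genres?type=genre`` returns a flat JSON array such as ``["Drama", "Fantasy"]``.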
+ """ + conn = _db_conn() + try: + with conn.cursor() as cur: + if type == "tag": + cur.execute( + "SELECT DISTINCT tag FROM book_tags WHERE tag_type IN ('tag', 'subject') ORDER BY tag" + ) + elif type: + cur.execute( + "SELECT DISTINCT tag FROM book_tags WHERE tag_type = %s ORDER BY tag", + (type,), + ) + else: + cur.execute("SELECT DISTINCT tag FROM book_tags ORDER BY tag") + result = [r[0] for r in cur.fetchall()] + return JSONResponse(result) + finally: + conn.close() + + +@router.patch("/library/book/{filename:path}") +async def book_update(filename: str, request: Request): + """Update book metadata and tags, and rename/move the file when needed.""" + old_path = _resolve_output_path(filename) + if old_path is None or not old_path.exists(): + return JSONResponse({"error": "not found"}, status_code=404) + + body = await request.json() + title = body.get("title", "") + author = body.get("author", "") + publisher = body.get("publisher", "") + series = body.get("series", "") + series_index = _coerce_series_index(body.get("series_index", 1)) + + target_rel = _make_rel_path( + publisher=publisher, + author=author, + title=title, + series=series, + series_index=series_index, + ) + target_rel = _ensure_unique_rel_path(target_rel, exclude=old_path) + new_filename = target_rel.as_posix() + new_path = (OUTPUT_DIR / target_rel).resolve() + + moved = False + old_parent_to_prune: Path | None = None + if new_path != old_path: + new_path.parent.mkdir(parents=True, exist_ok=True) + old_path.replace(new_path) + moved = True + old_parent_to_prune = old_path.parent + + conn = _db_conn() + try: + _sync_epub_metadata( + new_path, + title=title, + author=author, + publisher=publisher, + publication_status=body.get("publication_status", ""), + source_url=body.get("source_url", ""), + publish_date=body.get("publish_date", ""), + description=body.get("description", ""), + series=series, + series_index=series_index if series else 0, + subjects=(body.get("genres", []) + body.get("subgenres", []) + body.get("tags", [])), + ) + + with conn: + with conn.cursor() as cur: + cur.execute("SELECT has_cover FROM library WHERE filename = %s", (filename,)) + row = cur.fetchone() + has_cover = bool(row[0]) if row and row[0] is not None else bool(_scan_epub(new_path if moved else old_path).get("has_cover", False)) + + cur.execute( + """ + INSERT INTO library ( + filename, title, author, publisher, has_cover, + series, series_index, publication_status, + source_url, publish_date, description, + archived, needs_review, updated_at + ) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, FALSE, FALSE, NOW()) + ON CONFLICT (filename) DO UPDATE SET + title = EXCLUDED.title, + author = EXCLUDED.author, + publisher = EXCLUDED.publisher, + series = EXCLUDED.series, + series_index = EXCLUDED.series_index, + publication_status = EXCLUDED.publication_status, + source_url = EXCLUDED.source_url, + publish_date = EXCLUDED.publish_date, + description = EXCLUDED.description, + needs_review = FALSE, + updated_at = NOW() + """, + ( + new_filename, + title, + author, + publisher, + has_cover, + series, + series_index if series else 0, + body.get("publication_status", ""), + body.get("source_url", ""), + body.get("publish_date") or None, + body.get("description", ""), + ), + ) + + if new_filename != filename: + cur.execute("UPDATE book_tags SET filename = %s WHERE filename = %s", (new_filename, filename)) + cur.execute("UPDATE reading_progress SET filename = %s WHERE filename = %s", (new_filename, filename)) + cur.execute("UPDATE 
reading_sessions SET filename = %s WHERE filename = %s", (new_filename, filename)) + cur.execute("UPDATE library_cover_cache SET filename = %s WHERE filename = %s", (new_filename, filename)) + cur.execute("DELETE FROM library WHERE filename = %s", (filename,)) + + cur.execute("DELETE FROM book_tags WHERE filename = %s", (new_filename,)) + rows = ( + [(new_filename, g, "genre") for g in body.get("genres", []) if g] + + [(new_filename, g, "subgenre") for g in body.get("subgenres", []) if g] + + [(new_filename, g, "tag") for g in body.get("tags", []) if g] + ) + if rows: + cur.executemany( + "INSERT INTO book_tags (filename, tag, tag_type) VALUES (%s, %s, %s)" + " ON CONFLICT (filename, tag, tag_type) DO NOTHING", + rows, + ) + if old_parent_to_prune is not None: + _prune_empty_output_dirs(old_parent_to_prune) + return JSONResponse({"ok": True, "filename": new_filename, "renamed": new_filename != filename}) + except Exception as e: + if moved and new_path.exists() and not old_path.exists(): + new_path.replace(old_path) + return JSONResponse({"error": str(e)}, status_code=500) + finally: + conn.close() + + +@router.get("/library/read/{filename:path}", response_class=HTMLResponse) +async def reader_page(filename: str, request: Request): + path = _resolve_output_path(filename) + if path is None: + return HTMLResponse("Not found", status_code=404) + if not path.exists(): + return HTMLResponse("Not found", status_code=404) + conn = _db_conn() + try: + with conn.cursor() as cur: + cur.execute("SELECT title FROM library WHERE filename = %s", (filename,)) + row = cur.fetchone() + title = row[0] if row and row[0] else filename + finally: + conn.close() + return templates.TemplateResponse(request, "reader.html", { + "filename": filename, + "title": title, + "epub_url": f"/library/epub/{filename}", + }) + +@router.get("/library/pdf/{filename:path}") +async def library_pdf_page(filename: str, page: int = 0, dpi: int = 150): + path = _resolve_output_path(filename) + if path is None: + return JSONResponse({"error": "Invalid filename"}, status_code=400) + if not path.exists(): + return JSONResponse({"error": "File not found"}, status_code=404) + if path.suffix.lower() != ".pdf": + return JSONResponse({"error": "Not a PDF file"}, status_code=400) + try: + data = pdf_render_page(path, page, dpi=dpi) + return Response(content=data, media_type="image/png") + except IndexError: + return JSONResponse({"error": "Page out of range"}, status_code=416) + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=500) + + +@router.get("/library/cbr/{filename:path}/{page:int}") +async def library_cbr_page(filename: str, page: int): + path = _resolve_output_path(filename) + if path is None: + return JSONResponse({"error": "Invalid filename"}, status_code=400) + if not path.exists(): + return JSONResponse({"error": "File not found"}, status_code=404) + if path.suffix.lower() not in {".cbr", ".cbz"}: + return JSONResponse({"error": "Not a CBR/CBZ file"}, status_code=400) + try: + data, mime = cbr_get_page(path, page) + return Response(content=data, media_type=mime) + except IndexError: + return JSONResponse({"error": "Page out of range"}, status_code=416) + except Exception as e: + return JSONResponse({"error": str(e)}, status_code=500) diff --git a/containers/novela/routers/settings.py b/containers/novela/routers/settings.py new file mode 100644 index 0000000..5e61d82 --- /dev/null +++ b/containers/novela/routers/settings.py @@ -0,0 +1,104 @@ +import re + +from fastapi import APIRouter, Request +from 
fastapi.responses import HTMLResponse +from fastapi.templating import Jinja2Templates + +from db import get_db_conn + +templates = Jinja2Templates(directory="templates") +router = APIRouter() + + +@router.get("/settings", response_class=HTMLResponse) +async def settings_page(request: Request): + return templates.TemplateResponse(request, "settings.html", {"active": "settings"}) + + +@router.get("/api/break-patterns") +async def get_break_patterns(): + with get_db_conn() as conn: + with conn.cursor() as cur: + cur.execute( + "SELECT id, pattern_type, pattern, enabled, is_default FROM break_patterns ORDER BY id" + ) + return [ + { + "id": r[0], + "pattern_type": r[1], + "pattern": r[2], + "enabled": r[3], + "is_default": r[4], + } + for r in cur.fetchall() + ] + + +@router.post("/api/break-patterns") +async def add_break_pattern(request: Request): + body = await request.json() + ptype = (body.get("pattern_type") or "").strip() + pattern = (body.get("pattern") or "").strip() + if ptype not in ("regex", "css_class") or not pattern: + return {"error": "Invalid input"} + if ptype == "regex": + try: + re.compile(pattern) + except re.error as e: + return {"error": f"Invalid regex: {e}"} + + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute( + """ + INSERT INTO break_patterns (pattern_type, pattern) + VALUES (%s, %s) + ON CONFLICT (pattern_type, pattern) DO NOTHING + RETURNING id + """, + (ptype, pattern), + ) + row = cur.fetchone() + if not row: + return {"error": "Pattern already exists"} + return {"ok": True, "id": row[0]} + + +@router.patch("/api/break-patterns/{pid}") +async def update_break_pattern(pid: int, request: Request): + body = await request.json() + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + if "enabled" in body: + cur.execute("UPDATE break_patterns SET enabled = %s WHERE id = %s", (bool(body["enabled"]), pid)) + if "pattern" in body: + new_pat = (body.get("pattern") or "").strip() + cur.execute("SELECT pattern_type FROM break_patterns WHERE id = %s", (pid,)) + row = cur.fetchone() + if row and row[0] == "regex": + try: + re.compile(new_pat) + except re.error as e: + return {"error": f"Invalid regex: {e}"} + cur.execute("UPDATE break_patterns SET pattern = %s WHERE id = %s", (new_pat, pid)) + return {"ok": True} + + +@router.delete("/api/break-patterns/{pid}") +async def delete_break_pattern(pid: int): + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("DELETE FROM break_patterns WHERE id = %s", (pid,)) + return {"ok": True} + + +@router.delete("/api/reading-history") +async def reset_reading_history(): + with get_db_conn() as conn: + with conn: + with conn.cursor() as cur: + cur.execute("DELETE FROM reading_sessions") + return {"ok": True} diff --git a/containers/novela/scrapers/__init__.py b/containers/novela/scrapers/__init__.py new file mode 100644 index 0000000..3fdb823 --- /dev/null +++ b/containers/novela/scrapers/__init__.py @@ -0,0 +1,17 @@ +from .base import BaseScraper +from .awesomedude import AwesomeDudeScraper +from .gayauthors import GayAuthorsScraper + +# Register scrapers in priority order (first match wins) +_SCRAPERS: list[type[BaseScraper]] = [ + AwesomeDudeScraper, + GayAuthorsScraper, +] + + +def get_scraper(url: str) -> BaseScraper: + """Return the appropriate scraper instance for the given URL.""" + for scraper_cls in _SCRAPERS: + if scraper_cls.matches(url): + return scraper_cls() + raise ValueError(f"No scraper available for URL: {url}") diff --git 
a/containers/novela/scrapers/__pycache__/__init__.cpython-311.pyc b/containers/novela/scrapers/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..74d68bd Binary files /dev/null and b/containers/novela/scrapers/__pycache__/__init__.cpython-311.pyc differ diff --git a/containers/novela/scrapers/__pycache__/awesomedude.cpython-311.pyc b/containers/novela/scrapers/__pycache__/awesomedude.cpython-311.pyc new file mode 100644 index 0000000..2c0f3d5 Binary files /dev/null and b/containers/novela/scrapers/__pycache__/awesomedude.cpython-311.pyc differ diff --git a/containers/novela/scrapers/__pycache__/base.cpython-311.pyc b/containers/novela/scrapers/__pycache__/base.cpython-311.pyc new file mode 100644 index 0000000..ad3a751 Binary files /dev/null and b/containers/novela/scrapers/__pycache__/base.cpython-311.pyc differ diff --git a/containers/novela/scrapers/__pycache__/gayauthors.cpython-311.pyc b/containers/novela/scrapers/__pycache__/gayauthors.cpython-311.pyc new file mode 100644 index 0000000..0279816 Binary files /dev/null and b/containers/novela/scrapers/__pycache__/gayauthors.cpython-311.pyc differ diff --git a/containers/novela/scrapers/awesomedude.py b/containers/novela/scrapers/awesomedude.py new file mode 100644 index 0000000..b190c5f --- /dev/null +++ b/containers/novela/scrapers/awesomedude.py @@ -0,0 +1,265 @@ +import re +from urllib.parse import urljoin, urlparse + +import httpx +from bs4 import BeautifulSoup + +from .base import BaseScraper + +LAYOUT_RE = re.compile( + r"nav|menu|sidebar|header|footer|breadcrumb|pagination|" + r"comment|widget|aside|banner|ad|rating|follow|share", + re.I, +) +GENERIC_PAGE_TITLES = {"awesomedude home"} + + +class AwesomeDudeScraper(BaseScraper): + + @classmethod + def matches(cls, url: str) -> bool: + return "awesomedude.org" in url + + async def login(self, client: httpx.AsyncClient, username: str, password: str) -> bool: + return True # no login required + + async def fetch_book_info(self, client: httpx.AsyncClient, url: str) -> dict: + r = await client.get(url) + soup = BeautifulSoup(r.text, "html.parser") + actual_url = str(r.url) + + def _clean_title_from_url(page_url: str) -> str: + filename = urlparse(page_url).path.rsplit("/", 1)[-1] + stem = re.sub(r"\.html?$", "", filename, flags=re.I) + stem = stem.replace("_", " ").replace("-", " ").strip() + return stem.title() if stem else "Unknown title" + + def _extract_author_from_byline(text: str) -> str | None: + line = re.sub(r"\s+", " ", text).strip() + m = re.match(r"^by\s+([A-Za-z][A-Za-z .'\-]{1,80})$", line, re.I) + if not m: + return None + return m.group(1).strip(" .,-") + + # ── Title and author ────────────────────────────────────────────── + # Primary: "Story Title by Author Name" + book_title = "Unknown title" + author = "Unknown author" + page_title_el = soup.find("title") + page_title_text = page_title_el.get_text(strip=True) if page_title_el else "" + m = re.match(r"^(.+?)\s+by\s+(.+)$", page_title_text, re.I) + if m: + book_title = m.group(1).strip() + author = m.group(2).strip() + elif page_title_text and page_title_text.strip().lower() not in GENERIC_PAGE_TITLES: + book_title = page_title_text + + # Fallback: first h1 or h2 with "by" pattern + if author == "Unknown author": + for tag in soup.find_all(["h1", "h2"]): + text = tag.get_text(strip=True) + m = re.match(r"^(.+?)\s+by\s+(.+)$", text, re.I) + if m: + book_title = m.group(1).strip() + author = m.group(2).strip() + break + + # Fallback: byline text ("By Author Name") in body. 
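+        # e.g. a text node that reads exactly "By Jane Doe" yields author "Jane Doe" (illustrative name); longer prose lines do not match the anchored byline regex.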
+ if author == "Unknown author": + for text_node in soup.find_all(string=True): + candidate = _extract_author_from_byline(str(text_node)) + if candidate: + author = candidate + break + + # If page title is generic, derive story title from the URL slug. + if book_title == "Unknown title": + book_title = _clean_title_from_url(actual_url) + + # ── Index image ─────────────────────────────────────────────────── + # First image on the page that is not a tiny icon/button and is on + # the same domain. Used as an illustration in the Book Info page. + index_image_url = None + page_host = urlparse(actual_url).netloc + for img in soup.find_all("img"): + src = img.get("src", "") + if not src or src.startswith("data:"): + continue + full_src = urljoin(actual_url, src) + if urlparse(full_src).netloc != page_host: + continue + # Skip obviously tiny elements (buttons / spacers) + try: + if img.get("width") and int(img["width"]) < 60: + continue + if img.get("height") and int(img["height"]) < 60: + continue + except (ValueError, TypeError): + pass + index_image_url = full_src + break + + # ── Chapter discovery ───────────────────────────────────────────── + # Scan for links to .htm/.html files in the same directory, excluding + # the index page itself. + base_dir = actual_url.rsplit("/", 1)[0] + "/" + chapter_links: list[dict] = [] + seen: set[str] = set() + for a in soup.find_all("a", href=True): + full = urljoin(actual_url, a["href"]) + if (full.startswith(base_dir) + and re.search(r"\.html?(\?.*)?$", full, re.I) + and not re.search(r"/index\.html?$", full, re.I) + and full not in seen): + seen.add(full) + text = re.sub(r'\s+', ' ', a.get_text(separator=' ')).strip() + chapter_links.append({"url": full, "title": text, "book_title": book_title, "author": author}) + + if not chapter_links: + # Single-file story: the index page itself is the only chapter + chapter_links = [{"url": actual_url, "title": book_title, "book_title": book_title, "author": author}] + chapter_method = "single_page" + else: + chapter_method = "html_scan" + for i, c in enumerate(chapter_links, 1): + t = c["title"] + if re.match(r"^\d+$", t): + c["title"] = f"Chapter {t}" + elif not t or t.lower() == book_title.lower(): + c["title"] = f"Chapter {i}" + + return { + "title": book_title, + "author": author, + "publisher": "awesomedude.org", + "series": "", + "series_index_hint": 0, + "genres": [], + "subgenres": [], + "tags": [], + "description": "", + "updated_date": "", + "publication_status": "", + "source_url": url, + "chapters": chapter_links, + "chapter_method": chapter_method, + "index_image_url": index_image_url, + } + + async def fetch_chapter(self, client: httpx.AsyncClient, ch: dict) -> dict: + cr = await client.get(ch["url"]) + csoup = BeautifulSoup(cr.text, "html.parser") + title = ch["title"] + book_title_lc = ch.get("book_title", "").lower() + author_lc = ch.get("author", "").lower() + + # Try to refine chapter title from an in-page heading, + # but skip the book title and "by Author" headings. + for tag in csoup.find_all(["h1", "h2", "h3"]): + text = re.sub(r'\s+', ' ', tag.get_text(separator=' ')).strip() + if not text or len(text) >= 120: + continue + text_lc = text.lower() + if re.search(r"\s+by\s+", text, re.I): + continue + if book_title_lc and book_title_lc in text_lc: + continue + if author_lc and author_lc in text_lc: + continue + title = text + break + + # Content extraction: prefer an element with a content-like id/class; + # fall back to the largest div/section not tagged as layout. 
+ content_el = ( + csoup.find(id=re.compile(r"^(chapter|story|content|text)[_-]?", re.I)) + or csoup.find(class_=re.compile(r"story.?text|chapter.?text|post.?content|entry.?content", re.I)) + or csoup.find("article") + ) + + if not content_el: + candidates = [ + el for el in csoup.find_all(["div", "article", "section"]) + if not re.search(LAYOUT_RE, " ".join(el.get("class", []))) + and not re.search(LAYOUT_RE, el.get("id", "")) + ] + if candidates: + content_el = max(candidates, key=lambda el: len(el.get_text(" ", strip=True))) + + body = csoup.find("body") + if body: + body_p_count = len(body.find_all("p")) + body_text_len = len(body.get_text(" ", strip=True)) + selected_p_count = len(content_el.find_all("p")) if content_el else 0 + selected_text_len = len(content_el.get_text(" ", strip=True)) if content_el else 0 + + # Some awesomedude pages keep story text as direct <p> children of body. + # If the selected container is too small, use body instead. + if body_p_count >= 6 and ( + not content_el + or selected_p_count < 3 + or selected_text_len < int(body_text_len * 0.35) + ): + content_el = body + + # Last resort: entire body + if not content_el: + content_el = body + + # Remove known site nav containers when body is used as content root. + if content_el and content_el.name == "body": + for nav_el in content_el.select(".storynavbg, .storynav, .storynavlink, .clearme"): + nav_el.decompose() + + # Strip leading non-story front matter (title/byline/header image blocks). + # Keep the first substantial story paragraph intact. + if content_el: + normalized_title = re.sub(r"\s+", " ", book_title_lc).strip() + normalized_author = re.sub(r"\s+", " ", author_lc).strip() + + for child in list(content_el.children): + if not hasattr(child, "get_text"): + continue # NavigableString whitespace + + text = child.get_text(" ", strip=True) + if not text: + if hasattr(child, "decompose"): + child.decompose() + continue + + text_lc_norm = re.sub(r"\s+", " ", text.lower()).strip() + is_byline = bool(re.match(r"^by\s+[A-Za-z]", text, re.I)) + is_title_line = bool( + normalized_title + and ( + text_lc_norm == normalized_title + or text_lc_norm.startswith(f"{normalized_title} by ") + ) + ) + is_author_line = bool( + normalized_author + and ( + text_lc_norm == normalized_author + or text_lc_norm == f"by {normalized_author}" + ) + ) + is_media_only = getattr(child, "find", lambda *_: None)("img") is not None and len(text_lc_norm) < 80 + + if ( + (is_title_line and len(text_lc_norm) <= 200) + or (is_author_line and len(text_lc_norm) <= 120) + or (is_byline and len(text_lc_norm) <= 120) + or is_media_only + ): + child.decompose() + continue + + break # first substantive paragraph reached + + + return { + "title": title, + "content_el": content_el, + "selector_id": content_el.get("id") if content_el else None, + "selector_class": " ".join(content_el.get("class", [])) if content_el else None, + } diff --git a/containers/novela/scrapers/base.py b/containers/novela/scrapers/base.py new file mode 100644 index 0000000..9570e86 --- /dev/null +++ b/containers/novela/scrapers/base.py @@ -0,0 +1,58 @@ +from abc import ABC, abstractmethod + +import httpx + +HEADERS = { + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" +} + + +class BaseScraper(ABC): + """Abstract base class for all site scrapers. + + To add support for a new site: + 1. Create a new file in scrapers/ (e.g. scrapers/mysite.py) + 2. Subclass BaseScraper and implement all abstract methods + 3. 
Register the class in scrapers/__init__.py + """ + + @classmethod + @abstractmethod + def matches(cls, url: str) -> bool: + """Return True if this scraper handles the given URL.""" + + @abstractmethod + async def login(self, client: httpx.AsyncClient, username: str, password: str) -> bool: + """Perform site login. Returns True on success, False on failure.""" + + @abstractmethod + async def fetch_book_info(self, client: httpx.AsyncClient, url: str) -> dict: + """Fetch the story index page and return all book metadata + chapter list. + + Returns a dict with: + title str — book title + author str — author name + genres list[str] — primary genres + subgenres list[str] — sub-genres + tags list[str] — tags + description str — blurb / synopsis + updated_date str — last updated, YYYY-MM-DD or "" + source_url str — canonical story URL + chapters list[dict] — [{url: str, title: str}, ...] + chapter_method str — "html_scan" or "fallback_numeric" + + Note: cover is not scraped. It is supplied by the user at convert time. + """ + + @abstractmethod + async def fetch_chapter(self, client: httpx.AsyncClient, ch: dict) -> dict: + """Fetch a chapter page and extract its content element. + + ch is a dict with at least {url: str, title: str}. + + Returns a dict with: + title str — chapter title (may be refined from slug/heading) + content_el Tag|None — BeautifulSoup element containing story text + selector_id str|None — id attribute of the matched element (for debug) + selector_class str|None — class string of the matched element (for debug) + """ diff --git a/containers/novela/scrapers/gayauthors.py b/containers/novela/scrapers/gayauthors.py new file mode 100644 index 0000000..f2a9c84 --- /dev/null +++ b/containers/novela/scrapers/gayauthors.py @@ -0,0 +1,236 @@ +import re +from urllib.parse import urljoin + +import httpx +from bs4 import BeautifulSoup + +from .base import BaseScraper + +GA_BASE = "https://www.gayauthors.org" + +LAYOUT_RE = re.compile( + r"nav|menu|sidebar|header|footer|breadcrumb|pagination|" + r"comment|react|widget|aside|banner|ad|rating|follow|tag|share", + re.I, +) + + +class GayAuthorsScraper(BaseScraper): + + @classmethod + def matches(cls, url: str) -> bool: + return "gayauthors.org" in url + + async def login(self, client: httpx.AsyncClient, username: str, password: str) -> bool: + # GET login page first to follow any www. → non-www. 
redirect, then POST to that URL + lr = await client.get(GA_BASE + "/login/") + login_post_url = str(lr.url) + lsoup = BeautifulSoup(lr.text, "html.parser") + csrf_input = lsoup.find("input", {"name": "csrfKey"}) + csrf = csrf_input.get("value", "") if csrf_input else "" + await client.post(login_post_url, data={ + "auth": username, + "password": password, + "csrfKey": csrf, + "remember_me": "1", + "_processLogin": "usernamepassword", + "submit": "Sign In", + }) + return any(c.name == "ips4_member_id" for c in client.cookies.jar) + + async def fetch_book_info(self, client: httpx.AsyncClient, url: str) -> dict: + r = await client.get(url) + soup = BeautifulSoup(r.text, "html.parser") + + # Title — use itemprop="name" to avoid badge text being included + title_el = soup.find(attrs={"itemprop": "name"}) or soup.find("h1") or soup.find("h2") + book_title = title_el.get_text(strip=True) if title_el else "Unknown title" + + # Series — detect badge in h1; two known tooltip formats: + # 'Book "N" of the "Series Name" series' + # 'This Story belongs to the world of: Series Name' + series = "" + series_index_hint = 0 + h1 = soup.find("h1") + if h1: + badge = h1.find(class_=re.compile(r"\bipsBadge\b")) + if badge: + tooltip = badge.get("title", "") + m = re.search(r'Book\s+"(\d+)"\s+of\s+the\s+"(.+?)"\s+series', tooltip, re.I) + if m: + series_index_hint = int(m.group(1)) + series = m.group(2).strip() + " (Series)" + else: + m = re.search(r"belongs to the world of:\s*(.+)", tooltip, re.I) + if m: + series = m.group(1).strip() + " (Series)" + + # Author + author_el = ( + soup.find(attrs={"itemprop": "author"}) + or soup.find("a", {"rel": "author"}) + or soup.find(class_=re.compile(r"author", re.I)) + ) + author = author_el.get_text(strip=True) if author_el else "Unknown author" + + # Genres and sub-genres + genres: list[str] = [] + subgenres: list[str] = [] + for div in soup.find_all("div"): + div_text = div.get_text(separator=" ", strip=True) + if re.match(r"^Genres\s*:", div_text) and not re.match(r"^Sub-?genres\s*:", div_text): + genres = [a.get_text(strip=True) for a in div.find_all("a", attrs={"itemprop": "genre"})] + elif re.match(r"^Sub-?genres\s*:", div_text): + subgenres = [a.get_text(strip=True) for a in div.find_all("a", attrs={"itemprop": "genre"})] + + # Tags + tags: list[str] = [] + tags_ul = soup.find("ul", class_=re.compile(r"\bipsTags\b", re.I)) + if tags_ul: + tags = [ + span.get_text(strip=True).title() + for span in tags_ul.find_all("span", class_="ipsTag") + ] + + # Description (from ipsComment_content > ipsColumn_fluid) + description = "" + desc_container = soup.find(class_="ipsComment_content") + if desc_container: + fluid = desc_container.find(class_="ipsColumn_fluid") + if fluid: + paras = [ + p.get_text(strip=True) for p in fluid.find_all("p") + if p.get_text().replace("\xa0", "").strip() + ] + description = "\n\n".join(paras) + + # Updated date: "Updated: MM/DD/YYYY" → "YYYY-MM-DD" + updated_date = "" + date_match = re.search(r"Updated:\s*(\d{2}/\d{2}/\d{4})", soup.get_text()) + if date_match: + m, d, y = date_match.group(1).split("/") + updated_date = f"{y}-{m}-{d}" + + # Publication status — ipsBadge_style* link inside /stories/browse/status/ href + status_el = soup.find("a", class_=re.compile(r"\bipsBadge_style\d+\b"), + href=re.compile(r"/stories/browse/status/", re.I)) + publication_status = status_el.get_text(strip=True) if status_el else "" + + # Chapter discovery + # Primary: scan HTML for direct child path segments of the story URL + base_story_url = url.rstrip("/") 
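+        # Normalise away "www." on both the story URL and each candidate link so chapter hrefs match whichever host variant the page links to.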
+ base_norm = re.sub(r"://www\.", "://", base_story_url) + actual_page_url = str(r.url) + chapter_links = [] + for a in soup.find_all("a", href=True): + href = a["href"] + full = urljoin(actual_page_url, href) + full_norm = re.sub(r"://www\.", "://", full) + suffix = full_norm[len(base_norm):] + if full_norm.startswith(base_norm + "/") and re.match(r"^/[\w-]+/?$", suffix) and "?" not in full: + text = re.sub(r"^\s*First\s+Chapter\s*", "", a.get_text(strip=True), flags=re.I).strip() + if full not in [c["url"] for c in chapter_links] and text: + chapter_links.append({"url": full, "title": text}) + # Normalise "N. Title" → "Chapter N - Title"; if remainder already starts with "Chapter N", just strip prefix + # Titles with no leading number get "Chapter N - " from the URL slug (e.g. first chapter after badge strip) + for c in chapter_links: + m = re.match(r"^(\d+)[\.\)]\s*(.+)$", c["title"]) + if m: + num, rest = m.group(1), m.group(2).strip() + if re.match(r"Chapter\s+\d", rest, re.I): + c["title"] = rest + else: + c["title"] = f"Chapter {num} - {rest}" + elif not re.match(r"Chapter\s+\d", c["title"], re.I): + slug = c["url"].rstrip("/").split("/")[-1] + if slug.isdigit(): + c["title"] = f"Chapter {slug} - {c['title']}" + + # Fallback: sequential numeric URLs based on chapter count from page text + fallback_used = False + if not chapter_links: + fallback_used = True + count_match = re.search(r'(\d+)\s+[Cc]hapters?', soup.get_text()) + chapter_count = int(count_match.group(1)) if count_match else 0 + chapter_links = [ + {"url": f"{base_story_url}/{i}/", "title": f"Chapter {i}"} + for i in range(1, chapter_count + 1) + ] + + # Strip series name prefix from title (e.g. "Series Name: Actual Title" → "Actual Title") + if series: + series_base = re.sub(r"\s*\(Series\)$", "", series, flags=re.I).strip() + book_title = re.sub( + r"^" + re.escape(series_base) + r"\s*[:–—-]\s*", + "", book_title, flags=re.I + ).strip() or book_title + + return { + "title": book_title, + "author": author, + "publisher": "Gay Author Story Archive", + "series": series, + "series_index_hint": series_index_hint, + "genres": genres, + "subgenres": subgenres, + "tags": tags, + "description": description, + "updated_date": updated_date, + "publication_status": publication_status, + "source_url": url, + "chapters": chapter_links, + "chapter_method": "fallback_numeric" if fallback_used else "html_scan", + } + + async def fetch_chapter(self, client: httpx.AsyncClient, ch: dict) -> dict: + cr = await client.get(ch["url"]) + csoup = BeautifulSoup(cr.text, "html.parser") + + # Derive title from h1#chapterTitle (e.g. "Story - 1. 
My Name is Nick") + title = ch["title"] + chapter_h1 = csoup.find("h1", id="chapterTitle") + if chapter_h1: + raw = chapter_h1.get_text(strip=True) + m = re.search(r"(\d+)[\.\)]\s*(.+)$", raw) + if m: + num, rest = m.group(1), m.group(2).strip() + # Avoid "Chapter 1 - Chapter 1:" when rest already names the chapter + if re.match(r"Chapter\s+\d", rest, re.I): + title = rest + else: + title = f"Chapter {num} - {rest}" + elif title.startswith("Chapter "): + # Fallback: refine generic "Chapter N" placeholder from slug or heading + slug = str(cr.url).rstrip("/").split("/")[-1] + slug_title = re.sub(r"^\d+-", "", slug).replace("-", " ").title() + if slug_title and not slug_title.strip().isdigit(): + title = slug_title + title_heading = csoup.find(class_=re.compile(r"chapter.?title|entry.?title|post.?title", re.I)) + if title_heading: + title = title_heading.get_text(strip=True) + + # Content extraction (most-specific to least-specific) + content_el = ( + csoup.find(id=re.compile(r"^(chapter|story)[_-]?text$", re.I)) + or csoup.find(attrs={"data-role": re.compile(r"chapter|story|content|text", re.I)}) + or csoup.find(class_=re.compile( + r"cBBCodePost|ipsBBCode|cBBCode|story.?text|chapter.?text|post.?content|entry.?content", re.I + )) + or csoup.find("article") + ) + + if not content_el: + candidates = [ + el for el in csoup.find_all(["div", "article", "section"]) + if not re.search(LAYOUT_RE, " ".join(el.get("class", []))) + and not re.search(LAYOUT_RE, el.get("id", "")) + ] + if candidates: + content_el = max(candidates, key=lambda el: len(el.get_text())) + + return { + "title": title, + "content_el": content_el, + "selector_id": content_el.get("id") if content_el else None, + "selector_class": " ".join(content_el.get("class", [])) if content_el else None, + } diff --git a/containers/novela/security.py b/containers/novela/security.py new file mode 100644 index 0000000..d5dc3f7 --- /dev/null +++ b/containers/novela/security.py @@ -0,0 +1,43 @@ +import base64 +import hashlib +import os + +from cryptography.fernet import Fernet, InvalidToken + +_PREFIX = "enc$" + + +def _master_secret() -> str: + return ( + os.environ.get("NOVELA_MASTER_KEY") + or os.environ.get("POSTGRES_PASSWORD") + or "novela-default-key-change-me" + ) + + +def _fernet() -> Fernet: + digest = hashlib.sha256(_master_secret().encode("utf-8")).digest() + key = base64.urlsafe_b64encode(digest) + return Fernet(key) + + +def encrypt_value(value: str | None) -> str: + raw = value or "" + token = _fernet().encrypt(raw.encode("utf-8")).decode("utf-8") + return _PREFIX + token + + +def is_encrypted_value(value: str | None) -> bool: + return bool(value) and str(value).startswith(_PREFIX) + + +def decrypt_value(value: str | None) -> str: + if not value: + return "" + if not value.startswith(_PREFIX): + return value + token = value[len(_PREFIX) :] + try: + return _fernet().decrypt(token.encode("utf-8")).decode("utf-8") + except InvalidToken: + return "" diff --git a/containers/novela/static/book.css b/containers/novela/static/book.css new file mode 100644 index 0000000..9fae22f --- /dev/null +++ b/containers/novela/static/book.css @@ -0,0 +1,282 @@ +/* ── Novela — Book detail page styles ─────────────────────────────────── */ + +:root { + --bg: #0f0e0c; --surface: #1a1815; --surface2: #221f1b; + --border: #2e2a24; --accent: #c8783a; --accent2: #e8a063; + --text: #e8e2d9; --text-dim: #8a8278; --text-faint: #4a453e; + --success: #6baa6b; --warning: #c8a03a; --error: #c85a3a; + --radius: 6px; --sidebar: 220px; + --mono: 'DM Mono', monospace; 
--serif: 'Libre Baskerville', Georgia, serif; +} +*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; } +html, body { height: 100%; background: var(--bg); color: var(--text); font-family: var(--serif); } + +.main { + margin-left: var(--sidebar); + min-height: 100vh; + padding: 2rem 2.5rem 4rem; +} + +/* ── Hero layout ─────────────────────────────────────────────────── */ + +.book-hero { + display: flex; + gap: 2.5rem; + align-items: flex-start; + margin-bottom: 2rem; +} + +/* Cover */ +.cover-area { flex-shrink: 0; } +.cover-wrap { + position: relative; + width: 180px; height: 270px; + border-radius: var(--radius); overflow: hidden; background: var(--surface2); +} +.cover-wrap img { position: absolute; inset: 0; width: 100%; height: 100%; object-fit: cover; } +.cover-wrap canvas { width: 100%; height: 100%; display: block; } + +/* Want to Read star under cover */ +.btn-wtr { + display: flex; align-items: center; gap: 0.5rem; + margin-top: 0.75rem; + background: none; border: 1px solid var(--border); + border-radius: var(--radius); padding: 0.35rem 0.7rem; + font-family: var(--mono); font-size: 0.7rem; + color: var(--text-dim); cursor: pointer; + transition: color 0.15s, border-color 0.15s; + width: 100%; justify-content: center; +} +.btn-wtr:hover { color: var(--warning); border-color: var(--warning); } +.btn-wtr.active { color: var(--warning); border-color: var(--warning); } + +/* Info panel */ +.book-info { flex: 1; min-width: 0; } + +.book-title { + font-size: 1.6rem; font-weight: 700; line-height: 1.2; margin-bottom: 0.4rem; +} +.book-author { + font-family: var(--mono); font-size: 0.85rem; color: var(--text-dim); margin-bottom: 1rem; +} +.book-author a { color: inherit; text-decoration: none; } +.book-author a:hover { color: var(--accent2); } +.publisher-link { color: inherit; text-decoration: none; } +.publisher-link:hover { color: var(--accent2); } + +.meta-grid { display: flex; flex-direction: column; gap: 0.35rem; margin-bottom: 1.25rem; } +.meta-row { display: flex; gap: 0.75rem; font-family: var(--mono); font-size: 0.78rem; } +.meta-label { color: var(--text-dim); min-width: 7rem; flex-shrink: 0; } +.meta-value { color: var(--text); } + +.book-description { + white-space: break-spaces; + line-height: 1.6; + color: var(--text-dim); + width: 100%; + word-break: break-word; +} + +.tag-pill { + display: inline-block; font-family: var(--mono); font-size: 0.62rem; + padding: 0.15rem 0.5rem; border-radius: 3px; + background: var(--surface2); color: var(--text-dim); + border: 1px solid var(--border); margin: 0 0.2rem 0.2rem 0; +} +a.tag-pill { + text-decoration: none; cursor: pointer; + transition: color 0.15s, border-color 0.15s; +} +a.tag-pill:hover { color: var(--accent2); border-color: var(--accent); } + +.status-pill { + display: inline-block; font-family: var(--mono); font-size: 0.62rem; + padding: 0.15rem 0.5rem; border-radius: 3px; +} +.status-complete { background: rgba(107,170,107,0.12); color: var(--success); border: 1px solid rgba(107,170,107,0.25); } +.status-ongoing { background: rgba(200,160,58,0.12); color: var(--warning); border: 1px solid rgba(200,160,58,0.25); } +.status-hiatus { background: rgba(200,160,58,0.12); color: var(--warning); border: 1px solid rgba(200,160,58,0.25); } + +/* Progress */ +.progress-section { margin-bottom: 1.25rem; } +.progress-label { font-family: var(--mono); font-size: 0.65rem; color: var(--text-dim); margin-bottom: 0.35rem; } +.progress-bar-wrap { height: 5px; background: var(--surface2); border-radius: 100px; 
overflow: hidden; margin-bottom: 0.3rem; } +.progress-bar-fill { height: 100%; background: var(--accent); border-radius: 100px; } +.progress-pct { font-family: var(--mono); font-size: 0.65rem; color: var(--text-dim); } + +/* Reading stats */ +.read-stats { font-family: var(--mono); font-size: 0.72rem; color: var(--text-dim); margin-bottom: 1.25rem; } +.read-stats span { color: var(--accent); } + +/* Action buttons */ +.action-row { display: flex; gap: 0.75rem; flex-wrap: wrap; margin-bottom: 1.5rem; } + +.btn-primary { + display: inline-flex; align-items: center; gap: 0.5rem; + padding: 0.65rem 1.25rem; + background: var(--accent); color: #0f0e0c; + border: none; border-radius: var(--radius); + font-family: var(--mono); font-size: 0.8rem; font-weight: 500; + cursor: pointer; text-decoration: none; transition: background 0.15s; +} +.btn-primary:hover { background: var(--accent2); } + +.btn-secondary { + display: inline-flex; align-items: center; gap: 0.5rem; + padding: 0.65rem 1.25rem; + background: var(--surface2); color: var(--text-dim); + border: 1px solid var(--border); border-radius: var(--radius); + font-family: var(--mono); font-size: 0.8rem; + cursor: pointer; text-decoration: none; + transition: color 0.15s, border-color 0.15s; +} +.btn-secondary:hover { color: var(--text); border-color: var(--text-faint); } + +.btn-danger { + display: inline-flex; align-items: center; gap: 0.5rem; + padding: 0.65rem 1.25rem; + background: var(--surface2); color: var(--error); + border: 1px solid rgba(200,90,58,0.35); border-radius: var(--radius); + font-family: var(--mono); font-size: 0.8rem; + cursor: pointer; text-decoration: none; + transition: color 0.15s, border-color 0.15s, background 0.15s; +} +.btn-danger:hover { background: rgba(200,90,58,0.1); border-color: var(--error); } + +/* ── Edit panel ──────────────────────────────────────────────────── */ + +.edit-backdrop { display: none; position: fixed; inset: 0; background: rgba(0,0,0,0.6); z-index: 200; } +.edit-backdrop.open { display: block; } +.edit-panel { + position: fixed; top: 0; right: -480px; width: 460px; max-width: 96vw; + height: 100vh; background: var(--surface); border-left: 1px solid var(--border); + padding: 1.75rem 1.5rem; overflow-y: auto; z-index: 201; + transition: right 0.22s ease; display: flex; flex-direction: column; gap: 1.1rem; +} +.edit-panel.open { right: 0; } +.edit-panel-header { display: flex; align-items: center; justify-content: space-between; margin-bottom: 0.25rem; } +.edit-panel-title { font-family: var(--mono); font-size: 0.7rem; letter-spacing: 0.12em; text-transform: uppercase; color: var(--accent); } +.edit-close { background: none; border: none; color: var(--text-dim); cursor: pointer; padding: 0.2rem; line-height: 1; } +.edit-close:hover { color: var(--text); } +.edit-field { display: flex; flex-direction: column; gap: 0.3rem; } +.edit-label { font-family: var(--mono); font-size: 0.65rem; color: var(--text-dim); letter-spacing: 0.06em; text-transform: uppercase; } +.edit-input { + background: var(--bg); border: 1px solid var(--border); border-radius: var(--radius); + color: var(--text); font-family: var(--mono); font-size: 0.82rem; + padding: 0.5rem 0.7rem; outline: none; transition: border-color 0.15s; width: 100%; +} +.edit-input:focus { border-color: var(--accent); } +.edit-textarea { + line-height: 1.5; + resize: vertical; + min-height: 7.5rem; +} +.edit-select { + background: var(--bg); border: 1px solid var(--border); border-radius: var(--radius); + color: var(--text); font-family: 
var(--mono); font-size: 0.82rem; + padding: 0.5rem 0.7rem; outline: none; width: 100%; + transition: border-color 0.15s; appearance: none; +} +.edit-select:focus { border-color: var(--accent); } +.edit-row { display: grid; grid-template-columns: 1fr 1fr; gap: 0.75rem; } + +/* Genre tag input */ +.genre-box { + background: var(--bg); border: 1px solid var(--border); border-radius: var(--radius); + padding: 0.35rem 0.5rem; display: flex; flex-wrap: wrap; gap: 0.35rem; + align-items: center; cursor: text; transition: border-color 0.15s; min-height: 2.5rem; +} +.genre-box:focus-within { border-color: var(--accent); } +.genre-tag { + display: inline-flex; align-items: center; gap: 0.3rem; + background: var(--surface2); border: 1px solid var(--border); + border-radius: 3px; padding: 0.15rem 0.45rem; + font-family: var(--mono); font-size: 0.7rem; color: var(--text); +} +.genre-tag-x { background: none; border: none; color: var(--text-faint); cursor: pointer; font-size: 0.85rem; line-height: 1; padding: 0; } +.genre-tag-x:hover { color: var(--error); } +.genre-input { background: none; border: none; outline: none; color: var(--text); font-family: var(--mono); font-size: 0.82rem; min-width: 120px; flex: 1; } +.genre-dropdown { + position: absolute; background: var(--surface); border: 1px solid var(--border); + border-radius: var(--radius); max-height: 180px; overflow-y: auto; + z-index: 300; width: 100%; margin-top: 2px; box-shadow: 0 4px 16px rgba(0,0,0,0.4); +} +.genre-option { padding: 0.45rem 0.75rem; font-family: var(--mono); font-size: 0.8rem; color: var(--text-dim); cursor: pointer; } +.genre-option:hover, .genre-option.active { background: var(--surface2); color: var(--text); } +.genre-wrap { position: relative; } + +.edit-footer { display: flex; gap: 0.6rem; justify-content: flex-end; padding-top: 0.5rem; border-top: 1px solid var(--border); margin-top: auto; } + +/* ── Modals ──────────────────────────────────────────────────────── */ + +.modal-backdrop { display: none; position: fixed; inset: 0; background: rgba(0,0,0,0.7); z-index: 100; align-items: center; justify-content: center; } +.modal-backdrop.open { display: flex; } +.modal { background: var(--surface); border: 1px solid var(--border); border-radius: var(--radius); padding: 1.5rem 2rem; max-width: 380px; width: 90%; } +.modal h3 { font-family: var(--mono); font-size: 0.9rem; margin-bottom: 0.75rem; } +.modal p { font-family: var(--mono); font-size: 0.75rem; color: var(--text-dim); margin-bottom: 1.25rem; line-height: 1.6; } +.modal-actions { display: flex; gap: 0.6rem; justify-content: flex-end; } +.modal-field { display: flex; flex-direction: column; gap: 0.3rem; margin-bottom: 1.25rem; } +.modal-label { font-family: var(--mono); font-size: 0.65rem; color: var(--text-dim); letter-spacing: 0.06em; text-transform: uppercase; } +.modal-input { + background: var(--bg); border: 1px solid var(--border); border-radius: var(--radius); + color: var(--text); font-family: var(--mono); font-size: 0.82rem; + padding: 0.5rem 0.7rem; outline: none; transition: border-color 0.15s; width: 100%; color-scheme: dark; +} +.modal-input:focus { border-color: var(--accent); } +.date-row { display: grid; grid-template-columns: 3fr 2fr 2fr; gap: 0.5rem; } +.date-time-row { margin-top: 0.75rem; } +.date-field { display: flex; flex-direction: column; gap: 0.25rem; } +.date-sub-label { font-family: var(--mono); font-size: 0.6rem; color: var(--text-faint); } +.date-input { + background: var(--bg); border: 1px solid var(--border); border-radius: 
var(--radius); + color: var(--text); font-family: var(--mono); font-size: 0.82rem; + padding: 0.5rem 0.5rem; outline: none; width: 100%; + text-align: center; transition: border-color 0.15s; -moz-appearance: textfield; +} +.date-input::-webkit-inner-spin-button, +.date-input::-webkit-outer-spin-button { -webkit-appearance: none; } +.date-input:focus { border-color: var(--accent); } + +.divider { border: none; border-top: 1px solid var(--border); margin: 1.5rem 0; } +.section-label { font-family: var(--mono); font-size: 0.6rem; letter-spacing: 0.12em; text-transform: uppercase; color: var(--text-dim); margin-bottom: 0.75rem; } +.description-text { font-size: 0.88rem; color: var(--text-dim); line-height: 1.8; } +.description-text p + p { margin-top: 0.75rem; } + +/* ── Responsive ──────────────────────────────────────────────────── */ + +@media (max-width: 768px) { + .main { + margin-left: 0; + padding: 4rem 1rem 4rem; + } + + .book-hero { + flex-direction: column; + align-items: center; + gap: 1.5rem; + } + + .cover-area { + display: flex; + flex-direction: column; + align-items: center; + width: 100%; + } + + .cover-wrap { width: 160px; height: 240px; } + .btn-wtr { width: 160px; } + + .book-info { width: 100%; } + + .book-title { font-size: 1.3rem; } + + .meta-label { min-width: 5.5rem; } + + .action-row { gap: 0.5rem; } + .btn-primary, .btn-secondary, .btn-danger { + padding: 0.55rem 0.9rem; + font-size: 0.75rem; + } + + .edit-panel { width: 100vw; max-width: 100vw; right: -100vw; } +} diff --git a/containers/novela/static/book.js b/containers/novela/static/book.js new file mode 100644 index 0000000..e5efa42 --- /dev/null +++ b/containers/novela/static/book.js @@ -0,0 +1,304 @@ +/* ── Novela — Book detail page script ─────────────────────────────────── */ +/* Requires: BOOK global defined inline before this script is loaded */ + +const { filename, title, author } = BOOK; + +// ── Placeholder cover ────────────────────────────────────────────────────── + +const canvas = document.getElementById('cover-canvas'); +canvas.width = 180; +canvas.height = 270; + +const COVER_PALETTES = [ + ['#1a2a3a','#4a8caa'],['#2a1a1a','#aa4a4a'],['#1a2a1a','#4aaa6a'],['#2a1a2a','#8a4aaa'], + ['#2a2a1a','#aaa04a'],['#1a2a2a','#4aaa9a'],['#2a1a14','#c8783a'],['#141a2a','#5a78c8'], +]; +function strHash(s) { + let h = 0; + for (let i = 0; i < s.length; i++) h = (Math.imul(31, h) + s.charCodeAt(i)) | 0; + return Math.abs(h); +} +function makePlaceholderCover(cv, ttl, auth) { + const w = cv.width || 180, h = cv.height || 270; + const ctx = cv.getContext('2d'); + const [bg, fg] = COVER_PALETTES[strHash(ttl) % COVER_PALETTES.length]; + ctx.fillStyle = bg; ctx.fillRect(0, 0, w, h); + ctx.fillStyle = fg; ctx.globalAlpha = 0.15; ctx.fillRect(0, 0, w, h * 0.08); ctx.globalAlpha = 1; + ctx.fillStyle = fg; ctx.fillRect(w * 0.12, h * 0.12, w * 0.04, h * 0.55); + ctx.fillStyle = '#e8e2d9'; + ctx.font = `bold ${Math.round(w * 0.105)}px 'Libre Baskerville', Georgia, serif`; + ctx.textAlign = 'center'; + const words = ttl.split(' '); let line = '', lines = []; + for (const word of words) { + const test = line ? 
line + ' ' + word : word; + if (ctx.measureText(test).width > w * 0.72 && line) { lines.push(line); line = word; } + else line = test; + } + if (line) lines.push(line); + lines = lines.slice(0, 4); + const lineH = Math.round(w * 0.12); + const startY = h * 0.28 - ((lines.length - 1) * lineH) / 2; + lines.forEach((l, i) => ctx.fillText(l, w * 0.55, startY + i * lineH)); + ctx.fillStyle = fg; ctx.font = `${Math.round(w * 0.075)}px 'DM Mono', monospace`; + ctx.globalAlpha = 0.85; + const a = auth.length > 18 ? auth.slice(0, 17) + '…' : auth; + ctx.fillText(a, w * 0.55, h * 0.86); + ctx.globalAlpha = 1; +} +requestAnimationFrame(() => makePlaceholderCover(canvas, title, author)); +if (BOOK.has_cover) { + const img = document.getElementById('cover-img'); + if (img && img.complete && img.naturalWidth > 0) canvas.style.display = 'none'; + else if (img) img.onload = () => canvas.style.display = 'none'; +} + +// ── Want to Read toggle ──────────────────────────────────────────────────── + +async function toggleWtr() { + const resp = await fetch(`/library/want-to-read/${encodeURIComponent(filename)}`, { method: 'POST' }); + const result = await resp.json(); + if (result.error) return; + const btn = document.getElementById('wtr-btn'); + const svg = document.getElementById('wtr-svg'); + btn.classList.toggle('active', result.want_to_read); + svg.setAttribute('fill', result.want_to_read ? 'currentColor' : 'none'); +} + +// ── Mark as Read modal ───────────────────────────────────────────────────── + +function openMarkReadModal() { + const now = new Date(); + document.getElementById('read-year').value = now.getFullYear(); + document.getElementById('read-month').value = now.getMonth() + 1; + document.getElementById('read-day').value = now.getDate(); + document.getElementById('read-time').value = ''; + document.getElementById('mark-read-modal').classList.add('open'); + document.getElementById('read-year').focus(); + document.getElementById('read-year').select(); +} + +document.getElementById('read-year').addEventListener('input', function() { + if (this.value.length === 4) { const m = document.getElementById('read-month'); m.focus(); m.select(); } +}); +document.getElementById('read-month').addEventListener('input', function() { + if (this.value.length === 2) { const d = document.getElementById('read-day'); d.focus(); d.select(); } +}); + +async function confirmMarkRead() { + const year = document.getElementById('read-year').value; + const month = String(document.getElementById('read-month').value).padStart(2, '0'); + const day = String(document.getElementById('read-day').value).padStart(2, '0'); + const time = document.getElementById('read-time').value; + const body = (year && month && day) + ? 
{ read_at: `${year}-${month}-${day}T${time || '12:00'}:00` } + : {}; + await fetch(`/library/mark-read/${encodeURIComponent(filename)}`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(body), + }); + window.location.reload(); +} + +// ── Mark as unread ───────────────────────────────────────────────────────── + +async function markUnread() { + const resp = await fetch(`/library/progress/${encodeURIComponent(filename)}`, { method: 'DELETE' }); + if ((await resp.json()).ok) window.location.reload(); +} + +// ── Archive toggle ───────────────────────────────────────────────────────── + +async function toggleArchive() { + const resp = await fetch(`/library/archive/${encodeURIComponent(filename)}`, { method: 'POST' }); + const result = await resp.json(); + if (result.error) return; + const btn = document.getElementById('archive-btn'); + btn.innerHTML = btn.innerHTML.replace( + result.archived ? 'Archive' : 'Unarchive', + result.archived ? 'Unarchive' : 'Archive' + ); +} + +// ── Add cover ────────────────────────────────────────────────────────────── + +async function uploadCover(input) { + const file = input.files[0]; + if (!file) return; + const reader = new FileReader(); + reader.onload = async (e) => { + const b64 = e.target.result.split(',')[1]; + const resp = await fetch(`/library/cover/${encodeURIComponent(filename)}`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ cover_b64: b64 }), + }); + if (resp.ok) window.location.reload(); + else alert('Cover upload failed.'); + }; + reader.readAsDataURL(file); + input.value = ''; +} + +// ── Delete ───────────────────────────────────────────────────────────────── + +document.getElementById('delete-title').textContent = title; +async function confirmDelete() { + const resp = await fetch(`/library/file/${encodeURIComponent(filename)}`, { method: 'DELETE' }); + if (resp.ok) window.location.href = '/library'; + else alert('Delete failed.'); +} + +// ── PillInput — reusable tag pill input with autocomplete ────────────────── + +class PillInput { + constructor(boxId, inputId, dropdownId) { + this.box = document.getElementById(boxId); + this.input = document.getElementById(inputId); + this.dropdown = document.getElementById(dropdownId); + this.values = []; + this.all = []; + this.ddIndex = -1; + this.box.addEventListener('click', () => this.input.focus()); + this.input.addEventListener('input', () => this._onInput()); + this.input.addEventListener('keydown', (e) => this._onKeydown(e)); + this.input.addEventListener('blur', () => setTimeout(() => this._hideDropdown(), 150)); + } + + set(values) { this.values = [...values]; this._render(); } + setSuggestions(all) { this.all = all; } + getValues() { return [...this.values]; } + + _render() { + [...this.box.querySelectorAll('.genre-tag')].forEach(t => t.remove()); + this.values.forEach((v, i) => { + const pill = document.createElement('span'); + pill.className = 'genre-tag'; + pill.innerHTML = `${v} <button class="genre-tag-x" type="button">×</button>`; + pill.querySelector('.genre-tag-x').onclick = () => { this.values.splice(i, 1); this._render(); }; + this.box.insertBefore(pill, this.input); + }); + } + + _add(v) { + v = v.trim(); + if (v && !this.values.includes(v)) { this.values.push(v); this._render(); } + this.input.value = ''; + this._hideDropdown(); + } + + _showDropdown(items) { + if (!items.length) { this.dropdown.style.display = 'none'; return; } + this.dropdown.innerHTML = items.map(g => + `<div 
class="genre-option" data-val="${g.replace(/"/g,'"')}">${g}</div>` + ).join(''); + this.dropdown.querySelectorAll('.genre-option').forEach(el => { + el.onmousedown = (e) => { e.preventDefault(); this._add(el.dataset.val); }; + }); + this.dropdown.style.display = 'block'; + this.ddIndex = -1; + } + + _hideDropdown() { + this.dropdown.style.display = 'none'; + this.ddIndex = -1; + } + + _onInput() { + const q = this.input.value.trim().toLowerCase(); + if (!q) { this._hideDropdown(); return; } + const matches = this.all.filter(g => g.toLowerCase().includes(q) && !this.values.includes(g)); + this._showDropdown(matches); + } + + _onKeydown(e) { + const opts = this.dropdown.querySelectorAll('.genre-option'); + if (e.key === 'ArrowDown') { + e.preventDefault(); + this.ddIndex = Math.min(this.ddIndex + 1, opts.length - 1); + opts.forEach((o, i) => o.classList.toggle('active', i === this.ddIndex)); + } else if (e.key === 'ArrowUp') { + e.preventDefault(); + this.ddIndex = Math.max(this.ddIndex - 1, -1); + opts.forEach((o, i) => o.classList.toggle('active', i === this.ddIndex)); + } else if (e.key === 'Enter') { + e.preventDefault(); + if (this.ddIndex >= 0 && opts[this.ddIndex]) this._add(opts[this.ddIndex].dataset.val); + else if (this.input.value.trim()) this._add(this.input.value); + } else if (e.key === 'Escape') { + this._hideDropdown(); + } + } +} + +const genreInput = new PillInput('genre-box', 'genre-input', 'genre-dropdown'); +const subgenreInput = new PillInput('subgenre-box', 'subgenre-input', 'subgenre-dropdown'); +const tagInput = new PillInput('tag-box', 'tag-input', 'tag-dropdown'); + +// ── Edit panel ───────────────────────────────────────────────────────────── + +async function openEdit() { + const [allGenres, allSubgenres, allTags] = await Promise.all([ + fetch('/api/genres?type=genre').then(r => r.json()), + fetch('/api/genres?type=subgenre').then(r => r.json()), + fetch('/api/genres?type=tag').then(r => r.json()), + ]); + genreInput.setSuggestions(allGenres); + subgenreInput.setSuggestions(allSubgenres); + tagInput.setSuggestions(allTags); + + document.getElementById('ed-title').value = BOOK.title; + document.getElementById('ed-author').value = BOOK.author; + document.getElementById('ed-publisher').value = BOOK.publisher; + document.getElementById('ed-series').value = BOOK.series; + document.getElementById('ed-series-index').value = BOOK.series_index; + document.getElementById('ed-status').value = BOOK.publication_status; + document.getElementById('ed-url').value = BOOK.source_url; + document.getElementById('ed-publish-date').value = BOOK.publish_date; + document.getElementById('ed-description').value = BOOK.description; + + genreInput.set(BOOK.genres); + subgenreInput.set(BOOK.subgenres); + tagInput.set(BOOK.tags); + + document.getElementById('edit-backdrop').classList.add('open'); + document.getElementById('edit-panel').classList.add('open'); +} + +function closeEdit() { + document.getElementById('edit-backdrop').classList.remove('open'); + document.getElementById('edit-panel').classList.remove('open'); + genreInput._hideDropdown(); + subgenreInput._hideDropdown(); + tagInput._hideDropdown(); +} + +async function saveEdit() { + const body = { + title: document.getElementById('ed-title').value, + author: document.getElementById('ed-author').value, + publisher: document.getElementById('ed-publisher').value, + series: document.getElementById('ed-series').value, + series_index: document.getElementById('ed-series-index').value, + publication_status: 
document.getElementById('ed-status').value, + source_url: document.getElementById('ed-url').value, + publish_date: document.getElementById('ed-publish-date').value, + description: document.getElementById('ed-description').value, + genres: genreInput.getValues(), + subgenres: subgenreInput.getValues(), + tags: tagInput.getValues(), + }; + const resp = await fetch(`/library/book/${encodeURIComponent(filename)}`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(body), + }); + const result = await resp.json(); + if (resp.ok && result.filename) { + window.location.href = `/library/book/${encodeURIComponent(result.filename)}`; + } else if (resp.ok) { + window.location.reload(); + } else { + alert(result.error || 'Save failed.'); + } +} diff --git a/containers/novela/static/break.png b/containers/novela/static/break.png new file mode 100644 index 0000000..f2c3823 Binary files /dev/null and b/containers/novela/static/break.png differ diff --git a/containers/novela/static/editor.css b/containers/novela/static/editor.css new file mode 100644 index 0000000..925d585 --- /dev/null +++ b/containers/novela/static/editor.css @@ -0,0 +1,206 @@ +:root { + --bg: #0f0e0c; --surface: #1a1815; --surface2: #221f1b; + --border: #2e2a24; --accent: #c8783a; --text: #e8e2d9; + --text-dim: #8a8278; --text-faint: #4a453e; + --success: #6baa6b; --danger: #c85a5a; + --radius: 6px; + --mono: 'DM Mono', monospace; + --header-h: 50px; + --panel-w: 240px; +} + +*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; } +html, body { height: 100%; background: var(--bg); color: var(--text); font-family: var(--mono); overflow: hidden; } + +/* ── Header ── */ +.editor-header { + position: fixed; top: 0; left: 0; right: 0; + height: var(--header-h); + background: var(--surface); + border-bottom: 1px solid var(--border); + display: flex; align-items: center; + padding: 0 1rem; gap: 0.75rem; + z-index: 100; +} + +.header-back { + font-size: 0.72rem; color: var(--text-dim); text-decoration: none; + display: flex; align-items: center; gap: 0.35rem; + flex-shrink: 0; transition: color 0.12s; white-space: nowrap; +} +.header-back:hover { color: var(--text); } + +.header-chapter { + flex: 1; font-size: 0.72rem; color: var(--text-faint); + text-align: center; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; +} + +.header-actions { display: flex; align-items: center; gap: 0.5rem; flex-shrink: 0; } + +.save-status { + font-size: 0.65rem; color: var(--text-faint); + min-width: 5rem; text-align: right; +} +.save-status.dirty { color: var(--accent); } +.save-status.saving { color: var(--text-faint); } +.save-status.saved { color: var(--success); } +.save-status.error { color: var(--danger); } + +.btn-save { + padding: 0.3rem 0.9rem; + background: var(--accent); border: none; border-radius: var(--radius); + font-family: var(--mono); font-size: 0.72rem; color: #fff; + cursor: pointer; transition: opacity 0.12s; +} +.btn-save:disabled { opacity: 0.3; cursor: not-allowed; } +.btn-save:not(:disabled):hover { opacity: 0.85; } + +.btn-save-all { + display: flex; align-items: center; + padding: 0.3rem 0.9rem; + background: none; border: 1px solid var(--accent); border-radius: var(--radius); + font-family: var(--mono); font-size: 0.72rem; color: var(--accent); + cursor: pointer; transition: background 0.12s; +} +.btn-save-all:hover { background: rgba(200,120,58,0.12); } + +.btn-break { + display: flex; align-items: center; gap: 0.35rem; + padding: 0.3rem 0.7rem; + background: 
none; border: 1px solid var(--border); border-radius: var(--radius); + font-family: var(--mono); font-size: 0.68rem; + color: var(--text-dim); cursor: pointer; + transition: color 0.12s, border-color 0.12s; +} +.btn-break:disabled { opacity: 0.3; cursor: not-allowed; } +.btn-break:not(:disabled):hover { color: var(--text); border-color: var(--text-faint); } + +.btn-replace { + display: flex; align-items: center; gap: 0.35rem; + padding: 0.3rem 0.7rem; + background: none; border: 1px solid var(--border); border-radius: var(--radius); + font-family: var(--mono); font-size: 0.68rem; + color: var(--text-dim); cursor: pointer; + transition: color 0.12s, border-color 0.12s; +} +.btn-replace:hover { color: var(--text); border-color: var(--text-faint); } + + +.btn-add-page, +.btn-del-page { + display: flex; align-items: center; gap: 0.35rem; + padding: 0.3rem 0.7rem; + background: none; border: 1px solid var(--border); border-radius: var(--radius); + font-family: var(--mono); font-size: 0.68rem; + color: var(--text-dim); cursor: pointer; + transition: color 0.12s, border-color 0.12s; +} +.btn-add-page:hover { color: var(--text); border-color: var(--text-faint); } +.btn-del-page { color: var(--danger); border-color: rgba(200,90,90,0.35); } +.btn-del-page:disabled, +.btn-add-page:disabled { opacity: 0.3; cursor: not-allowed; } +.btn-del-page:not(:disabled):hover { background: rgba(200,90,90,0.1); border-color: var(--danger); } + +/* ── Find & Replace modal ── */ +.modal-backdrop { + display: none; + position: fixed; inset: 0; + background: rgba(0,0,0,0.55); + z-index: 200; + align-items: center; justify-content: center; +} +.modal-backdrop.open { display: flex; } +.modal { + background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + padding: 1.5rem; + width: 420px; max-width: 90vw; + display: flex; flex-direction: column; gap: 1rem; +} +.modal-title { + font-size: 0.78rem; font-weight: 500; color: var(--text); + letter-spacing: 0.04em; +} +.modal-field { display: flex; flex-direction: column; gap: 0.35rem; } +.modal-label { font-size: 0.68rem; color: var(--text-dim); } +.modal-input { + background: var(--surface2); border: 1px solid var(--border); + border-radius: var(--radius); padding: 0.4rem 0.6rem; + font-family: var(--mono); font-size: 0.78rem; color: var(--text); + outline: none; width: 100%; +} +.modal-input:focus { border-color: var(--accent); } +.modal-options { display: flex; gap: 1.5rem; } +.modal-opt { + display: flex; align-items: center; gap: 0.4rem; + font-size: 0.72rem; color: var(--text-dim); cursor: pointer; +} +.modal-opt input { accent-color: var(--accent); cursor: pointer; } +.modal-progress { + font-size: 0.72rem; color: var(--text-faint); + min-height: 1.2rem; +} +.modal-progress.ok { color: var(--success); } +.modal-progress.error { color: var(--danger); } +.modal-actions { display: flex; justify-content: flex-end; gap: 0.6rem; } + +.btn-primary { + padding: 0.35rem 1rem; + background: var(--accent); border: none; border-radius: var(--radius); + font-family: var(--mono); font-size: 0.72rem; color: #fff; + cursor: pointer; transition: opacity 0.12s; +} +.btn-primary:disabled { opacity: 0.3; cursor: not-allowed; } +.btn-primary:not(:disabled):hover { opacity: 0.85; } +.btn-secondary { + padding: 0.35rem 0.9rem; + background: none; border: 1px solid var(--border); border-radius: var(--radius); + font-family: var(--mono); font-size: 0.72rem; color: var(--text-dim); + cursor: pointer; transition: color 0.12s, border-color 0.12s; +} 
+.btn-secondary:hover { color: var(--text); border-color: var(--text-faint); } + +/* ── Two-panel layout ── */ +.editor-body { + position: fixed; + top: var(--header-h); left: 0; right: 0; bottom: 0; + display: flex; +} + +/* ── Chapter panel ── */ +.chapter-panel { + width: var(--panel-w); flex-shrink: 0; + background: var(--surface); + border-right: 1px solid var(--border); + display: flex; flex-direction: column; + overflow: hidden; +} + +.chapter-panel-title { + padding: 0.75rem 1rem 0.55rem; + font-size: 0.65rem; letter-spacing: 0.1em; text-transform: uppercase; + color: var(--accent); flex-shrink: 0; + border-bottom: 1px solid var(--border); +} + +.chapter-list { flex: 1; overflow-y: auto; } + +.chapter-item { + padding: 0.55rem 0.75rem 0.55rem 1rem; + font-size: 0.72rem; color: var(--text-dim); + cursor: pointer; border-bottom: 1px solid var(--border); + transition: background 0.1s, color 0.1s; + display: flex; align-items: center; gap: 0.4rem; + overflow: hidden; +} +.chapter-item:hover { background: var(--surface2); color: var(--text); } +.chapter-item.active { background: var(--surface2); color: var(--text); } +.chapter-item .dirty-dot { + width: 6px; height: 6px; border-radius: 50%; + background: var(--accent); flex-shrink: 0; +} +.chapter-item-title { overflow: hidden; text-overflow: ellipsis; white-space: nowrap; } + +/* ── Monaco container ── */ +.editor-pane { flex: 1; overflow: hidden; } diff --git a/containers/novela/static/editor.js b/containers/novela/static/editor.js new file mode 100644 index 0000000..fd4e28c --- /dev/null +++ b/containers/novela/static/editor.js @@ -0,0 +1,430 @@ +require.config({ paths: { vs: 'https://cdn.jsdelivr.net/npm/monaco-editor@0.45.0/min/vs' } }); + +const { filename } = EDITOR; + +let editor = null; +let chapters = []; +let currentIndex = -1; +let dirty = new Set(); // indices with unsaved changes +let pendingContent = new Map(); // index -> modified content not yet saved +let loadingChapter = false; // suppress dirty events during setValue +let saving = false; + +// ── Init Monaco ─────────────────────────────────────────────────────────────── + +require(['vs/editor/editor.main'], function () { + editor = monaco.editor.create(document.getElementById('editor-pane'), { + language: 'xml', + theme: 'vs-dark', + wordWrap: 'on', + minimap: { enabled: true }, + fontSize: 13, + fontFamily: "'DM Mono', monospace", + lineNumbers: 'on', + scrollBeyondLastLine: false, + automaticLayout: true, + }); + + editor.onDidChangeModelContent(() => { + if (loadingChapter) return; + if (currentIndex >= 0) { + dirty.add(currentIndex); + renderChapterList(); + setStatus('dirty', 'Unsaved changes'); + document.getElementById('btn-save').disabled = false; + updateSaveAll(); + } + }); + + // Ctrl+S / Cmd+S + editor.addCommand(monaco.KeyMod.CtrlCmd | monaco.KeyCode.KeyS, saveChapter); + + loadChapterList(); +}); + +// ── Chapter list ────────────────────────────────────────────────────────────── + +async function loadChapterList(targetIndex = 0) { + const resp = await fetch(`/library/chapters/${encodeURIComponent(filename)}`); + if (!resp.ok) { + setStatus('error', 'Failed to load chapters'); + return; + } + chapters = await resp.json(); + if (!Array.isArray(chapters)) chapters = []; + + if (chapters.length === 0) { + currentIndex = -1; + dirty.clear(); + pendingContent.clear(); + renderChapterList(); + document.getElementById('header-chapter').textContent = 'No chapters'; + document.getElementById('btn-save').disabled = true; + 
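+    // Empty book: blank the editor and keep all chapter-scoped actions disabled
+    // until a page has been added.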
document.getElementById('btn-break').disabled = true; + document.getElementById('btn-del-page').disabled = true; + if (editor) { loadingChapter = true; editor.setValue(''); loadingChapter = false; } + updateSaveAll(); + return; + } + + const next = Math.min(Math.max(targetIndex, 0), chapters.length - 1); + renderChapterList(); + await loadChapter(next); +} + +function renderChapterList() { + const el = document.getElementById('chapter-list'); + el.innerHTML = ''; + chapters.forEach((ch, i) => { + const item = document.createElement('div'); + item.className = 'chapter-item' + (i === currentIndex ? ' active' : ''); + item.innerHTML = + (dirty.has(i) ? '<span class="dirty-dot"></span>' : '') + + `<span class="chapter-item-title">${esc(ch.title)}</span>`; + item.onclick = () => switchChapter(i); + el.appendChild(item); + }); +} + +// ── Load / switch ───────────────────────────────────────────────────────────── + +async function switchChapter(index) { + if (index === currentIndex) return; + // Preserve current editor content in pending cache before switching (never lose changes) + if (dirty.has(currentIndex) && editor) { + pendingContent.set(currentIndex, editor.getValue()); + } + loadChapter(index); +} + +async function loadChapter(index) { + setStatus('', ''); + document.getElementById('btn-save').disabled = true; + document.getElementById('btn-break').disabled = true; + document.getElementById('btn-del-page').disabled = true; + document.getElementById('header-chapter').textContent = 'Loading…'; + + let content, title; + + if (pendingContent.has(index)) { + content = pendingContent.get(index); + title = chapters[index]?.title ?? ''; + } else { + const resp = await fetch(`/api/edit/chapter/${index}/${encodeURIComponent(filename)}`); + if (!resp.ok) { setStatus('error', 'Load failed'); return; } + const data = await resp.json(); + content = data.content; + title = data.title; + } + + currentIndex = index; + + loadingChapter = true; + editor.setValue(content); + editor.setScrollTop(0); + loadingChapter = false; + + // Restore dirty state based on whether we loaded from pending cache + if (dirty.has(index)) { + document.getElementById('btn-save').disabled = false; + setStatus('dirty', 'Unsaved changes'); + } else { + document.getElementById('btn-save').disabled = true; + setStatus('', ''); + } + + renderChapterList(); + document.getElementById('header-chapter').textContent = title; + document.getElementById('btn-break').disabled = false; + document.getElementById('btn-del-page').disabled = chapters.length <= 1; + updateSaveAll(); +} + +// ── Save (current chapter) ──────────────────────────────────────────────────── + +async function saveChapter() { + if (currentIndex < 0 || saving) return; + saving = true; + document.getElementById('btn-save').disabled = true; + setStatus('saving', 'Saving…'); + + try { + const resp = await fetch( + `/api/edit/chapter/${currentIndex}/${encodeURIComponent(filename)}`, + { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ content: editor.getValue() }), + } + ); + const data = await resp.json(); + if (data.ok) { + dirty.delete(currentIndex); + pendingContent.delete(currentIndex); + renderChapterList(); + setStatus('saved', 'Saved'); + setTimeout(() => setStatus('', ''), 2000); + updateSaveAll(); + } else { + setStatus('error', data.error || 'Save failed'); + document.getElementById('btn-save').disabled = false; + } + } catch { + setStatus('error', 'Save failed'); + document.getElementById('btn-save').disabled = false; + 
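+    // The chapter stays in the dirty set on failure, so nothing is lost and Save
+    // can simply be retried; the finally block only clears the in-flight flag.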
} finally { + saving = false; + } +} + +// ── Save all pending ────────────────────────────────────────────────────────── + +async function saveAllChapters() { + if (saving) return; + saving = true; + const btn = document.getElementById('btn-save-all'); + if (btn) btn.disabled = true; + setStatus('saving', 'Saving all…'); + + // Flush current editor content into pendingContent first + if (currentIndex >= 0 && dirty.has(currentIndex)) { + pendingContent.set(currentIndex, editor.getValue()); + } + + const indices = [...dirty]; + for (const i of indices) { + const content = pendingContent.has(i) + ? pendingContent.get(i) + : (i === currentIndex ? editor.getValue() : null); + if (!content) continue; + + try { + const resp = await fetch( + `/api/edit/chapter/${i}/${encodeURIComponent(filename)}`, + { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ content }), + } + ); + const data = await resp.json(); + if (data.ok) { + dirty.delete(i); + pendingContent.delete(i); + } + } catch { + setStatus('error', `Save failed on chapter ${i + 1}`); + saving = false; + updateSaveAll(); + return; + } + } + + // Reload current chapter display to reflect saved state + if (currentIndex >= 0) { + loadingChapter = true; + editor.setValue(editor.getValue()); // no-op, just clears dirty for display + loadingChapter = false; + document.getElementById('btn-save').disabled = true; + } + + renderChapterList(); + setStatus('saved', 'All saved'); + setTimeout(() => setStatus('', ''), 2000); + saving = false; + updateSaveAll(); +} + +function updateSaveAll() { + const btn = document.getElementById('btn-save-all'); + if (!btn) return; + const count = dirty.size; + if (count > 1) { + btn.style.display = 'flex'; + btn.textContent = `Save all (${count})`; + } else { + btn.style.display = 'none'; + } +} + +// ── Insert break ────────────────────────────────────────────────────────────── + +function insertBreak() { + if (!editor || currentIndex < 0) return; + const pos = editor.getPosition(); + editor.executeEdits('insert-break', [{ + range: new monaco.Range(pos.lineNumber, pos.column, pos.lineNumber, pos.column), + text: '\n<center><img src="../Images/break.png" style="height:15px;"/></center>\n', + forceMoveMarkers: true, + }]); + editor.focus(); +} + + + +// ── Add / delete chapter ───────────────────────────────────────────────────── + +async function addChapter() { + if (saving) return; + if (dirty.size > 0) { + alert('Save pending changes before adding a page.'); + return; + } + const title = prompt('Title for new page:', `New chapter ${Math.max(chapters.length + 1, 1)}`); + if (title === null) return; + + const resp = await fetch(`/api/edit/chapter/add/${encodeURIComponent(filename)}`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ title, after_index: currentIndex }), + }); + const data = await resp.json(); + if (!resp.ok || !data.ok) { + setStatus('error', data.error || 'Add page failed'); + return; + } + + dirty.clear(); + pendingContent.clear(); + await loadChapterList(data.index ?? 
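+  // data.index is assumed to be the position of the newly created page; if the
+  // endpoint does not return it, fall back to the slot after the current chapter.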
Math.max(currentIndex + 1, 0)); + setStatus('saved', 'Page added'); + setTimeout(() => setStatus('', ''), 1500); +} + +async function deleteChapter() { + if (saving || currentIndex < 0) return; + if (chapters.length <= 1) { + alert('Cannot delete the last page.'); + return; + } + if (dirty.size > 0) { + alert('Save pending changes before deleting a page.'); + return; + } + const chTitle = chapters[currentIndex]?.title || `chapter ${currentIndex + 1}`; + if (!confirm(`Delete page "${chTitle}"?`)) return; + + const resp = await fetch(`/api/edit/chapter/${currentIndex}/${encodeURIComponent(filename)}`, { + method: 'DELETE', + }); + const data = await resp.json(); + if (!resp.ok || !data.ok) { + setStatus('error', data.error || 'Delete page failed'); + return; + } + + dirty.clear(); + pendingContent.clear(); + await loadChapterList(data.index ?? Math.max(currentIndex - 1, 0)); + setStatus('saved', 'Page deleted'); + setTimeout(() => setStatus('', ''), 1500); +} + +// ── Find & Replace all chapters ─────────────────────────────────────────────── + +function openReplaceModal() { + document.getElementById('replace-modal').classList.add('open'); + document.getElementById('rp-search').focus(); + document.getElementById('rp-progress').textContent = ''; + document.getElementById('rp-progress').className = 'modal-progress'; + document.getElementById('rp-run').disabled = false; +} + +function closeReplaceModal() { + document.getElementById('replace-modal').classList.remove('open'); +} + +document.addEventListener('keydown', e => { + if (e.key === 'Escape') closeReplaceModal(); +}); + +async function replaceInAllChapters() { + const searchVal = document.getElementById('rp-search').value; + if (!searchVal) return; + const replaceVal = document.getElementById('rp-replace').value; + const useRegex = document.getElementById('rp-regex').checked; + const caseSens = document.getElementById('rp-case').checked; + + const runBtn = document.getElementById('rp-run'); + const prog = document.getElementById('rp-progress'); + runBtn.disabled = true; + + let pattern; + try { + pattern = useRegex + ? new RegExp(searchVal, caseSens ? 'g' : 'gi') + : new RegExp(searchVal.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), caseSens ? 
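+        // Plain-text mode: regex metacharacters are escaped above so the term is
+        // matched literally; 'g' replaces every occurrence, 'gi' also ignores case.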
'g' : 'gi'); + } catch (e) { + prog.className = 'modal-progress error'; + prog.textContent = 'Invalid regex: ' + e.message; + runBtn.disabled = false; + return; + } + + let totalOccurrences = 0; + let chaptersChanged = 0; + + // Flush current editor content into pending before we start + if (currentIndex >= 0) { + pendingContent.set(currentIndex, editor.getValue()); + } + + for (let i = 0; i < chapters.length; i++) { + prog.className = 'modal-progress'; + prog.textContent = `Checking chapter ${i + 1} / ${chapters.length}…`; + + let original; + if (pendingContent.has(i)) { + original = pendingContent.get(i); + } else { + try { + const resp = await fetch(`/api/edit/chapter/${i}/${encodeURIComponent(filename)}`); + if (!resp.ok) continue; + const data = await resp.json(); + original = data.content; + } catch { + continue; + } + } + + // Count occurrences + let count = 0; + const updated = original.replace(pattern, m => { count++; return replaceVal; }); + if (count === 0) continue; + + totalOccurrences += count; + chaptersChanged++; + pendingContent.set(i, updated); + dirty.add(i); + } + + // Reload current chapter from pending cache if it was changed + if (dirty.has(currentIndex) && pendingContent.has(currentIndex)) { + loadingChapter = true; + editor.setValue(pendingContent.get(currentIndex)); + loadingChapter = false; + document.getElementById('btn-save').disabled = false; + setStatus('dirty', 'Unsaved changes'); + } + + renderChapterList(); + updateSaveAll(); + + prog.className = totalOccurrences > 0 ? 'modal-progress ok' : 'modal-progress'; + prog.textContent = totalOccurrences > 0 + ? `${totalOccurrences} replacement${totalOccurrences !== 1 ? 's' : ''} in ${chaptersChanged} chapter${chaptersChanged !== 1 ? 's' : ''} — not saved yet.` + : 'No matches found.'; + runBtn.disabled = false; +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +function setStatus(cls, text) { + const el = document.getElementById('save-status'); + el.className = 'save-status' + (cls ? ' ' + cls : ''); + el.textContent = text; +} + +function esc(s) { + return String(s ?? 
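+    // HTML-escaping helper for text dropped into innerHTML (chapter titles); the
+    // replacements are meant to produce the entities &amp;, &lt; and &gt;.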
'').replace(/&/g, '&').replace(/</g, '<').replace(/>/g, '>'); +} diff --git a/containers/novela/static/epub-style.css b/containers/novela/static/epub-style.css new file mode 100644 index 0000000..69dc2d2 --- /dev/null +++ b/containers/novela/static/epub-style.css @@ -0,0 +1,254 @@ +/* This defines styles and classes used in the book */ +@page { + margin: 10px; +} +html, body, div, span, applet, object, iframe, h1, h2, h3, h4, h5, h6, p, +blockquote, pre, a, abbr, acronym, address, big, cite, code, del, dfn, em, img, +ins, kbd, q, s, samp, small, strike, strong, sub, sup, tt, var, b, u, i, center, +fieldset, form, label, legend, table, caption, tbody, tfoot, thead, tr, th, td, +article, aside, canvas, details, embed, figure, figcaption, footer, header, +hgroup, menu, nav, output, ruby, section, summary, time, mark, audio, video, ol, +ul, li, dl, dt, dd { + margin: 0; + padding: 0; + border: 0; + font-size: 100%; + vertical-align: baseline; +} +html { + line-height: 1.2; + font-family: Georgia, serif; + color: #1a1a1a; +} +p { + text-indent: 0; + margin: 1em 0; + widows: 2; + orphans: 2; +} +a, a:visited { + color: #1a1a1a; +} +img { + max-width: 100%; +} +sup { + vertical-align: super; + font-size: smaller; +} +sub { + vertical-align: sub; + font-size: smaller; +} +h1 { + margin: 3em 0 0 0; + font-size: 2em; + page-break-before: always; + line-height: 150%; +} +h2 { + margin: 1.5em 0 0 0; + font-size: 1.5em; + line-height: 135%; +} +h3 { + margin: 1.3em 0 0 0; + font-size: 1.3em; +} +h4 { + margin: 1.2em 0 0 0; + font-size: 1.2em; +} +h5 { + margin: 1.1em 0 0 0; + font-size: 1.1em; +} +h6 { + font-size: 1em; +} +h1, h2, h3, h4, h5, h6 { + text-indent: 0; + text-align: left; + font-weight: bold; + page-break-after: avoid; + page-break-inside: avoid; +} + +ol, ul { + margin: 1em 0 0 1.7em; +} +li > ol, li > ul { + margin-top: 0; +} +blockquote { + margin: 1em 0 1em 1.7em; +} +code { + font-family: Menlo, Monaco, 'Lucida Console', Consolas, monospace; + font-size: 85%; + margin: 0; + hyphens: manual; +} +pre { + margin: 1em 0; + overflow: auto; +} +pre code { + padding: 0; + overflow: visible; + overflow-wrap: normal; +} +.sourceCode { + background-color: transparent; + overflow: visible; +} +hr { + background-color: #1a1a1a; + border: none; + height: 1px; + margin: 1em 0; +} +table { + margin: 1em 0; + border-collapse: collapse; + width: 100%; + overflow-x: auto; + display: block; +} +table caption { + margin-bottom: 0.75em; +} +tbody { + margin-top: 0.5em; + border-top: 1px solid #1a1a1a; + border-bottom: 1px solid #1a1a1a; +} +th, td { + padding: 0.25em 0.5em 0.25em 0.5em; +} +th { + border-top: 1px solid #1a1a1a; +} +header { + margin-bottom: 4em; + text-align: center; +} +#TOC li { + list-style: none; +} +#TOC ul { + padding-left: 1.3em; +} +#TOC > ul { + padding-left: 0; +} +#TOC a:not(:hover) { + text-decoration: none; +} +code { + white-space: pre-wrap; +} +span.smallcaps { + font-variant: small-caps; +} + +/* This is the most compatible CSS, but it only allows two columns: */ +div.column { + display: inline-block; + vertical-align: top; + width: 50%; +} + +div.hanging-indent { + margin-left: 1.5em; + text-indent: -1.5em; +} +ul.task-list { + list-style: none; +} +ul.task-list li input[type="checkbox"] { + width: 0.8em; + margin: 0 0.8em 0.2em -1.6em; + vertical-align: middle; +} +.display.math { + display: block; + text-align: center; + margin: 0.5rem auto; +} + +/* For title, author, and date on the cover page */ +h1.title { } +p.author { } +p.date { } + +nav#toc ol, nav#landmarks ol 
{ + padding: 0; + margin-left: 1em; +} +nav#toc ol li, nav#landmarks ol li { + list-style-type: none; + margin: 0; + padding: 0; +} +a.footnote-ref { + vertical-align: super; +} +em, em em em, em em em em em { + font-style: italic; +} +em em, em em em em { + font-style: normal; +} +q { + quotes: "“" "”" "‘" "’"; +} +@media screen { + .sourceCode { + overflow: visible !important; + white-space: pre-wrap !important; + } +} + +/* ================================================= */ +/* Custom colors for subheadings and chat (Kavita) */ +/* <span class="subheading">Tussentitel</span> */ +/* <span class="chat">“Dit is een chatregel.”</span> */ +/* ================================================= */ + +/* bestaande regels */ +span.subheading { + color: rgb(224, 62, 45) !important; + font-weight: bold !important; +} + +span.chat { + color: rgb(230, 126, 35) !important; +} + +/* nieuwe regels voor vet binnen je spans */ +span.subheading strong, +span.subheading b { + color: rgb(224, 62, 45) !important; +} + +span.chat strong, +span.chat b { + color: rgb(230, 126, 35) !important; +} + +/* eventueel ook voor dark mode */ +@media (prefers-color-scheme: dark) { + span.subheading, + span.subheading strong, + span.subheading b { + color: rgb(241, 90, 76) !important; + } + + span.chat, + span.chat strong, + span.chat b { + color: rgb(243, 156, 18) !important; + } +} + diff --git a/containers/novela/static/library.css b/containers/novela/static/library.css new file mode 100644 index 0000000..7dd7a60 --- /dev/null +++ b/containers/novela/static/library.css @@ -0,0 +1,456 @@ +/* ── Novela — Library page styles ─────────────────────────────────────── */ + +:root { + --bg: #0f0e0c; + --surface: #1a1815; + --surface2: #221f1b; + --border: #2e2a24; + --accent: #c8783a; + --accent2: #e8a063; + --text: #e8e2d9; + --text-dim: #8a8278; + --text-faint: #4a453e; + --success: #6baa6b; + --warning: #c8a03a; + --error: #c85a3a; + --radius: 6px; + --sidebar: 220px; + --mono: 'DM Mono', monospace; + --serif: 'Libre Baskerville', Georgia, serif; +} + +*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; } + +html, body { + height: 100%; + background: var(--bg); + color: var(--text); + font-family: var(--serif); +} + +/* ── Main content ───────────────────────────────────────────────── */ + +.main { + margin-left: var(--sidebar); + min-height: 100vh; + padding: 2rem 2.5rem 4rem; +} + +.main-header { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 1.75rem; +} + +.main-title { + font-family: var(--mono); + font-size: 0.7rem; + letter-spacing: 0.12em; + text-transform: uppercase; + color: var(--accent); +} + +.empty { + text-align: center; + color: var(--text-faint); + font-family: var(--mono); + font-size: 0.82rem; + padding: 4rem 2rem; +} + +.import-dropzone { + border: 1px dashed var(--border); + background: rgba(34, 31, 27, 0.45); + border-radius: var(--radius); + padding: 0.9rem 1rem; + margin-bottom: 1.1rem; + cursor: pointer; + transition: border-color 0.15s, background 0.15s; +} +.import-dropzone:hover { border-color: var(--accent); } +.import-dropzone.dragover { + border-color: var(--accent2); + background: rgba(200, 120, 58, 0.12); +} +.import-dropzone.uploading { + opacity: 0.8; + cursor: progress; +} +.import-title { + font-family: var(--mono); + font-size: 0.72rem; + text-transform: uppercase; + letter-spacing: 0.08em; + color: var(--accent2); +} +.import-sub { + margin-top: 0.25rem; + font-family: var(--mono); + font-size: 0.68rem; + color: 
var(--text-dim); +} + +/* ── Cover grid ─────────────────────────────────────────────────── */ + +.cover-grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(150px, 1fr)); + gap: 1.5rem; +} + +.book-card { + display: flex; + flex-direction: column; + cursor: pointer; +} + +.cover-wrap { + position: relative; + width: 100%; + aspect-ratio: 2 / 3; + border-radius: var(--radius); + overflow: hidden; + background: var(--surface2); +} + +.cover-img { + width: 100%; + height: 100%; + object-fit: cover; + display: block; +} + +.cover-canvas { + width: 100%; + height: 100%; + display: block; +} + +/* Badge: status top-right */ +.badge-status { + position: absolute; + top: 0.4rem; + right: 0.4rem; + width: 22px; + height: 22px; + border-radius: 50%; + display: flex; + align-items: center; + justify-content: center; + z-index: 2; +} +.badge-complete { background: rgba(107,170,107,0.18); color: var(--success); } +.badge-ongoing { background: rgba(200,160,58,0.18); color: var(--warning); } +.badge-hiatus { background: rgba(200,160,58,0.18); color: var(--warning); } + +/* Star: want-to-read top-left */ +.btn-star { + position: absolute; + top: 0.35rem; + left: 0.35rem; + width: 22px; + height: 22px; + border: none; + background: rgba(15,14,12,0.6); + border-radius: 50%; + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + color: var(--text-faint); + transition: color 0.15s, background 0.15s; + padding: 0; + z-index: 2; +} +.btn-star:hover { color: var(--warning); background: rgba(15,14,12,0.8); } +.btn-star.starred { color: var(--warning); } + +/* Book info below cover */ +.book-info { padding: 0.5rem 0.2rem 0; } +.book-title { + font-size: 0.78rem; + font-weight: 700; + color: var(--text); + line-height: 1.3; + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + overflow: hidden; +} +.book-author { + font-family: var(--mono); + font-size: 0.65rem; + color: var(--text-dim); + margin-top: 0.2rem; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} +.book-series { + font-family: var(--mono); + font-size: 0.6rem; + color: var(--text-dim); + margin-top: 0.15rem; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} +.series-index { color: var(--accent2); } + +/* Read count pill */ +.read-pill { + position: absolute; bottom: 0.35rem; right: 0.35rem; + background: rgba(200,120,58,0.88); color: #0f0e0c; + font-family: var(--mono); font-size: 0.6rem; font-weight: 500; + padding: 0.1rem 0.38rem; border-radius: 3px; z-index: 2; pointer-events: none; +} + +/* Reading progress mini bar at bottom of cover */ +.progress-mini { + position: absolute; bottom: 0; left: 0; right: 0; + height: 3px; z-index: 2; pointer-events: none; + background: rgba(200,120,58,0.25); +} +.progress-mini-fill { height: 100%; background: var(--accent); } + +/* ── Dialogs ─────────────────────────────────────────────────────── */ + +.overlay { + display: none; + position: fixed; + inset: 0; + background: rgba(0,0,0,0.65); + backdrop-filter: blur(2px); + align-items: center; + justify-content: center; + z-index: 100; +} +.overlay.visible { display: flex; } + +.dialog { + background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + padding: 2rem; + max-width: 420px; + width: 90%; +} +.dialog-title { + font-family: var(--mono); + font-size: 0.7rem; + text-transform: uppercase; + letter-spacing: 0.1em; + margin-bottom: 0.75rem; +} +.dialog-title.del { color: var(--error); } 
+.dialog-title.cover { color: var(--warning); } +.dialog p { font-size: 0.88rem; color: var(--text-dim); margin-bottom: 1.25rem; } +.dialog p strong { color: var(--text); } +.dialog-actions { display: flex; gap: 0.75rem; justify-content: flex-end; } + +.btn { + display: inline-flex; + align-items: center; + gap: 0.35rem; + padding: 0.35rem 0.75rem; + border-radius: var(--radius); + font-family: var(--mono); + font-size: 0.75rem; + font-weight: 500; + cursor: pointer; + border: none; + transition: background 0.15s, color 0.15s; +} +.btn-cancel { background: var(--surface2); color: var(--text-dim); border: 1px solid var(--border); } +.btn-cancel:hover { color: var(--text); } +.btn-confirm-del { background: rgba(200,90,58,0.15); color: var(--error); border: 1px solid rgba(200,90,58,0.3); } +.btn-confirm-del:hover { background: rgba(200,90,58,0.28); } +.btn-confirm-cover { background: rgba(200,160,58,0.15); color: var(--warning); border: 1px solid rgba(200,160,58,0.3); } +.btn-confirm-cover:hover { background: rgba(200,160,58,0.28); } +.btn-confirm-cover:disabled { opacity: 0.4; cursor: not-allowed; } + +.cover-upload-area { + border: 1px dashed var(--border); + border-radius: var(--radius); + padding: 1rem; + text-align: center; + margin-bottom: 1rem; + cursor: pointer; + transition: border-color 0.15s; + position: relative; +} +.cover-upload-area:hover { border-color: var(--warning); } +.cover-upload-area input[type="file"] { position: absolute; inset: 0; opacity: 0; cursor: pointer; width: 100%; } +.cover-upload-label { font-family: var(--mono); font-size: 0.78rem; color: var(--text-dim); pointer-events: none; } +.cover-upload-label span { color: var(--warning); } +.cover-preview { + display: none; max-height: 160px; max-width: 110px; + border-radius: var(--radius); margin: 0 auto 0.5rem; object-fit: contain; +} +.cover-preview.visible { display: block; } + +.spinner-inline { + display: none; width: 12px; height: 12px; + border: 2px solid var(--text-faint); border-top-color: var(--accent); + border-radius: 50%; animation: spin 0.7s linear infinite; +} +@keyframes spin { to { transform: rotate(360deg); } } + +/* ── Series view ─────────────────────────────────────────────────────────── */ + +.series-card { display: flex; flex-direction: column; cursor: pointer; } + +.series-cover-wrap { + position: relative; + width: calc(100% - 12px); + aspect-ratio: 2/3; + overflow: visible; + margin-left: 2px; + margin-top: 10px; +} + +.sci { + position: absolute; inset: 0; + border-radius: var(--radius); overflow: hidden; + background: var(--surface2); border: 1px solid var(--border); +} +.sci canvas, .sci img { width: 100%; height: 100%; object-fit: cover; display: block; } +.sci-3 { transform: translate(10px, -10px) rotate(5deg); z-index: 1; } +.sci-2 { transform: translate(5px, -5px) rotate(2.5deg); z-index: 2; } +.sci-1 { z-index: 3; } + +.series-info { padding: 0.65rem 0.2rem 0; } +.series-name { + font-size: 0.78rem; font-weight: 700; color: var(--text); line-height: 1.3; + display: -webkit-box; -webkit-line-clamp: 2; -webkit-box-orient: vertical; overflow: hidden; +} +.series-meta { + font-family: var(--mono); font-size: 0.65rem; color: var(--text-dim); + margin-top: 0.3rem; display: flex; align-items: center; gap: 0.5rem; flex-wrap: wrap; +} +.series-dots { display: flex; gap: 0.22rem; align-items: center; } +.series-dot { width: 7px; height: 7px; border-radius: 50%; } +.dot-read { background: var(--success); } +.dot-reading { background: var(--accent); } +.dot-unread { background: var(--border); 
} + +/* ── Series detail ───────────────────────────────────────────────────────── */ + +.detail-header { display: flex; align-items: center; gap: 0.75rem; margin-bottom: 1.75rem; } +.btn-back { + display: inline-flex; align-items: center; gap: 0.35rem; + padding: 0.3rem 0.65rem; border-radius: var(--radius); + font-family: var(--mono); font-size: 0.7rem; + background: var(--surface2); color: var(--text-dim); + border: 1px solid var(--border); cursor: pointer; + transition: color 0.12s, border-color 0.12s; +} +.btn-back:hover { color: var(--text); border-color: var(--text-faint); } + +.series-slot { display: flex; flex-direction: column; } +.slot-index-label { + font-family: var(--mono); font-size: 0.6rem; color: var(--accent2); + text-align: center; margin-bottom: 0.2rem; letter-spacing: 0.04em; +} +.slot-missing .cover-wrap { border: 2px dashed var(--border); display: flex; align-items: center; justify-content: center; } +.slot-missing-inner { + display: flex; flex-direction: column; align-items: center; gap: 0.4rem; + color: var(--text-faint); pointer-events: none; user-select: none; +} +.slot-missing-inner svg { opacity: 0.5; } +.slot-missing-inner span { font-family: var(--mono); font-size: 0.65rem; } + +/* ── Authors list ─────────────────────────────────────────────────────────── */ + +.author-list { display: flex; flex-direction: column; gap: 0.3rem; } +.author-item { + display: flex; align-items: center; gap: 0.75rem; + padding: 0.55rem 0.75rem; border-radius: var(--radius); + cursor: pointer; border: 1px solid var(--border); + background: var(--surface); transition: background 0.12s, border-color 0.12s; +} +.author-item:hover { background: var(--surface2); border-color: var(--text-faint); } +.author-avatar { + width: 32px; height: 32px; border-radius: 50%; + display: flex; align-items: center; justify-content: center; + font-family: var(--mono); font-size: 0.72rem; font-weight: 700; + flex-shrink: 0; color: #0f0e0c; +} +.author-name { flex: 1; font-size: 0.82rem; color: var(--text); } +.author-count { font-family: var(--mono); font-size: 0.65rem; color: var(--text-dim); white-space: nowrap; } +.author-chevron { color: var(--text-faint); margin-left: 0.25rem; } + +/* ── Search bar ──────────────────────────────────────────────────────── */ + +.search-wrap { position: relative; display: flex; align-items: center; } +.search-icon { position: absolute; left: 0.5rem; color: var(--text-faint); pointer-events: none; } +.search-input { + background: var(--surface); border: 1px solid var(--border); + border-radius: var(--radius); color: var(--text); + font-family: var(--mono); font-size: 0.78rem; + padding: 0.4rem 1.8rem 0.4rem 2rem; + outline: none; width: 220px; + transition: border-color 0.15s, width 0.2s; +} +.search-input:focus { border-color: var(--accent); width: 280px; } +.search-input::placeholder { color: var(--text-faint); } +.search-clear { + position: absolute; right: 0.4rem; + background: none; border: none; color: var(--text-faint); + cursor: pointer; font-size: 1rem; line-height: 1; padding: 0 0.1rem; +} +.search-clear:hover { color: var(--text-dim); } + +/* ── Responsive ──────────────────────────────────────────────────────── */ + +@media (max-width: 768px) { + .main { + margin-left: 0; + padding: 4rem 1rem 4rem; + } + + .main-header { + flex-wrap: wrap; + gap: 0.75rem; + margin-bottom: 1.25rem; + } + + .cover-grid { + grid-template-columns: repeat(auto-fill, minmax(130px, 1fr)); + gap: 1rem; + } + + .search-input { width: 100%; } + .search-input:focus { width: 100%; } + 
.search-wrap { flex: 1; min-width: 0; } + + .author-item { padding: 0.5rem 0.6rem; } +} + + +.publishers-wrap { + display: flex; + flex-direction: column; + gap: 0.9rem; +} + +.publisher-missing-wrap { + border: 1px solid rgba(200, 120, 58, 0.28); + border-radius: var(--radius); + overflow: hidden; +} + +.publisher-missing-item { + background: rgba(200, 120, 58, 0.08); +} + +.publisher-divider { + font-family: var(--mono); + font-size: 0.66rem; + letter-spacing: 0.1em; + text-transform: uppercase; + color: var(--text-dim); + border-top: 1px solid var(--border); + padding-top: 0.8rem; +} diff --git a/containers/novela/static/library.js b/containers/novela/static/library.js new file mode 100644 index 0000000..aff079a --- /dev/null +++ b/containers/novela/static/library.js @@ -0,0 +1,979 @@ +/* ── Novela — Library page script ─────────────────────────────────────── */ + +let allBooks = []; +let currentView = 'all'; +let currentParam = null; +let pendingDelete = null; +let coverTargetFilename = null; +let coverB64 = null; +let importInProgress = false; +const MISSING_PUBLISHER_KEY = '__missing__'; +const MISSING_PUBLISHER_LABEL = 'No publisher'; + + +// ── Placeholder cover generation ─────────────────────────────────────────── + +function strHash(s) { + let h = 0; + for (let i = 0; i < s.length; i++) h = (Math.imul(31, h) + s.charCodeAt(i)) | 0; + return Math.abs(h); +} + +const COVER_PALETTES = [ + ['#1a2a3a', '#4a8caa'], + ['#2a1a1a', '#aa4a4a'], + ['#1a2a1a', '#4aaa6a'], + ['#2a1a2a', '#8a4aaa'], + ['#2a2a1a', '#aaa04a'], + ['#1a2a2a', '#4aaa9a'], + ['#2a1a14', '#c8783a'], + ['#141a2a', '#5a78c8'], +]; + +function makePlaceholderCover(canvas, title, author) { + const w = canvas.width = canvas.offsetWidth || 150; + const h = canvas.height = canvas.offsetHeight || 225; + const ctx = canvas.getContext('2d'); + + const [bg, fg] = COVER_PALETTES[strHash(title) % COVER_PALETTES.length]; + ctx.fillStyle = bg; + ctx.fillRect(0, 0, w, h); + + ctx.fillStyle = fg; + ctx.globalAlpha = 0.15; + ctx.fillRect(0, 0, w, h * 0.08); + ctx.globalAlpha = 1; + + ctx.fillStyle = fg; + ctx.fillRect(w * 0.12, h * 0.12, w * 0.04, h * 0.55); + + ctx.fillStyle = '#e8e2d9'; + ctx.font = `bold ${Math.round(w * 0.105)}px 'Libre Baskerville', Georgia, serif`; + ctx.textAlign = 'center'; + wrapText(ctx, title, w * 0.55, h * 0.28, w * 0.72, Math.round(w * 0.12)); + + ctx.fillStyle = fg; + ctx.font = `${Math.round(w * 0.075)}px 'DM Mono', monospace`; + ctx.globalAlpha = 0.85; + ctx.fillText(truncate(author, 18), w * 0.55, h * 0.86); + ctx.globalAlpha = 1; +} + +function wrapText(ctx, text, x, y, maxW, lineH) { + const words = text.split(' '); + let line = ''; + let lines = []; + for (const word of words) { + const test = line ? line + ' ' + word : word; + if (ctx.measureText(test).width > maxW && line) { lines.push(line); line = word; } + else line = test; + } + if (line) lines.push(line); + lines = lines.slice(0, 4); + const startY = y - ((lines.length - 1) * lineH) / 2; + lines.forEach((l, i) => ctx.fillText(l, x, startY + i * lineH)); +} + +function truncate(s, n) { return s.length > n ? 
s.slice(0, n - 1) + '…' : s; } + +// ── Data loading ─────────────────────────────────────────────────────────── + +async function loadLibrary() { + const resp = await fetch('/library/list'); + allBooks = await resp.json(); + updateCounts(); + renderGrid(); + return true; +} + +function activeBooks() { return allBooks.filter(b => !b.archived); } +function archivedBooks() { return allBooks.filter(b => b.archived); } + +function updateCounts() { + const active = activeBooks(); + const wtrCount = active.filter(b => b.want_to_read).length; + const seriesCount = new Set(active.filter(b => b.series).map(b => b.series)).size; + const authorCount = new Set(active.map(b => bookAuthor(b)).filter(Boolean)).size; + const publisherCount = new Set(active.map(b => bookPublisherKey(b))).size; + const newCount = active.filter(b => b.needs_review).length; + const archCount = archivedBooks().length; + document.getElementById('count-all').textContent = active.length || ''; + document.getElementById('count-wtr').textContent = wtrCount || ''; + document.getElementById('count-series').textContent = seriesCount || ''; + document.getElementById('count-authors').textContent = authorCount || ''; + document.getElementById('count-publishers').textContent = publisherCount || ''; + const newEl = document.getElementById('count-new'); + if (newEl) newEl.textContent = newCount || ''; + const archEl = document.getElementById('count-archived'); + if (archEl) archEl.textContent = archCount || ''; +} + +function bookAuthor(b) { + if (b.author) return b.author; + const parts = b.filename.replace(/\.epub$/, '').split('-'); + return (parts[1] ?? '').replace(/_/g, ' '); +} + +function bookTitle(b) { + return b.title || (b.filename.replace(/\.epub$/, '').split('-')[2] ?? '').replace(/_/g, ' '); +} + +function normalizePublisherName(value) { + const v = (value || '').trim(); + if (!v) return MISSING_PUBLISHER_KEY; + const low = v.toLowerCase(); + if (low === 'unknown publisher') return MISSING_PUBLISHER_KEY; + return v; +} + +function publisherDisplayName(key) { + return key === MISSING_PUBLISHER_KEY ? 
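+    // Books with an empty or "Unknown Publisher" field are grouped under one
+    // sentinel key, shown in the Publishers view as "No publisher".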
MISSING_PUBLISHER_LABEL : key; +} + +function bookPublisherKey(b) { + return normalizePublisherName(b.publisher); +} + +// ── View switching ───────────────────────────────────────────────────────── + +function _viewUrl(view, param) { + if (view === 'wtr') return '/library#wtr'; + if (view === 'series') return '/library#series'; + if (view === 'series-detail') return '/library#series/' + encodeURIComponent(param || ''); + if (view === 'authors') return '/library#authors'; + if (view === 'author-detail') return '/library#authors/' + encodeURIComponent(param || ''); + if (view === 'publishers') return '/library#publishers'; + if (view === 'publisher-detail') return '/library#publishers/' + encodeURIComponent(param || ''); + if (view === 'archived') return '/library#archived'; + if (view === 'new') return '/library#new'; + if (view === 'genre') return '/library#genre/' + encodeURIComponent(param || ''); + return '/library'; +} + +function _applyView(view, param) { + currentView = view; + currentParam = param || null; + + // Clear search input when switching to a non-search view + if (view !== 'search') { + const si = document.getElementById('search-input'); + if (si) { si.value = ''; document.getElementById('search-clear').style.display = 'none'; } + } + + ['nav-all','nav-wtr','nav-new','nav-series','nav-authors','nav-publishers','nav-archived'].forEach(id => { + const el = document.getElementById(id); + if (el) el.classList.remove('active'); + }); + const activeMap = { + 'all': 'nav-all', 'wtr': 'nav-wtr', + 'series': 'nav-series', 'series-detail': 'nav-series', + 'authors': 'nav-authors', 'author-detail': 'nav-authors', + 'publishers': 'nav-publishers', 'publisher-detail': 'nav-publishers', + 'new': 'nav-new', + 'archived': 'nav-archived', + }; + const el = document.getElementById(activeMap[view]); + if (el) el.classList.add('active'); + + document.getElementById('section-title').textContent = + view === 'all' ? 'All books' : + view === 'wtr' ? 'Want to Read' : + view === 'series' ? 'Series' : + view === 'series-detail' ? (param || '') : + view === 'authors' ? 'Authors' : + view === 'author-detail' ? (param || '') : + view === 'publishers' ? 'Publishers' : + view === 'publisher-detail' ? publisherDisplayName(param || '') : + view === 'new' ? 'New' : + view === 'archived' ? 'Archived' : + view === 'genre' ? `Genre: ${param || ''}` : + view === 'search' ? `Search: "${param || ''}"` : ''; + + const showBack = view === 'series-detail' || view === 'author-detail' || view === 'publisher-detail'; + document.getElementById('back-btn').style.display = showBack ? 
'' : 'none'; + + renderGrid(); +} + +function switchView(view, param) { + history.pushState({ view, param: param || null }, '', _viewUrl(view, param)); + _applyView(view, param); +} + +function goBack() { history.back(); } + +window.addEventListener('popstate', e => { + if (e.state) _applyView(e.state.view, e.state.param); + else _applyView('all', null); +}); + +// ── Render dispatcher ────────────────────────────────────────────────────── + +function renderGrid() { + const active = activeBooks(); + if (currentView === 'all') renderBooksGrid(active); + else if (currentView === 'wtr') renderBooksGrid(active.filter(b => b.want_to_read)); + else if (currentView === 'series') renderSeriesGrid(); + else if (currentView === 'series-detail') renderSeriesDetail(currentParam); + else if (currentView === 'authors') renderAuthorsView(); + else if (currentView === 'author-detail') renderAuthorDetail(currentParam); + else if (currentView === 'publishers') renderPublishersView(); + else if (currentView === 'publisher-detail') renderPublisherDetail(currentParam); + else if (currentView === 'archived') renderBooksGrid(archivedBooks()); + else if (currentView === 'new') renderBooksGrid(active.filter(b => b.needs_review)); + else if (currentView === 'genre') renderGenreView(currentParam); + else if (currentView === 'search') renderSearchResults(currentParam); +} + +// ── Book grid (All / WTR / Author detail) ───────────────────────────────── + +function renderBooksGrid(books) { + const container = document.getElementById('grid-container'); + + if (!books.length) { + container.innerHTML = `<div class="empty">${ + currentView === 'wtr' ? 'No books marked as Want to Read. Star a book to add it here.' : + currentView === 'archived' ? 'No archived books. Archive a book from its detail page.' : + currentView === 'new' ? 'No newly imported books waiting for metadata review.' : + currentView === 'genre' ? `No books tagged "${esc(currentParam || '')}".` : + currentView === 'search' ? `No results for "${esc(currentParam || '')}".` : + 'No EPUBs yet. Convert a story first.' + }</div>`; + return; + } + + const grid = document.createElement('div'); + grid.className = 'cover-grid'; + + books.forEach(b => { + const author = bookAuthor(b); + const title = bookTitle(b); + + const card = document.createElement('div'); + card.className = 'book-card'; + card.id = `card-${cssId(b.filename)}`; + + const st = (b.publication_status || '').toLowerCase(); + let statusBadge = ''; + if (st === 'complete') { + statusBadge = `<div class="badge-status badge-complete" title="Complete"> + <svg width="10" height="10" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="3"><polyline points="20 6 9 17 4 12"/></svg> + </div>`; + } else if (st === 'ongoing') { + statusBadge = `<div class="badge-status badge-ongoing" title="Ongoing"> + <svg width="10" height="10" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="3"><circle cx="12" cy="12" r="10"/><polyline points="12 6 12 12 16 14"/></svg> + </div>`; + } else if (st === 'hiatus') { + statusBadge = `<div class="badge-status badge-hiatus" title="Hiatus"> + <svg width="10" height="10" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="3"><line x1="10" y1="9" x2="10" y2="15"/><line x1="14" y1="9" x2="14" y2="15"/><circle cx="12" cy="12" r="10"/></svg> + </div>`; + } + + const starClass = b.want_to_read ? 'btn-star starred' : 'btn-star'; + const seriesText = b.series + ? `${esc(b.series)}${b.series_index ? 
' <span class="series-index">[' + b.series_index + ']</span>' : ''}` + : ''; + + card.innerHTML = ` + <div class="cover-wrap" id="wrap-${cssId(b.filename)}"> + <canvas class="cover-canvas" id="canvas-${cssId(b.filename)}"></canvas> + <button class="${starClass}" id="star-${cssId(b.filename)}" + onclick="event.stopPropagation();toggleWtr('${jsEsc(b.filename)}')" title="Want to Read"> + <svg width="11" height="11" viewBox="0 0 24 24" fill="${b.want_to_read ? 'currentColor' : 'none'}" stroke="currentColor" stroke-width="2.5" id="star-svg-${cssId(b.filename)}"> + <polygon points="12 2 15.09 8.26 22 9.27 17 14.14 18.18 21.02 12 17.77 5.82 21.02 7 14.14 2 9.27 8.91 8.26 12 2"/> + </svg> + </button> + ${statusBadge} + ${b.read_count > 0 ? `<div class="read-pill">${b.read_count}\u00d7</div>` : ''} + ${b.progress > 0 ? `<div class="progress-mini"><div class="progress-mini-fill" style="width:${b.progress}%"></div></div>` : ''} + </div> + <div class="book-info"> + <div class="book-title">${esc(title)}</div> + <div class="book-author">${esc(author)}</div> + ${seriesText ? `<div class="book-series">${seriesText}</div>` : ''} + </div>`; + card.onclick = () => { location.href = `/library/book/${encodeURIComponent(b.filename)}`; }; + + grid.appendChild(card); + }); + + container.innerHTML = ''; + container.appendChild(grid); + + books.forEach(b => { + const author = bookAuthor(b); + const title = bookTitle(b); + const wrap = document.getElementById(`wrap-${cssId(b.filename)}`); + const canvas = document.getElementById(`canvas-${cssId(b.filename)}`); + if (b.has_cover) { + const img = document.createElement('img'); + img.className = 'cover-img'; + img.style.cssText = 'position:absolute;inset:0;width:100%;height:100%;object-fit:cover'; + img.src = `/library/cover-cached/${encodeURIComponent(b.filename)}`; + img.alt = title; + if (b.has_cached_cover) { + canvas.style.display = 'none'; + } + img.onload = () => { canvas.style.display = 'none'; }; + img.onerror = () => { + canvas.style.display = 'block'; + makePlaceholderCover(canvas, title, author); + }; + wrap.insertBefore(img, wrap.firstChild); + } + if (!b.has_cover || !b.has_cached_cover) { + requestAnimationFrame(() => makePlaceholderCover(canvas, title, author)); + } + }); +} + +// ── Series grid ──────────────────────────────────────────────────────────── + +function groupBySeries() { + const map = {}; + for (const b of activeBooks()) { + if (!b.series) continue; + if (!map[b.series]) map[b.series] = []; + map[b.series].push(b); + } + for (const s of Object.values(map)) s.sort((a, b) => a.series_index - b.series_index); + return map; +} + +function bookDotStatus(b) { + if (b.progress > 0) return 'reading'; + if (b.read_count > 0) return 'read'; + return 'unread'; +} + +function renderSeriesGrid() { + const map = groupBySeries(); + const container = document.getElementById('grid-container'); + const entries = Object.entries(map).sort(([a], [b]) => a.localeCompare(b)); + + if (!entries.length) { + container.innerHTML = '<div class="empty">No series found. 
Series metadata is read from the EPUB files.</div>'; + return; + } + + const grid = document.createElement('div'); + grid.className = 'cover-grid'; + + entries.forEach(([seriesName, books]) => { + const card = document.createElement('div'); + card.className = 'series-card'; + card.onclick = () => switchView('series-detail', seriesName); + + const dotStatuses = books.map(bookDotStatus); + const maxDots = 10; + const visibleDots = dotStatuses.slice(0, maxDots); + const extraDots = dotStatuses.length - maxDots; + const dotsHtml = visibleDots.map(s => + `<span class="series-dot dot-${s}" title="${s}"></span>` + ).join('') + (extraDots > 0 ? `<span style="font-family:var(--mono);font-size:0.55rem;color:var(--text-faint)">+${extraDots}</span>` : ''); + + const stackBooks = books.slice(0, 3).reverse(); + const stackId = cssId(seriesName); + let stackHtml = ''; + for (let i = 0; i < 3; i++) { + const depth = 3 - i; + const b = stackBooks[i]; + if (b) { + stackHtml += `<div class="sci sci-${depth}" id="sci-${stackId}-${depth}"> + <canvas id="sci-canvas-${stackId}-${depth}" style="width:100%;height:100%"></canvas> + </div>`; + } + } + + card.innerHTML = ` + <div class="series-cover-wrap">${stackHtml}</div> + <div class="series-info"> + <div class="series-name">${esc(seriesName)}</div> + <div class="series-meta"> + <span>${books.length} book${books.length !== 1 ? 's' : ''}</span> + <div class="series-dots">${dotsHtml}</div> + </div> + </div>`; + + grid.appendChild(card); + }); + + container.innerHTML = ''; + container.appendChild(grid); + + entries.forEach(([seriesName, books]) => { + const stackBooks = books.slice(0, 3).reverse(); + const stackId = cssId(seriesName); + for (let i = 0; i < stackBooks.length; i++) { + const depth = 3 - i; + const b = stackBooks[i]; + const canvas = document.getElementById(`sci-canvas-${stackId}-${depth}`); + const wrap = document.getElementById(`sci-${stackId}-${depth}`); + if (!canvas || !wrap) continue; + const author = bookAuthor(b); + const title = bookTitle(b); + if (b.has_cover) { + const img = document.createElement('img'); + img.style.cssText = 'position:absolute;inset:0;width:100%;height:100%;object-fit:cover'; + img.src = `/library/cover/${encodeURIComponent(b.filename)}`; + img.alt = title; + img.onload = () => { canvas.style.display = 'none'; }; + img.onerror = () => { requestAnimationFrame(() => makePlaceholderCover(canvas, title, author)); }; + wrap.insertBefore(img, wrap.firstChild); + } + requestAnimationFrame(() => makePlaceholderCover(canvas, title, author)); + } + }); +} + +// ── Series detail ────────────────────────────────────────────────────────── + +function getSeriesSlots(books) { + const indexed = books.filter(b => b.series_index > 0); + const unindexed = books.filter(b => b.series_index === 0 || !b.series_index); + if (indexed.length === 0) return books; + + const byIndex = {}; + for (const b of indexed) { + if (!byIndex[b.series_index]) byIndex[b.series_index] = []; + byIndex[b.series_index].push(b); + } + const min = Math.min(...indexed.map(b => b.series_index)); + const max = Math.max(...indexed.map(b => b.series_index)); + + const slots = []; + for (let i = min; i <= max; i++) { + if (byIndex[i]) for (const b of byIndex[i]) slots.push(b); + else slots.push({ missing: true, series_index: i }); + } + return [...unindexed, ...slots]; +} + +function renderSeriesDetail(seriesName) { + const map = groupBySeries(); + const books = map[seriesName] || []; + const slots = getSeriesSlots(books); + const container = 
document.getElementById('grid-container'); + + if (!slots.length) { + container.innerHTML = '<div class="empty">No books found in this series.</div>'; + return; + } + + const grid = document.createElement('div'); + grid.className = 'cover-grid'; + + slots.forEach(slot => { + const wrapper = document.createElement('div'); + wrapper.className = 'series-slot' + (slot.missing ? ' slot-missing' : ''); + + if (slot.series_index) { + const lbl = document.createElement('div'); + lbl.className = 'slot-index-label'; + lbl.textContent = `#${slot.series_index}`; + wrapper.appendChild(lbl); + } + + if (slot.missing) { + wrapper.innerHTML += ` + <div class="cover-wrap"> + <div class="slot-missing-inner"> + <svg width="28" height="28" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5"> + <circle cx="12" cy="12" r="10"/> + <line x1="12" y1="8" x2="12" y2="12"/> + <line x1="12" y1="16" x2="12.01" y2="16"/> + </svg> + <span>Missing</span> + </div> + </div> + <div class="book-info"> + <div class="book-title" style="color:var(--text-faint)">Volume ${slot.series_index}</div> + </div>`; + } else { + const b = slot; + const author = bookAuthor(b); + const title = bookTitle(b); + const cid = cssId(b.filename); + + const st = (b.publication_status || '').toLowerCase(); + let statusBadge = ''; + if (st === 'complete') { + statusBadge = `<div class="badge-status badge-complete" title="Complete"><svg width="10" height="10" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="3"><polyline points="20 6 9 17 4 12"/></svg></div>`; + } else if (st === 'ongoing') { + statusBadge = `<div class="badge-status badge-ongoing" title="Ongoing"><svg width="10" height="10" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="3"><circle cx="12" cy="12" r="10"/><polyline points="12 6 12 12 16 14"/></svg></div>`; + } else if (st === 'hiatus') { + statusBadge = `<div class="badge-status badge-hiatus" title="Hiatus"><svg width="10" height="10" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="3"><line x1="10" y1="9" x2="10" y2="15"/><line x1="14" y1="9" x2="14" y2="15"/><circle cx="12" cy="12" r="10"/></svg></div>`; + } + + const bookCard = document.createElement('div'); + bookCard.className = 'book-card'; + bookCard.style.cursor = 'pointer'; + bookCard.onclick = () => { location.href = `/library/book/${encodeURIComponent(b.filename)}`; }; + bookCard.innerHTML = ` + <div class="cover-wrap" id="wrap-${cid}"> + <canvas class="cover-canvas" id="canvas-${cid}"></canvas> + ${statusBadge} + ${b.read_count > 0 ? `<div class="read-pill">${b.read_count}\u00d7</div>` : ''} + ${b.progress > 0 ? 
`<div class="progress-mini"><div class="progress-mini-fill" style="width:${b.progress}%"></div></div>` : ''} + </div> + <div class="book-info"> + <div class="book-title">${esc(title)}</div> + <div class="book-author">${esc(author)}</div> + </div>`; + wrapper.appendChild(bookCard); + } + + grid.appendChild(wrapper); + }); + + container.innerHTML = ''; + container.appendChild(grid); + + slots.filter(s => !s.missing).forEach(b => { + const author = bookAuthor(b); + const title = bookTitle(b); + const canvas = document.getElementById(`canvas-${cssId(b.filename)}`); + const wrap = document.getElementById(`wrap-${cssId(b.filename)}`); + if (!canvas) return; + if (b.has_cover) { + const img = document.createElement('img'); + img.style.cssText = 'position:absolute;inset:0;width:100%;height:100%;object-fit:cover'; + img.src = `/library/cover/${encodeURIComponent(b.filename)}`; + img.alt = title; + img.onload = () => { canvas.style.display = 'none'; }; + img.onerror = () => { requestAnimationFrame(() => makePlaceholderCover(canvas, title, author)); }; + wrap.insertBefore(img, wrap.firstChild); + } + requestAnimationFrame(() => makePlaceholderCover(canvas, title, author)); + }); +} + +// ── Authors list ─────────────────────────────────────────────────────────── + +function renderAuthorsView() { + const container = document.getElementById('grid-container'); + + const authorMap = {}; + for (const b of activeBooks()) { + const a = bookAuthor(b); + if (!a) continue; + if (!authorMap[a]) authorMap[a] = []; + authorMap[a].push(b); + } + + const entries = Object.entries(authorMap).sort(([a], [b]) => a.localeCompare(b)); + + if (!entries.length) { + container.innerHTML = '<div class="empty">No authors found.</div>'; + return; + } + + const list = document.createElement('div'); + list.className = 'author-list'; + + entries.forEach(([authorName, books]) => { + const initial = authorName.trim()[0]?.toUpperCase() || '?'; + const [bg, fg] = COVER_PALETTES[strHash(authorName) % COVER_PALETTES.length]; + + const item = document.createElement('div'); + item.className = 'author-item'; + item.onclick = () => switchView('author-detail', authorName); + item.innerHTML = ` + <div class="author-avatar" style="background:${bg};color:${fg}">${esc(initial)}</div> + <div class="author-name">${esc(authorName)}</div> + <div class="author-count">${books.length} book${books.length !== 1 ? 
's' : ''}</div> + <svg class="author-chevron" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <polyline points="9 18 15 12 9 6"/> + </svg>`; + list.appendChild(item); + }); + + container.innerHTML = ''; + container.appendChild(list); +} + + +function renderPublishersView() { + const container = document.getElementById('grid-container'); + + const publisherMap = {}; + for (const b of activeBooks()) { + const key = bookPublisherKey(b); + if (!publisherMap[key]) publisherMap[key] = []; + publisherMap[key].push(b); + } + + const missingBooks = publisherMap[MISSING_PUBLISHER_KEY] || []; + const filledEntries = Object.entries(publisherMap) + .filter(([key]) => key !== MISSING_PUBLISHER_KEY) + .sort(([a], [b]) => a.localeCompare(b)); + + if (!filledEntries.length && !missingBooks.length) { + container.innerHTML = '<div class="empty">No publishers found.</div>'; + return; + } + + const wrap = document.createElement('div'); + wrap.className = 'publishers-wrap'; + + const missingList = document.createElement('div'); + missingList.className = 'author-list publisher-missing-wrap'; + const missingName = publisherDisplayName(MISSING_PUBLISHER_KEY); + const mInitial = missingName[0]?.toUpperCase() || '?'; + const [mbg, mfg] = COVER_PALETTES[strHash(missingName) % COVER_PALETTES.length]; + + const missingItem = document.createElement('div'); + missingItem.className = 'author-item publisher-missing-item'; + missingItem.onclick = () => switchView('publisher-detail', MISSING_PUBLISHER_KEY); + missingItem.innerHTML = ` + <div class="author-avatar" style="background:${mbg};color:${mfg}">${esc(mInitial)}</div> + <div class="author-name">${esc(missingName)}</div> + <div class="author-count">${missingBooks.length} book${missingBooks.length !== 1 ? 's' : ''}</div> + <svg class="author-chevron" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <polyline points="9 18 15 12 9 6"/> + </svg>`; + missingList.appendChild(missingItem); + wrap.appendChild(missingList); + + if (filledEntries.length) { + const divider = document.createElement('div'); + divider.className = 'publisher-divider'; + divider.textContent = 'Publishers'; + wrap.appendChild(divider); + + const list = document.createElement('div'); + list.className = 'author-list'; + + filledEntries.forEach(([publisherKey, books]) => { + const publisherName = publisherDisplayName(publisherKey); + const initial = publisherName.trim()[0]?.toUpperCase() || '?'; + const [bg, fg] = COVER_PALETTES[strHash(publisherName) % COVER_PALETTES.length]; + + const item = document.createElement('div'); + item.className = 'author-item'; + item.onclick = () => switchView('publisher-detail', publisherKey); + item.innerHTML = ` + <div class="author-avatar" style="background:${bg};color:${fg}">${esc(initial)}</div> + <div class="author-name">${esc(publisherName)}</div> + <div class="author-count">${books.length} book${books.length !== 1 ? 
's' : ''}</div> + <svg class="author-chevron" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <polyline points="9 18 15 12 9 6"/> + </svg>`; + list.appendChild(item); + }); + + wrap.appendChild(list); + } + + container.innerHTML = ''; + container.appendChild(wrap); +} + + +// ── Genre view ───────────────────────────────────────────────────────────── + +function renderGenreView(tag) { + const books = activeBooks().filter(b => (b.genres || []).includes(tag)); + renderBooksGrid(books); +} + +// ── Search ───────────────────────────────────────────────────────────────── + +function renderSearchResults(query) { + if (!query) { renderBooksGrid(activeBooks()); return; } + const q = query.toLowerCase(); + const books = activeBooks().filter(b => + bookTitle(b).toLowerCase().includes(q) || + bookAuthor(b).toLowerCase().includes(q) || + (b.genres || []).some(g => g.toLowerCase().includes(q)) + ); + renderBooksGrid(books); +} + +function clearSearch() { + document.getElementById('search-input').value = ''; + document.getElementById('search-clear').style.display = 'none'; + switchView('all'); +} + +// ── Author detail ────────────────────────────────────────────────────────── + +function renderAuthorDetail(authorName) { + const books = allBooks + .filter(b => bookAuthor(b) === authorName) + .sort((a, b) => { + const sa = a.series || '\uffff'; + const sb = b.series || '\uffff'; + if (sa !== sb) return sa.localeCompare(sb); + if (a.series_index !== b.series_index) return a.series_index - b.series_index; + return bookTitle(a).localeCompare(bookTitle(b)); + }); + renderBooksGrid(books); +} + + +function renderPublisherDetail(publisherName) { + const books = allBooks + .filter(b => bookPublisherKey(b) === normalizePublisherName(publisherName)) + .sort((a, b) => { + const sa = a.series || '\uffff'; + const sb = b.series || '\uffff'; + if (sa !== sb) return sa.localeCompare(sb); + if (a.series_index !== b.series_index) return a.series_index - b.series_index; + return bookTitle(a).localeCompare(bookTitle(b)); + }); + renderBooksGrid(books); +} + +// ── Want to Read toggle ──────────────────────────────────────────────────── + +async function toggleWtr(filename) { + const resp = await fetch(`/library/want-to-read/${encodeURIComponent(filename)}`, { method: 'POST' }); + const result = await resp.json(); + if (result.error) return; + + const book = allBooks.find(b => b.filename === filename); + if (book) book.want_to_read = result.want_to_read; + + const id = cssId(filename); + const btn = document.getElementById(`star-${id}`); + const svg = document.getElementById(`star-svg-${id}`); + if (btn) btn.className = result.want_to_read ? 'btn-star starred' : 'btn-star'; + if (svg) svg.setAttribute('fill', result.want_to_read ? 
'currentColor' : 'none'); + + if (currentView === 'wtr' && !result.want_to_read) { + const card = document.getElementById(`card-${id}`); + if (card) card.remove(); + const grid = document.querySelector('.cover-grid'); + if (grid && !grid.children.length) { + document.getElementById('grid-container').innerHTML = + '<div class="empty">No books marked as Want to Read.</div>'; + } + } + updateCounts(); +} + +// ── Delete ───────────────────────────────────────────────────────────────── + +function askDelete(filename) { + pendingDelete = filename; + document.getElementById('confirm-filename').textContent = filename; + document.getElementById('confirm-overlay').classList.add('visible'); +} + +function closeConfirm() { + pendingDelete = null; + document.getElementById('confirm-overlay').classList.remove('visible'); +} + +async function confirmDelete() { + if (!pendingDelete) return; + const filename = pendingDelete; + closeConfirm(); + await fetch(`/library/file/${encodeURIComponent(filename)}`, { method: 'DELETE' }); + loadLibrary(); +} + +// ── Add cover ────────────────────────────────────────────────────────────── + +function openCoverDialog(filename) { + coverTargetFilename = filename; + coverB64 = null; + document.getElementById('cover-target-filename').textContent = filename; + document.getElementById('cover-file-input').value = ''; + document.getElementById('cover-dialog-preview').classList.remove('visible'); + document.getElementById('cover-upload-prompt').textContent = 'Click to select a cover image'; + document.getElementById('cover-upload-btn').disabled = true; + document.getElementById('cover-overlay').classList.add('visible'); +} + +function closeCoverDialog() { + coverTargetFilename = null; + coverB64 = null; + document.getElementById('cover-overlay').classList.remove('visible'); +} + +function onCoverFileSelected() { + const file = document.getElementById('cover-file-input').files[0]; + if (!file) return; + const reader = new FileReader(); + reader.onload = e => { + const dataUrl = e.target.result; + coverB64 = dataUrl.split(',')[1]; + const preview = document.getElementById('cover-dialog-preview'); + preview.src = dataUrl; + preview.classList.add('visible'); + document.getElementById('cover-upload-prompt').textContent = file.name; + document.getElementById('cover-upload-btn').disabled = false; + }; + reader.readAsDataURL(file); +} + +async function uploadCover() { + if (!coverTargetFilename || !coverB64) return; + document.getElementById('cover-upload-btn').disabled = true; + document.getElementById('cover-upload-label').textContent = 'Uploading…'; + document.getElementById('cover-spinner').style.display = 'inline-block'; + + const resp = await fetch(`/library/cover/${encodeURIComponent(coverTargetFilename)}`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ cover_b64: coverB64 }), + }); + const result = await resp.json(); + + document.getElementById('cover-upload-label').textContent = 'Add cover'; + document.getElementById('cover-spinner').style.display = 'none'; + + if (result.error) { + alert('Error: ' + result.error); + document.getElementById('cover-upload-btn').disabled = false; + return; + } + closeCoverDialog(); + loadLibrary(); +} + +// ── Rescan ───────────────────────────────────────────────────────────────── + +async function rescanLibrary() { + const btn = document.getElementById('rescan-btn'); + const label = document.getElementById('rescan-label'); + btn.disabled = true; + label.textContent = 'Scanning…'; + await 
fetch('/library/rescan', { method: 'POST' });
+  await loadLibrary();
+  btn.disabled = false;
+  label.textContent = 'Rescan library';
+}
+
+function openImportPicker() {
+  if (importInProgress) return;
+  const input = document.getElementById('import-file-input');
+  if (input) input.click();
+}
+
+function onImportFilesSelected(fileList) {
+  if (!fileList || !fileList.length) return;
+  uploadImportedFiles(Array.from(fileList));
+  const input = document.getElementById('import-file-input');
+  if (input) input.value = '';
+}
+
+async function uploadImportedFiles(files) {
+  if (!files.length || importInProgress) return;
+  const zone = document.getElementById('import-dropzone');
+  const title = zone?.querySelector('.import-title');
+  const sub = zone?.querySelector('.import-sub');
+
+  importInProgress = true;
+  zone?.classList.add('uploading');
+  if (title) title.textContent = 'Importing EPUBs…';
+  if (sub) sub.textContent = `${files.length} file(s) selected`;
+
+  const form = new FormData();
+  files.forEach(f => form.append('files', f));
+
+  try {
+    const resp = await fetch('/library/import', { method: 'POST', body: form });
+    const data = await resp.json();
+    if (!resp.ok || data.error) {
+      alert(data.error || 'Import failed.');
+    } else {
+      const importedCount = (data.imported || []).length;
+      const skippedCount = (data.skipped || []).length;
+      if (title) title.textContent = importedCount
+        ? `Imported ${importedCount} EPUB(s)`
+        : 'No EPUBs imported';
+      if (sub) sub.textContent = skippedCount
+        ? `${skippedCount} skipped`
+        : 'Ready for next import';
+      await loadLibrary();
+    }
+  } catch {
+    alert('Import failed.');
+  } finally {
+    importInProgress = false;
+    zone?.classList.remove('uploading');
+    setTimeout(() => {
+      if (title) title.textContent = 'Drop EPUB files here';
+      if (sub) sub.textContent = 'or click to choose files';
+    }, 1200);
+  }
+}
+
+// ── Utilities ────────────────────────────────────────────────────────────
+
+function esc(s) {
+  return String(s ?? '').replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
+}
+function jsEsc(s) { return String(s ?? '').replace(/\\/g, '\\\\').replace(/'/g, "\\'"); }
+function cssId(filename) { return filename.replace(/[^a-zA-Z0-9_-]/g, '_'); }
+
+// ── Search input ─────────────────────────────────────────────────────────
+
+let searchTimer = null;
+document.getElementById('search-input').addEventListener('input', function() {
+  const q = this.value.trim();
+  document.getElementById('search-clear').style.display = q ?
'' : 'none'; + clearTimeout(searchTimer); + searchTimer = setTimeout(() => { + if (q) { + currentView = 'search'; + currentParam = q; + ['nav-all','nav-wtr','nav-new','nav-series','nav-authors','nav-publishers','nav-archived'].forEach(id => { + const el = document.getElementById(id); + if (el) el.classList.remove('active'); + }); + document.getElementById('section-title').textContent = `Search: "${q}"`; + document.getElementById('back-btn').style.display = 'none'; + renderGrid(); + } else { + switchView('all'); + } + }, 250); +}); + +// ── Init ─────────────────────────────────────────────────────────────────── + +const importZone = document.getElementById('import-dropzone'); +if (importZone) { + ['dragenter', 'dragover'].forEach(evt => { + importZone.addEventListener(evt, e => { + e.preventDefault(); + e.stopPropagation(); + if (!importInProgress) importZone.classList.add('dragover'); + }); + }); + ['dragleave', 'drop'].forEach(evt => { + importZone.addEventListener(evt, e => { + e.preventDefault(); + e.stopPropagation(); + importZone.classList.remove('dragover'); + }); + }); + importZone.addEventListener('drop', e => { + if (importInProgress) return; + const files = Array.from(e.dataTransfer?.files || []).filter(f => f.name.toLowerCase().endsWith('.epub')); + if (!files.length) return; + uploadImportedFiles(files); + }); +} + +loadLibrary().then(() => { + const hash = window.location.hash.slice(1); + let view = 'all', param = null; + if (hash === 'wtr') view = 'wtr'; + else if (hash === 'series') view = 'series'; + else if (hash.startsWith('series/')) { view = 'series-detail'; param = decodeURIComponent(hash.slice(7)); } + else if (hash === 'authors') view = 'authors'; + else if (hash.startsWith('authors/')) { view = 'author-detail'; param = decodeURIComponent(hash.slice(8)); } + else if (hash === 'publishers' || hash === 'publisher') view = 'publishers'; + else if (hash.startsWith('publishers/')) { view = 'publisher-detail'; param = decodeURIComponent(hash.slice(11)); } + else if (hash.startsWith('publisher/')) { view = 'publisher-detail'; param = decodeURIComponent(hash.slice(10)); } + else if (hash === 'archived') view = 'archived'; + else if (hash === 'new') view = 'new'; + else if (hash.startsWith('genre/')) { view = 'genre'; param = decodeURIComponent(hash.slice(6)); } + history.replaceState({ view, param }, '', _viewUrl(view, param)); + _applyView(view, param); +}); diff --git a/containers/novela/static/sidebar.css b/containers/novela/static/sidebar.css new file mode 100644 index 0000000..b2fdab1 --- /dev/null +++ b/containers/novela/static/sidebar.css @@ -0,0 +1,153 @@ +/* ── Sidebar ── */ + +html { + scrollbar-gutter: stable; + overflow-y: scroll; +} + +.sidebar { + position: fixed; + top: 0; left: 0; bottom: 0; + width: var(--sidebar, 220px); + min-width: var(--sidebar, 220px); + max-width: var(--sidebar, 220px); + background: var(--surface); + border-right: 1px solid var(--border); + display: flex; + flex-direction: column; + padding: 1.5rem 0.75rem; + z-index: 10; +} + +.sidebar-logo { + padding: 0 0.5rem 1.5rem; + border-bottom: 1px solid var(--border); + margin-bottom: 1rem; +} +.sidebar-logo h1 { + margin: 0; + font-size: 1.25rem; + font-weight: 700; + letter-spacing: -0.02em; +} +.sidebar-logo h1 span { color: var(--accent); } +.sidebar-logo p { + font-family: var(--mono); + font-size: 0.62rem; + color: var(--text-dim); + letter-spacing: 0.1em; + margin-top: 0.2rem; +} + +.sidebar-section-label { + font-family: var(--mono); + font-size: 0.6rem; + letter-spacing: 0.14em; + 
text-transform: uppercase; + color: var(--text-dim); + padding: 0 0.5rem; + margin-bottom: 0.35rem; + margin-top: 0.25rem; +} + +.sidebar-nav { + list-style: none; + margin: 0; + padding: 0; +} +.sidebar-nav li + li { margin-top: 0.15rem; } +.sidebar-nav a { + white-space: nowrap; + display: flex; + align-items: center; + gap: 0.6rem; + padding: 0.45rem 0.6rem; + border-radius: var(--radius); + font-family: var(--mono); + font-size: 0.78rem; + color: var(--text-dim); + text-decoration: none; + transition: background 0.12s, color 0.12s; +} +.sidebar-nav a:hover { background: var(--surface2); color: var(--text); } +.sidebar-nav a.active { background: var(--surface2); color: var(--accent); } +.sidebar-nav a svg { flex-shrink: 0; } + +.sidebar-count { + font-size: 0.65rem; + color: var(--text-dim); + margin-left: auto; + min-width: 2ch; + text-align: right; +} + +.sidebar-divider { + border: none; + border-top: 1px solid var(--border); + margin: 0.85rem 0; +} + +.sidebar-bottom { margin-top: auto; } + +.btn-rescan { + display: flex; + align-items: center; + justify-content: center; + gap: 0.5rem; + width: 100%; + padding: 0.4rem 0.6rem; + background: none; + border: 1px solid var(--border); + border-radius: var(--radius); + font-family: var(--mono); + font-size: 0.7rem; + color: var(--text-dim); + cursor: pointer; + transition: background 0.12s, color 0.12s; + margin-top: 0.5rem; +} +.btn-rescan:hover { background: var(--surface2); color: var(--text); } +.btn-rescan:disabled { opacity: 0.5; cursor: not-allowed; } + +/* ── Mobile hamburger ──────────────────────────────────────────────────── */ + +.sidebar-toggle { + display: none; + position: fixed; + top: 0.75rem; + left: 0.75rem; + z-index: 50; + background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + width: 36px; + height: 36px; + align-items: center; + justify-content: center; + cursor: pointer; + color: var(--text-dim); + transition: color 0.15s, border-color 0.15s; + padding: 0; +} +.sidebar-toggle:hover { color: var(--text); border-color: var(--text-faint); } + +.sidebar-overlay { + display: none; + position: fixed; + inset: 0; + background: rgba(0,0,0,0.6); + z-index: 9; +} + +@media (max-width: 768px) { + .sidebar-toggle { display: flex; } + + .sidebar { + transform: translateX(-100%); + transition: transform 0.22s ease; + z-index: 11; + } + .sidebar.open { transform: translateX(0); } + + .sidebar-overlay.open { display: block; } +} diff --git a/containers/novela/templates/_sidebar.html b/containers/novela/templates/_sidebar.html new file mode 100644 index 0000000..786feef --- /dev/null +++ b/containers/novela/templates/_sidebar.html @@ -0,0 +1,250 @@ +<button class="sidebar-toggle" id="sidebar-toggle" onclick="toggleSidebar()" aria-label="Menu"> + <svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"> + <line x1="3" y1="6" x2="21" y2="6"/> + <line x1="3" y1="12" x2="21" y2="12"/> + <line x1="3" y1="18" x2="21" y2="18"/> + </svg> +</button> +<div class="sidebar-overlay" id="sidebar-overlay" onclick="closeSidebar()"></div> + +<aside class="sidebar" id="sidebar"> + <div class="sidebar-logo"> + <a href="/home" style="text-decoration:none;color:inherit"><h1>No<span>vela</span></h1></a> + </div> + + <ul class="sidebar-nav"> + <li> + <a href="/home"{% if active == 'home' %} class="active"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <path d="M3 9l9-7 9 7v11a2 2 0 0 1-2 2H5a2 2 0 0 
1-2-2z"/> + <polyline points="9 22 9 12 15 12 15 22"/> + </svg> + Home + </a> + </li> + </ul> + + <hr class="sidebar-divider"/> + + <div class="sidebar-section-label">Library</div> + <ul class="sidebar-nav"{% if active == 'library' %} id="lib-nav"{% endif %}> + <li> + <a href="{% if active == 'library' %}#{% else %}/library{% endif %}" + {% if active == 'library' %}id="nav-all" class="active" onclick="switchView('all'); return false;" + {% elif active == 'book' %}class="active"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <rect x="3" y="3" width="7" height="7"/><rect x="14" y="3" width="7" height="7"/> + <rect x="3" y="14" width="7" height="7"/><rect x="14" y="14" width="7" height="7"/> + </svg> + All books + <span class="sidebar-count" id="count-all"></span> + </a> + </li> + <li> + <a href="{% if active == 'library' %}#{% elif active == 'book' %}/library#wtr{% else %}/library#wtr{% endif %}" + {% if active == 'library' %}id="nav-wtr" onclick="switchView('wtr'); return false;"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <polygon points="12 2 15.09 8.26 22 9.27 17 14.14 18.18 21.02 12 17.77 5.82 21.02 7 14.14 2 9.27 8.91 8.26 12 2"/> + </svg> + Want to Read + <span class="sidebar-count" id="count-wtr"></span> + </a> + </li> + <li> + <a href="{% if active == 'library' %}#{% else %}/library#new{% endif %}" + {% if active == 'library' %}id="nav-new" onclick="switchView('new'); return false;"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <path d="M12 3v18"/><path d="M3 12h18"/> + </svg> + New + <span class="sidebar-count" id="count-new"></span> + </a> + </li> + <li> + <a href="{% if active == 'library' %}#{% else %}/library#series{% endif %}" + {% if active == 'library' %}id="nav-series" onclick="switchView('series'); return false;"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <rect x="2" y="3" width="6" height="18" rx="1"/> + <rect x="9" y="3" width="6" height="18" rx="1"/> + <rect x="16" y="3" width="6" height="18" rx="1"/> + </svg> + Series + <span class="sidebar-count" id="count-series"></span> + </a> + </li> + <li> + <a href="{% if active == 'library' %}#{% else %}/library#authors{% endif %}" + {% if active == 'library' %}id="nav-authors" onclick="switchView('authors'); return false;"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <path d="M17 21v-2a4 4 0 0 0-4-4H5a4 4 0 0 0-4 4v2"/> + <circle cx="9" cy="7" r="4"/> + <path d="M23 21v-2a4 4 0 0 0-3-3.87"/> + <path d="M16 3.13a4 4 0 0 1 0 7.75"/> + </svg> + Authors + <span class="sidebar-count" id="count-authors"></span> + </a> + </li> + <li> + <a href="{% if active == 'library' %}#{% else %}/library#publishers{% endif %}" + {% if active == 'library' %}id="nav-publishers" onclick="switchView('publishers'); return false;"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <path d="M3 21h18"/> + <path d="M5 21V7l7-4 7 4v14"/> + <path d="M9 21v-6h6v6"/> + </svg> + Publishers + <span class="sidebar-count" id="count-publishers"></span> + </a> + </li> + <li> + <a href="{% if active == 'library' %}#{% else %}/library#archived{% endif %}" + {% if active == 'library' %}id="nav-archived" onclick="switchView('archived'); return false;"{% 
endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <polyline points="21 8 21 21 3 21 3 8"/> + <rect x="1" y="3" width="22" height="5"/> + <line x1="10" y1="12" x2="14" y2="12"/> + </svg> + Archived + <span class="sidebar-count" id="count-archived"></span> + </a> + </li> + <li> + <a href="/stats"{% if active == 'stats' %} class="active"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <line x1="18" y1="20" x2="18" y2="10"/> + <line x1="12" y1="20" x2="12" y2="4"/> + <line x1="6" y1="20" x2="6" y2="14"/> + </svg> + Statistics + </a> + </li> + </ul> + + <hr class="sidebar-divider"/> + + <div class="sidebar-section-label">Tools</div> + <ul class="sidebar-nav"> + <li> + <a href="/convert"{% if active == 'convert' %} class="active"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <polyline points="16 18 22 12 16 6"/><polyline points="8 6 2 12 8 18"/> + </svg> + Convert + </a> + </li> + <li> + <a href="/credentials-manager"{% if active == 'credentials' %} class="active"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <circle cx="12" cy="12" r="3"/><path d="M19.07 4.93a10 10 0 010 14.14M4.93 4.93a10 10 0 000 14.14"/> + </svg> + Credentials + </a> + </li> + <li> + <a href="/debug"{% if active == 'debug' %} class="active"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <circle cx="11" cy="11" r="8"/><path d="M21 21l-4.35-4.35"/> + </svg> + Debug + </a> + </li> + <li> + <a href="/backup"{% if active == 'backup' %} class="active"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <path d="M21 8v11a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V8"/> + <polyline points="1 8 12 2 23 8"/> + <path d="M12 22v-8"/> + </svg> + Backup + </a> + </li> + <li> + <a href="/settings"{% if active == 'settings' %} class="active"{% endif %}> + <svg width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"> + <circle cx="12" cy="12" r="3"/> + <path d="M19.4 15a1.65 1.65 0 0 0 .33 1.82l.06.06a2 2 0 0 1-2.83 2.83l-.06-.06a1.65 1.65 0 0 0-1.82-.33 1.65 1.65 0 0 0-1 1.51V21a2 2 0 0 1-4 0v-.09A1.65 1.65 0 0 0 9 19.4a1.65 1.65 0 0 0-1.82.33l-.06.06a2 2 0 0 1-2.83-2.83l.06-.06A1.65 1.65 0 0 0 4.68 15a1.65 1.65 0 0 0-1.51-1H3a2 2 0 0 1 0-4h.09A1.65 1.65 0 0 0 4.6 9a1.65 1.65 0 0 0-.33-1.82l-.06-.06a2 2 0 0 1 2.83-2.83l.06.06A1.65 1.65 0 0 0 9 4.68a1.65 1.65 0 0 0 1-1.51V3a2 2 0 0 1 4 0v.09a1.65 1.65 0 0 0 1 1.51 1.65 1.65 0 0 0 1.82-.33l.06-.06a2 2 0 0 1 2.83 2.83l-.06.06A1.65 1.65 0 0 0 19.4 9a1.65 1.65 0 0 0 1.51 1H21a2 2 0 0 1 0 4h-.09a1.65 1.65 0 0 0-1.51 1z"/> + </svg> + Settings + </a> + </li> + </ul> + + <div class="sidebar-bottom"> + <button class="btn-rescan" onclick="rescanLibraryGlobal()" id="rescan-btn"> + <svg width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2.5"> + <polyline points="23 4 23 10 17 10"/> + <path d="M20.49 15a9 9 0 1 1-2.12-9.36L23 10"/> + </svg> + <span id="rescan-label">Rescan library</span> + </button> + </div> +</aside> + +<script> + function toggleSidebar() { + document.getElementById('sidebar').classList.toggle('open'); + document.getElementById('sidebar-overlay').classList.toggle('open'); + } + function closeSidebar() { + 
document.getElementById('sidebar').classList.remove('open'); + document.getElementById('sidebar-overlay').classList.remove('open'); + } + // Close sidebar on any nav link click (mobile) + document.querySelectorAll('.sidebar-nav a').forEach(a => { + a.addEventListener('click', () => { if (window.innerWidth <= 768) closeSidebar(); }); + }); + + function applyLibraryCounts(books) { + const active = books.filter(b => !b.archived); + const wtrCount = active.filter(b => b.want_to_read).length; + const newCount = active.filter(b => b.needs_review).length; + const seriesCount = new Set(active.filter(b => b.series).map(b => b.series)).size; + const authorCount = new Set(active.map(b => b.author).filter(Boolean)).size; + const publisherCount = new Set(active.map(b => b.publisher).filter(Boolean)).size; + const archivedCount = books.filter(b => b.archived).length; + + const setCount = (id, value) => { + const el = document.getElementById(id); + if (el) el.textContent = value || ''; + }; + + setCount('count-all', active.length); + setCount('count-wtr', wtrCount); + setCount('count-new', newCount); + setCount('count-series', seriesCount); + setCount('count-authors', authorCount); + setCount('count-publishers', publisherCount); + setCount('count-archived', archivedCount); + } + + async function refreshLibraryCounts() { + try { + const resp = await fetch('/library/list'); + if (!resp.ok) return; + const books = await resp.json(); + applyLibraryCounts(books); + } catch (_) { + // silently ignore; sidebar remains usable without counts + } + } + + async function rescanLibraryGlobal() { + const btn = document.getElementById('rescan-btn'); + const label = document.getElementById('rescan-label'); + if (btn) btn.disabled = true; + if (label) label.textContent = 'Scanning…'; + try { + const resp = await fetch('/library/rescan', { method: 'POST' }); + if (!resp.ok) throw new Error('rescan failed'); + await refreshLibraryCounts(); + } catch (_) { + alert('Rescan failed.'); + } finally { + if (btn) btn.disabled = false; + if (label) label.textContent = 'Rescan library'; + } + } + + refreshLibraryCounts(); +</script> diff --git a/containers/novela/templates/backup.html b/containers/novela/templates/backup.html new file mode 100644 index 0000000..a777d52 --- /dev/null +++ b/containers/novela/templates/backup.html @@ -0,0 +1,274 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="UTF-8"/> + <meta name="viewport" content="width=device-width, initial-scale=1.0"/> + <title>Novela - Backup + + + + + + + {% include "_sidebar.html" %} + +
+
Backup
+ +
+
Run
+

+ Use Dry Run to validate without uploading (including `pg_dump`). +

+
+ + + +
+
+
+ +
+
Health
+
+
+ +
+
Latest Status
+
+
+ +
+
History (Last 20)
+
+ + + + + + + + + + + + + +
IDStatusFilesBytesStartedFinishedError
+
+
+
+ + + + diff --git a/containers/novela/templates/book.html b/containers/novela/templates/book.html new file mode 100644 index 0000000..750bd45 --- /dev/null +++ b/containers/novela/templates/book.html @@ -0,0 +1,319 @@ + + + + + + Novela — {{ title or filename }} + + + + + + + +{% include "_sidebar.html" %} + +
+
+ +
+
+ + {% if has_cover %} + {{ title }} + {% endif %} +
+ + +
+ + +
+
{{ title or filename }}
+ {% if author %}{% endif %} + +
+ {% if series %} +
+ Series + {{ series }}{% if series_index %} [{{ series_index }}]{% endif %} +
+ {% endif %} +
+ Publisher + {% if publisher %} + {{ publisher }} + {% else %} + No publisher + {% endif %} +
+ {% if publication_status %} +
+ Status + + {% set st = publication_status | lower %} + + {{ publication_status }} + + +
+ {% endif %} + {% if publish_date %} +
+ Updated + {{ publish_date }} +
+ {% endif %} + {% if genres %} +
+ Genres + {% for g in genres %}{{ g }}{% endfor %} +
+ {% endif %} + {% if subgenres %} +
+ Sub-genres + {% for g in subgenres %}{{ g }}{% endfor %} +
+ {% endif %} + {% if tags %} +
+ Tags + {% for g in tags %}{{ g }}{% endfor %} +
+ {% endif %} + {% if description %} +
+ Description +
{{ description }}
+
+ {% endif %} + {% if source_url %} +
+ Source + + + {{ source_url }} + + +
+ {% endif %} +
+ + {% if progress > 0 %} +
+
Reading progress
+
+
+
+
{{ progress }}% complete
+
+ {% endif %} + + {% if read_count > 0 %} +
+ Read {{ read_count }}× + {% if last_read %} + · Last read {{ last_read[:10] }} + {% endif %} +
+ {% endif %} + +
+ + + + + + {% if progress > 0 %}Continue reading{% else %}Start reading{% endif %} + + {% if progress > 0 %} + + {% endif %} + + + + + + + Download + + + + + + + + + + Edit EPUB + + + + +
+
+
+
+ +
+
+
+ Edit metadata + +
+
+
+
+
+
+
+
+
+ + +
+
+
+
+
+ +
+
+ +
+ +
+
+
+ +
+
+ +
+ +
+
+
+ +
+
+ +
+ +
+
+ +
+ + + + + + + + + diff --git a/containers/novela/templates/credentials.html b/containers/novela/templates/credentials.html new file mode 100644 index 0000000..9ef2564 --- /dev/null +++ b/containers/novela/templates/credentials.html @@ -0,0 +1,406 @@ + + + + + + Novela — Credentials + + + + + + + +{% include "_sidebar.html" %} + +
+ + +
+
Saved Credentials
+
    +
  • No credentials saved yet.
  • +
+
+ + +
+
Add Credentials
+ + +
Tip: use dropbox with the access token in Password.
+ + +
+
+ + +
+
+ +
+ + +
+
+
+ + + +
+ +
+ + + + diff --git a/containers/novela/templates/debug.html b/containers/novela/templates/debug.html new file mode 100644 index 0000000..97f276c --- /dev/null +++ b/containers/novela/templates/debug.html @@ -0,0 +1,327 @@ + + + + + + Novela — Debug + + + + + + + +{% include "_sidebar.html" %} + +
+ +
+
Inspect URL
+ + + +
+ +
+ +
+ + + + diff --git a/containers/novela/templates/editor.html b/containers/novela/templates/editor.html new file mode 100644 index 0000000..46d794c --- /dev/null +++ b/containers/novela/templates/editor.html @@ -0,0 +1,97 @@ + + + + + + Novela — Edit {{ title or filename }} + + + + + + + +
+ + + + + {{ (title or filename) | truncate(30, True) }} + +
+
+ + + + + + + +
+
+ + +
+ +
+
+ + + + + + + + + diff --git a/containers/novela/templates/grabber.html b/containers/novela/templates/grabber.html new file mode 100644 index 0000000..109612a --- /dev/null +++ b/containers/novela/templates/grabber.html @@ -0,0 +1,562 @@ + + + + + + Novela + + + + + + + +{% include "_sidebar.html" %} + +
+ + +
+
Book URL
+ + +
+ +
+ + +
+
Book info
+ +
+ +
+ + +
+ + cover preview +
+ Click to select a cover image +
+
+
+ + +
+ + +
+
Progress
+
Connecting...
+
+
+
+
    +
    +
    + + +
    +
    Done
    +
    +
    + + +
    +
    + +
    + + + + diff --git a/containers/novela/templates/home.html b/containers/novela/templates/home.html new file mode 100644 index 0000000..fff06a5 --- /dev/null +++ b/containers/novela/templates/home.html @@ -0,0 +1,422 @@ + + + + + + Novela — Home + + + + + + + +{% include "_sidebar.html" %} + +
    + + +
    + + + + + + + + + + + +
    + + + + +
    + + + + diff --git a/containers/novela/templates/index.html b/containers/novela/templates/index.html new file mode 100644 index 0000000..109612a --- /dev/null +++ b/containers/novela/templates/index.html @@ -0,0 +1,562 @@ + + + + + + Novela + + + + + + + +{% include "_sidebar.html" %} + +
    + + +
    +
    Book URL
    + + +
    + +
    + + +
    +
    Book info
    + +
    + +
    + + +
    + + cover preview +
    + Click to select a cover image +
    +
    +
    + + +
    + + +
    +
    Progress
    +
    Connecting...
    +
    +
    +
    +
      +
      +
      + + +
      +
      Done
      +
      +
      + + +
      +
      + +
      + + + + diff --git a/containers/novela/templates/library.html b/containers/novela/templates/library.html new file mode 100644 index 0000000..9995902 --- /dev/null +++ b/containers/novela/templates/library.html @@ -0,0 +1,78 @@ + + + + + + Novela — Library + + + + + + + +{% include "_sidebar.html" %} + + +
      +
      +
      + +
      All books
      +
      +
      + + + + + +
      +
      +
      + +
      Drop EPUB files here
      +
      or click to choose files
      +
      +
      +
      Loading…
      +
      +
      + + +
      +
      +
      Delete book
      +

      Delete ?
      This cannot be undone.

      +
      + + +
      +
      +
      + + +
      +
      +
      Add cover
      +

      +
      + + preview +
      Click to select a cover image
      +
      +
      + + +
      +
      +
      + + + + diff --git a/containers/novela/templates/reader.html b/containers/novela/templates/reader.html new file mode 100644 index 0000000..02e2ee5 --- /dev/null +++ b/containers/novela/templates/reader.html @@ -0,0 +1,455 @@ + + + + + + Novela — {{ title }} + + + + + + + +
      +
      + Loading… +
      + + +
      +
      +
      Reading settings
      +
      +
      + Content width + 65% +
      + +
      +
      + + +
      + + + + + + {{ title | truncate(30, True) }} + +
      +
      + +
      +
      + + +
      +
      +
      + + + +
      +
      + + + + + + + diff --git a/containers/novela/templates/settings.html b/containers/novela/templates/settings.html new file mode 100644 index 0000000..f0933a1 --- /dev/null +++ b/containers/novela/templates/settings.html @@ -0,0 +1,441 @@ + + + + + + Novela — Settings + + + + + + + +{% include "_sidebar.html" %} + +
      +
      Settings
      + + +
      +
      Reading history
      +
      + Reset all recorded reading sessions. This will permanently delete the entire reading history + and reset all counters on library cards and the Statistics page.
      + This action cannot be undone. +
      + + +
      + +
      +
      Break detection
      +
+ Patterns recognised as scene transitions during conversion. + Changes take effect on the next conversion. +
      + +
      +
Regex patterns
      +
      +
      + + +
      +
      + + +
      +
      + +
      + +
      +
      CSS classes
      +
      +
      + + +
      +
      + +
      + +
      +
      Test
      +
      + + +
      + +
      + + +
      +
      + + +
      +
      +
      Reset reading history
      +

      + This will permanently delete all reading sessions from the database. + Statistics will be cleared and all read counts on library cards will reset to zero.

      + Are you sure you want to continue? +

      +
      + + +
      +
      +
      + + + + diff --git a/containers/novela/templates/stats.html b/containers/novela/templates/stats.html new file mode 100644 index 0000000..96dfed8 --- /dev/null +++ b/containers/novela/templates/stats.html @@ -0,0 +1,320 @@ + + + + + + Novela — Statistics + + + + + + + + +{% include "_sidebar.html" %} + +
      +
      Reading Statistics
      + +
      +
      +
      Total reads
      +
      +
      +
      +
      Books read
      +
      +
      unique titles
      +
      +
      +
      Favourite genre
      +
      +
      +
      +
      +
      Publisher
      +
      +
      +
      +
      + + +
      +
      +
      Reads per month — last 12 months
      +
      +
      +
      + + +
      +
      +
      Day of the week
      +
      +
      +
      +
      Hour of the day
      +
      +
      +
      + + +
      +
      +
      Genre distribution (library)
      +
      +
      +
      +
      Most read books
      +
      +
      +
      + + +
      +
      Reading history — last 50 sessions
      +
      +
      Loading…
      +
      +
      +
      + + + + diff --git a/containers/novela/xhtml.py b/containers/novela/xhtml.py new file mode 100644 index 0000000..0db7fde --- /dev/null +++ b/containers/novela/xhtml.py @@ -0,0 +1,169 @@ +import re +from html import escape as he + +from bs4 import NavigableString, Tag + +BREAK_PATTERNS = [ + re.compile(r"^\s*[\*\-]{3,}\s*$"), # *** of --- + re.compile(r"^\s*[·•◦‣⁃]\s*[·•◦‣⁃]\s*[·•◦‣⁃]\s*$"), # • • • + re.compile(r"^\s*~{2,}\s*$"), # ~~ + re.compile(r"^\s*={3,}\s*$"), # === + re.compile(r"^\s*#{3,}\s*$"), # ### + re.compile(r"^\s*[oO0]{1,3}\s*$"), # oOo + re.compile(r"^\s*[-–—]\s*[oO0]\s*[-–—]\s*$"), # -o- / —O— + re.compile(r"^\s*[<>]+\s*[·•*]\s*[<>]+\s*$"), # <<<<<·>>>>> +] + +BREAK_CSS_CLASSES = [ + "hr", "separator", "section-break", "divider", "break", + "chapterbreak", "scene-break", "scenebreak", +] +# Normalised set (hyphens removed, lowercase) for exact-match checking. +# Substring matching caused false positives: e.g. "ipsPageBreak" contains +# "break" but is a layout class, not a scene-break marker. +_BREAK_CSS_NORM = frozenset(b.replace("-", "") for b in BREAK_CSS_CLASSES) + +# --------------------------------------------------------------------------- +# Runtime-configurable overrides (populated from DB by main.py before scraping) +# --------------------------------------------------------------------------- + +_active_patterns: list | None = None # None → fall back to BREAK_PATTERNS +_active_css_norm: frozenset | None = None # None → fall back to _BREAK_CSS_NORM + + +def configure_break_patterns(regex_strings: list[str], css_classes: list[str]) -> None: + """Override the active break patterns with values loaded from the database. + + Called by main.py before each scrape so user-edited patterns take effect + without requiring a server restart. + """ + global _active_patterns, _active_css_norm + compiled = [] + for p in regex_strings: + try: + compiled.append(re.compile(p)) + except re.error: + pass + _active_patterns = compiled + _active_css_norm = frozenset(c.lower().replace("-", "") for c in css_classes) + + +def _get_patterns() -> list: + return _active_patterns if _active_patterns is not None else BREAK_PATTERNS + + +def _get_css_norm() -> frozenset: + return _active_css_norm if _active_css_norm is not None else _BREAK_CSS_NORM + + +def is_break_element(el, empty_p_is_spacer: bool = False) -> bool: + """Detect scene breaks based on tag, class, or text pattern.""" + patterns = _get_patterns() + css_norm = _get_css_norm() + if isinstance(el, Tag): + if el.name == "hr": + return True + classes = el.get("class", []) + for cls in classes: + if cls.lower().replace("-", "") in css_norm: + return True + # Empty paragraph (whitespace or   only) counts as a break, + # unless the content uses them as spacers between every paragraph. 
+ if el.name == "p" and not empty_p_is_spacer: + child_tags = [c for c in el.children if isinstance(c, Tag)] + if not child_tags and not el.get_text().replace("\xa0", "").strip(): + return True + # Image that represents a break + if el.name == "img": + src = el.get("src", "").lower() + alt = el.get("alt", "").lower() + if any(b in src or b in alt for b in ["break", "divider", "separator", "hr"]): + return True + # Element containing only a single break image + children = [c for c in el.children if not (isinstance(c, NavigableString) and not c.strip())] + if len(children) == 1 and isinstance(children[0], Tag) and children[0].name == "img": + return is_break_element(children[0]) + # Text pattern + text = el.get_text() + for pat in patterns: + if pat.match(text): + return True + elif isinstance(el, NavigableString): + for pat in patterns: + if pat.match(str(el)): + return True + return False + + +def element_to_xhtml(el, break_img_path: str = "../Images/break.png", empty_p_is_spacer: bool = False) -> str: + """Convert a BeautifulSoup element to an XHTML fragment.""" + if is_break_element(el, empty_p_is_spacer): + result = f'
      ' + # HTML parsers (notably html.parser) can nest subsequent siblings inside + # void elements like
<hr/>, so a break element may contain actual content
+        # as children. Process those children so no text is silently discarded.
+        if isinstance(el, Tag):
+            trailer = "".join(
+                element_to_xhtml(c, break_img_path, empty_p_is_spacer)
+                for c in el.children
+            )
+            if trailer.strip():
+                result += "\n" + trailer
+        return result
+
+    if isinstance(el, NavigableString):
+        text = str(el)
+        if text.strip():
+            return he(text)
+        return ""
+
+    if el.name in ("p", "div"):
+        inner = "".join(element_to_xhtml(c, break_img_path, empty_p_is_spacer) for c in el.children)
+        inner = inner.strip()
+        if not inner:
+            return ""
+        return f"<p>{inner}</p>\n"
+
+    if el.name in ("em", "i"):
+        inner = "".join(element_to_xhtml(c, break_img_path, empty_p_is_spacer) for c in el.children)
+        return f"<em>{inner}</em>"
+
+    if el.name in ("strong", "b"):
+        inner = "".join(element_to_xhtml(c, break_img_path, empty_p_is_spacer) for c in el.children)
+        return f"<strong>{inner}</strong>"
+
+    if el.name in ("h1", "h2", "h3", "h4"):
+        inner = "".join(element_to_xhtml(c, break_img_path, empty_p_is_spacer) for c in el.children)
+        return f"<{el.name}>{inner}</{el.name}>\n"
+
+    if el.name == "br":
+        return "<br/>
      " + + if el.name in ("sup", "sub"): + inner = "".join(element_to_xhtml(c, break_img_path, empty_p_is_spacer) for c in el.children) + return inner + + if el.name == "a": + inner = "".join(element_to_xhtml(c, break_img_path, empty_p_is_spacer) for c in el.children) + return inner # strip links, keep text + + if el.name == "img": + src = el.get("src", "") + alt = he(el.get("alt", "")) + if src: + return f'{alt}\n' + return "" + + if el.name == "figure": + parts = [] + for c in el.children: + if isinstance(c, Tag) and c.name == "figcaption": + continue + parts.append(element_to_xhtml(c, break_img_path, empty_p_is_spacer)) + return "".join(parts) + + # Other tags: recurse + parts = [] + for c in el.children: + parts.append(element_to_xhtml(c, break_img_path, empty_p_is_spacer)) + return "".join(parts) diff --git a/docs/BLUEPRINT.md b/docs/BLUEPRINT.md new file mode 100644 index 0000000..5917940 --- /dev/null +++ b/docs/BLUEPRINT.md @@ -0,0 +1,420 @@ +# Novela 2.0 - Blauwdruk + +> Vervangt repository `story-grabber`. Nieuwe repo: **Novela**. +> Stack: FastAPI · Jinja2 · plain JS · PostgreSQL 16 · Docker / Portainer + +--- + +## 1. Doelstelling + +Novela 2.0 is een volledig zelfgehoste media-bibliotheek en e-reader voor epub, pdf en cbr/cbz. +Het vervangt Kavita (library), Calibre (metadata), en Sigil (epub editor) in een web-applicatie. + +Kernprincipe: **de database is de snelle index, het bestand is de bron van waarheid.** +Elke schrijfactie raakt altijd beide: eerst het bestand, dan de database. Lezen gaat altijd via de database. + +--- + +## 2. Wat behouden blijft uit v1 + +| Module | Bestand | Toelichting | +|---|---|---| +| EPUB bouw | `epub.py` | `make_epub`, `make_chapter_xhtml`, `add_cover_to_epub` | +| EPUB lezen/schrijven | `epub.py` | `read_epub_file`, `write_epub_file` | +| XHTML conversie | `xhtml.py` | `element_to_xhtml`, `is_break_element`, `configure_break_patterns` | +| Scrapers | `scrapers/` | base, awesomedude, gayauthors, plugin-patroon blijft | +| SSE job streaming | `main.py` | `JOBS` dict + `/events/{job_id}` `StreamingResponse` | +| Migrations patroon | `migrations.py` | idempotente `CREATE IF NOT EXISTS`, `run_migrations()` bij startup | +| Cover cache | DB tabel | `library_cover_cache`, WebP thumbnails 300x450 | +| Reading progress | DB tabel | CFI voor epub, paginanummer voor pdf/cbr | +| Reading sessions | DB tabel | leesgeschiedenis per boek | +| Break patterns | DB tabel | regex + css_class patronen voor scene-breaks | + +--- + +## 3. Projectstructuur + +```text +novela/ +├── containers/ +│ └── novela/ +│ ├── main.py +│ ├── migrations.py +│ ├── db.py +│ ├── epub.py +│ ├── xhtml.py +│ ├── pdf.py +│ ├── cbr.py +│ ├── routers/ +│ │ ├── __init__.py +│ │ ├── library.py +│ │ ├── reader.py +│ │ ├── editor.py +│ │ ├── grabber.py +│ │ ├── backup.py +│ │ └── settings.py +│ ├── scrapers/ +│ ├── static/ +│ ├── templates/ +│ ├── requirements.txt +│ └── Dockerfile +├── stack/ +│ ├── stack.yml +│ └── novela.env +└── docs/ + ├── BLUEPRINT.md + └── TECHNICAL.md +``` + +--- + +## 4. Bibliotheek op schijf + +`output/` wordt `library/`. + +```text +library/ +├── epub/ +│ └── {Publisher}/ +│ └── {Author}/ +│ ├── Stories/ +│ │ └── {Titel}.epub +│ └── Series/ +│ └── {Serienaam}/ +│ └── {001 - Titel}.epub +├── pdf/ +│ └── {Author}/ +│ └── {Titel}.pdf +├── comics/ +│ └── {Author of Serienaam}/ +│ └── {001 - Titel}.cbr +└── covers/ +``` + +Naamgeving-regels: +- Ongeldige tekens weg: `< > : " / \\ | ? 
*` en control chars
+- Max 80 characters per folder segment, 140 for the filename
+- On conflict: `Titel (2).epub`, `Titel (3).epub`, etc.
+
+Renaming after a metadata edit:
+- Move the file on disk
+- Update DB references: `library`, `book_tags`, `reading_progress`, `reading_sessions`, `library_cover_cache`
+- Clean up empty directories
+
+---
+
+## 5. Database schema
+
+### 5.1 `library`
+
+```sql
+CREATE TABLE library (
+    id SERIAL PRIMARY KEY,
+    filename VARCHAR(600) UNIQUE NOT NULL,
+    media_type VARCHAR(10) NOT NULL DEFAULT 'epub',
+    title VARCHAR(500),
+    author VARCHAR(255),
+    publisher VARCHAR(255),
+    series VARCHAR(500),
+    series_index INTEGER DEFAULT 0,
+    publication_status VARCHAR(100),
+    has_cover BOOLEAN DEFAULT FALSE,
+    description TEXT DEFAULT '',
+    source_url VARCHAR(1000),
+    publish_date DATE,
+    archived BOOLEAN DEFAULT FALSE,
+    want_to_read BOOLEAN DEFAULT FALSE,
+    needs_review BOOLEAN DEFAULT FALSE,
+    created_at TIMESTAMP DEFAULT NOW(),
+    updated_at TIMESTAMP DEFAULT NOW()
+);
+```
+
+### 5.2 `book_tags`
+
+```sql
+CREATE TABLE book_tags (
+    id SERIAL PRIMARY KEY,
+    filename VARCHAR(600) NOT NULL REFERENCES library(filename) ON DELETE CASCADE,
+    tag VARCHAR(255) NOT NULL,
+    tag_type VARCHAR(20) NOT NULL,
+    UNIQUE (filename, tag, tag_type)
+);
+CREATE INDEX idx_book_tags_filename ON book_tags (filename);
+```
+
+`tag_type`:
+- `genre`
+- `subgenre`
+- `tag`
+- `subject`
+
+### 5.3 `reading_progress`
+
+```sql
+CREATE TABLE reading_progress (
+    id SERIAL PRIMARY KEY,
+    filename VARCHAR(600) UNIQUE NOT NULL REFERENCES library(filename) ON DELETE CASCADE,
+    cfi TEXT,
+    page INTEGER,
+    progress INTEGER DEFAULT 0,
+    updated_at TIMESTAMP DEFAULT NOW()
+);
+```
+
+### 5.4 `reading_sessions`
+
+```sql
+CREATE TABLE reading_sessions (
+    id SERIAL PRIMARY KEY,
+    filename VARCHAR(600) NOT NULL REFERENCES library(filename) ON DELETE CASCADE,
+    read_at TIMESTAMP DEFAULT NOW()
+);
+CREATE INDEX idx_reading_sessions_filename ON reading_sessions (filename);
+```
+
+### 5.5 `library_cover_cache`
+
+```sql
+CREATE TABLE library_cover_cache (
+    filename VARCHAR(600) PRIMARY KEY REFERENCES library(filename) ON DELETE CASCADE,
+    mime_type VARCHAR(100) NOT NULL,
+    thumb_webp BYTEA NOT NULL,
+    updated_at TIMESTAMP DEFAULT NOW()
+);
+```
+
+### 5.6 `credentials`
+
+```sql
+CREATE TABLE credentials (
+    id SERIAL PRIMARY KEY,
+    site VARCHAR(255) UNIQUE NOT NULL,
+    username VARCHAR(255) NOT NULL,
+    password VARCHAR(255) NOT NULL,
+    updated_at TIMESTAMP DEFAULT NOW()
+);
+```
+
+### 5.7 `break_patterns`
+
+```sql
+CREATE TABLE break_patterns (
+    id SERIAL PRIMARY KEY,
+    pattern_type VARCHAR(20) NOT NULL,
+    pattern TEXT NOT NULL,
+    enabled BOOLEAN DEFAULT TRUE,
+    is_default BOOLEAN DEFAULT FALSE,
+    created_at TIMESTAMP DEFAULT NOW(),
+    UNIQUE (pattern_type, pattern)
+);
+```
+
+### 5.8 `backup_log`
+
+```sql
+CREATE TABLE backup_log (
+    id SERIAL PRIMARY KEY,
+    status VARCHAR(20) NOT NULL,
+    files_count INTEGER,
+    size_bytes BIGINT,
+    error_msg TEXT,
+    started_at TIMESTAMP DEFAULT NOW(),
+    finished_at TIMESTAMP
+);
+```
+
+---
+
+## 6. Write principle: file and database in sync
+
+Order for every edit:
+1. Edit the file on disk
+2. Update the database
+3. Return success
+
+Never update only the DB without the file.
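+
+As a rough sketch of this order for a single metadata edit (the `read_epub_file`/`write_epub_file` helpers exist in `epub.py`, but their exact signatures and the connection handling here are assumptions, not the final implementation):
+
+```python
+import os
+
+from epub import read_epub_file, write_epub_file  # v1 helpers, signatures assumed
+
+
+def update_title(conn, library_root: str, filename: str, new_title: str) -> None:
+    """Write-order sketch: 1) file on disk, 2) database row, 3) report success."""
+    path = os.path.join(library_root, "epub", filename)
+    book = read_epub_file(path)          # the file is the source of truth
+    book["title"] = new_title
+    write_epub_file(path, book)          # step 1: update the EPUB itself
+    with conn.cursor() as cur:           # step 2: then refresh the index
+        cur.execute(
+            "UPDATE library SET title = %s, updated_at = NOW() WHERE filename = %s",
+            (new_title, filename),
+        )
+    conn.commit()                        # step 3: only now return success
+```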
+
+---
+
+## 7. Cover strategy
+
+Storage:
+- EPUB cover inside the file (`OEBPS/Images/cover.{ext}`)
+- Thumbnail as `300x450` WebP in `library_cover_cache`
+
+Missing cover:
+- If there is no cover: add the tag `Cover Missing`
+- A UI upload writes the cover into the EPUB and into the cache
+
+Retrieval:
+- Primary: `/library/cover-cached/{filename}`
+- Fallback: `/library/cover/{filename}`
+
+PDF and CBR:
+- PDF: first page as thumbnail
+- CBR/CBZ: first image as thumbnail
+
+---
+
+## 8. Delete flow
+
+`DELETE /library/file/{filename}`:
+1. Delete the file
+2. Prune empty directories
+3. Delete from `library` (the cascade removes the related rows)
+
+---
+
+## 9. Router overview
+
+### 9.1 `routers/library.py`
+- `GET /library`
+- `GET /api/library`
+- `POST /library/rescan`
+- `POST /library/import`
+- `DELETE /library/file/{filename}`
+- `GET /library/cover/{filename}`
+- `GET /library/cover-cached/{filename}`
+- `POST /library/cover/{filename}`
+- `POST /library/want-to-read/{filename}`
+- `POST /library/archive/{filename}`
+- `GET /home`
+- `GET /api/home`
+- `GET /stats`
+- `GET /api/stats`
+
+### 9.2 `routers/reader.py`
+- `GET /library/read/{filename}`
+- `GET /library/book/{filename}`
+- `PATCH /library/book/{filename}`
+- `GET /library/epub/{filename}`
+- `GET /library/chapters/{filename}`
+- `GET /library/chapter/{index}/{filename}`
+- `GET /library/chapter-img/{path}`
+- `GET /library/pdf/{filename}`
+- `GET /library/cbr/{filename}/{page}`
+- `GET /library/progress/{filename}`
+- `POST /library/progress/{filename}`
+- `DELETE /library/progress/{filename}`
+- `POST /library/mark-read/{filename}`
+- `GET /api/genres`
+
+### 9.3 `routers/editor.py`
+- `GET /library/editor/{filename}`
+- `GET /api/edit/chapter/{index}/{filename}`
+- `POST /api/edit/chapter/{index}/{filename}`
+- `POST /api/edit/chapter/add/{filename}`
+- `DELETE /api/edit/chapter/{index}/{filename}`
+
+### 9.4 `routers/grabber.py`
+- `GET /grabber`
+- `POST /preload`
+- `POST /convert`
+- `GET /events/{job_id}`
+- `GET /debug`
+- `POST /debug/run`
+- `GET /credentials`
+- `POST /credentials`
+- `DELETE /credentials/{site}`
+
+### 9.5 `routers/backup.py`
+- `GET /backup`
+- `GET /api/backup/status`
+- `POST /api/backup/run`
+- `GET /api/backup/history`
+
+### 9.6 `routers/settings.py`
+- `GET /settings`
+- `GET /api/break-patterns`
+- `POST /api/break-patterns`
+- `PATCH /api/break-patterns/{id}`
+- `DELETE /api/break-patterns/{id}`
+- `DELETE /api/reading-history`
+
+---
+
+## 10. New modules
+
+### 10.1 `db.py`
+Shared psycopg2 connection pool (`init_pool`, `get_conn`, `release_conn`).
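+
+A minimal sketch of what this module could look like, assuming psycopg2's `ThreadedConnectionPool` and the environment variables from `stack/stack.yml` (details are not fixed by this blueprint):
+
+```python
+# db.py - shared connection pool (sketch)
+import os
+from psycopg2.pool import ThreadedConnectionPool
+
+_pool = None
+
+def init_pool(minconn: int = 1, maxconn: int = 10) -> None:
+    """Create the process-wide pool once, at application startup."""
+    global _pool
+    _pool = ThreadedConnectionPool(
+        minconn,
+        maxconn,
+        host=os.environ["POSTGRES_HOST"],
+        port=os.environ.get("POSTGRES_PORT", "5432"),
+        dbname=os.environ["POSTGRES_DB"],
+        user=os.environ["POSTGRES_USER"],
+        password=os.environ["POSTGRES_PASSWORD"],
+    )
+
+def get_conn():
+    """Borrow a connection; the caller must hand it back via release_conn()."""
+    return _pool.getconn()
+
+def release_conn(conn) -> None:
+    _pool.putconn(conn)
+
+def close_pool() -> None:
+    if _pool is not None:
+        _pool.closeall()
+```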
+
+### 10.2 `pdf.py`
+PyMuPDF rendering (`pdf_render_page`), page count, and cover thumb.
+
+### 10.3 `cbr.py`
+RAR/ZIP page list, page extraction, and cover thumb.
+
+---
+
+## 11. Cover flow per media type
+
+| Action | EPUB | PDF | CBR/CBZ |
+|---|---|---|---|
+| Cover import | From OPF/Images | First page render | First image from the archive |
+| Thumbnail | Pillow -> WebP | PyMuPDF + Pillow -> WebP | Pillow -> WebP |
+| Storage | EPUB + cache | cache | cache |
+| Replace cover | Yes | No | No |
+| No cover | `Cover Missing` tag | `Cover Missing` tag | `Cover Missing` tag |
+
+---
+
+## 12. Database setup
+
+- Start with a clean v2 database
+- No migration path from v1 data
+- `run_migrations()` at startup
+- `CREATE TABLE IF NOT EXISTS` everywhere, idempotent
+
+---
+
+## 13. Docker stack
+
+See [`stack/stack.yml`](../stack/stack.yml).
+
+Important:
+- The app container publishes `8099 -> 8000` (host -> container)
+- PostgreSQL 16
+- Adminer on `8098`
+- `NOVELA_MASTER_KEY` lives in `stack/novela.env` and is passed through in `stack/stack.yml` for encrypted credentials
+
+---
+
+## 14. Requirements
+
+See [`containers/novela/requirements.txt`](../containers/novela/requirements.txt).
+
+---
+
+## 15. Preparing the files
+
+Source: `/docker/develop/story-grabber/containers/story-grabber`.
+Target: `/docker/develop/novela/containers/novela`.
+
+Carry over:
+- `epub.py`
+- `xhtml.py`
+- `scrapers/*`
+- `static/*`
+- `templates/*`
+
+Write new:
+- `main.py`, `db.py`, `pdf.py`, `cbr.py`, `migrations.py`
+- `routers/*`
+
+---
+
+## 16. Build order
+
+1. `db.py`
+2. `migrations.py`
+3. `main.py`
+4. `routers/library.py`
5. `routers/reader.py`
+6. `routers/editor.py`
+7. `routers/grabber.py`
+8. `routers/settings.py`
+9. `pdf.py` + reader extension
+10. `cbr.py` + reader extension
+11. `routers/backup.py`
+12. Extend `routers/library.py` for pdf/cbr import
diff --git a/docs/TECHNICAL.md b/docs/TECHNICAL.md
new file mode 100644
index 0000000..6e09903
--- /dev/null
+++ b/docs/TECHNICAL.md
@@ -0,0 +1,100 @@
+# Novela 2.0 - Technical Plan
+
+## Scope
+This document describes the technical execution of the blueprint in implementable steps.
+
+## Architectural Rules
+- The file is the source of truth.
+- The database is the fast index.
+- Writes: file first, then DB.
+- Reads: primarily from the DB, with scan/rescan for recovery.
+
+## Data Integrity Rules
+- All child tables reference `library(filename)` with `ON DELETE CASCADE`.
+- Deleting a book is a single `DELETE FROM library` after the file delete.
+- The rename flow must update `filename` in sync across:
+  - `library`
+  - `book_tags`
+  - `reading_progress`
+  - `reading_sessions`
+  - `library_cover_cache`
+
+## Runtime Lifecycle
+- Startup:
+  1. `init_pool()`
+  2. `run_migrations()`
+  3. mount routers
+- Shutdown:
+  1. `close_pool()`
+
+## Module Responsibilities
+- `db.py`: pool ownership + connection helpers.
+- `migrations.py`: schema + seeds.
+- `routers/library.py`: import/scan/delete/cover/home/stats.
+- `routers/reader.py`: reading + progress + metadata patch + epub editor endpoints.
+- `routers/editor.py`: eventual dedicated editor routes (may delegate initially).
+- `routers/grabber.py`: scraper orchestration + credentials + SSE.
+- `routers/backup.py`: Dropbox sync + pg_dump + logging.
+- `routers/settings.py`: break patterns + cleaning endpoints.
+
+## Endpoint Contract Notes
+- All file routes use safe path resolution against traversal.
+- Cover endpoint behaviour:
+  - cached first
+  - fallback to raw extract
+  - otherwise 404
+- Progress payload:
+  - EPUB: `{ cfi, progress }`
+  - PDF/CBR: `{ page, progress }`
+
+## Backup Plan
+- `POST /api/backup/run` (sketched below):
+  - insert `running` into `backup_log`
+  - sync files to Dropbox (incremental on mtime+size)
+  - run `pg_dump` and upload the `.sql`
+  - update `backup_log` to `success`/`error`
+- OAuth token storage via `credentials` (`site='dropbox'`), encrypted at rest (Fernet) in the database.
+- Managed through the web interface at `/credentials-manager` (site: `dropbox`, token in the password field).
+- Legacy plaintext credentials are migrated to encrypted form automatically when they are read.
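+
+A rough sketch of the run flow under these rules; `sync_library_to_dropbox` and `upload_to_dropbox` are hypothetical placeholders, and only the logging/pg_dump skeleton is shown:
+
+```python
+import os
+import subprocess
+from datetime import datetime
+
+from db import get_conn, release_conn  # planned db.py pool helpers
+
+def run_backup() -> None:
+    conn = get_conn()
+    try:
+        # 1. Log the start of the run.
+        with conn.cursor() as cur:
+            cur.execute("INSERT INTO backup_log (status) VALUES ('running') RETURNING id")
+            log_id = cur.fetchone()[0]
+        conn.commit()
+
+        try:
+            # 2. Incremental file sync (hypothetical helper, compares mtime + size).
+            files_count, size_bytes = sync_library_to_dropbox()
+            # 3. Dump the database and upload the .sql file.
+            dump_path = f"/tmp/novela-{datetime.now():%Y%m%d-%H%M%S}.sql"
+            subprocess.run(
+                ["pg_dump", "-h", os.environ["POSTGRES_HOST"],
+                 "-U", os.environ["POSTGRES_USER"],
+                 "-d", os.environ["POSTGRES_DB"], "-f", dump_path],
+                check=True,
+                env={**os.environ, "PGPASSWORD": os.environ["POSTGRES_PASSWORD"]},
+            )
+            upload_to_dropbox(dump_path)  # hypothetical helper
+            status, error_msg = "success", None
+        except Exception as exc:  # keep the log row accurate on any failure
+            files_count, size_bytes, status, error_msg = None, None, "error", str(exc)
+
+        # 4. Close the log entry.
+        with conn.cursor() as cur:
+            cur.execute(
+                "UPDATE backup_log SET status = %s, files_count = %s, size_bytes = %s, "
+                "error_msg = %s, finished_at = NOW() WHERE id = %s",
+                (status, files_count, size_bytes, error_msg, log_id),
+            )
+        conn.commit()
+    finally:
+        release_conn(conn)
+```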
+
+## Migration Plan from Current State
+1. Keep the stable v1 modules (`epub.py`, `xhtml.py`, scrapers, templates/static).
+2. Introduce the new routers without breaking the existing frontend (compat routes where needed).
+3. Switch the library root to `library/`.
+4. Enable the PDF/CBR scan and reader paths.
+5. Split the editor routes out of the reader into the dedicated `editor.py`.
+6. Migrate the full scrape->epub flow to `grabber.py`.
+7. Finish the backup feature completely (Dropbox + pg_dump).
+
+## Test Matrix
+- Import:
+  - EPUB with/without cover
+  - PDF with 1+ pages
+  - CBR/CBZ with images
+- Reader:
+  - EPUB CFI save/load
+  - PDF page render + page progress
+  - CBR page render + page progress
+- Metadata edit:
+  - rename path
+  - DB references updated
+  - old row cleanup
+- Delete:
+  - file removed
+  - empty dirs pruned
+  - cascaded records removed
+- Break patterns:
+  - create/update/delete/enable
+- Grabber:
+  - preload/debug
+  - convert job events
+- Backup:
+  - status/history
+  - success/error logging
+
+## Deployment Notes
+- The Docker image is built from `containers/novela`.
+- The stack comes from `stack/stack.yml`, with environment from `stack/novela.env`.
+- `NOVELA_MASTER_KEY` is required for encrypting/decrypting credentials in the database and must remain stable after it is first used.
+- The Postgres volume is persistent.
+- The library mount is persistent.
diff --git a/stack/novela.env b/stack/novela.env
new file mode 100644
index 0000000..e7b6ffb
--- /dev/null
+++ b/stack/novela.env
@@ -0,0 +1,7 @@
+POSTGRES_DB=novela
+POSTGRES_USER=novela
+POSTGRES_PASSWORD=change-me
+
+# Required for credential encryption/decryption (Fernet) in the DB.
+# Keep this stable after first use; changing it breaks decryption of existing credentials.
+NOVELA_MASTER_KEY=change-me-long-random-secret
diff --git a/stack/stack.yml b/stack/stack.yml
new file mode 100644
index 0000000..42d6e8e
--- /dev/null
+++ b/stack/stack.yml
@@ -0,0 +1,48 @@
+version: "3.8"
+services:
+  novela:
+    image: gitea.oskamp.info/ivooskamp/novela:dev
+    container_name: novela
+    restart: unless-stopped
+    ports:
+      - "8099:8000"
+    environment:
+      POSTGRES_HOST: postgres
+      POSTGRES_PORT: 5432
+      POSTGRES_DB: ${POSTGRES_DB}
+      POSTGRES_USER: ${POSTGRES_USER}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+      NOVELA_MASTER_KEY: ${NOVELA_MASTER_KEY}
+    volumes:
+      - /docker/appdata/novela/library:/app/library
+      - /docker/appdata/novela/config:/app/config
+    depends_on:
+      - postgres
+    networks:
+      - novela-net
+
+  postgres:
+    image: postgres:16
+    container_name: novela-db
+    restart: unless-stopped
+    environment:
+      POSTGRES_DB: ${POSTGRES_DB}
+      POSTGRES_USER: ${POSTGRES_USER}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+    volumes:
+      - /docker/appdata/novela/postgres:/var/lib/postgresql/data
+    networks:
+      - novela-net
+
+  adminer:
+    image: adminer:latest
+    container_name: novela-adminer
+    restart: unless-stopped
+    ports:
+      - "8098:8080"
+    networks:
+      - novela-net
+
+networks:
+  novela-net:
+    driver: bridge
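
The `NOVELA_MASTER_KEY` that the stack passes into the app container feeds the Fernet credential encryption described in the technical plan. A minimal sketch, assuming the key is a passphrase from which a Fernet key is derived (the derivation and helper names are illustrative, not part of the spec):

```python
# Sketch only: how NOVELA_MASTER_KEY could drive credential encryption.
import base64
import hashlib
import os
from cryptography.fernet import Fernet

def _fernet() -> Fernet:
    # Derive a stable urlsafe-base64 key of 32 bytes from the master secret.
    digest = hashlib.sha256(os.environ["NOVELA_MASTER_KEY"].encode()).digest()
    return Fernet(base64.urlsafe_b64encode(digest))

def encrypt_secret(plaintext: str) -> str:
    return _fernet().encrypt(plaintext.encode()).decode()

def decrypt_secret(token: str) -> str:
    return _fernet().decrypt(token.encode()).decode()
```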