diff --git a/containers/novela/main.py b/containers/novela/main.py
index d32781a..9aba2f4 100644
--- a/containers/novela/main.py
+++ b/containers/novela/main.py
@@ -6,6 +6,7 @@ from fastapi.staticfiles import StaticFiles
from db import close_pool, init_pool
from migrations import run_migrations
+from routers.backup import start_backup_scheduler, stop_backup_scheduler
from routers import (
backup_router,
editor_router,
@@ -20,9 +21,11 @@ from routers import (
async def lifespan(app: FastAPI):
init_pool()
run_migrations()
+ await start_backup_scheduler()
try:
yield
finally:
+ await stop_backup_scheduler()
close_pool()
diff --git a/containers/novela/migrations.py b/containers/novela/migrations.py
index b1b02d1..ef42d19 100644
--- a/containers/novela/migrations.py
+++ b/containers/novela/migrations.py
@@ -123,12 +123,14 @@ def migrate_create_credentials() -> None:
CREATE TABLE IF NOT EXISTS credentials (
id SERIAL PRIMARY KEY,
site VARCHAR(255) UNIQUE NOT NULL,
- username VARCHAR(255) NOT NULL,
- password VARCHAR(255) NOT NULL,
+ username TEXT NOT NULL,
+ password TEXT NOT NULL,
updated_at TIMESTAMP DEFAULT NOW()
)
"""
)
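+ # Also widen the columns on installs created before this change; the
+ # ALTERs are safe to re-run once the columns are already TEXT.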
+ _exec("ALTER TABLE credentials ALTER COLUMN username TYPE TEXT")
+ _exec("ALTER TABLE credentials ALTER COLUMN password TYPE TEXT")
def migrate_create_break_patterns() -> None:
@@ -191,6 +193,40 @@ def migrate_create_backup_log() -> None:
)
+def migrate_create_perf_indexes() -> None:
+ # Match library list sorting and common filters.
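+ # Postgres only uses an expression index when the query's ORDER BY uses
+ # the same expressions, so the COALESCE wrappers here mirror the library
+ # list query's ORDER BY exactly.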
+ _exec(
+ """
+ CREATE INDEX IF NOT EXISTS idx_library_sort_coalesce
+ ON library (
+ (COALESCE(publisher, '')),
+ (COALESCE(author, '')),
+ (COALESCE(series, '')),
+ series_index,
+ (COALESCE(title, ''))
+ )
+ """
+ )
+ _exec("CREATE INDEX IF NOT EXISTS idx_library_needs_review ON library (needs_review)")
+ _exec("CREATE INDEX IF NOT EXISTS idx_library_archived ON library (archived)")
+
+ # Speeds up per-book session counts and MAX(read_at) recent-read lookups.
+ _exec(
+ """
+ CREATE INDEX IF NOT EXISTS idx_reading_sessions_filename_readat
+ ON reading_sessions (filename, read_at DESC)
+ """
+ )
+
+ # Supports the ORDER BY filename, tag scan used to build the tag map.
+ _exec(
+ """
+ CREATE INDEX IF NOT EXISTS idx_book_tags_filename_tag
+ ON book_tags (filename, tag)
+ """
+ )
+
+
def run_migrations() -> None:
migrate_create_library()
migrate_create_book_tags()
@@ -200,4 +236,5 @@ def run_migrations() -> None:
migrate_create_credentials()
migrate_create_break_patterns()
migrate_create_backup_log()
+ migrate_create_perf_indexes()
migrate_seed_break_patterns()
diff --git a/containers/novela/routers/backup.py b/containers/novela/routers/backup.py
index 6f71447..0ce7ee9 100644
--- a/containers/novela/routers/backup.py
+++ b/containers/novela/routers/backup.py
@@ -1,3 +1,5 @@
+import asyncio
+import hashlib
import json
import os
import shutil
@@ -22,14 +24,21 @@ LIBRARY_DIR = Path(os.environ.get("LIBRARY_DIR", "library"))
CONFIG_DIR = Path(os.environ.get("CONFIG_DIR", "config"))
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
MANIFEST_PATH = CONFIG_DIR / "backup_manifest.json"
-DROPBOX_ROOT = (os.environ.get("DROPBOX_BACKUP_ROOT", "/novela") or "/novela").rstrip("/")
+DEFAULT_DROPBOX_ROOT = "/novela"
+DEFAULT_RETENTION_COUNT = 14
+DEFAULT_SCHEDULE_ENABLED = False
+DEFAULT_SCHEDULE_INTERVAL_HOURS = 24
+
+
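+# In-flight backup tasks keyed by backup_log id. Lets /api/backup/run refuse
+# concurrent runs and lets _has_running_backup() mark log rows with no live
+# task as interrupted after a restart.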
+BACKUP_TASKS: dict[int, asyncio.Task] = {}
+SCHEDULER_TASK: asyncio.Task | None = None
def _now_iso() -> str:
return datetime.now(timezone.utc).isoformat()
-def _load_manifest() -> dict[str, dict[str, float | int]]:
+def _load_manifest() -> dict[str, dict[str, float | int | str]]:
if not MANIFEST_PATH.exists():
return {}
try:
@@ -41,22 +50,25 @@ def _load_manifest() -> dict[str, dict[str, float | int]]:
return {}
-def _save_manifest(manifest: dict[str, dict[str, float | int]]) -> None:
+def _save_manifest(manifest: dict[str, dict[str, float | int | str]]) -> None:
MANIFEST_PATH.write_text(json.dumps(manifest, indent=2, sort_keys=True), encoding="utf-8")
-def _load_dropbox_token() -> str:
+def _dropbox_credential_details() -> dict:
with get_db_conn() as conn:
with conn:
with conn.cursor() as cur:
- cur.execute("SELECT username, password FROM credentials WHERE site = 'dropbox' LIMIT 1")
+ cur.execute(
+ "SELECT username, password, updated_at FROM credentials WHERE site = 'dropbox' LIMIT 1"
+ )
row = cur.fetchone()
if not row:
- return ""
+ return {"configured": False, "token": "", "updated_at": None}
- username_raw, password_raw = row
+ username_raw, password_raw, updated_at = row
username = decrypt_value(username_raw)
password = decrypt_value(password_raw)
+ token = (password or username or "").strip()
if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw):
cur.execute(
@@ -64,11 +76,252 @@ def _load_dropbox_token() -> str:
UPDATE credentials
SET username = %s, password = %s, updated_at = NOW()
WHERE site = 'dropbox'
+ RETURNING updated_at
""",
(encrypt_value(username), encrypt_value(password)),
)
+ upd = cur.fetchone()
+ if upd:
+ updated_at = upd[0]
- return (password or username or "").strip()
+ return {
+ "configured": bool(token),
+ "token": token,
+ "updated_at": updated_at.isoformat() if updated_at else None,
+ }
+
+
+def _load_dropbox_token() -> str:
+ return _dropbox_credential_details().get("token", "")
+
+
+def _normalize_dropbox_root(value: str | None) -> str:
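+ # Collapse duplicate slashes and force a single leading "/",
+ # e.g. "foo//bar/" -> "/foo/bar".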
+ root = (value or "").strip() or DEFAULT_DROPBOX_ROOT
+ if not root.startswith("/"):
+ root = "/" + root
+ root = "/" + "/".join(part for part in root.split("/") if part)
+ return root or DEFAULT_DROPBOX_ROOT
+
+
+def _dropbox_root_details() -> dict:
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ "SELECT username, password, updated_at FROM credentials WHERE site = 'dropbox_backup_root' LIMIT 1"
+ )
+ row = cur.fetchone()
+ if not row:
+ env_val = os.environ.get("DROPBOX_BACKUP_ROOT", DEFAULT_DROPBOX_ROOT)
+ return {
+ "root": _normalize_dropbox_root(env_val),
+ "updated_at": None,
+ }
+
+ username_raw, password_raw, updated_at = row
+ username = decrypt_value(username_raw)
+ password = decrypt_value(password_raw)
+ root = _normalize_dropbox_root(password or username or DEFAULT_DROPBOX_ROOT)
+
+ if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw):
+ cur.execute(
+ """
+ UPDATE credentials
+ SET username = %s, password = %s, updated_at = NOW()
+ WHERE site = 'dropbox_backup_root'
+ RETURNING updated_at
+ """,
+ (encrypt_value(""), encrypt_value(root)),
+ )
+ upd = cur.fetchone()
+ if upd:
+ updated_at = upd[0]
+
+ return {
+ "root": root,
+ "updated_at": updated_at.isoformat() if updated_at else None,
+ }
+
+
+def _load_dropbox_root() -> str:
+ return _dropbox_root_details().get("root", DEFAULT_DROPBOX_ROOT)
+
+
+def _dropbox_retention_details() -> dict:
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ "SELECT username, password, updated_at FROM credentials WHERE site = 'dropbox_backup_retention' LIMIT 1"
+ )
+ row = cur.fetchone()
+ if not row:
+ return {"retention_count": DEFAULT_RETENTION_COUNT, "updated_at": None}
+
+ username_raw, password_raw, updated_at = row
+ username = decrypt_value(username_raw)
+ password = decrypt_value(password_raw)
+ raw = (password or username or "").strip()
+ try:
+ retention_count = max(1, int(raw))
+ except Exception:
+ retention_count = DEFAULT_RETENTION_COUNT
+
+ if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw):
+ cur.execute(
+ """
+ UPDATE credentials
+ SET username = %s, password = %s, updated_at = NOW()
+ WHERE site = 'dropbox_backup_retention'
+ RETURNING updated_at
+ """,
+ (encrypt_value(""), encrypt_value(str(retention_count))),
+ )
+ upd = cur.fetchone()
+ if upd:
+ updated_at = upd[0]
+
+ return {
+ "retention_count": retention_count,
+ "updated_at": updated_at.isoformat() if updated_at else None,
+ }
+
+
+def _load_dropbox_retention_count() -> int:
+ return int(_dropbox_retention_details().get("retention_count", DEFAULT_RETENTION_COUNT))
+
+
+def _dropbox_schedule_details() -> dict:
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ "SELECT username, password, updated_at FROM credentials WHERE site = 'dropbox_backup_schedule' LIMIT 1"
+ )
+ row = cur.fetchone()
+ if not row:
+ return {
+ "enabled": DEFAULT_SCHEDULE_ENABLED,
+ "interval_hours": DEFAULT_SCHEDULE_INTERVAL_HOURS,
+ "updated_at": None,
+ }
+
+ username_raw, password_raw, updated_at = row
+ username = decrypt_value(username_raw)
+ password = decrypt_value(password_raw)
+ raw = (password or username or "").strip().lower()
+
+ enabled = False
+ interval_hours = DEFAULT_SCHEDULE_INTERVAL_HOURS
+ try:
+ obj = json.loads(raw) if raw.startswith("{") else None
+ except Exception:
+ obj = None
+
+ if isinstance(obj, dict):
+ enabled = bool(obj.get("enabled", DEFAULT_SCHEDULE_ENABLED))
+ try:
+ interval_hours = max(1, int(obj.get("interval_hours", DEFAULT_SCHEDULE_INTERVAL_HOURS)))
+ except Exception:
+ interval_hours = DEFAULT_SCHEDULE_INTERVAL_HOURS
+ else:
+ parts = raw.split(":")
+ if len(parts) == 2:
+ enabled = parts[0] in {"1", "true", "yes", "on"}
+ try:
+ interval_hours = max(1, int(parts[1]))
+ except Exception:
+ interval_hours = DEFAULT_SCHEDULE_INTERVAL_HOURS
+
+ norm = json.dumps({"enabled": enabled, "interval_hours": interval_hours}, separators=(",", ":"))
+ if not is_encrypted_value(username_raw) or not is_encrypted_value(password_raw):
+ cur.execute(
+ """
+ UPDATE credentials
+ SET username = %s, password = %s, updated_at = NOW()
+ WHERE site = 'dropbox_backup_schedule'
+ RETURNING updated_at
+ """,
+ (encrypt_value(""), encrypt_value(norm)),
+ )
+ upd = cur.fetchone()
+ if upd:
+ updated_at = upd[0]
+
+ return {
+ "enabled": enabled,
+ "interval_hours": interval_hours,
+ "updated_at": updated_at.isoformat() if updated_at else None,
+ }
+
+
+def _load_backup_schedule() -> tuple[bool, int]:
+ d = _dropbox_schedule_details()
+ return bool(d.get("enabled", DEFAULT_SCHEDULE_ENABLED)), int(d.get("interval_hours", DEFAULT_SCHEDULE_INTERVAL_HOURS))
+
+
+def _save_backup_schedule(enabled: bool, interval_hours: int) -> None:
+ interval = max(1, int(interval_hours))
+ payload = json.dumps({"enabled": bool(enabled), "interval_hours": interval}, separators=(",", ":"))
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ """
+ INSERT INTO credentials (site, username, password, updated_at)
+ VALUES ('dropbox_backup_schedule', %s, %s, NOW())
+ ON CONFLICT (site) DO UPDATE
+ SET username = EXCLUDED.username,
+ password = EXCLUDED.password,
+ updated_at = NOW()
+ """,
+ (encrypt_value(""), encrypt_value(payload)),
+ )
+
+
+def _dropbox_join(root: str, *parts: str) -> str:
+ clean_root = _normalize_dropbox_root(root)
+ segs = [p.strip("/") for p in parts if p and p.strip("/")]
+ if clean_root == "/":
+ return "/" + "/".join(segs) if segs else "/"
+ if not segs:
+ return clean_root
+ return clean_root + "/" + "/".join(segs)
+
+
+def _save_dropbox_root(root: str) -> None:
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ """
+ INSERT INTO credentials (site, username, password, updated_at)
+ VALUES ('dropbox_backup_root', %s, %s, NOW())
+ ON CONFLICT (site) DO UPDATE
+ SET username = EXCLUDED.username,
+ password = EXCLUDED.password,
+ updated_at = NOW()
+ """,
+ (encrypt_value(""), encrypt_value(_normalize_dropbox_root(root))),
+ )
+
+
+def _save_dropbox_retention_count(retention_count: int) -> None:
+ val = max(1, int(retention_count))
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ """
+ INSERT INTO credentials (site, username, password, updated_at)
+ VALUES ('dropbox_backup_retention', %s, %s, NOW())
+ ON CONFLICT (site) DO UPDATE
+ SET username = EXCLUDED.username,
+ password = EXCLUDED.password,
+ updated_at = NOW()
+ """,
+ (encrypt_value(""), encrypt_value(str(val))),
+ )
def _dbx() -> dropbox.Dropbox:
@@ -105,6 +358,48 @@ def _dropbox_upload_bytes(client: dropbox.Dropbox, target_path: str, data: bytes
return len(data)
+def _dropbox_exists(client: dropbox.Dropbox, path: str) -> bool:
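+ # Probe a path with files_get_metadata; treat path/not_found as "absent"
+ # and re-raise any other ApiError.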
+ try:
+ client.files_get_metadata(path)
+ return True
+ except ApiError as e:
+ text = str(e).lower()
+ if "not_found" in text or "path/not_found" in text:
+ return False
+ raise
+
+
+def _dropbox_list_files_recursive(client: dropbox.Dropbox, root: str) -> list[str]:
+ paths: list[str] = []
+ try:
+ res = client.files_list_folder(root, recursive=True)
+ except ApiError as e:
+ text = str(e).lower()
+ if "not_found" in text or "path/not_found" in text:
+ return []
+ raise
+
+ while True:
+ for entry in res.entries:
+ if isinstance(entry, dropbox.files.FileMetadata):
+ paths.append(entry.path_lower or entry.path_display or "")
+ if not res.has_more:
+ break
+ res = client.files_list_folder_continue(res.cursor)
+ return [p for p in paths if p]
+
+
+def _dropbox_delete_paths(client: dropbox.Dropbox, paths: list[str]) -> int:
+ deleted = 0
+ for p in paths:
+ try:
+ client.files_delete_v2(p)
+ deleted += 1
+ except ApiError:
+ pass
+ return deleted
+
+
def _iter_library_files() -> list[Path]:
if not LIBRARY_DIR.exists():
return []
@@ -116,6 +411,23 @@ def _current_file_state(path: Path) -> dict[str, float | int]:
return {"mtime": st.st_mtime, "size": st.st_size}
+def _sha256_file(path: Path) -> str:
+ h = hashlib.sha256()
+ with path.open("rb") as f:
+ for chunk in iter(lambda: f.read(1024 * 1024), b""):
+ h.update(chunk)
+ return h.hexdigest()
+
+
+def _snapshot_name() -> str:
+ stamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
+ return f"snapshot-{stamp}.json"
+
+
+def _object_path(objects_root: str, sha256: str) -> str:
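+ # Content-addressed layout: objects/<first two hex chars>/<full sha256>,
+ # which keeps any single Dropbox folder from holding every object.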
+ return _dropbox_join(objects_root, sha256[:2], sha256)
+
+
def _pg_dump_cmd(tmp_path: Path) -> list[str]:
return [
"pg_dump",
@@ -153,6 +465,39 @@ def _run_pg_dump() -> tuple[bytes, str]:
tmp_path.unlink(missing_ok=True)
+def _has_running_backup() -> bool:
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ """
+ SELECT id
+ FROM backup_log
+ WHERE status = 'running' AND finished_at IS NULL
+ ORDER BY started_at DESC
+ """
+ )
+ rows = [int(r[0]) for r in cur.fetchall()]
+ if not rows:
+ return False
+
+ active_ids = set(BACKUP_TASKS.keys())
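+ # Rows still marked running with no matching live task were interrupted
+ # by a restart or crash; close them out so they never block new runs.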
+ stale_ids = [rid for rid in rows if rid not in active_ids]
+ if stale_ids:
+ cur.execute(
+ """
+ UPDATE backup_log
+ SET status = 'error',
+ error_msg = COALESCE(error_msg, 'Interrupted: service restart or crash'),
+ finished_at = NOW()
+ WHERE id = ANY(%s)
+ """,
+ (stale_ids,),
+ )
+
+ return any(rid in active_ids for rid in rows)
+
+
def _insert_backup_log_running() -> int:
with get_db_conn() as conn:
with conn:
@@ -185,6 +530,62 @@ def _finish_backup_log(log_id: int, *, status: str, files_count: int | None, siz
)
+def _list_snapshot_paths(client: dropbox.Dropbox, snapshots_root: str) -> list[str]:
+ files = _dropbox_list_files_recursive(client, snapshots_root)
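+ # Snapshot names embed a UTC timestamp, so reverse lexical order is
+ # newest-first.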
+ return sorted([p for p in files if p.endswith(".json")], reverse=True)
+
+
+def _load_snapshot_data(client: dropbox.Dropbox, snapshot_path: str) -> dict:
+ _meta, res = client.files_download(snapshot_path)
+ raw = res.content
+ parsed = json.loads(raw.decode("utf-8", errors="replace"))
+ return parsed if isinstance(parsed, dict) else {}
+
+
+def _enforce_snapshot_retention(
+ client: dropbox.Dropbox,
+ snapshots_root: str,
+ keep_count: int,
+) -> tuple[list[str], list[str]]:
+ all_snapshots = _list_snapshot_paths(client, snapshots_root)
+ keep = max(1, int(keep_count))
+ kept = all_snapshots[:keep]
+ to_delete = all_snapshots[keep:]
+ if to_delete:
+ _dropbox_delete_paths(client, to_delete)
+ return kept, to_delete
+
+
+def _collect_hashes_from_snapshots(client: dropbox.Dropbox, snapshot_paths: list[str]) -> set[str]:
+ used: set[str] = set()
+ for path in snapshot_paths:
+ try:
+ snap = _load_snapshot_data(client, path)
+ except Exception:
+ continue
+ files = snap.get("files", {}) if isinstance(snap, dict) else {}
+ if not isinstance(files, dict):
+ continue
+ for item in files.values():
+ if not isinstance(item, dict):
+ continue
+ sha = str(item.get("sha256") or "").lower()
+ if len(sha) == 64 and all(c in "0123456789abcdef" for c in sha):
+ used.add(sha)
+ return used
+
+
+def _prune_orphan_objects(client: dropbox.Dropbox, objects_root: str, referenced_hashes: set[str]) -> int:
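+ # Delete content-addressed objects that no retained snapshot references.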
+ object_files = _dropbox_list_files_recursive(client, objects_root)
+ to_delete: list[str] = []
+ for p in object_files:
+ name = Path(p).name.lower()
+ if len(name) == 64 and all(c in "0123456789abcdef" for c in name):
+ if name not in referenced_hashes:
+ to_delete.append(p)
+ return _dropbox_delete_paths(client, to_delete)
+
+
def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]:
client = None if dry_run else _dbx()
manifest = _load_manifest()
@@ -192,30 +593,76 @@ def _run_backup_internal(*, dry_run: bool) -> tuple[int, int]:
uploaded_count = 0
uploaded_size = 0
- new_manifest: dict[str, dict[str, float | int]] = {}
+ new_manifest: dict[str, dict[str, float | int | str]] = {}
+
+ dropbox_root = _load_dropbox_root()
+ retention_count = _load_dropbox_retention_count()
+
+ objects_root = _dropbox_join(dropbox_root, "library_objects")
+ snapshots_root = _dropbox_join(dropbox_root, "library_snapshots")
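+ # Layout under the backup root: library_objects/ holds deduplicated file
+ # blobs, library_snapshots/ holds point-in-time JSON manifests.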
- library_root = f"{DROPBOX_ROOT}/library"
if client is not None:
- _ensure_dropbox_dir(client, library_root)
+ _ensure_dropbox_dir(client, objects_root)
+ _ensure_dropbox_dir(client, snapshots_root)
+
+ snapshot_files: dict[str, dict[str, float | int | str]] = {}
for path in files:
rel = path.relative_to(LIBRARY_DIR).as_posix()
state = _current_file_state(path)
- new_manifest[rel] = state
+ prev = manifest.get(rel, {}) if isinstance(manifest.get(rel), dict) else {}
- if manifest.get(rel) == state:
- continue
-
- data = path.read_bytes()
- target = f"{library_root}/{rel}"
- if client is not None:
- uploaded_size += _dropbox_upload_bytes(client, target, data)
+ sha256 = ""
+ if (
+ prev
+ and prev.get("mtime") == state["mtime"]
+ and prev.get("size") == state["size"]
+ and isinstance(prev.get("sha256"), str)
+ ):
+ sha256 = str(prev.get("sha256"))
else:
- uploaded_size += len(data)
+ sha256 = _sha256_file(path)
+
+ entry = {"mtime": state["mtime"], "size": state["size"], "sha256": sha256}
+ new_manifest[rel] = entry
+ snapshot_files[rel] = entry
+
+ object_target = _object_path(objects_root, sha256)
+ if client is not None:
+ if not _dropbox_exists(client, object_target):
+ data = path.read_bytes()
+ uploaded_size += _dropbox_upload_bytes(client, object_target, data)
+ uploaded_count += 1
+ else:
+ # Dry run reports potential upload work for changed objects.
+ if not prev or prev.get("sha256") != sha256:
+ uploaded_size += int(state["size"])
+ uploaded_count += 1
+
+ snapshot = {
+ "created_at": _now_iso(),
+ "retention_count": retention_count,
+ "files": snapshot_files,
+ }
+ snapshot_data = json.dumps(snapshot, sort_keys=True, separators=(",", ":")).encode("utf-8")
+ snapshot_name = _snapshot_name()
+ snapshot_target = _dropbox_join(snapshots_root, snapshot_name)
+
+ if client is not None:
+ uploaded_size += _dropbox_upload_bytes(client, snapshot_target, snapshot_data)
+ uploaded_count += 1
+
+ kept_snapshots, _deleted_snapshots = _enforce_snapshot_retention(
+ client, snapshots_root, retention_count
+ )
+ referenced_hashes = _collect_hashes_from_snapshots(client, kept_snapshots)
+ _prune_orphan_objects(client, objects_root, referenced_hashes)
+ else:
+ uploaded_size += len(snapshot_data)
uploaded_count += 1
dump_data, dump_name = _run_pg_dump()
- dump_target = f"{DROPBOX_ROOT}/postgres/{dump_name}"
+ dump_target = _dropbox_join(dropbox_root, "postgres", dump_name)
if client is not None:
uploaded_size += _dropbox_upload_bytes(client, dump_target, dump_data)
else:
@@ -235,6 +682,97 @@ async def backup_page(request: Request):
return templates.TemplateResponse(request, template, {"active": "backup"})
+@router.get("/api/backup/credentials")
+async def backup_dropbox_credentials():
+ # Call each *_details() helper once; they can rewrite their row
+ # (legacy re-encryption), so repeated calls are wasted work.
+ details = _dropbox_credential_details()
+ root_details = _dropbox_root_details()
+ retention_details = _dropbox_retention_details()
+ schedule_details = _dropbox_schedule_details()
+ token = details.get("token", "")
+ preview = ""
+ if token:
+ preview = f"{token[:4]}...{token[-4:]}" if len(token) >= 10 else "(configured)"
+ return {
+ "configured": bool(token),
+ "token_preview": preview,
+ "updated_at": details.get("updated_at"),
+ "dropbox_root": root_details.get("root", DEFAULT_DROPBOX_ROOT),
+ "root_updated_at": root_details.get("updated_at"),
+ "retention_count": int(retention_details.get("retention_count", DEFAULT_RETENTION_COUNT)),
+ "retention_updated_at": retention_details.get("updated_at"),
+ "schedule_enabled": schedule_details.get("enabled", DEFAULT_SCHEDULE_ENABLED),
+ "schedule_interval_hours": schedule_details.get("interval_hours", DEFAULT_SCHEDULE_INTERVAL_HOURS),
+ "schedule_updated_at": schedule_details.get("updated_at"),
+ }
+
+
+@router.post("/api/backup/credentials")
+async def backup_dropbox_credentials_save(request: Request):
+ body = {}
+ try:
+ body = await request.json()
+ except Exception:
+ pass
+
+ try:
+ existing_token = _load_dropbox_token()
+ token = (body.get("token") or "").strip() or existing_token
+ if not token:
+ return {"ok": False, "error": "Dropbox token is required."}
+
+ dropbox_root = _normalize_dropbox_root(body.get("dropbox_root") or _load_dropbox_root())
+ raw_retention = body.get("retention_count", _load_dropbox_retention_count())
+ try:
+ retention_count = max(1, int(raw_retention))
+ except Exception:
+ retention_count = DEFAULT_RETENTION_COUNT
+
+ existing_enabled, existing_interval = _load_backup_schedule()
+ schedule_enabled = bool(body.get("schedule_enabled", existing_enabled))
+ raw_interval = body.get("schedule_interval_hours", existing_interval)
+ try:
+ schedule_interval_hours = max(1, int(raw_interval))
+ except Exception:
+ schedule_interval_hours = DEFAULT_SCHEDULE_INTERVAL_HOURS
+
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ """
+ INSERT INTO credentials (site, username, password, updated_at)
+ VALUES ('dropbox', %s, %s, NOW())
+ ON CONFLICT (site) DO UPDATE
+ SET username = EXCLUDED.username,
+ password = EXCLUDED.password,
+ updated_at = NOW()
+ """,
+ (encrypt_value(""), encrypt_value(token)),
+ )
+
+ _save_dropbox_root(dropbox_root)
+ _save_dropbox_retention_count(retention_count)
+ _save_backup_schedule(schedule_enabled, schedule_interval_hours)
+ return {
+ "ok": True,
+ "dropbox_root": dropbox_root,
+ "retention_count": retention_count,
+ "schedule_enabled": schedule_enabled,
+ "schedule_interval_hours": schedule_interval_hours,
+ }
+ except Exception as e:
+ return {"ok": False, "error": str(e)}
+
+
+@router.delete("/api/backup/credentials")
+async def backup_dropbox_credentials_delete():
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ "DELETE FROM credentials WHERE site IN ('dropbox', 'dropbox_backup_root', 'dropbox_backup_retention', 'dropbox_backup_schedule')"
+ )
+ return {"ok": True}
+
+
@router.get("/api/backup/health")
async def backup_health():
token_present = bool(_load_dropbox_token())
@@ -249,10 +787,16 @@ async def backup_health():
except Exception as e:
dropbox_error = str(e)
+ dropbox_root = _load_dropbox_root()
+ retention_count = _load_dropbox_retention_count()
+ schedule_enabled, schedule_interval_hours = _load_backup_schedule()
+
return {
"token_present": token_present,
"dropbox_ok": dropbox_ok,
"dropbox_error": dropbox_error,
+ "dropbox_root": dropbox_root,
+ "retention_count": retention_count,
"pg_dump_available": bool(pg_dump_path),
"pg_dump_path": pg_dump_path,
"library_exists": LIBRARY_DIR.exists(),
@@ -313,6 +857,87 @@ async def backup_history():
]
+def _start_backup_task(*, dry_run: bool) -> int:
+ log_id = _insert_backup_log_running()
+ task = asyncio.create_task(_run_backup_job(log_id, dry_run))
+ BACKUP_TASKS[log_id] = task
+ return log_id
+
+
+def _is_scheduled_backup_due(interval_hours: int) -> bool:
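+ # A run is due if there has never been a successful backup, or the last
+ # success is older than the configured interval.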
+ with get_db_conn() as conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ """
+ SELECT finished_at
+ FROM backup_log
+ WHERE status = 'success' AND finished_at IS NOT NULL
+ ORDER BY finished_at DESC
+ LIMIT 1
+ """
+ )
+ row = cur.fetchone()
+ if not row or not row[0]:
+ return True
+
+ last = row[0]
+ if last.tzinfo is None:
+ last = last.replace(tzinfo=timezone.utc)
+ now = datetime.now(timezone.utc)
+ return (now - last).total_seconds() >= max(1, int(interval_hours)) * 3600
+
+
+async def _scheduler_loop() -> None:
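+ # Wake once a minute; start a run only when the schedule is enabled, no
+ # backup is in flight, and the interval has elapsed.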
+ while True:
+ try:
+ enabled, interval_hours = _load_backup_schedule()
+ if enabled and not _has_running_backup() and _is_scheduled_backup_due(interval_hours):
+ _start_backup_task(dry_run=False)
+ except Exception:
+ # Keep scheduler alive; errors are visible in backup history when runs fail.
+ pass
+ await asyncio.sleep(60)
+
+
+async def start_backup_scheduler() -> None:
+ global SCHEDULER_TASK
+ if SCHEDULER_TASK is None or SCHEDULER_TASK.done():
+ SCHEDULER_TASK = asyncio.create_task(_scheduler_loop())
+
+
+async def stop_backup_scheduler() -> None:
+ global SCHEDULER_TASK
+ if SCHEDULER_TASK is not None:
+ SCHEDULER_TASK.cancel()
+ try:
+ await SCHEDULER_TASK
+ except asyncio.CancelledError:
+ pass
+ SCHEDULER_TASK = None
+
+
+async def _run_backup_job(log_id: int, dry_run: bool) -> None:
+ try:
+ files_count, size_bytes = await asyncio.to_thread(_run_backup_internal, dry_run=dry_run)
+ _finish_backup_log(
+ log_id,
+ status="success",
+ files_count=files_count,
+ size_bytes=size_bytes,
+ error_msg=None,
+ )
+ except Exception as e:
+ _finish_backup_log(
+ log_id,
+ status="error",
+ files_count=None,
+ size_bytes=None,
+ error_msg=str(e),
+ )
+ finally:
+ BACKUP_TASKS.pop(log_id, None)
+
+
@router.post("/api/backup/run")
async def run_backup(request: Request):
body = {}
@@ -322,38 +947,21 @@ async def run_backup(request: Request):
pass
dry_run = bool(body.get("dry_run", False))
- log_id = _insert_backup_log_running()
- try:
- files_count, size_bytes = _run_backup_internal(dry_run=dry_run)
- _finish_backup_log(
- log_id,
- status="success",
- files_count=files_count,
- size_bytes=size_bytes,
- error_msg=None,
- )
- return {
- "ok": True,
- "backup_id": log_id,
- "status": "success",
- "dry_run": dry_run,
- "files_count": files_count,
- "size_bytes": size_bytes,
- "finished_at": _now_iso(),
- }
- except Exception as e:
- _finish_backup_log(
- log_id,
- status="error",
- files_count=None,
- size_bytes=None,
- error_msg=str(e),
- )
+ if _has_running_backup():
return {
"ok": False,
- "backup_id": log_id,
- "status": "error",
- "dry_run": dry_run,
- "error": str(e),
+ "status": "running",
+ "error": "A backup is already running.",
"finished_at": _now_iso(),
}
+
+ log_id = _start_backup_task(dry_run=dry_run)
+
+ return {
+ "ok": True,
+ "backup_id": log_id,
+ "status": "running",
+ "dry_run": dry_run,
+ "message": "Backup started in background.",
+ "started_at": _now_iso(),
+ }
diff --git a/containers/novela/routers/common.py b/containers/novela/routers/common.py
index f71ace1..493ddb6 100644
--- a/containers/novela/routers/common.py
+++ b/containers/novela/routers/common.py
@@ -346,22 +346,23 @@ def list_library_json() -> list[dict]:
l.series, l.series_index, l.publication_status, l.want_to_read,
l.archived, l.needs_review, l.updated_at,
rp.progress, rp.cfi, rp.page,
- COUNT(rs.id)::int AS read_count,
- MAX(rs.read_at) AS last_read
+ COALESCE(rs.read_count, 0)::int AS read_count,
+ rs.last_read,
+ (cc.filename IS NOT NULL) AS has_cached_cover
FROM library l
LEFT JOIN reading_progress rp ON rp.filename = l.filename
- LEFT JOIN reading_sessions rs ON rs.filename = l.filename
- GROUP BY l.filename, l.media_type, l.title, l.author, l.publisher, l.has_cover,
- l.series, l.series_index, l.publication_status, l.want_to_read,
- l.archived, l.needs_review, l.updated_at, rp.progress, rp.cfi, rp.page
+ LEFT JOIN (
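+ -- Pre-aggregate sessions per file so the outer query needs no GROUP BY
+ -- and library rows are never multiplied by session rows.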
+ SELECT filename, COUNT(*)::int AS read_count, MAX(read_at) AS last_read
+ FROM reading_sessions
+ GROUP BY filename
+ ) rs ON rs.filename = l.filename
+ LEFT JOIN library_cover_cache cc ON cc.filename = l.filename
ORDER BY COALESCE(l.publisher, ''), COALESCE(l.author, ''), COALESCE(l.series, ''), l.series_index, COALESCE(l.title, '')
"""
)
rows = cur.fetchall()
- cur.execute("SELECT filename, tag, tag_type FROM book_tags ORDER BY tag")
+ cur.execute("SELECT filename, tag, tag_type FROM book_tags ORDER BY filename, tag")
tags = cur.fetchall()
- cur.execute("SELECT filename FROM library_cover_cache")
- cached = {r[0] for r in cur.fetchall()}
tag_map: dict[str, list[dict]] = {}
for filename, tag, tag_type in tags:
@@ -377,7 +378,7 @@ def list_library_json() -> list[dict]:
"author": r[3] or "",
"publisher": r[4] or "",
"has_cover": bool(r[5]),
- "has_cached_cover": r[0] in cached,
+ "has_cached_cover": bool(r[18]),
"series": r[6] or "",
"series_index": r[7] or 0,
"publication_status": r[8] or "",
diff --git a/containers/novela/routers/library.py b/containers/novela/routers/library.py
index 2e441e4..0e4ed23 100644
--- a/containers/novela/routers/library.py
+++ b/containers/novela/routers/library.py
@@ -71,13 +71,18 @@ async def library_page(request: Request):
@router.get("/api/library")
-async def api_library():
- _sync_disk_to_db()
+async def api_library(rescan: bool = False, include_file_info: bool = False):
+ # Fast path: avoid expensive full disk scan on every library page load.
+ # Use /library/rescan (or ?rescan=true) when a full sync is needed.
+ if rescan:
+ _sync_disk_to_db()
+
books = list_library_json()
- for b in books:
- p = resolve_library_path(b["filename"])
- if p and p.exists():
- b.update(relative_file_info(p))
+ if include_file_info:
+ for b in books:
+ p = resolve_library_path(b["filename"])
+ if p and p.exists():
+ b.update(relative_file_info(p))
return books
@@ -308,6 +313,46 @@ async def library_archive(filename: str):
return {"ok": True, "archived": val}
+@router.post("/library/new/mark-reviewed")
+async def library_mark_new_reviewed(request: Request):
+ body = await request.json()
+ filenames = body.get("filenames", [])
+ if not isinstance(filenames, list):
+ return {"error": "filenames must be a list"}
+
+ cleaned: list[str] = []
+ seen: set[str] = set()
+ for raw in filenames:
+ if not isinstance(raw, str):
+ continue
+ name = raw.strip()
+ if not name or name in seen:
+ continue
+ full = resolve_library_path(name)
+ if full is None:
+ continue
+ cleaned.append(name)
+ seen.add(name)
+
+ if not cleaned:
+ return {"ok": True, "updated": 0}
+
+ placeholders = ", ".join(["%s"] * len(cleaned))
+ with get_db_conn() as conn:
+ with conn:
+ with conn.cursor() as cur:
+ cur.execute(
+ f"""
+ UPDATE library
+ SET needs_review = FALSE, updated_at = NOW()
+ WHERE filename IN ({placeholders})
+ """,
+ tuple(cleaned),
+ )
+ updated = cur.rowcount or 0
+ return {"ok": True, "updated": updated}
+
+
@router.get("/home", response_class=HTMLResponse)
async def home_page(request: Request):
return templates.TemplateResponse(request, "home.html", {"active": "home"})
@@ -319,30 +364,165 @@ async def api_home():
with conn.cursor() as cur:
cur.execute(
"""
- SELECT l.filename, l.title, l.author, l.media_type,
+ SELECT l.filename, l.title, l.author, l.has_cover,
+ l.series, l.series_index, l.publication_status,
+ l.media_type,
COALESCE(rp.progress, 0) AS progress,
- MAX(rs.read_at) AS last_read
- FROM library l
- LEFT JOIN reading_progress rp ON rp.filename = l.filename
- LEFT JOIN reading_sessions rs ON rs.filename = l.filename
- GROUP BY l.filename, l.title, l.author, l.media_type, rp.progress
- ORDER BY last_read DESC NULLS LAST, l.updated_at DESC
- LIMIT 30
+ rp.cfi
+ FROM reading_progress rp
+ JOIN library l ON l.filename = rp.filename
+ WHERE rp.progress > 0
+ AND l.archived = FALSE
+ ORDER BY rp.updated_at DESC
"""
)
- rows = cur.fetchall()
+ cr_rows = cur.fetchall()
+
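+ # Four home shelves: unread shorts, unread novels, and the least recently
+ # read of each. "Shorts" membership comes from book_tags.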
+ cur.execute(
+ """
+ SELECT l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type
+ FROM library l
+ LEFT JOIN reading_sessions rs ON rs.filename = l.filename
+ LEFT JOIN reading_progress rp ON rp.filename = l.filename
+ WHERE COALESCE(l.series, '') = ''
+ AND l.filename NOT LIKE '%/Series/%'
+ AND l.archived = FALSE
+ AND rs.id IS NULL
+ AND COALESCE(rp.progress, 0) = 0
+ AND EXISTS (
+ SELECT 1
+ FROM book_tags bt
+ WHERE bt.filename = l.filename
+ AND bt.tag = 'Shorts'
+ AND bt.tag_type IN ('tag', 'subject')
+ )
+ GROUP BY l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type
+ ORDER BY RANDOM()
+ """
+ )
+ shorts_rows = cur.fetchall()
+
+ cur.execute(
+ """
+ SELECT l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type
+ FROM library l
+ LEFT JOIN reading_sessions rs ON rs.filename = l.filename
+ LEFT JOIN reading_progress rp ON rp.filename = l.filename
+ WHERE COALESCE(l.series, '') = ''
+ AND l.filename NOT LIKE '%/Series/%'
+ AND l.archived = FALSE
+ AND rs.id IS NULL
+ AND COALESCE(rp.progress, 0) = 0
+ AND NOT EXISTS (
+ SELECT 1
+ FROM book_tags bt
+ WHERE bt.filename = l.filename
+ AND bt.tag = 'Shorts'
+ AND bt.tag_type IN ('tag', 'subject')
+ )
+ GROUP BY l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type
+ ORDER BY RANDOM()
+ """
+ )
+ novels_rows = cur.fetchall()
+
+ cur.execute(
+ """
+ SELECT l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type,
+ MAX(rs.read_at) AS last_read
+ FROM library l
+ JOIN reading_sessions rs ON rs.filename = l.filename
+ WHERE COALESCE(l.series, '') = ''
+ AND l.filename NOT LIKE '%/Series/%'
+ AND l.archived = FALSE
+ AND EXISTS (
+ SELECT 1
+ FROM book_tags bt
+ WHERE bt.filename = l.filename
+ AND bt.tag = 'Shorts'
+ AND bt.tag_type IN ('tag', 'subject')
+ )
+ GROUP BY l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type
+ ORDER BY MAX(rs.read_at) ASC
+ """
+ )
+ shorts_read_rows = cur.fetchall()
+
+ cur.execute(
+ """
+ SELECT l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type,
+ MAX(rs.read_at) AS last_read
+ FROM library l
+ JOIN reading_sessions rs ON rs.filename = l.filename
+ WHERE COALESCE(l.series, '') = ''
+ AND l.filename NOT LIKE '%/Series/%'
+ AND l.archived = FALSE
+ AND NOT EXISTS (
+ SELECT 1
+ FROM book_tags bt
+ WHERE bt.filename = l.filename
+ AND bt.tag = 'Shorts'
+ AND bt.tag_type IN ('tag', 'subject')
+ )
+ GROUP BY l.filename, l.title, l.author, l.has_cover, l.publication_status, l.media_type
+ ORDER BY MAX(rs.read_at) ASC
+ """
+ )
+ novels_read_rows = cur.fetchall()
+
+ def simple(rows):
+ return [
+ {
+ "filename": r[0],
+ "title": r[1] or "",
+ "author": r[2] or "",
+ "has_cover": bool(r[3]),
+ "publication_status": r[4] or "",
+ "media_type": r[5] or "epub",
+ "progress": 0,
+ "series": "",
+ "series_index": 0,
+ }
+ for r in rows
+ ]
+
+ def simple_read(rows):
+ return [
+ {
+ "filename": r[0],
+ "title": r[1] or "",
+ "author": r[2] or "",
+ "has_cover": bool(r[3]),
+ "publication_status": r[4] or "",
+ "media_type": r[5] or "epub",
+ "last_read": r[6].isoformat() if r[6] else None,
+ "progress": 0,
+ "series": "",
+ "series_index": 0,
+ }
+ for r in rows
+ ]
+
return {
"continue_reading": [
{
"filename": r[0],
"title": r[1] or "",
"author": r[2] or "",
- "media_type": r[3],
- "progress": r[4] or 0,
- "last_read": r[5].isoformat() if r[5] else None,
+ "has_cover": bool(r[3]),
+ "series": r[4] or "",
+ "series_index": r[5] or 0,
+ "publication_status": r[6] or "",
+ "media_type": r[7] or "epub",
+ "progress": r[8] or 0,
+ "progress_cfi": r[9],
}
- for r in rows
- ]
+ for r in cr_rows
+ ],
+ "shorts_unread": simple(shorts_rows),
+ "novels_unread": simple(novels_rows),
+ "shorts_read": simple_read(shorts_read_rows),
+ "novels_read": simple_read(novels_read_rows),
}
@@ -357,10 +537,13 @@ async def api_stats():
with conn.cursor() as cur:
cur.execute("SELECT COUNT(*)::int FROM library")
total_books = cur.fetchone()[0]
+
cur.execute("SELECT COUNT(*)::int FROM reading_sessions")
total_reads = cur.fetchone()[0]
+
cur.execute("SELECT COUNT(DISTINCT filename)::int FROM reading_sessions")
unique_books_read = cur.fetchone()[0]
+
cur.execute(
"""
SELECT media_type, COUNT(*)::int
@@ -370,14 +553,143 @@ async def api_stats():
"""
)
by_type = [{"media_type": r[0], "count": r[1]} for r in cur.fetchall()]
+
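+ # Reads per month for the last 12 calendar months, zero-filled so the
+ # chart has no gaps.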
+ cur.execute(
+ """
+ WITH months AS (
+ SELECT date_trunc('month', CURRENT_DATE) - (n * interval '1 month') AS month_start
+ FROM generate_series(11, 0, -1) AS n
+ ), counts AS (
+ SELECT date_trunc('month', read_at) AS month_start, COUNT(*)::int AS cnt
+ FROM reading_sessions
+ WHERE read_at >= date_trunc('month', CURRENT_DATE) - interval '11 months'
+ GROUP BY 1
+ )
+ SELECT to_char(m.month_start, 'YYYY-MM') AS month, COALESCE(c.cnt, 0)::int AS count
+ FROM months m
+ LEFT JOIN counts c ON c.month_start = m.month_start
+ ORDER BY m.month_start
+ """
+ )
+ reads_by_month = [{"month": r[0], "count": r[1]} for r in cur.fetchall()]
+
+ cur.execute(
+ """
+ SELECT EXTRACT(DOW FROM read_at)::int AS dow, COUNT(*)::int
+ FROM reading_sessions
+ GROUP BY 1
+ """
+ )
+ reads_by_dow = [0] * 7
+ for dow, count in cur.fetchall():
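+ # Postgres EXTRACT(DOW) is 0=Sunday..6=Saturday; shift so the array
+ # runs Monday..Sunday.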
+ idx = (int(dow) + 6) % 7
+ reads_by_dow[idx] = int(count)
+
+ cur.execute(
+ """
+ SELECT EXTRACT(HOUR FROM read_at)::int AS hour, COUNT(*)::int
+ FROM reading_sessions
+ GROUP BY 1
+ """
+ )
+ reads_by_hour = [0] * 24
+ for hour, count in cur.fetchall():
+ h = int(hour)
+ if 0 <= h <= 23:
+ reads_by_hour[h] = int(count)
+
+ cur.execute(
+ """
+ SELECT bt.tag AS name, COUNT(DISTINCT bt.filename)::int AS count
+ FROM book_tags bt
+ JOIN library l ON l.filename = bt.filename
+ WHERE bt.tag_type IN ('genre', 'subgenre')
+ GROUP BY bt.tag
+ ORDER BY count DESC, name ASC
+ """
+ )
+ genre_counts = [{"name": r[0], "count": r[1]} for r in cur.fetchall()]
+
+ cur.execute(
+ """
+ SELECT publisher AS name, COUNT(*)::int AS count
+ FROM library
+ WHERE COALESCE(TRIM(publisher), '') <> ''
+ GROUP BY publisher
+ ORDER BY count DESC, name ASC
+ """
+ )
+ publisher_counts = [{"name": r[0], "count": r[1]} for r in cur.fetchall()]
+
+ cur.execute(
+ """
+ SELECT
+ COALESCE(NULLIF(TRIM(l.title), ''), l.filename) AS title,
+ COALESCE(l.author, '') AS author,
+ COUNT(*)::int AS count
+ FROM reading_sessions rs
+ JOIN library l ON l.filename = rs.filename
+ GROUP BY l.filename, l.title, l.author
+ ORDER BY count DESC, MAX(rs.read_at) DESC
+ LIMIT 10
+ """
+ )
+ top_books = [{"title": r[0], "author": r[1], "count": r[2]} for r in cur.fetchall()]
+
+ cur.execute(
+ """
+ SELECT
+ COALESCE(NULLIF(TRIM(l.title), ''), l.filename) AS title,
+ COALESCE(l.author, '') AS author,
+ COALESCE(l.publisher, '') AS publisher,
+ rs.read_at,
+ COALESCE(
+ array_remove(
+ array_agg(DISTINCT CASE WHEN bt.tag_type IN ('genre', 'subgenre') THEN bt.tag END),
+ NULL
+ ),
+ ARRAY[]::text[]
+ ) AS genres
+ FROM reading_sessions rs
+ JOIN library l ON l.filename = rs.filename
+ LEFT JOIN book_tags bt ON bt.filename = l.filename
+ GROUP BY rs.id, l.filename, l.title, l.author, l.publisher, rs.read_at
+ ORDER BY rs.read_at DESC
+ LIMIT 50
+ """
+ )
+ history = [
+ {
+ "title": r[0],
+ "author": r[1],
+ "publisher": r[2],
+ "read_at": r[3].isoformat() if r[3] else None,
+ "genres": list(r[4] or []),
+ }
+ for r in cur.fetchall()
+ ]
+
+ fav_genre = genre_counts[0]["name"] if genre_counts else None
+ fav_publisher = publisher_counts[0]["name"] if publisher_counts else None
+
return {
"total_books": total_books,
"total_reads": total_reads,
"unique_books_read": unique_books_read,
"by_media_type": by_type,
+ "reads_by_month": reads_by_month,
+ "reads_by_dow": reads_by_dow,
+ "reads_by_hour": reads_by_hour,
+ "genre_counts": genre_counts,
+ "publisher_counts": publisher_counts,
+ "fav_genre": fav_genre,
+ "fav_publisher": fav_publisher,
+ "top_books": top_books,
+ "history": history,
"generated_at": datetime.now(timezone.utc).isoformat(),
}
+
@router.get("/library/list")
async def library_list_compat():
return await api_library()
diff --git a/containers/novela/static/library.css b/containers/novela/static/library.css
index 7dd7a60..5edce11 100644
--- a/containers/novela/static/library.css
+++ b/containers/novela/static/library.css
@@ -454,3 +454,186 @@ html, body {
border-top: 1px solid var(--border);
padding-top: 0.8rem;
}
+
+/* ── New view controls + list mode ─────────────────────────────────────── */
+
+.new-controls {
+ margin-bottom: 1rem;
+}
+
+.new-controls-bar {
+ display: flex;
+ flex-wrap: wrap;
+ align-items: center;
+ justify-content: space-between;
+ gap: 0.6rem;
+ border: 1px solid var(--border);
+ background: rgba(34, 31, 27, 0.5);
+ border-radius: var(--radius);
+ padding: 0.55rem 0.6rem;
+}
+
+.new-view-toggle {
+ display: inline-flex;
+ gap: 0.3rem;
+}
+
+.new-actions {
+ position: relative;
+ display: flex;
+ flex-wrap: wrap;
+ align-items: center;
+ justify-content: flex-end;
+ gap: 0.45rem;
+}
+
+.btn.btn-view,
+.btn.btn-light,
+.btn.btn-mark-reviewed {
+ border: 1px solid var(--border);
+ background: var(--surface2);
+ color: var(--text-dim);
+}
+
+.btn.btn-view.active {
+ border-color: rgba(200, 120, 58, 0.45);
+ background: rgba(200, 120, 58, 0.16);
+ color: var(--accent2);
+}
+
+.btn.btn-light:hover,
+.btn.btn-view:hover {
+ color: var(--text);
+}
+
+.btn.btn-mark-reviewed {
+ border-color: rgba(107, 170, 107, 0.35);
+ background: rgba(107, 170, 107, 0.14);
+ color: var(--success);
+}
+
+.btn.btn-mark-reviewed:hover {
+ background: rgba(107, 170, 107, 0.24);
+}
+
+.btn.btn-mark-reviewed:disabled {
+ opacity: 0.45;
+ cursor: not-allowed;
+}
+
+.new-selection-count {
+ font-family: var(--mono);
+ font-size: 0.68rem;
+ color: var(--text-dim);
+ padding: 0 0.1rem;
+}
+
+.new-columns-menu {
+ display: none;
+ position: absolute;
+ top: calc(100% + 0.4rem);
+ right: 0;
+ z-index: 20;
+ border: 1px solid var(--border);
+ border-radius: var(--radius);
+ background: var(--surface);
+ min-width: 190px;
+ max-height: 260px;
+ overflow: auto;
+ padding: 0.35rem;
+ box-shadow: 0 8px 24px rgba(0, 0, 0, 0.35);
+}
+
+.new-columns-menu.visible {
+ display: block;
+}
+
+.new-col-item {
+ display: flex;
+ align-items: center;
+ gap: 0.45rem;
+ padding: 0.3rem 0.35rem;
+ border-radius: 4px;
+ font-family: var(--mono);
+ font-size: 0.68rem;
+ color: var(--text-dim);
+}
+
+.new-col-item:hover {
+ background: var(--surface2);
+ color: var(--text);
+}
+
+.new-list-wrap {
+ overflow: auto;
+ border: 1px solid var(--border);
+ border-radius: var(--radius);
+ background: rgba(34, 31, 27, 0.48);
+}
+
+.new-list-table {
+ width: 100%;
+ min-width: 980px;
+ border-collapse: collapse;
+}
+
+.new-list-table thead th {
+ text-align: left;
+ padding: 0.55rem 0.5rem;
+ font-family: var(--mono);
+ font-size: 0.63rem;
+ letter-spacing: 0.06em;
+ text-transform: uppercase;
+ color: var(--accent2);
+ border-bottom: 1px solid var(--border);
+ background: rgba(15, 14, 12, 0.35);
+}
+
+.new-list-table tbody td {
+ padding: 0.52rem 0.5rem;
+ border-bottom: 1px solid rgba(46, 42, 36, 0.55);
+ font-size: 0.74rem;
+ color: var(--text);
+ vertical-align: top;
+}
+
+.new-list-table tbody tr {
+ cursor: pointer;
+}
+
+.new-list-table tbody tr:hover {
+ background: rgba(200, 120, 58, 0.08);
+}
+
+.new-col-select {
+ width: 34px;
+ min-width: 34px;
+ text-align: center;
+}
+
+.new-list-table .col-title {
+ font-weight: 700;
+}
+
+.new-list-table .col-center {
+ text-align: center;
+}
+
+@media (max-width: 900px) {
+ .main {
+ padding: 1.2rem 1rem 2rem;
+ }
+
+ .new-controls-bar {
+ align-items: stretch;
+ }
+
+ .new-actions {
+ justify-content: flex-start;
+ }
+
+ .new-columns-menu {
+ left: 0;
+ right: auto;
+ }
+}
diff --git a/containers/novela/static/library.js b/containers/novela/static/library.js
index aff079a..5ec49a8 100644
--- a/containers/novela/static/library.js
+++ b/containers/novela/static/library.js
@@ -9,6 +9,28 @@ let coverB64 = null;
let importInProgress = false;
const MISSING_PUBLISHER_KEY = '__missing__';
const MISSING_PUBLISHER_LABEL = 'No publisher';
+const IMPORT_EXTENSIONS = ['.epub', '.pdf', '.cbr', '.cbz'];
+const NEW_VIEW_MODE_KEY = 'novela.new.viewMode';
+const NEW_VISIBLE_COLUMNS_KEY = 'novela.new.visibleColumns';
+const NEW_DEFAULT_COLUMNS = ['publisher', 'author', 'series', 'volume', 'title', 'has_cover', 'updated', 'genres', 'subgenres', 'tags', 'status'];
+const NEW_COLUMN_DEFS = [
+ { id: 'publisher', label: 'Publisher' },
+ { id: 'author', label: 'Author' },
+ { id: 'series', label: 'Series' },
+ { id: 'volume', label: 'Volume' },
+ { id: 'title', label: 'Title' },
+ { id: 'has_cover', label: 'Has cover' },
+ { id: 'updated', label: 'Updated' },
+ { id: 'genres', label: 'Genres' },
+ { id: 'subgenres', label: 'Sub-genres' },
+ { id: 'tags', label: 'Tags' },
+ { id: 'status', label: 'Status' },
+];
+
+let newViewMode = loadNewViewMode();
+let newVisibleColumns = loadNewVisibleColumns();
+let newSelectedFilenames = new Set();
+let newLastToggledIndex = null;
// ── Placeholder cover generation ───────────────────────────────────────────
@@ -108,14 +130,19 @@ function updateCounts() {
if (archEl) archEl.textContent = archCount || '';
}
+function _filenameBase(filename) {
+ const leaf = String(filename || '').split('/').pop() || '';
+ return leaf.replace(/\.[^.]+$/, '');
+}
+
function bookAuthor(b) {
if (b.author) return b.author;
- const parts = b.filename.replace(/\.epub$/, '').split('-');
+ const parts = _filenameBase(b.filename).split('-');
return (parts[1] ?? '').replace(/_/g, ' ');
}
function bookTitle(b) {
- return b.title || (b.filename.replace(/\.epub$/, '').split('-')[2] ?? '').replace(/_/g, ' ');
+ return b.title || (_filenameBase(b.filename).split('-')[2] ?? '').replace(/_/g, ' ');
}
function normalizePublisherName(value) {
@@ -189,6 +216,11 @@ function _applyView(view, param) {
view === 'genre' ? `Genre: ${param || ''}` :
view === 'search' ? `Search: "${param || ''}"` : '';
+ if (view !== 'new') {
+ newSelectedFilenames.clear();
+ newLastToggledIndex = null;
+ }
+
const showBack = view === 'series-detail' || view === 'author-detail' || view === 'publisher-detail';
document.getElementById('back-btn').style.display = showBack ? '' : 'none';
@@ -211,6 +243,7 @@ window.addEventListener('popstate', e => {
function renderGrid() {
const active = activeBooks();
+ if (currentView !== 'new') hideNewControls();
if (currentView === 'all') renderBooksGrid(active);
else if (currentView === 'wtr') renderBooksGrid(active.filter(b => b.want_to_read));
else if (currentView === 'series') renderSeriesGrid();
@@ -220,11 +253,326 @@ function renderGrid() {
else if (currentView === 'publishers') renderPublishersView();
else if (currentView === 'publisher-detail') renderPublisherDetail(currentParam);
else if (currentView === 'archived') renderBooksGrid(archivedBooks());
- else if (currentView === 'new') renderBooksGrid(active.filter(b => b.needs_review));
+ else if (currentView === 'new') renderNewBooksView(active.filter(b => b.needs_review));
else if (currentView === 'genre') renderGenreView(currentParam);
else if (currentView === 'search') renderSearchResults(currentParam);
}
+// ── New view (bulk review + list/grid toggle) ─────────────────────────────
+
+function loadNewViewMode() {
+ try {
+ const raw = localStorage.getItem(NEW_VIEW_MODE_KEY);
+ return raw === 'list' ? 'list' : 'grid';
+ } catch {
+ return 'grid';
+ }
+}
+
+function loadNewVisibleColumns() {
+ try {
+ const raw = localStorage.getItem(NEW_VISIBLE_COLUMNS_KEY);
+ if (!raw) return [...NEW_DEFAULT_COLUMNS];
+ const parsed = JSON.parse(raw);
+ if (!Array.isArray(parsed)) return [...NEW_DEFAULT_COLUMNS];
+ const allowed = new Set(NEW_COLUMN_DEFS.map(c => c.id));
+ const saved = new Set(parsed.filter(v => typeof v === 'string' && allowed.has(v)));
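+ // Re-order the saved ids to NEW_COLUMN_DEFS order so the table layout
+ // stays stable regardless of the order columns were toggled.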
+ const normalized = NEW_COLUMN_DEFS.map(c => c.id).filter(id => saved.has(id));
+ if (!normalized.length) return [...NEW_DEFAULT_COLUMNS];
+ return normalized;
+ } catch {
+ return [...NEW_DEFAULT_COLUMNS];
+ }
+}
+
+function persistNewColumns() {
+ try {
+ localStorage.setItem(NEW_VISIBLE_COLUMNS_KEY, JSON.stringify(newVisibleColumns));
+ } catch {
+ // ignore storage failures
+ }
+}
+
+function persistNewViewMode() {
+ try {
+ localStorage.setItem(NEW_VIEW_MODE_KEY, newViewMode);
+ } catch {
+ // ignore storage failures
+ }
+}
+
+function hideNewControls() {
+ const controls = document.getElementById('new-controls');
+ if (!controls) return;
+ controls.style.display = 'none';
+ controls.innerHTML = '';
+}
+
+function setNewViewMode(mode) {
+ if (mode !== 'grid' && mode !== 'list') return;
+ newViewMode = mode;
+ if (mode === 'grid') {
+ newSelectedFilenames.clear();
+ newLastToggledIndex = null;
+ }
+ persistNewViewMode();
+ renderGrid();
+}
+
+function toggleNewColumnsMenu(ev) {
+ ev?.stopPropagation();
+ const menu = document.getElementById('new-columns-menu');
+ if (!menu) return;
+ menu.classList.toggle('visible');
+}
+
+function toggleNewColumn(columnId) {
+ const set = new Set(newVisibleColumns);
+ if (set.has(columnId)) set.delete(columnId);
+ else set.add(columnId);
+
+ const ordered = NEW_COLUMN_DEFS.map(c => c.id).filter(id => set.has(id));
+ newVisibleColumns = ordered.length ? ordered : ['title'];
+ persistNewColumns();
+ renderGrid();
+}
+
+function toggleSelectAllNewRows(checked, books) {
+ if (checked) {
+ books.forEach(b => newSelectedFilenames.add(b.filename));
+ newLastToggledIndex = books.length ? books.length - 1 : null;
+ } else {
+ books.forEach(b => newSelectedFilenames.delete(b.filename));
+ newLastToggledIndex = null;
+ }
+ renderNewControls(books);
+ if (newViewMode === 'list') {
+ const rowChecks = document.querySelectorAll('.new-row-select');
+ rowChecks.forEach(cb => { cb.checked = checked; });
+ }
+}
+
+function toggleNewRowWithShift(filename, checked, shiftPressed) {
+ const books = activeBooks().filter(b => b.needs_review);
+ const filenames = books.map(b => b.filename);
+ const idx = filenames.indexOf(filename);
+ if (idx === -1) return;
+
+ const doRange = !!(shiftPressed && newLastToggledIndex !== null);
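+ // A shift-click applies the clicked checkbox's new state to the whole
+ // range between the last toggled row and this one.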
+ if (doRange) {
+ const start = Math.min(newLastToggledIndex, idx);
+ const end = Math.max(newLastToggledIndex, idx);
+ for (let i = start; i <= end; i++) {
+ const name = filenames[i];
+ if (checked) newSelectedFilenames.add(name);
+ else newSelectedFilenames.delete(name);
+ }
+ } else {
+ if (checked) newSelectedFilenames.add(filename);
+ else newSelectedFilenames.delete(filename);
+ }
+
+ newLastToggledIndex = idx;
+ renderNewControls(books);
+ renderNewBooksList(books);
+}
+
+function handleNewRowCheckboxClick(filename, checkboxEl, ev) {
+ ev?.stopPropagation();
+ const shiftPressed = !!(ev && ev.shiftKey);
+ toggleNewRowWithShift(filename, !!checkboxEl?.checked, shiftPressed);
+}
+
+function clearNewSelection(books) {
+ books.forEach(b => newSelectedFilenames.delete(b.filename));
+ newLastToggledIndex = null;
+ renderGrid();
+}
+
+async function markSelectedNewAsReviewed(books) {
+ const selected = books.filter(b => newSelectedFilenames.has(b.filename)).map(b => b.filename);
+ if (!selected.length) return;
+
+ const btn = document.getElementById('btn-mark-reviewed');
+ if (btn) btn.disabled = true;
+
+ try {
+ const resp = await fetch('/library/new/mark-reviewed', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ filenames: selected }),
+ });
+ const result = await resp.json();
+ if (!resp.ok || result.error) {
+ alert(result.error || 'Could not mark books as reviewed.');
+ return;
+ }
+
+ const selectedSet = new Set(selected);
+ allBooks.forEach(b => {
+ if (selectedSet.has(b.filename)) b.needs_review = false;
+ });
+ selected.forEach(f => newSelectedFilenames.delete(f));
+ updateCounts();
+ renderGrid();
+ } catch {
+ alert('Could not mark books as reviewed.');
+ } finally {
+ if (btn) btn.disabled = false;
+ }
+}
+
+function tagValuesByType(book, type) {
+ return (book.tags || [])
+ .filter(t => t && t.tag_type === type && t.tag)
+ .map(t => t.tag);
+}
+
+function bookGenres(book) {
+ const explicit = tagValuesByType(book, 'genre');
+ if (explicit.length) return explicit;
+ return (book.tags || [])
+ .filter(t => t && t.tag_type === 'subject' && t.tag)
+ .map(t => t.tag);
+}
+
+function bookSubgenres(book) {
+ return tagValuesByType(book, 'subgenre');
+}
+
+function bookPlainTags(book) {
+ return tagValuesByType(book, 'tag');
+}
+
+function formatUpdated(iso) {
+ if (!iso) return '';
+ const d = new Date(iso);
+ if (Number.isNaN(d.getTime())) return '';
+ const y = d.getFullYear();
+ const m = String(d.getMonth() + 1).padStart(2, '0');
+ const day = String(d.getDate()).padStart(2, '0');
+ return `${y}-${m}-${day}`;
+}
+
+function newCellText(book, colId) {
+ if (colId === 'publisher') return publisherDisplayName(bookPublisherKey(book));
+ if (colId === 'author') return bookAuthor(book);
+ if (colId === 'series') return book.series || '';
+ if (colId === 'title') return bookTitle(book);
+ if (colId === 'has_cover') return book.has_cover ? 'Yes' : 'No';
+ if (colId === 'updated') return formatUpdated(book.updated_at);
+ if (colId === 'genres') return bookGenres(book).join(', ');
+ if (colId === 'subgenres') return bookSubgenres(book).join(', ');
+ if (colId === 'tags') return bookPlainTags(book).join(', ');
+ if (colId === 'volume') return book.series_index > 0 ? String(book.series_index) : '';
+ if (colId === 'status') return book.publication_status || '';
+ return '';
+}
+
+function renderNewControls(books) {
+ const controls = document.getElementById('new-controls');
+ if (!controls) return;
+ if (currentView !== 'new') {
+ hideNewControls();
+ return;
+ }
+
+ const validFilenames = new Set(books.map(b => b.filename));
+ newSelectedFilenames.forEach(filename => {
+ if (!validFilenames.has(filename)) newSelectedFilenames.delete(filename);
+ });
+
+ const listMode = newViewMode === 'list';
+ const selectedCount = listMode
+ ? books.filter(b => newSelectedFilenames.has(b.filename)).length
+ : 0;
+ const allSelected = listMode && !!books.length && selectedCount === books.length;
+
+ controls.style.display = '';
+ controls.innerHTML = `
+ <div class="new-controls-bar">
+ <div class="new-view-toggle">
+ <button class="btn btn-view ${listMode ? '' : 'active'}" onclick="setNewViewMode('grid')">Grid</button>
+ <button class="btn btn-view ${listMode ? 'active' : ''}" onclick="setNewViewMode('list')">List</button>
+ </div>
+ <div class="new-actions">
+ ${listMode ? `
+ <button class="btn btn-light" onclick="toggleNewColumnsMenu(event)">Columns</button>
+ <div id="new-columns-menu" class="new-columns-menu">
+ ${NEW_COLUMN_DEFS.map(col => `
+ <label class="new-col-item"><input type="checkbox" ${newVisibleColumns.includes(col.id) ? 'checked' : ''} onchange="toggleNewColumn('${col.id}')"> ${esc(col.label)}</label>
+ `).join('')}
+ </div>
+ <span class="new-selection-count">${selectedCount} selected</span>
+ <button class="btn btn-light" onclick="toggleSelectAllNewRows(${allSelected ? 'false' : 'true'}, activeBooks().filter(b => b.needs_review))">${allSelected ? 'Deselect all' : 'Select all'}</button>
+ <button class="btn btn-light" ${selectedCount ? '' : 'disabled'} onclick="clearNewSelection(activeBooks().filter(b => b.needs_review))">Clear</button>
+ <button id="btn-mark-reviewed" class="btn btn-mark-reviewed" ${selectedCount ? '' : 'disabled'} onclick="markSelectedNewAsReviewed(activeBooks().filter(b => b.needs_review))">Mark reviewed</button>
+ ` : `
+ <span class="new-selection-count">Switch to List to select multiple books</span>
+ `}
+ </div>
+ </div>
+ `;
+}
+
+function renderNewBooksList(books) {
+ const container = document.getElementById('grid-container');
+ if (!books.length) {
+ container.innerHTML = '<div class="empty">No newly imported books waiting for metadata review.</div>';
+ return;
+ }
+
+ const cols = NEW_COLUMN_DEFS.filter(c => newVisibleColumns.includes(c.id));
+ const selectedCount = books.filter(b => newSelectedFilenames.has(b.filename)).length;
+ const allSelected = selectedCount === books.length;
+
+ container.innerHTML = `
+ <div class="new-list-wrap">
+ <table class="new-list-table">
+ <thead>
+ <tr>
+ <th class="new-col-select"><input type="checkbox" ${allSelected ? 'checked' : ''} onclick="toggleSelectAllNewRows(this.checked, activeBooks().filter(b => b.needs_review))"></th>
+ ${cols.map(c => `<th class="col-${c.id}">${esc(c.label)}</th>`).join('')}
+ </tr>
+ </thead>
+ <tbody>
+ ${books.map(b => `
+ <tr class="new-list-row" data-filename="${esc(b.filename)}">
+ <td class="new-col-select"><input type="checkbox" class="new-row-select" ${newSelectedFilenames.has(b.filename) ? 'checked' : ''} onclick="handleNewRowCheckboxClick('${esc(b.filename)}', this, event)"></td>
+ ${cols.map(c => `<td class="col-${c.id}">${esc(newCellText(b, c.id))}</td>`).join('')}
+ </tr>
+ `).join('')}
+ </tbody>
+ </table>
+ </div>
+ `;
+
+ container.querySelectorAll('.new-list-row').forEach(row => {
+ row.addEventListener('click', () => {
+ const filename = row.getAttribute('data-filename') || '';
+ if (!filename) return;
+ location.href = `/library/book/${encodeURIComponent(filename)}`;
+ });
+ });
+}
+
+function renderNewBooksView(books) {
+ renderNewControls(books);
+ if (newViewMode === 'list') {
+ renderNewBooksList(books);
+ return;
+ }
+ renderBooksGrid(books);
+}
+
// ── Book grid (All / WTR / Author detail) ─────────────────────────────────
function renderBooksGrid(books) {
@@ -237,7 +585,7 @@ function renderBooksGrid(books) {
currentView === 'new' ? 'No newly imported books waiting for metadata review.' :
currentView === 'genre' ? `No books tagged "${esc(currentParam || '')}".` :
currentView === 'search' ? `No results for "${esc(currentParam || '')}".` :
- 'No EPUBs yet. Convert a story first.'
+ 'No books yet. Import EPUB, PDF or CBR/CBZ to get started.'
}`;
return;
}
@@ -855,7 +1203,9 @@ function openImportPicker() {
function onImportFilesSelected(fileList) {
if (!fileList || !fileList.length) return;
- uploadImportedFiles(Array.from(fileList));
+ const files = Array.from(fileList).filter(f => IMPORT_EXTENSIONS.some(ext => f.name.toLowerCase().endsWith(ext)));
+ if (!files.length) return;
+ uploadImportedFiles(files);
const input = document.getElementById('import-file-input');
if (input) input.value = '';
}
@@ -868,7 +1218,7 @@ async function uploadImportedFiles(files) {
importInProgress = true;
zone?.classList.add('uploading');
- if (title) title.textContent = 'Importing EPUBs…';
+ if (title) title.textContent = 'Importing files…';
if (sub) sub.textContent = `${files.length} file(s) selected`;
const form = new FormData();
@@ -883,8 +1233,8 @@ async function uploadImportedFiles(files) {
const importedCount = (data.imported || []).length;
const skippedCount = (data.skipped || []).length;
if (title) title.textContent = importedCount
- ? `Imported ${importedCount} EPUB(s)`
- : 'No EPUBs imported';
+ ? `Imported ${importedCount} file(s)`
+ : 'No files imported';
if (sub) sub.textContent = skippedCount
? `${skippedCount} skipped`
: 'Ready for next import';
@@ -896,7 +1246,7 @@ async function uploadImportedFiles(files) {
importInProgress = false;
zone?.classList.remove('uploading');
setTimeout(() => {
- if (title) title.textContent = 'Drop EPUB files here';
+ if (title) title.textContent = 'Drop EPUB, PDF or CBR/CBZ files here';
if (sub) sub.textContent = 'or click to choose files';
}, 1200);
}
@@ -954,12 +1304,20 @@ if (importZone) {
});
importZone.addEventListener('drop', e => {
if (importInProgress) return;
- const files = Array.from(e.dataTransfer?.files || []).filter(f => f.name.toLowerCase().endsWith('.epub'));
+ const files = Array.from(e.dataTransfer?.files || []).filter(f => IMPORT_EXTENSIONS.some(ext => f.name.toLowerCase().endsWith(ext)));
if (!files.length) return;
uploadImportedFiles(files);
});
}
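+// Close the column-visibility menu on any click outside the menu or its toggle button.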
+document.addEventListener('click', e => {
+ const menu = document.getElementById('new-columns-menu');
+ if (!menu) return;
+ const toggleBtn = e.target && e.target.closest ? e.target.closest('.new-actions .btn-light') : null;
+ if (menu.contains(e.target) || toggleBtn) return;
+ menu.classList.remove('visible');
+});
+
loadLibrary().then(() => {
const hash = window.location.hash.slice(1);
let view = 'all', param = null;
diff --git a/containers/novela/templates/backup.html b/containers/novela/templates/backup.html
index a777d52..403ae08 100644
--- a/containers/novela/templates/backup.html
+++ b/containers/novela/templates/backup.html
@@ -90,6 +90,25 @@
.btn.primary { border-color: rgba(200,120,58,0.45); background: rgba(200,120,58,0.12); }
.btn:disabled { opacity: 0.5; cursor: not-allowed; }
+ .field-label {
+ display: block;
+ font-family: var(--mono);
+ font-size: 0.72rem;
+ color: var(--text-dim);
+ margin-bottom: 0.4rem;
+ }
+ .field-input {
+ width: 100%;
+ border: 1px solid var(--border);
+ background: var(--surface2);
+ color: var(--text);
+ border-radius: 6px;
+ padding: 0.55rem 0.7rem;
+ font-family: var(--mono);
+ font-size: 0.78rem;
+ margin-bottom: 0.7rem;
+ }
+
.status-line { margin-top: 0.7rem; font-family: var(--mono); font-size: 0.74rem; }
.ok { color: var(--ok); }
.warn { color: var(--warn); }
@@ -114,6 +133,29 @@
Backup
+        <!-- Settings form reconstructed to match the script below; layout is approximate. -->
+        <label class="field-label" for="dropbox-token">Dropbox access token</label>
+        <input id="dropbox-token" class="field-input" type="password" autocomplete="off">
+        <label class="field-label" for="dropbox-root">Dropbox root folder</label>
+        <input id="dropbox-root" class="field-input" type="text">
+        <label class="field-label" for="retention-count">Snapshots to keep</label>
+        <input id="retention-count" class="field-input" type="number" min="1">
+        <label class="field-label" for="schedule-enabled">Scheduled backups</label>
+        <select id="schedule-enabled" class="field-input">
+          <option value="false">disabled</option>
+          <option value="true">enabled</option>
+        </select>
+        <label class="field-label" for="schedule-hours">Interval (hours)</label>
+        <input id="schedule-hours" class="field-input" type="number" min="1">
+        <button class="btn" onclick="toggleDropboxToken()">Show token</button>
+        <button class="btn primary" onclick="saveDropboxToken()">Save settings</button>
+        <button class="btn" onclick="clearDropboxToken()">Remove token</button>
+        <div id="dropbox-status" class="status-line"></div>
Run
@@ -182,6 +224,9 @@
rowHtml('Dropbox token', d.token_present ? 'present' : 'missing'),
rowHtml('Dropbox auth', fmtStatus(d.dropbox_ok)),
rowHtml('Dropbox error', d.dropbox_error || '-'),
+ rowHtml('Dropbox root', d.dropbox_root || '/novela'),
+      rowHtml('Snapshots to keep', d.retention_count ?? 14),
+ rowHtml('Schedule', d.schedule_enabled ? `enabled (${d.schedule_interval_hours || 24}h)` : 'disabled'),
rowHtml('pg_dump', d.pg_dump_available ? (d.pg_dump_path || 'available') : 'missing'),
rowHtml('Library exists', fmtStatus(d.library_exists)),
rowHtml('Library path', d.library_path || '-'),
@@ -230,6 +275,100 @@
`).join('');
}
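+  // Populate the settings form from the server; the stored token is never echoed back, only a preview.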
+ async function loadDropboxSettings() {
+ const out = document.getElementById('dropbox-status');
+ const tokenEl = document.getElementById('dropbox-token');
+ const rootEl = document.getElementById('dropbox-root');
+ const retentionEl = document.getElementById('retention-count');
+ const scheduleEnabledEl = document.getElementById('schedule-enabled');
+ const scheduleHoursEl = document.getElementById('schedule-hours');
+ out.className = 'status-line';
+ out.textContent = 'Loading Dropbox settings...';
+ try {
+ const r = await fetch('/api/backup/credentials');
+ const d = await r.json();
+ tokenEl.value = '';
+ rootEl.value = d.dropbox_root || '/novela';
+ retentionEl.value = d.retention_count ?? 14;
+ scheduleEnabledEl.value = String(!!d.schedule_enabled);
+ scheduleHoursEl.value = d.schedule_interval_hours ?? 24;
+ if (d.configured) {
+ out.className = 'status-line ok';
+ out.textContent = `Configured (${d.token_preview || 'token set'})${d.updated_at ? ` • updated ${d.updated_at}` : ''}`;
+ } else {
+ out.className = 'status-line warn';
+ out.textContent = 'No Dropbox token configured.';
+ }
+ } catch (e) {
+ out.className = 'status-line err';
+ out.textContent = `Failed to load settings: ${e}`;
+ }
+ }
+
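+  // Persist token and schedule settings; numeric fields are clamped to a minimum of 1.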
+ async function saveDropboxToken() {
+ const out = document.getElementById('dropbox-status');
+ const token = (document.getElementById('dropbox-token').value || '').trim();
+ const dropboxRoot = (document.getElementById('dropbox-root').value || '').trim();
+ const retentionCount = Math.max(1, parseInt((document.getElementById('retention-count').value || '14').trim(), 10) || 14);
+ const scheduleEnabled = document.getElementById('schedule-enabled').value === 'true';
+ const scheduleIntervalHours = Math.max(1, parseInt((document.getElementById('schedule-hours').value || '24').trim(), 10) || 24);
+ out.className = 'status-line warn';
+ out.textContent = 'Saving backup settings...';
+ try {
+ const r = await fetch('/api/backup/credentials', {
+ method: 'POST',
+ headers: {'Content-Type': 'application/json'},
+ body: JSON.stringify({
+ token,
+ dropbox_root: dropboxRoot,
+ retention_count: retentionCount,
+ schedule_enabled: scheduleEnabled,
+ schedule_interval_hours: scheduleIntervalHours
+ }),
+ });
+ const raw = await r.text();
+ let d;
+ try {
+ d = JSON.parse(raw);
+ } catch (_) {
+ throw new Error(`HTTP ${r.status}: ${raw.slice(0, 180) || 'non-JSON response'}`);
+ }
+ if (!d.ok) throw new Error(d.error || 'save failed');
+ out.className = 'status-line ok';
+ out.textContent = `Backup settings saved. Root: ${d.dropbox_root || dropboxRoot || '/novela'} • keep: ${d.retention_count || retentionCount} • schedule: ${(d.schedule_enabled ? 'on' : 'off')} (${d.schedule_interval_hours || scheduleIntervalHours}h)`;
+ await Promise.all([loadDropboxSettings(), loadHealth()]);
+ } catch (e) {
+ out.className = 'status-line err';
+ out.textContent = `Save failed: ${e}`;
+ }
+ }
+
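+  // Delete the stored token server-side and reset the form to default values.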
+ async function clearDropboxToken() {
+ if (!confirm('Remove Dropbox token for backup?')) return;
+ const out = document.getElementById('dropbox-status');
+ out.className = 'status-line warn';
+ out.textContent = 'Removing token...';
+ try {
+      const r = await fetch('/api/backup/credentials', {method: 'DELETE'});
+      if (!r.ok) throw new Error(`HTTP ${r.status}`);
+ out.className = 'status-line ok';
+ out.textContent = 'Dropbox token removed.';
+ document.getElementById('dropbox-token').value = '';
+ document.getElementById('dropbox-root').value = '/novela';
+ document.getElementById('retention-count').value = 14;
+ document.getElementById('schedule-enabled').value = 'false';
+ document.getElementById('schedule-hours').value = 24;
+ await Promise.all([loadDropboxSettings(), loadHealth()]);
+ } catch (e) {
+ out.className = 'status-line err';
+ out.textContent = `Remove failed: ${e}`;
+ }
+ }
+
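+  // Toggle the token field between masked (password) and plain-text display.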
+ function toggleDropboxToken() {
+ const el = document.getElementById('dropbox-token');
+ el.type = el.type === 'password' ? 'text' : 'password';
+ }
+
async function runBackup(dryRun) {
const btnDry = document.getElementById('btn-dry');
const btnLive = document.getElementById('btn-live');
@@ -249,7 +388,11 @@
const d = await r.json();
if (d.ok) {
out.className = 'status-line ok';
- out.textContent = `Backup ${d.status}. id=${d.backup_id}, files=${d.files_count}, bytes=${d.size_bytes}, dry_run=${d.dry_run}`;
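+        // A "running" status means the backup was handed off to a background task.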
+ if (d.status === 'running') {
+ out.textContent = `Backup started in background. id=${d.backup_id}, dry_run=${d.dry_run}`;
+ } else {
+ out.textContent = `Backup ${d.status}. id=${d.backup_id}, files=${d.files_count}, bytes=${d.size_bytes}, dry_run=${d.dry_run}`;
+ }
} else {
out.className = 'status-line err';
out.textContent = `Backup failed: ${d.error || 'unknown error'}`;
@@ -265,7 +408,7 @@
}
async function refreshAll() {
- await Promise.all([loadHealth(), loadStatus(), loadHistory()]);
+ await Promise.all([loadDropboxSettings(), loadHealth(), loadStatus(), loadHistory()]);
}
refreshAll();
diff --git a/containers/novela/templates/home.html b/containers/novela/templates/home.html
index fff06a5..1490d2e 100644
--- a/containers/novela/templates/home.html
+++ b/containers/novela/templates/home.html
@@ -21,7 +21,71 @@
.main { margin-left: var(--sidebar); min-height: 100vh; padding: 2rem 2.5rem 4rem; }
- /* ── Section header ──────────────────────────────────────────────── */
+ .main-header {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ margin-bottom: 1.75rem;
+ }
+ .main-title {
+ font-family: var(--mono);
+ font-size: 0.7rem;
+ letter-spacing: 0.12em;
+ text-transform: uppercase;
+ color: var(--accent);
+ }
+
+ .search-wrap { position: relative; display: flex; align-items: center; }
+ .search-icon { position: absolute; left: 0.5rem; color: var(--text-faint); pointer-events: none; }
+ .search-input {
+ background: var(--surface); border: 1px solid var(--border);
+ border-radius: var(--radius); color: var(--text);
+ font-family: var(--mono); font-size: 0.78rem;
+ padding: 0.4rem 1.8rem 0.4rem 2rem;
+ outline: none; width: 220px;
+ transition: border-color 0.15s, width 0.2s;
+ }
+ .search-input:focus { border-color: var(--accent); width: 280px; }
+ .search-input::placeholder { color: var(--text-faint); }
+ .search-clear {
+ position: absolute; right: 0.4rem;
+ background: none; border: none; color: var(--text-faint);
+ cursor: pointer; font-size: 1rem; line-height: 1; padding: 0 0.1rem;
+ }
+ .search-clear:hover { color: var(--text-dim); }
+
+ .import-dropzone {
+ border: 1px dashed var(--border);
+ background: rgba(34, 31, 27, 0.45);
+ border-radius: var(--radius);
+ padding: 0.9rem 1rem;
+ margin-bottom: 1.1rem;
+ cursor: pointer;
+ transition: border-color 0.15s, background 0.15s;
+ }
+ .import-dropzone:hover { border-color: var(--accent); }
+ .import-dropzone.dragover {
+ border-color: var(--accent2);
+ background: rgba(200, 120, 58, 0.12);
+ }
+ .import-dropzone.uploading {
+ opacity: 0.8;
+ cursor: progress;
+ }
+ .import-title {
+ font-family: var(--mono);
+ font-size: 0.72rem;
+ text-transform: uppercase;
+ letter-spacing: 0.08em;
+ color: var(--accent2);
+ }
+ .import-sub {
+ margin-top: 0.25rem;
+ font-family: var(--mono);
+ font-size: 0.68rem;
+ color: var(--text-dim);
+ }
+
.section-block { margin-bottom: 2.5rem; }
.section-header {
display: flex; align-items: baseline; justify-content: space-between;
@@ -40,7 +104,6 @@
}
.section-more:hover { color: var(--accent); }
- /* ── Horizontal scroll row ───────────────────────────────────────── */
.h-row { display: flex; gap: 1rem; overflow-x: auto; padding-bottom: 0.75rem; }
.h-row::-webkit-scrollbar { height: 4px; }
.h-row::-webkit-scrollbar-thumb { background: var(--border); border-radius: 4px; }
@@ -71,7 +134,6 @@
.h-progress-fill { height: 100%; background: var(--accent); border-radius: 2px; }
.h-pct { font-family: var(--mono); font-size: 0.6rem; color: var(--text-dim); }
- /* ── Full grid ───────────────────────────────────────────────────── */
.grid-header {
display: flex; align-items: center; gap: 0.75rem; margin-bottom: 1.75rem;
}
@@ -119,10 +181,26 @@
font-size: 0.82rem; padding: 4rem 2rem;
}
- /* ── Responsive ────────────────────────────────────────────── */
@media (max-width: 768px) {
- .main { margin-left: 0; padding: 4rem 1rem 4rem; }
- .cover-grid { grid-template-columns: repeat(auto-fill, minmax(130px, 1fr)); gap: 1rem; }
+ .main {
+ margin-left: 0;
+ padding: 4rem 1rem 4rem;
+ }
+
+ .main-header {
+ flex-wrap: wrap;
+ gap: 0.75rem;
+ margin-bottom: 1.25rem;
+ }
+
+ .cover-grid {
+ grid-template-columns: repeat(auto-fill, minmax(130px, 1fr));
+ gap: 1rem;
+ }
+
+ .search-input { width: 100%; }
+ .search-input:focus { width: 100%; }
+ .search-wrap { flex: 1; min-width: 0; }
}
@@ -131,8 +209,23 @@
{% include "_sidebar.html" %}
+  <!-- Header, search and import dropzone; element ids/classes reconstructed to match the page script. -->
+  <div class="main-header">
+    <div class="main-title">Library</div>
+    <div class="search-wrap">
+      <span class="search-icon">⌕</span>
+      <input id="search-input" class="search-input" type="search" placeholder="Search…">
+      <button id="search-clear" class="search-clear" title="Clear search">×</button>
+    </div>
+  </div>
+  <div id="import-zone" class="import-dropzone">
+    <div id="import-title" class="import-title">Drop EPUB, PDF or CBR/CBZ files here</div>
+    <div id="import-sub" class="import-sub">or click to choose files</div>
+    <input id="import-file-input" type="file" multiple hidden>
+  </div>
-  <div class="grid-empty">Nothing here yet — convert some books to get started.</div>
+  <div class="grid-empty">Nothing here yet - import some books to get started.</div>