Improve SQLite batching and diagnostics visibility

2026-03-03 15:03:23 +13:00
parent e582ff4ef7
commit dda17a20a5
10 changed files with 667 additions and 188 deletions

View File

@@ -1 +1 @@
0303261413
0303261502

View File

@@ -1,2 +1,2 @@
BUILD_NUMBER = "0303261413"
CHANGELOG = '2026-03-03|Hotfix: expand landing-page search to all requests\n2026-03-02|Hotfix: add logged-out password reset flow\n2026-03-02|Process 1 build 0203261953\n2026-03-02|Process 1 build 0203261610\n2026-03-02|Process 1 build 0203261608\n2026-03-02|Add dedicated profile invites page and fix mobile admin layout\n2026-03-01|Persist Seerr media failure suppression and reduce sync error noise\n2026-03-01|Add repository line ending policy\n2026-03-01|Finalize diagnostics, logging controls, and email test support\n2026-03-01|Add invite email templates and delivery workflow\n2026-02-28|Finalize dev-1.3 upgrades and Seerr updates\n2026-02-27|admin docs and layout refresh, build 2702261314\n2026-02-27|Build 2702261153: fix jellyfin sync user visibility\n2026-02-26|Build 2602262241: live request page updates\n2026-02-26|Build 2602262204\n2026-02-26|Build 2602262159: restore jellyfin-first user source\n2026-02-26|Build 2602262049: split magent settings and harden local login\n2026-02-26|Build 2602262030: add magent settings and hardening\n2026-02-26|Build 2602261731: fix user resync after nuclear wipe\n2026-02-26|Build 2602261717: master invite policy and self-service invite controls\n2026-02-26|Build 2602261636: self-service invites and count fixes\n2026-02-26|Build 2602261605: invite trace and cross-system user lifecycle\n2026-02-26|Build 2602261536: refine invite layouts and tighten UI\n2026-02-26|Build 2602261523: live updates, invite cleanup and nuclear resync\n2026-02-26|Build 2602261442: tidy users and invite layouts\n2026-02-26|Build 2602261409: unify invite management controls\n2026-02-26|Build 2602260214: invites profiles and expiry admin controls\n2026-02-26|Build 2602260022: enterprise UI refresh and users bulk auto-search\n2026-02-25|Build 2502262321: fix auto-search quality and per-user toggle\n2026-02-02|Build 0202261541: allow FQDN service URLs\n2026-01-30|Build 3001262148: single container\n2026-01-29|Build 2901262244: format changelog\n2026-01-29|Build 2901262240: cache users\n2026-01-29|Tidy full changelog\n2026-01-29|Update full changelog\n2026-01-29|Bake build number and changelog\n2026-01-29|Hardcode build number in backend\n2026-01-29|release: 2901262102\n2026-01-29|release: 2901262044\n2026-01-29|release: 2901262036\n2026-01-27|Hydrate missing artwork from Jellyseerr (build 271261539)\n2026-01-27|Fallback to TMDB when artwork cache fails (build 271261524)\n2026-01-27|Add service test buttons (build 271261335)\n2026-01-27|Bump build number (process 2) 271261322\n2026-01-27|Add cache load spinner (build 271261238)\n2026-01-27|Fix snapshot title fallback (build 271261228)\n2026-01-27|Fix request titles in snapshots (build 271261219)\n2026-01-27|Bump build number to 271261202\n2026-01-27|Clarify request sync settings (build 271261159)\n2026-01-27|Fix backend cache stats import (build 271261149)\n2026-01-27|Improve cache stats performance (build 271261145)\n2026-01-27|Add cache control artwork stats\n2026-01-26|Fix sync progress bar animation\n2026-01-26|Fix cache title hydration\n2026-01-25|Build 2501262041\n2026-01-25|Harden request cache titles and cache-only reads\n2026-01-25|Serve bundled branding assets by default\n2026-01-25|Seed branding logo from bundled assets\n2026-01-25|Tidy request sync controls\n2026-01-25|Add Jellyfin login cache and admin-only stats\n2026-01-25|Add user stats and activity tracking\n2026-01-25|Move account actions into avatar menu\n2026-01-25|Improve mobile header layout\n2026-01-25|Automate build number tagging and sync\n2026-01-25|Add site banner, build number, and changelog\n2026-01-24|Improve request handling and qBittorrent categories\n2026-01-24|Map Prowlarr releases to Arr indexers for manual grab\n2026-01-24|Clarify how-it-works steps and fixes\n2026-01-24|Document fix buttons in how-it-works\n2026-01-24|Route grabs through Sonarr/Radarr only\n2026-01-23|Use backend branding assets for logo and favicon\n2026-01-23|Copy public assets into frontend image\n2026-01-23|Fix backend Dockerfile paths for root context\n2026-01-23|Add Docker Hub compose override\n2026-01-23|Remove password fields from users page\n2026-01-23|Use bundled branding assets\n2026-01-23|Add default branding assets when missing\n2026-01-23|Show available status on landing when in Jellyfin\n2026-01-23|Fix cache titles and move feedback link\n2026-01-23|Add feedback form and webhook\n2026-01-23|Hide header actions when signed out\n2026-01-23|Fallback manual grab to qBittorrent\n2026-01-23|Split search actions and improve download options\n2026-01-23|Fix cache titles via Jellyseerr media lookup\n2026-01-22|Update README with Docker-first guide\n2026-01-22|Update README\n2026-01-22|Ignore build artifacts\n2026-01-22|Initial commit'
BUILD_NUMBER = "0303261502"
CHANGELOG = '2026-03-03|Add login page visibility controls\n2026-03-03|Hotfix: expand landing-page search to all requests\n2026-03-02|Hotfix: add logged-out password reset flow\n2026-03-02|Process 1 build 0203261953\n2026-03-02|Process 1 build 0203261610\n2026-03-02|Process 1 build 0203261608\n2026-03-02|Add dedicated profile invites page and fix mobile admin layout\n2026-03-01|Persist Seerr media failure suppression and reduce sync error noise\n2026-03-01|Add repository line ending policy\n2026-03-01|Finalize diagnostics, logging controls, and email test support\n2026-03-01|Add invite email templates and delivery workflow\n2026-02-28|Finalize dev-1.3 upgrades and Seerr updates\n2026-02-27|admin docs and layout refresh, build 2702261314\n2026-02-27|Build 2702261153: fix jellyfin sync user visibility\n2026-02-26|Build 2602262241: live request page updates\n2026-02-26|Build 2602262204\n2026-02-26|Build 2602262159: restore jellyfin-first user source\n2026-02-26|Build 2602262049: split magent settings and harden local login\n2026-02-26|Build 2602262030: add magent settings and hardening\n2026-02-26|Build 2602261731: fix user resync after nuclear wipe\n2026-02-26|Build 2602261717: master invite policy and self-service invite controls\n2026-02-26|Build 2602261636: self-service invites and count fixes\n2026-02-26|Build 2602261605: invite trace and cross-system user lifecycle\n2026-02-26|Build 2602261536: refine invite layouts and tighten UI\n2026-02-26|Build 2602261523: live updates, invite cleanup and nuclear resync\n2026-02-26|Build 2602261442: tidy users and invite layouts\n2026-02-26|Build 2602261409: unify invite management controls\n2026-02-26|Build 2602260214: invites profiles and expiry admin controls\n2026-02-26|Build 2602260022: enterprise UI refresh and users bulk auto-search\n2026-02-25|Build 2502262321: fix auto-search quality and per-user toggle\n2026-02-02|Build 0202261541: allow FQDN service URLs\n2026-01-30|Build 3001262148: single container\n2026-01-29|Build 2901262244: format changelog\n2026-01-29|Build 2901262240: cache users\n2026-01-29|Tidy full changelog\n2026-01-29|Update full changelog\n2026-01-29|Bake build number and changelog\n2026-01-29|Hardcode build number in backend\n2026-01-29|release: 2901262102\n2026-01-29|release: 2901262044\n2026-01-29|release: 2901262036\n2026-01-27|Hydrate missing artwork from Jellyseerr (build 271261539)\n2026-01-27|Fallback to TMDB when artwork cache fails (build 271261524)\n2026-01-27|Add service test buttons (build 271261335)\n2026-01-27|Bump build number (process 2) 271261322\n2026-01-27|Add cache load spinner (build 271261238)\n2026-01-27|Fix snapshot title fallback (build 271261228)\n2026-01-27|Fix request titles in snapshots (build 271261219)\n2026-01-27|Bump build number to 271261202\n2026-01-27|Clarify request sync settings (build 271261159)\n2026-01-27|Fix backend cache stats import (build 271261149)\n2026-01-27|Improve cache stats performance (build 271261145)\n2026-01-27|Add cache control artwork stats\n2026-01-26|Fix sync progress bar animation\n2026-01-26|Fix cache title hydration\n2026-01-25|Build 2501262041\n2026-01-25|Harden request cache titles and cache-only reads\n2026-01-25|Serve bundled branding assets by default\n2026-01-25|Seed branding logo from bundled assets\n2026-01-25|Tidy request sync controls\n2026-01-25|Add Jellyfin login cache and admin-only stats\n2026-01-25|Add user stats and activity tracking\n2026-01-25|Move account actions into avatar menu\n2026-01-25|Improve mobile header layout\n2026-01-25|Automate build number tagging and sync\n2026-01-25|Add site banner, build number, and changelog\n2026-01-24|Improve request handling and qBittorrent categories\n2026-01-24|Map Prowlarr releases to Arr indexers for manual grab\n2026-01-24|Clarify how-it-works steps and fixes\n2026-01-24|Document fix buttons in how-it-works\n2026-01-24|Route grabs through Sonarr/Radarr only\n2026-01-23|Use backend branding assets for logo and favicon\n2026-01-23|Copy public assets into frontend image\n2026-01-23|Fix backend Dockerfile paths for root context\n2026-01-23|Add Docker Hub compose override\n2026-01-23|Remove password fields from users page\n2026-01-23|Use bundled branding assets\n2026-01-23|Add default branding assets when missing\n2026-01-23|Show available status on landing when in Jellyfin\n2026-01-23|Fix cache titles and move feedback link\n2026-01-23|Add feedback form and webhook\n2026-01-23|Hide header actions when signed out\n2026-01-23|Fallback manual grab to qBittorrent\n2026-01-23|Split search actions and improve download options\n2026-01-23|Fix cache titles via Jellyseerr media lookup\n2026-01-22|Update README with Docker-first guide\n2026-01-22|Update README\n2026-01-22|Ignore build artifacts\n2026-01-22|Initial commit'

View File

@@ -4,6 +4,7 @@ import sqlite3
import logging
from hashlib import sha256
from datetime import datetime, timezone, timedelta
from time import perf_counter
from typing import Any, Dict, Optional
from .config import settings
@@ -16,6 +17,9 @@ SEERR_MEDIA_FAILURE_SHORT_SUPPRESS_HOURS = 6
SEERR_MEDIA_FAILURE_RETRY_SUPPRESS_HOURS = 24
SEERR_MEDIA_FAILURE_PERSISTENT_SUPPRESS_DAYS = 30
SEERR_MEDIA_FAILURE_PERSISTENT_THRESHOLD = 3
SQLITE_BUSY_TIMEOUT_MS = 5_000
SQLITE_CACHE_SIZE_KIB = 32_768
SQLITE_MMAP_SIZE_BYTES = 256 * 1024 * 1024
def _db_path() -> str:
@@ -26,8 +30,30 @@ def _db_path() -> str:
return path
def _apply_connection_pragmas(conn: sqlite3.Connection) -> None:
pragmas = (
("journal_mode", "WAL"),
("synchronous", "NORMAL"),
("temp_store", "MEMORY"),
("cache_size", -SQLITE_CACHE_SIZE_KIB),
("mmap_size", SQLITE_MMAP_SIZE_BYTES),
("busy_timeout", SQLITE_BUSY_TIMEOUT_MS),
)
for pragma, value in pragmas:
try:
conn.execute(f"PRAGMA {pragma} = {value}")
except sqlite3.DatabaseError:
logger.debug("sqlite pragma skipped: %s=%s", pragma, value, exc_info=True)
def _connect() -> sqlite3.Connection:
return sqlite3.connect(_db_path())
conn = sqlite3.connect(
_db_path(),
timeout=SQLITE_BUSY_TIMEOUT_MS / 1000,
cached_statements=512,
)
_apply_connection_pragmas(conn)
return conn
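A minimal smoke test (an illustrative sketch, not part of this commit) that the pragmas above actually stick; note SQLite reports journal_mode in lowercase and only honours WAL on file-backed databases:

def _debug_check_pragmas() -> None:  # hypothetical helper
    conn = _connect()
    try:
        assert conn.execute("PRAGMA journal_mode").fetchone()[0] == "wal"
        assert conn.execute("PRAGMA busy_timeout").fetchone()[0] == SQLITE_BUSY_TIMEOUT_MS
        # cache_size echoes the negative KiB value set in _apply_connection_pragmas.
        assert conn.execute("PRAGMA cache_size").fetchone()[0] == -SQLITE_CACHE_SIZE_KIB
    finally:
        conn.close()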
def _parse_datetime_value(value: Optional[str]) -> Optional[datetime]:
@@ -321,6 +347,30 @@ def init_db() -> None:
ON requests_cache (requested_by_norm)
"""
)
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_requests_cache_updated_at
ON requests_cache (updated_at DESC, request_id DESC)
"""
)
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_requests_cache_requested_by_id_created_at
ON requests_cache (requested_by_id, created_at DESC, request_id DESC)
"""
)
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_requests_cache_requested_by_norm_created_at
ON requests_cache (requested_by_norm, created_at DESC, request_id DESC)
"""
)
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_requests_cache_status_created_at
ON requests_cache (status, created_at DESC, request_id DESC)
"""
)
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_artwork_cache_status_updated_at
@@ -441,6 +491,15 @@ def init_db() -> None:
)
except sqlite3.OperationalError:
pass
try:
conn.execute(
"""
CREATE INDEX IF NOT EXISTS idx_users_username_nocase
ON users (username COLLATE NOCASE)
"""
)
except sqlite3.OperationalError:
pass
try:
conn.execute("ALTER TABLE requests_cache ADD COLUMN requested_by_id INTEGER")
except sqlite3.OperationalError:
@@ -454,6 +513,10 @@ def init_db() -> None:
)
except sqlite3.OperationalError:
pass
try:
conn.execute("PRAGMA optimize")
except sqlite3.OperationalError:
pass
_backfill_auth_providers()
ensure_admin_user()
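The new composite indexes target the common "newest requests for a user/status" reads. A quick hypothetical check (query shape assumed, not from this commit) that the planner actually uses one of them:

with _connect() as conn:
    plan = conn.execute(
        "EXPLAIN QUERY PLAN "
        "SELECT request_id FROM requests_cache "
        "WHERE requested_by_id = ? "
        "ORDER BY created_at DESC, request_id DESC LIMIT 50",
        (42,),
    ).fetchall()
    # Expect the detail text to name idx_requests_cache_requested_by_id_created_at.
    print(plan)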
@@ -1619,41 +1682,44 @@ def get_user_request_stats(username_norm: str, requested_by_id: Optional[int] =
"last_request_at": None,
}
with _connect() as conn:
total_row = conn.execute(
row = conn.execute(
"""
SELECT COUNT(*)
SELECT
COUNT(*) AS total,
SUM(CASE WHEN status = 4 THEN 1 ELSE 0 END) AS ready,
SUM(CASE WHEN status = 1 THEN 1 ELSE 0 END) AS pending,
SUM(CASE WHEN status = 2 THEN 1 ELSE 0 END) AS approved,
SUM(CASE WHEN status = 5 THEN 1 ELSE 0 END) AS working,
SUM(CASE WHEN status = 6 THEN 1 ELSE 0 END) AS partial,
SUM(CASE WHEN status = 3 THEN 1 ELSE 0 END) AS declined,
MAX(created_at) AS last_request_at
FROM requests_cache
WHERE requested_by_id = ?
""",
(requested_by_id,),
).fetchone()
status_rows = conn.execute(
"""
SELECT status, COUNT(*)
FROM requests_cache
WHERE requested_by_id = ?
GROUP BY status
""",
(requested_by_id,),
).fetchall()
last_row = conn.execute(
"""
SELECT MAX(created_at)
FROM requests_cache
WHERE requested_by_id = ?
""",
(requested_by_id,),
).fetchone()
counts = {int(row[0]): int(row[1]) for row in status_rows if row[0] is not None}
pending = counts.get(1, 0)
approved = counts.get(2, 0)
declined = counts.get(3, 0)
ready = counts.get(4, 0)
working = counts.get(5, 0)
partial = counts.get(6, 0)
if not row:
return {
"total": 0,
"ready": 0,
"pending": 0,
"approved": 0,
"working": 0,
"partial": 0,
"declined": 0,
"in_progress": 0,
"last_request_at": None,
}
total = int(row[0] or 0)
ready = int(row[1] or 0)
pending = int(row[2] or 0)
approved = int(row[3] or 0)
working = int(row[4] or 0)
partial = int(row[5] or 0)
declined = int(row[6] or 0)
in_progress = approved + working + partial
return {
"total": int(total_row[0] or 0) if total_row else 0,
"total": total,
"ready": ready,
"pending": pending,
"approved": approved,
@@ -1661,7 +1727,7 @@ def get_user_request_stats(username_norm: str, requested_by_id: Optional[int] =
"partial": partial,
"declined": declined,
"in_progress": in_progress,
"last_request_at": last_row[0] if last_row else None,
"last_request_at": row[7],
}
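The rewrite folds the previous three round trips (total count, per-status counts, last request) into a single aggregate scan. A hedged usage sketch with invented values:

stats = get_user_request_stats("alice", requested_by_id=42)
print(stats["total"], stats["ready"], stats["in_progress"], stats["last_request_at"])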
@@ -1688,50 +1754,7 @@ def get_global_request_total() -> int:
return int(row[0] or 0)
def upsert_request_cache(
request_id: int,
media_id: Optional[int],
media_type: Optional[str],
status: Optional[int],
title: Optional[str],
year: Optional[int],
requested_by: Optional[str],
requested_by_norm: Optional[str],
requested_by_id: Optional[int],
created_at: Optional[str],
updated_at: Optional[str],
payload_json: str,
) -> None:
normalized_title = _normalize_title_value(title)
normalized_year = _normalize_year_value(year)
derived_title = None
derived_year = None
if not normalized_title or normalized_year is None:
derived_title, derived_year = _extract_title_year_from_payload(payload_json)
if _is_placeholder_title(normalized_title, request_id):
normalized_title = None
if derived_title and not normalized_title:
normalized_title = derived_title
if normalized_year is None and derived_year is not None:
normalized_year = derived_year
with _connect() as conn:
existing_title = None
existing_year = None
if normalized_title is None or normalized_year is None:
row = conn.execute(
"SELECT title, year FROM requests_cache WHERE request_id = ?",
(request_id,),
).fetchone()
if row:
existing_title, existing_year = row[0], row[1]
if _is_placeholder_title(existing_title, request_id):
existing_title = None
if normalized_title is None and existing_title:
normalized_title = existing_title
if normalized_year is None and existing_year is not None:
normalized_year = existing_year
conn.execute(
"""
_REQUESTS_CACHE_UPSERT_SQL = """
INSERT INTO requests_cache (
request_id,
media_id,
@@ -1759,7 +1782,97 @@ def upsert_request_cache(
created_at = excluded.created_at,
updated_at = excluded.updated_at,
payload_json = excluded.payload_json
""",
"""
def get_request_cache_lookup(request_ids: list[int]) -> Dict[int, Dict[str, Any]]:
normalized_ids = sorted({int(request_id) for request_id in request_ids if isinstance(request_id, int)})
if not normalized_ids:
return {}
placeholders = ", ".join("?" for _ in normalized_ids)
query = f"""
SELECT request_id, updated_at, title, year
FROM requests_cache
WHERE request_id IN ({placeholders})
"""
with _connect() as conn:
rows = conn.execute(query, tuple(normalized_ids)).fetchall()
return {
int(row[0]): {
"request_id": int(row[0]),
"updated_at": row[1],
"title": row[2],
"year": row[3],
}
for row in rows
}
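A usage sketch for the batch lookup (IDs invented): duplicate IDs are collapsed, and IDs with no cached row are simply absent from the result.

cached = get_request_cache_lookup([101, 101, 205])
if 101 in cached:
    print(cached[101]["title"], cached[101]["updated_at"])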
def _prepare_requests_cache_upsert_rows(
records: list[Dict[str, Any]], conn: sqlite3.Connection
) -> list[tuple[Any, ...]]:
if not records:
return []
existing_rows: Dict[int, tuple[Optional[str], Optional[int]]] = {}
ids_needing_existing = [
int(record["request_id"])
for record in records
if isinstance(record.get("request_id"), int)
and (
not _normalize_title_value(record.get("title"))
or _normalize_year_value(record.get("year")) is None
)
]
if ids_needing_existing:
placeholders = ", ".join("?" for _ in sorted(set(ids_needing_existing)))
query = f"""
SELECT request_id, title, year
FROM requests_cache
WHERE request_id IN ({placeholders})
"""
for row in conn.execute(query, tuple(sorted(set(ids_needing_existing)))).fetchall():
existing_rows[int(row[0])] = (row[1], row[2])
prepared: list[tuple[Any, ...]] = []
for record in records:
request_id = int(record["request_id"])
media_id = record.get("media_id")
media_type = record.get("media_type")
status = record.get("status")
requested_by = record.get("requested_by")
requested_by_norm = record.get("requested_by_norm")
requested_by_id = record.get("requested_by_id")
created_at = record.get("created_at")
updated_at = record.get("updated_at")
payload_json = str(record.get("payload_json") or "")
normalized_title = _normalize_title_value(record.get("title"))
normalized_year = _normalize_year_value(record.get("year"))
derived_title = None
derived_year = None
if not normalized_title or normalized_year is None:
derived_title, derived_year = _extract_title_year_from_payload(payload_json)
if _is_placeholder_title(normalized_title, request_id):
normalized_title = None
if derived_title and not normalized_title:
normalized_title = derived_title
if normalized_year is None and derived_year is not None:
normalized_year = derived_year
existing_title = None
existing_year = None
if normalized_title is None or normalized_year is None:
existing = existing_rows.get(request_id)
if existing:
existing_title, existing_year = existing
if _is_placeholder_title(existing_title, request_id):
existing_title = None
if normalized_title is None and existing_title:
normalized_title = existing_title
if normalized_year is None and existing_year is not None:
normalized_year = existing_year
prepared.append(
(
request_id,
media_id,
@@ -1773,8 +1886,47 @@ def upsert_request_cache(
created_at,
updated_at,
payload_json,
),
)
)
return prepared
def upsert_request_cache(
request_id: int,
media_id: Optional[int],
media_type: Optional[str],
status: Optional[int],
title: Optional[str],
year: Optional[int],
requested_by: Optional[str],
requested_by_norm: Optional[str],
requested_by_id: Optional[int],
created_at: Optional[str],
updated_at: Optional[str],
payload_json: str,
) -> None:
with _connect() as conn:
rows = _prepare_requests_cache_upsert_rows(
[
{
"request_id": request_id,
"media_id": media_id,
"media_type": media_type,
"status": status,
"title": title,
"year": year,
"requested_by": requested_by,
"requested_by_norm": requested_by_norm,
"requested_by_id": requested_by_id,
"created_at": created_at,
"updated_at": updated_at,
"payload_json": payload_json,
}
],
conn,
)
if rows:
conn.execute(_REQUESTS_CACHE_UPSERT_SQL, rows[0])
logger.debug(
"requests_cache upsert: request_id=%s media_id=%s status=%s updated_at=%s",
request_id,
@@ -1784,6 +1936,17 @@ def upsert_request_cache(
)
def upsert_request_cache_many(records: list[Dict[str, Any]]) -> int:
if not records:
return 0
with _connect() as conn:
rows = _prepare_requests_cache_upsert_rows(records, conn)
if rows:
conn.executemany(_REQUESTS_CACHE_UPSERT_SQL, rows)
logger.debug("requests_cache bulk upsert: rows=%s", len(records))
return len(records)
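A hedged sketch of the batched write path; the record keys mirror the keyword arguments of upsert_request_cache, and every value below is invented:

upsert_request_cache_many(
    [
        {
            "request_id": 101,
            "media_id": 550,
            "media_type": "movie",
            "status": 2,
            "title": "Example Movie",
            "year": 1999,
            "requested_by": "Alice",
            "requested_by_norm": "alice",
            "requested_by_id": 42,
            "created_at": "2026-03-03T00:00:00Z",
            "updated_at": "2026-03-03T00:00:00Z",
            "payload_json": "{}",
        }
    ]
)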
def get_request_cache_last_updated() -> Optional[str]:
with _connect() as conn:
row = conn.execute(
@@ -2017,9 +2180,45 @@ def upsert_artwork_cache_status(
poster_cached: bool,
backdrop_cached: bool,
) -> None:
upsert_artwork_cache_status_many(
[
{
"request_id": request_id,
"tmdb_id": tmdb_id,
"media_type": media_type,
"poster_path": poster_path,
"backdrop_path": backdrop_path,
"has_tmdb": has_tmdb,
"poster_cached": poster_cached,
"backdrop_cached": backdrop_cached,
}
]
)
def upsert_artwork_cache_status_many(records: list[Dict[str, Any]]) -> int:
if not records:
return 0
updated_at = datetime.now(timezone.utc).isoformat()
params = [
(
record["request_id"],
record.get("tmdb_id"),
record.get("media_type"),
record.get("poster_path"),
record.get("backdrop_path"),
1 if record.get("has_tmdb") else 0,
1 if record.get("poster_cached") else 0,
1 if record.get("backdrop_cached") else 0,
updated_at,
)
for record in records
if isinstance(record.get("request_id"), int)
]
if not params:
return 0
with _connect() as conn:
conn.execute(
conn.executemany(
"""
INSERT INTO artwork_cache_status (
request_id,
@@ -2043,18 +2242,9 @@ def upsert_artwork_cache_status(
backdrop_cached = excluded.backdrop_cached,
updated_at = excluded.updated_at
""",
(
request_id,
tmdb_id,
media_type,
poster_path,
backdrop_path,
1 if has_tmdb else 0,
1 if poster_cached else 0,
1 if backdrop_cached else 0,
updated_at,
),
params,
)
return len(params)
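Records without an integer request_id are silently dropped, so a call like this (values invented) writes only the first entry and returns 1:

written = upsert_artwork_cache_status_many(
    [
        {
            "request_id": 101,
            "tmdb_id": 550,
            "media_type": "movie",
            "poster_path": "/p.jpg",
            "backdrop_path": None,
            "has_tmdb": True,
            "poster_cached": True,
            "backdrop_cached": False,
        },
        {"request_id": None},  # skipped: request_id is not an int
    ]
)
assert written == 1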
def get_artwork_cache_status_count() -> int:
@@ -2638,6 +2828,73 @@ def run_integrity_check() -> str:
return str(row[0])
def get_database_diagnostics() -> Dict[str, Any]:
db_path = _db_path()
wal_path = f"{db_path}-wal"
shm_path = f"{db_path}-shm"
def _size(path: str) -> int:
try:
return os.path.getsize(path)
except OSError:
return 0
started = perf_counter()
with _connect() as conn:
integrity_started = perf_counter()
integrity_row = conn.execute("PRAGMA integrity_check").fetchone()
integrity_ms = round((perf_counter() - integrity_started) * 1000, 1)
integrity = str(integrity_row[0]) if integrity_row else "unknown"
pragma_started = perf_counter()
page_size_row = conn.execute("PRAGMA page_size").fetchone()
page_count_row = conn.execute("PRAGMA page_count").fetchone()
freelist_row = conn.execute("PRAGMA freelist_count").fetchone()
pragma_ms = round((perf_counter() - pragma_started) * 1000, 1)
row_count_started = perf_counter()
table_counts = {
"users": int(conn.execute("SELECT COUNT(*) FROM users").fetchone()[0] or 0),
"requests_cache": int(conn.execute("SELECT COUNT(*) FROM requests_cache").fetchone()[0] or 0),
"artwork_cache_status": int(conn.execute("SELECT COUNT(*) FROM artwork_cache_status").fetchone()[0] or 0),
"signup_invites": int(conn.execute("SELECT COUNT(*) FROM signup_invites").fetchone()[0] or 0),
"settings": int(conn.execute("SELECT COUNT(*) FROM settings").fetchone()[0] or 0),
"actions": int(conn.execute("SELECT COUNT(*) FROM actions").fetchone()[0] or 0),
"snapshots": int(conn.execute("SELECT COUNT(*) FROM snapshots").fetchone()[0] or 0),
"seerr_media_failures": int(conn.execute("SELECT COUNT(*) FROM seerr_media_failures").fetchone()[0] or 0),
"password_reset_tokens": int(conn.execute("SELECT COUNT(*) FROM password_reset_tokens").fetchone()[0] or 0),
}
row_count_ms = round((perf_counter() - row_count_started) * 1000, 1)
page_size = int(page_size_row[0] or 0) if page_size_row else 0
page_count = int(page_count_row[0] or 0) if page_count_row else 0
freelist_pages = int(freelist_row[0] or 0) if freelist_row else 0
db_size_bytes = _size(db_path)
wal_size_bytes = _size(wal_path)
shm_size_bytes = _size(shm_path)
return {
"integrity_check": integrity,
"database_path": db_path,
"database_size_bytes": db_size_bytes,
"wal_size_bytes": wal_size_bytes,
"shm_size_bytes": shm_size_bytes,
"page_size_bytes": page_size,
"page_count": page_count,
"freelist_pages": freelist_pages,
"allocated_bytes": page_size * page_count,
"free_bytes": page_size * freelist_pages,
"row_counts": table_counts,
"timings_ms": {
"integrity_check": integrity_ms,
"pragmas": pragma_ms,
"row_counts": row_count_ms,
"total": round((perf_counter() - started) * 1000, 1),
},
}
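A sketch of how the diagnostics payload might be consumed (keys as defined above; the vacuum threshold is an illustrative assumption):

diag = get_database_diagnostics()
if diag["integrity_check"] != "ok":
    logger.warning("sqlite integrity: %s", diag["integrity_check"])
if diag["free_bytes"] > 64 * 1024 * 1024:  # arbitrary example threshold
    vacuum_db()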
def vacuum_db() -> None:
with _connect() as conn:
conn.execute("VACUUM")

View File

@@ -26,7 +26,7 @@ from ..db import (
get_cached_requests,
get_cached_requests_since,
get_cached_request_by_media_id,
get_request_cache_by_id,
get_request_cache_lookup,
get_request_cache_payload,
get_request_cache_last_updated,
get_request_cache_count,
@@ -35,7 +35,9 @@ from ..db import (
repair_request_cache_titles,
prune_duplicate_requests_cache,
upsert_request_cache,
upsert_request_cache_many,
upsert_artwork_cache_status,
upsert_artwork_cache_status_many,
get_artwork_cache_missing_count,
get_artwork_cache_status_count,
get_setting,
@@ -411,26 +413,55 @@ def _upsert_artwork_status(
poster_cached: Optional[bool] = None,
backdrop_cached: Optional[bool] = None,
) -> None:
record = _build_artwork_status_record(payload, cache_mode, poster_cached, backdrop_cached)
if not record:
return
upsert_artwork_cache_status(**record)
def _build_request_cache_record(payload: Dict[str, Any], request_payload: Dict[str, Any]) -> Dict[str, Any]:
return {
"request_id": payload.get("request_id"),
"media_id": payload.get("media_id"),
"media_type": payload.get("media_type"),
"status": payload.get("status"),
"title": payload.get("title"),
"year": payload.get("year"),
"requested_by": payload.get("requested_by"),
"requested_by_norm": payload.get("requested_by_norm"),
"requested_by_id": payload.get("requested_by_id"),
"created_at": payload.get("created_at"),
"updated_at": payload.get("updated_at"),
"payload_json": json.dumps(request_payload, ensure_ascii=True),
}
def _build_artwork_status_record(
payload: Dict[str, Any],
cache_mode: str,
poster_cached: Optional[bool] = None,
backdrop_cached: Optional[bool] = None,
) -> Optional[Dict[str, Any]]:
parsed = _parse_request_payload(payload)
request_id = parsed.get("request_id")
if not isinstance(request_id, int):
return
return None
tmdb_id, media_type = _extract_tmdb_lookup(payload)
poster_path, backdrop_path = _extract_artwork_paths(payload)
has_tmdb = bool(tmdb_id and media_type)
poster_cached_flag, backdrop_cached_flag = _compute_cached_flags(
poster_path, backdrop_path, cache_mode, poster_cached, backdrop_cached
)
upsert_artwork_cache_status(
request_id=request_id,
tmdb_id=tmdb_id,
media_type=media_type,
poster_path=poster_path,
backdrop_path=backdrop_path,
has_tmdb=has_tmdb,
poster_cached=poster_cached_flag,
backdrop_cached=backdrop_cached_flag,
)
return {
"request_id": request_id,
"tmdb_id": tmdb_id,
"media_type": media_type,
"poster_path": poster_path,
"backdrop_path": backdrop_path,
"has_tmdb": has_tmdb,
"poster_cached": poster_cached_flag,
"backdrop_cached": backdrop_cached_flag,
}
def _collect_artwork_cache_disk_stats() -> tuple[int, int]:
@@ -631,6 +662,16 @@ async def _sync_all_requests(client: JellyseerrClient) -> int:
if not isinstance(items, list) or not items:
logger.info("Seerr sync completed: no more results at skip=%s", skip)
break
page_request_ids = [
payload.get("request_id")
for item in items
if isinstance(item, dict)
for payload in [_parse_request_payload(item)]
if isinstance(payload.get("request_id"), int)
]
cached_by_request_id = get_request_cache_lookup(page_request_ids)
page_cache_records: list[Dict[str, Any]] = []
page_artwork_records: list[Dict[str, Any]] = []
for item in items:
if not isinstance(item, dict):
continue
@@ -638,9 +679,8 @@ async def _sync_all_requests(client: JellyseerrClient) -> int:
request_id = payload.get("request_id")
cached_title = None
if isinstance(request_id, int):
if not payload.get("title"):
cached = get_request_cache_by_id(request_id)
if cached and cached.get("title"):
cached = cached_by_request_id.get(request_id)
if not payload.get("title") and cached and cached.get("title"):
cached_title = cached.get("title")
needs_details = (
not payload.get("title")
@@ -672,25 +712,17 @@ async def _sync_all_requests(client: JellyseerrClient) -> int:
payload["title"] = cached_title
if not isinstance(payload.get("request_id"), int):
continue
payload_json = json.dumps(item, ensure_ascii=True)
upsert_request_cache(
request_id=payload.get("request_id"),
media_id=payload.get("media_id"),
media_type=payload.get("media_type"),
status=payload.get("status"),
title=payload.get("title"),
year=payload.get("year"),
requested_by=payload.get("requested_by"),
requested_by_norm=payload.get("requested_by_norm"),
requested_by_id=payload.get("requested_by_id"),
created_at=payload.get("created_at"),
updated_at=payload.get("updated_at"),
payload_json=payload_json,
)
page_cache_records.append(_build_request_cache_record(payload, item))
if isinstance(item, dict):
_upsert_artwork_status(item, cache_mode)
artwork_record = _build_artwork_status_record(item, cache_mode)
if artwork_record:
page_artwork_records.append(artwork_record)
stored += 1
_sync_state["stored"] = stored
if page_cache_records:
upsert_request_cache_many(page_cache_records)
if page_artwork_records:
upsert_artwork_cache_status_many(page_artwork_records)
if len(items) < take:
logger.info("Seerr sync completed: stored=%s", stored)
break
@@ -749,6 +781,16 @@ async def _sync_delta_requests(client: JellyseerrClient) -> int:
if not isinstance(items, list) or not items:
logger.info("Seerr delta sync completed: no more results at skip=%s", skip)
break
page_request_ids = [
payload.get("request_id")
for item in items
if isinstance(item, dict)
for payload in [_parse_request_payload(item)]
if isinstance(payload.get("request_id"), int)
]
cached_by_request_id = get_request_cache_lookup(page_request_ids)
page_cache_records: list[Dict[str, Any]] = []
page_artwork_records: list[Dict[str, Any]] = []
page_changed = False
for item in items:
if not isinstance(item, dict):
@@ -756,7 +798,7 @@ async def _sync_delta_requests(client: JellyseerrClient) -> int:
payload = _parse_request_payload(item)
request_id = payload.get("request_id")
if isinstance(request_id, int):
cached = get_request_cache_by_id(request_id)
cached = cached_by_request_id.get(request_id)
incoming_updated = payload.get("updated_at")
cached_title = cached.get("title") if cached else None
if cached and incoming_updated and cached.get("updated_at") == incoming_updated and cached.get("title"):
@@ -790,26 +832,18 @@ async def _sync_delta_requests(client: JellyseerrClient) -> int:
payload["title"] = cached_title
if not isinstance(payload.get("request_id"), int):
continue
payload_json = json.dumps(item, ensure_ascii=True)
upsert_request_cache(
request_id=payload.get("request_id"),
media_id=payload.get("media_id"),
media_type=payload.get("media_type"),
status=payload.get("status"),
title=payload.get("title"),
year=payload.get("year"),
requested_by=payload.get("requested_by"),
requested_by_norm=payload.get("requested_by_norm"),
requested_by_id=payload.get("requested_by_id"),
created_at=payload.get("created_at"),
updated_at=payload.get("updated_at"),
payload_json=payload_json,
)
page_cache_records.append(_build_request_cache_record(payload, item))
if isinstance(item, dict):
_upsert_artwork_status(item, cache_mode)
artwork_record = _build_artwork_status_record(item, cache_mode)
if artwork_record:
page_artwork_records.append(artwork_record)
stored += 1
page_changed = True
_sync_state["stored"] = stored
if page_cache_records:
upsert_request_cache_many(page_cache_records)
if page_artwork_records:
upsert_artwork_cache_status_many(page_artwork_records)
if not page_changed:
unchanged_pages += 1
else:
@@ -894,6 +928,8 @@ async def _prefetch_artwork_cache(
batch = get_request_cache_payloads(limit=limit, offset=offset)
if not batch:
break
page_cache_records: list[Dict[str, Any]] = []
page_artwork_records: list[Dict[str, Any]] = []
for row in batch:
payload = row.get("payload")
if not isinstance(payload, dict):
@@ -921,20 +957,7 @@ async def _prefetch_artwork_cache(
parsed = _parse_request_payload(payload)
request_id = parsed.get("request_id")
if isinstance(request_id, int):
upsert_request_cache(
request_id=request_id,
media_id=parsed.get("media_id"),
media_type=parsed.get("media_type"),
status=parsed.get("status"),
title=parsed.get("title"),
year=parsed.get("year"),
requested_by=parsed.get("requested_by"),
requested_by_norm=parsed.get("requested_by_norm"),
requested_by_id=parsed.get("requested_by_id"),
created_at=parsed.get("created_at"),
updated_at=parsed.get("updated_at"),
payload_json=json.dumps(payload, ensure_ascii=True),
)
page_cache_records.append(_build_request_cache_record(parsed, payload))
poster_cached_flag = False
backdrop_cached_flag = False
if poster_path:
@@ -949,17 +972,23 @@ async def _prefetch_artwork_cache(
backdrop_cached_flag = bool(await cache_tmdb_image(backdrop_path, "w780"))
except httpx.HTTPError:
backdrop_cached_flag = False
_upsert_artwork_status(
artwork_record = _build_artwork_status_record(
payload,
cache_mode,
poster_cached=poster_cached_flag if poster_path else None,
backdrop_cached=backdrop_cached_flag if backdrop_path else None,
)
if artwork_record:
page_artwork_records.append(artwork_record)
processed += 1
if processed % 25 == 0:
_artwork_prefetch_state.update(
{"processed": processed, "message": f"Cached artwork for {processed} requests"}
)
if page_cache_records:
upsert_request_cache_many(page_cache_records)
if page_artwork_records:
upsert_artwork_cache_status_many(page_artwork_records)
offset += limit
total_requests = get_request_cache_count()

View File

@@ -16,7 +16,7 @@ from ..clients.qbittorrent import QBittorrentClient
from ..clients.radarr import RadarrClient
from ..clients.sonarr import SonarrClient
from ..config import settings as env_settings
from ..db import run_integrity_check
from ..db import get_database_diagnostics
from ..runtime import get_runtime_settings
from .invite_email import send_test_email, smtp_email_config_ready, smtp_email_delivery_warning
@@ -205,12 +205,16 @@ async def _run_http_post(
async def _run_database_check() -> Dict[str, Any]:
integrity = await asyncio.to_thread(run_integrity_check)
detail = await asyncio.to_thread(get_database_diagnostics)
integrity = _clean_text(detail.get("integrity_check"), "unknown")
requests_cached = detail.get("row_counts", {}).get("requests_cache", 0) if isinstance(detail, dict) else 0
wal_size_bytes = detail.get("wal_size_bytes", 0) if isinstance(detail, dict) else 0
wal_size_megabytes = round((float(wal_size_bytes or 0) / (1024 * 1024)), 2)
status = "up" if integrity == "ok" else "degraded"
return {
"status": status,
"message": f"SQLite integrity_check returned {integrity}",
"detail": integrity,
"message": f"SQLite {integrity} · {requests_cached} cached requests · WAL {wal_size_megabytes:.2f} MB",
"detail": detail,
}
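A hypothetical caller, showing the shape the diagnostics UI relies on (status plus the full detail dict):

async def _example() -> None:  # sketch only, not in this commit
    result = await _run_database_check()
    assert result["status"] in {"up", "degraded"}
    wal_bytes = result["detail"].get("wal_size_bytes", 0)
    print(result["message"], wal_bytes)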

View File

@@ -243,6 +243,31 @@ const MAGENT_GROUPS_BY_SECTION: Record<string, Set<string>> = {
]),
}
const SITE_SECTION_GROUPS: Array<{
key: string
title: string
description: string
keys: string[]
}> = [
{
key: 'site-banner',
title: 'Site Banner',
description: 'Control the sitewide banner message, tone, and visibility.',
keys: ['site_banner_enabled', 'site_banner_tone', 'site_banner_message'],
},
{
key: 'site-login',
title: 'Login Page Behaviour',
description: 'Control which sign-in and recovery options are shown on the logged-out login page.',
keys: [
'site_login_show_jellyfin_login',
'site_login_show_local_login',
'site_login_show_forgot_password',
'site_login_show_signup_link',
],
},
]
const SETTING_LABEL_OVERRIDES: Record<string, string> = {
jellyseerr_base_url: 'Seerr base URL',
jellyseerr_api_key: 'Seerr API key',
@@ -559,6 +584,7 @@ export default function SettingsPage({ section }: SettingsPageProps) {
const settingsSection = SETTINGS_SECTION_MAP[section] ?? null
const isMagentGroupedSection = section === 'magent' || section === 'general' || section === 'notifications'
const isSiteGroupedSection = section === 'site'
const visibleSections = settingsSection ? [settingsSection] : []
const isCacheSection = section === 'cache'
const cacheSettingKeys = new Set(['requests_sync_ttl_minutes', 'requests_data_source'])
@@ -620,6 +646,22 @@ export default function SettingsPage({ section }: SettingsPageProps) {
})
return groups
})()
: isSiteGroupedSection
? (() => {
const siteItems = groupedSettings.site ?? []
const byKey = new Map(siteItems.map((item) => [item.key, item]))
return SITE_SECTION_GROUPS.map((group) => {
const items = group.keys
.map((key) => byKey.get(key))
.filter((item): item is AdminSetting => Boolean(item))
return {
key: group.key,
title: group.title,
description: group.description,
items,
}
})
})()
: visibleSections.map((sectionKey) => ({
key: sectionKey,
title: SECTION_LABELS[sectionKey] ?? sectionKey,
@@ -1696,7 +1738,7 @@ export default function SettingsPage({ section }: SettingsPageProps) {
)}
</div>
{(sectionGroup.description || SECTION_DESCRIPTIONS[sectionGroup.key]) &&
(!settingsSection || isMagentGroupedSection) && (
(!settingsSection || isMagentGroupedSection || isSiteGroupedSection) && (
<p className="section-subtitle">
{sectionGroup.description || SECTION_DESCRIPTIONS[sectionGroup.key]}
</p>
@@ -2172,11 +2214,12 @@ export default function SettingsPage({ section }: SettingsPageProps) {
const isPemField =
setting.key === 'magent_ssl_certificate_pem' ||
setting.key === 'magent_ssl_private_key_pem'
const shouldSpanFull = isPemField || setting.key === 'site_banner_message'
return (
<label
key={setting.key}
data-helper={helperText || undefined}
className={isPemField ? 'field-span-full' : undefined}
className={shouldSpanFull ? 'field-span-full' : undefined}
>
<span className="label-row">
<span>{labelFromKey(setting.key)}</span>

View File

@@ -6075,6 +6075,52 @@ textarea {
background: rgba(255, 255, 255, 0.03);
}
.diagnostic-detail-panel {
display: grid;
gap: 0.9rem;
}
.diagnostic-detail-group {
display: grid;
gap: 0.6rem;
}
.diagnostic-detail-group h4 {
margin: 0;
font-size: 0.86rem;
letter-spacing: 0.06em;
text-transform: uppercase;
color: var(--ink-muted);
}
.diagnostic-detail-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(9rem, 1fr));
gap: 0.7rem;
}
.diagnostic-detail-item {
display: grid;
gap: 0.2rem;
min-width: 0;
padding: 0.75rem;
border-radius: 0.8rem;
border: 1px solid rgba(255, 255, 255, 0.06);
background: rgba(255, 255, 255, 0.025);
}
.diagnostic-detail-item span {
font-size: 0.76rem;
letter-spacing: 0.05em;
text-transform: uppercase;
color: var(--muted);
}
.diagnostic-detail-item strong {
line-height: 1.35;
overflow-wrap: anywhere;
}
.diagnostics-rail-metrics {
display: grid;
gap: 0.75rem;

View File

@@ -56,6 +56,21 @@ type AdminDiagnosticsPanelProps = {
embedded?: boolean
}
type DatabaseDiagnosticDetail = {
integrity_check?: string
database_path?: string
database_size_bytes?: number
wal_size_bytes?: number
shm_size_bytes?: number
page_size_bytes?: number
page_count?: number
freelist_pages?: number
allocated_bytes?: number
free_bytes?: number
row_counts?: Record<string, number>
timings_ms?: Record<string, number>
}
const REFRESH_INTERVAL_MS = 30000
const STATUS_LABELS: Record<string, string> = {
@@ -85,6 +100,54 @@ function statusLabel(status: string) {
return STATUS_LABELS[status] ?? status
}
function formatBytes(value?: number) {
if (typeof value !== 'number' || Number.isNaN(value) || value < 0) {
return '0 B'
}
if (value >= 1024 * 1024 * 1024) {
return `${(value / (1024 * 1024 * 1024)).toFixed(2)} GB`
}
if (value >= 1024 * 1024) {
return `${(value / (1024 * 1024)).toFixed(2)} MB`
}
if (value >= 1024) {
return `${(value / 1024).toFixed(1)} KB`
}
return `${value} B`
}
function formatDetailLabel(value: string) {
return value
.replace(/_/g, ' ')
.replace(/\b\w/g, (character) => character.toUpperCase())
}
function asDatabaseDiagnosticDetail(detail: unknown): DatabaseDiagnosticDetail | null {
if (!detail || typeof detail !== 'object' || Array.isArray(detail)) {
return null
}
return detail as DatabaseDiagnosticDetail
}
function renderDatabaseMetricGroup(title: string, values: Array<[string, string]>) {
if (values.length === 0) {
return null
}
return (
<div className="diagnostic-detail-group">
<h4>{title}</h4>
<div className="diagnostic-detail-grid">
{values.map(([label, value]) => (
<div key={`${title}-${label}`} className="diagnostic-detail-item">
<span>{label}</span>
<strong>{value}</strong>
</div>
))}
</div>
</div>
)
}
export default function AdminDiagnosticsPanel({ embedded = false }: AdminDiagnosticsPanelProps) {
const router = useRouter()
const [loading, setLoading] = useState(true)
@@ -405,6 +468,43 @@ export default function AdminDiagnosticsPanel({ embedded = false }: AdminDiagnos
<span className="system-dot" />
<span>{isRunning ? 'Running diagnostic...' : check.message}</span>
</div>
{check.key === 'database'
? (() => {
const detail = asDatabaseDiagnosticDetail(check.detail)
if (!detail) {
return null
}
return (
<div className="diagnostic-detail-panel">
{renderDatabaseMetricGroup('Storage', [
['Database file', formatBytes(detail.database_size_bytes)],
['WAL file', formatBytes(detail.wal_size_bytes)],
['Shared memory', formatBytes(detail.shm_size_bytes)],
['Allocated bytes', formatBytes(detail.allocated_bytes)],
['Free bytes', formatBytes(detail.free_bytes)],
['Page size', formatBytes(detail.page_size_bytes)],
['Page count', `${detail.page_count?.toLocaleString() ?? 0}`],
['Freelist pages', `${detail.freelist_pages?.toLocaleString() ?? 0}`],
])}
{renderDatabaseMetricGroup(
'Tables',
Object.entries(detail.row_counts ?? {}).map(([key, value]) => [
formatDetailLabel(key),
value.toLocaleString(),
]),
)}
{renderDatabaseMetricGroup(
'Timings',
Object.entries(detail.timings_ms ?? {}).map(([key, value]) => [
formatDetailLabel(key),
`${value.toFixed(1)} ms`,
]),
)}
</div>
)
})()
: null}
</article>
)
})}

View File

@@ -1,12 +1,12 @@
{
"name": "magent-frontend",
"version": "0303261413",
"version": "0303261502",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "magent-frontend",
"version": "0303261413",
"version": "0303261502",
"dependencies": {
"next": "16.1.6",
"react": "19.2.4",

View File

@@ -1,7 +1,7 @@
{
"name": "magent-frontend",
"private": true,
"version": "0303261413",
"version": "0303261502",
"scripts": {
"dev": "next dev",
"build": "next build",