Fix cache titles via Jellyseerr media lookup

This commit is contained in:
2026-01-23 11:35:59 +13:00
parent 7a7d570852
commit 7b8fc1d99b
2 changed files with 73 additions and 5 deletions

View File

@@ -458,7 +458,7 @@ def get_request_cache_by_id(request_id: int) -> Optional[Dict[str, Any]]:
with _connect() as conn: with _connect() as conn:
row = conn.execute( row = conn.execute(
""" """
SELECT request_id, updated_at SELECT request_id, updated_at, title
FROM requests_cache FROM requests_cache
WHERE request_id = ? WHERE request_id = ?
""", """,
@@ -468,7 +468,7 @@ def get_request_cache_by_id(request_id: int) -> Optional[Dict[str, Any]]:
logger.debug("requests_cache miss: request_id=%s", request_id) logger.debug("requests_cache miss: request_id=%s", request_id)
return None return None
logger.debug("requests_cache hit: request_id=%s updated_at=%s", row[0], row[1]) logger.debug("requests_cache hit: request_id=%s updated_at=%s", row[0], row[1])
return {"request_id": row[0], "updated_at": row[1]} return {"request_id": row[0], "updated_at": row[1], "title": row[2]}
def get_request_cache_payload(request_id: int) -> Optional[Dict[str, Any]]: def get_request_cache_payload(request_id: int) -> Optional[Dict[str, Any]]:
@@ -545,7 +545,7 @@ def get_request_cache_overview(limit: int = 50) -> list[Dict[str, Any]]:
with _connect() as conn: with _connect() as conn:
rows = conn.execute( rows = conn.execute(
""" """
SELECT request_id, media_id, media_type, status, title, year, requested_by, created_at, updated_at SELECT request_id, media_id, media_type, status, title, year, requested_by, created_at, updated_at, payload_json
FROM requests_cache FROM requests_cache
ORDER BY updated_at DESC, request_id DESC ORDER BY updated_at DESC, request_id DESC
LIMIT ? LIMIT ?
@@ -554,13 +554,27 @@ def get_request_cache_overview(limit: int = 50) -> list[Dict[str, Any]]:
).fetchall() ).fetchall()
results: list[Dict[str, Any]] = [] results: list[Dict[str, Any]] = []
for row in rows: for row in rows:
title = row[4]
if not title and row[9]:
try:
payload = json.loads(row[9])
if isinstance(payload, dict):
media = payload.get("media") or {}
title = (
(media.get("title") if isinstance(media, dict) else None)
or (media.get("name") if isinstance(media, dict) else None)
or payload.get("title")
or payload.get("name")
)
except json.JSONDecodeError:
title = row[4]
results.append( results.append(
{ {
"request_id": row[0], "request_id": row[0],
"media_id": row[1], "media_id": row[1],
"media_type": row[2], "media_type": row[2],
"status": row[3], "status": row[3],
"title": row[4], "title": title,
"year": row[5], "year": row[5],
"requested_by": row[6], "requested_by": row[6],
"created_at": row[7], "created_at": row[7],

View File

@@ -265,6 +265,16 @@ async def _hydrate_title_from_tmdb(
return None, None return None, None
async def _hydrate_media_details(client: JellyseerrClient, media_id: Optional[int]) -> Optional[Dict[str, Any]]:
    """Fetch the Jellyseerr media record for *media_id*.

    Returns the media details dict on success, or ``None`` when the id is
    missing/falsy, the API responds with an HTTP error status, or the
    response is not a dict.
    """
    # Guard clause: nothing to look up without a usable id.
    if not media_id:
        return None
    try:
        fetched = await client.get_media(int(media_id))
    except httpx.HTTPStatusError:
        # Best-effort hydration: an HTTP error just means "no extra details".
        return None
    if isinstance(fetched, dict):
        return fetched
    return None
async def _hydrate_artwork_from_tmdb( async def _hydrate_artwork_from_tmdb(
client: JellyseerrClient, media_type: Optional[str], tmdb_id: Optional[int] client: JellyseerrClient, media_type: Optional[str], tmdb_id: Optional[int]
) -> tuple[Optional[str], Optional[str]]: ) -> tuple[Optional[str], Optional[str]]:
@@ -389,6 +399,28 @@ async def _sync_all_requests(client: JellyseerrClient) -> int:
if isinstance(details, dict): if isinstance(details, dict):
payload = _parse_request_payload(details) payload = _parse_request_payload(details)
item = details item = details
if not payload.get("title") and payload.get("media_id"):
media_details = await _hydrate_media_details(client, payload.get("media_id"))
if isinstance(media_details, dict):
media_title = media_details.get("title") or media_details.get("name")
if media_title:
payload["title"] = media_title
if not payload.get("year") and media_details.get("year"):
payload["year"] = media_details.get("year")
if not payload.get("tmdb_id") and media_details.get("tmdbId"):
payload["tmdb_id"] = media_details.get("tmdbId")
if not payload.get("media_type") and media_details.get("mediaType"):
payload["media_type"] = media_details.get("mediaType")
if isinstance(item, dict):
existing_media = item.get("media")
if isinstance(existing_media, dict):
merged = dict(media_details)
for key, value in existing_media.items():
if value is not None:
merged[key] = value
item["media"] = merged
else:
item["media"] = media_details
poster_path, backdrop_path = _extract_artwork_paths(item) poster_path, backdrop_path = _extract_artwork_paths(item)
if cache_mode == "cache" and not (poster_path or backdrop_path): if cache_mode == "cache" and not (poster_path or backdrop_path):
details = await _get_request_details(client, request_id) details = await _get_request_details(client, request_id)
@@ -483,13 +515,35 @@ async def _sync_delta_requests(client: JellyseerrClient) -> int:
if isinstance(request_id, int): if isinstance(request_id, int):
cached = get_request_cache_by_id(request_id) cached = get_request_cache_by_id(request_id)
incoming_updated = payload.get("updated_at") incoming_updated = payload.get("updated_at")
if cached and incoming_updated and cached.get("updated_at") == incoming_updated: if cached and incoming_updated and cached.get("updated_at") == incoming_updated and cached.get("title"):
continue continue
if not payload.get("title") or not payload.get("media_id"): if not payload.get("title") or not payload.get("media_id"):
details = await _get_request_details(client, request_id) details = await _get_request_details(client, request_id)
if isinstance(details, dict): if isinstance(details, dict):
payload = _parse_request_payload(details) payload = _parse_request_payload(details)
item = details item = details
if not payload.get("title") and payload.get("media_id"):
media_details = await _hydrate_media_details(client, payload.get("media_id"))
if isinstance(media_details, dict):
media_title = media_details.get("title") or media_details.get("name")
if media_title:
payload["title"] = media_title
if not payload.get("year") and media_details.get("year"):
payload["year"] = media_details.get("year")
if not payload.get("tmdb_id") and media_details.get("tmdbId"):
payload["tmdb_id"] = media_details.get("tmdbId")
if not payload.get("media_type") and media_details.get("mediaType"):
payload["media_type"] = media_details.get("mediaType")
if isinstance(item, dict):
existing_media = item.get("media")
if isinstance(existing_media, dict):
merged = dict(media_details)
for key, value in existing_media.items():
if value is not None:
merged[key] = value
item["media"] = merged
else:
item["media"] = media_details
poster_path, backdrop_path = _extract_artwork_paths(item) poster_path, backdrop_path = _extract_artwork_paths(item)
if cache_mode == "cache" and not (poster_path or backdrop_path): if cache_mode == "cache" and not (poster_path or backdrop_path):
details = await _get_request_details(client, request_id) details = await _get_request_details(client, request_id)