Initial Upload
Some checks failed
CI / Lint & Typecheck (push) Has been cancelled
CI / Test (routes) (push) Has been cancelled
CI / Test (security) (push) Has been cancelled
CI / Test (services) (push) Has been cancelled
CI / Test (unit) (push) Has been cancelled
CI / Test (integration) (push) Has been cancelled
CI / Test Coverage (push) Has been cancelled
CI / Build (push) Has been cancelled

This commit is contained in:
2025-12-17 12:32:50 +13:00
commit 3015f48118
471 changed files with 141143 additions and 0 deletions

View File

@@ -0,0 +1,16 @@
import { defineConfig } from 'drizzle-kit';

// Fail fast when the config is loaded without a database URL.
const databaseUrl = process.env.DATABASE_URL;
if (!databaseUrl) {
  throw new Error('DATABASE_URL environment variable is required');
}

/**
 * Drizzle Kit configuration: generates PostgreSQL migrations from the
 * schema at ./src/db/schema.ts into ./src/db/migrations.
 */
export default defineConfig({
  schema: './src/db/schema.ts',
  out: './src/db/migrations',
  dialect: 'postgresql',
  dbCredentials: {
    url: databaseUrl,
  },
  verbose: true,
  strict: true,
});

73
apps/server/package.json Normal file
View File

@@ -0,0 +1,73 @@
{
"name": "@tracearr/server",
"version": "0.1.0",
"private": true,
"type": "module",
"main": "./dist/index.js",
"scripts": {
"dev": "tsx watch --env-file=../../.env src/index.ts",
"build": "tsc",
"start": "node dist/index.js",
"typecheck": "tsc --noEmit",
"lint": "eslint src/",
"lint:fix": "eslint src/ --fix",
"test": "vitest run",
"test:all": "vitest run",
"test:watch": "vitest",
"test:unit": "vitest run --config vitest.unit.config.ts",
"test:services": "vitest run --config vitest.services.config.ts",
"test:routes": "vitest run --config vitest.routes.config.ts",
"test:security": "vitest run --config vitest.security.config.ts",
"test:integration": "vitest run --config vitest.integration.config.ts",
"test:coverage": "vitest run --coverage",
"test:unit:coverage": "vitest run --config vitest.unit.config.ts --coverage",
"test:services:coverage": "vitest run --config vitest.services.config.ts --coverage",
"test:routes:coverage": "vitest run --config vitest.routes.config.ts --coverage",
"clean": "rm -rf dist .turbo coverage",
"db:generate": "drizzle-kit generate",
"db:migrate": "drizzle-kit migrate",
"db:push": "drizzle-kit push",
"db:studio": "drizzle-kit studio"
},
"dependencies": {
"@fastify/cookie": "^11.0.0",
"@fastify/cors": "^11.0.0",
"@fastify/helmet": "^13.0.0",
"@fastify/jwt": "^10.0.0",
"@fastify/rate-limit": "^10.0.0",
"@fastify/sensible": "^6.0.0",
"@fastify/static": "^8.0.0",
"@fastify/swagger": "^9.0.0",
"@fastify/swagger-ui": "^5.0.0",
"@fastify/websocket": "^11.0.0",
"@tracearr/shared": "workspace:*",
"bcrypt": "^6.0.0",
"bullmq": "^5.65.1",
"dotenv": "^16.4.7",
"drizzle-orm": "^0.44.0",
"eventsource": "^4.1.0",
"expo-server-sdk": "^4.0.0",
"fastify": "^5.0.0",
"fastify-plugin": "^5.0.0",
"ioredis": "^5.4.0",
"jsonwebtoken": "^9.0.3",
"maxmind": "^4.3.29",
"pg": "^8.13.0",
"sharp": "^0.34.0",
"socket.io": "^4.8.0",
"zod": "^4.0.0"
},
"devDependencies": {
"@tracearr/test-utils": "workspace:*",
"@types/bcrypt": "^5.0.2",
"@types/eventsource": "^3.0.0",
"@types/jsonwebtoken": "^9.0.9",
"@types/pg": "^8.11.10",
"@vitest/coverage-v8": "^4.0.0",
"drizzle-kit": "^0.31.0",
"pino-pretty": "^13.0.0",
"tsx": "^4.19.0",
"typescript": "^5.7.0",
"vitest": "^4.0.0"
}
}

View File

@@ -0,0 +1,50 @@
/**
* Database client and connection pool
*/
import { drizzle } from 'drizzle-orm/node-postgres';
import { migrate } from 'drizzle-orm/node-postgres/migrator';
import pg from 'pg';
import * as schema from './schema.js';
// pg is a CommonJS package; destructure Pool from its default export.
const { Pool } = pg;
// Fail fast at module load when the database URL is missing.
if (!process.env.DATABASE_URL) {
throw new Error('DATABASE_URL environment variable is required');
}
// Shared connection pool for the whole server process.
const pool = new Pool({
connectionString: process.env.DATABASE_URL,
max: 20, // Maximum connections
idleTimeoutMillis: 20000, // Close idle connections after 20s
connectionTimeoutMillis: 10000, // Connection timeout (increased for complex queries)
maxUses: 7500, // Max queries per connection before refresh (prevents memory leaks)
allowExitOnIdle: false, // Keep pool alive during idle periods
});
// Log pool errors for debugging. pg emits 'error' on the pool for errors on
// idle clients; without a handler these would crash the process.
pool.on('error', (err) => {
console.error('[DB Pool Error]', err.message);
});
// Shared Drizzle handle bound to the pool and the full schema.
export const db = drizzle(pool, { schema });
/**
 * Drain and close every pooled connection; call during graceful shutdown.
 */
export function closeDatabase(): Promise<void> {
  return pool.end();
}
/**
 * Health check: acquire a connection from the pool and run a trivial query.
 *
 * @returns true when the database responds, false on any failure (the error
 *          is logged, never thrown — callers use this for readiness probes).
 */
export async function checkDatabaseConnection(): Promise<boolean> {
  try {
    const client = await pool.connect();
    try {
      await client.query('SELECT 1');
    } finally {
      // Always return the client to the pool — the original code leaked the
      // connection if the query itself threw, since release() was skipped.
      client.release();
    }
    return true;
  } catch (error) {
    console.error('Database connection check failed:', error);
    return false;
  }
}
/**
 * Apply all pending SQL migrations found in the given folder.
 *
 * @param migrationsFolder - path to the drizzle migrations directory
 */
export function runMigrations(migrationsFolder: string): Promise<void> {
  return migrate(db, { migrationsFolder });
}

View File

@@ -0,0 +1,154 @@
-- Initial schema: media servers, user identity (users + per-server server_users),
-- playback sessions, the rules/violations engine, and a singleton settings row.

-- Detection rules. A NULL server_user_id presumably means the rule applies
-- globally rather than to one server user — confirm against the rules engine.
CREATE TABLE "rules" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"name" varchar(100) NOT NULL,
"type" varchar(50) NOT NULL,
"params" jsonb NOT NULL,
"server_user_id" uuid,
"is_active" boolean DEFAULT true NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Links a user identity to a concrete account on one media server;
-- external_id is the server-side account id (unique per server, see indexes).
CREATE TABLE "server_users" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"user_id" uuid NOT NULL,
"server_id" uuid NOT NULL,
"external_id" varchar(255) NOT NULL,
"username" varchar(255) NOT NULL,
"email" varchar(255),
"thumb_url" text,
"is_server_admin" boolean DEFAULT false NOT NULL,
"trust_score" integer DEFAULT 100 NOT NULL,
"session_count" integer DEFAULT 0 NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Registered media servers; "token" holds the server API credential as-is.
CREATE TABLE "servers" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"name" varchar(100) NOT NULL,
"type" varchar(20) NOT NULL,
"url" text NOT NULL,
"token" text NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- One row per observed playback session: media metadata, playback progress,
-- client/device details, and geo-located IP information.
CREATE TABLE "sessions" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"server_id" uuid NOT NULL,
"server_user_id" uuid NOT NULL,
"session_key" varchar(255) NOT NULL,
"state" varchar(20) NOT NULL,
"media_type" varchar(20) NOT NULL,
"media_title" text NOT NULL,
"grandparent_title" varchar(500),
"season_number" integer,
"episode_number" integer,
"year" integer,
"thumb_path" varchar(500),
"rating_key" varchar(255),
"external_session_id" varchar(255),
"started_at" timestamp with time zone DEFAULT now() NOT NULL,
"stopped_at" timestamp with time zone,
"duration_ms" integer,
"total_duration_ms" integer,
"progress_ms" integer,
"last_paused_at" timestamp with time zone,
"paused_duration_ms" integer DEFAULT 0 NOT NULL,
"reference_id" uuid,
"watched" boolean DEFAULT false NOT NULL,
"ip_address" varchar(45) NOT NULL,
"geo_city" varchar(255),
"geo_region" varchar(255),
"geo_country" varchar(100),
"geo_lat" real,
"geo_lon" real,
"player_name" varchar(255),
"device_id" varchar(255),
"product" varchar(255),
"device" varchar(255),
"platform" varchar(100),
"quality" varchar(100),
"is_transcode" boolean DEFAULT false NOT NULL,
"bitrate" integer
);
--> statement-breakpoint
-- Singleton settings row (fixed primary key defaulting to 1).
CREATE TABLE "settings" (
"id" integer PRIMARY KEY DEFAULT 1 NOT NULL,
"allow_guest_access" boolean DEFAULT false NOT NULL,
"discord_webhook_url" text,
"custom_webhook_url" text,
"notify_on_violation" boolean DEFAULT true NOT NULL,
"notify_on_session_start" boolean DEFAULT false NOT NULL,
"notify_on_session_stop" boolean DEFAULT false NOT NULL,
"notify_on_server_down" boolean DEFAULT true NOT NULL,
"poller_enabled" boolean DEFAULT true NOT NULL,
"poller_interval_ms" integer DEFAULT 15000 NOT NULL,
"tautulli_url" text,
"tautulli_api_key" text,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- User identities (the actual people). password_hash is nullable — presumably
-- for externally-authenticated accounts; confirm against the auth flow.
CREATE TABLE "users" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"username" varchar(100) NOT NULL,
"name" varchar(255),
"thumbnail" text,
"email" varchar(255),
"password_hash" text,
"plex_account_id" varchar(255),
"role" varchar(20) DEFAULT 'member' NOT NULL,
"aggregate_trust_score" integer DEFAULT 100 NOT NULL,
"total_violations" integer DEFAULT 0 NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Rule violations recorded against a specific session of a server user.
CREATE TABLE "violations" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"rule_id" uuid NOT NULL,
"server_user_id" uuid NOT NULL,
"session_id" uuid NOT NULL,
"severity" varchar(20) NOT NULL,
"data" jsonb NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"acknowledged_at" timestamp with time zone
);
--> statement-breakpoint
-- Foreign keys: all ON DELETE cascade, so removing a server or user removes
-- its dependent rows.
ALTER TABLE "rules" ADD CONSTRAINT "rules_server_user_id_server_users_id_fk" FOREIGN KEY ("server_user_id") REFERENCES "public"."server_users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "server_users" ADD CONSTRAINT "server_users_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "server_users" ADD CONSTRAINT "server_users_server_id_servers_id_fk" FOREIGN KEY ("server_id") REFERENCES "public"."servers"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "sessions" ADD CONSTRAINT "sessions_server_id_servers_id_fk" FOREIGN KEY ("server_id") REFERENCES "public"."servers"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "sessions" ADD CONSTRAINT "sessions_server_user_id_server_users_id_fk" FOREIGN KEY ("server_user_id") REFERENCES "public"."server_users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "violations" ADD CONSTRAINT "violations_rule_id_rules_id_fk" FOREIGN KEY ("rule_id") REFERENCES "public"."rules"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "violations" ADD CONSTRAINT "violations_server_user_id_server_users_id_fk" FOREIGN KEY ("server_user_id") REFERENCES "public"."server_users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "violations" ADD CONSTRAINT "violations_session_id_sessions_id_fk" FOREIGN KEY ("session_id") REFERENCES "public"."sessions"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
-- Indexes: uniqueness guarantees plus lookup paths for dashboards and polling.
CREATE INDEX "rules_active_idx" ON "rules" USING btree ("is_active");--> statement-breakpoint
CREATE INDEX "rules_server_user_id_idx" ON "rules" USING btree ("server_user_id");--> statement-breakpoint
CREATE UNIQUE INDEX "server_users_user_server_unique" ON "server_users" USING btree ("user_id","server_id");--> statement-breakpoint
CREATE UNIQUE INDEX "server_users_server_external_unique" ON "server_users" USING btree ("server_id","external_id");--> statement-breakpoint
CREATE INDEX "server_users_user_idx" ON "server_users" USING btree ("user_id");--> statement-breakpoint
CREATE INDEX "server_users_server_idx" ON "server_users" USING btree ("server_id");--> statement-breakpoint
CREATE INDEX "server_users_username_idx" ON "server_users" USING btree ("username");--> statement-breakpoint
CREATE INDEX "sessions_server_user_time_idx" ON "sessions" USING btree ("server_user_id","started_at");--> statement-breakpoint
CREATE INDEX "sessions_server_time_idx" ON "sessions" USING btree ("server_id","started_at");--> statement-breakpoint
CREATE INDEX "sessions_state_idx" ON "sessions" USING btree ("state");--> statement-breakpoint
CREATE INDEX "sessions_external_session_idx" ON "sessions" USING btree ("server_id","external_session_id");--> statement-breakpoint
CREATE INDEX "sessions_device_idx" ON "sessions" USING btree ("server_user_id","device_id");--> statement-breakpoint
CREATE INDEX "sessions_reference_idx" ON "sessions" USING btree ("reference_id");--> statement-breakpoint
CREATE INDEX "sessions_server_user_rating_idx" ON "sessions" USING btree ("server_user_id","rating_key");--> statement-breakpoint
CREATE INDEX "sessions_geo_idx" ON "sessions" USING btree ("geo_lat","geo_lon");--> statement-breakpoint
CREATE INDEX "sessions_geo_time_idx" ON "sessions" USING btree ("started_at","geo_lat","geo_lon");--> statement-breakpoint
CREATE INDEX "sessions_media_type_idx" ON "sessions" USING btree ("media_type");--> statement-breakpoint
CREATE INDEX "sessions_transcode_idx" ON "sessions" USING btree ("is_transcode");--> statement-breakpoint
CREATE INDEX "sessions_platform_idx" ON "sessions" USING btree ("platform");--> statement-breakpoint
CREATE INDEX "sessions_top_movies_idx" ON "sessions" USING btree ("media_type","media_title","year");--> statement-breakpoint
CREATE INDEX "sessions_top_shows_idx" ON "sessions" USING btree ("media_type","grandparent_title");--> statement-breakpoint
CREATE UNIQUE INDEX "users_username_unique" ON "users" USING btree ("username");--> statement-breakpoint
CREATE UNIQUE INDEX "users_email_unique" ON "users" USING btree ("email");--> statement-breakpoint
CREATE INDEX "users_plex_account_id_idx" ON "users" USING btree ("plex_account_id");--> statement-breakpoint
CREATE INDEX "users_role_idx" ON "users" USING btree ("role");--> statement-breakpoint
CREATE INDEX "violations_server_user_id_idx" ON "violations" USING btree ("server_user_id");--> statement-breakpoint
CREATE INDEX "violations_rule_id_idx" ON "violations" USING btree ("rule_id");--> statement-breakpoint
CREATE INDEX "violations_created_at_idx" ON "violations" USING btree ("created_at");

View File

@@ -0,0 +1,2 @@
-- Replace the unique username index with a plain lookup index.
-- NOTE(review): this permits duplicate usernames — confirm that is intentional.
DROP INDEX "users_username_unique";--> statement-breakpoint
CREATE INDEX "users_username_idx" ON "users" USING btree ("username");

View File

@@ -0,0 +1,26 @@
-- Mobile device support: per-device refresh sessions and pairing tokens.
-- Only 64-char token hashes are stored (presumably hex SHA-256 — confirm).
CREATE TABLE "mobile_sessions" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"refresh_token_hash" varchar(64) NOT NULL,
"device_name" varchar(100) NOT NULL,
"device_id" varchar(100) NOT NULL,
"platform" varchar(20) NOT NULL,
"expo_push_token" varchar(255),
"last_seen_at" timestamp with time zone DEFAULT now() NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
CONSTRAINT "mobile_sessions_refresh_token_hash_unique" UNIQUE("refresh_token_hash")
);
--> statement-breakpoint
CREATE TABLE "mobile_tokens" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"token_hash" varchar(64) NOT NULL,
"is_enabled" boolean DEFAULT true NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"rotated_at" timestamp with time zone,
CONSTRAINT "mobile_tokens_token_hash_unique" UNIQUE("token_hash")
);
--> statement-breakpoint
-- Settings for externally reachable deployments (e.g. behind a reverse proxy).
ALTER TABLE "settings" ADD COLUMN "external_url" text;--> statement-breakpoint
ALTER TABLE "settings" ADD COLUMN "base_path" varchar(100) DEFAULT '' NOT NULL;--> statement-breakpoint
ALTER TABLE "settings" ADD COLUMN "trust_proxy" boolean DEFAULT false NOT NULL;--> statement-breakpoint
CREATE INDEX "mobile_sessions_device_id_idx" ON "mobile_sessions" USING btree ("device_id");--> statement-breakpoint
CREATE INDEX "mobile_sessions_refresh_token_idx" ON "mobile_sessions" USING btree ("refresh_token_hash");

View File

@@ -0,0 +1,28 @@
-- Push-notification preferences, one row per paired mobile session (enforced
-- by the UNIQUE constraint on mobile_session_id).
-- quiet_hours_start/end are varchar(5) — presumably "HH:MM" strings; confirm.
CREATE TABLE "notification_preferences" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"mobile_session_id" uuid NOT NULL,
"push_enabled" boolean DEFAULT true NOT NULL,
"on_violation_detected" boolean DEFAULT true NOT NULL,
"on_stream_started" boolean DEFAULT false NOT NULL,
"on_stream_stopped" boolean DEFAULT false NOT NULL,
"on_concurrent_streams" boolean DEFAULT true NOT NULL,
"on_new_device" boolean DEFAULT true NOT NULL,
"on_trust_score_changed" boolean DEFAULT false NOT NULL,
"on_server_down" boolean DEFAULT true NOT NULL,
"on_server_up" boolean DEFAULT true NOT NULL,
"violation_min_severity" integer DEFAULT 1 NOT NULL,
"violation_rule_types" text[] DEFAULT '{}',
"max_per_minute" integer DEFAULT 10 NOT NULL,
"max_per_hour" integer DEFAULT 60 NOT NULL,
"quiet_hours_enabled" boolean DEFAULT false NOT NULL,
"quiet_hours_start" varchar(5),
"quiet_hours_end" varchar(5),
"quiet_hours_timezone" varchar(50) DEFAULT 'UTC',
"quiet_hours_override_critical" boolean DEFAULT true NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL,
CONSTRAINT "notification_preferences_mobile_session_id_unique" UNIQUE("mobile_session_id")
);
--> statement-breakpoint
-- Preferences die with their mobile session (cascade).
ALTER TABLE "notification_preferences" ADD CONSTRAINT "notification_preferences_mobile_session_id_mobile_sessions_id_fk" FOREIGN KEY ("mobile_session_id") REFERENCES "public"."mobile_sessions"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "notification_prefs_mobile_session_idx" ON "notification_preferences" USING btree ("mobile_session_id");

View File

@@ -0,0 +1,25 @@
-- Per-event-type channel routing (discord / webhook / push toggles), seeded
-- below with a default row for every known event type.
CREATE TABLE "notification_channel_routing" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"event_type" varchar(50) NOT NULL,
"discord_enabled" boolean DEFAULT true NOT NULL,
"webhook_enabled" boolean DEFAULT true NOT NULL,
"push_enabled" boolean DEFAULT true NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL,
CONSTRAINT "notification_channel_routing_event_type_unique" UNIQUE("event_type")
);
--> statement-breakpoint
CREATE INDEX "notification_channel_routing_event_type_idx" ON "notification_channel_routing" USING btree ("event_type");
--> statement-breakpoint
-- Seed default routing configuration for all event types
-- (idempotent via ON CONFLICT DO NOTHING).
INSERT INTO "notification_channel_routing" ("event_type", "discord_enabled", "webhook_enabled", "push_enabled")
VALUES
('violation_detected', true, true, true),
('stream_started', false, false, false),
('stream_stopped', false, false, false),
('concurrent_streams', true, true, true),
('new_device', true, true, true),
('trust_score_changed', false, false, false),
('server_down', true, true, true),
('server_up', true, true, true)
ON CONFLICT ("event_type") DO NOTHING;

View File

@@ -0,0 +1 @@
-- Nullable 64-char device_secret on mobile_sessions (hash-sized; presumably
-- used for device-level authentication — confirm against the mobile auth flow).
ALTER TABLE "mobile_sessions" ADD COLUMN "device_secret" varchar(64);

View File

@@ -0,0 +1,3 @@
-- Index push-token lookups, and enforce a 24-hour "HH:MM" format on the
-- quiet-hours bounds (NULL still allowed).
CREATE INDEX "mobile_sessions_expo_push_token_idx" ON "mobile_sessions" USING btree ("expo_push_token");--> statement-breakpoint
ALTER TABLE "notification_preferences" ADD CONSTRAINT "quiet_hours_start_format" CHECK ("notification_preferences"."quiet_hours_start" IS NULL OR "notification_preferences"."quiet_hours_start" ~ '^([01][0-9]|2[0-3]):[0-5][0-9]$');--> statement-breakpoint
ALTER TABLE "notification_preferences" ADD CONSTRAINT "quiet_hours_end_format" CHECK ("notification_preferences"."quiet_hours_end" IS NULL OR "notification_preferences"."quiet_hours_end" ~ '^([01][0-9]|2[0-3]):[0-5][0-9]$');

View File

@@ -0,0 +1 @@
-- Feature flag: mobile support is off by default.
ALTER TABLE "settings" ADD COLUMN "mobile_enabled" boolean DEFAULT false NOT NULL;

View File

@@ -0,0 +1,41 @@
-- Custom SQL migration file, put your code below! --
-- Update mobile_tokens schema for one-time pairing tokens
-- Remove old columns (is_enabled, rotated_at) and add new columns (expires_at, created_by, used_at)
-- Step 1: Clear existing tokens (breaking change - old schema incompatible)
DELETE FROM "mobile_tokens";
-- Step 2: Drop old columns
ALTER TABLE "mobile_tokens" DROP COLUMN IF EXISTS "is_enabled";
ALTER TABLE "mobile_tokens" DROP COLUMN IF EXISTS "rotated_at";
-- Step 3: Add new required column with temporary default (IF NOT EXISTS for idempotency)
-- The table is empty here (Step 1), so the NOT NULL + default is safe.
DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'mobile_tokens' AND column_name = 'expires_at') THEN
ALTER TABLE "mobile_tokens" ADD COLUMN "expires_at" timestamp with time zone NOT NULL DEFAULT NOW() + INTERVAL '15 minutes';
END IF;
END $$;
-- Step 4: Add nullable columns (IF NOT EXISTS for idempotency)
DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'mobile_tokens' AND column_name = 'created_by') THEN
ALTER TABLE "mobile_tokens" ADD COLUMN "created_by" uuid;
END IF;
END $$;
DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'mobile_tokens' AND column_name = 'used_at') THEN
ALTER TABLE "mobile_tokens" ADD COLUMN "used_at" timestamp with time zone;
END IF;
END $$;
-- Step 5: Add foreign key constraint (IF NOT EXISTS for idempotency)
-- NOTE(review): this existence check filters on constraint_name only, not
-- table_name/schema — a same-named constraint elsewhere would skip the ALTER.
DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.table_constraints WHERE constraint_name = 'mobile_tokens_created_by_users_id_fk') THEN
ALTER TABLE "mobile_tokens" ADD CONSTRAINT "mobile_tokens_created_by_users_id_fk" FOREIGN KEY ("created_by") REFERENCES "users"("id") ON DELETE CASCADE ON UPDATE NO ACTION;
END IF;
END $$;
-- Step 6: Remove temporary default from expires_at
ALTER TABLE "mobile_tokens" ALTER COLUMN "expires_at" DROP DEFAULT;

View File

@@ -0,0 +1,20 @@
-- TimescaleDB hypertables with columnstore don't allow non-constant defaults like now()
-- So we add columns as nullable first, backfill, then set NOT NULL
-- Add last_seen_at as nullable first
ALTER TABLE "sessions" ADD COLUMN "last_seen_at" timestamp with time zone;--> statement-breakpoint
-- Backfill existing rows: use started_at as the initial last_seen_at value
UPDATE "sessions" SET "last_seen_at" = "started_at" WHERE "last_seen_at" IS NULL;--> statement-breakpoint
-- Now set NOT NULL constraint (no default needed - app always provides value)
ALTER TABLE "sessions" ALTER COLUMN "last_seen_at" SET NOT NULL;--> statement-breakpoint
-- Add force_stopped column
ALTER TABLE "sessions" ADD COLUMN "force_stopped" boolean DEFAULT false NOT NULL;--> statement-breakpoint
-- Add short_session column
ALTER TABLE "sessions" ADD COLUMN "short_session" boolean DEFAULT false NOT NULL;--> statement-breakpoint
-- Create index for stale session detection
-- (composite on last_seen_at + stopped_at supports "not seen recently and not
-- yet stopped" scans).
CREATE INDEX "sessions_stale_detection_idx" ON "sessions" USING btree ("last_seen_at","stopped_at");

View File

@@ -0,0 +1,18 @@
-- Multi-server support: Add user_id to mobile_sessions
-- BREAKING CHANGE: Clears existing mobile sessions - users must re-pair devices
-- Clear existing data (notification_preferences has FK to mobile_sessions)
DELETE FROM "notification_preferences";--> statement-breakpoint
DELETE FROM "mobile_sessions";--> statement-breakpoint
-- Unrelated schema drift fix from drizzle-kit
ALTER TABLE "sessions" ALTER COLUMN "last_seen_at" DROP DEFAULT;--> statement-breakpoint
-- Add user_id column (required for multi-user mobile support)
-- Adding it NOT NULL without a default is safe only because the table was
-- just emptied above.
ALTER TABLE "mobile_sessions" ADD COLUMN "user_id" uuid NOT NULL;--> statement-breakpoint
-- Add foreign key constraint
ALTER TABLE "mobile_sessions" ADD CONSTRAINT "mobile_sessions_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
-- Add index for efficient user lookups
CREATE INDEX "mobile_sessions_user_idx" ON "mobile_sessions" USING btree ("user_id");

View File

@@ -0,0 +1,27 @@
-- Note: session_id has no FK constraint because sessions is a TimescaleDB hypertable
-- (hypertables don't support foreign key references to their primary key)
-- Audit log of session terminations; "trigger" records how the termination
-- was initiated, and success/error_message record the outcome.
CREATE TABLE "termination_logs" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"session_id" uuid NOT NULL,
"server_id" uuid NOT NULL,
"server_user_id" uuid NOT NULL,
"trigger" varchar(20) NOT NULL,
"triggered_by_user_id" uuid,
"rule_id" uuid,
"violation_id" uuid,
"reason" text,
"success" boolean NOT NULL,
"error_message" text,
"created_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Cascade when the server/server_user is deleted; SET NULL for the optional
-- actors (triggering user, rule, violation) so log rows outlive them.
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_server_id_servers_id_fk" FOREIGN KEY ("server_id") REFERENCES "public"."servers"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_server_user_id_server_users_id_fk" FOREIGN KEY ("server_user_id") REFERENCES "public"."server_users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_triggered_by_user_id_users_id_fk" FOREIGN KEY ("triggered_by_user_id") REFERENCES "public"."users"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_rule_id_rules_id_fk" FOREIGN KEY ("rule_id") REFERENCES "public"."rules"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_violation_id_violations_id_fk" FOREIGN KEY ("violation_id") REFERENCES "public"."violations"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "termination_logs_session_idx" ON "termination_logs" USING btree ("session_id");--> statement-breakpoint
CREATE INDEX "termination_logs_server_user_idx" ON "termination_logs" USING btree ("server_user_id");--> statement-breakpoint
CREATE INDEX "termination_logs_triggered_by_idx" ON "termination_logs" USING btree ("triggered_by_user_id");--> statement-breakpoint
CREATE INDEX "termination_logs_rule_idx" ON "termination_logs" USING btree ("rule_id");--> statement-breakpoint
CREATE INDEX "termination_logs_created_at_idx" ON "termination_logs" USING btree ("created_at");

View File

@@ -0,0 +1 @@
-- Nullable Plex-specific session identifier, alongside the generic
-- external_session_id column.
ALTER TABLE "sessions" ADD COLUMN "plex_session_id" varchar(255);

View File

@@ -0,0 +1,3 @@
-- Defensively drop the termination_logs -> sessions FK if it ever existed,
-- and add a composite index used as a fallback for session de-duplication.
ALTER TABLE "termination_logs" DROP CONSTRAINT IF EXISTS "termination_logs_session_id_sessions_id_fk";
--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "sessions_dedup_fallback_idx" ON "sessions" USING btree ("server_id","server_user_id","rating_key","started_at");

View File

@@ -0,0 +1,2 @@
-- Notification settings: optional webhook payload format selector and an
-- ntfy topic.
ALTER TABLE "settings" ADD COLUMN "webhook_format" text;--> statement-breakpoint
ALTER TABLE "settings" ADD COLUMN "ntfy_topic" text;

View File

@@ -0,0 +1 @@
-- Nullable machine identifier for servers (presumably the Plex
-- machineIdentifier — confirm against the polling code).
ALTER TABLE "servers" ADD COLUMN "machine_identifier" varchar(100);

View File

@@ -0,0 +1,4 @@
-- Drop the legacy per-event notify_* flags from settings; per-event routing
-- presumably lives in notification_channel_routing now — confirm callers.
ALTER TABLE "settings" DROP COLUMN "notify_on_violation";--> statement-breakpoint
ALTER TABLE "settings" DROP COLUMN "notify_on_session_start";--> statement-breakpoint
ALTER TABLE "settings" DROP COLUMN "notify_on_session_stop";--> statement-breakpoint
ALTER TABLE "settings" DROP COLUMN "notify_on_server_down";

View File

@@ -0,0 +1 @@
-- Add primary_auth_method setting, defaulting to 'local'.
ALTER TABLE "settings" ADD COLUMN "primary_auth_method" varchar(20) DEFAULT 'local' NOT NULL;

View File

@@ -0,0 +1,2 @@
-- Add a web toast channel toggle to routing, and a unit_system preference
-- (default 'metric') to settings.
ALTER TABLE "notification_channel_routing" ADD COLUMN "web_toast_enabled" boolean DEFAULT true NOT NULL;--> statement-breakpoint
ALTER TABLE "settings" ADD COLUMN "unit_system" varchar(20) DEFAULT 'metric' NOT NULL;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,139 @@
{
"version": "7",
"dialect": "postgresql",
"entries": [
{
"idx": 0,
"version": "7",
"when": 1764697096357,
"tag": "0000_lying_dorian_gray",
"breakpoints": true
},
{
"idx": 1,
"version": "7",
"when": 1764705329215,
"tag": "0001_graceful_starjammers",
"breakpoints": true
},
{
"idx": 2,
"version": "7",
"when": 1764788872702,
"tag": "0002_rainy_bishop",
"breakpoints": true
},
{
"idx": 3,
"version": "7",
"when": 1764799561925,
"tag": "0003_black_maginty",
"breakpoints": true
},
{
"idx": 4,
"version": "7",
"when": 1764800111127,
"tag": "0004_bent_unus",
"breakpoints": true
},
{
"idx": 5,
"version": "7",
"when": 1764801611462,
"tag": "0005_elite_wendell_vaughn",
"breakpoints": true
},
{
"idx": 6,
"version": "7",
"when": 1764806894704,
"tag": "0006_worthless_blue_shield",
"breakpoints": true
},
{
"idx": 7,
"version": "7",
"when": 1764865910195,
"tag": "0007_tense_pestilence",
"breakpoints": true
},
{
"idx": 8,
"version": "7",
"when": 1764871905960,
"tag": "0008_update_mobile_tokens_schema",
"breakpoints": true
},
{
"idx": 9,
"version": "7",
"when": 1764996689797,
"tag": "0009_quiet_vertigo",
"breakpoints": true
},
{
"idx": 10,
"version": "7",
"when": 1765214112454,
"tag": "0010_fair_zuras",
"breakpoints": true
},
{
"idx": 11,
"version": "7",
"when": 1765302271434,
"tag": "0011_breezy_ultron",
"breakpoints": true
},
{
"idx": 12,
"version": "7",
"when": 1765303740465,
"tag": "0012_strong_hannibal_king",
"breakpoints": true
},
{
"idx": 13,
"version": "7",
"when": 1765468947659,
"tag": "0013_same_rage",
"breakpoints": true
},
{
"idx": 14,
"version": "7",
"when": 1765479413354,
"tag": "0014_past_molly_hayes",
"breakpoints": true
},
{
"idx": 15,
"version": "7",
"when": 1765482999236,
"tag": "0015_gifted_the_liberteens",
"breakpoints": true
},
{
"idx": 16,
"version": "7",
"when": 1765571040426,
"tag": "0016_yummy_enchantress",
"breakpoints": true
},
{
"idx": 17,
"version": "7",
"when": 1765667812248,
"tag": "0017_broken_husk",
"breakpoints": true
},
{
"idx": 18,
"version": "7",
"when": 1765858132064,
"tag": "0018_robust_shotgun",
"breakpoints": true
}
]
}

View File

@@ -0,0 +1,325 @@
/**
* Prepared statements for hot-path queries
*
* Prepared statements optimize performance by allowing PostgreSQL to reuse
* query plans across executions. These are particularly valuable for:
* - Queries called on every page load (dashboard)
* - Queries called frequently during polling
* - Queries with predictable parameter patterns
*
* @see https://orm.drizzle.team/docs/perf-queries
*/
import { eq, gte, and, isNull, desc, sql } from 'drizzle-orm';
import { db } from './client.js';
import { sessions, violations, users, serverUsers, servers, rules } from './schema.js';
// ============================================================================
// Dashboard Stats Queries
// ============================================================================
/**
 * Count unique plays since a given date.
 * A "play" groups session rows by reference_id; rows without one count
 * individually (COALESCE falls back to the row's own id).
 * Used for: Dashboard "Today's Plays" metric
 * Called: Every dashboard page load
 */
const playsCountSinceQuery = db
  .select({
    count: sql<number>`count(DISTINCT COALESCE(reference_id, id))::int`,
  })
  .from(sessions)
  .where(gte(sessions.startedAt, sql.placeholder('since')));

export const playsCountSince = playsCountSinceQuery.prepare('plays_count_since');
/**
 * Sum total watch time (duration_ms) since a given date
 * Used for: Dashboard "Watch Time" metric
 * Called: Every dashboard page load
 *
 * NOTE(review): the SQL casts to ::bigint while the TS type says number —
 * node-postgres returns int8 values as strings unless a custom type parser is
 * installed; confirm callers coerce totalMs before doing arithmetic on it.
 */
export const watchTimeSince = db
.select({
totalMs: sql<number>`COALESCE(SUM(duration_ms), 0)::bigint`,
})
.from(sessions)
.where(gte(sessions.startedAt, sql.placeholder('since')))
.prepare('watch_time_since');
/**
 * Count violations created on or after a given date.
 * Used for: Dashboard "Alerts" metric
 * Called: Every dashboard page load
 */
const violationsSinceFilter = gte(violations.createdAt, sql.placeholder('since'));

export const violationsCountSince = db
  .select({ count: sql<number>`count(*)::int` })
  .from(violations)
  .where(violationsSinceFilter)
  .prepare('violations_count_since');
/**
 * Count distinct server users with at least one session since a given date.
 * Used for: Dashboard "Active Users Today" metric
 * Called: Every dashboard page load
 */
const activeSinceFilter = gte(sessions.startedAt, sql.placeholder('since'));

export const uniqueUsersSince = db
  .select({ count: sql<number>`count(DISTINCT server_user_id)::int` })
  .from(sessions)
  .where(activeSinceFilter)
  .prepare('unique_users_since');
/**
 * Count violations that have not yet been acknowledged.
 * Used for: Alert badge in navigation
 * Called: On app load and after acknowledgment
 */
const notYetAcknowledged = isNull(violations.acknowledgedAt);

export const unacknowledgedViolationsCount = db
  .select({ count: sql<number>`count(*)::int` })
  .from(violations)
  .where(notYetAcknowledged)
  .prepare('unacknowledged_violations_count');
// ============================================================================
// Polling Queries
// ============================================================================
/**
 * Find server user by server ID and external (Plex/Jellyfin/Emby) user ID.
 *
 * Execute params: { serverId: string, externalId: string }
 * Used for: Server user lookup during session polling
 * Called: Every poll cycle for each active session (potentially 10+ times per 15 seconds)
 */
export const serverUserByExternalId = db
  .select()
  .from(serverUsers)
  .where(
    and(
      eq(serverUsers.serverId, sql.placeholder('serverId')),
      eq(serverUsers.externalId, sql.placeholder('externalId'))
    )
  )
  .limit(1)
  .prepare('server_user_by_external_id');
/**
 * Find session by server ID and session key.
 *
 * Execute params: { serverId: string, sessionKey: string }
 * Used for: Session lookup during polling to check for existing sessions
 * Called: Every poll cycle for each active session
 */
export const sessionByServerAndKey = db
  .select()
  .from(sessions)
  .where(
    and(
      eq(sessions.serverId, sql.placeholder('serverId')),
      eq(sessions.sessionKey, sql.placeholder('sessionKey'))
    )
  )
  .limit(1)
  .prepare('session_by_server_and_key');
// ============================================================================
// User Queries
// ============================================================================
/**
 * Get server user by ID with basic info (narrow projection, not the full row).
 *
 * Execute params: { id: string }
 * Used for: Server user details in violations, sessions
 * Called: Frequently for UI enrichment
 */
export const serverUserById = db
  .select({
    id: serverUsers.id,
    userId: serverUsers.userId,
    username: serverUsers.username,
    thumbUrl: serverUsers.thumbUrl,
    trustScore: serverUsers.trustScore,
  })
  .from(serverUsers)
  .where(eq(serverUsers.id, sql.placeholder('id')))
  .limit(1)
  .prepare('server_user_by_id');
/**
 * Get user identity by ID (the real person behind one or more server accounts).
 *
 * Execute params: { id: string }
 * Used for: User identity info (the real person)
 * Called: When viewing user profile
 */
export const userById = db
  .select({
    id: users.id,
    name: users.name,
    thumbnail: users.thumbnail,
    email: users.email,
    role: users.role,
    aggregateTrustScore: users.aggregateTrustScore,
  })
  .from(users)
  .where(eq(users.id, sql.placeholder('id')))
  .limit(1)
  .prepare('user_by_id');
// ============================================================================
// Session Queries
// ============================================================================
/**
 * Get session by ID (full row).
 *
 * Execute params: { id: string }
 * Used for: Session detail page, violation context
 * Called: When viewing session details
 */
export const sessionById = db
  .select()
  .from(sessions)
  .where(eq(sessions.id, sql.placeholder('id')))
  .limit(1)
  .prepare('session_by_id');
// ============================================================================
// Stats Queries (hot-path for dashboard and analytics pages)
// ============================================================================
/**
 * Plays by platform since a given date, most-played platform first.
 * Uses the same DISTINCT COALESCE(reference_id, id) play definition as
 * playsCountSince so the dashboard numbers agree.
 *
 * Execute params: { since: Date }
 * Used for: Stats platform breakdown chart
 * Called: Every stats page load
 */
export const playsByPlatformSince = db
  .select({
    platform: sessions.platform,
    count: sql<number>`count(DISTINCT COALESCE(reference_id, id))::int`,
  })
  .from(sessions)
  .where(gte(sessions.startedAt, sql.placeholder('since')))
  .groupBy(sessions.platform)
  .orderBy(sql`count(DISTINCT COALESCE(reference_id, id)) DESC`)
  .prepare('plays_by_platform_since');
/**
 * Quality breakdown (direct play vs transcode) since a given date.
 *
 * Execute params: { since: Date }
 * Used for: Stats quality chart
 * Called: Every stats page load
 */
export const qualityStatsSince = db
  .select({
    isTranscode: sessions.isTranscode,
    count: sql<number>`count(DISTINCT COALESCE(reference_id, id))::int`,
  })
  .from(sessions)
  .where(gte(sessions.startedAt, sql.placeholder('since')))
  .groupBy(sessions.isTranscode)
  .prepare('quality_stats_since');
/**
 * Watch time by media type since a given date.
 *
 * NOTE(review): ::bigint is returned as a string by node-postgres unless an
 * int8 type parser is registered — totalMs may not be a number at runtime
 * despite the sql<number> annotation; confirm driver configuration.
 *
 * Execute params: { since: Date }
 * Used for: Watch time breakdown by content type
 * Called: Stats page load
 */
export const watchTimeByTypeSince = db
  .select({
    mediaType: sessions.mediaType,
    totalMs: sql<number>`COALESCE(SUM(duration_ms), 0)::bigint`,
  })
  .from(sessions)
  .where(gte(sessions.startedAt, sql.placeholder('since')))
  .groupBy(sessions.mediaType)
  .prepare('watch_time_by_type_since');
// ============================================================================
// Rule Queries (hot-path for poller)
// ============================================================================
/**
 * Get all active rules (is_active = true), global and per-user alike.
 *
 * Used for: Rule evaluation during session polling
 * Called: Every poll cycle (~15 seconds per server)
 */
export const getActiveRules = db
  .select()
  .from(rules)
  .where(eq(rules.isActive, true))
  .prepare('get_active_rules');
/**
 * Get recent sessions for a server user, newest first, hard-capped at 100
 * rows. Projects only the columns the rule engine needs (time, IP, device,
 * geo, state) to keep the hot path light.
 *
 * Execute params: { serverUserId: string, since: Date }
 * Used for: Evaluating device velocity, concurrent streams rules
 * Called: During rule evaluation for active sessions
 */
export const getUserRecentSessions = db
  .select({
    id: sessions.id,
    startedAt: sessions.startedAt,
    stoppedAt: sessions.stoppedAt,
    ipAddress: sessions.ipAddress,
    deviceId: sessions.deviceId,
    geoLat: sessions.geoLat,
    geoLon: sessions.geoLon,
    geoCity: sessions.geoCity,
    geoCountry: sessions.geoCountry,
    state: sessions.state,
  })
  .from(sessions)
  .where(
    and(
      eq(sessions.serverUserId, sql.placeholder('serverUserId')),
      gte(sessions.startedAt, sql.placeholder('since'))
    )
  )
  .orderBy(desc(sessions.startedAt))
  .limit(100)
  .prepare('get_user_recent_sessions');
// ============================================================================
// Violation Queries
// ============================================================================
/**
 * Get unacknowledged violations, newest first, with a caller-supplied limit
 * (bound at execute time via the 'limit' placeholder).
 *
 * Execute params: { limit: number }
 * Used for: Violation list in dashboard
 * Called: Frequently for alert displays
 */
export const getUnackedViolations = db
  .select()
  .from(violations)
  .where(isNull(violations.acknowledgedAt))
  .orderBy(desc(violations.createdAt))
  .limit(sql.placeholder('limit'))
  .prepare('get_unacked_violations');
// ============================================================================
// Server Queries
// ============================================================================
/**
 * Get server by ID (full row — note this includes the encrypted token column).
 *
 * Execute params: { id: string }
 * Used for: Server details, validation
 * Called: Frequently during API requests
 */
export const serverById = db
  .select()
  .from(servers)
  .where(eq(servers.id, sql.placeholder('id')))
  .limit(1)
  .prepare('server_by_id');
// ============================================================================
// Type exports for execute results
// ============================================================================
// Each alias resolves to the awaited row-array type of the corresponding
// prepared statement's .execute(), so callers don't re-derive result shapes.
export type PlaysCountResult = Awaited<ReturnType<typeof playsCountSince.execute>>;
export type WatchTimeResult = Awaited<ReturnType<typeof watchTimeSince.execute>>;
export type ViolationsCountResult = Awaited<ReturnType<typeof violationsCountSince.execute>>;
export type ServerUserByExternalIdResult = Awaited<ReturnType<typeof serverUserByExternalId.execute>>;
export type ServerUserByIdResult = Awaited<ReturnType<typeof serverUserById.execute>>;
export type UserByIdResult = Awaited<ReturnType<typeof userById.execute>>;
export type SessionByIdResult = Awaited<ReturnType<typeof sessionById.execute>>;
export type PlaysByPlatformResult = Awaited<ReturnType<typeof playsByPlatformSince.execute>>;
export type QualityStatsResult = Awaited<ReturnType<typeof qualityStatsSince.execute>>;
export type WatchTimeByTypeResult = Awaited<ReturnType<typeof watchTimeByTypeSince.execute>>;
export type ActiveRulesResult = Awaited<ReturnType<typeof getActiveRules.execute>>;
export type UserRecentSessionsResult = Awaited<ReturnType<typeof getUserRecentSessions.execute>>;
export type UnackedViolationsResult = Awaited<ReturnType<typeof getUnackedViolations.execute>>;
export type ServerByIdResult = Awaited<ReturnType<typeof serverById.execute>>;

View File

@@ -0,0 +1,624 @@
/**
* Drizzle ORM schema definitions for Tracearr
*
* Multi-Server User Architecture:
* - `users` = Identity (the real human)
* - `server_users` = Account on a specific server (Plex/Jellyfin/Emby)
* - One user can have multiple server_users (accounts across servers)
* - Sessions and violations link to server_users (server-specific)
*/
import {
pgTable,
uuid,
varchar,
text,
timestamp,
boolean,
integer,
real,
jsonb,
index,
uniqueIndex,
check,
} from 'drizzle-orm/pg-core';
import { relations, sql } from 'drizzle-orm';
// Value lists backing the varchar column $type<> unions used throughout this
// schema. These are plain varchar columns, not Postgres enum types, so adding
// a value is a code-only change (no migration).
// Server types enum
export const serverTypeEnum = ['plex', 'jellyfin', 'emby'] as const;
// Session state enum
export const sessionStateEnum = ['playing', 'paused', 'stopped'] as const;
// Media type enum
export const mediaTypeEnum = ['movie', 'episode', 'track'] as const;
// Rule type enum
export const ruleTypeEnum = [
  'impossible_travel',
  'simultaneous_locations',
  'device_velocity',
  'concurrent_streams',
  'geo_restriction',
] as const;
// Violation severity enum
export const violationSeverityEnum = ['low', 'warning', 'high'] as const;
/**
 * Media servers (Plex/Jellyfin/Emby instances) monitored by Tracearr.
 *
 * NOTE: updatedAt has only an insert-time default (no $onUpdate here) —
 * application code is responsible for refreshing it on writes.
 */
export const servers = pgTable('servers', {
  id: uuid('id').primaryKey().defaultRandom(),
  name: varchar('name', { length: 100 }).notNull(),
  type: varchar('type', { length: 20 }).notNull().$type<(typeof serverTypeEnum)[number]>(),
  url: text('url').notNull(), // Base URL used to reach the server
  token: text('token').notNull(), // API token — stored encrypted
  machineIdentifier: varchar('machine_identifier', { length: 100 }), // Plex clientIdentifier for dedup
  createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
  updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
});
/**
 * Users - Identity table representing real humans
 *
 * This is the "anchor" identity that can own multiple server accounts.
 * Stores authentication credentials and aggregated metrics.
 */
export const users = pgTable(
  'users',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    // Identity
    // NOTE(review): described here as a login identifier but indexed below as
    // a non-unique display name, and no unique index exists — duplicates are
    // possible at the DB level. Confirm the intended uniqueness contract.
    username: varchar('username', { length: 100 }).notNull(),
    name: varchar('name', { length: 255 }), // Display name (optional, defaults to null)
    thumbnail: text('thumbnail'), // Custom avatar (nullable)
    email: varchar('email', { length: 255 }), // For identity matching (nullable)
    // Authentication (nullable - not all users authenticate directly)
    passwordHash: text('password_hash'), // bcrypt hash for local login
    plexAccountId: varchar('plex_account_id', { length: 255 }), // Plex.tv global account ID for OAuth
    // Access control - combined permission level and account status
    // Can log in: 'owner', 'admin', 'viewer'
    // Cannot log in: 'member' (default), 'disabled', 'pending'
    role: varchar('role', { length: 20 })
      .notNull()
      .$type<'owner' | 'admin' | 'viewer' | 'member' | 'disabled' | 'pending'>()
      .default('member'),
    // Aggregated metrics (cached, updated by triggers)
    aggregateTrustScore: integer('aggregate_trust_score').notNull().default(100),
    totalViolations: integer('total_violations').notNull().default(0),
    // Timestamps
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    // Username is display name from media server (not unique across servers)
    index('users_username_idx').on(table.username),
    // Unique when present; Postgres unique indexes permit multiple NULL emails
    uniqueIndex('users_email_unique').on(table.email),
    index('users_plex_account_id_idx').on(table.plexAccountId),
    index('users_role_idx').on(table.role),
  ]
);
/**
 * Server Users - Account on a specific media server
 *
 * Represents a user's account on a Plex/Jellyfin/Emby server.
 * One user (identity) can have multiple server_users (accounts across servers).
 * Sessions and violations link here for per-server tracking.
 */
export const serverUsers = pgTable(
  'server_users',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    // Relationships - always linked to both user and server
    userId: uuid('user_id')
      .notNull()
      .references(() => users.id, { onDelete: 'cascade' }),
    serverId: uuid('server_id')
      .notNull()
      .references(() => servers.id, { onDelete: 'cascade' }),
    // Server-specific identity
    externalId: varchar('external_id', { length: 255 }).notNull(), // Plex/Jellyfin user ID on that server
    username: varchar('username', { length: 255 }).notNull(), // Username on this server
    email: varchar('email', { length: 255 }), // Email from server sync (may differ from users.email)
    thumbUrl: text('thumb_url'), // Avatar from server
    // Server-specific permissions
    isServerAdmin: boolean('is_server_admin').notNull().default(false),
    // Per-server trust
    trustScore: integer('trust_score').notNull().default(100),
    sessionCount: integer('session_count').notNull().default(0), // For aggregate weighting
    // Timestamps
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    // One account per user per server
    uniqueIndex('server_users_user_server_unique').on(table.userId, table.serverId),
    // Supports atomic upsert (ON CONFLICT) during server sync
    uniqueIndex('server_users_server_external_unique').on(table.serverId, table.externalId),
    // Query optimization
    index('server_users_user_idx').on(table.userId),
    index('server_users_server_idx').on(table.serverId),
    index('server_users_username_idx').on(table.username),
  ]
);
/**
 * Sessions - playback session history, one row per observed session.
 *
 * Converted to a TimescaleDB hypertable at startup (see timescale init
 * module); see terminationLogs for why other tables avoid FK references
 * into this table once converted.
 */
export const sessions = pgTable(
  'sessions',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    serverId: uuid('server_id')
      .notNull()
      .references(() => servers.id, { onDelete: 'cascade' }),
    // Links to server_users for per-server tracking
    serverUserId: uuid('server_user_id')
      .notNull()
      .references(() => serverUsers.id, { onDelete: 'cascade' }),
    sessionKey: varchar('session_key', { length: 255 }).notNull(),
    // Plex Session.id - required for termination API (different from sessionKey)
    // For Jellyfin/Emby, sessionKey is used directly for termination
    plexSessionId: varchar('plex_session_id', { length: 255 }),
    state: varchar('state', { length: 20 }).notNull().$type<(typeof sessionStateEnum)[number]>(),
    mediaType: varchar('media_type', { length: 20 })
      .notNull()
      .$type<(typeof mediaTypeEnum)[number]>(),
    mediaTitle: text('media_title').notNull(),
    // Enhanced media metadata for episodes
    grandparentTitle: varchar('grandparent_title', { length: 500 }), // Show name (for episodes)
    seasonNumber: integer('season_number'), // Season number (for episodes)
    episodeNumber: integer('episode_number'), // Episode number (for episodes)
    year: integer('year'), // Release year
    thumbPath: varchar('thumb_path', { length: 500 }), // Poster path (e.g., /library/metadata/123/thumb)
    ratingKey: varchar('rating_key', { length: 255 }), // Plex/Jellyfin media identifier
    externalSessionId: varchar('external_session_id', { length: 255 }), // External reference for deduplication
    startedAt: timestamp('started_at', { withTimezone: true }).notNull().defaultNow(),
    stoppedAt: timestamp('stopped_at', { withTimezone: true }), // Null while the session is still active
    lastSeenAt: timestamp('last_seen_at', { withTimezone: true }).notNull(), // Last time session was seen in poll (for stale detection) - no default, app always provides
    durationMs: integer('duration_ms'), // Actual watch duration (excludes paused time)
    totalDurationMs: integer('total_duration_ms'), // Total media length
    progressMs: integer('progress_ms'), // Current playback position
    // Pause tracking - accumulates total paused time across pause/resume cycles
    lastPausedAt: timestamp('last_paused_at', { withTimezone: true }), // When current pause started
    pausedDurationMs: integer('paused_duration_ms').notNull().default(0), // Accumulated pause time
    // Session grouping for "resume where left off" tracking
    referenceId: uuid('reference_id'), // Links to first session in resume chain
    watched: boolean('watched').notNull().default(false), // True if user watched 85%+
    forceStopped: boolean('force_stopped').notNull().default(false), // True if session was force-stopped due to inactivity
    shortSession: boolean('short_session').notNull().default(false), // True if session duration < MIN_PLAY_TIME_MS (120s)
    ipAddress: varchar('ip_address', { length: 45 }).notNull(), // 45 chars fits full IPv6 (incl. IPv4-mapped form)
    geoCity: varchar('geo_city', { length: 255 }),
    geoRegion: varchar('geo_region', { length: 255 }), // State/province/subdivision
    geoCountry: varchar('geo_country', { length: 100 }),
    geoLat: real('geo_lat'),
    geoLon: real('geo_lon'),
    playerName: varchar('player_name', { length: 255 }), // Player title/friendly name
    deviceId: varchar('device_id', { length: 255 }), // Machine identifier (unique device UUID)
    product: varchar('product', { length: 255 }), // Product name (e.g., "Plex for iOS")
    device: varchar('device', { length: 255 }), // Device type (e.g., "iPhone", "Android TV")
    platform: varchar('platform', { length: 100 }),
    quality: varchar('quality', { length: 100 }),
    isTranscode: boolean('is_transcode').notNull().default(false),
    bitrate: integer('bitrate'),
  },
  (table) => [
    index('sessions_server_user_time_idx').on(table.serverUserId, table.startedAt),
    index('sessions_server_time_idx').on(table.serverId, table.startedAt),
    index('sessions_state_idx').on(table.state),
    index('sessions_external_session_idx').on(table.serverId, table.externalSessionId),
    index('sessions_device_idx').on(table.serverUserId, table.deviceId),
    index('sessions_reference_idx').on(table.referenceId), // For session grouping queries
    index('sessions_server_user_rating_idx').on(table.serverUserId, table.ratingKey), // For resume detection
    // Index for Tautulli import deduplication fallback (when externalSessionId not found)
    index('sessions_dedup_fallback_idx').on(
      table.serverId,
      table.serverUserId,
      table.ratingKey,
      table.startedAt
    ),
    // Indexes for stats queries
    index('sessions_geo_idx').on(table.geoLat, table.geoLon), // For /stats/locations basic geo lookup
    index('sessions_geo_time_idx').on(table.startedAt, table.geoLat, table.geoLon), // For time-filtered map queries
    index('sessions_media_type_idx').on(table.mediaType), // For media type aggregations
    index('sessions_transcode_idx').on(table.isTranscode), // For quality stats
    index('sessions_platform_idx').on(table.platform), // For platform stats
    // Indexes for top-content queries (movies and shows aggregation)
    index('sessions_top_movies_idx').on(table.mediaType, table.mediaTitle, table.year), // For top movies GROUP BY
    index('sessions_top_shows_idx').on(table.mediaType, table.grandparentTitle), // For top shows GROUP BY series
    // Index for stale session detection (active sessions that haven't been seen recently)
    index('sessions_stale_detection_idx').on(table.lastSeenAt, table.stoppedAt),
  ]
);
/**
 * Sharing detection rules evaluated by the poller.
 *
 * `params` is a rule-type-specific JSON payload (thresholds etc.) — its shape
 * is not validated at the DB layer, only typed as Record<string, unknown>.
 */
export const rules = pgTable(
  'rules',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    name: varchar('name', { length: 100 }).notNull(),
    type: varchar('type', { length: 50 }).notNull().$type<(typeof ruleTypeEnum)[number]>(),
    params: jsonb('params').notNull().$type<Record<string, unknown>>(),
    // Nullable: null = global rule, set = specific server user
    serverUserId: uuid('server_user_id').references(() => serverUsers.id, { onDelete: 'cascade' }),
    isActive: boolean('is_active').notNull().default(true),
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    index('rules_active_idx').on(table.isActive),
    index('rules_server_user_id_idx').on(table.serverUserId),
  ]
);
/**
 * Rule violations raised by the detection engine.
 *
 * NOTE(review): sessionId declares an FK to sessions.id, but terminationLogs
 * deliberately omits the same FK because sessions becomes a TimescaleDB
 * hypertable (hypertables can't be FK-referenced on their primary key).
 * Confirm the migration drops/handles this constraint before conversion.
 */
export const violations = pgTable(
  'violations',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    ruleId: uuid('rule_id')
      .notNull()
      .references(() => rules.id, { onDelete: 'cascade' }),
    // Links to server_users for per-server tracking
    serverUserId: uuid('server_user_id')
      .notNull()
      .references(() => serverUsers.id, { onDelete: 'cascade' }),
    sessionId: uuid('session_id')
      .notNull()
      .references(() => sessions.id, { onDelete: 'cascade' }),
    severity: varchar('severity', { length: 20 })
      .notNull()
      .$type<(typeof violationSeverityEnum)[number]>(),
    data: jsonb('data').notNull().$type<Record<string, unknown>>(), // Rule-specific evidence payload
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    acknowledgedAt: timestamp('acknowledged_at', { withTimezone: true }), // Null until an admin acknowledges
  },
  (table) => [
    index('violations_server_user_id_idx').on(table.serverUserId),
    index('violations_rule_id_idx').on(table.ruleId),
    index('violations_created_at_idx').on(table.createdAt),
  ]
);
/**
 * Mobile pairing tokens (one-time use, expire after 15 minutes).
 * Only a SHA-256 hash of the token is stored, never the plaintext.
 */
export const mobileTokens = pgTable('mobile_tokens', {
  id: uuid('id').primaryKey().defaultRandom(),
  tokenHash: varchar('token_hash', { length: 64 }).notNull().unique(), // SHA-256 of trr_mob_xxx token
  expiresAt: timestamp('expires_at', { withTimezone: true }).notNull(),
  createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
  createdBy: uuid('created_by').references(() => users.id, { onDelete: 'cascade' }),
  usedAt: timestamp('used_at', { withTimezone: true }), // Set when token is used, null = unused
});
/**
 * Mobile sessions (paired devices). One row per paired app install; refresh
 * tokens are stored hashed (SHA-256), never in plaintext.
 */
export const mobileSessions = pgTable(
  'mobile_sessions',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    // Link to user identity for multi-user support
    userId: uuid('user_id')
      .notNull()
      .references(() => users.id, { onDelete: 'cascade' }),
    refreshTokenHash: varchar('refresh_token_hash', { length: 64 }).notNull().unique(), // SHA-256
    deviceName: varchar('device_name', { length: 100 }).notNull(),
    deviceId: varchar('device_id', { length: 100 }).notNull(),
    platform: varchar('platform', { length: 20 }).notNull().$type<'ios' | 'android'>(),
    expoPushToken: varchar('expo_push_token', { length: 255 }), // For push notifications
    deviceSecret: varchar('device_secret', { length: 64 }), // For push payload encryption (base64)
    lastSeenAt: timestamp('last_seen_at', { withTimezone: true }).notNull().defaultNow(),
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    index('mobile_sessions_user_idx').on(table.userId),
    index('mobile_sessions_device_id_idx').on(table.deviceId),
    index('mobile_sessions_refresh_token_idx').on(table.refreshTokenHash),
    index('mobile_sessions_expo_push_token_idx').on(table.expoPushToken),
  ]
);
/**
 * Notification preferences, one row per paired mobile device (1:1 with
 * mobile_sessions via the unique FK).
 */
export const notificationPreferences = pgTable(
  'notification_preferences',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    mobileSessionId: uuid('mobile_session_id')
      .notNull()
      .unique()
      .references(() => mobileSessions.id, { onDelete: 'cascade' }),
    // Global toggles
    pushEnabled: boolean('push_enabled').notNull().default(true),
    // Event type toggles
    onViolationDetected: boolean('on_violation_detected').notNull().default(true),
    onStreamStarted: boolean('on_stream_started').notNull().default(false),
    onStreamStopped: boolean('on_stream_stopped').notNull().default(false),
    onConcurrentStreams: boolean('on_concurrent_streams').notNull().default(true),
    onNewDevice: boolean('on_new_device').notNull().default(true),
    onTrustScoreChanged: boolean('on_trust_score_changed').notNull().default(false),
    onServerDown: boolean('on_server_down').notNull().default(true),
    onServerUp: boolean('on_server_up').notNull().default(true),
    // Severity filtering (violations only)
    // Numeric mapping mirrors violationSeverityEnum order: 1=low, 2=warning, 3=high
    violationMinSeverity: integer('violation_min_severity').notNull().default(1),
    violationRuleTypes: text('violation_rule_types').array().default([]), // Empty = all types
    // Rate limiting
    maxPerMinute: integer('max_per_minute').notNull().default(10),
    maxPerHour: integer('max_per_hour').notNull().default(60),
    // Quiet hours
    quietHoursEnabled: boolean('quiet_hours_enabled').notNull().default(false),
    quietHoursStart: varchar('quiet_hours_start', { length: 5 }), // HH:MM format (CHECK-validated below)
    quietHoursEnd: varchar('quiet_hours_end', { length: 5 }), // HH:MM format (CHECK-validated below)
    quietHoursTimezone: varchar('quiet_hours_timezone', { length: 50 }).default('UTC'),
    quietHoursOverrideCritical: boolean('quiet_hours_override_critical').notNull().default(true),
    // Timestamps
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    index('notification_prefs_mobile_session_idx').on(table.mobileSessionId),
    // Validate quiet hours format: HH:MM where HH is 00-23 and MM is 00-59
    check(
      'quiet_hours_start_format',
      sql`${table.quietHoursStart} IS NULL OR ${table.quietHoursStart} ~ '^([01][0-9]|2[0-3]):[0-5][0-9]$'`
    ),
    check(
      'quiet_hours_end_format',
      sql`${table.quietHoursEnd} IS NULL OR ${table.quietHoursEnd} ~ '^([01][0-9]|2[0-3]):[0-5][0-9]$'`
    ),
  ]
);
// Notification event type enum (value list for the routing table's event_type)
export const notificationEventTypeEnum = [
  'violation_detected',
  'stream_started',
  'stream_stopped',
  'concurrent_streams',
  'new_device',
  'trust_score_changed',
  'server_down',
  'server_up',
] as const;
/**
 * Notification channel routing configuration.
 * Controls which channels receive which event types (web admin configurable).
 * One row per event type, enforced by the unique event_type column.
 */
export const notificationChannelRouting = pgTable(
  'notification_channel_routing',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    eventType: varchar('event_type', { length: 50 })
      .notNull()
      .unique()
      .$type<(typeof notificationEventTypeEnum)[number]>(),
    // Channel toggles
    discordEnabled: boolean('discord_enabled').notNull().default(true),
    webhookEnabled: boolean('webhook_enabled').notNull().default(true),
    pushEnabled: boolean('push_enabled').notNull().default(true),
    webToastEnabled: boolean('web_toast_enabled').notNull().default(true),
    // Timestamps
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [index('notification_channel_routing_event_type_idx').on(table.eventType)]
);
// Termination trigger type enum
export const terminationTriggerEnum = ['manual', 'rule'] as const;
/**
 * Stream termination audit log.
 *
 * NOTE(review): trigger='manual' implies triggeredByUserId and trigger='rule'
 * implies ruleId, but neither is enforced with a CHECK — rows can have both
 * or neither set. Confirm application code maintains this invariant.
 */
export const terminationLogs = pgTable(
  'termination_logs',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    // What was terminated
    // Note: No FK constraint because sessions is a TimescaleDB hypertable
    // (hypertables don't support foreign key references to their primary key)
    // The relationship is maintained via Drizzle ORM relations
    sessionId: uuid('session_id').notNull(),
    serverId: uuid('server_id')
      .notNull()
      .references(() => servers.id, { onDelete: 'cascade' }),
    // The user whose stream was terminated
    serverUserId: uuid('server_user_id')
      .notNull()
      .references(() => serverUsers.id, { onDelete: 'cascade' }),
    // How it was triggered
    trigger: varchar('trigger', { length: 20 })
      .notNull()
      .$type<(typeof terminationTriggerEnum)[number]>(),
    // Who triggered it (for manual) - nullable for rule-triggered
    triggeredByUserId: uuid('triggered_by_user_id').references(() => users.id, {
      onDelete: 'set null',
    }),
    // What rule triggered it (for rule-triggered) - nullable for manual
    ruleId: uuid('rule_id').references(() => rules.id, { onDelete: 'set null' }),
    violationId: uuid('violation_id').references(() => violations.id, { onDelete: 'set null' }),
    // Message shown to user (Plex only)
    reason: text('reason'),
    // Result
    success: boolean('success').notNull(),
    errorMessage: text('error_message'), // If success=false
    // Timestamp
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    index('termination_logs_session_idx').on(table.sessionId),
    index('termination_logs_server_user_idx').on(table.serverUserId),
    index('termination_logs_triggered_by_idx').on(table.triggeredByUserId),
    index('termination_logs_rule_idx').on(table.ruleId),
    index('termination_logs_created_at_idx').on(table.createdAt),
  ]
);
// Unit system enum for display preferences
export const unitSystemEnum = ['metric', 'imperial'] as const;
/**
 * Application settings (single row, id = 1 by convention).
 *
 * NOTE(review): the singleton is enforced only by the id default — there is
 * no CHECK (id = 1), so additional rows are possible. Confirm upsert code
 * always targets id 1.
 */
export const settings = pgTable('settings', {
  id: integer('id').primaryKey().default(1),
  allowGuestAccess: boolean('allow_guest_access').notNull().default(false),
  // Display preferences
  unitSystem: varchar('unit_system', { length: 20 })
    .notNull()
    .$type<(typeof unitSystemEnum)[number]>()
    .default('metric'),
  discordWebhookUrl: text('discord_webhook_url'),
  customWebhookUrl: text('custom_webhook_url'),
  webhookFormat: text('webhook_format').$type<'json' | 'ntfy' | 'apprise'>(), // Format for custom webhook payloads
  ntfyTopic: text('ntfy_topic'), // Topic for ntfy notifications (required when webhookFormat is 'ntfy')
  // Poller settings
  pollerEnabled: boolean('poller_enabled').notNull().default(true),
  pollerIntervalMs: integer('poller_interval_ms').notNull().default(15000),
  // Tautulli integration
  tautulliUrl: text('tautulli_url'),
  tautulliApiKey: text('tautulli_api_key'), // Encrypted
  // Network/access settings for self-hosted deployments
  externalUrl: text('external_url'), // Public URL for mobile/external access (e.g., https://tracearr.example.com)
  basePath: varchar('base_path', { length: 100 }).notNull().default(''), // For subfolder proxies (e.g., /tracearr)
  trustProxy: boolean('trust_proxy').notNull().default(false), // Trust X-Forwarded-* headers from reverse proxy
  // Mobile access
  mobileEnabled: boolean('mobile_enabled').notNull().default(false),
  // Authentication settings
  primaryAuthMethod: varchar('primary_auth_method', { length: 20 })
    .$type<'jellyfin' | 'local'>()
    .notNull()
    .default('local'), // Default to local auth
  updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
});
// ============================================================================
// Relations
// ============================================================================
// Drizzle relational-query metadata only — these power db.query.* joins and
// do not create database constraints (FKs live in the table definitions).
export const serversRelations = relations(servers, ({ many }) => ({
  serverUsers: many(serverUsers),
  sessions: many(sessions),
}));
export const usersRelations = relations(users, ({ many }) => ({
  serverUsers: many(serverUsers),
  mobileSessions: many(mobileSessions),
  mobileTokens: many(mobileTokens),
}));
export const serverUsersRelations = relations(serverUsers, ({ one, many }) => ({
  user: one(users, {
    fields: [serverUsers.userId],
    references: [users.id],
  }),
  server: one(servers, {
    fields: [serverUsers.serverId],
    references: [servers.id],
  }),
  sessions: many(sessions),
  rules: many(rules),
  violations: many(violations),
}));
export const sessionsRelations = relations(sessions, ({ one, many }) => ({
  server: one(servers, {
    fields: [sessions.serverId],
    references: [servers.id],
  }),
  serverUser: one(serverUsers, {
    fields: [sessions.serverUserId],
    references: [serverUsers.id],
  }),
  violations: many(violations),
}));
export const rulesRelations = relations(rules, ({ one, many }) => ({
  serverUser: one(serverUsers, {
    fields: [rules.serverUserId],
    references: [serverUsers.id],
  }),
  violations: many(violations),
}));
export const violationsRelations = relations(violations, ({ one }) => ({
  rule: one(rules, {
    fields: [violations.ruleId],
    references: [rules.id],
  }),
  serverUser: one(serverUsers, {
    fields: [violations.serverUserId],
    references: [serverUsers.id],
  }),
  session: one(sessions, {
    fields: [violations.sessionId],
    references: [sessions.id],
  }),
}));
export const mobileSessionsRelations = relations(mobileSessions, ({ one }) => ({
  user: one(users, {
    fields: [mobileSessions.userId],
    references: [users.id],
  }),
  notificationPreferences: one(notificationPreferences, {
    fields: [mobileSessions.id],
    references: [notificationPreferences.mobileSessionId],
  }),
}));
export const notificationPreferencesRelations = relations(notificationPreferences, ({ one }) => ({
  mobileSession: one(mobileSessions, {
    fields: [notificationPreferences.mobileSessionId],
    references: [mobileSessions.id],
  }),
}));
export const mobileTokensRelations = relations(mobileTokens, ({ one }) => ({
  createdByUser: one(users, {
    fields: [mobileTokens.createdBy],
    references: [users.id],
  }),
}));
// terminationLogs has no DB-level FK to sessions (hypertable); this relation
// is the only link between the two tables.
export const terminationLogsRelations = relations(terminationLogs, ({ one }) => ({
  session: one(sessions, {
    fields: [terminationLogs.sessionId],
    references: [sessions.id],
  }),
  server: one(servers, {
    fields: [terminationLogs.serverId],
    references: [servers.id],
  }),
  serverUser: one(serverUsers, {
    fields: [terminationLogs.serverUserId],
    references: [serverUsers.id],
  }),
  triggeredByUser: one(users, {
    fields: [terminationLogs.triggeredByUserId],
    references: [users.id],
  }),
  rule: one(rules, {
    fields: [terminationLogs.ruleId],
    references: [rules.id],
  }),
  violation: one(violations, {
    fields: [terminationLogs.violationId],
    references: [violations.id],
  }),
}));

View File

@@ -0,0 +1,649 @@
/**
* TimescaleDB initialization and setup
*
* This module ensures TimescaleDB features are properly configured for the sessions table.
* It runs on every server startup and is idempotent - safe to run multiple times.
*/
import { db } from './client.js';
import { sql } from 'drizzle-orm';
/** Snapshot of TimescaleDB configuration state for the sessions table. */
export interface TimescaleStatus {
  /** True when the `timescaledb` extension row exists in pg_extension. */
  extensionInstalled: boolean;
  /** True when `sessions` has been converted to a hypertable. */
  sessionsIsHypertable: boolean;
  /** True when compression is enabled on the sessions hypertable. */
  compressionEnabled: boolean;
  /** Names of continuous aggregates built on top of `sessions`. */
  continuousAggregates: string[];
  /** Number of hypertable chunks currently backing `sessions`. */
  chunkCount: number;
}
/**
 * Determine whether the TimescaleDB extension is installed, by probing
 * pg_extension. Any query failure is treated as "not installed".
 */
async function isTimescaleInstalled(): Promise<boolean> {
  try {
    const check = await db.execute(sql`
      SELECT EXISTS(
        SELECT 1 FROM pg_extension WHERE extname = 'timescaledb'
      ) as installed
    `);
    const row = check.rows[0] as { installed: boolean } | undefined;
    return row?.installed ?? false;
  } catch {
    return false;
  }
}
/**
 * Report whether the `sessions` table has already been converted into a
 * TimescaleDB hypertable.
 */
async function isSessionsHypertable(): Promise<boolean> {
  try {
    const check = await db.execute(sql`
      SELECT EXISTS(
        SELECT 1 FROM timescaledb_information.hypertables
        WHERE hypertable_name = 'sessions'
      ) as is_hypertable
    `);
    const row = check.rows[0] as { is_hypertable: boolean } | undefined;
    return row?.is_hypertable ?? false;
  } catch {
    // If timescaledb_information doesn't exist, extension isn't installed
    return false;
  }
}
/**
 * List the names of continuous aggregates defined over the `sessions`
 * hypertable. Returns an empty list when the catalog is unavailable.
 */
async function getContinuousAggregates(): Promise<string[]> {
  try {
    const result = await db.execute(sql`
      SELECT view_name
      FROM timescaledb_information.continuous_aggregates
      WHERE hypertable_name = 'sessions'
    `);
    const rows = result.rows as { view_name: string }[];
    return rows.map((row) => row.view_name);
  } catch {
    return [];
  }
}
/**
 * Report whether native compression has been enabled on the `sessions`
 * hypertable. Falls back to false on any catalog error.
 */
async function isCompressionEnabled(): Promise<boolean> {
  try {
    const result = await db.execute(sql`
      SELECT compression_enabled
      FROM timescaledb_information.hypertables
      WHERE hypertable_name = 'sessions'
    `);
    const [firstRow] = result.rows as { compression_enabled: boolean }[];
    return firstRow?.compression_enabled ?? false;
  } catch {
    return false;
  }
}
/**
 * Count the chunks currently backing the `sessions` hypertable.
 * Returns 0 when the catalog is unavailable.
 */
async function getChunkCount(): Promise<number> {
  try {
    const result = await db.execute(sql`
      SELECT count(*)::int as count
      FROM timescaledb_information.chunks
      WHERE hypertable_name = 'sessions'
    `);
    const [firstRow] = result.rows as { count: number }[];
    return firstRow?.count ?? 0;
  } catch {
    return 0;
  }
}
/**
 * Convert sessions table to hypertable
 * This is idempotent - if_not_exists ensures it won't fail if already a hypertable
 *
 * Order matters here: the primary key must include the partition column
 * (started_at) BEFORE create_hypertable runs, and the violations FK must be
 * dropped because foreign keys cannot reference a hypertable's rows.
 */
async function convertToHypertable(): Promise<void> {
  // First, we need to handle the primary key change
  // TimescaleDB requires the partition column (started_at) in the primary key
  // Check if we need to modify the primary key
  const pkResult = await db.execute(sql`
    SELECT constraint_name
    FROM information_schema.table_constraints
    WHERE table_name = 'sessions'
    AND constraint_type = 'PRIMARY KEY'
  `);
  const pkName = (pkResult.rows[0] as { constraint_name: string })?.constraint_name;
  // Check if started_at is already in the primary key
  const pkColsResult = await db.execute(sql`
    SELECT column_name
    FROM information_schema.key_column_usage
    WHERE table_name = 'sessions'
    AND constraint_name = ${pkName}
  `);
  const pkColumns = (pkColsResult.rows as { column_name: string }[]).map((r) => r.column_name);
  if (!pkColumns.includes('started_at')) {
    // Need to modify primary key for hypertable conversion
    // Drop FK constraint from violations if it exists
    await db.execute(sql`
      ALTER TABLE "violations" DROP CONSTRAINT IF EXISTS "violations_session_id_sessions_id_fk"
    `);
    // Drop existing primary key
    // sql.raw is used because constraint names are identifiers, not values;
    // pkName comes from information_schema, not user input.
    if (pkName) {
      await db.execute(sql.raw(`ALTER TABLE "sessions" DROP CONSTRAINT IF EXISTS "${pkName}"`));
    }
    // Add composite primary key
    await db.execute(sql`
      ALTER TABLE "sessions" ADD PRIMARY KEY ("id", "started_at")
    `);
    // Add index for violations session lookup (since we can't have FK to hypertable)
    await db.execute(sql`
      CREATE INDEX IF NOT EXISTS "violations_session_lookup_idx" ON "violations" ("session_id")
    `);
  }
  // Convert to hypertable
  await db.execute(sql`
    SELECT create_hypertable('sessions', 'started_at',
      chunk_time_interval => INTERVAL '7 days',
      migrate_data => true,
      if_not_exists => true
    )
  `);
  // Create expression indexes for COALESCE(reference_id, id) pattern
  // This pattern is used throughout the codebase for play grouping
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_play_id
    ON sessions ((COALESCE(reference_id, id)))
  `);
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_time_play_id
    ON sessions (started_at DESC, (COALESCE(reference_id, id)))
  `);
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_user_play_id
    ON sessions (server_user_id, (COALESCE(reference_id, id)))
  `);
}
/**
 * Create partial indexes for common filtered queries.
 * These reduce scan size by excluding irrelevant rows. All statements use
 * IF NOT EXISTS, so this function is safe to run on every startup.
 */
async function createPartialIndexes(): Promise<void> {
  const indexStatements = [
    // Partial index for geo queries (excludes NULL rows - ~20% savings)
    sql`
      CREATE INDEX IF NOT EXISTS idx_sessions_geo_partial
      ON sessions (geo_lat, geo_lon, started_at DESC)
      WHERE geo_lat IS NOT NULL AND geo_lon IS NOT NULL
    `,
    // Partial index for unacknowledged violations by user (hot path for user-specific alerts)
    sql`
      CREATE INDEX IF NOT EXISTS idx_violations_unacked_partial
      ON violations (server_user_id, created_at DESC)
      WHERE acknowledged_at IS NULL
    `,
    // Partial index for unacknowledged violations list (hot path for main violations list)
    // Optimized for: ORDER BY created_at DESC WHERE acknowledged_at IS NULL
    sql`
      CREATE INDEX IF NOT EXISTS idx_violations_unacked_list
      ON violations (created_at DESC)
      WHERE acknowledged_at IS NULL
    `,
    // Partial index for active/playing sessions
    sql`
      CREATE INDEX IF NOT EXISTS idx_sessions_active_partial
      ON sessions (server_id, server_user_id, started_at DESC)
      WHERE state = 'playing'
    `,
    // Partial index for transcoded sessions (quality analysis)
    sql`
      CREATE INDEX IF NOT EXISTS idx_sessions_transcode_partial
      ON sessions (started_at DESC, quality, bitrate)
      WHERE is_transcode = true
    `,
  ];
  // Execute sequentially, preserving the original one-at-a-time DDL order.
  for (const statement of indexStatements) {
    await db.execute(statement);
  }
}
/**
 * Create optimized indexes for top content queries.
 * Time-prefixed indexes enable efficient time-filtered aggregations.
 * All statements are idempotent via IF NOT EXISTS.
 */
async function createContentIndexes(): Promise<void> {
  const indexStatements = [
    // Time-prefixed index for media title queries
    sql`
      CREATE INDEX IF NOT EXISTS idx_sessions_media_time
      ON sessions (started_at DESC, media_type, media_title)
    `,
    // Time-prefixed index for show/episode queries (excludes NULLs)
    sql`
      CREATE INDEX IF NOT EXISTS idx_sessions_show_time
      ON sessions (started_at DESC, grandparent_title, season_number, episode_number)
      WHERE grandparent_title IS NOT NULL
    `,
    // Covering index for top content query (includes frequently accessed columns)
    sql`
      CREATE INDEX IF NOT EXISTS idx_sessions_top_content_covering
      ON sessions (started_at DESC, media_title, media_type)
      INCLUDE (duration_ms, server_user_id)
    `,
    // Device tracking index for device velocity rule
    sql`
      CREATE INDEX IF NOT EXISTS idx_sessions_device_tracking
      ON sessions (server_user_id, started_at DESC, device_id, ip_address)
    `,
  ];
  // Execute sequentially, preserving the original one-at-a-time DDL order.
  for (const statement of indexStatements) {
    await db.execute(statement);
  }
}
/**
 * Determine whether the TimescaleDB Toolkit extension is installed
 * (provides HyperLogLog aggregates). Errors are treated as "not installed".
 */
async function isToolkitInstalled(): Promise<boolean> {
  try {
    const check = await db.execute(sql`
      SELECT EXISTS(
        SELECT 1 FROM pg_extension WHERE extname = 'timescaledb_toolkit'
      ) as installed
    `);
    const row = check.rows[0] as { installed: boolean } | undefined;
    return row?.installed ?? false;
  } catch {
    return false;
  }
}
/**
 * Determine whether the TimescaleDB Toolkit extension could be installed
 * on this PostgreSQL system (present in pg_available_extensions).
 */
async function isToolkitAvailableOnSystem(): Promise<boolean> {
  try {
    const check = await db.execute(sql`
      SELECT EXISTS(
        SELECT 1 FROM pg_available_extensions WHERE name = 'timescaledb_toolkit'
      ) as available
    `);
    const row = check.rows[0] as { available: boolean } | undefined;
    return row?.available ?? false;
  } catch {
    return false;
  }
}
/**
 * Create continuous aggregates for dashboard performance
 *
 * Uses HyperLogLog from TimescaleDB Toolkit for approximate distinct counts
 * (99.5% accuracy) since TimescaleDB doesn't support COUNT(DISTINCT) in
 * continuous aggregates. Falls back to COUNT(*) if Toolkit unavailable.
 *
 * All views are created WITH NO DATA; refresh policies (or a manual
 * refreshAggregates call) populate them afterwards.
 */
async function createContinuousAggregates(): Promise<void> {
  const hasToolkit = await isToolkitInstalled();
  // Drop old unused aggregates
  // daily_plays_by_platform: platform stats use prepared statement instead
  // daily_play_patterns/hourly_play_patterns: never wired up, missing server_id for multi-server filtering
  await db.execute(sql`DROP MATERIALIZED VIEW IF EXISTS daily_plays_by_platform CASCADE`);
  await db.execute(sql`DROP MATERIALIZED VIEW IF EXISTS daily_play_patterns CASCADE`);
  await db.execute(sql`DROP MATERIALIZED VIEW IF EXISTS hourly_play_patterns CASCADE`);
  if (hasToolkit) {
    // Use HyperLogLog for accurate distinct play counting
    // hyperloglog(32768, ...) gives ~0.4% error rate
    // materialized_only = false lets queries see not-yet-materialized data.
    // Daily plays by user with HyperLogLog
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_plays_by_user
      WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        server_user_id,
        hyperloglog(32768, COALESCE(reference_id, id)) AS plays_hll,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms
      FROM sessions
      GROUP BY day, server_user_id
      WITH NO DATA
    `);
    // Daily plays by server with HyperLogLog
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_plays_by_server
      WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        server_id,
        hyperloglog(32768, COALESCE(reference_id, id)) AS plays_hll,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms
      FROM sessions
      GROUP BY day, server_id
      WITH NO DATA
    `);
    // Daily stats summary (main dashboard aggregate) with HyperLogLog
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_stats_summary
      WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        hyperloglog(32768, COALESCE(reference_id, id)) AS plays_hll,
        hyperloglog(32768, server_user_id) AS users_hll,
        hyperloglog(32768, server_id) AS servers_hll,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms,
        AVG(COALESCE(duration_ms, 0))::bigint AS avg_duration_ms
      FROM sessions
      GROUP BY day
      WITH NO DATA
    `);
    // Hourly concurrent streams (used by /concurrent endpoint)
    // Note: This uses COUNT(*) since concurrent streams isn't about unique plays
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS hourly_concurrent_streams
      WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
      SELECT
        time_bucket('1 hour', started_at) AS hour,
        server_id,
        COUNT(*) AS stream_count
      FROM sessions
      WHERE state IN ('playing', 'paused')
      GROUP BY hour, server_id
      WITH NO DATA
    `);
  } else {
    // Fallback: Standard aggregates without HyperLogLog
    // Note: These use COUNT(*) which overcounts resumed sessions
    console.warn('TimescaleDB Toolkit not available - using COUNT(*) aggregates');
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_plays_by_user
      WITH (timescaledb.continuous) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        server_user_id,
        COUNT(*) AS play_count,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms
      FROM sessions
      GROUP BY day, server_user_id
      WITH NO DATA
    `);
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_plays_by_server
      WITH (timescaledb.continuous) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        server_id,
        COUNT(*) AS play_count,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms
      FROM sessions
      GROUP BY day, server_id
      WITH NO DATA
    `);
    // NOTE(review): this branch uses COUNT(DISTINCT ...), which TimescaleDB
    // does not support in continuous aggregates per the module header comment
    // above — confirm this view creation succeeds without Toolkit installed.
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_stats_summary
      WITH (timescaledb.continuous) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        COUNT(DISTINCT COALESCE(reference_id, id)) AS play_count,
        COUNT(DISTINCT server_user_id) AS user_count,
        COUNT(DISTINCT server_id) AS server_count,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms,
        AVG(COALESCE(duration_ms, 0))::bigint AS avg_duration_ms
      FROM sessions
      GROUP BY day
      WITH NO DATA
    `);
    // Hourly concurrent streams (used by /concurrent endpoint)
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS hourly_concurrent_streams
      WITH (timescaledb.continuous) AS
      SELECT
        time_bucket('1 hour', started_at) AS hour,
        server_id,
        COUNT(*) AS stream_count
      FROM sessions
      WHERE state IN ('playing', 'paused')
      GROUP BY hour, server_id
      WITH NO DATA
    `);
  }
}
/**
 * Set up refresh policies for continuous aggregates.
 * Daily aggregates refresh a 3-day window; the hourly aggregate a 1-day
 * window. All refresh every 5 minutes with a 1 hour end lag so the
 * dashboard stays near real-time. Idempotent via if_not_exists.
 */
async function setupRefreshPolicies(): Promise<void> {
  const policyStatements = [
    sql`
      SELECT add_continuous_aggregate_policy('daily_plays_by_user',
        start_offset => INTERVAL '3 days',
        end_offset => INTERVAL '1 hour',
        schedule_interval => INTERVAL '5 minutes',
        if_not_exists => true
      )
    `,
    sql`
      SELECT add_continuous_aggregate_policy('daily_plays_by_server',
        start_offset => INTERVAL '3 days',
        end_offset => INTERVAL '1 hour',
        schedule_interval => INTERVAL '5 minutes',
        if_not_exists => true
      )
    `,
    sql`
      SELECT add_continuous_aggregate_policy('daily_stats_summary',
        start_offset => INTERVAL '3 days',
        end_offset => INTERVAL '1 hour',
        schedule_interval => INTERVAL '5 minutes',
        if_not_exists => true
      )
    `,
    sql`
      SELECT add_continuous_aggregate_policy('hourly_concurrent_streams',
        start_offset => INTERVAL '1 day',
        end_offset => INTERVAL '1 hour',
        schedule_interval => INTERVAL '5 minutes',
        if_not_exists => true
      )
    `,
  ];
  // Apply one policy at a time, matching the original execution order.
  for (const statement of policyStatements) {
    await db.execute(statement);
  }
}
/**
 * Enable compression on the sessions hypertable and schedule a policy to
 * compress chunks once they are older than 7 days.
 */
async function enableCompression(): Promise<void> {
  // Turn on compression, segmenting by the columns most queries filter on.
  const configureCompression = sql`
    ALTER TABLE sessions SET (
      timescaledb.compress,
      timescaledb.compress_segmentby = 'server_user_id, server_id'
    )
  `;
  // Background policy: compress chunks older than 7 days (idempotent).
  const compressionPolicy = sql`
    SELECT add_compression_policy('sessions', INTERVAL '7 days', if_not_exists => true)
  `;
  await db.execute(configureCompression);
  await db.execute(compressionPolicy);
}
/**
 * Manually refresh all continuous aggregates.
 * Call this after bulk data imports (e.g., Tautulli import) to make the data
 * immediately available. No-op when the TimescaleDB extension is absent.
 */
export async function refreshAggregates(): Promise<void> {
  if (!(await isTimescaleInstalled())) {
    return;
  }
  // Aggregate names come from the timescaledb_information catalog, so the
  // sql.raw interpolation below only ever sees server-generated identifiers.
  for (const aggregate of await getContinuousAggregates()) {
    try {
      // NULL bounds request a full refresh of the aggregate.
      const refreshCall = `CALL refresh_continuous_aggregate('${aggregate}', NULL, NULL)`;
      await db.execute(sql.raw(refreshCall));
    } catch (err) {
      // Log but don't fail - aggregate might not have data yet
      console.warn(`Failed to refresh aggregate ${aggregate}:`, err);
    }
  }
}
/**
* Get current TimescaleDB status
*/
export async function getTimescaleStatus(): Promise<TimescaleStatus> {
const extensionInstalled = await isTimescaleInstalled();
if (!extensionInstalled) {
return {
extensionInstalled: false,
sessionsIsHypertable: false,
compressionEnabled: false,
continuousAggregates: [],
chunkCount: 0,
};
}
return {
extensionInstalled: true,
sessionsIsHypertable: await isSessionsHypertable(),
compressionEnabled: await isCompressionEnabled(),
continuousAggregates: await getContinuousAggregates(),
chunkCount: await getChunkCount(),
};
}
/**
 * Initialize TimescaleDB for the sessions table
 *
 * This function is idempotent and safe to run on:
 * - Fresh installs (sets everything up)
 * - Existing installs with TimescaleDB already configured (no-op)
 * - Partially configured installs (completes setup)
 * - Installs without TimescaleDB extension (graceful skip)
 *
 * @returns success flag (true even when the extension is absent), the final
 *   TimescaleStatus snapshot, and a human-readable list of actions taken —
 *   the caller logs these at startup.
 */
export async function initTimescaleDB(): Promise<{
  success: boolean;
  status: TimescaleStatus;
  actions: string[];
}> {
  const actions: string[] = [];
  // Check if TimescaleDB extension is available
  const hasExtension = await isTimescaleInstalled();
  if (!hasExtension) {
    return {
      success: true, // Not a failure - just no TimescaleDB
      status: {
        extensionInstalled: false,
        sessionsIsHypertable: false,
        compressionEnabled: false,
        continuousAggregates: [],
        chunkCount: 0,
      },
      actions: ['TimescaleDB extension not installed - skipping setup'],
    };
  }
  actions.push('TimescaleDB extension found');
  // Enable TimescaleDB Toolkit for HyperLogLog (approximate distinct counts)
  // Check if available first to avoid noisy PostgreSQL errors in logs
  const toolkitAvailable = await isToolkitAvailableOnSystem();
  if (toolkitAvailable) {
    const toolkitInstalled = await isToolkitInstalled();
    if (!toolkitInstalled) {
      await db.execute(sql`CREATE EXTENSION IF NOT EXISTS timescaledb_toolkit`);
      actions.push('TimescaleDB Toolkit extension enabled');
    } else {
      actions.push('TimescaleDB Toolkit extension already enabled');
    }
  } else {
    actions.push('TimescaleDB Toolkit not available (optional - using standard aggregates)');
  }
  // Check if sessions is already a hypertable
  // (conversion also rewrites the primary key - see convertToHypertable)
  const isHypertable = await isSessionsHypertable();
  if (!isHypertable) {
    await convertToHypertable();
    actions.push('Converted sessions table to hypertable');
  } else {
    actions.push('Sessions already a hypertable');
  }
  // Check and create continuous aggregates
  // If ANY expected aggregate is missing, the full create + policy pass runs;
  // the CREATE statements are IF NOT EXISTS so existing views are untouched.
  const existingAggregates = await getContinuousAggregates();
  const expectedAggregates = [
    'daily_plays_by_user',
    'daily_plays_by_server',
    'daily_stats_summary',
    'hourly_concurrent_streams',
  ];
  const missingAggregates = expectedAggregates.filter(
    (agg) => !existingAggregates.includes(agg)
  );
  if (missingAggregates.length > 0) {
    await createContinuousAggregates();
    await setupRefreshPolicies();
    actions.push(`Created continuous aggregates: ${missingAggregates.join(', ')}`);
  } else {
    actions.push('All continuous aggregates exist');
  }
  // Check and enable compression
  const hasCompression = await isCompressionEnabled();
  if (!hasCompression) {
    await enableCompression();
    actions.push('Enabled compression on sessions');
  } else {
    actions.push('Compression already enabled');
  }
  // Create partial indexes for optimized filtered queries
  // (best-effort: index creation failures must not block startup)
  try {
    await createPartialIndexes();
    actions.push('Created partial indexes (geo, violations, active, transcode)');
  } catch (err) {
    console.warn('Failed to create some partial indexes:', err);
    actions.push('Partial indexes: some may already exist');
  }
  // Create content and device tracking indexes (best-effort, as above)
  try {
    await createContentIndexes();
    actions.push('Created content and device tracking indexes');
  } catch (err) {
    console.warn('Failed to create some content indexes:', err);
    actions.push('Content indexes: some may already exist');
  }
  // Get final status
  const status = await getTimescaleStatus();
  return {
    success: true,
    status,
    actions,
  };
}

458
apps/server/src/index.ts Normal file
View File

@@ -0,0 +1,458 @@
import { fileURLToPath } from 'node:url';
import { dirname, resolve } from 'node:path';
import { config } from 'dotenv';
import Fastify from 'fastify';
import cors from '@fastify/cors';
import helmet from '@fastify/helmet';
import sensible from '@fastify/sensible';
import cookie from '@fastify/cookie';
import rateLimit from '@fastify/rate-limit';
import fastifyStatic from '@fastify/static';
import { existsSync } from 'node:fs';
import { Redis } from 'ioredis';
import { API_BASE_PATH, REDIS_KEYS, WS_EVENTS } from '@tracearr/shared';
// ESM has no __filename/__dirname globals; derive them from import.meta.url.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Project root directory (apps/server/src -> project root)
const PROJECT_ROOT = resolve(__dirname, '../../..');
// Load .env from project root
// NOTE(review): ESM imports are evaluated before this statement runs, so
// modules imported above/below must not read env vars at import time — confirm.
config({ path: resolve(PROJECT_ROOT, '.env') });
// GeoIP database path (in project root/data)
const GEOIP_DB_PATH = resolve(PROJECT_ROOT, 'data/GeoLite2-City.mmdb');
// Migrations path (relative to compiled output in production, source in dev)
const MIGRATIONS_PATH = resolve(__dirname, '../src/db/migrations');
import type { ActiveSession, ViolationWithDetails, DashboardStats, TautulliImportProgress } from '@tracearr/shared';
import authPlugin from './plugins/auth.js';
import redisPlugin from './plugins/redis.js';
import { authRoutes } from './routes/auth/index.js';
import { setupRoutes } from './routes/setup.js';
import { serverRoutes } from './routes/servers.js';
import { userRoutes } from './routes/users/index.js';
import { sessionRoutes } from './routes/sessions.js';
import { ruleRoutes } from './routes/rules.js';
import { violationRoutes } from './routes/violations.js';
import { statsRoutes } from './routes/stats/index.js';
import { settingsRoutes } from './routes/settings.js';
import { importRoutes } from './routes/import.js';
import { imageRoutes } from './routes/images.js';
import { debugRoutes } from './routes/debug.js';
import { mobileRoutes } from './routes/mobile.js';
import { notificationPreferencesRoutes } from './routes/notificationPreferences.js';
import { channelRoutingRoutes } from './routes/channelRouting.js';
import { getPollerSettings, getNetworkSettings } from './routes/settings.js';
import { initializeEncryption, migrateToken, looksEncrypted } from './utils/crypto.js';
import { geoipService } from './services/geoip.js';
import { createCacheService, createPubSubService } from './services/cache.js';
import { initializePoller, startPoller, stopPoller } from './jobs/poller/index.js';
import { sseManager } from './services/sseManager.js';
import { initializeSSEProcessor, startSSEProcessor, stopSSEProcessor } from './jobs/sseProcessor.js';
import { initializeWebSocket, broadcastToSessions } from './websocket/index.js';
import {
initNotificationQueue,
startNotificationWorker,
shutdownNotificationQueue,
} from './jobs/notificationQueue.js';
import {
initImportQueue,
startImportWorker,
shutdownImportQueue,
} from './jobs/importQueue.js';
import { initPushRateLimiter } from './services/pushRateLimiter.js';
import { db, runMigrations } from './db/client.js';
import { initTimescaleDB, getTimescaleStatus } from './db/timescale.js';
import { sql, eq } from 'drizzle-orm';
import { servers } from './db/schema.js';
// HTTP listen port, parsed as a base-10 integer (defaults to 3000).
const PORT = parseInt(process.env.PORT ?? '3000', 10);
// Bind address; 0.0.0.0 listens on all interfaces by default.
const HOST = process.env.HOST ?? '0.0.0.0';
/**
 * Build and fully wire the Fastify application.
 *
 * Startup sequence (order matters): run DB migrations, initialize TimescaleDB,
 * migrate legacy encrypted server tokens, load GeoIP, register security/utility
 * plugins, create Redis-backed services and background queues, then register
 * routes and (in production) static frontend serving.
 *
 * @param options.trustProxy - Trust X-Forwarded-* headers from a reverse proxy;
 *   falls back to the TRUST_PROXY env var when omitted.
 * @returns The configured Fastify instance (not yet listening).
 */
async function buildApp(options: { trustProxy?: boolean } = {}) {
  const app = Fastify({
    logger: {
      level: process.env.LOG_LEVEL ?? 'info',
      transport:
        process.env.NODE_ENV === 'development'
          ? { target: 'pino-pretty', options: { colorize: true } }
          : undefined,
    },
    // Trust proxy if enabled in settings or via env var
    // This respects X-Forwarded-For, X-Forwarded-Proto headers from reverse proxies
    trustProxy: options.trustProxy ?? process.env.TRUST_PROXY === 'true',
  });
  // Run database migrations (fatal on failure - schema must be current)
  try {
    app.log.info('Running database migrations...');
    await runMigrations(MIGRATIONS_PATH);
    app.log.info('Database migrations complete');
  } catch (err) {
    app.log.error({ err }, 'Failed to run database migrations');
    throw err;
  }
  // Initialize TimescaleDB features (hypertable, compression, aggregates)
  try {
    app.log.info('Initializing TimescaleDB...');
    const tsResult = await initTimescaleDB();
    for (const action of tsResult.actions) {
      app.log.info(`  TimescaleDB: ${action}`);
    }
    if (tsResult.status.sessionsIsHypertable) {
      app.log.info(
        `TimescaleDB ready: ${tsResult.status.chunkCount} chunks, ` +
          `compression=${tsResult.status.compressionEnabled}, ` +
          `aggregates=${tsResult.status.continuousAggregates.length}`
      );
    } else if (!tsResult.status.extensionInstalled) {
      app.log.warn('TimescaleDB extension not installed - running without time-series optimization');
    }
  } catch (err) {
    app.log.error({ err }, 'Failed to initialize TimescaleDB - continuing without optimization');
    // Don't throw - app can still work without TimescaleDB features
  }
  // Initialize encryption (optional - only needed for migrating existing encrypted tokens)
  const encryptionAvailable = initializeEncryption();
  if (encryptionAvailable) {
    app.log.info('Encryption key available for token migration');
  }
  // Migrate any encrypted tokens to plain text
  try {
    const allServers = await db.select({ id: servers.id, token: servers.token }).from(servers);
    let migrated = 0;
    let failed = 0;
    for (const server of allServers) {
      if (looksEncrypted(server.token)) {
        const result = migrateToken(server.token);
        if (result.wasEncrypted) {
          await db.update(servers).set({ token: result.plainText }).where(eq(servers.id, server.id));
          migrated++;
        } else {
          // Looks encrypted but couldn't decrypt - always warn regardless of key availability
          app.log.warn(
            { serverId: server.id, hasEncryptionKey: encryptionAvailable },
            'Server token appears encrypted but could not be decrypted. ' +
              (encryptionAvailable
                ? 'The encryption key may not match. '
                : 'No ENCRYPTION_KEY provided. ') +
              'You may need to re-add this server.'
          );
          failed++;
        }
      }
    }
    if (migrated > 0) {
      app.log.info(`Migrated ${migrated} server token(s) from encrypted to plain text storage`);
    }
    if (failed > 0) {
      app.log.warn(
        `${failed} server(s) have tokens that could not be decrypted. ` +
          'These servers will need to be re-added.'
      );
    }
  } catch (err) {
    app.log.error({ err }, 'Failed to migrate encrypted tokens');
    // Don't throw - let the app start, individual servers will fail gracefully
  }
  // Initialize GeoIP service (optional - graceful degradation)
  await geoipService.initialize(GEOIP_DB_PATH);
  if (geoipService.hasDatabase()) {
    app.log.info('GeoIP database loaded');
  } else {
    app.log.warn('GeoIP database not available - location features disabled');
  }
  // Security plugins - relaxed for HTTP-only deployments
  await app.register(helmet, {
    contentSecurityPolicy: false,
    crossOriginOpenerPolicy: false,
    crossOriginEmbedderPolicy: false,
    originAgentCluster: false,
  });
  await app.register(cors, {
    origin: process.env.CORS_ORIGIN || true,
    credentials: true,
  });
  await app.register(rateLimit, {
    max: 1000,
    timeWindow: '1 minute',
  });
  // Utility plugins
  await app.register(sensible);
  await app.register(cookie, {
    secret: process.env.COOKIE_SECRET,
  });
  // Redis plugin (exposes app.redis)
  await app.register(redisPlugin);
  // Auth plugin (depends on cookie)
  await app.register(authPlugin);
  // Create cache and pubsub services
  // A dedicated Redis connection is used for pub/sub since a subscribing
  // connection cannot issue regular commands.
  const redisUrl = process.env.REDIS_URL ?? 'redis://localhost:6379';
  const pubSubRedis = new Redis(redisUrl);
  const cacheService = createCacheService(app.redis);
  const pubSubService = createPubSubService(app.redis, pubSubRedis);
  // Initialize push notification rate limiter (uses Redis for sliding window counters)
  initPushRateLimiter(app.redis);
  app.log.info('Push notification rate limiter initialized');
  // Initialize notification queue (uses Redis for job storage)
  try {
    initNotificationQueue(redisUrl);
    startNotificationWorker();
    app.log.info('Notification queue initialized');
  } catch (err) {
    app.log.error({ err }, 'Failed to initialize notification queue');
    // Don't throw - notifications are non-critical
  }
  // Initialize import queue (uses Redis for job storage)
  try {
    initImportQueue(redisUrl);
    startImportWorker();
    app.log.info('Import queue initialized');
  } catch (err) {
    app.log.error({ err }, 'Failed to initialize import queue');
    // Don't throw - imports can fall back to direct execution
  }
  // Initialize poller with cache services and Redis client
  initializePoller(cacheService, pubSubService, app.redis);
  // Initialize SSE manager and processor for real-time Plex updates
  try {
    await sseManager.initialize(cacheService, pubSubService);
    initializeSSEProcessor(cacheService, pubSubService);
    app.log.info('SSE manager initialized');
  } catch (err) {
    app.log.error({ err }, 'Failed to initialize SSE manager');
    // Don't throw - SSE is optional, fallback to polling
  }
  // Cleanup pub/sub redis, notification queue, and import queue on close
  app.addHook('onClose', async () => {
    await pubSubRedis.quit();
    stopPoller();
    await sseManager.stop();
    stopSSEProcessor();
    await shutdownNotificationQueue();
    await shutdownImportQueue();
  });
  // Health check endpoint: reports db/redis reachability plus TimescaleDB
  // feature state; 'degraded' when either core dependency is down.
  app.get('/health', async () => {
    let dbHealthy = false;
    let redisHealthy = false;
    // Check database
    try {
      await db.execute(sql`SELECT 1`);
      dbHealthy = true;
    } catch {
      dbHealthy = false;
    }
    // Check Redis
    try {
      const pong = await app.redis.ping();
      redisHealthy = pong === 'PONG';
    } catch {
      redisHealthy = false;
    }
    // Check TimescaleDB status
    let timescale = null;
    try {
      const tsStatus = await getTimescaleStatus();
      timescale = {
        installed: tsStatus.extensionInstalled,
        hypertable: tsStatus.sessionsIsHypertable,
        compression: tsStatus.compressionEnabled,
        aggregates: tsStatus.continuousAggregates.length,
        chunks: tsStatus.chunkCount,
      };
    } catch {
      timescale = { installed: false, hypertable: false, compression: false, aggregates: 0, chunks: 0 };
    }
    return {
      status: dbHealthy && redisHealthy ? 'ok' : 'degraded',
      db: dbHealthy,
      redis: redisHealthy,
      geoip: geoipService.hasDatabase(),
      timescale,
    };
  });
  // API routes
  await app.register(setupRoutes, { prefix: `${API_BASE_PATH}/setup` });
  await app.register(authRoutes, { prefix: `${API_BASE_PATH}/auth` });
  await app.register(serverRoutes, { prefix: `${API_BASE_PATH}/servers` });
  await app.register(userRoutes, { prefix: `${API_BASE_PATH}/users` });
  await app.register(sessionRoutes, { prefix: `${API_BASE_PATH}/sessions` });
  await app.register(ruleRoutes, { prefix: `${API_BASE_PATH}/rules` });
  await app.register(violationRoutes, { prefix: `${API_BASE_PATH}/violations` });
  await app.register(statsRoutes, { prefix: `${API_BASE_PATH}/stats` });
  await app.register(settingsRoutes, { prefix: `${API_BASE_PATH}/settings` });
  await app.register(channelRoutingRoutes, { prefix: `${API_BASE_PATH}/settings/notifications` });
  await app.register(importRoutes, { prefix: `${API_BASE_PATH}/import` });
  await app.register(imageRoutes, { prefix: `${API_BASE_PATH}/images` });
  await app.register(debugRoutes, { prefix: `${API_BASE_PATH}/debug` });
  await app.register(mobileRoutes, { prefix: `${API_BASE_PATH}/mobile` });
  await app.register(notificationPreferencesRoutes, { prefix: `${API_BASE_PATH}/notifications` });
  // Serve static frontend in production
  const webDistPath = resolve(PROJECT_ROOT, 'apps/web/dist');
  if (process.env.NODE_ENV === 'production' && existsSync(webDistPath)) {
    await app.register(fastifyStatic, {
      root: webDistPath,
      prefix: '/',
    });
    // SPA fallback - serve index.html for all non-API routes
    app.setNotFoundHandler((request, reply) => {
      if (request.url.startsWith('/api/') || request.url === '/health') {
        return reply.code(404).send({ error: 'Not Found' });
      }
      return reply.sendFile('index.html');
    });
    app.log.info('Static file serving enabled for production');
  }
  return app;
}
/**
 * Boot the server: build the app, install signal handlers, start listening,
 * then wire the WebSocket bridge (Redis pub/sub -> WS clients) and start the
 * session poller and SSE connections. Exits the process on startup failure.
 */
async function start() {
  try {
    const app = await buildApp();
    // Handle graceful shutdown
    const signals: NodeJS.Signals[] = ['SIGINT', 'SIGTERM'];
    for (const signal of signals) {
      process.on(signal, () => {
        app.log.info(`Received ${signal}, shutting down gracefully...`);
        stopPoller();
        void shutdownNotificationQueue();
        void shutdownImportQueue();
        void app.close().then(() => process.exit(0));
      });
    }
    await app.listen({ port: PORT, host: HOST });
    app.log.info(`Server running at http://${HOST}:${PORT}`);
    // Initialize WebSocket server using Fastify's underlying HTTP server
    const httpServer = app.server;
    initializeWebSocket(httpServer);
    app.log.info('WebSocket server initialized');
    // Set up Redis pub/sub to forward events to WebSocket clients.
    // A dedicated subscriber connection is required; a subscribing ioredis
    // client cannot issue other commands.
    const redisUrl = process.env.REDIS_URL ?? 'redis://localhost:6379';
    const wsSubscriber = new Redis(redisUrl);
    void wsSubscriber.subscribe(REDIS_KEYS.PUBSUB_EVENTS, (err) => {
      if (err) {
        app.log.error({ err }, 'Failed to subscribe to pub/sub channel');
      } else {
        app.log.info('Subscribed to pub/sub channel for WebSocket events');
      }
    });
    wsSubscriber.on('message', (_channel: string, message: string) => {
      try {
        const { event, data } = JSON.parse(message) as {
          event: string;
          data: unknown;
          timestamp: number;
        };
        // Forward events to WebSocket clients
        // (casts assume publishers send the payload shape matching each event)
        switch (event) {
          case WS_EVENTS.SESSION_STARTED:
            broadcastToSessions('session:started', data as ActiveSession);
            break;
          case WS_EVENTS.SESSION_STOPPED:
            broadcastToSessions('session:stopped', data as string);
            break;
          case WS_EVENTS.SESSION_UPDATED:
            broadcastToSessions('session:updated', data as ActiveSession);
            break;
          case WS_EVENTS.VIOLATION_NEW:
            broadcastToSessions('violation:new', data as ViolationWithDetails);
            break;
          case WS_EVENTS.STATS_UPDATED:
            broadcastToSessions('stats:updated', data as DashboardStats);
            break;
          case WS_EVENTS.IMPORT_PROGRESS:
            broadcastToSessions('import:progress', data as TautulliImportProgress);
            break;
          default:
            // Unknown event, ignore
            break;
        }
      } catch (err) {
        app.log.error({ err, message }, 'Failed to process pub/sub message');
      }
    });
    // Handle graceful shutdown for WebSocket subscriber
    const cleanupWsSubscriber = () => {
      void wsSubscriber.quit();
    };
    process.on('SIGINT', cleanupWsSubscriber);
    process.on('SIGTERM', cleanupWsSubscriber);
    // Start session poller after server is listening (uses DB settings)
    const pollerSettings = await getPollerSettings();
    if (pollerSettings.enabled) {
      startPoller({ enabled: true, intervalMs: pollerSettings.intervalMs });
    } else {
      app.log.info('Session poller disabled in settings');
    }
    // Start SSE connections for Plex servers (real-time updates)
    try {
      startSSEProcessor(); // Subscribe to SSE events
      await sseManager.start(); // Start SSE connections
      app.log.info('SSE connections started for Plex servers');
    } catch (err) {
      app.log.error({ err }, 'Failed to start SSE connections - falling back to polling');
    }
    // Log network settings status
    const networkSettings = await getNetworkSettings();
    const envTrustProxy = process.env.TRUST_PROXY === 'true';
    if (networkSettings.trustProxy && !envTrustProxy) {
      app.log.warn(
        'Trust proxy is enabled in settings but TRUST_PROXY env var is not set. ' +
          'Set TRUST_PROXY=true and restart for reverse proxy support.'
      );
    }
    if (networkSettings.externalUrl) {
      app.log.info(`External URL configured: ${networkSettings.externalUrl}`);
    }
    if (networkSettings.basePath) {
      app.log.info(`Base path configured: ${networkSettings.basePath}`);
    }
  } catch (err) {
    console.error('Failed to start server:', err);
    process.exit(1);
  }
}
// Fire-and-forget entry point; start() handles its own errors and exit codes.
void start();

View File

@@ -0,0 +1,255 @@
/**
* Aggregator Job Tests
*
* Tests the ACTUAL exported functions from aggregator.ts:
* - startAggregator: Start the stats refresh interval
* - stopAggregator: Stop the interval
* - triggerRefresh: Force an immediate refresh
*
* These tests validate:
* - Interval lifecycle (start/stop)
* - Double-start prevention
* - Config merging
* - Disabled state handling
* - Immediate execution on start
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
// Import ACTUAL production functions - not local duplicates
import { startAggregator, stopAggregator, triggerRefresh } from '../aggregator.js';
describe('aggregator', () => {
  // Spy on console methods — the aggregator reports all state transitions via
  // console.log, so every assertion below inspects this spy.
  let consoleLogSpy: ReturnType<typeof vi.spyOn>;

  beforeEach(() => {
    // Fake timers let tests drive the refresh interval deterministically.
    vi.useFakeTimers();
    consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined);
    vi.spyOn(console, 'error').mockImplementation(() => undefined);
  });

  afterEach(() => {
    // Always stop the aggregator to clean up any running intervals
    stopAggregator();
    vi.useRealTimers();
    vi.restoreAllMocks();
  });

  describe('startAggregator', () => {
    it('should start the aggregator with default config', () => {
      startAggregator();
      expect(consoleLogSpy).toHaveBeenCalledWith(
        expect.stringContaining('Starting stats aggregator')
      );
    });

    it('should log interval time when starting', () => {
      startAggregator({ intervalMs: 30000 });
      expect(consoleLogSpy).toHaveBeenCalledWith(
        'Starting stats aggregator with 30000ms interval'
      );
    });

    it('should run refreshStats immediately on start', () => {
      startAggregator();
      // First call should be the "Starting..." message
      // Second call should be the "Refreshing..." message from immediate run
      expect(consoleLogSpy).toHaveBeenCalledWith('Refreshing dashboard statistics...');
    });

    it('should prevent double start', () => {
      startAggregator();
      consoleLogSpy.mockClear();
      startAggregator();
      expect(consoleLogSpy).toHaveBeenCalledWith('Aggregator already running');
      // Should only log "already running", not start again
      expect(consoleLogSpy).not.toHaveBeenCalledWith(
        expect.stringContaining('Starting stats aggregator')
      );
    });

    it('should not start when disabled', () => {
      startAggregator({ enabled: false });
      expect(consoleLogSpy).toHaveBeenCalledWith('Stats aggregator disabled');
      expect(consoleLogSpy).not.toHaveBeenCalledWith(
        expect.stringContaining('Starting stats aggregator')
      );
    });

    it('should run on interval after start', () => {
      startAggregator({ intervalMs: 10000 });
      consoleLogSpy.mockClear();
      // Advance time by interval
      vi.advanceTimersByTime(10000);
      expect(consoleLogSpy).toHaveBeenCalledWith('Refreshing dashboard statistics...');
    });

    it('should run multiple times on interval', () => {
      startAggregator({ intervalMs: 5000 });
      consoleLogSpy.mockClear();
      // Advance 3 intervals
      vi.advanceTimersByTime(5000);
      vi.advanceTimersByTime(5000);
      vi.advanceTimersByTime(5000);
      // Should have run 3 times
      const refreshCalls = consoleLogSpy.mock.calls.filter(
        (call: unknown[]) => call[0] === 'Refreshing dashboard statistics...'
      );
      expect(refreshCalls).toHaveLength(3);
    });

    it('should merge partial config with defaults', () => {
      // Only override intervalMs, enabled should default to true
      startAggregator({ intervalMs: 1000 });
      // Should start (enabled defaults to true)
      expect(consoleLogSpy).toHaveBeenCalledWith(
        'Starting stats aggregator with 1000ms interval'
      );
    });

    it('should use default interval when not specified', () => {
      startAggregator({});
      // Default is POLLING_INTERVALS.STATS_REFRESH = 60000
      expect(consoleLogSpy).toHaveBeenCalledWith(
        'Starting stats aggregator with 60000ms interval'
      );
    });
  });

  describe('stopAggregator', () => {
    it('should stop the aggregator', () => {
      startAggregator();
      consoleLogSpy.mockClear();
      stopAggregator();
      expect(consoleLogSpy).toHaveBeenCalledWith('Stats aggregator stopped');
    });

    it('should allow starting again after stop', () => {
      startAggregator();
      stopAggregator();
      consoleLogSpy.mockClear();
      startAggregator();
      // Should start fresh, not say "already running"
      expect(consoleLogSpy).toHaveBeenCalledWith(
        expect.stringContaining('Starting stats aggregator')
      );
      expect(consoleLogSpy).not.toHaveBeenCalledWith('Aggregator already running');
    });

    it('should do nothing when not running', () => {
      // Don't start first
      stopAggregator();
      // Should not log anything since there's nothing to stop
      expect(consoleLogSpy).not.toHaveBeenCalledWith('Stats aggregator stopped');
    });

    it('should prevent further interval executions', () => {
      startAggregator({ intervalMs: 5000 });
      consoleLogSpy.mockClear();
      stopAggregator();
      consoleLogSpy.mockClear();
      // Advance time - should not trigger refresh
      vi.advanceTimersByTime(5000);
      vi.advanceTimersByTime(5000);
      const refreshCalls = consoleLogSpy.mock.calls.filter(
        (call: unknown[]) => call[0] === 'Refreshing dashboard statistics...'
      );
      expect(refreshCalls).toHaveLength(0);
    });
  });

  describe('triggerRefresh', () => {
    it('should trigger immediate refresh', async () => {
      await triggerRefresh();
      expect(consoleLogSpy).toHaveBeenCalledWith('Refreshing dashboard statistics...');
    });

    it('should work independently of aggregator state', async () => {
      // Don't start aggregator
      await triggerRefresh();
      expect(consoleLogSpy).toHaveBeenCalledWith('Refreshing dashboard statistics...');
    });

    it('should work while aggregator is running', async () => {
      startAggregator({ intervalMs: 60000 });
      consoleLogSpy.mockClear();
      await triggerRefresh();
      expect(consoleLogSpy).toHaveBeenCalledWith('Refreshing dashboard statistics...');
    });

    it('should be awaitable', async () => {
      const promise = triggerRefresh();
      // Should be a promise
      expect(promise).toBeInstanceOf(Promise);
      // Should resolve without error
      await expect(promise).resolves.toBeUndefined();
    });
  });

  // Cross-cutting lifecycle behavior: repeated start/stop cycles and
  // redundant calls must stay idempotent.
  describe('lifecycle scenarios', () => {
    it('should handle start-stop-start-stop cycle', () => {
      startAggregator();
      stopAggregator();
      startAggregator();
      stopAggregator();
      // Should have logged "stopped" twice
      const stopCalls = consoleLogSpy.mock.calls.filter(
        (call: unknown[]) => call[0] === 'Stats aggregator stopped'
      );
      expect(stopCalls).toHaveLength(2);
    });

    it('should handle multiple stop calls gracefully', () => {
      startAggregator();
      stopAggregator();
      stopAggregator();
      stopAggregator();
      // Only first stop should log
      const stopCalls = consoleLogSpy.mock.calls.filter(
        (call: unknown[]) => call[0] === 'Stats aggregator stopped'
      );
      expect(stopCalls).toHaveLength(1);
    });

    it('should handle start with disabled then start with enabled', () => {
      startAggregator({ enabled: false });
      consoleLogSpy.mockClear();
      startAggregator({ enabled: true });
      // Second call should start (since disabled didn't create interval)
      expect(consoleLogSpy).toHaveBeenCalledWith(
        expect.stringContaining('Starting stats aggregator')
      );
    });
  });
});

View File

@@ -0,0 +1,243 @@
/**
* Cleanup Mobile Tokens Job Tests
*
* Tests the mobile token cleanup job:
* - Deletes expired unused tokens (older than 1 hour)
* - Deletes used tokens (older than 30 days)
* - Returns count of deleted tokens
*
* Uses mocked database to test cleanup logic in isolation.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { randomUUID } from 'node:crypto';
// Mock the database so the cleanup logic runs without a real Postgres.
vi.mock('../../db/client.js', () => ({
  db: {
    delete: vi.fn(),
  },
}));

// Import after mocking
import { db } from '../../db/client.js';
import { cleanupMobileTokens } from '../cleanupMobileTokens.js';

// Type the mocked db
const mockDb = db as unknown as {
  delete: ReturnType<typeof vi.fn>;
};

// Helper to create a mock delete chain.
// cleanupMobileTokens issues two delete().where().returning() chains; this
// resolves the first to `expiredResult` and the second to `usedResult`.
function mockDeleteChain(expiredResult: { id: string }[], usedResult: { id: string }[]) {
  let callCount = 0;
  mockDb.delete.mockImplementation(() => ({
    where: vi.fn().mockReturnValue({
      returning: vi.fn().mockImplementation(() => {
        callCount++;
        // First call is for expired tokens, second for used tokens
        return callCount === 1 ? expiredResult : usedResult;
      }),
    }),
  }));
}
describe('cleanupMobileTokens', () => {
  beforeEach(() => {
    // Pin the clock so the 1-hour and 30-day cutoffs are deterministic.
    vi.useFakeTimers();
    vi.setSystemTime(new Date('2025-01-15T12:00:00Z'));
    vi.clearAllMocks();
  });

  afterEach(() => {
    vi.useRealTimers();
  });

  describe('expired unused tokens cleanup', () => {
    it('should delete expired unused tokens older than 1 hour', async () => {
      const expiredTokens = [
        { id: randomUUID() },
        { id: randomUUID() },
        { id: randomUUID() },
      ];
      mockDeleteChain(expiredTokens, []);
      const result = await cleanupMobileTokens();
      expect(result.deleted).toBe(3);
      expect(mockDb.delete).toHaveBeenCalledTimes(2); // Both expired and used queries
    });

    it('should not delete recently created unused tokens', async () => {
      // No expired tokens found
      mockDeleteChain([], []);
      const result = await cleanupMobileTokens();
      expect(result.deleted).toBe(0);
    });
  });

  describe('used tokens cleanup', () => {
    it('should delete used tokens older than 30 days', async () => {
      const usedTokens = [
        { id: randomUUID() },
        { id: randomUUID() },
      ];
      mockDeleteChain([], usedTokens);
      const result = await cleanupMobileTokens();
      expect(result.deleted).toBe(2);
    });

    it('should not delete recently used tokens', async () => {
      // No old used tokens found
      mockDeleteChain([], []);
      const result = await cleanupMobileTokens();
      expect(result.deleted).toBe(0);
    });
  });

  describe('combined cleanup', () => {
    it('should delete both expired and used tokens in single run', async () => {
      const expiredTokens = [{ id: randomUUID() }, { id: randomUUID() }];
      const usedTokens = [{ id: randomUUID() }, { id: randomUUID() }, { id: randomUUID() }];
      mockDeleteChain(expiredTokens, usedTokens);
      const result = await cleanupMobileTokens();
      // Total: 2 expired + 3 used = 5
      expect(result.deleted).toBe(5);
    });

    it('should return zero when no tokens need cleanup', async () => {
      mockDeleteChain([], []);
      const result = await cleanupMobileTokens();
      expect(result.deleted).toBe(0);
    });

    it('should handle large number of tokens', async () => {
      const expiredTokens = Array.from({ length: 100 }, () => ({ id: randomUUID() }));
      const usedTokens = Array.from({ length: 50 }, () => ({ id: randomUUID() }));
      mockDeleteChain(expiredTokens, usedTokens);
      const result = await cleanupMobileTokens();
      expect(result.deleted).toBe(150);
    });
  });

  describe('database query construction', () => {
    it('should call delete on mobileTokens table', async () => {
      mockDeleteChain([], []);
      await cleanupMobileTokens();
      // Should be called twice: once for expired, once for used
      expect(mockDb.delete).toHaveBeenCalledTimes(2);
    });

    it('should use where clause with proper conditions', async () => {
      const whereMock = vi.fn().mockReturnValue({
        returning: vi.fn().mockResolvedValue([]),
      });
      mockDb.delete.mockReturnValue({ where: whereMock });
      await cleanupMobileTokens();
      // Each delete should chain to where
      expect(whereMock).toHaveBeenCalledTimes(2);
    });

    it('should return ids from deleted tokens', async () => {
      const returningMock = vi.fn()
        .mockResolvedValueOnce([{ id: 'expired-1' }])
        .mockResolvedValueOnce([{ id: 'used-1' }, { id: 'used-2' }]);
      mockDb.delete.mockReturnValue({
        where: vi.fn().mockReturnValue({
          returning: returningMock,
        }),
      });
      const result = await cleanupMobileTokens();
      expect(result.deleted).toBe(3);
      expect(returningMock).toHaveBeenCalledTimes(2);
    });
  });

  // These tests only verify that two where-conditions are constructed; the
  // actual cutoff math is exercised indirectly via the pinned system time.
  describe('time boundaries', () => {
    it('should calculate 1 hour cutoff correctly', async () => {
      // Set current time to noon UTC
      vi.setSystemTime(new Date('2025-01-15T12:00:00Z'));
      let capturedWhereFn: unknown;
      mockDb.delete.mockImplementation(() => ({
        where: vi.fn().mockImplementation((condition) => {
          if (!capturedWhereFn) {
            capturedWhereFn = condition;
          }
          return { returning: vi.fn().mockResolvedValue([]) };
        }),
      }));
      await cleanupMobileTokens();
      // The first where clause should be for expired tokens
      // 1 hour ago from noon = 11:00 AM
      expect(capturedWhereFn).toBeDefined();
    });

    it('should calculate 30 day cutoff correctly', async () => {
      // Set current time to Jan 15, 2025
      vi.setSystemTime(new Date('2025-01-15T12:00:00Z'));
      const whereConditions: unknown[] = [];
      mockDb.delete.mockImplementation(() => ({
        where: vi.fn().mockImplementation((condition) => {
          whereConditions.push(condition);
          return { returning: vi.fn().mockResolvedValue([]) };
        }),
      }));
      await cleanupMobileTokens();
      // Should have captured both where conditions
      // First for expired (1 hour), second for used (30 days)
      expect(whereConditions).toHaveLength(2);
    });
  });

  describe('error handling', () => {
    it('should propagate database errors for expired query', async () => {
      mockDb.delete.mockReturnValue({
        where: vi.fn().mockReturnValue({
          returning: vi.fn().mockRejectedValue(new Error('Database connection failed')),
        }),
      });
      await expect(cleanupMobileTokens()).rejects.toThrow('Database connection failed');
    });

    it('should propagate database errors for used query', async () => {
      const returningMock = vi.fn()
        .mockResolvedValueOnce([]) // Expired query succeeds
        .mockRejectedValueOnce(new Error('Query timeout')); // Used query fails
      mockDb.delete.mockReturnValue({
        where: vi.fn().mockReturnValue({
          returning: returningMock,
        }),
      });
      await expect(cleanupMobileTokens()).rejects.toThrow('Query timeout');
    });
  });
});

View File

@@ -0,0 +1,78 @@
/**
* Background job for refreshing dashboard statistics
*/
import { POLLING_INTERVALS } from '@tracearr/shared';
// Handle for the scheduled refresh; null whenever the aggregator is stopped.
let aggregatorInterval: NodeJS.Timeout | null = null;

// Runtime configuration for the aggregator job.
export interface AggregatorConfig {
  enabled: boolean; // when false, startAggregator() is a no-op
  intervalMs: number; // delay between refreshes, in milliseconds
}

// Defaults merged under any partial config passed to startAggregator().
const defaultConfig: AggregatorConfig = {
  enabled: true,
  intervalMs: POLLING_INTERVALS.STATS_REFRESH,
};
/**
 * Recompute dashboard statistics and cache/broadcast them.
 *
 * Placeholder implementation — the intended pipeline is: query active
 * streams, today's plays, watch time, and 24h violations; cache results
 * in Redis with a TTL; broadcast a stats update over WebSocket.
 * Errors are logged rather than thrown so one failed refresh never kills
 * the interval driving this job.
 */
async function refreshStats(): Promise<void> {
  try {
    console.log('Refreshing dashboard statistics...');
  } catch (error) {
    console.error('Stats aggregation error:', error);
  }
}
/**
 * Begin periodic stats refreshes.
 *
 * Merges the partial config over the defaults, runs one refresh
 * immediately, then schedules repeats. No-ops (with a log line) when
 * disabled or when an interval is already active.
 */
export function startAggregator(config: Partial<AggregatorConfig> = {}): void {
  const { enabled, intervalMs } = { ...defaultConfig, ...config };

  if (!enabled) {
    console.log('Stats aggregator disabled');
    return;
  }
  if (aggregatorInterval) {
    console.log('Aggregator already running');
    return;
  }

  console.log(`Starting stats aggregator with ${intervalMs}ms interval`);

  // Kick off one refresh right away, then repeat on the interval.
  void refreshStats();
  aggregatorInterval = setInterval(() => {
    void refreshStats();
  }, intervalMs);
}
/**
 * Stop the periodic stats refresh, if one is running.
 * Safe to call repeatedly or when the aggregator was never started.
 */
export function stopAggregator(): void {
  if (!aggregatorInterval) {
    return;
  }
  clearInterval(aggregatorInterval);
  aggregatorInterval = null;
  console.log('Stats aggregator stopped');
}
/**
 * Run a stats refresh right now, outside the regular interval.
 * Works whether or not the aggregator is currently running.
 */
export async function triggerRefresh(): Promise<void> {
  return refreshStats();
}

View File

@@ -0,0 +1,40 @@
/**
* Cleanup job for expired/used mobile pairing tokens
*
* Run via cron or BullMQ scheduler to periodically clean up:
* - Expired unused tokens (older than 1 hour)
* - Used tokens (older than 30 days)
*/
import { lt, and, isNull, isNotNull } from 'drizzle-orm';
import { db } from '../db/client.js';
import { mobileTokens } from '../db/schema.js';
/**
 * Delete stale mobile pairing tokens.
 *
 * Two categories are removed:
 *  - unused tokens whose expiry is more than 1 hour in the past
 *  - used tokens consumed more than 30 days ago
 *
 * The two deletes target disjoint rows (usedAt IS NULL vs IS NOT NULL),
 * so they are issued concurrently.
 *
 * @returns total number of rows deleted across both categories
 */
export async function cleanupMobileTokens(): Promise<{ deleted: number }> {
  const now = Date.now();
  const oneHourAgo = new Date(now - 60 * 60 * 1000);
  const thirtyDaysAgo = new Date(now - 30 * 24 * 60 * 60 * 1000);

  const [expiredResult, usedResult] = await Promise.all([
    // Expired unused tokens older than 1 hour
    db
      .delete(mobileTokens)
      .where(
        and(
          lt(mobileTokens.expiresAt, oneHourAgo),
          isNull(mobileTokens.usedAt)
        )
      )
      .returning({ id: mobileTokens.id }),
    // Used tokens older than 30 days
    db
      .delete(mobileTokens)
      .where(
        and(
          isNotNull(mobileTokens.usedAt),
          lt(mobileTokens.usedAt, thirtyDaysAgo)
        )
      )
      .returning({ id: mobileTokens.id }),
  ]);

  return { deleted: expiredResult.length + usedResult.length };
}

View File

@@ -0,0 +1,377 @@
/**
* Import Queue - BullMQ-based async import processing
*
* Provides reliable, resumable Tautulli import with:
* - Restart resilience (job state persisted in Redis)
* - Cancellation support
* - Progress tracking via WebSocket
* - Checkpoint/resume on failure
*/
import { Queue, Worker, type Job, type ConnectionOptions } from 'bullmq';
import type { TautulliImportProgress, TautulliImportResult } from '@tracearr/shared';
import { TautulliService } from '../services/tautulli.js';
import { getPubSubService } from '../services/cache.js';
// Job data types
export interface ImportJobData {
  type: 'tautulli'; // only Tautulli imports are supported so far
  serverId: string;
  userId: string; // Audit trail - who initiated the import
  checkpoint?: number; // Resume from this page (for future use)
}

export type ImportJobResult = TautulliImportResult;

// Queue configuration
const QUEUE_NAME = 'imports';
const DLQ_NAME = 'imports-dlq'; // dead-letter queue for jobs that exhaust retries

// Connection and instances — module singletons created by initImportQueue()
// / startImportWorker() and torn down by shutdownImportQueue().
let connectionOptions: ConnectionOptions | null = null;
let importQueue: Queue<ImportJobData> | null = null;
let importWorker: Worker<ImportJobData> | null = null;
let dlqQueue: Queue<ImportJobData> | null = null;
/**
 * Create the import queue and its dead-letter queue against the given Redis.
 *
 * Idempotent: a repeat call logs and returns without recreating anything.
 * Retry policy is 3 attempts with exponential backoff starting at 1 minute.
 * Completed jobs are pruned (last 100, max 7 days); failed jobs are kept
 * indefinitely for investigation. Job timeouts are a per-worker setting and
 * are not configured here.
 */
export function initImportQueue(redisUrl: string): void {
  if (importQueue) {
    console.log('Import queue already initialized');
    return;
  }

  connectionOptions = { url: redisUrl };

  const completedRetentionSeconds = 7 * 24 * 60 * 60; // 7 days
  importQueue = new Queue<ImportJobData>(QUEUE_NAME, {
    connection: connectionOptions,
    defaultJobOptions: {
      attempts: 3,
      backoff: { type: 'exponential', delay: 60000 }, // 1min, 2min, 4min between retries
      removeOnComplete: { count: 100, age: completedRetentionSeconds },
      removeOnFail: false, // keep failed jobs for investigation
    },
  });

  dlqQueue = new Queue<ImportJobData>(DLQ_NAME, {
    connection: connectionOptions,
    defaultJobOptions: { removeOnComplete: false, removeOnFail: false },
  });

  console.log('Import queue initialized');
}
/**
 * Start the import worker to process queued jobs.
 *
 * Requires initImportQueue() to have been called first (needs the shared
 * connection options). Idempotent: a repeat call logs and returns. The
 * worker runs one import at a time; jobs that exhaust their retries are
 * copied to the dead-letter queue, and every failure is announced to the
 * frontend over pub/sub.
 */
export function startImportWorker(): void {
  if (!connectionOptions) {
    throw new Error('Import queue not initialized. Call initImportQueue first.');
  }
  if (importWorker) {
    console.log('Import worker already running');
    return;
  }
  importWorker = new Worker<ImportJobData>(
    QUEUE_NAME,
    async (job: Job<ImportJobData>) => {
      // Processor: time the import and log start/finish for observability.
      const startTime = Date.now();
      console.log(`[Import] Starting job ${job.id} for server ${job.data.serverId}`);
      try {
        const result = await processImportJob(job);
        const duration = Math.round((Date.now() - startTime) / 1000);
        console.log(`[Import] Job ${job.id} completed in ${duration}s:`, result);
        return result;
      } catch (error) {
        const duration = Math.round((Date.now() - startTime) / 1000);
        console.error(`[Import] Job ${job.id} failed after ${duration}s:`, error);
        throw error; // rethrow so BullMQ schedules a retry
      }
    },
    {
      connection: connectionOptions,
      concurrency: 1, // Only 1 import at a time per worker
      // Large imports (300k+ records) can take hours - extend lock to prevent stalled job detection
      lockDuration: 5 * 60 * 1000, // 5 minutes (default is 30s)
      stalledInterval: 5 * 60 * 1000, // Check for stalled jobs every 5 minutes
      limiter: {
        max: 1,
        duration: 60000, // Max 1 new import per minute (prevents spam)
      },
    }
  );
  // Handle job failures - notify frontend and move to DLQ if retries exhausted
  importWorker.on('failed', (job, error) => {
    if (!job) return;
    // Always notify frontend of failure (zeroed counters; only the message
    // and jobId are meaningful in the error payload).
    const pubSubService = getPubSubService();
    if (pubSubService) {
      void pubSubService.publish('import:progress', {
        status: 'error',
        totalRecords: 0,
        fetchedRecords: 0,
        processedRecords: 0,
        importedRecords: 0,
        updatedRecords: 0,
        skippedRecords: 0,
        duplicateRecords: 0,
        unknownUserRecords: 0,
        activeSessionRecords: 0,
        errorRecords: 0,
        currentPage: 0,
        totalPages: 0,
        message: `Import failed: ${error?.message || 'Unknown error'}`,
        jobId: job.id,
      });
    }
    // Final attempt failed — copy the job data to the DLQ for manual review.
    if (job.attemptsMade >= (job.opts.attempts || 3)) {
      console.error(`[Import] Job ${job.id} exhausted retries, moving to DLQ:`, error);
      if (dlqQueue) {
        void dlqQueue.add(`dlq-${job.data.type}`, job.data, {
          jobId: `dlq-${job.id}`,
        });
      }
    }
  });
  importWorker.on('error', (error) => {
    console.error('[Import] Worker error:', error);
  });
  console.log('Import worker started');
}
/**
 * Process a single import job.
 *
 * Delegates to TautulliService.importHistory, wiring in a progress callback
 * that (a) updates BullMQ's 0-100 progress, (b) extends the job lock so a
 * multi-hour import is not flagged as stalled, and (c) mirrors progress to
 * the frontend over pub/sub. A final completion/error message is published
 * after the import returns.
 *
 * @param job - queued import job (serverId selects which server to import)
 * @returns the import result summary from TautulliService
 */
async function processImportJob(job: Job<ImportJobData>): Promise<ImportJobResult> {
  const { serverId } = job.data;
  const pubSubService = getPubSubService();
  // Progress callback to update job and publish to WebSocket
  const onProgress = async (progress: TautulliImportProgress) => {
    // Update BullMQ job progress (0-100); guard against divide-by-zero
    // before the total record count is known.
    const percent =
      progress.totalRecords > 0
        ? Math.round((progress.processedRecords / progress.totalRecords) * 100)
        : 0;
    await job.updateProgress(percent);
    // Extend lock to prevent stalled job detection during long imports
    // This is critical for large imports (300k+ records) that can take hours
    // NOTE(review): job.token may be undefined here; extendLock('') probably
    // fails and falls into the catch below — confirm BullMQ token semantics.
    try {
      await job.extendLock(job.token ?? '', 5 * 60 * 1000); // Extend by 5 minutes
    } catch {
      // Lock extension can fail if job was already moved to another state
      console.warn(`[Import] Failed to extend lock for job ${job.id}`);
    }
    // Publish to WebSocket for UI
    if (pubSubService) {
      await pubSubService.publish('import:progress', {
        ...progress,
        jobId: job.id,
      });
    }
  };
  // Run the actual import with progress callback
  const result = await TautulliService.importHistory(serverId, pubSubService ?? undefined, onProgress);
  // Publish final result (note: TautulliService already publishes final progress,
  // but this is a fallback to ensure frontend receives completion notification)
  if (pubSubService) {
    const total = result.imported + result.updated + result.skipped + result.errors;
    await pubSubService.publish('import:progress', {
      status: result.success ? 'complete' : 'error',
      totalRecords: total,
      fetchedRecords: total,
      processedRecords: total,
      importedRecords: result.imported,
      updatedRecords: result.updated,
      skippedRecords: result.skipped,
      duplicateRecords: 0, // Not available in result
      unknownUserRecords: 0, // Not available in result
      activeSessionRecords: 0, // Not available in result
      errorRecords: result.errors,
      currentPage: 0,
      totalPages: 0,
      message: result.message,
      jobId: job.id,
    });
  }
  return result;
}
/**
 * Look up the id of an in-flight import job for a server, if one exists.
 * Scans active, waiting, and delayed jobs; returns null when the queue is
 * uninitialized or no matching job is found.
 */
export async function getActiveImportForServer(serverId: string): Promise<string | null> {
  if (!importQueue) {
    return null;
  }
  const pendingJobs = await importQueue.getJobs(['active', 'waiting', 'delayed']);
  const match = pendingJobs.find((job) => job.data.serverId === serverId);
  return match?.id ?? null;
}
/**
* Enqueue a new import job
*/
export async function enqueueImport(serverId: string, userId: string): Promise<string> {
if (!importQueue) {
throw new Error('Import queue not initialized');
}
// Check for existing active import for this server
const existingJobId = await getActiveImportForServer(serverId);
if (existingJobId) {
throw new Error(`Import already in progress for server ${serverId} (job ${existingJobId})`);
}
const job = await importQueue.add('tautulli-import', {
type: 'tautulli',
serverId,
userId,
});
const jobId = job.id ?? `unknown-${Date.now()}`;
console.log(`[Import] Enqueued job ${jobId} for server ${serverId}`);
return jobId;
}
/**
 * Fetch the current status of an import job.
 * Returns null when the queue is uninitialized or the job is unknown.
 */
export async function getImportStatus(jobId: string): Promise<{
  jobId: string;
  state: string;
  progress: number | object | null;
  result?: ImportJobResult;
  failedReason?: string;
  createdAt?: number;
  finishedAt?: number;
} | null> {
  if (!importQueue) {
    return null;
  }

  const job = await importQueue.getJob(jobId);
  if (!job) {
    return null;
  }

  const state = await job.getState();

  // Normalize progress: BullMQ may report a number, an object, or nothing.
  const rawProgress = job.progress;
  const progress =
    typeof rawProgress === 'number' || typeof rawProgress === 'object' ? rawProgress : null;

  return {
    jobId: job.id ?? jobId, // Fallback to input jobId if somehow null
    state,
    progress,
    result: job.returnvalue as ImportJobResult | undefined,
    failedReason: job.failedReason,
    createdAt: job.timestamp,
    finishedAt: job.finishedOn,
  };
}
/**
 * Cancel an import job.
 *
 * Only jobs still waiting or delayed can be removed; active jobs would need
 * worker-level cancellation, which is not implemented yet.
 *
 * @returns true when the job was removed, false otherwise
 */
export async function cancelImport(jobId: string): Promise<boolean> {
  if (!importQueue) {
    return false;
  }

  const job = await importQueue.getJob(jobId);
  if (!job) {
    return false;
  }

  const state = await job.getState();
  const removable = state === 'waiting' || state === 'delayed';
  if (!removable) {
    console.log(`[Import] Cannot cancel job ${jobId} in state ${state}`);
    return false;
  }

  await job.remove();
  console.log(`[Import] Cancelled job ${jobId}`);
  return true;
}
/**
 * Collect queue depth counters for the import queue and its DLQ.
 * Returns null when the queues have not been initialized.
 */
export async function getImportQueueStats(): Promise<{
  waiting: number;
  active: number;
  completed: number;
  failed: number;
  delayed: number;
  dlqSize: number;
} | null> {
  if (!importQueue || !dlqQueue) {
    return null;
  }

  // All six counters are independent reads; fetch them concurrently.
  const counts = await Promise.all([
    importQueue.getWaitingCount(),
    importQueue.getActiveCount(),
    importQueue.getCompletedCount(),
    importQueue.getFailedCount(),
    importQueue.getDelayedCount(),
    dlqQueue.getWaitingCount(),
  ]);
  const [waiting, active, completed, failed, delayed, dlqSize] = counts;
  return { waiting, active, completed, failed, delayed, dlqSize };
}
/**
 * Gracefully shut down the import worker and queues.
 *
 * The worker is closed first so no new jobs start while the queues shut
 * down; the two queues are independent and are closed concurrently.
 * Module references are cleared immediately so a concurrent caller cannot
 * use a half-closed instance.
 */
export async function shutdownImportQueue(): Promise<void> {
  console.log('Shutting down import queue...');
  if (importWorker) {
    await importWorker.close();
    importWorker = null;
  }
  const closing: Promise<void>[] = [];
  if (importQueue) {
    closing.push(importQueue.close());
    importQueue = null;
  }
  if (dlqQueue) {
    closing.push(dlqQueue.close());
    dlqQueue = null;
  }
  await Promise.all(closing);
  console.log('Import queue shutdown complete');
}

View File

@@ -0,0 +1,355 @@
/**
* Notification Queue - BullMQ-based async notification dispatch
*
* Provides reliable, retryable notification delivery with dead letter support.
* All notifications are enqueued here and processed asynchronously by workers.
*/
import { Queue, Worker, type Job, type ConnectionOptions } from 'bullmq';
import type { ViolationWithDetails, ActiveSession, NotificationEventType } from '@tracearr/shared';
import { notificationService } from '../services/notify.js';
import { pushNotificationService } from '../services/pushNotification.js';
import { getNotificationSettings } from '../routes/settings.js';
import { getChannelRouting } from '../routes/channelRouting.js';
/**
 * Map job types to notification event types for routing lookup
 * (channel routing is keyed by NotificationEventType, not job type).
 */
const JOB_TYPE_TO_EVENT_TYPE: Record<NotificationJobData['type'], NotificationEventType> = {
  violation: 'violation_detected',
  session_started: 'stream_started',
  session_stopped: 'stream_stopped',
  server_down: 'server_down',
  server_up: 'server_up',
};

// Job type discriminated union for type-safe job handling ('type' is the tag).
export type NotificationJobData =
  | { type: 'violation'; payload: ViolationWithDetails }
  | { type: 'session_started'; payload: ActiveSession }
  | { type: 'session_stopped'; payload: ActiveSession }
  | { type: 'server_down'; payload: { serverName: string; serverId: string } }
  | { type: 'server_up'; payload: { serverName: string; serverId: string } };

// Queue name constant
const QUEUE_NAME = 'notifications';

// Dead letter queue name for failed jobs that exceed retry attempts
const DLQ_NAME = 'notifications-dlq';

// Connection options (will be set during initialization)
let connectionOptions: ConnectionOptions | null = null;

// Queue and worker instances — module singletons managed by
// initNotificationQueue / startNotificationWorker / shutdown.
let notificationQueue: Queue<NotificationJobData> | null = null;
let notificationWorker: Worker<NotificationJobData> | null = null;
let dlqQueue: Queue<NotificationJobData> | null = null;
/**
 * Create the notification queue and its dead-letter queue on the given Redis.
 *
 * Idempotent: a repeat call logs and returns. Retry policy is 3 attempts
 * with exponential backoff starting at 1 second. Completed jobs are kept
 * briefly for debugging (last 1000 / 24h); failed jobs longer for analysis
 * (last 5000 / 7 days). DLQ entries are never auto-removed.
 */
export function initNotificationQueue(redisUrl: string): void {
  if (notificationQueue) {
    console.log('Notification queue already initialized');
    return;
  }

  connectionOptions = { url: redisUrl };

  const daySeconds = 24 * 60 * 60;
  notificationQueue = new Queue<NotificationJobData>(QUEUE_NAME, {
    connection: connectionOptions,
    defaultJobOptions: {
      attempts: 3,
      backoff: { type: 'exponential', delay: 1000 }, // 1s, 2s, 4s
      removeOnComplete: { count: 1000, age: daySeconds },
      removeOnFail: { count: 5000, age: 7 * daySeconds },
    },
  });

  dlqQueue = new Queue<NotificationJobData>(DLQ_NAME, {
    connection: connectionOptions,
    defaultJobOptions: { removeOnComplete: false, removeOnFail: false },
  });

  console.log('Notification queue initialized');
}
/**
 * Start the notification worker to process queued jobs.
 *
 * Requires initNotificationQueue() to have been called first. Idempotent:
 * a repeat call logs and returns. Up to 5 jobs run in parallel, rate-limited
 * to 30/second to protect external services; jobs that exhaust retries are
 * copied to the dead-letter queue.
 */
export function startNotificationWorker(): void {
  if (!connectionOptions) {
    throw new Error('Notification queue not initialized. Call initNotificationQueue first.');
  }
  if (notificationWorker) {
    console.log('Notification worker already running');
    return;
  }
  notificationWorker = new Worker<NotificationJobData>(
    QUEUE_NAME,
    async (job: Job<NotificationJobData>) => {
      // Processor: time the dispatch and log success/failure per job.
      const startTime = Date.now();
      try {
        await processNotificationJob(job);
        const duration = Date.now() - startTime;
        console.log(
          `Notification job ${job.id} (${job.data.type}) processed in ${duration}ms`
        );
      } catch (error) {
        const duration = Date.now() - startTime;
        console.error(
          `Notification job ${job.id} (${job.data.type}) failed after ${duration}ms:`,
          error
        );
        throw error; // Re-throw to trigger retry
      }
    },
    {
      connection: connectionOptions,
      concurrency: 5, // Process up to 5 notifications in parallel
      limiter: {
        max: 30, // Max 30 jobs per duration
        duration: 1000, // Per second (rate limit for external services)
      },
    }
  );
  // Handle failed jobs that exceed retry attempts - move to DLQ
  notificationWorker.on('failed', (job, error) => {
    if (job && job.attemptsMade >= (job.opts.attempts || 3)) {
      console.error(
        `Notification job ${job.id} (${job.data.type}) exhausted retries, moving to DLQ:`,
        error
      );
      // Move to dead letter queue for manual investigation
      if (dlqQueue) {
        void dlqQueue.add(`dlq-${job.data.type}`, job.data, {
          jobId: `dlq-${job.id}`,
        });
      }
    }
  });
  notificationWorker.on('error', (error) => {
    console.error('Notification worker error:', error);
  });
  console.log('Notification worker started');
}
/**
 * Process a single notification job.
 *
 * Settings and channel routing are re-read for every job because either may
 * have changed between the time the job was enqueued and now.
 *
 * @param job - the BullMQ job carrying the notification type and payload
 * @throws rethrows any channel-delivery error so BullMQ retries the job
 */
async function processNotificationJob(job: Job<NotificationJobData>): Promise<void> {
  const { type, payload } = job.data;

  // Resolve the freshest settings and per-event channel routing.
  const settings = await getNotificationSettings();
  const eventType = JOB_TYPE_TO_EVENT_TYPE[type];
  const routing = await getChannelRouting(eventType);

  // Routing decides which channels are live: suppress webhook URLs for
  // disabled channels so the notification service skips them.
  const effectiveSettings = {
    discordWebhookUrl: routing.discordEnabled ? settings.discordWebhookUrl : null,
    customWebhookUrl: routing.webhookEnabled ? settings.customWebhookUrl : null,
    webhookFormat: settings.webhookFormat,
    ntfyTopic: settings.ntfyTopic,
    // The remaining fields are irrelevant to notifications but required by
    // the Settings type; fill with inert defaults.
    allowGuestAccess: false,
    unitSystem: settings.unitSystem ?? 'metric' as const, // Display preference for units
    pollerEnabled: true,
    pollerIntervalMs: 15000,
    tautulliUrl: null,
    tautulliApiKey: null,
    externalUrl: null,
    basePath: '',
    trustProxy: false,
    mobileEnabled: settings.mobileEnabled ?? false,
    primaryAuthMethod: 'local' as const, // Not used in notifications, but required by Settings type
  };

  // Discord and custom webhooks share one delivery path in the service.
  const webhookChannelActive = routing.discordEnabled || routing.webhookEnabled;

  if (type === 'violation') {
    if (webhookChannelActive) {
      await notificationService.notifyViolation(payload, effectiveSettings);
    }
    if (routing.pushEnabled) {
      await pushNotificationService.notifyViolation(payload);
    }
  } else if (type === 'session_started') {
    if (webhookChannelActive) {
      await notificationService.notifySessionStarted(payload, effectiveSettings);
    }
    if (routing.pushEnabled) {
      await pushNotificationService.notifySessionStarted(payload);
    }
  } else if (type === 'session_stopped') {
    if (webhookChannelActive) {
      await notificationService.notifySessionStopped(payload, effectiveSettings);
    }
    if (routing.pushEnabled) {
      await pushNotificationService.notifySessionStopped(payload);
    }
  } else if (type === 'server_down') {
    if (webhookChannelActive) {
      await notificationService.notifyServerDown(payload.serverName, effectiveSettings);
    }
    if (routing.pushEnabled) {
      await pushNotificationService.notifyServerDown(payload.serverName, payload.serverId);
    }
  } else if (type === 'server_up') {
    if (webhookChannelActive) {
      await notificationService.notifyServerUp(payload.serverName, effectiveSettings);
    }
    if (routing.pushEnabled) {
      await pushNotificationService.notifyServerUp(payload.serverName, payload.serverId);
    }
  } else {
    // TypeScript exhaustiveness check
    const _exhaustive: never = type;
    throw new Error(`Unknown notification type: ${_exhaustive}`);
  }
}
/**
 * Enqueue a notification for async processing.
 *
 * @param data - notification type plus its payload
 * @param options - optional BullMQ priority and delay (ms)
 * @returns the BullMQ job id, or undefined when the queue is unavailable
 *          (the notification is dropped with an error log)
 */
export async function enqueueNotification(
  data: NotificationJobData,
  options?: { priority?: number; delay?: number }
): Promise<string | undefined> {
  if (!notificationQueue) {
    console.error('Notification queue not initialized, dropping notification:', data.type);
    return undefined;
  }
  const enqueued = await notificationQueue.add(data.type, data, {
    priority: options?.priority,
    delay: options?.delay,
  });
  return enqueued.id;
}
/**
 * Get queue statistics for monitoring.
 *
 * @returns counts for the main queue plus the DLQ backlog, or null when the
 *          queues have not been initialized
 */
export async function getQueueStats(): Promise<{
  waiting: number;
  active: number;
  completed: number;
  failed: number;
  delayed: number;
  dlqSize: number;
} | null> {
  if (!notificationQueue || !dlqQueue) {
    return null;
  }
  // All six counts are independent Redis reads; fetch them in parallel.
  const counts = await Promise.all([
    notificationQueue.getWaitingCount(),
    notificationQueue.getActiveCount(),
    notificationQueue.getCompletedCount(),
    notificationQueue.getFailedCount(),
    notificationQueue.getDelayedCount(),
    dlqQueue.getWaitingCount(),
  ]);
  const [waiting, active, completed, failed, delayed, dlqSize] = counts;
  return { waiting, active, completed, failed, delayed, dlqSize };
}
/**
 * Gracefully shut down the notification worker and both queues.
 *
 * The worker is closed first so no new jobs start while the queues are being
 * torn down. Each handle is nulled only after a successful close, so a
 * failed close leaves the handle intact for a retry. Safe to call when
 * nothing was ever initialized.
 */
export async function shutdownNotificationQueue(): Promise<void> {
  console.log('Shutting down notification queue...');
  if (notificationWorker !== null) {
    await notificationWorker.close();
    notificationWorker = null;
  }
  if (notificationQueue !== null) {
    await notificationQueue.close();
    notificationQueue = null;
  }
  if (dlqQueue !== null) {
    await dlqQueue.close();
    dlqQueue = null;
  }
  console.log('Notification queue shutdown complete');
}
/**
 * Re-enqueue every waiting/delayed job from the dead letter queue back onto
 * the main notification queue.
 *
 * @returns the number of jobs that were re-enqueued (0 when queues are
 *          uninitialized)
 */
export async function retryDlqJobs(): Promise<number> {
  if (!dlqQueue || !notificationQueue) {
    return 0;
  }
  const deadJobs = await dlqQueue.getJobs(['waiting', 'delayed']);
  for (const deadJob of deadJobs) {
    // Put the payload back on the main queue first, then drop it from the
    // DLQ, so a crash between the two steps duplicates rather than loses.
    await notificationQueue.add(deadJob.data.type, deadJob.data);
    await deadJob.remove();
  }
  console.log(`Retried ${deadJobs.length} jobs from DLQ`);
  return deadJobs.length;
}

View File

@@ -0,0 +1,321 @@
/**
* Session Edge Cases Tests
*
* TDD tests for implementing robust session handling.
* These tests are written BEFORE implementation (RED phase).
*
* HIGH Priority Edge Cases:
* 1. Watch Completion - 85% threshold (configurable per media type)
* 2. Stale Stream Force-Stop - 5 minute timeout
* 3. Minimum Play Time Filtering - 120s default
* 4. Continued Session Threshold - 60s configurable
*/
import { describe, it, expect } from 'vitest';
import {
checkWatchCompletion,
shouldGroupWithPreviousSession,
} from '../stateTracker.js';
// ============================================================================
// Watch Completion Detection
// ============================================================================
// Industry standard uses 85% threshold (configurable per media type)
// Current implementation uses hardcoded 80%
// NOTE(review): the "Currently fails" remarks below are RED-phase TDD notes;
// remove them once the 85% threshold is implemented.
describe('Watch Completion Detection', () => {
  describe('85% threshold (industry standard)', () => {
    it('should mark as watched at 85% progress', () => {
      // Default: MOVIE_WATCHED_PERCENT = 85, TV_WATCHED_PERCENT = 85
      // Currently fails because implementation uses 80%
      const progressMs = 85000; // 85%
      const totalMs = 100000;
      const result = checkWatchCompletion(progressMs, totalMs);
      // This should pass at 85% but current implementation requires only 80%
      expect(result).toBe(true);
    });
    it('should NOT mark as watched at 84% progress', () => {
      const progressMs = 84000; // 84%
      const totalMs = 100000;
      const result = checkWatchCompletion(progressMs, totalMs);
      // With 85% threshold, 84% should NOT be watched
      // Current implementation incorrectly marks this as watched (80% threshold)
      expect(result).toBe(false);
    });
    it('should NOT mark as watched at exactly 80% (below threshold)', () => {
      const progressMs = 80000; // 80%
      const totalMs = 100000;
      const result = checkWatchCompletion(progressMs, totalMs);
      // With 85% threshold, 80% is NOT watched
      // Current implementation incorrectly marks this as watched
      expect(result).toBe(false);
    });
  });
  describe('configurable threshold per media type', () => {
    it('should accept custom threshold for movies', () => {
      const progressMs = 90000;
      const totalMs = 100000;
      const customThreshold = 0.90; // 90% for some users
      // checkWatchCompletion should accept optional threshold parameter
      // Currently it doesn't - this test should fail
      const result = checkWatchCompletion(progressMs, totalMs, customThreshold);
      expect(result).toBe(true);
      // 89% should not pass with 90% threshold
      const result2 = checkWatchCompletion(89000, 100000, customThreshold);
      expect(result2).toBe(false);
    });
    it('should accept custom threshold for TV episodes', () => {
      const progressMs = 85000;
      const totalMs = 100000;
      const tvThreshold = 0.85;
      const result = checkWatchCompletion(progressMs, totalMs, tvThreshold);
      expect(result).toBe(true);
    });
  });
  // Marker-based completion (future enhancement)
  describe.skip('marker-based completion', () => {
    it('should mark as watched when reaching credits marker', () => {
      // Plex provides intro/credits markers
      // If credits marker exists and user passes it, mark as watched
      // This requires API integration to get markers
    });
  });
});
// ============================================================================
// Stale Stream Force-Stop
// ============================================================================
// Stop sessions after 5 minutes of no updates
describe('Stale Stream Force-Stop', () => {
  describe('shouldForceStopStaleSession', () => {
    // This function doesn't exist yet - tests will fail
    // (the dynamic import keeps the rest of this suite loading while the
    // export is still missing during the RED phase)
    it('should return true when session has no updates for 5+ minutes', async () => {
      const { shouldForceStopStaleSession } = await import('../stateTracker.js');
      // 5 minutes + 1 second (strictly greater than threshold)
      const moreThanFiveMinutesAgo = new Date(Date.now() - (5 * 60 * 1000 + 1000));
      const result = shouldForceStopStaleSession(moreThanFiveMinutesAgo);
      expect(result).toBe(true);
    });
    it('should return false when session was updated within 5 minutes', async () => {
      const { shouldForceStopStaleSession } = await import('../stateTracker.js');
      const threeMinutesAgo = new Date(Date.now() - 3 * 60 * 1000);
      const result = shouldForceStopStaleSession(threeMinutesAgo);
      expect(result).toBe(false);
    });
    it('should return false when lastSeenAt is exactly 5 minutes ago', async () => {
      const { shouldForceStopStaleSession } = await import('../stateTracker.js');
      // Edge case: exactly at threshold should NOT stop
      const exactlyFiveMinutesAgo = new Date(Date.now() - 5 * 60 * 1000);
      const result = shouldForceStopStaleSession(exactlyFiveMinutesAgo);
      expect(result).toBe(false);
    });
    it('should accept configurable timeout in seconds', async () => {
      const { shouldForceStopStaleSession } = await import('../stateTracker.js');
      // 3 minute custom timeout (second parameter is seconds, not ms)
      const threeMinuteTimeout = 180;
      const threeMinutesAgo = new Date(Date.now() - 3 * 60 * 1000 - 1);
      const result = shouldForceStopStaleSession(threeMinutesAgo, threeMinuteTimeout);
      expect(result).toBe(true);
    });
  });
  describe('integration with session processing', () => {
    it('should mark force-stopped sessions with forceStopped flag', async () => {
      // The database schema should have a forceStopped boolean field
      // When a session is force-stopped, this flag should be set to true
      // This test verifies the schema has the field
      const { sessions } = await import('../../../db/schema.js');
      expect(sessions).toHaveProperty('forceStopped');
    });
  });
});
// ============================================================================
// Minimum Play Time Filtering
// ============================================================================
// Use LOGGING_IGNORE_INTERVAL = 120 seconds
describe('Minimum Play Time Filtering', () => {
  describe('shouldRecordSession', () => {
    // This function doesn't exist yet - tests will fail
    // (RED phase; durations are in milliseconds, default minimum is 120s)
    it('should NOT record session with < 120 seconds play time', async () => {
      const { shouldRecordSession } = await import('../stateTracker.js');
      const durationMs = 119 * 1000; // 119 seconds
      const result = shouldRecordSession(durationMs);
      expect(result).toBe(false);
    });
    it('should record session with >= 120 seconds play time', async () => {
      const { shouldRecordSession } = await import('../stateTracker.js');
      const durationMs = 120 * 1000; // Exactly 120 seconds
      const result = shouldRecordSession(durationMs);
      expect(result).toBe(true);
    });
    it('should accept custom minimum play time', async () => {
      const { shouldRecordSession } = await import('../stateTracker.js');
      // Some users want stricter filtering (e.g., 5 minutes)
      const customMinMs = 5 * 60 * 1000;
      const durationMs = 4 * 60 * 1000; // 4 minutes
      const result = shouldRecordSession(durationMs, customMinMs);
      expect(result).toBe(false);
    });
    it('should always record sessions with 0 minimum configured', async () => {
      const { shouldRecordSession } = await import('../stateTracker.js');
      const durationMs = 1000; // 1 second
      const result = shouldRecordSession(durationMs, 0);
      expect(result).toBe(true);
    });
  });
  describe('media type specific filtering', () => {
    it('should apply different minimums for movies vs episodes', async () => {
      const { shouldRecordSession } = await import('../stateTracker.js');
      // Movies might have higher threshold
      const movieMinMs = 180 * 1000; // 3 minutes for movies
      const episodeMinMs = 120 * 1000; // 2 minutes for episodes
      // 2.5 minute watch
      const durationMs = 150 * 1000;
      const recordMovie = shouldRecordSession(durationMs, movieMinMs);
      const recordEpisode = shouldRecordSession(durationMs, episodeMinMs);
      expect(recordMovie).toBe(false); // Under 3 min threshold
      expect(recordEpisode).toBe(true); // Over 2 min threshold
    });
  });
});
// ============================================================================
// Continued Session Threshold
// ============================================================================
// Use CONTINUED_SESSION_THRESHOLD = 60 seconds
describe('Continued Session Threshold', () => {
  describe('shouldGroupWithPreviousSession with configurable threshold', () => {
    // Shared fixture: a half-watched previous session; each test overrides
    // stoppedAt to control the gap between stop and resume.
    const baseSession = {
      id: 'previous-session-id',
      referenceId: null,
      progressMs: 30 * 60 * 1000, // 30 minutes
      watched: false,
      stoppedAt: new Date(),
    };
    it('should NOT group sessions with > 60 second gap when continued session is close', () => {
      // If new session starts at same progress but previous stopped > 60s ago,
      // it should NOT be grouped (it's a fresh start, not a continue)
      const sixtyOneSecondsAgo = new Date(Date.now() - 61 * 1000);
      // This should return null because it's been too long since previous session
      const result = shouldGroupWithPreviousSession(
        { ...baseSession, stoppedAt: sixtyOneSecondsAgo },
        30 * 60 * 1000 // Same progress
      );
      // With 60s default threshold, should NOT group
      expect(result).toBeNull();
    });
    it('should group sessions within 60 second gap', () => {
      const thirtySecondsAgo = new Date(Date.now() - 30 * 1000);
      const result = shouldGroupWithPreviousSession(
        { ...baseSession, stoppedAt: thirtySecondsAgo },
        30 * 60 * 1000
      );
      expect(result).toBe('previous-session-id');
    });
    it('should accept configurable continued session threshold', () => {
      // Some users want longer window for continued sessions
      const customThresholdMs = 5 * 60 * 1000; // 5 minutes
      const twoMinutesAgo = new Date(Date.now() - 2 * 60 * 1000);
      // Third parameter is optional threshold in ms
      const result = shouldGroupWithPreviousSession(
        { ...baseSession, stoppedAt: twoMinutesAgo },
        30 * 60 * 1000,
        customThresholdMs // Custom threshold
      );
      // Within 5 minute threshold, should group
      expect(result).toBe('previous-session-id');
    });
  });
  describe('24 hour maximum for session grouping', () => {
    it('should NEVER group sessions more than 24 hours apart regardless of threshold', () => {
      const twentyFiveHoursAgo = new Date(Date.now() - 25 * 60 * 60 * 1000);
      const result = shouldGroupWithPreviousSession(
        {
          id: 'old-session',
          referenceId: null,
          progressMs: 30 * 60 * 1000,
          watched: false,
          stoppedAt: twentyFiveHoursAgo,
        },
        30 * 60 * 1000
      );
      expect(result).toBeNull();
    });
  });
});
// ============================================================================
// Integration Test: Full Session Lifecycle with Edge Cases
// ============================================================================
// End-to-end sanity checks combining progress + total duration through the
// completion check at realistic movie lengths.
describe('Integration: Full Session Lifecycle', () => {
  it('should handle complete watch session with 85% completion', () => {
    // 2 hour movie, watched 1h42m (85%)
    const totalMs = 2 * 60 * 60 * 1000; // 2 hours
    const progressMs = 1.7 * 60 * 60 * 1000; // 1h42m = 85%
    const watched = checkWatchCompletion(progressMs, totalMs);
    expect(watched).toBe(true);
  });
  it('should handle partial watch below 85% threshold', () => {
    // 2 hour movie, watched 1h36m (80%)
    const totalMs = 2 * 60 * 60 * 1000;
    const progressMs = 1.6 * 60 * 60 * 1000; // 1h36m = 80%
    const watched = checkWatchCompletion(progressMs, totalMs);
    // With 85% threshold, 80% is NOT watched
    expect(watched).toBe(false);
  });
});

View File

@@ -0,0 +1,454 @@
/**
* State Tracker Tests
*
* Tests session state tracking functions from poller/stateTracker.ts:
* - calculatePauseAccumulation: Track pause duration across state transitions
* - calculateStopDuration: Calculate final watch time when session stops
 * - checkWatchCompletion: Determine if content was "watched" (85% threshold)
* - shouldGroupWithPreviousSession: Link resumed sessions together
*/
import { describe, it, expect } from 'vitest';
import { randomUUID } from 'node:crypto';
import {
calculatePauseAccumulation,
calculateStopDuration,
checkWatchCompletion,
isQualityChangeScenario,
shouldGroupWithPreviousSession,
} from '../stateTracker.js';
// calculatePauseAccumulation is a pure state-machine step: given the old and
// new playback states plus the current pause bookkeeping, it returns updated
// { lastPausedAt, pausedDurationMs } without mutating its input.
describe('calculatePauseAccumulation', () => {
  describe('state transitions', () => {
    it('should record lastPausedAt when transitioning from playing to paused', () => {
      const now = new Date();
      const result = calculatePauseAccumulation(
        'playing',
        'paused',
        { lastPausedAt: null, pausedDurationMs: 0 },
        now
      );
      expect(result.lastPausedAt).toEqual(now);
      expect(result.pausedDurationMs).toBe(0);
    });
    it('should accumulate pause duration when transitioning from paused to playing', () => {
      const pauseStart = new Date('2024-01-01T10:00:00Z');
      const resumeTime = new Date('2024-01-01T10:30:00Z'); // 30 minutes later
      const result = calculatePauseAccumulation(
        'paused',
        'playing',
        { lastPausedAt: pauseStart, pausedDurationMs: 0 },
        resumeTime
      );
      expect(result.lastPausedAt).toBeNull();
      expect(result.pausedDurationMs).toBe(30 * 60 * 1000);
    });
    it('should not change anything for playing to playing transition', () => {
      const now = new Date();
      const existingSession = { lastPausedAt: null, pausedDurationMs: 5000 };
      const result = calculatePauseAccumulation('playing', 'playing', existingSession, now);
      expect(result.lastPausedAt).toBeNull();
      expect(result.pausedDurationMs).toBe(5000);
    });
    it('should not change anything for paused to paused transition', () => {
      const pausedAt = new Date('2024-01-01T10:00:00Z');
      const now = new Date('2024-01-01T10:30:00Z');
      const existingSession = { lastPausedAt: pausedAt, pausedDurationMs: 5000 };
      const result = calculatePauseAccumulation('paused', 'paused', existingSession, now);
      expect(result.lastPausedAt).toEqual(pausedAt);
      expect(result.pausedDurationMs).toBe(5000);
    });
  });
  describe('multiple pause cycles', () => {
    it('should accumulate correctly across multiple pause/resume cycles', () => {
      const times = {
        pause1: new Date('2024-01-01T10:05:00Z'),
        resume1: new Date('2024-01-01T10:10:00Z'), // 5 min pause
        pause2: new Date('2024-01-01T10:15:00Z'),
        resume2: new Date('2024-01-01T10:25:00Z'), // 10 min pause
      };
      let session = { lastPausedAt: null as Date | null, pausedDurationMs: 0 };
      // First pause
      session = calculatePauseAccumulation('playing', 'paused', session, times.pause1);
      expect(session.lastPausedAt).toEqual(times.pause1);
      // First resume - 5 min accumulated
      session = calculatePauseAccumulation('paused', 'playing', session, times.resume1);
      expect(session.pausedDurationMs).toBe(5 * 60 * 1000);
      // Second pause
      session = calculatePauseAccumulation('playing', 'paused', session, times.pause2);
      expect(session.lastPausedAt).toEqual(times.pause2);
      // Second resume - 15 min total (5 + 10)
      session = calculatePauseAccumulation('paused', 'playing', session, times.resume2);
      expect(session.pausedDurationMs).toBe(15 * 60 * 1000);
      expect(session.lastPausedAt).toBeNull();
    });
  });
});
// calculateStopDuration computes final watch time = wall clock elapsed minus
// accumulated pause time (including an in-progress pause at stop time).
describe('calculateStopDuration', () => {
  describe('basic duration calculation', () => {
    it('should calculate correct duration for session with no pauses', () => {
      const startedAt = new Date('2024-01-01T10:00:00Z');
      const stoppedAt = new Date('2024-01-01T12:00:00Z'); // 2 hours later
      const result = calculateStopDuration(
        { startedAt, lastPausedAt: null, pausedDurationMs: 0 },
        stoppedAt
      );
      expect(result.durationMs).toBe(2 * 60 * 60 * 1000);
      expect(result.finalPausedDurationMs).toBe(0);
    });
    it('should exclude accumulated pause time from duration', () => {
      const startedAt = new Date('2024-01-01T10:00:00Z');
      const stoppedAt = new Date('2024-01-01T12:00:00Z');
      const result = calculateStopDuration(
        {
          startedAt,
          lastPausedAt: null,
          pausedDurationMs: 30 * 60 * 1000, // 30 minutes paused
        },
        stoppedAt
      );
      expect(result.durationMs).toBe(1.5 * 60 * 60 * 1000);
      expect(result.finalPausedDurationMs).toBe(30 * 60 * 1000);
    });
  });
  describe('stopped while paused', () => {
    it('should include remaining pause time if stopped while paused', () => {
      const startedAt = new Date('2024-01-01T10:00:00Z');
      const pausedAt = new Date('2024-01-01T11:30:00Z');
      const stoppedAt = new Date('2024-01-01T12:00:00Z');
      const result = calculateStopDuration(
        {
          startedAt,
          lastPausedAt: pausedAt,
          pausedDurationMs: 15 * 60 * 1000, // 15 minutes already accumulated
        },
        stoppedAt
      );
      // Total elapsed: 2 hours
      // Paused: 15 min (previous) + 30 min (current) = 45 min
      // Watch time: 2 hours - 45 min = 1.25 hours
      expect(result.finalPausedDurationMs).toBe(45 * 60 * 1000);
      expect(result.durationMs).toBe(1.25 * 60 * 60 * 1000);
    });
  });
  describe('edge cases', () => {
    it('should not return negative duration', () => {
      const startedAt = new Date('2024-01-01T10:00:00Z');
      const stoppedAt = new Date('2024-01-01T10:30:00Z');
      const result = calculateStopDuration(
        {
          startedAt,
          lastPausedAt: null,
          pausedDurationMs: 60 * 60 * 1000, // More than elapsed
        },
        stoppedAt
      );
      expect(result.durationMs).toBe(0);
    });
  });
  describe('real-world scenarios', () => {
    it('should handle movie with dinner break', () => {
      const startedAt = new Date('2024-01-01T18:00:00Z');
      const stoppedAt = new Date('2024-01-01T21:00:00Z'); // 3 hours wall clock
      const result = calculateStopDuration(
        {
          startedAt,
          lastPausedAt: null,
          pausedDurationMs: 60 * 60 * 1000, // 1 hour dinner pause
        },
        stoppedAt
      );
      expect(result.durationMs).toBe(2 * 60 * 60 * 1000);
    });
  });
});
// Completion check: progress/total >= 85% counts as "watched"; null inputs
// (unknown progress or duration) never count as watched.
describe('checkWatchCompletion', () => {
  describe('85% threshold (industry standard)', () => {
    it('should return true when progress >= 85%', () => {
      expect(checkWatchCompletion(8500, 10000)).toBe(true); // Exactly 85%
      expect(checkWatchCompletion(9000, 10000)).toBe(true); // 90%
      expect(checkWatchCompletion(10000, 10000)).toBe(true); // 100%
    });
    it('should return false when progress < 85%', () => {
      expect(checkWatchCompletion(8499, 10000)).toBe(false); // Just under 85%
      expect(checkWatchCompletion(8000, 10000)).toBe(false); // 80%
      expect(checkWatchCompletion(5000, 10000)).toBe(false); // 50%
    });
  });
  describe('null handling', () => {
    it('should return false when progressMs is null', () => {
      expect(checkWatchCompletion(null, 10000)).toBe(false);
    });
    it('should return false when totalDurationMs is null', () => {
      expect(checkWatchCompletion(8000, null)).toBe(false);
    });
    it('should return false when both are null', () => {
      expect(checkWatchCompletion(null, null)).toBe(false);
    });
  });
});
// Grouping returns the id the new session should reference (the original
// session of a chain), or null when the sessions must stay separate.
describe('shouldGroupWithPreviousSession', () => {
  describe('session grouping', () => {
    it('should group when resuming from same progress within threshold', () => {
      const previousSessionId = randomUUID();
      const thirtySecondsAgo = new Date(Date.now() - 30 * 1000);
      const result = shouldGroupWithPreviousSession(
        {
          id: previousSessionId,
          referenceId: null,
          progressMs: 30 * 60 * 1000,
          watched: false,
          stoppedAt: thirtySecondsAgo,
        },
        30 * 60 * 1000
      );
      expect(result).toBe(previousSessionId);
    });
    it('should use existing referenceId for chained sessions', () => {
      const originalSessionId = randomUUID();
      const previousSessionId = randomUUID();
      const thirtySecondsAgo = new Date(Date.now() - 30 * 1000);
      const result = shouldGroupWithPreviousSession(
        {
          id: previousSessionId,
          referenceId: originalSessionId,
          progressMs: 60 * 60 * 1000,
          watched: false,
          stoppedAt: thirtySecondsAgo,
        },
        60 * 60 * 1000
      );
      expect(result).toBe(originalSessionId);
    });
  });
  describe('no grouping conditions', () => {
    it('should not group if previous session was fully watched', () => {
      const thirtySecondsAgo = new Date(Date.now() - 30 * 1000);
      const result = shouldGroupWithPreviousSession(
        {
          id: randomUUID(),
          referenceId: null,
          progressMs: 90 * 60 * 1000,
          watched: true,
          stoppedAt: thirtySecondsAgo,
        },
        0
      );
      expect(result).toBeNull();
    });
    it('should not group if previous session is older than 24 hours', () => {
      const twoDaysAgo = new Date(Date.now() - 48 * 60 * 60 * 1000);
      const result = shouldGroupWithPreviousSession(
        {
          id: randomUUID(),
          referenceId: null,
          progressMs: 30 * 60 * 1000,
          watched: false,
          stoppedAt: twoDaysAgo,
        },
        30 * 60 * 1000
      );
      expect(result).toBeNull();
    });
    it('should not group if user rewound (new progress < previous)', () => {
      const thirtySecondsAgo = new Date(Date.now() - 30 * 1000);
      const result = shouldGroupWithPreviousSession(
        {
          id: randomUUID(),
          referenceId: null,
          progressMs: 60 * 60 * 1000,
          watched: false,
          stoppedAt: thirtySecondsAgo,
        },
        30 * 60 * 1000 // Rewound
      );
      expect(result).toBeNull();
    });
    it('should not group if gap exceeds default threshold (60s)', () => {
      const twoMinutesAgo = new Date(Date.now() - 2 * 60 * 1000);
      const result = shouldGroupWithPreviousSession(
        {
          id: randomUUID(),
          referenceId: null,
          progressMs: 30 * 60 * 1000,
          watched: false,
          stoppedAt: twoMinutesAgo,
        },
        30 * 60 * 1000
      );
      expect(result).toBeNull();
    });
  });
});
// Cross-function scenarios: pause accumulation feeding stop-duration math,
// and multi-hop session chains always referencing the first session.
describe('Integration: Complete Watch Session', () => {
  it('should handle complete watch session with multiple pauses', () => {
    const times = {
      start: new Date('2024-01-01T10:00:00Z'),
      pause1: new Date('2024-01-01T10:30:00Z'),
      resume1: new Date('2024-01-01T10:45:00Z'), // 15 min pause
      pause2: new Date('2024-01-01T11:30:00Z'),
      resume2: new Date('2024-01-01T12:00:00Z'), // 30 min pause
      stop: new Date('2024-01-01T12:45:00Z'),
    };
    let session = { lastPausedAt: null as Date | null, pausedDurationMs: 0 };
    session = calculatePauseAccumulation('playing', 'paused', session, times.pause1);
    session = calculatePauseAccumulation('paused', 'playing', session, times.resume1);
    session = calculatePauseAccumulation('playing', 'paused', session, times.pause2);
    session = calculatePauseAccumulation('paused', 'playing', session, times.resume2);
    expect(session.pausedDurationMs).toBe(45 * 60 * 1000);
    const result = calculateStopDuration(
      { startedAt: times.start, ...session },
      times.stop
    );
    // Wall clock: 2h 45m, Paused: 45m, Watch time: 2h
    expect(result.durationMs).toBe(120 * 60 * 1000);
  });
  it('should correctly chain session groups', () => {
    const session1Id = randomUUID();
    const thirtySecondsAgo = new Date(Date.now() - 30 * 1000);
    // First resume - links to session1
    const ref1 = shouldGroupWithPreviousSession(
      {
        id: session1Id,
        referenceId: null,
        progressMs: 30 * 60 * 1000,
        watched: false,
        stoppedAt: thirtySecondsAgo,
      },
      30 * 60 * 1000
    );
    expect(ref1).toBe(session1Id);
    // Second resume - should still link to original
    const session2Id = randomUUID();
    const ref2 = shouldGroupWithPreviousSession(
      {
        id: session2Id,
        referenceId: session1Id,
        progressMs: 60 * 60 * 1000,
        watched: false,
        stoppedAt: thirtySecondsAgo,
      },
      60 * 60 * 1000
    );
    expect(ref2).toBe(session1Id);
  });
});
// Quality-change detection: a still-active session (stoppedAt === null) for
// the same user+content means the player restarted the stream at a new
// quality; the new session should chain to the original.
describe('isQualityChangeScenario', () => {
  describe('quality change detection', () => {
    it('should return session id when active session exists for same user+content', () => {
      const sessionId = randomUUID();
      const result = isQualityChangeScenario({
        id: sessionId,
        referenceId: null,
        stoppedAt: null, // Active session
      });
      expect(result).toBe(sessionId);
    });
    it('should return original referenceId when session is already part of a chain', () => {
      const originalSessionId = randomUUID();
      const currentSessionId = randomUUID();
      const result = isQualityChangeScenario({
        id: currentSessionId,
        referenceId: originalSessionId, // Already linked to original
        stoppedAt: null,
      });
      expect(result).toBe(originalSessionId);
    });
  });
  describe('non-quality-change scenarios', () => {
    it('should return null when no existing session', () => {
      expect(isQualityChangeScenario(null)).toBeNull();
      expect(isQualityChangeScenario(undefined)).toBeNull();
    });
    it('should return null when session is already stopped (resume scenario)', () => {
      const result = isQualityChangeScenario({
        id: randomUUID(),
        referenceId: null,
        stoppedAt: new Date(), // Session stopped - not a quality change
      });
      expect(result).toBeNull();
    });
    it('should return null for stopped session even with referenceId', () => {
      const result = isQualityChangeScenario({
        id: randomUUID(),
        referenceId: randomUUID(),
        stoppedAt: new Date(), // Session stopped
      });
      expect(result).toBeNull();
    });
  });
});

View File

@@ -0,0 +1,308 @@
/**
* Poller Utility Functions Tests
*
* Tests pure utility functions from poller/utils.ts:
* - formatQualityString: Format bitrate for display
* - isPrivateIP: Detect private/local IP addresses
* - parseJellyfinClient: Extract client info from Jellyfin user agent
*/
import { describe, it, expect } from 'vitest';
import { formatQualityString, isPrivateIP, parseJellyfinClient } from '../utils.js';
// formatQualityString(transcodeBitrate, sourceBitrate, isTranscoding):
// prefers the transcode bitrate, falls back to source, then to a label.
describe('formatQualityString', () => {
  describe('bitrate formatting', () => {
    it('should format transcode bitrate in Mbps', () => {
      expect(formatQualityString(8000000, 0, false)).toBe('8Mbps');
      expect(formatQualityString(10000000, 0, true)).toBe('10Mbps');
    });
    it('should fall back to source bitrate when transcode bitrate is 0', () => {
      expect(formatQualityString(0, 12000000, false)).toBe('12Mbps');
    });
    it('should round bitrate correctly', () => {
      expect(formatQualityString(8500000, 0, false)).toBe('9Mbps'); // Rounds up
      expect(formatQualityString(8400000, 0, false)).toBe('8Mbps'); // Rounds down
    });
  });
  describe('fallback labels', () => {
    it('should return "Transcoding" when no bitrate but is transcoding', () => {
      expect(formatQualityString(0, 0, true)).toBe('Transcoding');
    });
    it('should return "Direct" when no bitrate and not transcoding', () => {
      expect(formatQualityString(0, 0, false)).toBe('Direct');
    });
  });
});
// isPrivateIP covers RFC 1918 IPv4 ranges, loopback, link-local, 0.0.0.0/8,
// and the private IPv6 ranges (::1, fe80::/10, fc00::/7). Unknown/empty
// input is conservatively treated as private.
describe('isPrivateIP', () => {
  describe('IPv4 private ranges', () => {
    it('should detect 10.x.x.x as private (10.0.0.0/8)', () => {
      expect(isPrivateIP('10.0.0.1')).toBe(true);
      expect(isPrivateIP('10.255.255.255')).toBe(true);
      expect(isPrivateIP('10.123.45.67')).toBe(true);
    });
    it('should detect 172.16-31.x.x as private (172.16.0.0/12)', () => {
      expect(isPrivateIP('172.16.0.1')).toBe(true);
      expect(isPrivateIP('172.31.255.255')).toBe(true);
      expect(isPrivateIP('172.20.10.5')).toBe(true);
    });
    it('should NOT detect 172.15.x.x or 172.32.x.x as private', () => {
      expect(isPrivateIP('172.15.0.1')).toBe(false);
      expect(isPrivateIP('172.32.0.1')).toBe(false);
    });
    it('should detect 192.168.x.x as private (192.168.0.0/16)', () => {
      expect(isPrivateIP('192.168.0.1')).toBe(true);
      expect(isPrivateIP('192.168.1.1')).toBe(true);
      expect(isPrivateIP('192.168.255.255')).toBe(true);
    });
    it('should detect 127.x.x.x as private (loopback)', () => {
      expect(isPrivateIP('127.0.0.1')).toBe(true);
      expect(isPrivateIP('127.255.255.255')).toBe(true);
    });
    it('should detect 169.254.x.x as private (link-local)', () => {
      expect(isPrivateIP('169.254.0.1')).toBe(true);
      expect(isPrivateIP('169.254.255.255')).toBe(true);
    });
    it('should detect 0.x.x.x as private (current network)', () => {
      expect(isPrivateIP('0.0.0.0')).toBe(true);
      expect(isPrivateIP('0.1.2.3')).toBe(true);
    });
  });
  describe('IPv4 public addresses', () => {
    it('should NOT detect public IPs as private', () => {
      expect(isPrivateIP('8.8.8.8')).toBe(false); // Google DNS
      expect(isPrivateIP('1.1.1.1')).toBe(false); // Cloudflare DNS
      expect(isPrivateIP('142.250.80.46')).toBe(false); // Google
      expect(isPrivateIP('151.101.1.140')).toBe(false); // Reddit
      expect(isPrivateIP('203.0.113.50')).toBe(false); // Documentation range but public
    });
  });
  describe('IPv6 private ranges', () => {
    it('should detect ::1 as private (loopback)', () => {
      expect(isPrivateIP('::1')).toBe(true);
    });
    it('should detect fe80: as private (link-local)', () => {
      expect(isPrivateIP('fe80::1')).toBe(true);
      expect(isPrivateIP('fe80:0:0:0:0:0:0:1')).toBe(true);
      expect(isPrivateIP('FE80::abcd:1234')).toBe(true); // Case insensitive
    });
    it('should detect fc/fd as private (unique local)', () => {
      expect(isPrivateIP('fc00::1')).toBe(true);
      expect(isPrivateIP('fd00::1')).toBe(true);
      expect(isPrivateIP('fdab:cdef:1234::1')).toBe(true);
    });
  });
  describe('IPv6 public addresses', () => {
    it('should NOT detect public IPv6 as private', () => {
      expect(isPrivateIP('2001:4860:4860::8888')).toBe(false); // Google DNS
      expect(isPrivateIP('2606:4700:4700::1111')).toBe(false); // Cloudflare DNS
    });
  });
  describe('edge cases', () => {
    it('should treat empty string as private', () => {
      expect(isPrivateIP('')).toBe(true);
    });
    it('should treat null-like values as private', () => {
      expect(isPrivateIP(null as unknown as string)).toBe(true);
      expect(isPrivateIP(undefined as unknown as string)).toBe(true);
    });
  });
});
describe('parseJellyfinClient', () => {
describe('iOS devices', () => {
it('should parse "Jellyfin iOS" as iOS/iPhone', () => {
const result = parseJellyfinClient('Jellyfin iOS');
expect(result.platform).toBe('iOS');
expect(result.device).toBe('iPhone');
});
it('should parse clients containing "iphone" as iOS/iPhone', () => {
const result = parseJellyfinClient('Jellyfin for iPhone');
expect(result.platform).toBe('iOS');
expect(result.device).toBe('iPhone');
});
it('should parse "Jellyfin iPad" as iOS/iPad', () => {
const result = parseJellyfinClient('Jellyfin iPad');
expect(result.platform).toBe('iOS');
expect(result.device).toBe('iPad');
});
it('should be case insensitive for iOS detection', () => {
expect(parseJellyfinClient('jellyfin IOS').platform).toBe('iOS');
expect(parseJellyfinClient('JELLYFIN iOS').platform).toBe('iOS');
});
});
describe('Android devices', () => {
it('should parse "Jellyfin Android" as Android/Android', () => {
const result = parseJellyfinClient('Jellyfin Android');
expect(result.platform).toBe('Android');
expect(result.device).toBe('Android');
});
it('should parse Android TV clients as Android TV', () => {
const result = parseJellyfinClient('Jellyfin Android TV');
expect(result.platform).toBe('Android TV');
expect(result.device).toBe('Android TV');
});
it('should parse Shield clients as Android TV', () => {
const result = parseJellyfinClient('Jellyfin for Shield');
expect(result.platform).toBe('Android TV');
expect(result.device).toBe('Android TV');
});
it('should parse NVIDIA Shield with Android in name', () => {
const result = parseJellyfinClient('Jellyfin Android Shield');
expect(result.platform).toBe('Android TV');
expect(result.device).toBe('Android TV');
});
it('should parse just "Shield" as Android TV', () => {
const result = parseJellyfinClient('Shield');
expect(result.platform).toBe('Android TV');
expect(result.device).toBe('Android TV');
});
});
describe('Smart TVs', () => {
it('should parse Samsung/Tizen clients as Samsung TV', () => {
expect(parseJellyfinClient('Jellyfin Samsung')).toEqual({
platform: 'Tizen',
device: 'Samsung TV',
});
expect(parseJellyfinClient('Jellyfin Tizen')).toEqual({
platform: 'Tizen',
device: 'Samsung TV',
});
});
it('should parse LG/webOS clients as LG TV', () => {
expect(parseJellyfinClient('Jellyfin webOS')).toEqual({
platform: 'webOS',
device: 'LG TV',
});
expect(parseJellyfinClient('Jellyfin LG')).toEqual({
platform: 'webOS',
device: 'LG TV',
});
});
it('should parse Roku clients as Roku', () => {
expect(parseJellyfinClient('Jellyfin Roku')).toEqual({
platform: 'Roku',
device: 'Roku',
});
});
});
describe('Apple TV', () => {
it('should parse tvOS clients as Apple TV', () => {
expect(parseJellyfinClient('Jellyfin tvOS')).toEqual({
platform: 'tvOS',
device: 'Apple TV',
});
});
it('should parse "Apple TV" in client name as Apple TV', () => {
expect(parseJellyfinClient('Jellyfin Apple TV')).toEqual({
platform: 'tvOS',
device: 'Apple TV',
});
});
it('should parse Swiftfin as Apple TV', () => {
expect(parseJellyfinClient('Swiftfin')).toEqual({
platform: 'tvOS',
device: 'Apple TV',
});
});
});
describe('Web browsers', () => {
it('should parse "Jellyfin Web" as Web/Browser', () => {
expect(parseJellyfinClient('Jellyfin Web')).toEqual({
platform: 'Web',
device: 'Browser',
});
});
});
describe('Media players', () => {
it('should parse Kodi clients as Kodi', () => {
expect(parseJellyfinClient('Kodi')).toEqual({
platform: 'Kodi',
device: 'Kodi',
});
expect(parseJellyfinClient('Jellyfin for Kodi')).toEqual({
platform: 'Kodi',
device: 'Kodi',
});
});
it('should parse Infuse clients as Infuse', () => {
expect(parseJellyfinClient('Infuse')).toEqual({
platform: 'Infuse',
device: 'Infuse',
});
});
});
describe('deviceType parameter', () => {
it('should use deviceType when provided and meaningful', () => {
const result = parseJellyfinClient('Custom Client', 'Smart TV');
expect(result.platform).toBe('Custom Client');
expect(result.device).toBe('Smart TV');
});
it('should ignore deviceType when empty', () => {
const result = parseJellyfinClient('Jellyfin iOS', '');
expect(result.platform).toBe('iOS');
expect(result.device).toBe('iPhone');
});
it('should ignore deviceType when "Unknown"', () => {
const result = parseJellyfinClient('Jellyfin Android', 'Unknown');
expect(result.platform).toBe('Android');
expect(result.device).toBe('Android');
});
it('should prefer deviceType over parsing when deviceType is meaningful', () => {
const result = parseJellyfinClient('Jellyfin iOS', 'Custom Device');
expect(result.device).toBe('Custom Device');
});
});
describe('fallback behavior', () => {
it('should use client name as fallback for unknown clients', () => {
const result = parseJellyfinClient('Unknown Client App');
expect(result.platform).toBe('Unknown Client App');
expect(result.device).toBe('Unknown Client App');
});
it('should handle empty client string', () => {
const result = parseJellyfinClient('');
expect(result.platform).toBe('Unknown');
expect(result.device).toBe('Unknown');
});
});
});

View File

@@ -0,0 +1,48 @@
/**
* Violations Module Tests
*
* Tests rule/violation functions from poller/violations.ts:
* - getTrustScorePenalty: Map violation severity to trust score penalty
* - doesRuleApplyToUser: Check if a rule applies to a specific user
*/
import { describe, it, expect } from 'vitest';
import { randomUUID } from 'node:crypto';
import { getTrustScorePenalty, doesRuleApplyToUser } from '../violations.js';
describe('getTrustScorePenalty', () => {
  describe('severity mapping', () => {
    // Table-driven: each severity maps to a fixed penalty.
    const cases = [
      ['high', 20],
      ['warning', 10],
      ['low', 5],
    ] as const;
    for (const [severity, penalty] of cases) {
      it(`should return ${penalty} for ${severity.toUpperCase()} severity`, () => {
        expect(getTrustScorePenalty(severity)).toBe(penalty);
      });
    }
  });
});
describe('doesRuleApplyToUser', () => {
  describe('global rules', () => {
    it('should apply global rules (serverUserId=null) to any user', () => {
      // A null serverUserId marks the rule as global; any random user matches.
      const globalRule = { serverUserId: null };
      for (let i = 0; i < 2; i += 1) {
        expect(doesRuleApplyToUser(globalRule, randomUUID())).toBe(true);
      }
    });
  });
  describe('user-specific rules', () => {
    it('should apply user-specific rule only to that user', () => {
      const ownerId = randomUUID();
      const scopedRule = { serverUserId: ownerId };
      expect(doesRuleApplyToUser(scopedRule, ownerId)).toBe(true);
      expect(doesRuleApplyToUser(scopedRule, randomUUID())).toBe(false);
    });
  });
});

View File

@@ -0,0 +1,94 @@
/**
* Poller Database Operations
*
* Database query functions used by the poller.
* Includes batch loading for performance optimization and rule fetching.
*/
import { eq, and, desc, gte, inArray } from 'drizzle-orm';
import { TIME_MS, SESSION_LIMITS, type Session, type Rule, type RuleParams } from '@tracearr/shared';
import { db } from '../../db/client.js';
import { sessions, rules } from '../../db/schema.js';
import { mapSessionRow } from './sessionMapper.js';
// ============================================================================
// Session Batch Loading
// ============================================================================
/**
 * Batch load recent sessions for multiple server users (eliminates N+1 in polling loop)
 *
 * Instead of one query per user, a single `inArray` query fetches every
 * session started within the lookback window for the whole batch; rows are
 * then bucketed per user in memory.
 *
 * @param serverUserIds - Array of server user IDs to load sessions for
 * @param hours - Number of hours to look back (default: 24)
 * @returns Map of serverUserId -> Session[] (every requested ID has an entry,
 *   possibly empty; at most SESSION_LIMITS.MAX_RECENT_PER_USER sessions each)
 *
 * @example
 * const sessionMap = await batchGetRecentUserSessions(['su-1', 'su-2', 'su-3']);
 * const user1Sessions = sessionMap.get('su-1') ?? [];
 */
export async function batchGetRecentUserSessions(
  serverUserIds: string[],
  hours = 24
): Promise<Map<string, Session[]>> {
  const grouped = new Map<string, Session[]>();
  if (serverUserIds.length === 0) return grouped;

  // Every requested user gets an entry, even when no sessions are found.
  for (const id of serverUserIds) grouped.set(id, []);

  const cutoff = new Date(Date.now() - hours * TIME_MS.HOUR);

  // One query for the whole batch instead of one per user.
  const rows = await db
    .select()
    .from(sessions)
    .where(and(
      inArray(sessions.serverUserId, serverUserIds),
      gte(sessions.startedAt, cutoff)
    ))
    .orderBy(desc(sessions.startedAt));

  // Rows arrive newest-first; cap each user's bucket to bound memory use.
  for (const row of rows) {
    const bucket = grouped.get(row.serverUserId);
    if (bucket && bucket.length < SESSION_LIMITS.MAX_RECENT_PER_USER) {
      bucket.push(mapSessionRow(row));
    }
  }
  return grouped;
}
// ============================================================================
// Rule Loading
// ============================================================================
/**
 * Get all active rules for evaluation
 *
 * Rows are copied field-by-field into the shared Rule shape; `params` is
 * stored loosely in the database, so it is cast to RuleParams here.
 *
 * @returns Array of active Rule objects
 *
 * @example
 * const rules = await getActiveRules();
 * // Evaluate each session against these rules
 */
export async function getActiveRules(): Promise<Rule[]> {
  const rows = await db.select().from(rules).where(eq(rules.isActive, true));
  return rows.map((row): Rule => ({
    id: row.id,
    name: row.name,
    type: row.type,
    params: row.params as unknown as RuleParams,
    serverUserId: row.serverUserId,
    isActive: row.isActive,
    createdAt: row.createdAt,
    updatedAt: row.updatedAt,
  }));
}

View File

@@ -0,0 +1,78 @@
/**
* Poller Module
*
* Background job for polling Plex/Jellyfin servers for active sessions.
* This module provides a unified interface for session tracking, including:
* - Automatic polling on configurable intervals
* - Session state tracking (playing, paused, stopped)
* - Pause duration accumulation
* - Watch completion detection (85% threshold)
* - Session grouping for resume tracking
* - Rule evaluation and violation creation
* - Stale session detection and force-stop (5 minute timeout, 60s sweep)
* - Minimum play time filtering (120s threshold)
*
* @example
* import { initializePoller, startPoller, stopPoller, sweepStaleSessions } from './jobs/poller';
*
* // Initialize with cache services and Redis client
* initializePoller(cacheService, pubSubService, redis);
*
* // Start polling (also starts stale session sweep on 60s interval)
* startPoller({ enabled: true, intervalMs: 15000 });
*
* // Manually trigger stale session sweep
* await sweepStaleSessions();
*
* // Stop polling (also stops stale session sweep)
* stopPoller();
*/
// ============================================================================
// Public API - Lifecycle Management
// ============================================================================
export {
  initializePoller, // wire in cache/pub-sub services + Redis before starting
  startPoller, // begins interval polling (also starts the stale-session sweep)
  stopPoller, // halts polling (also stops the stale-session sweep)
  triggerPoll,
  triggerReconciliationPoll,
  sweepStaleSessions, // manually run one stale-session sweep
} from './processor.js';
// ============================================================================
// Types
// ============================================================================
export type { PollerConfig } from './types.js';
// ============================================================================
// Pure Utility Functions (exported for testing)
// ============================================================================
export {
  isPrivateIP,
  parseJellyfinClient,
  // NOTE(review): sessionMapper.ts defines its own private formatQualityString
  // alongside this utils export — confirm the two implementations stay in sync.
  formatQualityString,
} from './utils.js';
// ============================================================================
// State Tracking Functions (exported for testing)
// ============================================================================
export {
  calculatePauseAccumulation,
  calculateStopDuration,
  checkWatchCompletion,
  shouldGroupWithPreviousSession,
} from './stateTracker.js';
// ============================================================================
// Rule/Violation Functions (exported for testing)
// ============================================================================
export {
  getTrustScorePenalty,
  doesRuleApplyToUser,
} from './violations.js';

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,222 @@
/**
* Session Mapping Functions
*
* Functions to transform sessions between different formats:
* - MediaSession (from mediaServer adapter) → ProcessedSession (for DB storage)
* - Database row → Session type (for application use)
*/
import type { Session } from '@tracearr/shared';
import type { MediaSession } from '../../services/mediaServer/types.js';
import type { ProcessedSession } from './types.js';
import { parseJellyfinClient } from './utils.js';
import type { sessions } from '../../db/schema.js';
// ============================================================================
// Quality Formatting
// ============================================================================
/**
 * Format video resolution to a user-friendly string.
 * Normalizes the various raw formats the APIs report to standard labels.
 *
 * @param resolution - Raw resolution string from API (e.g., "4k", "1080", "720p", "sd")
 * @param height - Video height in pixels, used as a fallback
 * @returns Formatted resolution string (e.g., "4K", "1080p", "720p", "SD"),
 *   or null when neither input yields a label
 */
function formatResolution(resolution?: string, height?: number): string | null {
  if (resolution) {
    const normalized = resolution.toLowerCase();
    // Known aliases mapped to their canonical display label.
    const aliases: Record<string, string> = {
      '4k': '4K',
      '2160': '4K',
      '2160p': '4K',
      '1080': '1080p',
      '1080p': '1080p',
      '720': '720p',
      '720p': '720p',
      '480': '480p',
      '480p': '480p',
      sd: 'SD',
    };
    const canonical = aliases[normalized];
    if (canonical) {
      return canonical;
    }
    // Bare numbers get a 'p' suffix; anything else passes through unchanged
    // (it may already be formatted, e.g. "1080p" variants not listed above).
    return /^\d+$/.test(normalized) ? `${normalized}p` : resolution;
  }
  // Fall back to bucketing the pixel height.
  if (height) {
    if (height >= 2160) return '4K';
    if (height >= 1080) return '1080p';
    if (height >= 720) return '720p';
    if (height >= 480) return '480p';
    return 'SD';
  }
  return null;
}
/**
 * Build quality display string from session quality data.
 * Tries, in order: resolution label, bitrate in Mbps, transcode status.
 *
 * @param quality - Session quality object from MediaSession
 * @returns Quality string for display (e.g., "4K", "1080p", "54Mbps", "Direct")
 */
function formatQualityString(quality: MediaSession['quality']): string {
  // 1) Resolution label when one can be derived.
  const label = formatResolution(quality.videoResolution, quality.videoHeight);
  if (label !== null) {
    return label;
  }
  // 2) Bitrate (reported in kbps) rendered as whole Mbps.
  if (quality.bitrate > 0) {
    return `${Math.round(quality.bitrate / 1000)}Mbps`;
  }
  // 3) Last resort: transcode status.
  if (quality.isTranscode) {
    return 'Transcoding';
  }
  return 'Direct';
}
// ============================================================================
// MediaSession → ProcessedSession Mapping
// ============================================================================
/**
 * Map unified MediaSession to ProcessedSession format.
 * Works for Plex, Jellyfin, and Emby sessions from the adapter; Jellyfin and
 * Emby go through client-string parsing when platform/device are blank.
 *
 * NOTE(review): when the server reports device as the literal 'Unknown', the
 * truthy fallback below keeps 'Unknown' even though parseJellyfinClient itself
 * ignores an 'Unknown' deviceType — confirm whether that is intended.
 *
 * @param session - Unified MediaSession from the mediaServer adapter
 * @param serverType - Type of media server ('plex' | 'jellyfin' | 'emby')
 * @returns ProcessedSession ready for database storage
 *
 * @example
 * const processed = mapMediaSession(mediaSession, 'plex');
 * // Use processed for DB insert
 */
export function mapMediaSession(
  session: MediaSession,
  serverType: 'plex' | 'jellyfin' | 'emby'
): ProcessedSession {
  const { media, player, playback, user, episode } = session;

  // Episodes prefer the show-level poster; otherwise fall back to media art.
  const thumbPath =
    media.type === 'episode' && episode?.showThumbPath
      ? episode.showThumbPath
      : media.thumbPath ?? '';

  // Display quality: resolution when derivable, bitrate otherwise.
  const qualityLabel = formatQualityString(session.quality);

  // Jellyfin/Emby often leave platform/device blank and only supply a client
  // product string, so parse that as a fallback (Plex reports them directly).
  let platform = player.platform ?? '';
  let device = player.device ?? '';
  if (serverType !== 'plex' && player.product) {
    const parsed = parseJellyfinClient(player.product, device);
    if (!platform) platform = parsed.platform;
    if (!device) device = parsed.device;
  }

  let mediaType: ProcessedSession['mediaType'];
  if (media.type === 'movie') {
    mediaType = 'movie';
  } else if (media.type === 'episode') {
    mediaType = 'episode';
  } else {
    mediaType = 'track';
  }

  return {
    sessionKey: session.sessionKey,
    plexSessionId: session.plexSessionId,
    ratingKey: session.mediaId,
    // User data
    externalUserId: user.id,
    username: user.username || 'Unknown',
    userThumb: user.thumb ?? '',
    mediaTitle: media.title,
    mediaType,
    // Enhanced media metadata (''/0 sentinels when absent)
    grandparentTitle: episode?.showTitle ?? '',
    seasonNumber: episode?.seasonNumber ?? 0,
    episodeNumber: episode?.episodeNumber ?? 0,
    year: media.year ?? 0,
    thumbPath,
    // Connection info — the IP is kept as-is; GeoIP handles private ranges
    ipAddress: session.network.ipAddress,
    playerName: player.name,
    deviceId: player.deviceId,
    product: player.product ?? '',
    device,
    platform,
    quality: qualityLabel,
    isTranscode: session.quality.isTranscode,
    bitrate: session.quality.bitrate,
    state: playback.state === 'paused' ? 'paused' : 'playing',
    totalDurationMs: media.durationMs,
    progressMs: playback.positionMs,
    // Jellyfin provides an exact pause timestamp for more accurate tracking
    lastPausedDate: session.lastPausedDate,
  };
}
// ============================================================================
// Database Row → Session Mapping
// ============================================================================
/**
 * Map a database session row to the Session type
 *
 * Copies fields explicitly (rather than spreading the row) so the returned
 * object contains exactly the shared Session shape.
 *
 * @param s - Database session row from drizzle select
 * @returns Session object for application use
 *
 * @example
 * const rows = await db.select().from(sessions).where(...);
 * const sessionObjects = rows.map(mapSessionRow);
 */
export function mapSessionRow(s: typeof sessions.$inferSelect): Session {
  return {
    // Identity / ownership
    id: s.id,
    serverId: s.serverId,
    serverUserId: s.serverUserId,
    sessionKey: s.sessionKey,
    // Playback state and media metadata
    state: s.state,
    mediaType: s.mediaType,
    mediaTitle: s.mediaTitle,
    grandparentTitle: s.grandparentTitle,
    seasonNumber: s.seasonNumber,
    episodeNumber: s.episodeNumber,
    year: s.year,
    thumbPath: s.thumbPath,
    ratingKey: s.ratingKey,
    externalSessionId: s.externalSessionId,
    // Timing / progress tracking
    startedAt: s.startedAt,
    stoppedAt: s.stoppedAt,
    durationMs: s.durationMs,
    totalDurationMs: s.totalDurationMs,
    progressMs: s.progressMs,
    lastPausedAt: s.lastPausedAt,
    pausedDurationMs: s.pausedDurationMs,
    // Resume-chain link and completion flag
    referenceId: s.referenceId,
    watched: s.watched,
    // Network / geo enrichment
    ipAddress: s.ipAddress,
    geoCity: s.geoCity,
    geoRegion: s.geoRegion,
    geoCountry: s.geoCountry,
    geoLat: s.geoLat,
    geoLon: s.geoLon,
    // Client / quality details
    playerName: s.playerName,
    deviceId: s.deviceId,
    product: s.product,
    device: s.device,
    platform: s.platform,
    quality: s.quality,
    isTranscode: s.isTranscode,
    bitrate: s.bitrate,
  };
}

View File

@@ -0,0 +1,295 @@
/**
* Session State Tracking
*
* Pure functions for tracking session state transitions, pause accumulation,
* watch completion, and session grouping (resume detection).
*/
import { SESSION_LIMITS, type SessionState } from '@tracearr/shared';
import type { PauseAccumulationResult, StopDurationResult, SessionPauseData } from './types.js';
// ============================================================================
// Pause Tracking
// ============================================================================
/**
 * Calculate pause accumulation when session state changes.
 * Only the playing→paused and paused→playing transitions alter the pause
 * tracking data; every other transition carries it through unchanged.
 *
 * @param previousState - Previous playback state
 * @param newState - New playback state
 * @param existingSession - Current session pause data
 * @param now - Current timestamp
 * @returns Updated pause tracking data
 *
 * @example
 * // Starting to pause
 * calculatePauseAccumulation('playing', 'paused', { lastPausedAt: null, pausedDurationMs: 0 }, now);
 * // Returns: { lastPausedAt: now, pausedDurationMs: 0 }
 *
 * // Resuming playback after 5 minutes paused
 * calculatePauseAccumulation('paused', 'playing', { lastPausedAt: fiveMinutesAgo, pausedDurationMs: 0 }, now);
 * // Returns: { lastPausedAt: null, pausedDurationMs: 300000 }
 */
export function calculatePauseAccumulation(
  previousState: SessionState,
  newState: SessionState,
  existingSession: { lastPausedAt: Date | null; pausedDurationMs: number },
  now: Date
): PauseAccumulationResult {
  const isPausing = previousState === 'playing' && newState === 'paused';
  const isResuming = previousState === 'paused' && newState === 'playing';

  if (isPausing) {
    // Pause just began: stamp its start, keep the accumulated total.
    return { lastPausedAt: now, pausedDurationMs: existingSession.pausedDurationMs };
  }

  if (isResuming) {
    // Pause just ended: fold the elapsed pause interval into the total.
    // Without a recorded pause start there is nothing to accumulate.
    const { lastPausedAt, pausedDurationMs } = existingSession;
    const accumulated = lastPausedAt
      ? (pausedDurationMs || 0) + (now.getTime() - lastPausedAt.getTime())
      : pausedDurationMs;
    return { lastPausedAt: null, pausedDurationMs: accumulated };
  }

  // No pause-related transition: pass the existing data through.
  return {
    lastPausedAt: existingSession.lastPausedAt,
    pausedDurationMs: existingSession.pausedDurationMs,
  };
}
/**
 * Calculate final duration when a session is stopped.
 * If the session ended while paused, the still-open pause interval is added
 * to the paused total before computing the actual watch time.
 *
 * @param session - Session pause tracking data
 * @param stoppedAt - Timestamp when session stopped
 * @returns Actual watch duration and final paused duration
 *
 * @example
 * // Session that was playing when stopped
 * calculateStopDuration({ startedAt: tenMinutesAgo, lastPausedAt: null, pausedDurationMs: 60000 }, now);
 * // Returns: { durationMs: 540000, finalPausedDurationMs: 60000 } (9 min watch, 1 min paused)
 *
 * // Session that was paused when stopped (adds remaining pause time)
 * calculateStopDuration({ startedAt: tenMinutesAgo, lastPausedAt: twoMinutesAgo, pausedDurationMs: 60000 }, now);
 * // Returns: { durationMs: 420000, finalPausedDurationMs: 180000 } (7 min watch, 3 min paused)
 */
export function calculateStopDuration(
  session: SessionPauseData,
  stoppedAt: Date
): StopDurationResult {
  const stopMs = stoppedAt.getTime();

  // Start from the pause time accumulated while the session was alive.
  let finalPausedDurationMs = session.pausedDurationMs || 0;

  // Stopped mid-pause: the open pause interval counts too.
  if (session.lastPausedAt) {
    finalPausedDurationMs += stopMs - session.lastPausedAt.getTime();
  }

  // Watch time = wall clock minus pauses, clamped at zero.
  const wallClockMs = stopMs - session.startedAt.getTime();
  const durationMs = Math.max(0, wallClockMs - finalPausedDurationMs);

  return { durationMs, finalPausedDurationMs };
}
// ============================================================================
// Stale Session Detection
// ============================================================================
/**
 * Determine if a session should be force-stopped due to inactivity.
 * A session is stale once the time since its last update exceeds the timeout.
 *
 * @param lastSeenAt - Last update timestamp for the session
 * @param timeoutSeconds - Optional custom timeout in seconds, defaults to 5 minutes
 * @returns true if the session should be force-stopped
 *
 * @example
 * // Session last seen 6 minutes ago
 * shouldForceStopStaleSession(sixMinutesAgo); // true
 *
 * // Session last seen 3 minutes ago
 * shouldForceStopStaleSession(threeMinutesAgo); // false
 *
 * // Exactly at threshold (5 minutes) - NOT stale yet
 * shouldForceStopStaleSession(fiveMinutesAgo); // false
 */
export function shouldForceStopStaleSession(
  lastSeenAt: Date,
  timeoutSeconds: number = SESSION_LIMITS.STALE_SESSION_TIMEOUT_SECONDS
): boolean {
  const idleMs = Date.now() - lastSeenAt.getTime();
  // '>' not '>=': a session sitting exactly at the threshold is not yet stale.
  return idleMs > timeoutSeconds * 1000;
}
// ============================================================================
// Minimum Play Time Filtering
// ============================================================================
/**
 * Determine if a session should be recorded based on minimum play time.
 * Sessions shorter than the threshold are filtered out to reduce noise.
 *
 * @param durationMs - Session duration in milliseconds
 * @param minPlayTimeMs - Optional custom minimum play time, defaults to 2 minutes
 * @returns true if the session should be recorded
 *
 * @example
 * shouldRecordSession(60 * 1000); // false (1 min < 2 min threshold)
 * shouldRecordSession(120 * 1000); // true (exactly 2 min threshold)
 * shouldRecordSession(180 * 1000); // true (3 min > threshold)
 * shouldRecordSession(1000, 0); // true (no minimum when 0)
 */
export function shouldRecordSession(
  durationMs: number,
  minPlayTimeMs: number = SESSION_LIMITS.MIN_PLAY_TIME_MS
): boolean {
  // A zero minimum disables filtering entirely; otherwise the duration must
  // meet the threshold (inclusive).
  return minPlayTimeMs === 0 || durationMs >= minPlayTimeMs;
}
// ============================================================================
// Watch Completion
// ============================================================================
/**
 * Check if a session should be marked as "watched" based on progress threshold.
 *
 * @param progressMs - Current playback position in milliseconds
 * @param totalDurationMs - Total media duration in milliseconds
 * @param threshold - Optional custom threshold (0-1), defaults to 85%
 * @returns true if watched at least the threshold percentage of the content
 *
 * @example
 * checkWatchCompletion(8500, 10000); // true (85% with default threshold)
 * checkWatchCompletion(8000, 10000); // false (80% < 85% default)
 * checkWatchCompletion(9000, 10000, 0.90); // true (90% with custom threshold)
 * checkWatchCompletion(null, 6000000); // false (no progress)
 */
export function checkWatchCompletion(
  progressMs: number | null,
  totalDurationMs: number | null,
  threshold: number = SESSION_LIMITS.WATCH_COMPLETION_THRESHOLD
): boolean {
  // Missing or zero progress/duration can never count as watched
  // (this also guards the division below).
  if (!progressMs || !totalDurationMs) {
    return false;
  }
  const watchedFraction = progressMs / totalDurationMs;
  return watchedFraction >= threshold;
}
// ============================================================================
// Quality Change Detection
// ============================================================================
/**
 * Determine if a new session represents a quality/resolution change during
 * playback. This happens when Plex/Jellyfin assigns a new sessionKey while the
 * user is still watching the same content.
 *
 * @param existingActiveSession - Active (not stopped) session for same user+content, or null
 * @returns referenceId to link to if this is a quality change, or null
 *
 * @example
 * // Quality change detected - link to existing session
 * isQualityChangeScenario({ id: 'sess-1', referenceId: null, stoppedAt: null });
 * // Returns: 'sess-1'
 *
 * // Quality change with existing chain - link to original
 * isQualityChangeScenario({ id: 'sess-2', referenceId: 'sess-1', stoppedAt: null });
 * // Returns: 'sess-1'
 *
 * // Session already stopped - not a quality change (resume scenario)
 * isQualityChangeScenario({ id: 'sess-1', referenceId: null, stoppedAt: new Date() });
 * // Returns: null
 *
 * // No existing session
 * isQualityChangeScenario(null);
 * // Returns: null
 */
export function isQualityChangeScenario(
  existingActiveSession: {
    id: string;
    referenceId: string | null;
    stoppedAt: Date | null;
  } | null | undefined
): string | null {
  // Only a still-active session for the same user+content signals a quality
  // switch; a missing or already-stopped session does not.
  if (!existingActiveSession || existingActiveSession.stoppedAt !== null) {
    return null;
  }
  // Link to the root of the existing chain, falling back to the session itself.
  const { id, referenceId } = existingActiveSession;
  return referenceId || id;
}
// ============================================================================
// Session Grouping (Resume Detection)
// ============================================================================
/**
 * Determine if a new session should be grouped with a previous session
 * (resume tracking). Returns the referenceId to link to, or null if the
 * sessions shouldn't be grouped.
 *
 * Grouping requires ALL of:
 * - the previous session actually stopped,
 * - it stopped within 24 hours (absolute ceiling),
 * - it stopped within the "continued session" window (default 60s),
 * - it wasn't fully watched,
 * - the new session starts at the same or a later position (resume, not rewatch).
 *
 * @param previousSession - Previous session data for the same user/media
 * @param newProgressMs - Current playback position of new session
 * @param continuedThresholdMs - Optional custom threshold for "continued session" grouping (default: 60s)
 * @returns referenceId to link to, or null if not grouping
 *
 * @example
 * // Resuming within 60s (default threshold)
 * shouldGroupWithPreviousSession(
 *   { id: 'sess-1', referenceId: null, progressMs: 1800000, watched: false, stoppedAt: thirtySecondsAgo },
 *   1800000
 * ); // Returns: 'sess-1'
 */
export function shouldGroupWithPreviousSession(
  previousSession: {
    referenceId: string | null;
    id: string;
    progressMs: number | null;
    watched: boolean;
    stoppedAt: Date | null;
  },
  newProgressMs: number,
  continuedThresholdMs?: number
): string | null {
  const { stoppedAt, watched, progressMs, referenceId, id } = previousSession;

  // A session that never stopped cannot be resumed.
  if (!stoppedAt) return null;

  const gapMs = Date.now() - stoppedAt.getTime();

  // Hard ceiling: nothing groups across more than 24 hours.
  const DAY_MS = 24 * 60 * 60 * 1000;
  if (gapMs > DAY_MS) return null;

  // Configurable "continued session" window (default from SESSION_LIMITS).
  const windowMs = continuedThresholdMs ?? SESSION_LIMITS.CONTINUED_SESSION_THRESHOLD_MS;
  if (gapMs > windowMs) return null;

  // Fully-watched content restarts as a fresh session.
  if (watched) return null;

  // Only group when playback continues at or past the old position.
  if (newProgressMs < (progressMs || 0)) return null;

  // Link to the first session in the chain.
  return referenceId || id;
}

View File

@@ -0,0 +1,173 @@
/**
* Poller Type Definitions
*
* Shared interfaces and types for the session polling system.
* Separated from implementation for clean imports and testing.
*/
import type { Session, SessionState, Rule, RuleParams, ActiveSession } from '@tracearr/shared';
// ============================================================================
// Configuration Types
// ============================================================================
/**
 * Configuration for the poller job
 *
 * @example
 * startPoller({ enabled: true, intervalMs: 15000 });
 */
export interface PollerConfig {
  /** Whether polling is enabled */
  enabled: boolean;
  /** Polling interval in milliseconds */
  intervalMs: number;
}
// ============================================================================
// Server Types
// ============================================================================
/**
 * Server data with decrypted token for API calls
 *
 * NOTE(review): `mapMediaSession` accepts 'emby' as a server type, but this
 * union only allows 'plex' | 'jellyfin' — confirm whether Emby servers can
 * appear here and widen the union if so.
 */
export interface ServerWithToken {
  id: string;
  name: string;
  /** Media server kind */
  type: 'plex' | 'jellyfin';
  /** Base URL of the media server */
  url: string;
  /** Decrypted auth token used for API calls */
  token: string;
  createdAt: Date;
  updatedAt: Date;
}
// ============================================================================
// Session Types
// ============================================================================
/**
 * Processed session format after mapping from MediaSession
 * Contains all fields needed for database storage and display
 */
export interface ProcessedSession {
  /** Unique session key from media server */
  sessionKey: string;
  /** Plex Session.id - required for termination API (different from sessionKey) */
  plexSessionId?: string;
  /** Media item identifier (ratingKey for Plex, itemId for Jellyfin) */
  ratingKey: string;
  // User identification from media server
  /** External user ID from Plex/Jellyfin for lookup */
  externalUserId: string;
  /** Display name from media server */
  username: string;
  /** Avatar URL from media server */
  userThumb: string;
  // Media metadata
  /** Media title */
  mediaTitle: string;
  /** Media type classification */
  mediaType: 'movie' | 'episode' | 'track';
  /** Show name (for episodes) */
  grandparentTitle: string;
  /** Season number (for episodes) */
  seasonNumber: number;
  /** Episode number (for episodes) */
  episodeNumber: number;
  /** Release year */
  year: number;
  /** Poster path */
  thumbPath: string;
  // Connection info
  /** Client IP address */
  ipAddress: string;
  /** Player/device name */
  playerName: string;
  /** Unique device identifier */
  deviceId: string;
  /** Product/app name (e.g., "Plex for iOS") */
  product: string;
  /** Device type (e.g., "iPhone") */
  device: string;
  /** Platform (e.g., "iOS") */
  platform: string;
  // Quality info
  /** Quality display string */
  quality: string;
  /** Whether stream is transcoded */
  isTranscode: boolean;
  /** Bitrate in kbps (NOTE(review): confirm unit against the mapper — formatQualityString documents its inputs as bps) */
  bitrate: number;
  // Playback state
  /** Current playback state */
  state: 'playing' | 'paused';
  /** Total media duration in milliseconds */
  totalDurationMs: number;
  /** Current playback position in milliseconds */
  progressMs: number;
  /**
   * Jellyfin-specific: When the current pause started (from API).
   * More accurate than tracking pause transitions via polling.
   */
  lastPausedDate?: Date;
}
// ============================================================================
// Pause Tracking Types
// ============================================================================
/**
 * Result of pause accumulation calculation
 */
export interface PauseAccumulationResult {
  /** Timestamp when the current pause started (null if playing) */
  lastPausedAt: Date | null;
  /** Total accumulated pause duration in milliseconds */
  pausedDurationMs: number;
}
/**
 * Result of stop duration calculation
 */
export interface StopDurationResult {
  /** Actual watch duration excluding pause time in milliseconds */
  durationMs: number;
  /** Final total paused duration in milliseconds */
  finalPausedDurationMs: number;
}
/**
 * Session data needed for pause calculations
 */
export interface SessionPauseData {
  /** When playback of this session began */
  startedAt: Date;
  /** When the current pause started; null while playing */
  lastPausedAt: Date | null;
  /** Total paused time accumulated so far, in milliseconds */
  pausedDurationMs: number;
}
// ============================================================================
// Processing Results
// ============================================================================
/**
 * Result of processing a single server's sessions
 */
export interface ServerProcessingResult {
  /** Whether the server was successfully polled (false = connection error) */
  success: boolean;
  /** Newly created sessions */
  newSessions: ActiveSession[];
  /** Session keys that stopped playing */
  stoppedSessionKeys: string[];
  /** Sessions that were updated (state change, progress, etc.) */
  updatedSessions: ActiveSession[];
}
// ============================================================================
// Re-exports for convenience
// ============================================================================
// Shared types re-exported so poller modules can import everything from here.
export type { Session, SessionState, Rule, RuleParams };

View File

@@ -0,0 +1,157 @@
/**
* Poller Utility Functions
*
* Pure utility functions for IP detection, client parsing, and formatting.
* These functions have no side effects and are easily testable.
*/
// ============================================================================
// IP Address Utilities
// ============================================================================
/**
 * Check if an IP address is private/local (won't have GeoIP data)
 *
 * Handles plain IPv4, IPv6, and IPv4-mapped IPv6 addresses
 * (e.g. "::ffff:192.168.1.1", the form Node.js sockets commonly report),
 * which the IPv4 range patterns would otherwise fail to match.
 *
 * @param ip - IP address to check
 * @returns true if the IP is private/local (empty input is treated as private)
 *
 * @example
 * isPrivateIP('192.168.1.100'); // true
 * isPrivateIP('::ffff:10.0.0.5'); // true
 * isPrivateIP('8.8.8.8'); // false
 */
export function isPrivateIP(ip: string): boolean {
  // Missing/empty input: treat as private so callers skip GeoIP lookup.
  if (!ip) return true;
  // Normalize IPv4-mapped IPv6 ("::ffff:a.b.c.d") down to the embedded IPv4
  // address so the IPv4 private ranges below match it correctly.
  const normalized = ip.replace(/^::ffff:/i, '');
  // IPv4 private ranges
  const privateIPv4 = [
    /^10\./, // 10.0.0.0/8
    /^172\.(1[6-9]|2\d|3[01])\./, // 172.16.0.0/12
    /^192\.168\./, // 192.168.0.0/16
    /^127\./, // Loopback
    /^169\.254\./, // Link-local
    /^0\./, // Current network
  ];
  // IPv6 private ranges
  const privateIPv6 = [
    /^::1$/i, // Loopback
    /^fe80:/i, // Link-local
    /^fc/i, // Unique local (fc00::/7)
    /^fd/i, // Unique local (fc00::/7)
  ];
  return privateIPv4.some((r) => r.test(normalized)) || privateIPv6.some((r) => r.test(ip));
}
// ============================================================================
// Client Parsing Utilities
// ============================================================================
/**
 * Parse platform and device info from a Jellyfin client string.
 *
 * Jellyfin clients identify themselves with strings such as "Jellyfin iOS",
 * "Jellyfin Android", "Jellyfin Web", or third-party names like "Swiftfin".
 * The checks below are ordered from most to least specific; order matters
 * (e.g. "webos" must be tested before the generic "web").
 *
 * @param client - Client application name string
 * @param deviceType - Optional device type hint; used verbatim when meaningful
 * @returns Parsed platform and device information
 *
 * @example
 * parseJellyfinClient('Jellyfin iOS'); // { platform: 'iOS', device: 'iPhone' }
 * parseJellyfinClient('Jellyfin Web'); // { platform: 'Web', device: 'Browser' }
 * parseJellyfinClient('Swiftfin'); // { platform: 'tvOS', device: 'Apple TV' }
 */
export function parseJellyfinClient(
  client: string,
  deviceType?: string
): { platform: string; device: string } {
  // An explicit, meaningful deviceType wins over all heuristics below.
  if (deviceType && deviceType.length > 0 && deviceType !== 'Unknown') {
    return { platform: client, device: deviceType };
  }
  const name = client.toLowerCase();
  const has = (needle: string): boolean => name.includes(needle);
  // Apple mobile
  if (has('ios') || has('iphone')) return { platform: 'iOS', device: 'iPhone' };
  if (has('ipad')) return { platform: 'iOS', device: 'iPad' };
  // Android family, including TV variants and NVIDIA Shield
  if (has('android')) {
    return has('tv') || has('shield')
      ? { platform: 'Android TV', device: 'Android TV' }
      : { platform: 'Android', device: 'Android' };
  }
  if (has('shield')) return { platform: 'Android TV', device: 'Android TV' };
  // Smart TVs and streaming boxes
  if (has('samsung') || has('tizen')) return { platform: 'Tizen', device: 'Samsung TV' };
  if (has('webos') || has('lg')) return { platform: 'webOS', device: 'LG TV' };
  if (has('roku')) return { platform: 'Roku', device: 'Roku' };
  if (has('tvos') || has('apple tv') || has('swiftfin')) {
    return { platform: 'tvOS', device: 'Apple TV' };
  }
  // Browser-based clients
  if (has('web')) return { platform: 'Web', device: 'Browser' };
  // Third-party media players
  if (has('kodi')) return { platform: 'Kodi', device: 'Kodi' };
  if (has('infuse')) return { platform: 'Infuse', device: 'Infuse' };
  // Nothing matched: echo back whatever information we have.
  return { platform: client || 'Unknown', device: deviceType || client || 'Unknown' };
}
// ============================================================================
// Formatting Utilities
// ============================================================================
/**
 * Format a human-readable quality string from bitrate and transcoding info.
 *
 * The transcode bitrate takes precedence over the source bitrate; when
 * neither is known, falls back to a label based on the transcoding flag.
 *
 * @param transcodeBitrate - Transcoded bitrate in bps (0 if not transcoding)
 * @param sourceBitrate - Original source bitrate in bps
 * @param isTranscoding - Whether the stream is being transcoded
 * @returns Formatted quality string (e.g., "12Mbps", "Transcoding", "Direct")
 *
 * @example
 * formatQualityString(12000000, 20000000, true); // "12Mbps"
 * formatQualityString(0, 0, true); // "Transcoding"
 * formatQualityString(0, 0, false); // "Direct"
 */
export function formatQualityString(
  transcodeBitrate: number,
  sourceBitrate: number,
  isTranscoding: boolean
): string {
  // Prefer the transcode bitrate; fall back to the source bitrate.
  const effectiveBitrate = transcodeBitrate || sourceBitrate;
  if (effectiveBitrate > 0) {
    return `${Math.round(effectiveBitrate / 1000000)}Mbps`;
  }
  return isTranscoding ? 'Transcoding' : 'Direct';
}

View File

@@ -0,0 +1,425 @@
/**
* Violation Handling
*
* Functions for creating violations, calculating trust score penalties,
* and determining rule applicability.
*/
import { eq, sql, and, isNull, gte } from 'drizzle-orm';
import type { ExtractTablesWithRelations } from 'drizzle-orm';
import type { PgTransaction } from 'drizzle-orm/pg-core';
import type { PostgresJsQueryResultHKT } from 'drizzle-orm/postgres-js';
import type { Rule, ViolationSeverity, ViolationWithDetails, RuleType } from '@tracearr/shared';
import { WS_EVENTS, TIME_MS } from '@tracearr/shared';
import { db } from '../../db/client.js';
import { servers, serverUsers, sessions, violations, users, rules } from '../../db/schema.js';
import type * as schema from '../../db/schema.js';
import type { RuleEvaluationResult } from '../../services/rules.js';
import type { PubSubService } from '../../services/cache.js';
import { enqueueNotification } from '../notificationQueue.js';
// Type for transaction context
type TransactionContext = PgTransaction<PostgresJsQueryResultHKT, typeof schema, ExtractTablesWithRelations<typeof schema>>;
// ============================================================================
// Trust Score Calculation
// ============================================================================
/**
 * Calculate the trust score penalty for a violation of the given severity.
 *
 * @param severity - Violation severity level
 * @returns Trust score penalty (positive number to subtract from the score)
 *
 * @example
 * getTrustScorePenalty('high'); // 20
 * getTrustScorePenalty('warning'); // 10
 * getTrustScorePenalty('low'); // 5
 */
export function getTrustScorePenalty(severity: ViolationSeverity): number {
  switch (severity) {
    case 'high':
      return 20;
    case 'warning':
      return 10;
    default:
      // All remaining severities carry the smallest penalty.
      return 5;
  }
}
// ============================================================================
// Violation Deduplication
// ============================================================================
// Deduplication window - violations within this time window with overlapping sessions are considered duplicates
const VIOLATION_DEDUP_WINDOW_MS = 5 * TIME_MS.MINUTE;
/**
 * Check whether an equivalent violation has already been recorded for this
 * user and rule type with overlapping sessions.
 *
 * Prevents double-reporting when multiple sessions start at once (each sees
 * the others as active) or when SSE and the poller both detect the same event.
 *
 * A violation counts as a duplicate when all of the following hold:
 * - same serverUserId
 * - same rule *type* (any rule of the type, not just the same ruleId)
 * - created within VIOLATION_DEDUP_WINDOW_MS
 * - not yet acknowledged
 * - session overlap: the triggering session appears in the prior violation's
 *   related sessions, the prior violation's triggering session appears in
 *   ours, or the two related-session lists intersect
 *
 * @param serverUserId - Server user who violated the rule
 * @param ruleType - Type of rule (concurrent_streams, simultaneous_locations, etc.)
 * @param triggeringSessionId - The session that triggered this violation
 * @param relatedSessionIds - Session IDs involved in this violation
 * @returns true if a duplicate violation exists
 */
export async function isDuplicateViolation(
  serverUserId: string,
  ruleType: RuleType,
  triggeringSessionId: string,
  relatedSessionIds: string[]
): Promise<boolean> {
  // Deduplication only applies to rules whose violations span multiple sessions.
  if (!['concurrent_streams', 'simultaneous_locations'].includes(ruleType)) {
    return false;
  }
  const cutoff = new Date(Date.now() - VIOLATION_DEDUP_WINDOW_MS);
  // Recent, unacknowledged violations for the same user and rule type.
  const candidates = await db
    .select({
      id: violations.id,
      sessionId: violations.sessionId,
      data: violations.data,
    })
    .from(violations)
    .innerJoin(rules, eq(violations.ruleId, rules.id))
    .where(
      and(
        eq(violations.serverUserId, serverUserId),
        eq(rules.type, ruleType),
        isNull(violations.acknowledgedAt),
        gte(violations.createdAt, cutoff)
      )
    );
  for (const prior of candidates) {
    const priorData = prior.data as Record<string, unknown> | null;
    const priorRelated = (priorData?.relatedSessionIds as string[]) || [];
    // Case 1: our triggering session is already covered by a prior violation.
    if (priorRelated.includes(triggeringSessionId)) {
      console.log(
        `[Violations] Skipping duplicate: triggering session ${triggeringSessionId} is related to existing violation ${prior.id}`
      );
      return true;
    }
    // Case 2: the prior violation's triggering session is among our related sessions.
    if (prior.sessionId && relatedSessionIds.includes(prior.sessionId)) {
      console.log(
        `[Violations] Skipping duplicate: existing violation ${prior.id} triggered by session in our related sessions`
      );
      return true;
    }
    // Case 3: the related-session lists intersect.
    if (relatedSessionIds.some((id) => priorRelated.includes(id))) {
      console.log(
        `[Violations] Skipping duplicate: overlapping related sessions with existing violation ${prior.id}`
      );
      return true;
    }
  }
  return false;
}
// ============================================================================
// Rule Applicability
// ============================================================================
/**
 * Determine whether a rule applies to a given server user.
 *
 * Rules with serverUserId=null are global and apply to everyone;
 * otherwise a rule only applies to its own server user.
 *
 * @param rule - Rule to check
 * @param serverUserId - Server user ID to check against
 * @returns true if the rule applies to this server user
 *
 * @example
 * doesRuleApplyToUser({ serverUserId: null }, 'su-123'); // true (global rule)
 * doesRuleApplyToUser({ serverUserId: 'su-123' }, 'su-123'); // true (user-specific)
 * doesRuleApplyToUser({ serverUserId: 'su-456' }, 'su-123'); // false (different user)
 */
export function doesRuleApplyToUser(
  rule: { serverUserId: string | null },
  serverUserId: string
): boolean {
  // A null serverUserId marks a global rule.
  if (rule.serverUserId === null) {
    return true;
  }
  return rule.serverUserId === serverUserId;
}
// ============================================================================
// Violation Creation
// ============================================================================
/**
 * Create a violation from rule evaluation result.
 * Uses a transaction to ensure violation insert and trust score update are atomic.
 *
 * @deprecated Use `createViolationInTransaction()` + `broadcastViolations()` instead
 * for proper atomic behavior when creating sessions and violations together.
 * This function creates its own transaction, which cannot be combined with
 * session creation. Only use this for standalone violation creation outside
 * the poller flow.
 *
 * @param ruleId - ID of the rule that was violated
 * @param serverUserId - ID of the server user who violated the rule
 * @param sessionId - ID of the session where violation occurred
 * @param result - Rule evaluation result with severity and data
 * @param rule - Full rule object for broadcast details
 * @param pubSubService - Optional pub/sub service for WebSocket broadcast
 *
 * @example
 * // Preferred pattern (in poller):
 * const violationResults = await db.transaction(async (tx) => {
 *   const session = await tx.insert(sessions).values(data).returning();
 *   return await createViolationInTransaction(tx, ruleId, serverUserId, session.id, result, rule);
 * });
 * await broadcastViolations(violationResults, sessionId, pubSubService);
 *
 * // Legacy pattern (standalone, avoid in new code):
 * await createViolation(ruleId, serverUserId, sessionId, result, rule, pubSubService);
 */
export async function createViolation(
  ruleId: string,
  serverUserId: string,
  sessionId: string,
  result: RuleEvaluationResult,
  rule: Rule,
  pubSubService: PubSubService | null
): Promise<void> {
  // Calculate trust penalty based on severity
  const trustPenalty = getTrustScorePenalty(result.severity);
  // Use transaction to ensure violation creation and trust score update are atomic
  const created = await db.transaction(async (tx) => {
    const [violation] = await tx
      .insert(violations)
      .values({
        ruleId,
        serverUserId,
        sessionId,
        severity: result.severity,
        data: result.data,
      })
      .returning();
    // Decrease server user trust score based on severity (atomic within transaction)
    // GREATEST(0, ...) clamps the score so it never goes negative.
    await tx
      .update(serverUsers)
      .set({
        trustScore: sql`GREATEST(0, ${serverUsers.trustScore} - ${trustPenalty})`,
        updatedAt: new Date(),
      })
      .where(eq(serverUsers.id, serverUserId));
    return violation;
  });
  // Get server user and server details for the violation broadcast (outside transaction - read only)
  // NOTE(review): `sessions` is joined on the literal sessionId (a constant, not a
  // column pair) purely to resolve the owning server for this violation's session.
  const [details] = await db
    .select({
      userId: serverUsers.id,
      username: serverUsers.username,
      thumbUrl: serverUsers.thumbUrl,
      identityName: users.name,
      serverId: servers.id,
      serverName: servers.name,
      serverType: servers.type,
    })
    .from(serverUsers)
    .innerJoin(users, eq(serverUsers.userId, users.id))
    .innerJoin(sessions, eq(sessions.id, sessionId))
    .innerJoin(servers, eq(servers.id, sessions.serverId))
    .where(eq(serverUsers.id, serverUserId))
    .limit(1);
  // Publish violation event for WebSocket broadcast.
  // Skipped silently when no pub/sub is configured or the lookup found nothing.
  if (pubSubService && created && details) {
    const violationWithDetails: ViolationWithDetails = {
      id: created.id,
      ruleId: created.ruleId,
      serverUserId: created.serverUserId,
      sessionId: created.sessionId,
      severity: created.severity,
      data: created.data,
      acknowledgedAt: created.acknowledgedAt,
      createdAt: created.createdAt,
      user: {
        id: details.userId,
        username: details.username,
        thumbUrl: details.thumbUrl,
        serverId: details.serverId,
        identityName: details.identityName,
      },
      rule: {
        id: rule.id,
        name: rule.name,
        type: rule.type,
      },
      server: {
        id: details.serverId,
        name: details.serverName,
        type: details.serverType,
      },
    };
    await pubSubService.publish(WS_EVENTS.VIOLATION_NEW, violationWithDetails);
    console.log(`[Poller] Violation broadcast: ${rule.name} for user ${details.username}`);
    // Enqueue notification for async dispatch (Discord, webhooks, push)
    await enqueueNotification({ type: 'violation', payload: violationWithDetails });
  }
}
// ============================================================================
// Transaction-Aware Violation Creation
// ============================================================================
/**
 * Result of creating a violation within a transaction.
 * Contains data needed for post-transaction broadcasting.
 */
export interface ViolationInsertResult {
  /** The violation row as inserted (database-generated fields included) */
  violation: typeof violations.$inferSelect;
  /** The rule that produced the violation, for broadcast details */
  rule: Rule;
  /** Trust score penalty that was applied for this violation */
  trustPenalty: number;
}
/**
* Create a violation within an existing transaction context.
* Use this when session insert + violation creation must be atomic.
*
* This function:
* 1. Inserts the violation record
* 2. Updates the server user's trust score
* Both within the provided transaction.
*
* Broadcasting/notification must be done AFTER the transaction commits.
*
* @param tx - Transaction context
* @param ruleId - ID of the rule that was violated
* @param serverUserId - ID of the server user who violated the rule
* @param sessionId - ID of the session where violation occurred
* @param result - Rule evaluation result with severity and data
* @param rule - Full rule object for broadcast details
* @returns Violation insert result for post-transaction broadcasting
*/
export async function createViolationInTransaction(
tx: TransactionContext,
ruleId: string,
serverUserId: string,
sessionId: string,
result: RuleEvaluationResult,
rule: Rule
): Promise<ViolationInsertResult> {
const trustPenalty = getTrustScorePenalty(result.severity);
const [violation] = await tx
.insert(violations)
.values({
ruleId,
serverUserId,
sessionId,
severity: result.severity,
data: result.data,
})
.returning();
// Decrease server user trust score based on severity
await tx
.update(serverUsers)
.set({
trustScore: sql`GREATEST(0, ${serverUsers.trustScore} - ${trustPenalty})`,
updatedAt: new Date(),
})
.where(eq(serverUsers.id, serverUserId));
return { violation: violation!, rule, trustPenalty };
}
/**
 * Broadcast violation events after transaction has committed.
 * Call this AFTER the transaction to ensure data is persisted before broadcasting.
 *
 * Details (user/server context) are fetched once via the triggering session and
 * reused for every violation in the batch — all violations passed in are assumed
 * to belong to that session's user and server.
 *
 * @param violationResults - Array of violation insert results
 * @param sessionId - Session ID for fetching server details
 * @param pubSubService - PubSub service for WebSocket broadcast
 */
export async function broadcastViolations(
  violationResults: ViolationInsertResult[],
  sessionId: string,
  pubSubService: PubSubService | null
): Promise<void> {
  // Nothing to do without a broadcaster or without violations.
  if (!pubSubService || violationResults.length === 0) return;
  // Get server user and server details for the violation broadcast (single query for all)
  const [details] = await db
    .select({
      userId: serverUsers.id,
      username: serverUsers.username,
      thumbUrl: serverUsers.thumbUrl,
      identityName: users.name,
      serverId: servers.id,
      serverName: servers.name,
      serverType: servers.type,
    })
    .from(sessions)
    .innerJoin(serverUsers, eq(serverUsers.id, sessions.serverUserId))
    .innerJoin(users, eq(serverUsers.userId, users.id))
    .innerJoin(servers, eq(servers.id, sessions.serverId))
    .where(eq(sessions.id, sessionId))
    .limit(1);
  // Session not found (e.g. deleted between commit and broadcast): skip silently.
  if (!details) return;
  for (const { violation, rule } of violationResults) {
    const violationWithDetails: ViolationWithDetails = {
      id: violation.id,
      ruleId: violation.ruleId,
      serverUserId: violation.serverUserId,
      sessionId: violation.sessionId,
      severity: violation.severity,
      data: violation.data,
      acknowledgedAt: violation.acknowledgedAt,
      createdAt: violation.createdAt,
      user: {
        id: details.userId,
        username: details.username,
        thumbUrl: details.thumbUrl,
        serverId: details.serverId,
        identityName: details.identityName,
      },
      rule: {
        id: rule.id,
        name: rule.name,
        type: rule.type,
      },
      server: {
        id: details.serverId,
        name: details.serverName,
        type: details.serverType,
      },
    };
    await pubSubService.publish(WS_EVENTS.VIOLATION_NEW, violationWithDetails);
    console.log(`[Poller] Violation broadcast: ${rule.name} for user ${details.username}`);
    // Enqueue notification for async dispatch (Discord, webhooks, push)
    await enqueueNotification({ type: 'violation', payload: violationWithDetails });
  }
}

View File

@@ -0,0 +1,644 @@
/**
* SSE Event Processor
*
* Handles incoming SSE events and updates sessions accordingly.
* This bridges the real-time SSE events to the existing session processing logic.
*
* Flow:
* 1. SSE event received (playing/paused/stopped/progress)
* 2. Fetch full session details from Plex API (SSE only gives minimal info)
* 3. Process session update using existing poller logic
* 4. Broadcast updates via WebSocket
*/
import { eq, and, isNull } from 'drizzle-orm';
import type { PlexPlaySessionNotification, ActiveSession } from '@tracearr/shared';
import { db } from '../db/client.js';
import { servers, sessions, serverUsers, users } from '../db/schema.js';
import { createMediaServerClient } from '../services/mediaServer/index.js';
import { sseManager } from '../services/sseManager.js';
import type { CacheService, PubSubService } from '../services/cache.js';
import { geoipService } from '../services/geoip.js';
import { ruleEngine } from '../services/rules.js';
import { mapMediaSession } from './poller/sessionMapper.js';
import { calculatePauseAccumulation, calculateStopDuration, checkWatchCompletion } from './poller/stateTracker.js';
import { getActiveRules, batchGetRecentUserSessions } from './poller/database.js';
import { createViolation, isDuplicateViolation } from './poller/violations.js';
import { enqueueNotification } from './notificationQueue.js';
import { triggerReconciliationPoll } from './poller/index.js';
// Module-level service handles, injected via initializeSSEProcessor().
let cacheService: CacheService | null = null;
let pubSubService: PubSubService | null = null;
// Store wrapped handlers so we can properly remove them
// (unsubscribing requires the exact same function reference that was registered).
interface SessionEvent { serverId: string; notification: PlexPlaySessionNotification }
// Each wrapper is synchronous and discards the async handler's promise with
// `void` (fire-and-forget); errors are caught inside each handler's try/catch.
const wrappedHandlers = {
  playing: (e: SessionEvent) => void handlePlaying(e),
  paused: (e: SessionEvent) => void handlePaused(e),
  stopped: (e: SessionEvent) => void handleStopped(e),
  progress: (e: SessionEvent) => void handleProgress(e),
  reconciliation: () => void handleReconciliation(),
};
/**
 * Wire the SSE processor up to its cache and pub/sub dependencies.
 * Must be called before startSSEProcessor().
 */
export function initializeSSEProcessor(cache: CacheService, pubSub: PubSubService): void {
  pubSubService = pubSub;
  cacheService = cache;
}
/**
 * Start the SSE processor.
 * Registers this module's wrapped handlers on the SSE manager's events.
 * Throws if initializeSSEProcessor() has not been called first.
 * Note: sseManager.start() is called separately in index.ts after server is listening
 */
export function startSSEProcessor(): void {
  if (cacheService === null || pubSubService === null) {
    throw new Error('SSE processor not initialized');
  }
  console.log('[SSEProcessor] Starting');
  // Subscribe to SSE events using the stable wrapper references.
  const { playing, paused, stopped, progress, reconciliation } = wrappedHandlers;
  sseManager.on('plex:session:playing', playing);
  sseManager.on('plex:session:paused', paused);
  sseManager.on('plex:session:stopped', stopped);
  sseManager.on('plex:session:progress', progress);
  sseManager.on('reconciliation:needed', reconciliation);
}
/**
 * Stop the SSE processor.
 * Detaches every handler registered by startSSEProcessor().
 * Note: sseManager.stop() is called separately in index.ts during cleanup
 */
export function stopSSEProcessor(): void {
  console.log('[SSEProcessor] Stopping');
  // Unsubscribe with the same wrapper references used when subscribing.
  const { playing, paused, stopped, progress, reconciliation } = wrappedHandlers;
  sseManager.off('plex:session:playing', playing);
  sseManager.off('plex:session:paused', paused);
  sseManager.off('plex:session:stopped', stopped);
  sseManager.off('plex:session:progress', progress);
  sseManager.off('reconciliation:needed', reconciliation);
}
/**
 * React to a "playing" notification (new session or resume).
 * Refreshes full session details from the server, then either updates the
 * matching active database row or creates a new session.
 */
async function handlePlaying(event: {
  serverId: string;
  notification: PlexPlaySessionNotification;
}): Promise<void> {
  const { serverId, notification } = event;
  try {
    const mapped = await fetchFullSession(serverId, notification.sessionKey);
    if (!mapped) return;
    // Find an active (not yet stopped) row for this session key.
    const [existing] = await db
      .select()
      .from(sessions)
      .where(
        and(
          eq(sessions.serverId, serverId),
          eq(sessions.sessionKey, notification.sessionKey),
          isNull(sessions.stoppedAt)
        )
      )
      .limit(1);
    if (existing) {
      await updateExistingSession(existing, mapped, 'playing');
    } else {
      await createNewSession(serverId, mapped);
    }
  } catch (error) {
    console.error('[SSEProcessor] Error handling playing event:', error);
  }
}
/**
* Handle paused event
*/
async function handlePaused(event: {
serverId: string;
notification: PlexPlaySessionNotification;
}): Promise<void> {
const { serverId, notification } = event;
try {
const existingRows = await db
.select()
.from(sessions)
.where(
and(
eq(sessions.serverId, serverId),
eq(sessions.sessionKey, notification.sessionKey),
isNull(sessions.stoppedAt)
)
)
.limit(1);
if (!existingRows[0]) {
return;
}
const session = await fetchFullSession(serverId, notification.sessionKey);
if (session) {
await updateExistingSession(existingRows[0], session, 'paused');
}
} catch (error) {
console.error('[SSEProcessor] Error handling paused event:', error);
}
}
/**
 * React to a "stopped" notification.
 * Stops every active row matching the session key — the query deliberately
 * has no limit so any duplicate rows are also closed out.
 */
async function handleStopped(event: {
  serverId: string;
  notification: PlexPlaySessionNotification;
}): Promise<void> {
  const { serverId, notification } = event;
  try {
    const activeRows = await db
      .select()
      .from(sessions)
      .where(
        and(
          eq(sessions.serverId, serverId),
          eq(sessions.sessionKey, notification.sessionKey),
          isNull(sessions.stoppedAt)
        )
      );
    // Stop all matching sessions; an empty result simply falls through.
    for (const row of activeRows) {
      await stopSession(row);
    }
  } catch (error) {
    console.error('[SSEProcessor] Error handling stopped event:', error);
  }
}
/**
 * Handle progress event (periodic position updates)
 *
 * Persists the new playback position, recomputes the "watched" flag, and
 * mirrors both into the session cache. WebSocket broadcasts are limited to
 * the unwatched -> watched transition to avoid spamming clients, since
 * progress events arrive frequently.
 */
async function handleProgress(event: {
  serverId: string;
  notification: PlexPlaySessionNotification;
}): Promise<void> {
  const { serverId, notification } = event;
  try {
    const existingRows = await db
      .select()
      .from(sessions)
      .where(
        and(
          eq(sessions.serverId, serverId),
          eq(sessions.sessionKey, notification.sessionKey),
          isNull(sessions.stoppedAt)
        )
      )
      .limit(1);
    if (!existingRows[0]) {
      return;
    }
    // Update progress in database.
    // Once watched, stays watched: OR the stored flag with a fresh completion check.
    const watched = existingRows[0].watched || checkWatchCompletion(
      notification.viewOffset,
      existingRows[0].totalDurationMs
    );
    await db
      .update(sessions)
      .set({
        progressMs: notification.viewOffset,
        watched,
      })
      .where(eq(sessions.id, existingRows[0].id));
    // Update cache (mutate the cached copy in place, then write it back).
    if (cacheService) {
      const cached = await cacheService.getSessionById(existingRows[0].id);
      if (cached) {
        cached.progressMs = notification.viewOffset;
        cached.watched = watched;
        await cacheService.setSessionById(existingRows[0].id, cached);
      }
    }
    // Broadcast update (but don't spam - progress events are frequent)
    // Only broadcast if there's a significant change (e.g., watched status changed)
    if (watched && !existingRows[0].watched && pubSubService) {
      const cached = await cacheService?.getSessionById(existingRows[0].id);
      if (cached) {
        await pubSubService.publish('session:updated', cached);
      }
    }
  } catch (error) {
    console.error('[SSEProcessor] Error handling progress event:', error);
  }
}
/**
 * Handle reconciliation request
 * Triggers a light poll for SSE-connected servers to catch any missed events
 * (e.g. sessions that started or stopped while the SSE connection was down).
 */
async function handleReconciliation(): Promise<void> {
  console.log('[SSEProcessor] Triggering reconciliation poll');
  await triggerReconciliationPoll();
}
/**
 * Fetch complete session details from the media server for a session key.
 * SSE notifications carry minimal information, so the full session list is
 * pulled and the matching entry mapped into the poller's processed format.
 * Returns null when the server is unknown, the session is gone, or the
 * API call fails.
 */
async function fetchFullSession(
  serverId: string,
  sessionKey: string
): Promise<ReturnType<typeof mapMediaSession> | null> {
  try {
    const [server] = await db
      .select()
      .from(servers)
      .where(eq(servers.id, serverId))
      .limit(1);
    if (!server) return null;
    const client = createMediaServerClient({
      type: server.type as 'plex',
      url: server.url,
      token: server.token,
    });
    const match = (await client.getSessions()).find(
      (s) => s.sessionKey === sessionKey
    );
    return match ? mapMediaSession(match, server.type as 'plex') : null;
  } catch (error) {
    console.error(`[SSEProcessor] Error fetching session ${sessionKey}:`, error);
    return null;
  }
}
/**
 * Create a new session from SSE event
 *
 * Flow: resolve the server row, resolve the server-user (joined with the
 * identity table for a display name), geo-locate the client IP, guard
 * against a concurrent insert by the poller, insert the session row,
 * mirror it into the cache, broadcast it, and evaluate limit rules.
 *
 * @param serverId - ID of the server the SSE event originated from
 * @param processed - Normalized session payload from mapMediaSession
 */
async function createNewSession(
  serverId: string,
  processed: ReturnType<typeof mapMediaSession>
): Promise<void> {
  // Get server info
  const serverRows = await db
    .select()
    .from(servers)
    .where(eq(servers.id, serverId))
    .limit(1);
  const server = serverRows[0];
  if (!server) {
    // Server row no longer exists; nothing to attach the session to.
    return;
  }
  // Get or create server user (with identity name from users table)
  const serverUserRows = await db
    .select({
      id: serverUsers.id,
      username: serverUsers.username,
      thumbUrl: serverUsers.thumbUrl,
      identityName: users.name,
    })
    .from(serverUsers)
    .innerJoin(users, eq(serverUsers.userId, users.id))
    .where(
      and(
        eq(serverUsers.serverId, serverId),
        eq(serverUsers.externalId, processed.externalUserId)
      )
    )
    .limit(1);
  const serverUserId = serverUserRows[0]?.id;
  if (!serverUserId) {
    // This shouldn't happen often since users are synced, but handle it
    console.warn(`[SSEProcessor] Server user not found for ${processed.externalUserId}, skipping`);
    return;
  }
  // GeoIP lookup
  const geo = geoipService.lookup(processed.ipAddress);
  // Check if an active session already exists (prevents race condition with poller)
  // This can happen when SSE and poller both try to create a session simultaneously
  const existingActiveSession = await db
    .select()
    .from(sessions)
    .where(
      and(
        eq(sessions.serverId, serverId),
        eq(sessions.sessionKey, processed.sessionKey),
        isNull(sessions.stoppedAt)
      )
    )
    .limit(1);
  if (existingActiveSession.length > 0) {
    // Session already exists (likely created by poller), skip insert
    // The existing session will be updated by subsequent SSE events
    console.log(`[SSEProcessor] Active session already exists for ${processed.sessionKey}, skipping create`);
    return;
  }
  // Insert new session
  const insertedRows = await db
    .insert(sessions)
    .values({
      serverId,
      serverUserId,
      sessionKey: processed.sessionKey,
      ratingKey: processed.ratingKey || null,
      state: processed.state,
      mediaType: processed.mediaType,
      mediaTitle: processed.mediaTitle,
      grandparentTitle: processed.grandparentTitle || null,
      seasonNumber: processed.seasonNumber || null,
      episodeNumber: processed.episodeNumber || null,
      year: processed.year || null,
      thumbPath: processed.thumbPath || null,
      startedAt: new Date(),
      lastSeenAt: new Date(), // Track when we first saw this session
      totalDurationMs: processed.totalDurationMs || null,
      progressMs: processed.progressMs || null,
      // If the session starts paused, begin pause accounting immediately.
      lastPausedAt: processed.state === 'paused' ? new Date() : null,
      pausedDurationMs: 0,
      watched: false,
      ipAddress: processed.ipAddress,
      geoCity: geo.city,
      geoRegion: geo.region,
      geoCountry: geo.country,
      geoLat: geo.lat,
      geoLon: geo.lon,
      playerName: processed.playerName,
      deviceId: processed.deviceId || null,
      product: processed.product || null,
      device: processed.device || null,
      platform: processed.platform,
      quality: processed.quality,
      isTranscode: processed.isTranscode,
      bitrate: processed.bitrate,
    })
    .returning();
  const inserted = insertedRows[0];
  if (!inserted) {
    return;
  }
  // Get server user details
  const serverUserFromDb = serverUserRows[0];
  const userDetail = serverUserFromDb
    ? {
        id: serverUserFromDb.id,
        username: serverUserFromDb.username,
        thumbUrl: serverUserFromDb.thumbUrl,
        identityName: serverUserFromDb.identityName,
      }
    : { id: serverUserId, username: 'Unknown', thumbUrl: null, identityName: null };
  // Build active session
  const activeSession: ActiveSession = {
    id: inserted.id,
    serverId,
    serverUserId,
    sessionKey: processed.sessionKey,
    state: processed.state,
    mediaType: processed.mediaType,
    mediaTitle: processed.mediaTitle,
    grandparentTitle: processed.grandparentTitle || null,
    seasonNumber: processed.seasonNumber || null,
    episodeNumber: processed.episodeNumber || null,
    year: processed.year || null,
    thumbPath: processed.thumbPath || null,
    ratingKey: processed.ratingKey || null,
    externalSessionId: null,
    startedAt: inserted.startedAt,
    stoppedAt: null,
    durationMs: null,
    totalDurationMs: processed.totalDurationMs || null,
    progressMs: processed.progressMs || null,
    lastPausedAt: inserted.lastPausedAt,
    pausedDurationMs: 0,
    referenceId: null,
    watched: false,
    ipAddress: processed.ipAddress,
    geoCity: geo.city,
    geoRegion: geo.region,
    geoCountry: geo.country,
    geoLat: geo.lat,
    geoLon: geo.lon,
    playerName: processed.playerName,
    deviceId: processed.deviceId || null,
    product: processed.product || null,
    device: processed.device || null,
    platform: processed.platform,
    quality: processed.quality,
    isTranscode: processed.isTranscode,
    bitrate: processed.bitrate,
    user: userDetail,
    server: { id: server.id, name: server.name, type: server.type as 'plex' },
  };
  // Update cache atomically
  if (cacheService) {
    // Add to active sessions SET + store session data (atomic)
    await cacheService.addActiveSession(activeSession);
    await cacheService.addUserSession(serverUserId, inserted.id);
  }
  // Broadcast new session
  if (pubSubService) {
    await pubSubService.publish('session:started', activeSession);
    await enqueueNotification({ type: 'session_started', payload: activeSession });
  }
  // Evaluate rules
  const activeRules = await getActiveRules();
  const recentSessions = await batchGetRecentUserSessions([serverUserId]);
  const ruleResults = await ruleEngine.evaluateSession(inserted, activeRules, recentSessions.get(serverUserId) ?? []);
  for (const result of ruleResults) {
    // NOTE(review): this find() matches the first rule scoped to this user
    // whenever result.violated is true — it does not pair `result` with the
    // rule that actually produced it. Looks suspicious; confirm intended.
    const matchingRule = activeRules.find(
      (r) => (r.serverUserId === null || r.serverUserId === serverUserId) && result.violated
    );
    if (matchingRule) {
      // Check for duplicate violations before creating
      // This prevents multiple violations when sessions start simultaneously
      const relatedSessionIds = (result.data?.relatedSessionIds as string[]) || [];
      const isDuplicate = await isDuplicateViolation(
        serverUserId,
        matchingRule.type,
        inserted.id,
        relatedSessionIds
      );
      if (isDuplicate) {
        continue; // Skip creating duplicate violation
      }
      // TODO: Refactor to use createViolationInTransaction pattern for atomicity
      // Session is already inserted before rule evaluation, so using standalone function for now
      // eslint-disable-next-line @typescript-eslint/no-deprecated
      await createViolation(matchingRule.id, serverUserId, inserted.id, result, matchingRule, pubSubService);
    }
  }
  console.log(`[SSEProcessor] Created session ${inserted.id} for ${processed.mediaTitle}`);
}
/**
 * Apply a state/progress update from an SSE event to a session that is
 * already being tracked.
 *
 * Persists the new playback state, pause accounting, and watch-completion
 * flag to the database, then mirrors the same changes onto the cached
 * active-session entry (when caching is enabled) and broadcasts the update.
 *
 * @param existingSession - Current database row for the session
 * @param processed - Normalized payload from mapMediaSession
 * @param newState - Playback state reported by the event
 */
async function updateExistingSession(
  existingSession: typeof sessions.$inferSelect,
  processed: ReturnType<typeof mapMediaSession>,
  newState: 'playing' | 'paused'
): Promise<void> {
  const timestamp = new Date();

  // Accumulate paused time across the play/pause transition.
  const pauseState = calculatePauseAccumulation(
    existingSession.state,
    newState,
    {
      lastPausedAt: existingSession.lastPausedAt,
      pausedDurationMs: existingSession.pausedDurationMs || 0,
    },
    timestamp
  );

  // Once a session has been marked watched it stays watched.
  const hasBeenWatched =
    existingSession.watched ||
    checkWatchCompletion(processed.progressMs, processed.totalDurationMs);

  // Persist the updated playback state.
  await db
    .update(sessions)
    .set({
      state: newState,
      quality: processed.quality,
      bitrate: processed.bitrate,
      progressMs: processed.progressMs || null,
      lastPausedAt: pauseState.lastPausedAt,
      pausedDurationMs: pauseState.pausedDurationMs,
      watched: hasBeenWatched,
    })
    .where(eq(sessions.id, existingSession.id));

  if (!cacheService) {
    return;
  }

  // Prefer the direct key lookup; fall back to scanning the active set on a miss.
  let cached = await cacheService.getSessionById(existingSession.id);
  if (!cached) {
    const activeSessions = await cacheService.getAllActiveSessions();
    cached = activeSessions.find((s) => s.id === existingSession.id) || null;
  }
  if (!cached) {
    return;
  }

  // Mirror the database changes onto the cached copy.
  Object.assign(cached, {
    state: newState,
    quality: processed.quality,
    bitrate: processed.bitrate,
    progressMs: processed.progressMs || null,
    lastPausedAt: pauseState.lastPausedAt,
    pausedDurationMs: pauseState.pausedDurationMs,
    watched: hasBeenWatched,
  });

  // Atomic update: just update this session's data (ID already in SET).
  await cacheService.updateActiveSession(cached);

  // Broadcast the update to subscribers.
  if (pubSubService) {
    await pubSubService.publish('session:updated', cached);
  }
}
/**
 * Finalize a session that has stopped playing.
 *
 * Computes the final play duration (excluding accumulated pause time),
 * marks the row as stopped, evicts it from the cache, and broadcasts the
 * stop event plus a notification built from the cached session snapshot.
 *
 * @param existingSession - Database row of the session being stopped
 */
async function stopSession(existingSession: typeof sessions.$inferSelect): Promise<void> {
  const endTime = new Date();

  // Final wall-clock duration minus time spent paused.
  const { durationMs, finalPausedDurationMs } = calculateStopDuration(
    {
      startedAt: existingSession.startedAt,
      lastPausedAt: existingSession.lastPausedAt,
      pausedDurationMs: existingSession.pausedDurationMs || 0,
    },
    endTime
  );

  // A session that already hit the watched threshold stays watched.
  const hasBeenWatched =
    existingSession.watched ||
    checkWatchCompletion(existingSession.progressMs, existingSession.totalDurationMs);

  // Mark the row as stopped with final accounting.
  await db
    .update(sessions)
    .set({
      state: 'stopped',
      stoppedAt: endTime,
      durationMs,
      pausedDurationMs: finalPausedDurationMs,
      lastPausedAt: null,
      watched: hasBeenWatched,
    })
    .where(eq(sessions.id, existingSession.id));

  // Snapshot the cached session BEFORE eviction — the notification needs it.
  const cachedSession = await cacheService?.getSessionById(existingSession.id);

  if (cacheService) {
    // Atomic remove from active sessions SET + delete session data.
    await cacheService.removeActiveSession(existingSession.id);
    await cacheService.removeUserSession(existingSession.serverUserId, existingSession.id);
  }

  // Broadcast the stop; enqueue a notification only when details were cached.
  if (pubSubService) {
    await pubSubService.publish('session:stopped', existingSession.id);
    if (cachedSession) {
      await enqueueNotification({ type: 'session_stopped', payload: cachedSession });
    }
  }
  console.log(`[SSEProcessor] Stopped session ${existingSession.id}`);
}

View File

@@ -0,0 +1,82 @@
/**
* Authentication plugin for Fastify
*/
import type { FastifyPluginAsync, FastifyRequest, FastifyReply } from 'fastify';
import fp from 'fastify-plugin';
import jwt from '@fastify/jwt';
import type { AuthUser } from '@tracearr/shared';
// Augment @fastify/jwt so the token payload and request.user are typed as AuthUser.
declare module '@fastify/jwt' {
  interface FastifyJWT {
    payload: AuthUser;
    user: AuthUser;
  }
}
// Expose the auth decorators registered by this plugin on the Fastify instance type.
declare module 'fastify' {
  interface FastifyInstance {
    authenticate: (request: FastifyRequest, reply: FastifyReply) => Promise<void>;
    requireOwner: (request: FastifyRequest, reply: FastifyReply) => Promise<void>;
    requireMobile: (request: FastifyRequest, reply: FastifyReply) => Promise<void>;
  }
}
/**
 * Registers @fastify/jwt (HS256, secret from JWT_SECRET, token also read from
 * the unsigned `token` cookie) and decorates the instance with three auth
 * preHandlers: authenticate, requireOwner, and requireMobile.
 *
 * Fix: async hooks that send a reply must RETURN the reply so Fastify halts
 * the request lifecycle; previously the handler chain could continue after a
 * 401/403 was already sent (see Fastify hooks docs).
 *
 * @throws Error at registration time when JWT_SECRET is not set.
 */
const authPlugin: FastifyPluginAsync = async (app) => {
  const secret = process.env.JWT_SECRET;
  if (!secret) {
    throw new Error('JWT_SECRET environment variable is required');
  }
  await app.register(jwt, {
    secret,
    sign: {
      algorithm: 'HS256',
    },
    cookie: {
      cookieName: 'token',
      signed: false,
    },
  });
  // Authenticate decorator - verifies JWT
  app.decorate('authenticate', async function (request: FastifyRequest, reply: FastifyReply) {
    try {
      await request.jwtVerify();
    } catch {
      // Return the reply so the lifecycle stops here.
      return reply.unauthorized('Invalid or expired token');
    }
  });
  // Require owner role decorator
  app.decorate('requireOwner', async function (request: FastifyRequest, reply: FastifyReply) {
    try {
      await request.jwtVerify();
      if (request.user.role !== 'owner') {
        // Valid token but insufficient role.
        return reply.forbidden('Owner access required');
      }
    } catch {
      return reply.unauthorized('Invalid or expired token');
    }
  });
  // Require mobile token decorator - validates token was issued for mobile app
  app.decorate('requireMobile', async function (request: FastifyRequest, reply: FastifyReply) {
    try {
      await request.jwtVerify();
      if (!request.user.mobile) {
        return reply.forbidden('Mobile access token required');
      }
    } catch {
      return reply.unauthorized('Invalid or expired token');
    }
  });
};
export default fp(authPlugin, {
  name: 'auth',
  dependencies: ['@fastify/cookie'],
});

View File

@@ -0,0 +1,50 @@
/**
* Redis client plugin for Fastify
*/
import type { FastifyPluginAsync } from 'fastify';
import fp from 'fastify-plugin';
import { Redis } from 'ioredis';
// Expose the shared Redis client registered by this plugin on the Fastify instance type.
declare module 'fastify' {
  interface FastifyInstance {
    redis: Redis;
  }
}
const redisPlugin: FastifyPluginAsync = async (app) => {
const redisUrl = process.env.REDIS_URL ?? 'redis://localhost:6379';
const redis = new Redis(redisUrl, {
maxRetriesPerRequest: 3,
retryStrategy(times: number) {
const delay = Math.min(times * 50, 2000);
return delay;
},
reconnectOnError(err: Error) {
const targetError = 'READONLY';
if (err.message.includes(targetError)) {
return true;
}
return false;
},
});
redis.on('connect', () => {
app.log.info('Redis connected');
});
redis.on('error', (err: Error) => {
app.log.error({ err }, 'Redis error');
});
app.decorate('redis', redis);
app.addHook('onClose', async () => {
await redis.quit();
});
};
export default fp(redisPlugin, {
name: 'redis',
});

View File

@@ -0,0 +1,114 @@
/**
* Zod validation plugin for Fastify
* Provides schema validation for request body, query, and params
*/
import type {
FastifyPluginAsync,
FastifyRequest,
FastifyReply,
preHandlerHookHandler,
} from 'fastify';
import fp from 'fastify-plugin';
import { type z, ZodError } from 'zod';
import { ValidationError } from '../utils/errors.js';
// Validation schema options
// Optional Zod schemas applied to the corresponding part of the incoming request.
export interface ValidationSchemas {
  body?: z.ZodType;
  query?: z.ZodType;
  params?: z.ZodType;
}
// Expose the validateRequest decorator registered by this plugin on the instance type.
declare module 'fastify' {
  interface FastifyInstance {
    validateRequest: (schemas: ValidationSchemas) => preHandlerHookHandler;
  }
}
/**
 * Flatten a ZodError into field-level errors. The field name is the dotted
 * issue path, or 'unknown' when the path is empty.
 */
function parseZodError(error: ZodError): Array<{ field: string; message: string }> {
  return error.issues.map(({ path, message }) => {
    const field = path.join('.') || 'unknown';
    return { field, message };
  });
}
const validationPlugin: FastifyPluginAsync = async (app) => {
/**
* Create a preHandler that validates request against Zod schemas
*/
app.decorate('validateRequest', function (schemas: ValidationSchemas): preHandlerHookHandler {
return function (
request: FastifyRequest,
_reply: FastifyReply
): void {
const errors: Array<{ field: string; message: string }> = [];
// Validate body
if (schemas.body) {
try {
const result = schemas.body.parse(request.body);
// Replace body with parsed/transformed result
request.body = result;
} catch (err) {
if (err instanceof ZodError) {
errors.push(
...parseZodError(err).map((e) => ({
field: `body.${e.field}`,
message: e.message,
}))
);
}
}
}
// Validate query
if (schemas.query) {
try {
const result = schemas.query.parse(request.query);
// Replace query with parsed/transformed result
request.query = result;
} catch (err) {
if (err instanceof ZodError) {
errors.push(
...parseZodError(err).map((e) => ({
field: `query.${e.field}`,
message: e.message,
}))
);
}
}
}
// Validate params
if (schemas.params) {
try {
const result = schemas.params.parse(request.params);
// Replace params with parsed/transformed result
request.params = result;
} catch (err) {
if (err instanceof ZodError) {
errors.push(
...parseZodError(err).map((e) => ({
field: `params.${e.field}`,
message: e.message,
}))
);
}
}
}
// If any validation errors, throw ValidationError
if (errors.length > 0) {
throw new ValidationError('Validation failed', errors);
}
};
});
};
export default fp(validationPlugin, {
name: 'validation',
});

View File

@@ -0,0 +1,598 @@
/**
* Channel Routing routes unit tests
*
* Tests the API endpoints for notification channel routing:
* - GET /routing - Get all routing configuration
* - PATCH /routing/:eventType - Update routing for specific event
*
* Also tests internal helper functions:
* - getChannelRouting() - Get routing for a specific event type
* - getAllChannelRouting() - Get all routing configuration
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser } from '@tracearr/shared';
// Mock the database module
// Only the query-builder entry points used by the routes are stubbed;
// each test wires up the chained return values it needs.
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
    insert: vi.fn(),
    update: vi.fn(),
  },
}));
// Import mocked db and routes
import { db } from '../../db/client.js';
import {
channelRoutingRoutes,
getChannelRouting,
getAllChannelRouting,
} from '../channelRouting.js';
/**
 * Build a channel-routing row for tests. Every channel defaults to enabled,
 * timestamps default to now, and the id defaults to a fresh UUID; any field
 * can be pinned via `overrides`.
 */
function createMockRouting(
  eventType: string,
  overrides?: Partial<{
    id: string;
    discordEnabled: boolean;
    webhookEnabled: boolean;
    pushEnabled: boolean;
    createdAt: Date;
    updatedAt: Date;
  }>
) {
  const {
    id = randomUUID(),
    discordEnabled = true,
    webhookEnabled = true,
    pushEnabled = true,
    createdAt = new Date(),
    updatedAt = new Date(),
  } = overrides ?? {};
  return { id, eventType, discordEnabled, webhookEnabled, pushEnabled, createdAt, updatedAt };
}
/**
 * Build a Fastify test instance with the channel-routing routes mounted
 * under the production settings prefix. The authenticate decorator is
 * stubbed to attach the supplied user to every request.
 */
async function buildTestApp(authUser: AuthUser): Promise<FastifyInstance> {
  const instance = Fastify({ logger: false });
  // sensible supplies the HTTP error helpers (forbidden, badRequest, ...).
  await instance.register(sensible);
  // Stub auth: every request is treated as the given user.
  instance.decorate('authenticate', async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  });
  await instance.register(channelRoutingRoutes, { prefix: '/settings/notifications' });
  return instance;
}
/**
 * Auth payload for an owner-role test user with one random server.
 */
function createOwnerUser(): AuthUser {
  const owner: AuthUser = {
    userId: randomUUID(),
    username: 'owner',
    role: 'owner',
    serverIds: [randomUUID()],
  };
  return owner;
}
/**
 * Auth payload for a viewer-role (non-owner) test user with one random server.
 */
function createViewerUser(): AuthUser {
  const viewer: AuthUser = {
    userId: randomUUID(),
    username: 'viewer',
    role: 'viewer',
    serverIds: [randomUUID()],
  };
  return viewer;
}
/**
 * Mock db.select() to return array of items with orderBy
 * Shape matches the route's `db.select().from(...).orderBy(...)` chain.
 */
function mockDbSelectAll(items: unknown[]) {
  vi.mocked(db.select).mockReturnValue({
    from: vi.fn().mockReturnValue({
      orderBy: vi.fn().mockResolvedValue(items),
    }),
  } as never);
}
/**
 * Mock db.select() to return single item with where + limit
 * Note: Currently unused but kept for future tests
 */
function _mockDbSelectOne(item: unknown) {
  vi.mocked(db.select).mockReturnValue({
    from: vi.fn().mockReturnValue({
      where: vi.fn().mockReturnValue({
        // Empty array models "row not found".
        limit: vi.fn().mockResolvedValue(item ? [item] : []),
      }),
    }),
  } as never);
}
/**
 * Mock db.insert() to return inserted items
 * Shape matches `db.insert(...).values(...).returning()`.
 */
function mockDbInsert(items: unknown[]) {
  vi.mocked(db.insert).mockReturnValue({
    values: vi.fn().mockReturnValue({
      returning: vi.fn().mockResolvedValue(items),
    }),
  } as never);
}
/**
 * Mock db.update() to return nothing
 * Shape matches `db.update(...).set(...).where(...)`.
 */
function mockDbUpdate() {
  vi.mocked(db.update).mockReturnValue({
    set: vi.fn().mockReturnValue({
      where: vi.fn().mockResolvedValue(undefined),
    }),
  } as never);
}
describe('Channel Routing Routes', () => {
  let app: FastifyInstance;
  const ownerUser = createOwnerUser();
  const viewerUser = createViewerUser();
  beforeEach(() => {
    vi.clearAllMocks();
  });
  afterEach(async () => {
    if (app) {
      await app.close();
    }
  });
  describe('GET /settings/notifications/routing', () => {
    it('returns all routing configuration for owner', async () => {
      app = await buildTestApp(ownerUser);
      const mockRoutings = [
        createMockRouting('violation_detected'),
        createMockRouting('stream_started', { discordEnabled: false }),
        createMockRouting('stream_stopped', { pushEnabled: false }),
      ];
      mockDbSelectAll(mockRoutings);
      const response = await app.inject({
        method: 'GET',
        url: '/settings/notifications/routing',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toHaveLength(3);
      expect(body[0]).toHaveProperty('eventType', 'violation_detected');
      expect(body[0]).toHaveProperty('discordEnabled', true);
      expect(body[1]).toHaveProperty('eventType', 'stream_started');
      expect(body[1]).toHaveProperty('discordEnabled', false);
    });
    it('creates default routing if no rows exist', async () => {
      app = await buildTestApp(ownerUser);
      // First call returns empty, meaning no routing exists
      mockDbSelectAll([]);
      // Mock insert to return defaults
      const defaultRoutings = [
        createMockRouting('violation_detected'),
        createMockRouting('stream_started', {
          discordEnabled: false,
          webhookEnabled: false,
          pushEnabled: false,
        }),
      ];
      mockDbInsert(defaultRoutings);
      const response = await app.inject({
        method: 'GET',
        url: '/settings/notifications/routing',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toHaveLength(2);
      expect(db.insert).toHaveBeenCalled();
    });
    it('rejects non-owner access with 403', async () => {
      app = await buildTestApp(viewerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/settings/notifications/routing',
      });
      expect(response.statusCode).toBe(403);
      const body = response.json();
      expect(body.message).toBe('Only server owners can view notification routing');
    });
  });
  describe('PATCH /settings/notifications/routing/:eventType', () => {
    it('updates existing routing for owner', async () => {
      app = await buildTestApp(ownerUser);
      const existingRouting = createMockRouting('violation_detected');
      // First select finds existing
      vi.mocked(db.select).mockReturnValueOnce({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([existingRouting]),
          }),
        }),
      } as never);
      // Mock update
      mockDbUpdate();
      // Second select returns updated
      const updatedRouting = {
        ...existingRouting,
        discordEnabled: false,
        updatedAt: new Date(),
      };
      vi.mocked(db.select).mockReturnValueOnce({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([updatedRouting]),
          }),
        }),
      } as never);
      const response = await app.inject({
        method: 'PATCH',
        url: '/settings/notifications/routing/violation_detected',
        payload: { discordEnabled: false },
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.eventType).toBe('violation_detected');
      expect(body.discordEnabled).toBe(false);
      expect(db.update).toHaveBeenCalled();
    });
    it('creates new routing if none exists', async () => {
      app = await buildTestApp(ownerUser);
      const newRouting = createMockRouting('server_down', {
        discordEnabled: true,
        webhookEnabled: true,
        pushEnabled: false,
      });
      // Track select calls - first returns empty, second returns created routing
      let selectCallCount = 0;
      vi.mocked(db.select).mockImplementation(() => {
        selectCallCount++;
        if (selectCallCount === 1) {
          // First call - no existing routing
          return {
            from: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                limit: vi.fn().mockResolvedValue([]),
              }),
            }),
          } as never;
        } else {
          // Second call - return newly created routing
          return {
            from: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                limit: vi.fn().mockResolvedValue([newRouting]),
              }),
            }),
          } as never;
        }
      });
      // Mock insert for new routing
      mockDbInsert([newRouting]);
      const response = await app.inject({
        method: 'PATCH',
        url: '/settings/notifications/routing/server_down',
        payload: { pushEnabled: false },
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.eventType).toBe('server_down');
      expect(body.pushEnabled).toBe(false);
      expect(db.insert).toHaveBeenCalled();
    });
    it('rejects invalid event type with 400', async () => {
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'PATCH',
        url: '/settings/notifications/routing/invalid_event_type',
        payload: { discordEnabled: false },
      });
      expect(response.statusCode).toBe(400);
      const body = response.json();
      expect(body.message).toContain('Invalid event type');
    });
    it('rejects invalid request body with 400', async () => {
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'PATCH',
        url: '/settings/notifications/routing/violation_detected',
        payload: { discordEnabled: 'not-a-boolean' },
      });
      expect(response.statusCode).toBe(400);
      const body = response.json();
      expect(body.message).toBe('Invalid request body');
    });
    it('rejects non-owner access with 403', async () => {
      app = await buildTestApp(viewerUser);
      const response = await app.inject({
        method: 'PATCH',
        url: '/settings/notifications/routing/violation_detected',
        payload: { discordEnabled: false },
      });
      expect(response.statusCode).toBe(403);
      const body = response.json();
      expect(body.message).toBe('Only server owners can update notification routing');
    });
    it('updates multiple channel settings at once', async () => {
      app = await buildTestApp(ownerUser);
      const existingRouting = createMockRouting('violation_detected');
      const updatedRouting = {
        ...existingRouting,
        discordEnabled: false,
        webhookEnabled: false,
        pushEnabled: true,
      };
      // Track select calls: first sees the existing row, re-fetch sees the update
      let selectCallCount = 0;
      vi.mocked(db.select).mockImplementation(() => {
        selectCallCount++;
        const routing = selectCallCount === 1 ? existingRouting : updatedRouting;
        return {
          from: vi.fn().mockReturnValue({
            where: vi.fn().mockReturnValue({
              limit: vi.fn().mockResolvedValue([routing]),
            }),
          }),
        } as never;
      });
      mockDbUpdate();
      const response = await app.inject({
        method: 'PATCH',
        url: '/settings/notifications/routing/violation_detected',
        payload: {
          discordEnabled: false,
          webhookEnabled: false,
          pushEnabled: true,
        },
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.discordEnabled).toBe(false);
      expect(body.webhookEnabled).toBe(false);
      expect(body.pushEnabled).toBe(true);
    });
    it('handles partial updates', async () => {
      app = await buildTestApp(ownerUser);
      const existingRouting = createMockRouting('stream_started', {
        discordEnabled: true,
        webhookEnabled: true,
        pushEnabled: true,
      });
      // Only discord changed
      const updatedRouting = { ...existingRouting, discordEnabled: false };
      // Track select calls
      let selectCallCount = 0;
      vi.mocked(db.select).mockImplementation(() => {
        selectCallCount++;
        const routing = selectCallCount === 1 ? existingRouting : updatedRouting;
        return {
          from: vi.fn().mockReturnValue({
            where: vi.fn().mockReturnValue({
              limit: vi.fn().mockResolvedValue([routing]),
            }),
          }),
        } as never;
      });
      mockDbUpdate();
      const response = await app.inject({
        method: 'PATCH',
        url: '/settings/notifications/routing/stream_started',
        payload: { discordEnabled: false },
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.discordEnabled).toBe(false);
      // Others should remain unchanged
      expect(body.webhookEnabled).toBe(true);
      expect(body.pushEnabled).toBe(true);
    });
  });
});
describe('Channel Routing Helper Functions', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    // Reset all mock implementations
    vi.mocked(db.select).mockReset();
    vi.mocked(db.insert).mockReset();
    vi.mocked(db.update).mockReset();
  });
  describe('getChannelRouting', () => {
    it('returns routing for existing event type', async () => {
      vi.mocked(db.select).mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([
              {
                discordEnabled: true,
                webhookEnabled: false,
                pushEnabled: true,
              },
            ]),
          }),
        }),
      }) as never);
      const routing = await getChannelRouting('violation_detected');
      expect(routing.discordEnabled).toBe(true);
      expect(routing.webhookEnabled).toBe(false);
      expect(routing.pushEnabled).toBe(true);
    });
    it('returns defaults for high-priority events with no routing', async () => {
      // Empty result set models "no routing row configured".
      vi.mocked(db.select).mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([]),
          }),
        }),
      }) as never);
      const routing = await getChannelRouting('violation_detected');
      // High-priority events default to enabled
      expect(routing.discordEnabled).toBe(true);
      expect(routing.webhookEnabled).toBe(true);
      expect(routing.pushEnabled).toBe(true);
    });
    it('returns defaults for low-priority events with no routing', async () => {
      vi.mocked(db.select).mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([]),
          }),
        }),
      }) as never);
      const routing = await getChannelRouting('stream_started');
      // Low-priority events default to disabled
      expect(routing.discordEnabled).toBe(false);
      expect(routing.webhookEnabled).toBe(false);
      expect(routing.pushEnabled).toBe(false);
    });
    it('returns defaults for trust_score_changed (low-priority)', async () => {
      vi.mocked(db.select).mockImplementation(() => ({
        from: vi.fn().mockReturnValue({
          where: vi.fn().mockReturnValue({
            limit: vi.fn().mockResolvedValue([]),
          }),
        }),
      }) as never);
      const routing = await getChannelRouting('trust_score_changed');
      expect(routing.discordEnabled).toBe(false);
      expect(routing.webhookEnabled).toBe(false);
      expect(routing.pushEnabled).toBe(false);
    });
  });
  describe('getAllChannelRouting', () => {
    it('returns map of all routing configuration', async () => {
      // getAllChannelRouting uses a bare select().from(...) with no where/limit.
      vi.mocked(db.select).mockImplementation(() => ({
        from: vi.fn().mockResolvedValue([
          {
            eventType: 'violation_detected',
            discordEnabled: true,
            webhookEnabled: true,
            pushEnabled: true,
          },
          {
            eventType: 'stream_started',
            discordEnabled: false,
            webhookEnabled: false,
            pushEnabled: false,
          },
          {
            eventType: 'server_down',
            discordEnabled: true,
            webhookEnabled: true,
            pushEnabled: false,
          },
        ]),
      }) as never);
      const routingMap = await getAllChannelRouting();
      expect(routingMap.size).toBe(3);
      expect(routingMap.get('violation_detected')).toEqual({
        discordEnabled: true,
        webhookEnabled: true,
        pushEnabled: true,
      });
      expect(routingMap.get('stream_started')).toEqual({
        discordEnabled: false,
        webhookEnabled: false,
        pushEnabled: false,
      });
      expect(routingMap.get('server_down')).toEqual({
        discordEnabled: true,
        webhookEnabled: true,
        pushEnabled: false,
      });
    });
    it('returns empty map when no routing exists', async () => {
      vi.mocked(db.select).mockImplementation(() => ({
        from: vi.fn().mockResolvedValue([]),
      }) as never);
      const routingMap = await getAllChannelRouting();
      expect(routingMap.size).toBe(0);
    });
  });
});

View File

@@ -0,0 +1,399 @@
/**
* Dashboard stats route tests
*
* Tests the API endpoint for dashboard summary metrics:
* - GET /dashboard - Dashboard summary metrics (active streams, plays, watch time, alerts)
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser, ActiveSession, DashboardStats } from '@tracearr/shared';
import { REDIS_KEYS } from '@tracearr/shared';
// Mock the prepared statements module
// Each prepared statement is reduced to an execute() stub the tests resolve per-case.
vi.mock('../../db/prepared.js', () => ({
  playsCountSince: {
    execute: vi.fn(),
  },
  watchTimeSince: {
    execute: vi.fn(),
  },
  violationsCountSince: {
    execute: vi.fn(),
  },
  uniqueUsersSince: {
    execute: vi.fn(),
  },
}));
// Mock cache service - need to provide getAllActiveSessions for active stream count
// Hoisted reference so individual tests can override the resolved session list.
const mockGetAllActiveSessions = vi.fn().mockResolvedValue([]);
vi.mock('../../services/cache.js', () => ({
  getCacheService: vi.fn(() => ({
    getAllActiveSessions: mockGetAllActiveSessions,
  })),
}));
// Import the mocked modules and the routes
import {
playsCountSince,
watchTimeSince,
violationsCountSince,
uniqueUsersSince,
} from '../../db/prepared.js';
import { dashboardRoutes } from '../stats/dashboard.js';
/**
 * Build a test Fastify instance with mocked auth and redis.
 * The default redis stub models a cache miss (get resolves null).
 */
async function buildTestApp(
  authUser: AuthUser,
  redisMock?: { get: ReturnType<typeof vi.fn>; setex: ReturnType<typeof vi.fn> }
): Promise<FastifyInstance> {
  const app = Fastify({ logger: false });
  await app.register(sensible);
  // Mock the authenticate decorator (typed as unknown + cast instead of `any`,
  // matching the pattern used by the other route test suites and the lint config)
  app.decorate('authenticate', async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  });
  // Mock Redis
  app.decorate(
    'redis',
    (redisMock ?? {
      get: vi.fn().mockResolvedValue(null),
      setex: vi.fn().mockResolvedValue('OK'),
    }) as never
  );
  await app.register(dashboardRoutes, { prefix: '/stats' });
  return app;
}
/** Auth payload for an owner test user, optionally pinned to specific server IDs. */
function createOwnerUser(serverIds?: string[]): AuthUser {
  const ids = serverIds ?? [randomUUID()];
  return {
    userId: randomUUID(),
    username: 'owner',
    role: 'owner',
    serverIds: ids,
  };
}
/**
 * Build a fully-populated ActiveSession fixture. Every field has a sensible
 * default and can be pinned via `overrides`; the session's server reference
 * reuses the (possibly overridden) serverId so the two stay consistent.
 */
function createActiveSession(overrides: Partial<ActiveSession> = {}): ActiveSession {
  const serverId = overrides.serverId ?? randomUUID();
  return {
    id: overrides.id ?? randomUUID(),
    sessionKey: overrides.sessionKey ?? 'session-123',
    serverId,
    serverUserId: overrides.serverUserId ?? randomUUID(),
    state: overrides.state ?? 'playing',
    mediaType: overrides.mediaType ?? 'movie',
    mediaTitle: overrides.mediaTitle ?? 'Test Movie',
    grandparentTitle: overrides.grandparentTitle ?? null,
    seasonNumber: overrides.seasonNumber ?? null,
    episodeNumber: overrides.episodeNumber ?? null,
    year: overrides.year ?? 2024,
    thumbPath: overrides.thumbPath ?? '/library/metadata/123/thumb',
    ratingKey: overrides.ratingKey ?? 'media-123',
    externalSessionId: overrides.externalSessionId ?? null,
    startedAt: overrides.startedAt ?? new Date(),
    stoppedAt: overrides.stoppedAt ?? null,
    durationMs: overrides.durationMs ?? 0,
    progressMs: overrides.progressMs ?? 0,
    totalDurationMs: overrides.totalDurationMs ?? 7200000,
    lastPausedAt: overrides.lastPausedAt ?? null,
    pausedDurationMs: overrides.pausedDurationMs ?? 0,
    referenceId: overrides.referenceId ?? null,
    watched: overrides.watched ?? false,
    ipAddress: overrides.ipAddress ?? '192.168.1.100',
    geoCity: overrides.geoCity ?? 'New York',
    geoRegion: overrides.geoRegion ?? 'NY',
    geoCountry: overrides.geoCountry ?? 'US',
    geoLat: overrides.geoLat ?? 40.7128,
    geoLon: overrides.geoLon ?? -74.006,
    playerName: overrides.playerName ?? 'Chrome',
    deviceId: overrides.deviceId ?? 'device-123',
    product: overrides.product ?? 'Plex Web',
    device: overrides.device ?? 'Chrome',
    platform: overrides.platform ?? 'Chrome',
    quality: overrides.quality ?? '1080p',
    isTranscode: overrides.isTranscode ?? false,
    bitrate: overrides.bitrate ?? 20000,
    user: overrides.user ?? {
      id: randomUUID(),
      username: 'testuser',
      thumbUrl: null,
      identityName: null,
    },
    server: overrides.server ?? {
      id: serverId,
      name: 'Test Server',
      type: 'plex',
    },
  };
}
/**
 * Route tests for GET /stats/dashboard.
 *
 * Exercises the cache-first flow: a Redis hit short-circuits the database,
 * a miss falls through to the prepared statements (playsCountSince,
 * watchTimeSince, violationsCountSince, uniqueUsersSince) plus the
 * active-session cache, and the computed payload is written back with a
 * 60-second TTL. Also covers malformed cache entries, empty result sets,
 * watch-time rounding, and serverId authorization/validation.
 */
describe('Dashboard Stats Routes', () => {
  let app: FastifyInstance;
  beforeEach(() => {
    // Reset module-level mocks so call counts do not leak between tests.
    vi.clearAllMocks();
  });
  afterEach(async () => {
    // Tear down the per-test Fastify instance to release handles.
    if (app) {
      await app.close();
    }
  });
  describe('GET /stats/dashboard', () => {
    it('should return cached stats when available', async () => {
      const ownerUser = createOwnerUser();
      const cachedStats: DashboardStats = {
        activeStreams: 5,
        todayPlays: 25,
        watchTimeHours: 12.5,
        alertsLast24h: 3,
        activeUsersToday: 8,
      };
      // Redis returns a serialized stats payload, i.e. a cache HIT.
      const redisMock = {
        get: vi.fn().mockResolvedValue(JSON.stringify(cachedStats)),
        setex: vi.fn().mockResolvedValue('OK'),
      };
      app = await buildTestApp(ownerUser, redisMock);
      const response = await app.inject({
        method: 'GET',
        url: '/stats/dashboard',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body).toEqual(cachedStats);
      expect(redisMock.get).toHaveBeenCalledWith(REDIS_KEYS.DASHBOARD_STATS);
      // Should not call database when cache hit
      expect(playsCountSince.execute).not.toHaveBeenCalled();
    });
    it('should compute stats when cache is empty', async () => {
      const ownerUser = createOwnerUser();
      // Cache MISS: route must fall back to the prepared statements.
      const redisMock = {
        get: vi.fn().mockResolvedValue(null),
        setex: vi.fn().mockResolvedValue('OK'),
      };
      // Mock prepared statement results
      vi.mocked(playsCountSince.execute).mockResolvedValue([{ count: 15 }]);
      vi.mocked(watchTimeSince.execute).mockResolvedValue([{ totalMs: 18000000 }]); // 5 hours
      vi.mocked(violationsCountSince.execute).mockResolvedValue([{ count: 2 }]);
      vi.mocked(uniqueUsersSince.execute).mockResolvedValue([{ count: 6 }]);
      app = await buildTestApp(ownerUser, redisMock);
      const response = await app.inject({
        method: 'GET',
        url: '/stats/dashboard',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.todayPlays).toBe(15);
      expect(body.watchTimeHours).toBe(5);
      expect(body.alertsLast24h).toBe(2);
      expect(body.activeUsersToday).toBe(6);
      expect(body.activeStreams).toBe(0);
      // Should cache the results
      expect(redisMock.setex).toHaveBeenCalledWith(
        REDIS_KEYS.DASHBOARD_STATS,
        60,
        expect.any(String)
      );
    });
    it('should count active sessions from cache', async () => {
      const ownerUser = createOwnerUser();
      const activeSessions = [createActiveSession(), createActiveSession(), createActiveSession()];
      // Mock the cache service to return active sessions
      mockGetAllActiveSessions.mockResolvedValueOnce(activeSessions);
      const redisMock = {
        get: vi.fn().mockResolvedValueOnce(null), // No dashboard cache
        setex: vi.fn().mockResolvedValue('OK'),
      };
      vi.mocked(playsCountSince.execute).mockResolvedValue([{ count: 10 }]);
      vi.mocked(watchTimeSince.execute).mockResolvedValue([{ totalMs: 7200000 }]); // 2 hours
      vi.mocked(violationsCountSince.execute).mockResolvedValue([{ count: 0 }]);
      vi.mocked(uniqueUsersSince.execute).mockResolvedValue([{ count: 3 }]);
      app = await buildTestApp(ownerUser, redisMock);
      const response = await app.inject({
        method: 'GET',
        url: '/stats/dashboard',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      // activeStreams is derived from the session cache, not the DB.
      expect(body.activeStreams).toBe(3);
      expect(body.todayPlays).toBe(10);
      expect(body.watchTimeHours).toBe(2);
    });
    it('should handle invalid JSON in dashboard cache gracefully', async () => {
      const ownerUser = createOwnerUser();
      // A corrupt cache entry must be treated as a MISS, not a 500.
      const redisMock = {
        get: vi
          .fn()
          .mockResolvedValueOnce('invalid json') // Invalid dashboard cache
          .mockResolvedValueOnce(null), // No active sessions
        setex: vi.fn().mockResolvedValue('OK'),
      };
      vi.mocked(playsCountSince.execute).mockResolvedValue([{ count: 5 }]);
      vi.mocked(watchTimeSince.execute).mockResolvedValue([{ totalMs: 3600000 }]);
      vi.mocked(violationsCountSince.execute).mockResolvedValue([{ count: 1 }]);
      vi.mocked(uniqueUsersSince.execute).mockResolvedValue([{ count: 2 }]);
      app = await buildTestApp(ownerUser, redisMock);
      const response = await app.inject({
        method: 'GET',
        url: '/stats/dashboard',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.todayPlays).toBe(5);
    });
    it('should handle invalid JSON in active sessions cache gracefully', async () => {
      const ownerUser = createOwnerUser();
      // Cache service handles invalid JSON internally and returns empty array
      mockGetAllActiveSessions.mockResolvedValueOnce([]);
      const redisMock = {
        get: vi.fn().mockResolvedValueOnce(null), // No dashboard cache
        setex: vi.fn().mockResolvedValue('OK'),
      };
      vi.mocked(playsCountSince.execute).mockResolvedValue([{ count: 8 }]);
      vi.mocked(watchTimeSince.execute).mockResolvedValue([{ totalMs: 0 }]);
      vi.mocked(violationsCountSince.execute).mockResolvedValue([{ count: 0 }]);
      vi.mocked(uniqueUsersSince.execute).mockResolvedValue([{ count: 4 }]);
      app = await buildTestApp(ownerUser, redisMock);
      const response = await app.inject({
        method: 'GET',
        url: '/stats/dashboard',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.activeStreams).toBe(0);
      expect(body.todayPlays).toBe(8);
    });
    it('should handle null results from prepared statements', async () => {
      const ownerUser = createOwnerUser();
      const redisMock = {
        get: vi.fn().mockResolvedValue(null),
        setex: vi.fn().mockResolvedValue('OK'),
      };
      // Empty result arrays simulate statements returning no row at all;
      // each stat must fall back to 0 rather than NaN/undefined.
      vi.mocked(playsCountSince.execute).mockResolvedValue([]);
      vi.mocked(watchTimeSince.execute).mockResolvedValue([]);
      vi.mocked(violationsCountSince.execute).mockResolvedValue([]);
      vi.mocked(uniqueUsersSince.execute).mockResolvedValue([]);
      app = await buildTestApp(ownerUser, redisMock);
      const response = await app.inject({
        method: 'GET',
        url: '/stats/dashboard',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.todayPlays).toBe(0);
      expect(body.watchTimeHours).toBe(0);
      expect(body.alertsLast24h).toBe(0);
      expect(body.activeUsersToday).toBe(0);
    });
    it('should round watch time to one decimal place', async () => {
      const ownerUser = createOwnerUser();
      const redisMock = {
        get: vi.fn().mockResolvedValue(null),
        setex: vi.fn().mockResolvedValue('OK'),
      };
      // 5.555... hours = 20000000 ms
      vi.mocked(playsCountSince.execute).mockResolvedValue([{ count: 0 }]);
      vi.mocked(watchTimeSince.execute).mockResolvedValue([{ totalMs: 20000000 }]);
      vi.mocked(violationsCountSince.execute).mockResolvedValue([{ count: 0 }]);
      vi.mocked(uniqueUsersSince.execute).mockResolvedValue([{ count: 0 }]);
      app = await buildTestApp(ownerUser, redisMock);
      const response = await app.inject({
        method: 'GET',
        url: '/stats/dashboard',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      // 20000000 ms / 3600000 = 5.555... -> rounded to 5.6
      expect(body.watchTimeHours).toBe(5.6);
    });
    it('should reject access to server not in user access list', async () => {
      const serverId1 = randomUUID();
      const serverId2 = randomUUID();
      // Non-owner user only has access to serverId1
      const viewerUser: AuthUser = {
        userId: randomUUID(),
        username: 'viewer',
        role: 'viewer',
        serverIds: [serverId1],
      };
      app = await buildTestApp(viewerUser);
      // Try to access stats for serverId2 (not in user's serverIds)
      const response = await app.inject({
        method: 'GET',
        url: `/stats/dashboard?serverId=${serverId2}`,
      });
      expect(response.statusCode).toBe(403);
    });
    it('should reject invalid serverId format', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      // serverId must be a UUID; schema validation should reject this.
      const response = await app.inject({
        method: 'GET',
        url: '/stats/dashboard?serverId=not-a-uuid',
      });
      expect(response.statusCode).toBe(400);
    });
  });
});

View File

@@ -0,0 +1,652 @@
/**
* Debug routes unit tests
*
* Tests the hidden debug API endpoints (owner-only):
* - GET /debug/stats - Database statistics
* - DELETE /debug/sessions - Clear all sessions
* - DELETE /debug/violations - Clear all violations
* - DELETE /debug/users - Clear all non-owner users
* - DELETE /debug/servers - Clear all servers
* - DELETE /debug/rules - Clear all rules
* - POST /debug/reset - Full factory reset
* - POST /debug/refresh-aggregates - Refresh TimescaleDB aggregates
* - GET /debug/env - Safe environment info
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser } from '@tracearr/shared';
// Mock the Drizzle database module so no real Postgres connection is opened.
// vi.mock is hoisted above the imports, so '../../db/client.js' is already
// stubbed by the time debug.js imports it. Each query entry point is a bare
// vi.fn(); tests install per-call behavior via the mockDb* helpers.
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
    delete: vi.fn(),
    update: vi.fn(),
    execute: vi.fn(),
  },
}));
// Import mocked db and routes
import { db } from '../../db/client.js';
import { debugRoutes } from '../debug.js';
/**
* Build a test Fastify instance with mocked auth
*/
/**
 * Create a Fastify instance wired with the debug routes and a stubbed
 * `authenticate` decorator that attributes every request to `authUser`.
 *
 * @param authUser - The auth principal to inject into each request.
 * @returns A ready-to-inject Fastify instance (caller must close it).
 */
async function buildTestApp(authUser: AuthUser): Promise<FastifyInstance> {
  const instance = Fastify({ logger: false });

  // @fastify/sensible supplies the httpErrors helpers used by the routes.
  await instance.register(sensible);

  // Replace real JWT auth: unconditionally attach the supplied user.
  instance.decorate('authenticate', async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  });

  await instance.register(debugRoutes, { prefix: '/debug' });
  return instance;
}
/**
* Create a mock owner auth user
*/
/**
 * Build an AuthUser fixture with the `owner` role and access to one
 * freshly generated server id.
 */
function createOwnerUser(): AuthUser {
  const ownerId = randomUUID();
  const accessibleServer = randomUUID();
  return {
    userId: ownerId,
    username: 'owner',
    role: 'owner',
    serverIds: [accessibleServer],
  };
}
/**
* Create a mock viewer auth user (non-owner)
*/
/**
 * Build an AuthUser fixture with the non-privileged `viewer` role,
 * used to assert that debug endpoints reject non-owners.
 */
function createViewerUser(): AuthUser {
  const viewerId = randomUUID();
  const accessibleServer = randomUUID();
  return {
    userId: viewerId,
    username: 'viewer',
    role: 'viewer',
    serverIds: [accessibleServer],
  };
}
/**
* Create a mock for db.select() with count queries (Promise.all pattern)
*/
/**
 * Stub db.select() so each successive call resolves to the next value in
 * `counts` (falling back to 0 once the list is exhausted). Mirrors the
 * Promise.all fan-out of count queries in GET /debug/stats.
 */
function mockDbSelectCounts(counts: number[]) {
  // Work on a copy so the caller's array is never mutated.
  const queue = [...counts];
  vi.mocked(db.select).mockImplementation(() => {
    const count = queue.shift() ?? 0;
    const rows = Promise.resolve([{ count }]);
    return { from: vi.fn().mockReturnValue(rows) } as never;
  });
}
/**
* Create a mock for db.execute() for database size/table queries
*/
/**
 * Stub db.execute() so each successive call resolves to the next entry in
 * `results`, defaulting to `{ rows: [] }` when the list runs out. Used for
 * raw-SQL queries (database size, table sizes, aggregate refresh).
 */
function mockDbExecute(results: unknown[]) {
  // Copy so repeated calls consume a private queue, not the caller's array.
  const pending = [...results];
  vi.mocked(db.execute).mockImplementation(() => {
    const next = pending.shift() ?? { rows: [] };
    return Promise.resolve(next) as never;
  });
}
/**
* Create a mock for db.delete()
*/
/**
 * Stub db.delete() with a chainable result supporting both shapes the
 * routes use: `.returning()` directly and `.where(...).returning()`.
 * Both resolve to `deletedItems`.
 */
function mockDbDelete(deletedItems: { id: string }[]) {
  const makeReturning = () => vi.fn().mockResolvedValue(deletedItems);
  const chain = {
    returning: makeReturning(),
    where: vi.fn().mockReturnValue({ returning: makeReturning() }),
  };
  vi.mocked(db.delete).mockReturnValue(chain as never);
}
/**
* Create a mock for db.select() for user queries
*/
/**
 * Stub db.select() for the `.from(...).where(...)` user lookup shape,
 * resolving to the supplied user rows.
 */
function mockDbSelectUsers(users: { id: string }[]) {
  const whereStage = { where: vi.fn().mockResolvedValue(users) };
  const fromStage = { from: vi.fn().mockReturnValue(whereStage) };
  vi.mocked(db.select).mockReturnValue(fromStage as never);
}
/**
* Create a mock for db.update()
*/
/**
 * Stub db.update() for the `.set(...).where(...)` shape; the final stage
 * resolves to undefined (the routes ignore the update result).
 */
function mockDbUpdate() {
  const whereStage = { where: vi.fn().mockResolvedValue(undefined) };
  const setStage = { set: vi.fn().mockReturnValue(whereStage) };
  vi.mocked(db.update).mockReturnValue(setStage as never);
}
/**
 * Tests for the owner-only /debug routes: authorization gating, DB stats
 * reporting, the destructive DELETE endpoints, factory reset, TimescaleDB
 * aggregate refresh, and the masked environment report.
 */
describe('Debug Routes', () => {
  let app: FastifyInstance;
  // Fixtures are shared across tests; they carry no mutable state.
  const ownerUser = createOwnerUser();
  const viewerUser = createViewerUser();
  beforeEach(() => {
    // Reset mock call history so per-test expectations are isolated.
    vi.clearAllMocks();
  });
  afterEach(async () => {
    if (app) {
      await app.close();
    }
  });
  describe('Authorization', () => {
    it('allows owner access to debug routes', async () => {
      app = await buildTestApp(ownerUser);
      // Mock for GET /env (simplest endpoint)
      const response = await app.inject({
        method: 'GET',
        url: '/debug/env',
      });
      expect(response.statusCode).toBe(200);
    });
    it('rejects non-owner access with 403', async () => {
      app = await buildTestApp(viewerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/debug/env',
      });
      expect(response.statusCode).toBe(403);
      const body = response.json();
      expect(body.message).toBe('Owner access required');
    });
    it('rejects viewer from all debug endpoints', async () => {
      app = await buildTestApp(viewerUser);
      // Every debug endpoint must enforce the owner check, not just /env.
      const endpoints = [
        { method: 'GET' as const, url: '/debug/stats' },
        { method: 'DELETE' as const, url: '/debug/sessions' },
        { method: 'DELETE' as const, url: '/debug/violations' },
        { method: 'DELETE' as const, url: '/debug/users' },
        { method: 'DELETE' as const, url: '/debug/servers' },
        { method: 'DELETE' as const, url: '/debug/rules' },
        { method: 'POST' as const, url: '/debug/reset' },
        { method: 'POST' as const, url: '/debug/refresh-aggregates' },
        { method: 'GET' as const, url: '/debug/env' },
      ];
      for (const { method, url } of endpoints) {
        const response = await app.inject({ method, url });
        expect(response.statusCode).toBe(403);
      }
    });
  });
  describe('GET /debug/stats', () => {
    it('returns database statistics', async () => {
      app = await buildTestApp(ownerUser);
      // Mock count queries (order: sessions, violations, users, servers, rules)
      mockDbSelectCounts([100, 25, 50, 3, 10]);
      // Mock execute for database size and table sizes
      mockDbExecute([
        { rows: [{ size: '256 MB' }] },
        {
          rows: [
            { table_name: 'sessions', total_size: '128 MB' },
            { table_name: 'users', total_size: '64 MB' },
          ],
        },
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/debug/stats',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.counts).toEqual({
        sessions: 100,
        violations: 25,
        users: 50,
        servers: 3,
        rules: 10,
      });
      expect(body.database.size).toBe('256 MB');
      expect(body.database.tables).toHaveLength(2);
    });
    it('handles empty database', async () => {
      app = await buildTestApp(ownerUser);
      mockDbSelectCounts([0, 0, 0, 0, 0]);
      mockDbExecute([
        { rows: [{ size: '8 KB' }] },
        { rows: [] },
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/debug/stats',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.counts.sessions).toBe(0);
      expect(body.counts.violations).toBe(0);
      expect(body.counts.users).toBe(0);
      expect(body.counts.servers).toBe(0);
      expect(body.counts.rules).toBe(0);
    });
    it('handles missing count values (undefined)', async () => {
      app = await buildTestApp(ownerUser);
      // Mock count queries returning empty arrays (undefined count)
      vi.mocked(db.select).mockImplementation(() => {
        return {
          from: vi.fn().mockReturnValue(Promise.resolve([])), // Empty array, no count property
        } as never;
      });
      mockDbExecute([
        { rows: [{ size: '8 KB' }] },
        { rows: [] },
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/debug/stats',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      // Should fallback to 0 for all counts
      expect(body.counts.sessions).toBe(0);
      expect(body.counts.violations).toBe(0);
      expect(body.counts.users).toBe(0);
      expect(body.counts.servers).toBe(0);
      expect(body.counts.rules).toBe(0);
    });
    it('handles missing database size', async () => {
      app = await buildTestApp(ownerUser);
      mockDbSelectCounts([100, 25, 50, 3, 10]);
      // Mock execute with empty rows for database size
      mockDbExecute([
        { rows: [] }, // No size row
        { rows: [] },
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/debug/stats',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.database.size).toBe('unknown');
    });
  });
  describe('DELETE /debug/sessions', () => {
    it('deletes all sessions and violations', async () => {
      app = await buildTestApp(ownerUser);
      // Mock delete for violations first, then sessions
      // (the route deletes violations before sessions to satisfy FK order).
      let deleteCallIndex = 0;
      vi.mocked(db.delete).mockImplementation(() => {
        const items =
          deleteCallIndex === 0
            ? [{ id: 'v1' }, { id: 'v2' }] // violations
            : [{ id: 's1' }, { id: 's2' }, { id: 's3' }]; // sessions
        deleteCallIndex++;
        return {
          returning: vi.fn().mockResolvedValue(items),
        } as never;
      });
      const response = await app.inject({
        method: 'DELETE',
        url: '/debug/sessions',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.deleted.sessions).toBe(3);
      expect(body.deleted.violations).toBe(2);
    });
    it('handles no sessions to delete', async () => {
      app = await buildTestApp(ownerUser);
      vi.mocked(db.delete).mockReturnValue({
        returning: vi.fn().mockResolvedValue([]),
      } as never);
      const response = await app.inject({
        method: 'DELETE',
        url: '/debug/sessions',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.deleted.sessions).toBe(0);
      expect(body.deleted.violations).toBe(0);
    });
  });
  describe('DELETE /debug/violations', () => {
    it('deletes all violations', async () => {
      app = await buildTestApp(ownerUser);
      mockDbDelete([{ id: 'v1' }, { id: 'v2' }, { id: 'v3' }]);
      const response = await app.inject({
        method: 'DELETE',
        url: '/debug/violations',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.deleted).toBe(3);
    });
    it('handles no violations to delete', async () => {
      app = await buildTestApp(ownerUser);
      mockDbDelete([]);
      const response = await app.inject({
        method: 'DELETE',
        url: '/debug/violations',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.deleted).toBe(0);
    });
  });
  describe('DELETE /debug/users', () => {
    it('deletes non-owner users', async () => {
      app = await buildTestApp(ownerUser);
      // Mock select to find non-owner users
      mockDbSelectUsers([{ id: 'user-1' }, { id: 'user-2' }]);
      // Mock delete operations: the first two calls are dependent-row
      // cleanups (no .returning()), the final call deletes the users.
      let deleteCallIndex = 0;
      vi.mocked(db.delete).mockImplementation(() => {
        const result =
          deleteCallIndex < 2
            ? { where: vi.fn().mockResolvedValue(undefined) }
            : {
                where: vi.fn().mockReturnValue({
                  returning: vi
                    .fn()
                    .mockResolvedValue([{ id: 'user-1' }, { id: 'user-2' }]),
                }),
              };
        deleteCallIndex++;
        return result as never;
      });
      const response = await app.inject({
        method: 'DELETE',
        url: '/debug/users',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.deleted).toBe(2);
    });
    it('handles no non-owner users', async () => {
      app = await buildTestApp(ownerUser);
      mockDbSelectUsers([]);
      const response = await app.inject({
        method: 'DELETE',
        url: '/debug/users',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.deleted).toBe(0);
    });
  });
  describe('DELETE /debug/servers', () => {
    it('deletes all servers', async () => {
      app = await buildTestApp(ownerUser);
      mockDbDelete([{ id: 'server-1' }, { id: 'server-2' }]);
      const response = await app.inject({
        method: 'DELETE',
        url: '/debug/servers',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.deleted).toBe(2);
    });
  });
  describe('DELETE /debug/rules', () => {
    it('deletes all rules and violations first', async () => {
      app = await buildTestApp(ownerUser);
      // Mock delete - first for violations (no returning), then for rules (with returning)
      let deleteCallIndex = 0;
      vi.mocked(db.delete).mockImplementation(() => {
        deleteCallIndex++;
        if (deleteCallIndex === 1) {
          // violations - just resolves
          return Promise.resolve() as never;
        } else {
          // rules - returns deleted items
          return {
            returning: vi
              .fn()
              .mockResolvedValue([{ id: 'rule-1' }, { id: 'rule-2' }]),
          } as never;
        }
      });
      const response = await app.inject({
        method: 'DELETE',
        url: '/debug/rules',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.deleted).toBe(2);
    });
  });
  describe('POST /debug/reset', () => {
    it('performs full factory reset', async () => {
      app = await buildTestApp(ownerUser);
      // Mock all delete operations
      vi.mocked(db.delete).mockReturnValue(Promise.resolve() as never);
      // Mock update for settings reset
      mockDbUpdate();
      const response = await app.inject({
        method: 'POST',
        url: '/debug/reset',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.message).toContain('Factory reset complete');
      // Verify delete was called 11 times (violations, terminationLogs, sessions, rules,
      // notificationChannelRouting, notificationPreferences, mobileSessions, mobileTokens,
      // serverUsers, users, servers)
      expect(db.delete).toHaveBeenCalledTimes(11);
      // Verify settings update was called
      expect(db.update).toHaveBeenCalled();
    });
  });
  describe('POST /debug/refresh-aggregates', () => {
    it('refreshes continuous aggregates successfully', async () => {
      app = await buildTestApp(ownerUser);
      vi.mocked(db.execute).mockResolvedValue({ rows: [] } as never);
      const response = await app.inject({
        method: 'POST',
        url: '/debug/refresh-aggregates',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(true);
      expect(body.message).toBe('Aggregates refreshed');
      // Should call execute twice (hourly_stats and daily_stats)
      expect(db.execute).toHaveBeenCalledTimes(2);
    });
    it('handles aggregate refresh failure gracefully', async () => {
      app = await buildTestApp(ownerUser);
      // Missing aggregates must be reported, not thrown as a 500.
      vi.mocked(db.execute).mockRejectedValue(
        new Error('Aggregates not configured')
      );
      const response = await app.inject({
        method: 'POST',
        url: '/debug/refresh-aggregates',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body.success).toBe(false);
      expect(body.message).toContain('not configured or refresh failed');
    });
  });
  describe('GET /debug/env', () => {
    it('returns safe environment info', async () => {
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/debug/env',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      // Check structure
      expect(body).toHaveProperty('nodeVersion');
      expect(body).toHaveProperty('platform');
      expect(body).toHaveProperty('arch');
      expect(body).toHaveProperty('uptime');
      expect(body).toHaveProperty('memoryUsage');
      expect(body).toHaveProperty('env');
      // Check memory usage format
      expect(body.memoryUsage.heapUsed).toMatch(/^\d+ MB$/);
      expect(body.memoryUsage.heapTotal).toMatch(/^\d+ MB$/);
      expect(body.memoryUsage.rss).toMatch(/^\d+ MB$/);
      // Check env does not expose secrets
      expect(body.env.DATABASE_URL).toMatch(/^\[(set|not set)\]$/);
      expect(body.env.REDIS_URL).toMatch(/^\[(set|not set)\]$/);
      expect(body.env.ENCRYPTION_KEY).toMatch(/^\[(set|not set)\]$/);
    });
    it('masks sensitive environment variables', async () => {
      app = await buildTestApp(ownerUser);
      // Set env vars temporarily
      // NOTE(review): pre-existing values are not saved and restored here
      // (unlike the '[not set]' test below) — if the test runner defines
      // DATABASE_URL/REDIS_URL, this test deletes them for the remainder of
      // the process. Consider adopting the save/restore pattern.
      process.env.DATABASE_URL = 'postgres://secret:password@localhost/db';
      process.env.REDIS_URL = 'redis://secret@localhost:6379';
      const response = await app.inject({
        method: 'GET',
        url: '/debug/env',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      // Should show [set] not the actual values
      expect(body.env.DATABASE_URL).toBe('[set]');
      expect(body.env.REDIS_URL).toBe('[set]');
      // Clean up
      delete process.env.DATABASE_URL;
      delete process.env.REDIS_URL;
    });
    it('shows [not set] for unset environment variables', async () => {
      app = await buildTestApp(ownerUser);
      // Ensure env vars are NOT set
      const origDbUrl = process.env.DATABASE_URL;
      const origRedisUrl = process.env.REDIS_URL;
      const origEncKey = process.env.ENCRYPTION_KEY;
      delete process.env.DATABASE_URL;
      delete process.env.REDIS_URL;
      delete process.env.ENCRYPTION_KEY;
      const response = await app.inject({
        method: 'GET',
        url: '/debug/env',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      // Should show [not set] for unset env vars
      expect(body.env.DATABASE_URL).toBe('[not set]');
      expect(body.env.REDIS_URL).toBe('[not set]');
      expect(body.env.ENCRYPTION_KEY).toBe('[not set]');
      // Restore original values
      if (origDbUrl) process.env.DATABASE_URL = origDbUrl;
      if (origRedisUrl) process.env.REDIS_URL = origRedisUrl;
      if (origEncKey) process.env.ENCRYPTION_KEY = origEncKey;
    });
  });
});

View File

@@ -0,0 +1,429 @@
/**
* Image routes unit tests
*
* Tests the API endpoints for image proxy functionality:
* - GET /images/proxy - Proxy an image from a media server
* - GET /images/avatar - Get a user avatar
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
// Mock the image proxy service (hoisted above imports by vitest) so route
// tests control the returned bytes/content type and no media server or
// Redis is contacted.
vi.mock('../../services/imageProxy.js', () => ({
  proxyImage: vi.fn(),
}));
// Import mocked service and routes
import { proxyImage } from '../../services/imageProxy.js';
import { imageRoutes } from '../images.js';
/**
* Build a test Fastify instance
* Note: Image routes are public (no auth required)
*/
/**
 * Create a Fastify instance with only the image routes registered.
 * No auth decorator is installed — image routes are public.
 *
 * @returns A ready-to-inject Fastify instance (caller must close it).
 */
async function buildTestApp(): Promise<FastifyInstance> {
  const instance = Fastify({ logger: false });

  // @fastify/sensible supplies the httpErrors helpers used by the routes.
  await instance.register(sensible);

  await instance.register(imageRoutes, { prefix: '/images' });
  return instance;
}
/**
 * Tests for the public image proxy endpoints: parameter defaulting and
 * validation for /images/proxy, cache HIT/MISS headers, and the fallback
 * behavior of /images/avatar when no server/url is supplied.
 */
describe('Image Routes', () => {
  let app: FastifyInstance;
  const mockProxyImage = vi.mocked(proxyImage);
  const validServerId = randomUUID();
  beforeEach(() => {
    vi.clearAllMocks();
  });
  afterEach(async () => {
    if (app) {
      await app.close();
    }
  });
  describe('GET /images/proxy', () => {
    it('returns proxied image with correct headers', async () => {
      app = await buildTestApp();
      const mockImageData = Buffer.from('fake-image-data');
      mockProxyImage.mockResolvedValue({
        data: mockImageData,
        contentType: 'image/jpeg',
        cached: false,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
          url: '/library/metadata/123/thumb/456',
        },
      });
      expect(response.statusCode).toBe(200);
      expect(response.headers['content-type']).toBe('image/jpeg');
      expect(response.headers['x-cache']).toBe('MISS');
      expect(response.headers['cache-control']).toContain('public');
      // rawPayload preserves the exact bytes the service returned.
      expect(response.rawPayload).toEqual(mockImageData);
      // Verify service was called with defaults
      expect(mockProxyImage).toHaveBeenCalledWith({
        serverId: validServerId,
        imagePath: '/library/metadata/123/thumb/456',
        width: 300,
        height: 450,
        fallback: 'poster',
      });
    });
    it('returns cache HIT header when image is cached', async () => {
      app = await buildTestApp();
      mockProxyImage.mockResolvedValue({
        data: Buffer.from('cached-image'),
        contentType: 'image/png',
        cached: true,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
          url: '/some/image/path',
        },
      });
      expect(response.statusCode).toBe(200);
      expect(response.headers['x-cache']).toBe('HIT');
    });
    it('accepts custom width and height', async () => {
      app = await buildTestApp();
      mockProxyImage.mockResolvedValue({
        data: Buffer.from('image'),
        contentType: 'image/webp',
        cached: false,
      });
      // Query values arrive as strings and must be coerced to numbers.
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
          url: '/path',
          width: '500',
          height: '750',
        },
      });
      expect(response.statusCode).toBe(200);
      expect(mockProxyImage).toHaveBeenCalledWith(
        expect.objectContaining({
          width: 500,
          height: 750,
        })
      );
    });
    it('accepts avatar fallback type', async () => {
      app = await buildTestApp();
      mockProxyImage.mockResolvedValue({
        data: Buffer.from('image'),
        contentType: 'image/png',
        cached: false,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
          url: '/path',
          fallback: 'avatar',
        },
      });
      expect(response.statusCode).toBe(200);
      expect(mockProxyImage).toHaveBeenCalledWith(
        expect.objectContaining({
          fallback: 'avatar',
        })
      );
    });
    it('accepts art fallback type', async () => {
      app = await buildTestApp();
      mockProxyImage.mockResolvedValue({
        data: Buffer.from('image'),
        contentType: 'image/png',
        cached: false,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
          url: '/path',
          fallback: 'art',
        },
      });
      expect(response.statusCode).toBe(200);
      expect(mockProxyImage).toHaveBeenCalledWith(
        expect.objectContaining({
          fallback: 'art',
        })
      );
    });
    it('rejects missing server ID', async () => {
      app = await buildTestApp();
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          url: '/some/path',
        },
      });
      expect(response.statusCode).toBe(400);
      const body = response.json();
      expect(body.error).toBe('Invalid query parameters');
    });
    it('rejects invalid server ID format', async () => {
      app = await buildTestApp();
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: 'not-a-uuid',
          url: '/some/path',
        },
      });
      expect(response.statusCode).toBe(400);
      const body = response.json();
      expect(body.error).toBe('Invalid query parameters');
    });
    it('rejects missing URL', async () => {
      app = await buildTestApp();
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
        },
      });
      expect(response.statusCode).toBe(400);
    });
    it('rejects width below minimum', async () => {
      app = await buildTestApp();
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
          url: '/path',
          width: '5',
        },
      });
      expect(response.statusCode).toBe(400);
    });
    it('rejects width above maximum', async () => {
      app = await buildTestApp();
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
          url: '/path',
          width: '3000',
        },
      });
      expect(response.statusCode).toBe(400);
    });
    it('rejects invalid fallback type', async () => {
      app = await buildTestApp();
      const response = await app.inject({
        method: 'GET',
        url: '/images/proxy',
        query: {
          server: validServerId,
          url: '/path',
          fallback: 'invalid',
        },
      });
      expect(response.statusCode).toBe(400);
    });
  });
  describe('GET /images/avatar', () => {
    it('returns avatar from media server when server and url provided', async () => {
      app = await buildTestApp();
      const mockImageData = Buffer.from('avatar-data');
      mockProxyImage.mockResolvedValue({
        data: mockImageData,
        contentType: 'image/png',
        cached: false,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/avatar',
        query: {
          server: validServerId,
          url: '/users/123/avatar',
        },
      });
      expect(response.statusCode).toBe(200);
      expect(response.headers['content-type']).toBe('image/png');
      expect(response.headers['cache-control']).toContain('public');
      expect(response.rawPayload).toEqual(mockImageData);
      // Avatars default to a 100x100 square with the avatar fallback.
      expect(mockProxyImage).toHaveBeenCalledWith({
        serverId: validServerId,
        imagePath: '/users/123/avatar',
        width: 100,
        height: 100,
        fallback: 'avatar',
      });
    });
    it('accepts custom size parameter', async () => {
      app = await buildTestApp();
      mockProxyImage.mockResolvedValue({
        data: Buffer.from('avatar'),
        contentType: 'image/jpeg',
        cached: false,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/avatar',
        query: {
          server: validServerId,
          url: '/avatar',
          size: '200',
        },
      });
      expect(response.statusCode).toBe(200);
      // A single `size` query parameter sets both dimensions.
      expect(mockProxyImage).toHaveBeenCalledWith(
        expect.objectContaining({
          width: 200,
          height: 200,
        })
      );
    });
    it('returns fallback avatar when no server provided', async () => {
      app = await buildTestApp();
      const mockFallbackData = Buffer.from('fallback-avatar');
      mockProxyImage.mockResolvedValue({
        data: mockFallbackData,
        contentType: 'image/svg+xml',
        cached: false,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/avatar',
      });
      expect(response.statusCode).toBe(200);
      expect(response.headers['cache-control']).toContain('public');
      // The sentinel 'fallback' values signal a generated placeholder avatar.
      expect(mockProxyImage).toHaveBeenCalledWith({
        serverId: 'fallback',
        imagePath: 'fallback',
        width: 100,
        height: 100,
        fallback: 'avatar',
      });
    });
    it('returns fallback avatar when server provided but no url', async () => {
      app = await buildTestApp();
      const mockFallbackData = Buffer.from('fallback-avatar');
      mockProxyImage.mockResolvedValue({
        data: mockFallbackData,
        contentType: 'image/svg+xml',
        cached: false,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/avatar',
        query: {
          server: validServerId,
        },
      });
      expect(response.statusCode).toBe(200);
      // Without URL, should use fallback
      expect(mockProxyImage).toHaveBeenCalledWith({
        serverId: 'fallback',
        imagePath: 'fallback',
        width: 100,
        height: 100,
        fallback: 'avatar',
      });
    });
    it('sets longer cache for fallback avatars', async () => {
      app = await buildTestApp();
      mockProxyImage.mockResolvedValue({
        data: Buffer.from('fallback'),
        contentType: 'image/svg+xml',
        cached: false,
      });
      const response = await app.inject({
        method: 'GET',
        url: '/images/avatar',
      });
      expect(response.statusCode).toBe(200);
      // Fallback should have longer cache (86400 seconds = 1 day)
      expect(response.headers['cache-control']).toContain('max-age=86400');
    });
  });
});

View File

@@ -0,0 +1,581 @@
/**
* Import routes unit tests
*
* Tests the API endpoints for data import from external sources:
* - POST /import/tautulli - Start Tautulli history import
* - POST /import/tautulli/test - Test Tautulli connection
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser } from '@tracearr/shared';
// Mock class for TautulliService.
// NOTE: vi.mock factories are hoisted, so the factory below closes over
// this variable by reference — tests must assign mockTautulliInstance
// before the routes construct a TautulliService, or the methods copied
// in the constructor will be undefined.
let mockTautulliInstance: {
  testConnection: ReturnType<typeof vi.fn>;
  getUsers: ReturnType<typeof vi.fn>;
  getHistory: ReturnType<typeof vi.fn>;
};
// Mock external services
vi.mock('../../services/tautulli.js', () => {
  // A `function` expression (not an arrow) is required so `this` refers to
  // the instance being constructed.
  const MockTautulliService = vi.fn().mockImplementation(function (
    this: typeof mockTautulliInstance
  ) {
    // Copy mock instance methods to this
    this.testConnection = mockTautulliInstance.testConnection;
    this.getUsers = mockTautulliInstance.getUsers;
    this.getHistory = mockTautulliInstance.getHistory;
  });
  // Add static method
  (MockTautulliService as unknown as { importHistory: ReturnType<typeof vi.fn> }).importHistory = vi.fn();
  return { TautulliService: MockTautulliService };
});
// No pub/sub in tests — routes must tolerate a null service.
vi.mock('../../services/cache.js', () => ({
  getPubSubService: vi.fn().mockReturnValue(null),
}));
vi.mock('../../services/sync.js', () => ({
  syncServer: vi.fn().mockResolvedValue(undefined),
}));
// Mock import queue functions. enqueueImport rejects by default so tests
// exercise the queue-unavailable fallback path unless overridden.
vi.mock('../../jobs/importQueue.js', () => ({
  enqueueImport: vi.fn().mockRejectedValue(new Error('Queue not available')),
  getImportStatus: vi.fn().mockResolvedValue(null),
  cancelImport: vi.fn().mockResolvedValue(false),
  getImportQueueStats: vi.fn().mockResolvedValue(null),
  getActiveImportForServer: vi.fn().mockResolvedValue(null),
}));
// Import mocked services and routes
import { TautulliService } from '../../services/tautulli.js';
import { syncServer } from '../../services/sync.js';
import {
enqueueImport,
getImportStatus,
cancelImport,
getImportQueueStats,
getActiveImportForServer,
} from '../../jobs/importQueue.js';
import { importRoutes } from '../import.js';
/**
 * Build a test Fastify instance with mocked auth.
 *
 * The real `authenticate` decorator is replaced by a stub that attaches the
 * supplied user to every request, so the import routes can be exercised
 * without registering a JWT plugin.
 */
async function buildTestApp(authUser: AuthUser): Promise<FastifyInstance> {
  const instance = Fastify({ logger: false });

  // @fastify/sensible supplies the httpErrors helpers the routes rely on.
  await instance.register(sensible);

  // Stubbed auth: every request is treated as the given user.
  instance.decorate('authenticate', async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  });

  await instance.register(importRoutes, { prefix: '/import' });
  return instance;
}
/**
 * Create a mock owner auth user with a single random server id.
 */
function createOwnerUser(): AuthUser {
  const owner: AuthUser = {
    userId: randomUUID(),
    username: 'owner',
    role: 'owner',
    serverIds: [randomUUID()],
  };
  return owner;
}
/**
 * Create a mock viewer auth user (non-owner) with a single random server id.
 */
function createViewerUser(): AuthUser {
  const viewer: AuthUser = {
    userId: randomUUID(),
    username: 'viewer',
    role: 'viewer',
    serverIds: [randomUUID()],
  };
  return viewer;
}
describe('Import Routes', () => {
let app: FastifyInstance;
const ownerUser = createOwnerUser();
const viewerUser = createViewerUser();
beforeEach(() => {
  vi.clearAllMocks();
  // Reset mock instance with default behavior
  // Pessimistic defaults (connection fails, no users/history) — tests must
  // opt in to success explicitly.
  mockTautulliInstance = {
    testConnection: vi.fn().mockResolvedValue(false),
    getUsers: vi.fn().mockResolvedValue([]),
    getHistory: vi.fn().mockResolvedValue({ total: 0 }),
  };
});
afterEach(async () => {
  // Close the per-test Fastify instance to release its resources.
  if (app) {
    await app.close();
  }
});
// Import kickoff endpoint. The queue mock rejects by default (see module
// mocks above), so these tests exercise the direct-execution fallback path.
describe('POST /import/tautulli', () => {
  const validServerId = randomUUID();
  it('starts import for owner user', async () => {
    app = await buildTestApp(ownerUser);
    // Mock TautulliService.importHistory static method
    const mockImportHistory = vi.fn().mockResolvedValue({ imported: 100 });
    (TautulliService as unknown as { importHistory: ReturnType<typeof vi.fn> }).importHistory =
      mockImportHistory;
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli',
      payload: { serverId: validServerId },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    // When queue is not available (no Redis in tests), falls back to direct execution
    expect(body.status).toBe('started');
    expect(body.message).toContain('Import started');
    // Verify server sync was called
    expect(syncServer).toHaveBeenCalledWith(validServerId, {
      syncUsers: true,
      syncLibraries: false,
    });
  });
  it('rejects non-owner users', async () => {
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli',
      payload: { serverId: validServerId },
    });
    expect(response.statusCode).toBe(403);
    const body = response.json();
    expect(body.message).toBe('Only server owners can import data');
  });
  it('rejects missing serverId', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli',
      payload: {},
    });
    expect(response.statusCode).toBe(400);
    const body = response.json();
    expect(body.message).toContain('serverId is required');
  });
  it('rejects invalid request body', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli',
      payload: { serverId: 123 }, // Should be string
    });
    expect(response.statusCode).toBe(400);
  });
  it('handles sync failure gracefully', async () => {
    app = await buildTestApp(ownerUser);
    // Mock sync failure
    vi.mocked(syncServer).mockRejectedValueOnce(new Error('Sync failed'));
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli',
      payload: { serverId: validServerId },
    });
    expect(response.statusCode).toBe(500);
    const body = response.json();
    expect(body.message).toContain('Failed to sync server');
  });
});
// Connection-test endpoint. Note: connection *failures* still return HTTP 200
// with { success: false } — the route reports status in the body, not via
// error codes.
describe('POST /import/tautulli/test', () => {
  const validUrl = 'http://localhost:8181';
  const validApiKey = 'test-api-key-12345';
  it('returns success when connection works', async () => {
    // Configure mock instance for successful connection
    mockTautulliInstance.testConnection.mockResolvedValue(true);
    mockTautulliInstance.getUsers.mockResolvedValue([{ user_id: 1 }, { user_id: 2 }]);
    mockTautulliInstance.getHistory.mockResolvedValue({ total: 1500 });
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli/test',
      payload: { url: validUrl, apiKey: validApiKey },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body).toEqual({
      success: true,
      message: 'Connection successful',
      users: 2,
      historyRecords: 1500,
    });
  });
  it('returns failure when connection fails', async () => {
    // Configure mock instance for failed connection
    mockTautulliInstance.testConnection.mockResolvedValue(false);
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli/test',
      payload: { url: validUrl, apiKey: validApiKey },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body).toEqual({
      success: false,
      message: 'Connection failed. Please check URL and API key.',
    });
  });
  it('handles connection error gracefully', async () => {
    // Configure mock instance for connection error
    // An Error rejection should surface its message to the caller.
    mockTautulliInstance.testConnection.mockRejectedValue(new Error('Network unreachable'));
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli/test',
      payload: { url: validUrl, apiKey: validApiKey },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body).toEqual({
      success: false,
      message: 'Network unreachable',
    });
  });
  it('handles non-Error exceptions', async () => {
    // Configure mock instance for non-Error exception
    // Non-Error throwables get a generic message instead of being stringified.
    mockTautulliInstance.testConnection.mockRejectedValue('String error');
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli/test',
      payload: { url: validUrl, apiKey: validApiKey },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body).toEqual({
      success: false,
      message: 'Connection failed',
    });
  });
  it('rejects non-owner users', async () => {
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli/test',
      payload: { url: validUrl, apiKey: validApiKey },
    });
    expect(response.statusCode).toBe(403);
    const body = response.json();
    expect(body.message).toBe('Only server owners can test Tautulli connection');
  });
  it('rejects missing URL', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli/test',
      payload: { apiKey: validApiKey },
    });
    expect(response.statusCode).toBe(400);
    const body = response.json();
    expect(body.message).toContain('URL and API key are required');
  });
  it('rejects missing API key', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli/test',
      payload: { url: validUrl },
    });
    expect(response.statusCode).toBe(400);
    const body = response.json();
    expect(body.message).toContain('URL and API key are required');
  });
  it('rejects empty request body', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli/test',
      payload: {},
    });
    expect(response.statusCode).toBe(400);
  });
});
// Active-import lookup for a server. The queue mocks default to "nothing
// active"; tests override with *Once mocks where needed.
describe('GET /import/tautulli/active/:serverId', () => {
  const serverId = randomUUID();
  it('returns active: false when no import is active', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'GET',
      url: `/import/tautulli/active/${serverId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body).toEqual({ active: false });
  });
  it('returns active: true with status when import is active', async () => {
    app = await buildTestApp(ownerUser);
    const mockStatus = {
      jobId: 'job-123',
      state: 'active',
      progress: { processed: 50, total: 100 },
    };
    vi.mocked(getActiveImportForServer).mockResolvedValueOnce('job-123');
    vi.mocked(getImportStatus).mockResolvedValueOnce(mockStatus);
    const response = await app.inject({
      method: 'GET',
      url: `/import/tautulli/active/${serverId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.active).toBe(true);
    expect(body.jobId).toBe('job-123');
  });
  it('returns active: false when job exists but status is null', async () => {
    // Covers the race where the job id is known but its status record is gone.
    app = await buildTestApp(ownerUser);
    vi.mocked(getActiveImportForServer).mockResolvedValueOnce('job-123');
    vi.mocked(getImportStatus).mockResolvedValueOnce(null);
    const response = await app.inject({
      method: 'GET',
      url: `/import/tautulli/active/${serverId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body).toEqual({ active: false });
  });
});
// Job status lookup by id.
describe('GET /import/tautulli/:jobId', () => {
  it('returns job status when found', async () => {
    app = await buildTestApp(ownerUser);
    const mockStatus = {
      jobId: 'job-456',
      state: 'completed',
      progress: { processed: 100, total: 100 },
    };
    vi.mocked(getImportStatus).mockResolvedValueOnce(mockStatus);
    const response = await app.inject({
      method: 'GET',
      url: '/import/tautulli/job-456',
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.jobId).toBe('job-456');
    expect(body.state).toBe('completed');
  });
  it('returns 404 when job not found', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(getImportStatus).mockResolvedValueOnce(null);
    const response = await app.inject({
      method: 'GET',
      url: '/import/tautulli/nonexistent-job',
    });
    expect(response.statusCode).toBe(404);
    const body = response.json();
    expect(body.message).toBe('Import job not found');
  });
});
// Job cancellation — owner-only; a false return from cancelImport maps to 400.
describe('DELETE /import/tautulli/:jobId', () => {
  it('cancels job successfully for owner', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(cancelImport).mockResolvedValueOnce(true);
    const response = await app.inject({
      method: 'DELETE',
      url: '/import/tautulli/job-789',
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body).toEqual({ status: 'cancelled', jobId: 'job-789' });
  });
  it('returns 400 when cancel fails', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(cancelImport).mockResolvedValueOnce(false);
    const response = await app.inject({
      method: 'DELETE',
      url: '/import/tautulli/job-789',
    });
    expect(response.statusCode).toBe(400);
    const body = response.json();
    expect(body.message).toContain('Cannot cancel job');
  });
  it('rejects non-owner users', async () => {
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'DELETE',
      url: '/import/tautulli/job-789',
    });
    expect(response.statusCode).toBe(403);
    const body = response.json();
    expect(body.message).toBe('Only server owners can cancel imports');
  });
});
// Queue statistics — a null stats result means no queue (no Redis) → 503.
describe('GET /import/stats', () => {
  it('returns queue stats when available', async () => {
    app = await buildTestApp(ownerUser);
    const mockStats = {
      waiting: 2,
      active: 1,
      completed: 10,
      failed: 0,
      delayed: 0,
      dlqSize: 0,
    };
    vi.mocked(getImportQueueStats).mockResolvedValueOnce(mockStats);
    const response = await app.inject({
      method: 'GET',
      url: '/import/stats',
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body).toEqual(mockStats);
  });
  it('returns 503 when queue is unavailable', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(getImportQueueStats).mockResolvedValueOnce(null);
    const response = await app.inject({
      method: 'GET',
      url: '/import/stats',
    });
    expect(response.statusCode).toBe(503);
    const body = response.json();
    expect(body.message).toBe('Import queue not available');
  });
});
// Same kickoff endpoint, but with the queue mock overridden per-test so the
// queued path (rather than the direct-execution fallback) is exercised.
describe('POST /import/tautulli with queue', () => {
  const validServerId = randomUUID();
  it('returns queued status when queue is available', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(enqueueImport).mockResolvedValueOnce('job-queue-123');
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli',
      payload: { serverId: validServerId },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.status).toBe('queued');
    expect(body.jobId).toBe('job-queue-123');
  });
  it('returns conflict when import already in progress', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(enqueueImport).mockRejectedValueOnce(
      new Error('Import already in progress for this server')
    );
    const response = await app.inject({
      method: 'POST',
      url: '/import/tautulli',
      payload: { serverId: validServerId },
    });
    expect(response.statusCode).toBe(409);
    const body = response.json();
    expect(body.message).toContain('already in progress');
  });
});
});

View File

@@ -0,0 +1,564 @@
/**
* Mobile routes beta mode tests
*
* Tests MOBILE_BETA_MODE=true behavior:
* - Tokens never expire (100 years)
* - Tokens can be reused (not single-use)
* - No device limit enforcement
*
* This test file sets MOBILE_BETA_MODE before importing the module
* to ensure the env var is read correctly at module load time.
*/
import { describe, it, expect, beforeAll, beforeEach, afterEach, vi } from 'vitest';
// Set env var BEFORE any imports that might load mobile.ts
// BUG FIX: static ESM imports are hoisted above module-body statements, so a
// plain `process.env.X = ...` assignment here actually ran AFTER the
// `../mobile.js` import below was evaluated. vi.hoisted() is lifted by Vitest
// above all static imports, guaranteeing the flag is visible when mobile.ts
// reads it at module load time.
vi.hoisted(() => {
  process.env.MOBILE_BETA_MODE = 'true';
});
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser } from '@tracearr/shared';
// Mock the database module
// Each drizzle entry point is a bare vi.fn(); individual tests wire up the
// fluent chains they need per scenario.
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
    insert: vi.fn(),
    update: vi.fn(),
    delete: vi.fn(),
    transaction: vi.fn(),
  },
}));
// Mock the termination service
vi.mock('../../services/termination.js', () => ({
  terminateSession: vi.fn(),
}));
// Import mocked db and routes
import { db } from '../../db/client.js';
import { mobileRoutes } from '../mobile.js';
// Mock Redis
// Only the subset of commands the routes invoke is stubbed; decorated onto
// the test app in buildTestApp below.
const mockRedis = {
  get: vi.fn(),
  set: vi.fn(),
  setex: vi.fn(),
  del: vi.fn(),
  eval: vi.fn(),
  ttl: vi.fn(),
};
// Mock JWT
const mockJwt = {
  sign: vi.fn(),
  verify: vi.fn(),
};
/**
 * Build a test Fastify instance for the mobile routes.
 *
 * Pass `null` to simulate an unauthenticated caller — the auth decorators
 * then attach no user to the request.
 */
async function buildTestApp(authUser: AuthUser | null): Promise<FastifyInstance> {
  const instance = Fastify({ logger: false });
  await instance.register(sensible);

  // Wire in the shared redis/jwt stubs in place of the real plugins.
  instance.decorate('redis', mockRedis as never);
  instance.decorate('jwt', mockJwt as never);

  // Both auth decorators behave identically: attach the user when provided.
  const attachUser = async (request: unknown) => {
    if (authUser) {
      (request as { user: AuthUser }).user = authUser;
    }
  };
  instance.decorate('authenticate', attachUser);
  instance.decorate('requireMobile', attachUser);

  await instance.register(mobileRoutes, { prefix: '/mobile' });
  return instance;
}
/**
 * Create a mock owner auth user with a single random server id.
 */
function createOwnerUser(): AuthUser {
  const owner: AuthUser = {
    userId: randomUUID(),
    username: 'owner',
    role: 'owner',
    serverIds: [randomUUID()],
  };
  return owner;
}
/**
 * Build a mobile pairing-token row shaped like the DB would return it.
 *
 * Any field can be overridden; unspecified fields get defaults describing a
 * fresh, unused token that expires 15 minutes from now.
 */
function createMockToken(overrides?: Partial<{
  id: string;
  tokenHash: string;
  expiresAt: Date;
  usedAt: Date | null;
  createdBy: string;
  createdAt: Date;
}>) {
  const {
    id = randomUUID(),
    tokenHash = 'tokenhash123',
    expiresAt = new Date(Date.now() + 15 * 60 * 1000),
    usedAt = null,
    createdBy = randomUUID(),
    createdAt = new Date(),
  } = overrides ?? {};
  return { id, tokenHash, expiresAt, usedAt, createdBy, createdAt };
}
describe('Mobile Routes - Beta Mode Enabled', () => {
let app: FastifyInstance;
const ownerUser = createOwnerUser();
beforeAll(() => {
  // Verify env var is set
  // Sanity guard: if this fails the suite is not actually running in beta
  // mode and every assertion below is meaningless.
  expect(process.env.MOBILE_BETA_MODE).toBe('true');
});
beforeEach(() => {
  // Reset every stub so call counts and implementations never leak between tests.
  vi.clearAllMocks();
  vi.mocked(db.select).mockReset();
  vi.mocked(db.insert).mockReset();
  vi.mocked(db.update).mockReset();
  vi.mocked(db.delete).mockReset();
  vi.mocked(db.transaction).mockReset();
  mockRedis.get.mockReset();
  mockRedis.set.mockReset();
  mockRedis.setex.mockReset();
  mockRedis.del.mockReset();
  mockRedis.eval.mockReset();
  mockRedis.ttl.mockReset();
  mockJwt.sign.mockReset();
  // NOTE(review): mockJwt.verify is never reset here — harmless today since
  // no visible test gives it an implementation, but add it if that changes.
});
afterEach(async () => {
  if (app) {
    await app.close();
  }
});
// In production a pairing token is single-use; beta mode deliberately relaxes
// that so testers can re-pair with the same token.
describe('Token reuse in beta mode', () => {
  it('accepts already-used tokens', async () => {
    app = await buildTestApp(null);
    mockRedis.eval.mockResolvedValue(1);
    // Mock device count check
    let selectCallCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCallCount++;
      if (selectCallCount === 1) {
        return { from: vi.fn().mockResolvedValue([{ count: 0 }]) } as never;
      } else {
        return {
          from: vi.fn().mockReturnValue({
            where: vi.fn().mockReturnValue({
              limit: vi.fn().mockResolvedValue([]),
            }),
          }),
        } as never;
      }
    });
    const mockOwner = { id: randomUUID(), username: 'owner', role: 'owner' };
    const mockServerId = randomUUID();
    // Token has usedAt set - should still be accepted in beta mode
    vi.mocked(db.transaction).mockImplementation(async (callback) => {
      let txSelectCallCount = 0;
      const tx = {
        execute: vi.fn().mockResolvedValue(undefined),
        select: vi.fn().mockImplementation(() => {
          txSelectCallCount++;
          // Third select inside the tx resolves to the server list; earlier
          // selects resolve to the token row (via for()/limit()) or the owner
          // row (via where()/limit()).
          if (txSelectCallCount === 3) {
            return { from: vi.fn().mockResolvedValue([{ id: mockServerId, name: 'Server', type: 'plex' }]) };
          }
          return {
            from: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                for: vi.fn().mockReturnValue({
                  limit: vi.fn().mockResolvedValue([
                    createMockToken({ usedAt: new Date() }), // Already used!
                  ]),
                }),
                limit: vi.fn().mockResolvedValue([mockOwner]),
              }),
            }),
          };
        }),
        insert: vi.fn().mockReturnValue({
          values: vi.fn().mockResolvedValue(undefined),
        }),
        update: vi.fn().mockReturnValue({
          set: vi.fn().mockReturnValue({
            where: vi.fn().mockResolvedValue(undefined),
          }),
        }),
      };
      return callback(tx as never);
    });
    mockJwt.sign.mockReturnValue('mock.jwt.token');
    mockRedis.setex.mockResolvedValue('OK');
    const response = await app.inject({
      method: 'POST',
      url: '/mobile/pair',
      payload: {
        token: 'trr_mob_validtokenvalue12345678901234567890',
        deviceName: 'iPhone 15',
        deviceId: 'device-123',
        platform: 'ios',
      },
    });
    // In beta mode, already-used tokens should be accepted
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.accessToken).toBe('mock.jwt.token');
  });
  it('does not mark token as used after pairing', async () => {
    app = await buildTestApp(null);
    mockRedis.eval.mockResolvedValue(1);
    let selectCallCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCallCount++;
      if (selectCallCount === 1) {
        return { from: vi.fn().mockResolvedValue([{ count: 0 }]) } as never;
      } else {
        return {
          from: vi.fn().mockReturnValue({
            where: vi.fn().mockReturnValue({
              limit: vi.fn().mockResolvedValue([]),
            }),
          }),
        } as never;
      }
    });
    const mockOwner = { id: randomUUID(), username: 'owner', role: 'owner' };
    const mockServerId = randomUUID();
    // Flips to true if any tx update writes a usedAt value.
    let tokenUpdateCalled = false;
    vi.mocked(db.transaction).mockImplementation(async (callback) => {
      let txSelectCallCount = 0;
      const tx = {
        execute: vi.fn().mockResolvedValue(undefined),
        select: vi.fn().mockImplementation(() => {
          txSelectCallCount++;
          if (txSelectCallCount === 3) {
            return { from: vi.fn().mockResolvedValue([{ id: mockServerId, name: 'Server', type: 'plex' }]) };
          }
          return {
            from: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                for: vi.fn().mockReturnValue({
                  limit: vi.fn().mockResolvedValue([createMockToken()]),
                }),
                limit: vi.fn().mockResolvedValue([mockOwner]),
              }),
            }),
          };
        }),
        insert: vi.fn().mockReturnValue({
          values: vi.fn().mockResolvedValue(undefined),
        }),
        update: vi.fn().mockImplementation((_table) => {
          // Check if this is the mobileTokens update (marking as used)
          // In beta mode, this should NOT be called for mobileTokens
          return {
            set: vi.fn().mockImplementation((setValues) => {
              if (setValues.usedAt) {
                tokenUpdateCalled = true;
              }
              return {
                where: vi.fn().mockResolvedValue(undefined),
              };
            }),
          };
        }),
      };
      return callback(tx as never);
    });
    mockJwt.sign.mockReturnValue('mock.jwt.token');
    mockRedis.setex.mockResolvedValue('OK');
    const response = await app.inject({
      method: 'POST',
      url: '/mobile/pair',
      payload: {
        token: 'trr_mob_validtokenvalue12345678901234567890',
        deviceName: 'iPhone 15',
        deviceId: 'device-123',
        platform: 'ios',
      },
    });
    expect(response.statusCode).toBe(200);
    // In beta mode, token should NOT be marked as used
    expect(tokenUpdateCalled).toBe(false);
  });
});
// Beta mode disables the per-user device cap; pairing must succeed even when
// the reported device count is at or above the production limit of 5.
describe('Device limit in beta mode', () => {
  it('allows pairing when at device limit', async () => {
    app = await buildTestApp(null);
    mockRedis.eval.mockResolvedValue(1);
    // Mock device count at limit (5) - should still allow in beta mode
    let selectCallCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCallCount++;
      if (selectCallCount === 1) {
        return { from: vi.fn().mockResolvedValue([{ count: 5 }]) } as never;
      } else {
        return {
          from: vi.fn().mockReturnValue({
            where: vi.fn().mockReturnValue({
              limit: vi.fn().mockResolvedValue([]),
            }),
          }),
        } as never;
      }
    });
    const mockOwner = { id: randomUUID(), username: 'owner', role: 'owner' };
    const mockServerId = randomUUID();
    vi.mocked(db.transaction).mockImplementation(async (callback) => {
      let txSelectCallCount = 0;
      const tx = {
        execute: vi.fn().mockResolvedValue(undefined),
        select: vi.fn().mockImplementation(() => {
          txSelectCallCount++;
          // Third tx select resolves the server list; earlier selects resolve
          // the token row (for()/limit()) or the owner row (where()/limit()).
          if (txSelectCallCount === 3) {
            return { from: vi.fn().mockResolvedValue([{ id: mockServerId, name: 'Server', type: 'plex' }]) };
          }
          return {
            from: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                for: vi.fn().mockReturnValue({
                  limit: vi.fn().mockResolvedValue([createMockToken()]),
                }),
                limit: vi.fn().mockResolvedValue([mockOwner]),
              }),
            }),
          };
        }),
        insert: vi.fn().mockReturnValue({
          values: vi.fn().mockResolvedValue(undefined),
        }),
        update: vi.fn().mockReturnValue({
          set: vi.fn().mockReturnValue({
            where: vi.fn().mockResolvedValue(undefined),
          }),
        }),
      };
      return callback(tx as never);
    });
    mockJwt.sign.mockReturnValue('mock.jwt.token');
    mockRedis.setex.mockResolvedValue('OK');
    const response = await app.inject({
      method: 'POST',
      url: '/mobile/pair',
      payload: {
        token: 'trr_mob_validtokenvalue12345678901234567890',
        deviceName: 'iPhone 15',
        deviceId: 'device-new',
        platform: 'ios',
      },
    });
    // In beta mode, should succeed even at device limit
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.accessToken).toBe('mock.jwt.token');
  });
  it('allows pairing when exceeding device limit', async () => {
    app = await buildTestApp(null);
    mockRedis.eval.mockResolvedValue(1);
    // Mock device count OVER limit (10 devices)
    let selectCallCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCallCount++;
      if (selectCallCount === 1) {
        return { from: vi.fn().mockResolvedValue([{ count: 10 }]) } as never;
      } else {
        return {
          from: vi.fn().mockReturnValue({
            where: vi.fn().mockReturnValue({
              limit: vi.fn().mockResolvedValue([]),
            }),
          }),
        } as never;
      }
    });
    const mockOwner = { id: randomUUID(), username: 'owner', role: 'owner' };
    const mockServerId = randomUUID();
    vi.mocked(db.transaction).mockImplementation(async (callback) => {
      let txSelectCallCount = 0;
      const tx = {
        execute: vi.fn().mockResolvedValue(undefined),
        select: vi.fn().mockImplementation(() => {
          txSelectCallCount++;
          if (txSelectCallCount === 3) {
            return { from: vi.fn().mockResolvedValue([{ id: mockServerId, name: 'Server', type: 'plex' }]) };
          }
          return {
            from: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                for: vi.fn().mockReturnValue({
                  limit: vi.fn().mockResolvedValue([createMockToken()]),
                }),
                limit: vi.fn().mockResolvedValue([mockOwner]),
              }),
            }),
          };
        }),
        insert: vi.fn().mockReturnValue({
          values: vi.fn().mockResolvedValue(undefined),
        }),
        update: vi.fn().mockReturnValue({
          set: vi.fn().mockReturnValue({
            where: vi.fn().mockResolvedValue(undefined),
          }),
        }),
      };
      return callback(tx as never);
    });
    mockJwt.sign.mockReturnValue('mock.jwt.token');
    mockRedis.setex.mockResolvedValue('OK');
    const response = await app.inject({
      method: 'POST',
      url: '/mobile/pair',
      payload: {
        token: 'trr_mob_validtokenvalue12345678901234567890',
        deviceName: 'iPhone 15',
        deviceId: 'device-new',
        platform: 'ios',
      },
    });
    // In beta mode, should succeed even with 10+ devices
    expect(response.statusCode).toBe(200);
  });
});
// Beta-mode pairing tokens are effectively non-expiring (~100 years).
describe('Token expiry in beta mode', () => {
  it('generates tokens with 100 year expiry', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(db.select).mockReturnValue({
      from: vi.fn().mockReturnValue({
        limit: vi.fn().mockResolvedValue([{ mobileEnabled: true }]),
      }),
    } as never);
    mockRedis.eval.mockResolvedValue(1);
    // Captures the expiresAt value the route writes for the new token.
    let capturedExpiry: Date | null = null;
    vi.mocked(db.transaction).mockImplementation(async (callback) => {
      const tx = {
        execute: vi.fn().mockResolvedValue(undefined),
        select: vi.fn().mockReturnValue({
          from: vi.fn().mockReturnValue({
            where: vi.fn().mockResolvedValue([{ count: 0 }]),
          }),
        }),
        insert: vi.fn().mockImplementation(() => ({
          values: vi.fn().mockImplementation((values: { expiresAt: Date }) => {
            capturedExpiry = values.expiresAt;
            return Promise.resolve(undefined);
          }),
        })),
      };
      return callback(tx as never);
    });
    const beforeRequest = Date.now();
    const response = await app.inject({
      method: 'POST',
      url: '/mobile/pair-token',
    });
    expect(response.statusCode).toBe(200);
    expect(capturedExpiry).not.toBeNull();
    // Token should expire in ~100 years (with some tolerance)
    const expiryMs = capturedExpiry!.getTime() - beforeRequest;
    const expectedExpiryMs = 100 * 365 * 24 * 60 * 60 * 1000; // 100 years in ms
    // Allow 1 day tolerance for leap year calculations
    // NOTE(review): the 1-day tolerance assumes mobile.ts computes the expiry
    // as 100 * 365 days; if it used e.g. setFullYear(+100), the ~25 leap days
    // would exceed this window — confirm against the implementation.
    const tolerance = 24 * 60 * 60 * 1000;
    expect(expiryMs).toBeGreaterThanOrEqual(expectedExpiryMs - tolerance);
    expect(expiryMs).toBeLessThanOrEqual(expectedExpiryMs + tolerance);
  });
});
// Token generation must also ignore the device cap in beta mode.
describe('Token generation in beta mode', () => {
  it('allows generating tokens even at device limit', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(db.select).mockReturnValue({
      from: vi.fn().mockReturnValue({
        limit: vi.fn().mockResolvedValue([{ mobileEnabled: true }]),
      }),
    } as never);
    mockRedis.eval.mockResolvedValue(1);
    // Mock transaction with different responses for pending tokens vs device count
    vi.mocked(db.transaction).mockImplementation(async (callback) => {
      let txSelectCallCount = 0;
      const tx = {
        execute: vi.fn().mockResolvedValue(undefined),
        select: vi.fn().mockImplementation(() => {
          txSelectCallCount++;
          if (txSelectCallCount === 1) {
            // First query: pending tokens count - return 0 (below limit)
            return {
              from: vi.fn().mockReturnValue({
                where: vi.fn().mockResolvedValue([{ count: 0 }]),
              }),
            };
          } else {
            // Second query: device count - return 5 (at limit)
            return {
              from: vi.fn().mockResolvedValue([{ count: 5 }]),
            };
          }
        }),
        insert: vi.fn().mockReturnValue({
          values: vi.fn().mockResolvedValue(undefined),
        }),
      };
      return callback(tx as never);
    });
    const response = await app.inject({
      method: 'POST',
      url: '/mobile/pair-token',
    });
    // In beta mode, should succeed even at device limit
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.token).toMatch(/^trr_mob_/);
  });
});
});

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,629 @@
/**
* Notification Preferences routes tests
*
* Tests the API endpoints for mobile notification preferences:
* - GET /notifications/preferences - Get preferences for current device
* - PATCH /notifications/preferences - Update preferences for current device
*/
import { describe, it, expect, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
// Define mobile auth user type
interface MobileAuthUser {
userId: string;
deviceId?: string;
role: 'owner' | 'guest';
}
// Mock the database module before importing routes
vi.mock('../../db/client.js', () => ({
db: {
select: vi.fn(),
insert: vi.fn(),
update: vi.fn(),
},
}));
// Mock the push rate limiter
vi.mock('../../services/pushRateLimiter.js', () => ({
getPushRateLimiter: vi.fn(),
}));
// Import mocked modules
import { db } from '../../db/client.js';
import { getPushRateLimiter } from '../../services/pushRateLimiter.js';
import { notificationPreferencesRoutes } from '../notificationPreferences.js';
// Helper to create DB chain mocks
/**
 * Stub `db.select()` with a fluent chain whose terminal `.limit()` resolves
 * to `result`. Returns the chain so tests can assert on intermediate calls.
 */
function mockDbSelectLimit(result: unknown[]) {
  const limit = vi.fn().mockResolvedValue(result);
  const chain = {
    from: vi.fn().mockReturnThis(),
    where: vi.fn().mockReturnThis(),
    orderBy: vi.fn().mockReturnThis(),
    limit,
  };
  vi.mocked(db.select).mockReturnValue(chain as never);
  return chain;
}
/**
 * Stub `db.insert()` with a `values().returning()` chain whose terminal
 * `.returning()` resolves to `result`.
 */
function mockDbInsert(result: unknown[]) {
  const returning = vi.fn().mockResolvedValue(result);
  const chain = {
    values: vi.fn().mockReturnThis(),
    returning,
  };
  vi.mocked(db.insert).mockReturnValue(chain as never);
  return chain;
}
/**
 * Stub `db.update()` with a `set().where()` chain whose terminal `.where()`
 * resolves to undefined.
 */
function mockDbUpdate() {
  const where = vi.fn().mockResolvedValue(undefined);
  const chain = {
    set: vi.fn().mockReturnThis(),
    where,
  };
  vi.mocked(db.update).mockReturnValue(chain as never);
  return chain;
}
/**
 * Build a test Fastify instance for the notification-preference routes.
 * The requireMobile guard is stubbed to run every request as `mobileUser`.
 */
async function buildTestApp(mobileUser: MobileAuthUser): Promise<FastifyInstance> {
  const instance = Fastify({ logger: false });
  await instance.register(sensible);

  // Mock requireMobile middleware
  instance.decorate('requireMobile', async (request: unknown) => {
    (request as { user: MobileAuthUser }).user = mobileUser;
  });

  await instance.register(notificationPreferencesRoutes, { prefix: '/notifications' });
  return instance;
}
// Shared fixtures: one mobile device/session pair reused across tests.
const mobileSessionId = randomUUID();
const deviceId = randomUUID();
const mobileUser: MobileAuthUser = {
  userId: randomUUID(),
  deviceId: deviceId,
  role: 'owner',
};
// Row shape returned by the mobile-session lookup.
const mockMobileSession = {
  id: mobileSessionId,
  deviceId: deviceId,
  deviceName: 'Test iPhone',
  expoPushToken: 'ExponentPushToken[xxx]',
  lastSeenAt: new Date(),
};
// A fully-populated preferences row, as the DB would return it.
const mockPrefsRow = {
  id: randomUUID(),
  mobileSessionId: mobileSessionId,
  pushEnabled: true,
  onViolationDetected: true,
  onStreamStarted: false,
  onStreamStopped: false,
  onConcurrentStreams: true,
  onNewDevice: true,
  onTrustScoreChanged: false,
  onServerDown: true,
  onServerUp: true,
  violationMinSeverity: 1,
  violationRuleTypes: ['impossible_travel', 'concurrent_streams'],
  maxPerMinute: 5,
  maxPerHour: 50,
  quietHoursEnabled: false,
  quietHoursStart: null,
  quietHoursEnd: null,
  quietHoursTimezone: 'UTC',
  quietHoursOverrideCritical: true,
  createdAt: new Date(),
  updatedAt: new Date(),
};
describe('Notification Preferences Routes', () => {
let app: FastifyInstance;
afterEach(async () => {
  // Optional chaining: `app` may be unset if a test failed before building it.
  await app?.close();
  vi.clearAllMocks();
});
describe('GET /notifications/preferences', () => {
it('returns existing preferences for mobile user with deviceId', async () => {
  app = await buildTestApp(mobileUser);
  // Mock: find mobile session by deviceId, then find preferences
  // The first db.select resolves the session row, the second the prefs row.
  let selectCount = 0;
  vi.mocked(db.select).mockImplementation(() => {
    selectCount++;
    const chain = {
      from: vi.fn().mockReturnThis(),
      where: vi.fn().mockReturnThis(),
      orderBy: vi.fn().mockReturnThis(),
      limit: vi.fn().mockResolvedValue(
        selectCount === 1
          ? [{ id: mockMobileSession.id }] // Mobile session found
          : [mockPrefsRow] // Preferences found
      ),
    };
    return chain as never;
  });
  // No rate limiter
  vi.mocked(getPushRateLimiter).mockReturnValue(null);
  const response = await app.inject({
    method: 'GET',
    url: '/notifications/preferences',
  });
  expect(response.statusCode).toBe(200);
  const body = response.json();
  expect(body.pushEnabled).toBe(true);
  expect(body.onViolationDetected).toBe(true);
  expect(body.maxPerMinute).toBe(5);
  expect(body.violationRuleTypes).toEqual(['impossible_travel', 'concurrent_streams']);
});
it('includes rate limit status when rate limiter is available', async () => {
app = await buildTestApp(mobileUser);
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }]
: [mockPrefsRow]
),
};
return chain as never;
});
// Mock rate limiter with status
const mockRateLimiter = {
getStatus: vi.fn().mockResolvedValue({
remainingMinute: 3,
remainingHour: 45,
resetMinuteIn: 30,
resetHourIn: 1800,
}),
};
vi.mocked(getPushRateLimiter).mockReturnValue(mockRateLimiter as never);
const response = await app.inject({
method: 'GET',
url: '/notifications/preferences',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.rateLimitStatus).toBeDefined();
expect(body.rateLimitStatus.remainingMinute).toBe(3);
expect(body.rateLimitStatus.remainingHour).toBe(45);
});
it('creates default preferences if none exist', async () => {
app = await buildTestApp(mobileUser);
const defaultPrefs = {
...mockPrefsRow,
pushEnabled: true,
onViolationDetected: true,
onStreamStarted: false,
onStreamStopped: false,
};
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }] // Mobile session found
: [] // No preferences yet
),
};
return chain as never;
});
mockDbInsert([defaultPrefs]);
vi.mocked(getPushRateLimiter).mockReturnValue(null);
const response = await app.inject({
method: 'GET',
url: '/notifications/preferences',
});
expect(response.statusCode).toBe(200);
expect(db.insert).toHaveBeenCalled();
});
it('falls back to user lookup when deviceId not provided', async () => {
const userWithoutDeviceId: MobileAuthUser = {
userId: randomUUID(),
// No deviceId
role: 'owner',
};
app = await buildTestApp(userWithoutDeviceId);
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }] // Fallback finds session
: [mockPrefsRow]
),
};
return chain as never;
});
vi.mocked(getPushRateLimiter).mockReturnValue(null);
const response = await app.inject({
method: 'GET',
url: '/notifications/preferences',
});
expect(response.statusCode).toBe(200);
});
it('returns 404 when no mobile session found', async () => {
app = await buildTestApp(mobileUser);
mockDbSelectLimit([]); // No mobile session
const response = await app.inject({
method: 'GET',
url: '/notifications/preferences',
});
expect(response.statusCode).toBe(404);
expect(response.json().message).toContain('No mobile session');
});
});
describe('PATCH /notifications/preferences', () => {
it('updates preferences for mobile user', async () => {
app = await buildTestApp(mobileUser);
const updatedPrefs = {
...mockPrefsRow,
pushEnabled: false,
onStreamStarted: true,
};
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }] // Mobile session found
: selectCount === 2
? [mockPrefsRow] // Existing prefs found
: [updatedPrefs] // Updated prefs returned
),
};
return chain as never;
});
mockDbUpdate();
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
pushEnabled: false,
onStreamStarted: true,
},
});
expect(response.statusCode).toBe(200);
expect(db.update).toHaveBeenCalled();
const body = response.json();
expect(body.pushEnabled).toBe(false);
expect(body.onStreamStarted).toBe(true);
});
it('updates all notification event preferences', async () => {
app = await buildTestApp(mobileUser);
const updatedPrefs = {
...mockPrefsRow,
onViolationDetected: false,
onConcurrentStreams: false,
onNewDevice: false,
onTrustScoreChanged: true,
onServerDown: false,
onServerUp: false,
};
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }]
: selectCount === 2
? [mockPrefsRow]
: [updatedPrefs]
),
};
return chain as never;
});
mockDbUpdate();
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
onViolationDetected: false,
onConcurrentStreams: false,
onNewDevice: false,
onTrustScoreChanged: true,
onServerDown: false,
onServerUp: false,
},
});
expect(response.statusCode).toBe(200);
});
it('updates rate limit settings', async () => {
app = await buildTestApp(mobileUser);
const updatedPrefs = {
...mockPrefsRow,
maxPerMinute: 10,
maxPerHour: 100,
};
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }]
: selectCount === 2
? [mockPrefsRow]
: [updatedPrefs]
),
};
return chain as never;
});
mockDbUpdate();
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
maxPerMinute: 10,
maxPerHour: 100,
},
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.maxPerMinute).toBe(10);
expect(body.maxPerHour).toBe(100);
});
it('updates quiet hours settings', async () => {
app = await buildTestApp(mobileUser);
const updatedPrefs = {
...mockPrefsRow,
quietHoursEnabled: true,
quietHoursStart: '22:00',
quietHoursEnd: '07:00',
quietHoursTimezone: 'America/New_York',
quietHoursOverrideCritical: false,
};
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }]
: selectCount === 2
? [mockPrefsRow]
: [updatedPrefs]
),
};
return chain as never;
});
mockDbUpdate();
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
quietHoursEnabled: true,
quietHoursStart: '22:00',
quietHoursEnd: '07:00',
quietHoursTimezone: 'America/New_York',
quietHoursOverrideCritical: false,
},
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.quietHoursEnabled).toBe(true);
expect(body.quietHoursStart).toBe('22:00');
expect(body.quietHoursEnd).toBe('07:00');
});
it('updates violation filter settings', async () => {
app = await buildTestApp(mobileUser);
const updatedPrefs = {
...mockPrefsRow,
violationMinSeverity: 2,
violationRuleTypes: ['geo_restriction'],
};
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }]
: selectCount === 2
? [mockPrefsRow]
: [updatedPrefs]
),
};
return chain as never;
});
mockDbUpdate();
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
violationMinSeverity: 2,
violationRuleTypes: ['geo_restriction'],
},
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.violationMinSeverity).toBe(2);
expect(body.violationRuleTypes).toEqual(['geo_restriction']);
});
it('creates preferences if they do not exist', async () => {
app = await buildTestApp(mobileUser);
const newPrefs = {
...mockPrefsRow,
pushEnabled: false,
};
let selectCount = 0;
vi.mocked(db.select).mockImplementation(() => {
selectCount++;
const chain = {
from: vi.fn().mockReturnThis(),
where: vi.fn().mockReturnThis(),
orderBy: vi.fn().mockReturnThis(),
limit: vi.fn().mockResolvedValue(
selectCount === 1
? [{ id: mockMobileSession.id }] // Mobile session found
: selectCount === 2
? [] // No existing prefs
: [newPrefs] // After update
),
};
return chain as never;
});
mockDbInsert([mockPrefsRow]); // Insert creates defaults
mockDbUpdate();
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
pushEnabled: false,
},
});
expect(response.statusCode).toBe(200);
expect(db.insert).toHaveBeenCalled();
});
it('returns 404 when no mobile session found', async () => {
app = await buildTestApp(mobileUser);
mockDbSelectLimit([]); // No mobile session
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
pushEnabled: true,
},
});
expect(response.statusCode).toBe(404);
expect(response.json().message).toContain('No mobile session');
});
it('rejects invalid request body', async () => {
app = await buildTestApp(mobileUser);
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
violationMinSeverity: 5, // Invalid: max is 3
},
});
expect(response.statusCode).toBe(400);
});
it('rejects invalid quiet hours format', async () => {
app = await buildTestApp(mobileUser);
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
quietHoursStart: '9:00', // Invalid format - should be HH:MM
},
});
expect(response.statusCode).toBe(400);
});
it('rejects maxPerMinute outside valid range', async () => {
app = await buildTestApp(mobileUser);
const response = await app.inject({
method: 'PATCH',
url: '/notifications/preferences',
payload: {
maxPerMinute: 100, // Max is 60
},
});
expect(response.statusCode).toBe(400);
});
});
});

View File

@@ -0,0 +1,646 @@
/**
* Rule routes integration tests
*
* Tests the API endpoints for rule CRUD operations:
* - GET /rules - List all rules
* - POST /rules - Create a new rule
* - GET /rules/:id - Get a specific rule
* - PATCH /rules/:id - Update a rule
* - DELETE /rules/:id - Delete a rule
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser, Rule } from '@tracearr/shared';
// Mock the database module before importing routes.
// (vi.mock calls are hoisted by Vitest, so this factory replaces the real
// module before `../rules.js` imports `db`.)
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
    insert: vi.fn(),
    update: vi.fn(),
    delete: vi.fn(),
  },
}));
// Import the mocked db and the routes
import { db } from '../../db/client.js';
import { ruleRoutes } from '../rules.js';
/**
 * Build a test Fastify instance with the `authenticate` decorator stubbed so
 * every request carries `authUser`, and the rule routes mounted at `/rules`.
 */
async function buildTestApp(authUser: AuthUser): Promise<FastifyInstance> {
  const app = Fastify({ logger: false });
  // Register sensible for HTTP error helpers (badRequest, notFound, etc.)
  await app.register(sensible);
  // Mock the authenticate decorator. Use `unknown` + a narrow cast instead of
  // `any`, matching the sibling route test suites and no-explicit-any.
  app.decorate('authenticate', async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  });
  // Register routes
  await app.register(ruleRoutes, { prefix: '/rules' });
  return app;
}
/**
 * Build a Rule fixture. Any field in `overrides` that is neither null nor
 * undefined replaces the corresponding default (identical to the previous
 * per-field `??` merging).
 */
function createTestRule(overrides: Partial<Rule> = {}): Rule {
  const defaults: Rule = {
    id: randomUUID(),
    name: 'Test Rule',
    type: 'concurrent_streams',
    params: { maxStreams: 3 },
    serverUserId: null,
    isActive: true,
    createdAt: new Date(),
    updatedAt: new Date(),
  };
  // Drop nullish overrides so they fall back to the defaults, like `??` does.
  const provided = Object.fromEntries(
    Object.entries(overrides).filter(([, value]) => value != null)
  );
  return { ...defaults, ...provided } as Rule;
}
/**
 * Build a mock owner-level auth user with access to a single random server.
 */
function createOwnerUser(): AuthUser {
  const ownedServerId = randomUUID();
  return {
    userId: randomUUID(),
    username: 'owner',
    role: 'owner',
    serverIds: [ownedServerId],
  };
}
/**
 * Build a mock viewer (non-owner) auth user scoped to one random server.
 */
function createViewerUser(): AuthUser {
  const visibleServerId = randomUUID();
  return {
    userId: randomUUID(),
    username: 'viewer',
    role: 'viewer',
    serverIds: [visibleServerId],
  };
}
describe('Rule Routes', () => {
  let app: FastifyInstance;
  let mockDb: any;

  /**
   * Build a self-returning query-builder mock: every builder method returns
   * the chain itself except `terminal`, which resolves to `result`. This lets
   * each test declare only the data a query yields instead of hand-nesting
   * the exact from/leftJoin/where call order (previously copy-pasted ~10x).
   */
  function chainResolvingAt(terminal: string, result: unknown) {
    const chain: Record<string, ReturnType<typeof vi.fn>> = {};
    for (const method of ['from', 'leftJoin', 'where', 'orderBy', 'limit', 'set', 'values', 'returning']) {
      chain[method] =
        method === terminal
          ? vi.fn().mockResolvedValue(result)
          : vi.fn().mockReturnValue(chain);
    }
    return chain;
  }

  beforeEach(() => {
    vi.clearAllMocks();
    mockDb = db as any;
  });
  afterEach(async () => {
    if (app) {
      await app.close();
    }
  });

  describe('GET /rules', () => {
    it('should return list of rules for owner', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const serverId = ownerUser.serverIds[0]; // Use owner's server
      const testRules = [
        createTestRule({ name: 'Rule 1' }), // Global rule
        createTestRule({ name: 'Rule 2', serverUserId: randomUUID() }), // User-specific rule
      ];
      // List query: joins serverUsers and servers, then resolves at orderBy.
      mockDb.select.mockReturnValue(
        chainResolvingAt('orderBy', [
          { ...testRules[0], username: null, serverId: null, serverName: null },
          { ...testRules[1], username: 'testuser', serverId, serverName: 'Test Server' },
        ])
      );
      const response = await app.inject({
        method: 'GET',
        url: '/rules',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.data).toHaveLength(2);
    });

    it('should filter user-specific rules for non-owners', async () => {
      const guestUser = createViewerUser();
      app = await buildTestApp(guestUser);
      const globalRule = createTestRule({ name: 'Global Rule', serverUserId: null });
      const userRule = createTestRule({ name: 'User Rule', serverUserId: randomUUID() });
      mockDb.select.mockReturnValue(
        chainResolvingAt('orderBy', [
          { ...globalRule, username: null, serverId: null, serverName: null },
          { ...userRule, username: 'someone', serverId: randomUUID(), serverName: 'Test Server' },
        ])
      );
      const response = await app.inject({
        method: 'GET',
        url: '/rules',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      // Guest should only see global rules
      expect(body.data).toHaveLength(1);
      expect(body.data[0].serverUserId).toBeNull();
    });
  });

  describe('POST /rules', () => {
    it('should create a rule for owner', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const newRule = createTestRule({
        name: 'New Rule',
        type: 'impossible_travel',
        params: { maxSpeedKmh: 500 },
      });
      mockDb.insert.mockReturnValue(chainResolvingAt('returning', [newRule]));
      const response = await app.inject({
        method: 'POST',
        url: '/rules',
        payload: {
          name: 'New Rule',
          type: 'impossible_travel',
          params: { maxSpeedKmh: 500 },
        },
      });
      expect(response.statusCode).toBe(201);
      const body = JSON.parse(response.body);
      expect(body.name).toBe('New Rule');
      expect(body.type).toBe('impossible_travel');
    });

    it('should reject rule creation for non-owner', async () => {
      const guestUser = createViewerUser();
      app = await buildTestApp(guestUser);
      const response = await app.inject({
        method: 'POST',
        url: '/rules',
        payload: {
          name: 'New Rule',
          type: 'concurrent_streams',
          params: { maxStreams: 3 },
        },
      });
      expect(response.statusCode).toBe(403);
    });

    it('should reject invalid request body', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'POST',
        url: '/rules',
        payload: {
          // Missing required fields
          name: '',
        },
      });
      expect(response.statusCode).toBe(400);
    });

    it('should reject invalid rule type', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'POST',
        url: '/rules',
        payload: {
          name: 'Test Rule',
          type: 'invalid_type',
          params: {},
        },
      });
      expect(response.statusCode).toBe(400);
    });

    it('should verify serverUserId exists when provided', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const serverUserId = randomUUID();
      // Server user not found
      mockDb.select.mockReturnValue(chainResolvingAt('limit', []));
      const response = await app.inject({
        method: 'POST',
        url: '/rules',
        payload: {
          name: 'User Rule',
          type: 'concurrent_streams',
          params: { maxStreams: 3 },
          serverUserId,
        },
      });
      expect(response.statusCode).toBe(404);
    });

    it('should create rule with valid serverUserId', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const serverUserId = randomUUID();
      const newRule = createTestRule({ serverUserId });
      // Server user exists
      mockDb.select.mockReturnValue(chainResolvingAt('limit', [{ id: serverUserId }]));
      mockDb.insert.mockReturnValue(chainResolvingAt('returning', [newRule]));
      const response = await app.inject({
        method: 'POST',
        url: '/rules',
        payload: {
          name: 'User Rule',
          type: 'concurrent_streams',
          params: { maxStreams: 3 },
          serverUserId,
        },
      });
      expect(response.statusCode).toBe(201);
    });
  });

  describe('GET /rules/:id', () => {
    it('should return a specific rule', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const ruleId = randomUUID();
      const testRule = createTestRule({ id: ruleId });
      // First select: rule detail query; second: violation count query.
      mockDb.select
        .mockReturnValueOnce(
          chainResolvingAt('limit', [{ ...testRule, username: null, serverId: null, serverName: null }])
        )
        .mockReturnValueOnce(chainResolvingAt('where', [{ count: 5 }]));
      const response = await app.inject({
        method: 'GET',
        url: `/rules/${ruleId}`,
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.id).toBe(ruleId);
      expect(body.violationCount).toBe(5);
    });

    it('should return 404 for non-existent rule', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      mockDb.select.mockReturnValue(chainResolvingAt('limit', []));
      const response = await app.inject({
        method: 'GET',
        url: `/rules/${randomUUID()}`,
      });
      expect(response.statusCode).toBe(404);
    });

    it('should reject invalid UUID', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/rules/not-a-uuid',
      });
      expect(response.statusCode).toBe(400);
    });
  });

  describe('PATCH /rules/:id', () => {
    it('should update rule for owner', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const ruleId = randomUUID();
      const existingRule = createTestRule({ id: ruleId, name: 'Old Name' });
      const updatedRule = { ...existingRule, name: 'New Name' };
      // Rule exists check
      mockDb.select.mockReturnValue(chainResolvingAt('limit', [{ ...existingRule, serverId: null }]));
      // Update
      mockDb.update.mockReturnValue(chainResolvingAt('returning', [updatedRule]));
      const response = await app.inject({
        method: 'PATCH',
        url: `/rules/${ruleId}`,
        payload: {
          name: 'New Name',
        },
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.name).toBe('New Name');
    });

    it('should reject update for non-owner', async () => {
      const guestUser = createViewerUser();
      app = await buildTestApp(guestUser);
      const response = await app.inject({
        method: 'PATCH',
        url: `/rules/${randomUUID()}`,
        payload: {
          name: 'New Name',
        },
      });
      expect(response.statusCode).toBe(403);
    });

    it('should return 404 for non-existent rule', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      mockDb.select.mockReturnValue(chainResolvingAt('limit', []));
      const response = await app.inject({
        method: 'PATCH',
        url: `/rules/${randomUUID()}`,
        payload: {
          name: 'New Name',
        },
      });
      expect(response.statusCode).toBe(404);
    });

    it('should update isActive field', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const ruleId = randomUUID();
      const existingRule = createTestRule({ id: ruleId, isActive: true });
      const updatedRule = { ...existingRule, isActive: false };
      mockDb.select.mockReturnValue(chainResolvingAt('limit', [{ ...existingRule, serverId: null }]));
      mockDb.update.mockReturnValue(chainResolvingAt('returning', [updatedRule]));
      const response = await app.inject({
        method: 'PATCH',
        url: `/rules/${ruleId}`,
        payload: {
          isActive: false,
        },
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.isActive).toBe(false);
    });

    it('should update params field', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const ruleId = randomUUID();
      const existingRule = createTestRule({ id: ruleId });
      const updatedRule = { ...existingRule, params: { maxStreams: 5 } };
      mockDb.select.mockReturnValue(chainResolvingAt('limit', [{ ...existingRule, serverId: null }]));
      mockDb.update.mockReturnValue(chainResolvingAt('returning', [updatedRule]));
      const response = await app.inject({
        method: 'PATCH',
        url: `/rules/${ruleId}`,
        payload: {
          params: { maxStreams: 5 },
        },
      });
      expect(response.statusCode).toBe(200);
    });
  });

  describe('DELETE /rules/:id', () => {
    it('should delete rule for owner', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const ruleId = randomUUID();
      const existingRule = createTestRule({ id: ruleId });
      // Rule exists check
      mockDb.select.mockReturnValue(chainResolvingAt('limit', [{ ...existingRule, serverId: null }]));
      // Delete
      mockDb.delete.mockReturnValue(chainResolvingAt('where', undefined));
      const response = await app.inject({
        method: 'DELETE',
        url: `/rules/${ruleId}`,
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.success).toBe(true);
    });

    it('should reject delete for non-owner', async () => {
      const guestUser = createViewerUser();
      app = await buildTestApp(guestUser);
      const response = await app.inject({
        method: 'DELETE',
        url: `/rules/${randomUUID()}`,
      });
      expect(response.statusCode).toBe(403);
    });

    it('should return 404 for non-existent rule', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      mockDb.select.mockReturnValue(chainResolvingAt('limit', []));
      const response = await app.inject({
        method: 'DELETE',
        url: `/rules/${randomUUID()}`,
      });
      expect(response.statusCode).toBe(404);
    });

    it('should reject invalid UUID', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'DELETE',
        url: '/rules/not-a-uuid',
      });
      expect(response.statusCode).toBe(400);
    });
  });
});

View File

@@ -0,0 +1,767 @@
/**
* Server routes tests
*
* Tests the API endpoints for server management:
* - GET /servers - List connected servers
* - POST /servers - Add a new server
* - DELETE /servers/:id - Remove a server
* - POST /servers/:id/sync - Force sync
* - GET /servers/:id/image/* - Proxy images
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser } from '@tracearr/shared';
// Mock dependencies before imports.
// (vi.mock calls are hoisted by Vitest, so these factories replace the real
// modules before `../servers.js` imports them.)
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
    insert: vi.fn(),
    delete: vi.fn(),
    update: vi.fn(),
  },
}));
// Reversible stand-ins so token round-tripping can be asserted without real crypto.
vi.mock('../../utils/crypto.js', () => ({
  encrypt: vi.fn((token: string) => `encrypted_${token}`),
  decrypt: vi.fn((token: string) => token.replace('encrypted_', '')),
}));
// Admin-verification stubs for each supported media-server client.
vi.mock('../../services/mediaServer/index.js', () => ({
  PlexClient: {
    verifyServerAdmin: vi.fn(),
  },
  JellyfinClient: {
    verifyServerAdmin: vi.fn(),
  },
  EmbyClient: {
    verifyServerAdmin: vi.fn(),
  },
}));
vi.mock('../../services/sync.js', () => ({
  syncServer: vi.fn(),
}));
// Import mocked modules
import { db } from '../../db/client.js';
import { PlexClient, JellyfinClient, EmbyClient } from '../../services/mediaServer/index.js';
import { syncServer } from '../../services/sync.js';
import { serverRoutes } from '../servers.js';
// Mock global fetch for image proxy tests
const mockFetch = vi.fn();
vi.stubGlobal('fetch', mockFetch);
// Helpers to create Drizzle query-chain mocks.
/** Mock a `db.select().from(...).where(...)` query (no limit) resolving to `result`. */
function mockDbSelectWhere(result: unknown[]) {
  const where = vi.fn().mockResolvedValue(result);
  const chain = {
    from: vi.fn().mockReturnThis(),
    where,
  };
  vi.mocked(db.select).mockReturnValue(chain as never);
  return chain;
}
/** Mock a `db.select().from(...).where(...).limit(...)` query resolving to `result`. */
function mockDbSelectLimit(result: unknown[]) {
  const limit = vi.fn().mockResolvedValue(result);
  const chain = {
    from: vi.fn().mockReturnThis(),
    where: vi.fn().mockReturnThis(),
    limit,
  };
  vi.mocked(db.select).mockReturnValue(chain as never);
  return chain;
}
/** Mock `db.insert().values(...).returning()` resolving to `result`. */
function mockDbInsert(result: unknown[]) {
  const returning = vi.fn().mockResolvedValue(result);
  const chain = {
    values: vi.fn().mockReturnThis(),
    returning,
  };
  vi.mocked(db.insert).mockReturnValue(chain as never);
  return chain;
}
/** Mock `db.delete().where(...)` resolving with no result. */
function mockDbDelete() {
  const where = vi.fn().mockResolvedValue(undefined);
  const chain = { where };
  vi.mocked(db.delete).mockReturnValue(chain as never);
  return chain;
}
/** Mock `db.update().set(...).where(...)` resolving with no result. */
function mockDbUpdate() {
  const where = vi.fn().mockResolvedValue(undefined);
  const chain = {
    set: vi.fn().mockReturnThis(),
    where,
  };
  vi.mocked(db.update).mockReturnValue(chain as never);
  return chain;
}
/**
 * Create a Fastify app with auth fully stubbed (both the `authenticate`
 * decorator and per-request `jwtVerify`) so every request is treated as
 * `authUser`, then mount the server routes under `/servers`.
 */
async function buildTestApp(authUser: AuthUser): Promise<FastifyInstance> {
  const app = Fastify({ logger: false });
  await app.register(sensible);

  // Stub authenticate: attach the fixture user to the incoming request.
  const fakeAuthenticate = async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  };
  app.decorate('authenticate', fakeAuthenticate);

  // Image routes verify JWTs directly on the request; stub that path too.
  app.decorateRequest('jwtVerify', async function (this: { user: AuthUser }) {
    this.user = authUser;
  });

  await app.register(serverRoutes, { prefix: '/servers' });
  return app;
}
// Owner: may manage all servers (no per-server scoping).
const ownerUser: AuthUser = {
  userId: randomUUID(),
  username: 'admin',
  role: 'owner',
  serverIds: [],
};
// Viewer: scoped to the servers listed in `serverIds`.
const viewerUser: AuthUser = {
  userId: randomUUID(),
  username: 'viewer',
  role: 'viewer',
  serverIds: [randomUUID()],
};
// Server row fixture; `token` is stored in encrypted form to match the
// crypto mock above (encrypt prefixes 'encrypted_').
const mockServer = {
  id: randomUUID(),
  name: 'Test Plex Server',
  type: 'plex' as const,
  url: 'http://localhost:32400',
  token: 'encrypted_test-token',
  createdAt: new Date(),
  updatedAt: new Date(),
};
describe('Server Routes', () => {
let app: FastifyInstance;
afterEach(async () => {
await app?.close();
vi.clearAllMocks();
});
describe('GET /servers', () => {
it('returns all servers for owner', async () => {
app = await buildTestApp(ownerUser);
mockDbSelectWhere([
{ id: mockServer.id, name: mockServer.name, type: mockServer.type, url: mockServer.url, createdAt: mockServer.createdAt, updatedAt: mockServer.updatedAt },
]);
const response = await app.inject({
method: 'GET',
url: '/servers',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.data).toHaveLength(1);
expect(body.data[0].name).toBe('Test Plex Server');
// Should not include token
expect(body.data[0].token).toBeUndefined();
});
it('returns only authorized servers for guest', async () => {
const guestServerId = randomUUID();
const guestWithServer: AuthUser = {
...viewerUser,
serverIds: [guestServerId],
};
app = await buildTestApp(guestWithServer);
mockDbSelectWhere([
{ id: guestServerId, name: 'Guest Server', type: 'jellyfin', url: 'http://localhost:8096', createdAt: new Date(), updatedAt: new Date() },
]);
const response = await app.inject({
method: 'GET',
url: '/servers',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.data).toHaveLength(1);
expect(body.data[0].id).toBe(guestServerId);
});
it('returns empty array when guest has no server access', async () => {
const guestNoAccess: AuthUser = {
...viewerUser,
serverIds: [],
};
app = await buildTestApp(guestNoAccess);
mockDbSelectWhere([]);
const response = await app.inject({
method: 'GET',
url: '/servers',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.data).toHaveLength(0);
});
});
// Exercises POST /servers: registering a media server of each supported type
// (Plex/Jellyfin/Emby), owner-only authorization, duplicate-URL detection,
// admin-token verification, connectivity failures, and payload validation.
describe('POST /servers', () => {
  beforeEach(() => {
    // Default happy path: every client type reports the token has admin
    // access; individual tests override these mocks to hit failure branches.
    vi.mocked(PlexClient.verifyServerAdmin).mockResolvedValue(true);
    vi.mocked(JellyfinClient.verifyServerAdmin).mockResolvedValue(true);
    vi.mocked(EmbyClient.verifyServerAdmin).mockResolvedValue(true);
    vi.mocked(syncServer).mockResolvedValue({ usersAdded: 5, usersUpdated: 0, librariesSynced: 3, errors: [] });
  });
  it('creates a new Plex server for owner', async () => {
    app = await buildTestApp(ownerUser);
    // No existing server
    mockDbSelectLimit([]);
    const newServer = {
      id: randomUUID(),
      name: 'New Plex',
      type: 'plex',
      url: 'http://plex.local:32400',
      createdAt: new Date(),
      updatedAt: new Date(),
    };
    mockDbInsert([newServer]);
    const response = await app.inject({
      method: 'POST',
      url: '/servers',
      payload: {
        name: 'New Plex',
        type: 'plex',
        url: 'http://plex.local:32400',
        token: 'my-plex-token',
      },
    });
    expect(response.statusCode).toBe(201);
    // The admin check must receive the exact token/url pair from the payload.
    expect(PlexClient.verifyServerAdmin).toHaveBeenCalledWith('my-plex-token', 'http://plex.local:32400');
    const body = response.json();
    expect(body.name).toBe('New Plex');
    expect(body.type).toBe('plex');
  });
  it('creates a new Jellyfin server for owner', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([]);
    const newServer = {
      id: randomUUID(),
      name: 'New Jellyfin',
      type: 'jellyfin',
      url: 'http://jellyfin.local:8096',
      createdAt: new Date(),
      updatedAt: new Date(),
    };
    mockDbInsert([newServer]);
    const response = await app.inject({
      method: 'POST',
      url: '/servers',
      payload: {
        name: 'New Jellyfin',
        type: 'jellyfin',
        url: 'http://jellyfin.local:8096',
        token: 'my-jellyfin-token',
      },
    });
    expect(response.statusCode).toBe(201);
    // The Jellyfin client (not Plex) must perform the verification.
    expect(JellyfinClient.verifyServerAdmin).toHaveBeenCalledWith('my-jellyfin-token', 'http://jellyfin.local:8096');
  });
  it('creates a new Emby server for owner', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([]);
    const newServer = {
      id: randomUUID(),
      name: 'New Emby',
      type: 'emby',
      url: 'http://emby.local:8096',
      createdAt: new Date(),
      updatedAt: new Date(),
    };
    mockDbInsert([newServer]);
    const response = await app.inject({
      method: 'POST',
      url: '/servers',
      payload: {
        name: 'New Emby',
        type: 'emby',
        url: 'http://emby.local:8096',
        token: 'my-emby-token',
      },
    });
    expect(response.statusCode).toBe(201);
    expect(EmbyClient.verifyServerAdmin).toHaveBeenCalledWith('my-emby-token', 'http://emby.local:8096');
  });
  it('rejects guest creating server', async () => {
    // viewerUser lacks the owner role; the route must refuse before any DB work.
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/servers',
      payload: {
        name: 'Guest Server',
        type: 'plex',
        url: 'http://guest.local:32400',
        token: 'guest-token',
      },
    });
    expect(response.statusCode).toBe(403);
    expect(response.json().message).toContain('Only server owners');
  });
  it('rejects duplicate server URL', async () => {
    app = await buildTestApp(ownerUser);
    // Existing server with same URL
    mockDbSelectLimit([mockServer]);
    const response = await app.inject({
      method: 'POST',
      url: '/servers',
      payload: {
        name: 'Duplicate',
        type: 'plex',
        url: mockServer.url,
        token: 'test-token',
      },
    });
    expect(response.statusCode).toBe(409);
    expect(response.json().message).toContain('already exists');
  });
  it('rejects non-admin token', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([]);
    // Token is valid but lacks admin privileges on the target server.
    vi.mocked(PlexClient.verifyServerAdmin).mockResolvedValue(false);
    const response = await app.inject({
      method: 'POST',
      url: '/servers',
      payload: {
        name: 'Non-Admin',
        type: 'plex',
        url: 'http://nonadmin.local:32400',
        token: 'non-admin-token',
      },
    });
    expect(response.statusCode).toBe(403);
    expect(response.json().message).toContain('admin access');
  });
  it('handles connection error to media server', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([]);
    // Verification throwing (network failure) maps to a 400, not a 500.
    vi.mocked(PlexClient.verifyServerAdmin).mockRejectedValue(new Error('Connection refused'));
    const response = await app.inject({
      method: 'POST',
      url: '/servers',
      payload: {
        name: 'Unreachable',
        type: 'plex',
        url: 'http://unreachable.local:32400',
        token: 'test-token',
      },
    });
    expect(response.statusCode).toBe(400);
    expect(response.json().message).toContain('Failed to connect');
  });
  it('rejects invalid request body', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/servers',
      payload: {
        name: '', // Invalid: empty name
        type: 'invalid-type',
        url: 'not-a-url',
      },
    });
    expect(response.statusCode).toBe(400);
  });
});
// Exercises DELETE /servers/:id: owner-only deletion, missing servers,
// and parameter validation.
describe('DELETE /servers/:id', () => {
  it('deletes server for owner', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([mockServer]);
    mockDbDelete();
    const res = await app.inject({ method: 'DELETE', url: `/servers/${mockServer.id}` });
    expect(res.statusCode).toBe(200);
    expect(res.json().success).toBe(true);
  });
  it('rejects guest deleting server', async () => {
    // Non-owner roles may not delete servers at all.
    app = await buildTestApp(viewerUser);
    const res = await app.inject({ method: 'DELETE', url: `/servers/${mockServer.id}` });
    expect(res.statusCode).toBe(403);
  });
  it('returns 404 for non-existent server', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([]);
    const res = await app.inject({ method: 'DELETE', url: `/servers/${randomUUID()}` });
    expect(res.statusCode).toBe(404);
  });
  it('returns 400 for invalid UUID', async () => {
    // Malformed id fails schema validation before any lookup.
    app = await buildTestApp(ownerUser);
    const res = await app.inject({ method: 'DELETE', url: '/servers/not-a-uuid' });
    expect(res.statusCode).toBe(400);
  });
});
// Exercises POST /servers/:id/sync: triggering a user/library sync and
// surfacing per-item errors and service failures from the sync layer.
describe('POST /servers/:id/sync', () => {
  beforeEach(() => {
    // Default: a clean sync with a few users and libraries processed.
    vi.mocked(syncServer).mockResolvedValue({
      usersAdded: 3,
      usersUpdated: 2,
      librariesSynced: 5,
      errors: [],
    });
  });
  it('syncs server for owner', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([mockServer]);
    mockDbUpdate();
    const response = await app.inject({
      method: 'POST',
      url: `/servers/${mockServer.id}/sync`,
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.success).toBe(true);
    expect(body.usersAdded).toBe(3);
    expect(body.usersUpdated).toBe(2);
    expect(body.librariesSynced).toBe(5);
    expect(body.errors).toEqual([]);
    // Route requests both user and library sync in a single service call.
    expect(syncServer).toHaveBeenCalledWith(mockServer.id, { syncUsers: true, syncLibraries: true });
  });
  it('returns errors when sync has issues', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(syncServer).mockResolvedValue({
      usersAdded: 1,
      usersUpdated: 0,
      librariesSynced: 0,
      errors: ['Failed to fetch library 1', 'User sync timeout'],
    });
    mockDbSelectLimit([mockServer]);
    mockDbUpdate();
    const response = await app.inject({
      method: 'POST',
      url: `/servers/${mockServer.id}/sync`,
    });
    // Partial failures still return 200; the success flag conveys the outcome.
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.success).toBe(false);
    expect(body.errors).toHaveLength(2);
  });
  it('rejects guest syncing server', async () => {
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'POST',
      url: `/servers/${mockServer.id}/sync`,
    });
    expect(response.statusCode).toBe(403);
  });
  it('returns 404 for non-existent server', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([]);
    const response = await app.inject({
      method: 'POST',
      url: `/servers/${randomUUID()}/sync`,
    });
    expect(response.statusCode).toBe(404);
  });
  it('handles sync service error', async () => {
    // A thrown (not reported) sync error surfaces as a 500.
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([mockServer]);
    vi.mocked(syncServer).mockRejectedValue(new Error('Sync failed'));
    const response = await app.inject({
      method: 'POST',
      url: `/servers/${mockServer.id}/sync`,
    });
    expect(response.statusCode).toBe(500);
  });
});
// Exercises GET /servers/:id/image/*: proxying artwork from the upstream
// media server, including per-server-type auth (Plex query token vs
// Jellyfin/Emby header), query-param auth for <img> tags, and error paths.
describe('GET /servers/:id/image/*', () => {
  it('proxies Plex image with token in URL', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([mockServer]);
    const imageBuffer = Buffer.from('fake-image-data');
    mockFetch.mockResolvedValue({
      ok: true,
      headers: new Map([['content-type', 'image/jpeg']]),
      arrayBuffer: () => Promise.resolve(imageBuffer),
    });
    const response = await app.inject({
      method: 'GET',
      url: `/servers/${mockServer.id}/image/library/metadata/123/thumb/456`,
    });
    expect(response.statusCode).toBe(200);
    expect(response.headers['content-type']).toBe('image/jpeg');
    // Responses are cacheable for a day.
    expect(response.headers['cache-control']).toContain('max-age=86400');
    // Verify fetch was called with correct URL including Plex token
    expect(mockFetch).toHaveBeenCalledWith(
      expect.stringContaining('X-Plex-Token='),
      expect.any(Object)
    );
  });
  it('proxies Jellyfin image with auth header', async () => {
    const jellyfinServer = {
      ...mockServer,
      type: 'jellyfin' as const,
      url: 'http://localhost:8096',
    };
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([jellyfinServer]);
    const imageBuffer = Buffer.from('fake-image-data');
    mockFetch.mockResolvedValue({
      ok: true,
      headers: new Map([['content-type', 'image/png']]),
      arrayBuffer: () => Promise.resolve(imageBuffer),
    });
    const response = await app.inject({
      method: 'GET',
      url: `/servers/${jellyfinServer.id}/image/Items/abc/Images/Primary`,
    });
    expect(response.statusCode).toBe(200);
    // Verify fetch was called with X-Emby-Authorization header
    expect(mockFetch).toHaveBeenCalledWith(
      expect.any(String),
      expect.objectContaining({
        headers: expect.objectContaining({
          'X-Emby-Authorization': expect.stringContaining('MediaBrowser'),
        }),
      })
    );
  });
  it('accepts auth via query param for img tags', async () => {
    // Create app with custom jwtVerify that reads from query
    const customApp = Fastify({ logger: false });
    await customApp.register(sensible);
    customApp.decorate('authenticate', async (request: unknown) => {
      (request as { user: AuthUser }).user = ownerUser;
    });
    customApp.decorateRequest('jwtVerify', async function (this: { user: AuthUser; headers: { authorization?: string } }) {
      // Simulate JWT verification - if header exists, it's valid
      if (this.headers.authorization) {
        this.user = ownerUser;
      } else {
        throw new Error('Missing token');
      }
    });
    await customApp.register(serverRoutes, { prefix: '/servers' });
    mockDbSelectLimit([mockServer]);
    mockFetch.mockResolvedValue({
      ok: true,
      headers: new Map([['content-type', 'image/jpeg']]),
      arrayBuffer: () => Promise.resolve(Buffer.from('image')),
    });
    const response = await customApp.inject({
      method: 'GET',
      url: `/servers/${mockServer.id}/image/thumb.jpg?token=valid-jwt-token`,
    });
    expect(response.statusCode).toBe(200);
    // This test builds its own app, so close it here rather than in afterEach.
    await customApp.close();
  });
  it('returns 404 for non-existent server', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([]);
    const response = await app.inject({
      method: 'GET',
      url: `/servers/${randomUUID()}/image/thumb.jpg`,
    });
    expect(response.statusCode).toBe(404);
  });
  it('returns 404 when upstream image not found', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([mockServer]);
    // Upstream 404 is passed through rather than treated as a server error.
    mockFetch.mockResolvedValue({
      ok: false,
      status: 404,
    });
    const response = await app.inject({
      method: 'GET',
      url: `/servers/${mockServer.id}/image/nonexistent.jpg`,
    });
    expect(response.statusCode).toBe(404);
  });
  it('handles fetch error gracefully', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([mockServer]);
    mockFetch.mockRejectedValue(new Error('Network error'));
    const response = await app.inject({
      method: 'GET',
      url: `/servers/${mockServer.id}/image/thumb.jpg`,
    });
    expect(response.statusCode).toBe(500);
  });
  it('returns 400 when image path is missing', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'GET',
      url: `/servers/${mockServer.id}/image/`,
    });
    // Wildcard route with empty path
    expect(response.statusCode).toBe(400);
  });
});
// Exercises GET /servers/:id/statistics, a Plex-only endpoint.
describe('GET /servers/:id/statistics', () => {
  it('returns 404 for non-existent server', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([]);
    const res = await app.inject({ method: 'GET', url: `/servers/${randomUUID()}/statistics` });
    expect(res.statusCode).toBe(404);
  });
  it('returns 400 for non-Plex server', async () => {
    // Statistics are Plex-specific; other server types are rejected.
    const jellyfinServer = { ...mockServer, type: 'jellyfin' as const };
    app = await buildTestApp(ownerUser);
    mockDbSelectLimit([jellyfinServer]);
    const res = await app.inject({ method: 'GET', url: `/servers/${jellyfinServer.id}/statistics` });
    expect(res.statusCode).toBe(400);
    expect(res.json().message).toContain('only available for Plex');
  });
  it('returns 400 for invalid server ID', async () => {
    app = await buildTestApp(ownerUser);
    const res = await app.inject({ method: 'GET', url: '/servers/not-a-uuid/statistics' });
    expect(res.statusCode).toBe(400);
  });
});
});

View File

@@ -0,0 +1,628 @@
/**
* Session routes tests
*
* Tests the API endpoints for session queries:
* - GET /sessions - List historical sessions with filters
* - GET /sessions/active - Get currently active streams
* - GET /sessions/:id - Get a specific session
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser, ActiveSession } from '@tracearr/shared';
// Mock the database module before importing routes.
// vi.mock calls are hoisted above the imports below, so the routes bind
// to these mocked modules when they are loaded.
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
    execute: vi.fn(),
  },
}));
// Mock cache service - need to provide getAllActiveSessions for /active endpoint.
// Held in a top-level const so individual tests can queue per-call results
// with mockResolvedValueOnce.
const mockGetAllActiveSessions = vi.fn().mockResolvedValue([]);
vi.mock('../../services/cache.js', () => ({
  getCacheService: vi.fn(() => ({
    getAllActiveSessions: mockGetAllActiveSessions,
    getSessionById: vi.fn().mockResolvedValue(null),
  })),
}));
// Import the mocked db and the routes
import { db } from '../../db/client.js';
import { sessionRoutes } from '../sessions.js';
/**
 * Build a test Fastify instance with mocked auth and redis.
 *
 * @param authUser - user attached to every request by the stubbed
 *   `authenticate` decorator (bypasses real JWT verification)
 * @param redisMock - optional Redis stub; defaults to a `get` resolving null
 * @returns a Fastify app with sessionRoutes mounted at /sessions
 */
async function buildTestApp(
  authUser: AuthUser,
  redisMock?: { get: ReturnType<typeof vi.fn> }
): Promise<FastifyInstance> {
  const app = Fastify({ logger: false });
  await app.register(sensible);
  // Mock the authenticate decorator. Use `unknown` + a narrowing cast
  // instead of `any` to stay lint-clean and match the other route tests.
  app.decorate('authenticate', async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  });
  // Mock Redis (cast to never for test mock)
  app.decorate('redis', (redisMock ?? { get: vi.fn().mockResolvedValue(null) }) as never);
  await app.register(sessionRoutes, { prefix: '/sessions' });
  return app;
}
/** Factory for an owner-role AuthUser; defaults to a single random server id. */
function createOwnerUser(serverIds?: string[]): AuthUser {
  const ids = serverIds ?? [randomUUID()];
  return { userId: randomUUID(), username: 'owner', role: 'owner', serverIds: ids };
}
/** Factory for a viewer-role AuthUser; defaults to a single random server id. */
function createViewerUser(serverIds?: string[]): AuthUser {
  const ids = serverIds ?? [randomUUID()];
  return { userId: randomUUID(), username: 'viewer', role: 'viewer', serverIds: ids };
}
/**
 * Build a fully-populated ActiveSession fixture.
 *
 * Every field falls back to a sensible default via `??`, so tests only
 * specify the fields they care about. Note `??` (not spread) is used so an
 * explicitly-passed `undefined` still picks up the default.
 */
function createActiveSession(overrides: Partial<ActiveSession> = {}): ActiveSession {
  // serverId is resolved first so the nested `server` default can reuse it.
  const serverId = overrides.serverId ?? randomUUID();
  return {
    id: overrides.id ?? randomUUID(),
    sessionKey: overrides.sessionKey ?? 'session-123',
    serverId,
    serverUserId: overrides.serverUserId ?? randomUUID(),
    state: overrides.state ?? 'playing',
    mediaType: overrides.mediaType ?? 'movie',
    mediaTitle: overrides.mediaTitle ?? 'Test Movie',
    grandparentTitle: overrides.grandparentTitle ?? null,
    seasonNumber: overrides.seasonNumber ?? null,
    episodeNumber: overrides.episodeNumber ?? null,
    year: overrides.year ?? 2024,
    thumbPath: overrides.thumbPath ?? '/library/metadata/123/thumb',
    ratingKey: overrides.ratingKey ?? 'media-123',
    externalSessionId: overrides.externalSessionId ?? null,
    startedAt: overrides.startedAt ?? new Date(),
    stoppedAt: overrides.stoppedAt ?? null,
    durationMs: overrides.durationMs ?? 0,
    progressMs: overrides.progressMs ?? 0,
    totalDurationMs: overrides.totalDurationMs ?? 7200000,
    lastPausedAt: overrides.lastPausedAt ?? null,
    pausedDurationMs: overrides.pausedDurationMs ?? 0,
    referenceId: overrides.referenceId ?? null,
    watched: overrides.watched ?? false,
    ipAddress: overrides.ipAddress ?? '192.168.1.100',
    geoCity: overrides.geoCity ?? 'New York',
    geoRegion: overrides.geoRegion ?? 'NY',
    geoCountry: overrides.geoCountry ?? 'US',
    geoLat: overrides.geoLat ?? 40.7128,
    geoLon: overrides.geoLon ?? -74.006,
    playerName: overrides.playerName ?? 'Chrome',
    deviceId: overrides.deviceId ?? 'device-123',
    product: overrides.product ?? 'Plex Web',
    device: overrides.device ?? 'Chrome',
    platform: overrides.platform ?? 'Chrome',
    quality: overrides.quality ?? '1080p',
    isTranscode: overrides.isTranscode ?? false,
    bitrate: overrides.bitrate ?? 20000,
    user: overrides.user ?? {
      id: randomUUID(),
      username: 'testuser',
      thumbUrl: null,
      identityName: null,
    },
    server: overrides.server ?? {
      id: serverId,
      name: 'Test Server',
      type: 'plex',
    },
  };
}
// Top-level suite for the session routes. Covers the paginated history
// listing, the Redis-backed /active endpoint, and single-session lookup
// with cache-first resolution and server-scoped access control.
describe('Session Routes', () => {
  let app: FastifyInstance;
  let mockDb: any;
  beforeEach(() => {
    vi.clearAllMocks();
    mockDb = db as any;
  });
  afterEach(async () => {
    if (app) {
      await app.close();
    }
  });
  describe('GET /sessions', () => {
    it('should return paginated sessions for owner', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      // Raw SQL result shape: snake_case columns, counts as strings,
      // matching what db.execute returns for the history query.
      const mockSessionRows = [
        {
          id: randomUUID(),
          started_at: new Date(),
          stopped_at: new Date(),
          duration_ms: '3600000',
          paused_duration_ms: '0',
          progress_ms: 3600000,
          segment_count: '1',
          watched: true,
          state: 'stopped',
          server_id: ownerUser.serverIds[0],
          server_name: 'Test Server',
          server_type: 'plex',
          server_user_id: randomUUID(),
          username: 'testuser',
          user_thumb: null,
          session_key: 'session-1',
          media_type: 'movie',
          media_title: 'Test Movie',
          grandparent_title: null,
          season_number: null,
          episode_number: null,
          year: 2024,
          thumb_path: '/thumb',
          reference_id: null,
          ip_address: '192.168.1.1',
          geo_city: 'NYC',
          geo_region: 'NY',
          geo_country: 'US',
          geo_lat: 40.7,
          geo_lon: -74.0,
          player_name: 'Chrome',
          device_id: 'dev-1',
          product: 'Plex Web',
          device: 'Chrome',
          platform: 'Chrome',
          quality: '1080p',
          is_transcode: false,
          bitrate: 20000,
        },
      ];
      // Mock the main query
      mockDb.execute.mockResolvedValueOnce({ rows: mockSessionRows });
      // Mock the count query
      mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 1 }] });
      const response = await app.inject({
        method: 'GET',
        url: '/sessions',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.data).toHaveLength(1);
      expect(body.page).toBe(1);
      expect(body.total).toBe(1);
    });
    it('should filter by serverUserId', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      mockDb.execute.mockResolvedValueOnce({ rows: [] });
      mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 0 }] });
      const serverUserId = randomUUID();
      const response = await app.inject({
        method: 'GET',
        url: `/sessions?serverUserId=${serverUserId}`,
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.data).toHaveLength(0);
    });
    it('should filter by mediaType', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      mockDb.execute.mockResolvedValueOnce({ rows: [] });
      mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 0 }] });
      const response = await app.inject({
        method: 'GET',
        url: '/sessions?mediaType=movie',
      });
      expect(response.statusCode).toBe(200);
    });
    it('should filter by date range', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      mockDb.execute.mockResolvedValueOnce({ rows: [] });
      mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 0 }] });
      const response = await app.inject({
        method: 'GET',
        url: '/sessions?startDate=2024-01-01&endDate=2024-12-31',
      });
      expect(response.statusCode).toBe(200);
    });
    it('should handle pagination', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      mockDb.execute.mockResolvedValueOnce({ rows: [] });
      mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 100 }] });
      const response = await app.inject({
        method: 'GET',
        url: '/sessions?page=2&pageSize=25',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.page).toBe(2);
      expect(body.pageSize).toBe(25);
      // 100 rows / 25 per page.
      expect(body.totalPages).toBe(4);
    });
    it('should reject invalid query parameters', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/sessions?page=-1',
      });
      expect(response.statusCode).toBe(400);
    });
  });
  describe('GET /sessions/active', () => {
    it('should return active sessions from cache', async () => {
      const serverId = randomUUID();
      const ownerUser = createOwnerUser([serverId]);
      const activeSessions = [createActiveSession({ serverId })];
      // Mock the cache service response
      mockGetAllActiveSessions.mockResolvedValueOnce(activeSessions);
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/sessions/active',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.data).toHaveLength(1);
      expect(mockGetAllActiveSessions).toHaveBeenCalled();
    });
    it('should return empty array when cache is empty', async () => {
      const ownerUser = createOwnerUser();
      // Mock empty cache
      mockGetAllActiveSessions.mockResolvedValueOnce([]);
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/sessions/active',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.data).toHaveLength(0);
    });
    it('should filter sessions by user serverIds', async () => {
      // The viewer only has access to serverId1, so the serverId2 session
      // must be filtered out of the response.
      const serverId1 = randomUUID();
      const serverId2 = randomUUID();
      const viewerUser = createViewerUser([serverId1]);
      const activeSessions = [
        createActiveSession({ serverId: serverId1 }),
        createActiveSession({ serverId: serverId2 }),
      ];
      // Mock the cache service response
      mockGetAllActiveSessions.mockResolvedValueOnce(activeSessions);
      app = await buildTestApp(viewerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/sessions/active',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.data).toHaveLength(1);
      expect(body.data[0].serverId).toBe(serverId1);
    });
    it('should handle invalid JSON in cache', async () => {
      const ownerUser = createOwnerUser();
      // getAllActiveSessions handles parsing internally, so this just tests empty
      mockGetAllActiveSessions.mockResolvedValueOnce([]);
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/sessions/active',
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.data).toHaveLength(0);
    });
  });
  describe('GET /sessions/:id', () => {
    it('should return session from cache if active', async () => {
      const serverId = randomUUID();
      const sessionId = randomUUID();
      const ownerUser = createOwnerUser([serverId]);
      const activeSession = createActiveSession({ id: sessionId, serverId });
      // Redis hit: the route should serve the cached session without a DB query.
      const redisMock = {
        get: vi.fn().mockResolvedValue(JSON.stringify(activeSession)),
      };
      app = await buildTestApp(ownerUser, redisMock);
      const response = await app.inject({
        method: 'GET',
        url: `/sessions/${sessionId}`,
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.id).toBe(sessionId);
      expect(body.username).toBe(activeSession.user.username);
      expect(body.serverName).toBe(activeSession.server.name);
    });
    it('should return session from database if not in cache', async () => {
      const serverId = randomUUID();
      const sessionId = randomUUID();
      const ownerUser = createOwnerUser([serverId]);
      // Redis miss: the route falls back to the DB join query below.
      const redisMock = {
        get: vi.fn().mockResolvedValue(null),
      };
      app = await buildTestApp(ownerUser, redisMock);
      const dbSession = {
        id: sessionId,
        serverId,
        serverName: 'Test Server',
        serverType: 'plex',
        serverUserId: randomUUID(),
        username: 'testuser',
        userThumb: null,
        sessionKey: 'session-1',
        state: 'stopped',
        mediaType: 'movie',
        mediaTitle: 'Test Movie',
        grandparentTitle: null,
        seasonNumber: null,
        episodeNumber: null,
        year: 2024,
        thumbPath: '/thumb',
        startedAt: new Date(),
        stoppedAt: new Date(),
        durationMs: 3600000,
        progressMs: 3600000,
        totalDurationMs: 7200000,
        lastPausedAt: null,
        pausedDurationMs: 0,
        referenceId: null,
        watched: true,
        ipAddress: '192.168.1.1',
        geoCity: 'NYC',
        geoRegion: 'NY',
        geoCountry: 'US',
        geoLat: 40.7,
        geoLon: -74.0,
        playerName: 'Chrome',
        deviceId: 'dev-1',
        product: 'Plex Web',
        device: 'Chrome',
        platform: 'Chrome',
        quality: '1080p',
        isTranscode: false,
        bitrate: 20000,
      };
      // Stub the Drizzle chain select().from().innerJoin().innerJoin().where().limit().
      mockDb.select.mockReturnValue({
        from: vi.fn().mockReturnValue({
          innerJoin: vi.fn().mockReturnValue({
            innerJoin: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                limit: vi.fn().mockResolvedValue([dbSession]),
              }),
            }),
          }),
        }),
      });
      const response = await app.inject({
        method: 'GET',
        url: `/sessions/${sessionId}`,
      });
      expect(response.statusCode).toBe(200);
      const body = JSON.parse(response.body);
      expect(body.id).toBe(sessionId);
    });
    it('should return 404 for non-existent session', async () => {
      const ownerUser = createOwnerUser();
      const redisMock = {
        get: vi.fn().mockResolvedValue(null),
      };
      app = await buildTestApp(ownerUser, redisMock);
      mockDb.select.mockReturnValue({
        from: vi.fn().mockReturnValue({
          innerJoin: vi.fn().mockReturnValue({
            innerJoin: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                limit: vi.fn().mockResolvedValue([]),
              }),
            }),
          }),
        }),
      });
      const response = await app.inject({
        method: 'GET',
        url: `/sessions/${randomUUID()}`,
      });
      expect(response.statusCode).toBe(404);
    });
    it('should return 400 for invalid UUID', async () => {
      const ownerUser = createOwnerUser();
      app = await buildTestApp(ownerUser);
      const response = await app.inject({
        method: 'GET',
        url: '/sessions/not-a-uuid',
      });
      expect(response.statusCode).toBe(400);
    });
    it('should return 403 when user lacks access to session server', async () => {
      const serverId = randomUUID();
      const sessionId = randomUUID();
      const differentServerId = randomUUID();
      // Viewer is scoped to a different server than the session's.
      const viewerUser = createViewerUser([differentServerId]);
      const redisMock = {
        get: vi.fn().mockResolvedValue(null),
      };
      app = await buildTestApp(viewerUser, redisMock);
      const dbSession = {
        id: sessionId,
        serverId,
        serverName: 'Test Server',
        serverType: 'plex',
        serverUserId: randomUUID(),
        username: 'testuser',
        userThumb: null,
        sessionKey: 'session-1',
        state: 'stopped',
        mediaType: 'movie',
        mediaTitle: 'Test Movie',
        grandparentTitle: null,
        seasonNumber: null,
        episodeNumber: null,
        year: 2024,
        thumbPath: '/thumb',
        startedAt: new Date(),
        stoppedAt: new Date(),
        durationMs: 3600000,
        progressMs: 3600000,
        totalDurationMs: 7200000,
        lastPausedAt: null,
        pausedDurationMs: 0,
        referenceId: null,
        watched: true,
        ipAddress: '192.168.1.1',
        geoCity: 'NYC',
        geoRegion: 'NY',
        geoCountry: 'US',
        geoLat: 40.7,
        geoLon: -74.0,
        playerName: 'Chrome',
        deviceId: 'dev-1',
        product: 'Plex Web',
        device: 'Chrome',
        platform: 'Chrome',
        quality: '1080p',
        isTranscode: false,
        bitrate: 20000,
      };
      mockDb.select.mockReturnValue({
        from: vi.fn().mockReturnValue({
          innerJoin: vi.fn().mockReturnValue({
            innerJoin: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                limit: vi.fn().mockResolvedValue([dbSession]),
              }),
            }),
          }),
        }),
      });
      const response = await app.inject({
        method: 'GET',
        url: `/sessions/${sessionId}`,
      });
      expect(response.statusCode).toBe(403);
    });
    it('should deny access to cached session from wrong server', async () => {
      const serverId = randomUUID();
      const sessionId = randomUUID();
      const differentServerId = randomUUID();
      const viewerUser = createViewerUser([differentServerId]);
      const activeSession = createActiveSession({ id: sessionId, serverId });
      const redisMock = {
        get: vi.fn().mockResolvedValue(JSON.stringify(activeSession)),
      };
      app = await buildTestApp(viewerUser, redisMock);
      // Should fall through to DB since server access denied
      mockDb.select.mockReturnValue({
        from: vi.fn().mockReturnValue({
          innerJoin: vi.fn().mockReturnValue({
            innerJoin: vi.fn().mockReturnValue({
              where: vi.fn().mockReturnValue({
                limit: vi.fn().mockResolvedValue([]),
              }),
            }),
          }),
        }),
      });
      const response = await app.inject({
        method: 'GET',
        url: `/sessions/${sessionId}`,
      });
      expect(response.statusCode).toBe(404);
    });
  });
});

View File

@@ -0,0 +1,698 @@
/**
* Settings routes tests
*
* Tests the API endpoints for application settings:
* - GET /settings - Get application settings (owner only)
* - PATCH /settings - Update application settings (owner only)
*/
import { describe, it, expect, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser } from '@tracearr/shared';
// Mock the database module before importing routes.
// vi.mock is hoisted above the imports below, so settingsRoutes binds
// to this mocked `db` when it is loaded.
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
    insert: vi.fn(),
    update: vi.fn(),
  },
}));
// Import mocked modules
import { db } from '../../db/client.js';
import { settingsRoutes } from '../settings.js';
// Helpers that stub the Drizzle fluent query chains on the mocked db.
/** Stub db.select().from().where().limit() to resolve with `result`. */
function mockDbSelectLimit(result: unknown[]) {
  const limit = vi.fn().mockResolvedValue(result);
  const chain = { from: vi.fn().mockReturnThis(), where: vi.fn().mockReturnThis(), limit };
  vi.mocked(db.select).mockReturnValue(chain as never);
  return chain;
}
/** Stub db.insert().values().returning() to resolve with `result`. */
function mockDbInsert(result: unknown[]) {
  const returning = vi.fn().mockResolvedValue(result);
  const chain = { values: vi.fn().mockReturnThis(), returning };
  vi.mocked(db.insert).mockReturnValue(chain as never);
  return chain;
}
/** Stub db.update().set().where() so the update resolves successfully. */
function mockDbUpdate() {
  const chain = { set: vi.fn().mockReturnThis(), where: vi.fn().mockResolvedValue(undefined) };
  vi.mocked(db.update).mockReturnValue(chain as never);
  return chain;
}
/** Build a Fastify test app with a stubbed `authenticate` decorator. */
async function buildTestApp(authUser: AuthUser): Promise<FastifyInstance> {
  const server = Fastify({ logger: false });
  await server.register(sensible);
  // Bypass real JWT auth: every request is attributed to `authUser`.
  server.decorate('authenticate', async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  });
  await server.register(settingsRoutes, { prefix: '/settings' });
  return server;
}
// Fixture: an owner-role user, allowed to read and update settings.
const ownerUser: AuthUser = {
  userId: randomUUID(),
  username: 'admin',
  role: 'owner',
  serverIds: [],
};
// Fixture: a viewer-role user, used to assert owner-only access control.
const viewerUser: AuthUser = {
  userId: randomUUID(),
  username: 'viewer',
  role: 'viewer',
  serverIds: [],
};
// Fixture: a fully-populated settings row as stored in the DB.
// Contains the raw tautulliApiKey — tests assert it is masked in responses.
const mockSettingsRow = {
  id: 1,
  allowGuestAccess: false,
  discordWebhookUrl: 'https://discord.com/api/webhooks/123',
  customWebhookUrl: 'https://example.com/webhook',
  webhookFormat: 'json' as const,
  ntfyTopic: null,
  pollerEnabled: true,
  pollerIntervalMs: 15000,
  tautulliUrl: 'http://localhost:8181',
  tautulliApiKey: 'secret-api-key',
  externalUrl: 'https://tracearr.example.com',
  basePath: '/app',
  trustProxy: true,
  mobileEnabled: false,
  createdAt: new Date(),
  updatedAt: new Date(),
};
describe('Settings Routes', () => {
let app: FastifyInstance;
afterEach(async () => {
await app?.close();
vi.clearAllMocks();
});
describe('GET /settings', () => {
it('returns settings for owner', async () => {
app = await buildTestApp(ownerUser);
mockDbSelectLimit([mockSettingsRow]);
const response = await app.inject({
method: 'GET',
url: '/settings',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.allowGuestAccess).toBe(false);
expect(body.discordWebhookUrl).toBe('https://discord.com/api/webhooks/123');
expect(body.pollerEnabled).toBe(true);
expect(body.pollerIntervalMs).toBe(15000);
expect(body.externalUrl).toBe('https://tracearr.example.com');
expect(body.basePath).toBe('/app');
expect(body.trustProxy).toBe(true);
});
it('masks tautulli API key in response', async () => {
app = await buildTestApp(ownerUser);
mockDbSelectLimit([mockSettingsRow]);
const response = await app.inject({
method: 'GET',
url: '/settings',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.tautulliApiKey).toBe('********');
expect(body.tautulliUrl).toBe('http://localhost:8181');
});
it('returns null for tautulliApiKey when not set', async () => {
app = await buildTestApp(ownerUser);
mockDbSelectLimit([{ ...mockSettingsRow, tautulliApiKey: null }]);
const response = await app.inject({
method: 'GET',
url: '/settings',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.tautulliApiKey).toBe(null);
});
it('creates default settings when none exist', async () => {
app = await buildTestApp(ownerUser);
// First select returns empty (no settings)
mockDbSelectLimit([]);
// Then insert creates defaults
const defaultSettings = {
id: 1,
allowGuestAccess: false,
discordWebhookUrl: null,
customWebhookUrl: null,
pollerEnabled: true,
pollerIntervalMs: 15000,
tautulliUrl: null,
tautulliApiKey: null,
externalUrl: null,
basePath: '',
trustProxy: false,
mobileEnabled: false,
};
mockDbInsert([defaultSettings]);
const response = await app.inject({
method: 'GET',
url: '/settings',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.allowGuestAccess).toBe(false);
});
it('rejects guest accessing settings', async () => {
app = await buildTestApp(viewerUser);
const response = await app.inject({
method: 'GET',
url: '/settings',
});
expect(response.statusCode).toBe(403);
expect(response.json().message).toContain('Only server owners');
});
it('returns webhook format settings', async () => {
app = await buildTestApp(ownerUser);
mockDbSelectLimit([{ ...mockSettingsRow, webhookFormat: 'ntfy', ntfyTopic: 'my-topic' }]);
const response = await app.inject({
method: 'GET',
url: '/settings',
});
expect(response.statusCode).toBe(200);
const body = response.json();
expect(body.webhookFormat).toBe('ntfy');
expect(body.ntfyTopic).toBe('my-topic');
});
});
// PATCH /settings — owner-only partial update of the singleton settings row.
// Most tests share a two-phase db.select mock driven by selectCount: the first
// select returns the current row (existence check), the second returns the
// post-update row the route reads back to build the response body.
describe('PATCH /settings', () => {
  it('updates settings for owner', async () => {
    app = await buildTestApp(ownerUser);
    // First check existing settings
    mockDbSelectLimit([mockSettingsRow]);
    mockDbUpdate();
    // Return updated settings on final select
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{ ...mockSettingsRow, allowGuestAccess: true }]
        ),
      };
      return chain as never;
    });
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        allowGuestAccess: true,
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.allowGuestAccess).toBe(true);
  });
  it('updates webhook URLs', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{
                ...mockSettingsRow,
                discordWebhookUrl: 'https://new-discord-webhook.com',
                customWebhookUrl: 'https://new-custom-webhook.com',
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        discordWebhookUrl: 'https://new-discord-webhook.com',
        customWebhookUrl: 'https://new-custom-webhook.com',
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.discordWebhookUrl).toBe('https://new-discord-webhook.com');
    expect(body.customWebhookUrl).toBe('https://new-custom-webhook.com');
  });
  it('updates poller settings', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{
                ...mockSettingsRow,
                pollerEnabled: false,
                pollerIntervalMs: 30000,
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        pollerEnabled: false,
        pollerIntervalMs: 30000,
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.pollerEnabled).toBe(false);
    expect(body.pollerIntervalMs).toBe(30000);
  });
  it('updates tautulli settings', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{
                ...mockSettingsRow,
                tautulliUrl: 'http://tautulli:8181',
                tautulliApiKey: 'new-api-key',
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        tautulliUrl: 'http://tautulli:8181',
        tautulliApiKey: 'new-api-key',
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.tautulliUrl).toBe('http://tautulli:8181');
    // The API key is a secret: the route never echoes the raw value back.
    expect(body.tautulliApiKey).toBe('********'); // Should be masked
  });
  it('updates network settings and normalizes externalUrl', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{
                ...mockSettingsRow,
                externalUrl: 'https://new-url.com', // Should strip trailing slash
                trustProxy: false,
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        externalUrl: 'https://new-url.com/', // With trailing slash
        trustProxy: false,
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.externalUrl).toBe('https://new-url.com');
    expect(body.trustProxy).toBe(false);
  });
  it('normalizes basePath by adding leading slash', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{
                ...mockSettingsRow,
                basePath: '/custom-path', // Should have leading slash
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        basePath: 'custom-path', // Without leading slash
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.basePath).toBe('/custom-path');
  });
  it('creates settings when none exist', async () => {
    app = await buildTestApp(ownerUser);
    // First select returns empty (no settings)
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [] // No existing settings
            : [{ ...mockSettingsRow, allowGuestAccess: true }] // After insert
        ),
      };
      return chain as never;
    });
    const insertChain = {
      values: vi.fn().mockResolvedValue(undefined),
    };
    vi.mocked(db.insert).mockReturnValue(insertChain as never);
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        allowGuestAccess: true,
      },
    });
    expect(response.statusCode).toBe(200);
    // PATCH against an empty table should fall back to an insert.
    expect(db.insert).toHaveBeenCalled();
  });
  it('rejects guest updating settings', async () => {
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        allowGuestAccess: true,
      },
    });
    expect(response.statusCode).toBe(403);
    expect(response.json().message).toContain('Only server owners');
  });
  it('rejects invalid request body', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        pollerIntervalMs: 'not-a-number', // Should be number
      },
    });
    expect(response.statusCode).toBe(400);
  });
  it('handles empty update body', async () => {
    app = await buildTestApp(ownerUser);
    vi.mocked(db.select).mockImplementation(() => {
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue([mockSettingsRow]),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {},
    });
    expect(response.statusCode).toBe(200);
    // Should still update the updatedAt timestamp
    expect(db.update).toHaveBeenCalled();
  });
  it('clears webhook URLs when set to null', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{
                ...mockSettingsRow,
                discordWebhookUrl: null,
                customWebhookUrl: null,
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        discordWebhookUrl: null,
        customWebhookUrl: null,
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.discordWebhookUrl).toBe(null);
    expect(body.customWebhookUrl).toBe(null);
  });
  it('updates webhook format to ntfy', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{
                ...mockSettingsRow,
                webhookFormat: 'ntfy',
                ntfyTopic: 'tracearr-alerts',
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        webhookFormat: 'ntfy',
        ntfyTopic: 'tracearr-alerts',
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.webhookFormat).toBe('ntfy');
    expect(body.ntfyTopic).toBe('tracearr-alerts');
  });
  it('updates webhook format to apprise', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [mockSettingsRow]
            : [{
                ...mockSettingsRow,
                webhookFormat: 'apprise',
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        webhookFormat: 'apprise',
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.webhookFormat).toBe('apprise');
  });
  it('rejects invalid webhook format', async () => {
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        webhookFormat: 'invalid-format',
      },
    });
    expect(response.statusCode).toBe(400);
  });
  it('clears ntfy topic when set to null', async () => {
    app = await buildTestApp(ownerUser);
    let selectCount = 0;
    vi.mocked(db.select).mockImplementation(() => {
      selectCount++;
      const chain = {
        from: vi.fn().mockReturnThis(),
        where: vi.fn().mockReturnThis(),
        limit: vi.fn().mockResolvedValue(
          selectCount === 1
            ? [{ ...mockSettingsRow, ntfyTopic: 'old-topic' }]
            : [{
                ...mockSettingsRow,
                ntfyTopic: null,
              }]
        ),
      };
      return chain as never;
    });
    mockDbUpdate();
    const response = await app.inject({
      method: 'PATCH',
      url: '/settings',
      payload: {
        ntfyTopic: null,
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.ntfyTopic).toBe(null);
  });
});
});

View File

@@ -0,0 +1,264 @@
/**
* Setup routes unit tests
*
* Tests the API endpoint for checking Tracearr configuration status:
* - GET /status - Check if setup is needed
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
// Mock the database module before importing routes.
// vitest hoists vi.mock() calls above the import statements, so setup.js
// receives this stub; only `select` is stubbed because the setup status
// endpoint exercised below is read-only.
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
  },
}));
// Import the mocked db and the routes
import { db } from '../../db/client.js';
import { setupRoutes } from '../setup.js';
/**
 * Program db.select to hand back one canned result set per call.
 *
 * The setup route issues 4 parallel queries via Promise.all:
 *   1. All servers
 *   2. Jellyfin servers (where type = 'jellyfin')
 *   3. Owners (where role = 'owner')
 *   4. Password users (where passwordHash is not null)
 * plus a 5th settings query (which may fail and default to 'local').
 * Calls beyond the supplied results resolve to an empty array.
 */
function mockDbSelectMultiple(results: unknown[][]) {
  let nextResult = 0;
  vi.mocked(db.select).mockImplementation(() => {
    // Fresh chain per select(); the terminal .limit() consumes the next result.
    const chain = {
      from: vi.fn().mockReturnThis(),
      where: vi.fn().mockReturnThis(),
      limit: vi.fn().mockImplementation(() => Promise.resolve(results[nextResult++] ?? [])),
    };
    return chain as never;
  });
}
/**
 * Build a minimal Fastify instance for exercising the setup routes.
 * No auth decorator is registered — setup routes are public.
 */
async function buildTestApp(): Promise<FastifyInstance> {
  const instance = Fastify({ logger: false });
  // @fastify/sensible supplies the HTTP error helpers the routes rely on
  await instance.register(sensible);
  await instance.register(setupRoutes, { prefix: '/setup' });
  return instance;
}
// GET /setup/status — public endpoint reporting first-run/setup state.
// Each test feeds mockDbSelectMultiple the four query results in the order
// the route issues them: servers, jellyfin servers, owners, password users.
describe('Setup Routes', () => {
  let app: FastifyInstance;
  beforeEach(() => {
    vi.clearAllMocks();
  });
  afterEach(async () => {
    if (app) {
      await app.close();
    }
  });
  describe('GET /setup/status', () => {
    it('returns needsSetup true when no owners exist', async () => {
      app = await buildTestApp();
      // Mock: servers exist, no jellyfin servers, no owners, no password users
      mockDbSelectMultiple([
        [{ id: 'server-1' }], // servers query
        [], // jellyfin servers query
        [], // owners query (empty = needs setup)
        [], // password users query
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/setup/status',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toEqual({
        needsSetup: true,
        hasServers: true,
        hasJellyfinServers: false,
        hasPasswordAuth: false,
        primaryAuthMethod: 'local',
      });
    });
    it('returns needsSetup false when owner exists', async () => {
      app = await buildTestApp();
      // Mock: servers exist, jellyfin servers exist, owner exists, password user exists
      mockDbSelectMultiple([
        [{ id: 'server-1' }], // servers query
        [{ id: 'server-1' }], // jellyfin servers query
        [{ id: 'user-1' }], // owners query (has owner)
        [{ id: 'user-1' }], // password users query
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/setup/status',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toEqual({
        needsSetup: false,
        hasServers: true,
        hasJellyfinServers: true,
        hasPasswordAuth: true,
        primaryAuthMethod: 'local',
      });
    });
    it('returns hasServers false when no servers configured', async () => {
      app = await buildTestApp();
      // Mock: no servers, no jellyfin servers, no owners, no password users
      mockDbSelectMultiple([
        [], // servers query (empty)
        [], // jellyfin servers query
        [], // owners query
        [], // password users query
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/setup/status',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toEqual({
        needsSetup: true,
        hasServers: false,
        hasJellyfinServers: false,
        hasPasswordAuth: false,
        primaryAuthMethod: 'local',
      });
    });
    it('returns hasPasswordAuth true when user has password set', async () => {
      app = await buildTestApp();
      // Mock: no servers, no jellyfin servers, owner exists, password user exists
      mockDbSelectMultiple([
        [], // servers query
        [], // jellyfin servers query
        [{ id: 'user-1' }], // owners query
        [{ id: 'user-1' }], // password users query (has password)
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/setup/status',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toEqual({
        needsSetup: false,
        hasServers: false,
        hasJellyfinServers: false,
        hasPasswordAuth: true,
        primaryAuthMethod: 'local',
      });
    });
    it('returns hasPasswordAuth false when no users have passwords', async () => {
      app = await buildTestApp();
      // Mock: servers exist, jellyfin servers exist, owner exists, no password users
      mockDbSelectMultiple([
        [{ id: 'server-1' }], // servers query
        [{ id: 'server-1' }], // jellyfin servers query
        [{ id: 'user-1' }], // owners query
        [], // password users query (empty)
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/setup/status',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toEqual({
        needsSetup: false,
        hasServers: true,
        hasJellyfinServers: true,
        hasPasswordAuth: false,
        primaryAuthMethod: 'local',
      });
    });
    it('handles fresh installation state correctly', async () => {
      app = await buildTestApp();
      // Mock: completely empty database
      mockDbSelectMultiple([
        [], // no servers
        [], // no jellyfin servers
        [], // no owners
        [], // no password users
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/setup/status',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toEqual({
        needsSetup: true,
        hasServers: false,
        hasJellyfinServers: false,
        hasPasswordAuth: false,
        primaryAuthMethod: 'local',
      });
    });
    it('handles fully configured state correctly', async () => {
      app = await buildTestApp();
      // Mock: fully configured installation
      mockDbSelectMultiple([
        [{ id: 'server-1' }, { id: 'server-2' }], // multiple servers
        [{ id: 'server-1' }], // jellyfin servers
        [{ id: 'owner-1' }], // owner exists
        [{ id: 'owner-1' }, { id: 'user-2' }], // multiple password users
      ]);
      const response = await app.inject({
        method: 'GET',
        url: '/setup/status',
      });
      expect(response.statusCode).toBe(200);
      const body = response.json();
      expect(body).toEqual({
        needsSetup: false,
        hasServers: true,
        hasJellyfinServers: true,
        hasPasswordAuth: true,
        primaryAuthMethod: 'local',
      });
    });
  });
});

View File

@@ -0,0 +1,769 @@
/**
* Violation routes integration tests
*
* Tests the API endpoints for violation operations:
* - GET /violations - List violations with pagination and filters
* - GET /violations/:id - Get a specific violation
* - PATCH /violations/:id - Acknowledge a violation
* - DELETE /violations/:id - Dismiss (delete) a violation
*/
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser, ViolationSeverity } from '@tracearr/shared';
// Mock the database module before importing routes.
// vitest hoists vi.mock() above the imports, so ../violations.js is loaded
// against this stub. All five query surfaces the violation routes touch are
// stubbed (select/insert/update/delete plus raw-SQL execute for counts).
vi.mock('../../db/client.js', () => ({
  db: {
    select: vi.fn(),
    insert: vi.fn(),
    update: vi.fn(),
    delete: vi.fn(),
    execute: vi.fn(),
  },
}));
// Import the mocked db and the routes
import { db } from '../../db/client.js';
import { violationRoutes } from '../violations.js';
/**
 * Build a test Fastify instance whose `authenticate` decorator simply
 * stamps the supplied user onto every request, bypassing real auth.
 */
async function buildTestApp(authUser: AuthUser): Promise<FastifyInstance> {
  const instance = Fastify({ logger: false });
  // @fastify/sensible supplies the HTTP error helpers used by the routes
  await instance.register(sensible);
  // Fake authentication: every request runs as `authUser`.
  // NOTE(review): `any` kept from the original; acceptable for a test stub.
  instance.decorate('authenticate', async (request: any) => {
    request.user = authUser;
  });
  await instance.register(violationRoutes, { prefix: '/violations' });
  return instance;
}
/**
 * Create a mock violation with joined data (as returned by routes).
 * Field groups mirror the route's joined SELECT — presumably rules,
 * serverUsers/users, servers, and sessions joins; verify against the route.
 */
interface MockViolationWithJoins {
  // Violation row
  id: string;
  // Rule join
  ruleId: string;
  ruleName: string;
  ruleType: string;
  // User join
  serverUserId: string;
  username: string;
  userThumb: string | null;
  identityName: string | null;
  // Server join
  serverId: string;
  serverName: string;
  // Session join (optional fields only appear on detail responses)
  sessionId: string;
  mediaTitle: string;
  severity: ViolationSeverity;
  data: Record<string, unknown>;
  createdAt: Date;
  acknowledgedAt: Date | null;
  ipAddress?: string;
  geoCity?: string | null;
  geoCountry?: string | null;
  playerName?: string | null;
  platform?: string | null;
}
/**
 * Factory for a violation row shaped like the routes' joined SELECT output.
 * Any field may be pinned via `overrides`. Defaults are applied with `??`
 * (not object spread) so a `null` override still falls back to the default
 * for fields whose default is non-null — matching the original semantics.
 */
function createTestViolation(
  overrides: Partial<MockViolationWithJoins> = {}
): MockViolationWithJoins {
  // Resolved up front so callers can pin the server id for access checks.
  const resolvedServerId = overrides.serverId ?? randomUUID();
  return {
    id: overrides.id ?? randomUUID(),
    ruleId: overrides.ruleId ?? randomUUID(),
    ruleName: overrides.ruleName ?? 'Test Rule',
    ruleType: overrides.ruleType ?? 'concurrent_streams',
    serverUserId: overrides.serverUserId ?? randomUUID(),
    username: overrides.username ?? 'testuser',
    userThumb: overrides.userThumb ?? null,
    identityName: overrides.identityName ?? null,
    serverId: resolvedServerId,
    serverName: overrides.serverName ?? 'Test Server',
    sessionId: overrides.sessionId ?? randomUUID(),
    mediaTitle: overrides.mediaTitle ?? 'Test Movie',
    severity: overrides.severity ?? 'warning',
    data: overrides.data ?? { maxStreams: 3, actualStreams: 4 },
    createdAt: overrides.createdAt ?? new Date(),
    acknowledgedAt: overrides.acknowledgedAt ?? null,
    ipAddress: overrides.ipAddress ?? '192.168.1.1',
    geoCity: overrides.geoCity ?? 'New York',
    geoCountry: overrides.geoCountry ?? 'US',
    playerName: overrides.playerName ?? 'Test Player',
    platform: overrides.platform ?? 'Windows',
  };
}
/**
 * Create a mock owner auth user with access to one random server.
 */
function createOwnerUser(): AuthUser {
  const owner: AuthUser = {
    userId: randomUUID(),
    username: 'owner',
    role: 'owner',
    serverIds: [randomUUID()],
  };
  return owner;
}
/**
 * Create a mock viewer (non-owner) auth user with access to one random server.
 */
function createViewerUser(): AuthUser {
  const viewer: AuthUser = {
    userId: randomUUID(),
    username: 'viewer',
    role: 'viewer',
    serverIds: [randomUUID()],
  };
  return viewer;
}
/**
 * Mock chain for the paginated violations list query with 5 innerJoins
 * (rules, serverUsers, users, servers, sessions):
 *   select().from().innerJoin()x5.where().orderBy().limit().offset()
 * The terminal .offset() resolves with the supplied rows.
 */
function createViolationSelectMock(resolvedValue: unknown) {
  // Innermost tail of the builder chain — offset() yields the rows.
  let level: Record<string, unknown> = {
    where: vi.fn().mockReturnValue({
      orderBy: vi.fn().mockReturnValue({
        limit: vi.fn().mockReturnValue({
          offset: vi.fn().mockResolvedValue(resolvedValue),
        }),
      }),
    }),
  };
  // Wrap the tail in the five innerJoin() hops, innermost first.
  for (let joins = 0; joins < 5; joins += 1) {
    level = { innerJoin: vi.fn().mockReturnValue(level) };
  }
  return { from: vi.fn().mockReturnValue(level) };
}
/**
 * Mock chain for single-violation queries (GET /:id) with 5 innerJoins
 * (rules, serverUsers, users, servers, sessions):
 *   select().from().innerJoin()x5.where().limit()
 * The terminal .limit() resolves with the supplied rows.
 */
function createSingleViolationSelectMock(resolvedValue: unknown) {
  // Innermost tail — limit() yields the row(s).
  let level: Record<string, unknown> = {
    where: vi.fn().mockReturnValue({
      limit: vi.fn().mockResolvedValue(resolvedValue),
    }),
  };
  // Wrap the tail in the five innerJoin() hops, innermost first.
  for (let joins = 0; joins < 5; joins += 1) {
    level = { innerJoin: vi.fn().mockReturnValue(level) };
  }
  return { from: vi.fn().mockReturnValue(level) };
}
/**
 * Mock for the violation existence check used by PATCH/DELETE.
 * Single serverUsers join (server-access check): from().innerJoin().where().limit()
 */
function createViolationExistsCheckMock(resolvedValue: unknown) {
  const tail = {
    where: vi.fn().mockReturnValue({
      limit: vi.fn().mockResolvedValue(resolvedValue),
    }),
  };
  return {
    from: vi.fn().mockReturnValue({
      innerJoin: vi.fn().mockReturnValue(tail),
    }),
  };
}
describe('Violation Routes', () => {
// Shared state for every test in this suite.
let app: FastifyInstance;
let mockDb: any;
beforeEach(() => {
  // Reset call history/implementations so expectations don't leak between tests.
  vi.clearAllMocks();
  mockDb = db as any;
});
afterEach(async () => {
  // Close the per-test Fastify instance if one was built.
  if (app) {
    await app.close();
  }
});
// GET /violations — paginated, filterable list. Row query goes through the
// 5-join select mock; the total comes from a separate raw-SQL count query.
describe('GET /violations', () => {
  it('should return list of violations for owner', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const testViolations = [
      createTestViolation({ severity: 'high' }),
      createTestViolation({ severity: 'warning' }),
      createTestViolation({ severity: 'low' }),
    ];
    // Mock the violations query (5 innerJoins)
    mockDb.select.mockReturnValueOnce(createViolationSelectMock(testViolations));
    // Mock the count query (uses db.execute with raw SQL)
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 3 }] });
    const response = await app.inject({
      method: 'GET',
      url: '/violations',
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.data).toHaveLength(3);
    expect(body.total).toBe(3);
    expect(body.page).toBe(1);
  });
  it('should apply default pagination', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    mockDb.select.mockReturnValueOnce(createViolationSelectMock([]));
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 0 }] });
    const response = await app.inject({
      method: 'GET',
      url: '/violations',
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.page).toBe(1);
    expect(body.pageSize).toBe(20); // Schema default is 20
  });
  it('should accept pagination parameters', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    mockDb.select.mockReturnValueOnce(createViolationSelectMock([]));
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 100 }] });
    const response = await app.inject({
      method: 'GET',
      url: '/violations?page=3&pageSize=25',
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.page).toBe(3);
    expect(body.pageSize).toBe(25);
    // 100 rows at 25 per page
    expect(body.totalPages).toBe(4);
  });
  it('should filter by severity', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const highSeverityViolations = [
      createTestViolation({ severity: 'high' }),
    ];
    mockDb.select.mockReturnValueOnce(createViolationSelectMock(highSeverityViolations));
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 1 }] });
    const response = await app.inject({
      method: 'GET',
      url: '/violations?severity=high',
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.data).toHaveLength(1);
    expect(body.data[0].severity).toBe('high');
  });
  it('should filter by acknowledged status', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const unacknowledgedViolations = [
      createTestViolation({ acknowledgedAt: null }),
    ];
    mockDb.select.mockReturnValueOnce(createViolationSelectMock(unacknowledgedViolations));
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 1 }] });
    const response = await app.inject({
      method: 'GET',
      url: '/violations?acknowledged=false',
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.data).toHaveLength(1);
    expect(body.data[0].acknowledgedAt).toBeNull();
  });
  it('should filter by serverUserId', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const serverUserId = randomUUID();
    const userViolations = [createTestViolation({ serverUserId })];
    mockDb.select.mockReturnValueOnce(createViolationSelectMock(userViolations));
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 1 }] });
    const response = await app.inject({
      method: 'GET',
      url: `/violations?serverUserId=${serverUserId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.data).toHaveLength(1);
  });
  it('should filter by ruleId', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const ruleId = randomUUID();
    const ruleViolations = [createTestViolation({ ruleId })];
    mockDb.select.mockReturnValueOnce(createViolationSelectMock(ruleViolations));
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 1 }] });
    const response = await app.inject({
      method: 'GET',
      url: `/violations?ruleId=${ruleId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.data).toHaveLength(1);
  });
  it('should reject invalid severity filter', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    // 'critical' is not a valid ViolationSeverity value
    const response = await app.inject({
      method: 'GET',
      url: '/violations?severity=critical',
    });
    expect(response.statusCode).toBe(400);
  });
  it('should reject pageSize over 100', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'GET',
      url: '/violations?pageSize=101',
    });
    expect(response.statusCode).toBe(400);
  });
  it('should return empty data for viewers with no server access', async () => {
    // Viewer with empty serverIds returns empty result without querying
    const viewerUser: AuthUser = {
      userId: randomUUID(),
      username: 'viewer',
      role: 'viewer',
      serverIds: [],
    };
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'GET',
      url: '/violations',
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.data).toHaveLength(0);
    expect(body.total).toBe(0);
  });
});
// GET /violations/:id — single violation with joined rule/user/server/session data.
describe('GET /violations/:id', () => {
  it('should return a specific violation', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const violationId = randomUUID();
    const testViolation = createTestViolation({ id: violationId });
    mockDb.select.mockReturnValue(createSingleViolationSelectMock([testViolation]));
    const response = await app.inject({
      method: 'GET',
      url: `/violations/${violationId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.id).toBe(violationId);
    // Joined columns from the factory defaults
    expect(body.ruleName).toBe('Test Rule');
    expect(body.username).toBe('testuser');
    expect(body.serverName).toBe('Test Server');
  });
  it('should return 404 for non-existent violation', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    mockDb.select.mockReturnValue(createSingleViolationSelectMock([]));
    const response = await app.inject({
      method: 'GET',
      url: `/violations/${randomUUID()}`,
    });
    expect(response.statusCode).toBe(404);
  });
  it('should reject invalid UUID', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'GET',
      url: '/violations/not-a-uuid',
    });
    expect(response.statusCode).toBe(400);
  });
  it('should return violation with session details', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const violationId = randomUUID();
    const testViolation = createTestViolation({
      id: violationId,
      ipAddress: '10.0.0.1',
      geoCity: 'Los Angeles',
      geoCountry: 'US',
      playerName: 'Plex Player',
      platform: 'macOS',
    });
    mockDb.select.mockReturnValue(createSingleViolationSelectMock([testViolation]));
    const response = await app.inject({
      method: 'GET',
      url: `/violations/${violationId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.ipAddress).toBe('10.0.0.1');
    expect(body.geoCity).toBe('Los Angeles');
    expect(body.geoCountry).toBe('US');
    expect(body.playerName).toBe('Plex Player');
    expect(body.platform).toBe('macOS');
  });
});
// PATCH /violations/:id — acknowledge a violation (owner-only).
describe('PATCH /violations/:id', () => {
  it('should acknowledge violation for owner', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const violationId = randomUUID();
    const serverId = ownerUser.serverIds[0];
    const acknowledgedAt = new Date();
    // Violation exists check with serverUsers join
    mockDb.select.mockReturnValue(createViolationExistsCheckMock([{ id: violationId, serverId }]));
    // Update
    mockDb.update.mockReturnValue({
      set: vi.fn().mockReturnValue({
        where: vi.fn().mockReturnValue({
          returning: vi.fn().mockResolvedValue([{ id: violationId, acknowledgedAt }]),
        }),
      }),
    });
    const response = await app.inject({
      method: 'PATCH',
      url: `/violations/${violationId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.success).toBe(true);
    expect(body.acknowledgedAt).toBeDefined();
  });
  it('should reject acknowledgment for non-owner', async () => {
    const guestUser = createViewerUser();
    app = await buildTestApp(guestUser);
    const response = await app.inject({
      method: 'PATCH',
      url: `/violations/${randomUUID()}`,
    });
    expect(response.statusCode).toBe(403);
  });
  it('should return 404 for non-existent violation', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    mockDb.select.mockReturnValue(createViolationExistsCheckMock([]));
    const response = await app.inject({
      method: 'PATCH',
      url: `/violations/${randomUUID()}`,
    });
    expect(response.statusCode).toBe(404);
  });
  it('should reject invalid UUID', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'PATCH',
      url: '/violations/not-a-uuid',
    });
    expect(response.statusCode).toBe(400);
  });
  it('should handle update failure gracefully', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const violationId = randomUUID();
    const serverId = ownerUser.serverIds[0];
    // Violation exists check
    mockDb.select.mockReturnValue(createViolationExistsCheckMock([{ id: violationId, serverId }]));
    // Update returns empty (failure)
    mockDb.update.mockReturnValue({
      set: vi.fn().mockReturnValue({
        where: vi.fn().mockReturnValue({
          returning: vi.fn().mockResolvedValue([]),
        }),
      }),
    });
    const response = await app.inject({
      method: 'PATCH',
      url: `/violations/${violationId}`,
    });
    expect(response.statusCode).toBe(500);
  });
});
// DELETE /violations/:id — dismiss a violation (owner-only). The route runs a
// transaction that deletes the row and restores the user's trust score.
describe('DELETE /violations/:id', () => {
  it('should delete violation for owner', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const violationId = randomUUID();
    const serverUserId = randomUUID();
    const serverId = ownerUser.serverIds[0];
    // Violation exists check with serverUsers join - now includes severity and serverUserId
    mockDb.select.mockReturnValue(createViolationExistsCheckMock([{
      id: violationId,
      severity: 'warning',
      serverUserId,
      serverId,
    }]));
    // Mock transaction for delete + trust score restore
    mockDb.transaction = vi.fn().mockImplementation(async (callback: (tx: any) => Promise<void>) => {
      const txMock = {
        delete: vi.fn().mockReturnValue({
          where: vi.fn().mockResolvedValue(undefined),
        }),
        update: vi.fn().mockReturnValue({
          set: vi.fn().mockReturnValue({
            where: vi.fn().mockResolvedValue(undefined),
          }),
        }),
      };
      return callback(txMock);
    });
    const response = await app.inject({
      method: 'DELETE',
      url: `/violations/${violationId}`,
    });
    expect(response.statusCode).toBe(200);
    const body = JSON.parse(response.body);
    expect(body.success).toBe(true);
  });
  it('should restore trust score when deleting violation', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const violationId = randomUUID();
    const serverUserId = randomUUID();
    const serverId = ownerUser.serverIds[0];
    // Test with high severity (penalty: 20)
    mockDb.select.mockReturnValue(createViolationExistsCheckMock([{
      id: violationId,
      severity: 'high',
      serverUserId,
      serverId,
    }]));
    // Track transaction calls
    const deleteMock = vi.fn().mockReturnValue({
      where: vi.fn().mockResolvedValue(undefined),
    });
    const updateMock = vi.fn().mockReturnValue({
      set: vi.fn().mockReturnValue({
        where: vi.fn().mockResolvedValue(undefined),
      }),
    });
    mockDb.transaction = vi.fn().mockImplementation(async (callback: (tx: any) => Promise<void>) => {
      const txMock = {
        delete: deleteMock,
        update: updateMock,
      };
      return callback(txMock);
    });
    const response = await app.inject({
      method: 'DELETE',
      url: `/violations/${violationId}`,
    });
    expect(response.statusCode).toBe(200);
    // Verify transaction was called
    expect(mockDb.transaction).toHaveBeenCalledTimes(1);
    // Verify delete and update were called in transaction
    expect(deleteMock).toHaveBeenCalled();
    expect(updateMock).toHaveBeenCalled();
  });
  it('should reject delete for non-owner', async () => {
    const guestUser = createViewerUser();
    app = await buildTestApp(guestUser);
    const response = await app.inject({
      method: 'DELETE',
      url: `/violations/${randomUUID()}`,
    });
    expect(response.statusCode).toBe(403);
  });
  it('should return 404 for non-existent violation', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    mockDb.select.mockReturnValue(createViolationExistsCheckMock([]));
    const response = await app.inject({
      method: 'DELETE',
      url: `/violations/${randomUUID()}`,
    });
    expect(response.statusCode).toBe(404);
  });
  it('should reject invalid UUID', async () => {
    const ownerUser = createOwnerUser();
    app = await buildTestApp(ownerUser);
    const response = await app.inject({
      method: 'DELETE',
      url: '/violations/not-a-uuid',
    });
    expect(response.statusCode).toBe(400);
  });
});
// Role-based visibility of the violations list.
describe('Authorization', () => {
  it('should allow owner to see all violations', async () => {
    const owner = createOwnerUser();
    app = await buildTestApp(owner);
    // Two violations from different server users; an owner sees them all.
    const violations = [
      createTestViolation({ serverUserId: randomUUID() }),
      createTestViolation({ serverUserId: randomUUID() }),
    ];
    mockDb.select.mockReturnValueOnce(createViolationSelectMock(violations));
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 2 }] });
    const res = await app.inject({ method: 'GET', url: '/violations' });
    expect(res.statusCode).toBe(200);
    expect(JSON.parse(res.body).data).toHaveLength(2);
  });
  it('should filter violations by server access for viewers', async () => {
    const accessibleServerId = randomUUID();
    const viewer: AuthUser = {
      userId: randomUUID(),
      username: 'viewer',
      role: 'viewer',
      serverIds: [accessibleServerId],
    };
    app = await buildTestApp(viewer);
    // Only violations from the viewer's accessible server come back.
    mockDb.select.mockReturnValueOnce(
      createViolationSelectMock([createTestViolation({ serverId: accessibleServerId })])
    );
    mockDb.execute.mockResolvedValueOnce({ rows: [{ count: 1 }] });
    const res = await app.inject({ method: 'GET', url: '/violations' });
    expect(res.statusCode).toBe(200);
    const body = JSON.parse(res.body);
    expect(body.data).toHaveLength(1);
    expect(body.data[0].user.serverId).toBe(accessibleServerId);
  });
});
});

View File

@@ -0,0 +1,379 @@
/**
* Auth Security Tests
*
* Tests to ensure authentication and authorization cannot be bypassed.
* Covers: token validation, privilege escalation, injection attacks.
*/
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
import type { FastifyInstance } from 'fastify';
import {
createTestApp,
generateTestToken,
createOwnerPayload,
createViewerPayload,
generateExpiredToken,
generateTamperedToken,
generateWrongSecretToken,
INJECTION_PAYLOADS,
} from '../test/helpers.js';
describe('Auth Security', () => {
let app: FastifyInstance;
// One shared app for the whole suite: three throwaway routes exercise the
// authenticate/requireOwner decorators and raw body handling.
beforeAll(async () => {
  app = await createTestApp();
  // Add a protected test route that requires authentication
  app.get('/test/protected', { preHandler: [app.authenticate] }, async (request) => {
    return { user: request.user, message: 'authenticated' };
  });
  // Add an owner-only test route
  app.get('/test/owner-only', { preHandler: [app.requireOwner] }, async (request) => {
    return { user: request.user, message: 'owner access granted' };
  });
  // Add a route that echoes back user input (for injection testing)
  app.post('/test/echo', async (request) => {
    const body = request.body as { input?: string };
    return { received: body.input };
  });
  await app.ready();
});
afterAll(async () => {
  // Release the server's resources once all tests in this file are done.
  await app.close();
});
// Every malformed, expired, forged, or absent token must be rejected with 401;
// only a token signed with the app's secret is accepted.
describe('Token Validation', () => {
  /** GET the protected route, optionally with an Authorization header value. */
  const requestProtected = (authorization?: string) =>
    app.inject({
      method: 'GET',
      url: '/test/protected',
      ...(authorization === undefined ? {} : { headers: { Authorization: authorization } }),
    });

  it('should reject requests with no token', async () => {
    const res = await requestProtected();
    expect(res.statusCode).toBe(401);
    expect(res.json().message).toContain('Invalid or expired token');
  });
  it('should reject requests with empty Authorization header', async () => {
    expect((await requestProtected('')).statusCode).toBe(401);
  });
  it('should reject requests with malformed Authorization header', async () => {
    expect((await requestProtected('not-a-bearer-token')).statusCode).toBe(401);
  });
  it('should reject requests with Bearer but no token', async () => {
    expect((await requestProtected('Bearer ')).statusCode).toBe(401);
  });
  it('should reject expired tokens', async () => {
    const expired = generateExpiredToken(app, createOwnerPayload());
    const res = await requestProtected(`Bearer ${expired}`);
    expect(res.statusCode).toBe(401);
    expect(res.json().message).toContain('Invalid or expired token');
  });
  it('should reject tampered tokens', async () => {
    // Payload altered while keeping the original signature -> signature mismatch.
    const tampered = generateTamperedToken(generateTestToken(app, createViewerPayload()));
    expect((await requestProtected(`Bearer ${tampered}`)).statusCode).toBe(401);
  });
  it('should reject tokens signed with wrong secret', async () => {
    const forged = generateWrongSecretToken(createOwnerPayload());
    expect((await requestProtected(`Bearer ${forged}`)).statusCode).toBe(401);
  });
  it('should reject random garbage tokens', async () => {
    // Non-JWTs, structurally JWT-like junk, raw JSON, and an alg=none token.
    const garbageTokens = [
      'not.a.jwt',
      'aaa.bbb.ccc',
      Buffer.from('garbage').toString('base64'),
      '{"userId":"hack"}',
      'eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJ1c2VySWQiOiJoYWNrIn0.',
    ];
    for (const garbage of garbageTokens) {
      expect((await requestProtected(`Bearer ${garbage}`)).statusCode).toBe(401);
    }
  });
  it('should accept valid tokens', async () => {
    const res = await requestProtected(`Bearer ${generateTestToken(app, createOwnerPayload())}`);
    expect(res.statusCode).toBe(200);
    expect(res.json().message).toBe('authenticated');
  });
  it('should preserve user data from valid token', async () => {
    // Claims embedded at signing time must round-trip onto request.user.
    const token = generateTestToken(app, createOwnerPayload({ username: 'securitytest' }));
    const res = await requestProtected(`Bearer ${token}`);
    expect(res.statusCode).toBe(200);
    const { user } = res.json();
    expect(user.username).toBe('securitytest');
    expect(user.role).toBe('owner');
  });
});
// Role checks on routes guarded by requireOwner.
describe('Authorization - Owner-Only Routes', () => {
  /** GET the owner-only route; when a token is given it is sent as a Bearer header. */
  const requestOwnerOnly = (token?: string) =>
    app.inject({
      method: 'GET',
      url: '/test/owner-only',
      ...(token === undefined ? {} : { headers: { Authorization: `Bearer ${token}` } }),
    });

  it('should reject guest users on owner-only routes', async () => {
    const res = await requestOwnerOnly(generateTestToken(app, createViewerPayload()));
    expect(res.statusCode).toBe(403);
    expect(res.json().message).toContain('Owner access required');
  });
  it('should accept owner users on owner-only routes', async () => {
    const res = await requestOwnerOnly(generateTestToken(app, createOwnerPayload()));
    expect(res.statusCode).toBe(200);
    expect(res.json().message).toBe('owner access granted');
  });
  it('should reject unauthenticated users on owner-only routes with 401', async () => {
    // Authentication runs before authorization, so the failure is 401, not 403.
    expect((await requestOwnerOnly()).statusCode).toBe(401);
  });
  it('should prevent role escalation via token manipulation', async () => {
    const guestToken = generateTestToken(app, createViewerPayload());
    // Two tampering attempts: payload mutation that keeps the old signature,
    // and a naive substring rewrite of the encoded token.
    const tamperedTokens = [
      generateTamperedToken(guestToken),
      guestToken.replace('guest', 'owner'),
    ];
    for (const tampered of tamperedTokens) {
      const res = await requestOwnerOnly(tampered);
      // Either the token is rejected outright (401) or the role check fails (403).
      expect([401, 403]).toContain(res.statusCode);
    }
  });
});
// Hostile input must never crash the server; the echo route should round-trip
// it as inert text.
describe('Injection Prevention', () => {
  /** POST an arbitrary string to the echo route. */
  const postEcho = (input: string) =>
    app.inject({ method: 'POST', url: '/test/echo', payload: { input } });

  it('should safely handle SQL injection payloads in input', async () => {
    for (const attempt of INJECTION_PAYLOADS.sqlInjection) {
      const res = await postEcho(attempt);
      // Server survives and echoes the payload back as a plain string.
      expect(res.statusCode).toBe(200);
      expect(res.json().received).toBe(attempt);
    }
  });
  it('should safely handle XSS payloads in input', async () => {
    for (const attempt of INJECTION_PAYLOADS.xss) {
      const res = await postEcho(attempt);
      expect(res.statusCode).toBe(200);
      // Escaping is primarily the frontend's job; the API just round-trips safely.
      expect(res.json().received).toBe(attempt);
    }
  });
  it('should safely handle path traversal payloads', async () => {
    for (const attempt of INJECTION_PAYLOADS.pathTraversal) {
      expect((await postEcho(attempt)).statusCode).toBe(200);
    }
  });
  it('should handle extremely long input without crashing', async () => {
    const res = await postEcho('A'.repeat(100000));
    // Accepting (200) or rejecting (400/413) are both fine; crashing is not.
    expect([200, 413, 400]).toContain(res.statusCode);
  });
  it('should handle null bytes in input', async () => {
    expect((await postEcho('test\x00injection')).statusCode).toBe(200);
  });
  it('should handle unicode edge cases', async () => {
    const unicodePayloads = [
      '\u202E\u0041\u0042\u0043', // Right-to-left override
      '\uFEFF\uFEFF\uFEFF', // BOM characters
      '𝕳𝖊𝖑𝖑𝖔', // Mathematical symbols
      '❤️💻🔒', // Emoji
    ];
    for (const attempt of unicodePayloads) {
      expect((await postEcho(attempt)).statusCode).toBe(200);
    }
  });
});
// Error responses must not leak internals, and odd request framing must not crash.
describe('Header Security', () => {
  it('should not expose sensitive info in error responses', async () => {
    const res = await app.inject({
      method: 'GET',
      url: '/test/protected',
    });
    // Serialize the error body ONCE (the original re-stringified it for every
    // assertion) and check it leaks neither stack frames, source paths, nor secrets.
    const serialized = JSON.stringify(res.json());
    expect(serialized).not.toContain('node_modules');
    expect(serialized).not.toContain('at Object');
    expect(serialized).not.toContain('.ts:');
    expect(serialized).not.toContain('JWT_SECRET');
  });
  it('should handle missing Content-Type gracefully', async () => {
    const res = await app.inject({
      method: 'POST',
      url: '/test/echo',
      payload: '{"input":"test"}',
      // Intentionally no content-type header.
    });
    // Should handle gracefully (parse, bad-request, or unsupported-media), not crash.
    expect([200, 400, 415]).toContain(res.statusCode);
  });
});
describe('Token Expiration Edge Cases', () => {
  it('should handle tokens that expire during request', async () => {
    // Mint a token that lives for only one second.
    const shortLivedToken = generateTestToken(app, createOwnerPayload(), { expiresIn: '1s' });
    const headers = { Authorization: `Bearer ${shortLivedToken}` };
    // While still fresh the token is accepted…
    const before = await app.inject({ method: 'GET', url: '/test/protected', headers });
    expect(before.statusCode).toBe(200);
    // …then a real-clock wait pushes it past its expiry…
    await new Promise((resolve) => setTimeout(resolve, 1500));
    // …after which the very same token is rejected.
    const after = await app.inject({ method: 'GET', url: '/test/protected', headers });
    expect(after.statusCode).toBe(401);
  });
});
});

View File

@@ -0,0 +1,389 @@
/**
* Plex auth routes tests
*
* Tests the API endpoints for Plex server discovery and connection:
* - GET /plex/available-servers - Discover available Plex servers
* - POST /plex/add-server - Add an additional Plex server
*/
import { describe, it, expect, afterEach, vi } from 'vitest';
import Fastify, { type FastifyInstance } from 'fastify';
import sensible from '@fastify/sensible';
import { randomUUID } from 'node:crypto';
import type { AuthUser } from '@tracearr/shared';
// Mock dependencies before imports
vi.mock('../../../db/client.js', () => ({
db: {
select: vi.fn(),
insert: vi.fn(),
update: vi.fn(),
},
}));
vi.mock('../../../utils/crypto.js', () => ({
encrypt: vi.fn((token: string) => `encrypted_${token}`),
decrypt: vi.fn((token: string) => token.replace('encrypted_', '')),
}));
vi.mock('../../../services/mediaServer/index.js', () => ({
PlexClient: {
getServers: vi.fn(),
verifyServerAdmin: vi.fn(),
},
}));
vi.mock('../../../services/sync.js', () => ({
syncServer: vi.fn(),
}));
// Import mocked modules
import { db } from '../../../db/client.js';
import { PlexClient } from '../../../services/mediaServer/index.js';
import { syncServer } from '../../../services/sync.js';
import { plexRoutes } from '../plex.js';
// Mock global fetch for connection testing
const mockFetch = vi.fn();
vi.stubGlobal('fetch', mockFetch);
/** Stub db.select() for queries shaped select().from(...).where(...) -> rows. */
function mockDbSelectWhere(result: unknown[]) {
  const queryChain = {
    from: vi.fn().mockReturnThis(),
    // The chain terminates at .where(), which resolves to the supplied rows.
    where: vi.fn().mockResolvedValue(result),
  };
  vi.mocked(db.select).mockReturnValue(queryChain as never);
  return queryChain;
}
/** Stub db.select() for queries shaped select().from(...).where(...).limit(...) -> rows. */
function mockDbSelectLimit(result: unknown[]) {
  const queryChain = {
    from: vi.fn().mockReturnThis(),
    where: vi.fn().mockReturnThis(),
    // The chain terminates at .limit(), which resolves to the supplied rows.
    limit: vi.fn().mockResolvedValue(result),
  };
  vi.mocked(db.select).mockReturnValue(queryChain as never);
  return queryChain;
}
/** Stub db.insert() so insert(...).values(...).returning() resolves to the given rows. */
function mockDbInsert(result: unknown[]) {
  const insertChain = {
    values: vi.fn().mockReturnThis(),
    returning: vi.fn().mockResolvedValue(result),
  };
  vi.mocked(db.insert).mockReturnValue(insertChain as never);
  return insertChain;
}
/** Stub db.update() so update(...).set(...).where(...) resolves. Currently unused
 *  (underscore-prefixed) but kept for tests that exercise update paths. */
function _mockDbUpdate() {
  const updateChain = {
    set: vi.fn().mockReturnThis(),
    where: vi.fn().mockResolvedValue(undefined),
  };
  vi.mocked(db.update).mockReturnValue(updateChain as never);
  return updateChain;
}
/** Build a Fastify instance with the plex routes and a stubbed auth decorator. */
async function buildTestApp(authUser: AuthUser): Promise<FastifyInstance> {
  const testApp = Fastify({ logger: false });
  await testApp.register(sensible);
  // Bypass real JWT verification: attach the supplied user to every request.
  testApp.decorate('authenticate', async (request: unknown) => {
    (request as { user: AuthUser }).user = authUser;
  });
  await testApp.register(plexRoutes);
  return testApp;
}
// Authenticated-user fixtures: an owner (may manage servers) and a viewer (may not).
const ownerUser: AuthUser = {
  userId: randomUUID(),
  username: 'admin',
  role: 'owner',
  serverIds: [randomUUID()],
};
const viewerUser: AuthUser = {
  userId: randomUUID(),
  username: 'viewer',
  role: 'viewer',
  serverIds: [randomUUID()],
};
// A Plex server row as stored in the DB; token is in the mocked "encrypted_" form
// produced by the crypto mock above.
const mockExistingServer = {
  id: randomUUID(),
  name: 'Existing Plex Server',
  type: 'plex' as const,
  url: 'http://localhost:32400',
  token: 'encrypted_test-token',
  machineIdentifier: 'existing-machine-id',
  createdAt: new Date(),
  updatedAt: new Date(),
};
// A server as reported by plex.tv discovery (PlexClient.getServers shape), with
// one local and one remote connection candidate.
const mockPlexServer = {
  name: 'New Plex Server',
  product: 'Plex Media Server',
  platform: 'Linux',
  productVersion: '1.40.0',
  clientIdentifier: 'new-machine-id',
  owned: true,
  accessToken: 'server-access-token',
  publicAddress: '203.0.113.1',
  connections: [
    { protocol: 'http', uri: 'http://192.168.1.100:32400', local: true, address: '192.168.1.100', port: 32400 },
    { protocol: 'https', uri: 'https://plex.example.com:32400', local: false, address: 'plex.example.com', port: 32400 },
  ],
};
describe('Plex Auth Routes', () => {
let app: FastifyInstance;
afterEach(async () => {
await app?.close();
vi.clearAllMocks();
});
// GET /plex/available-servers — discovery of owned Plex servers not yet connected.
describe('GET /plex/available-servers', () => {
  it('returns 403 for non-owner users', async () => {
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'GET',
      url: '/plex/available-servers',
    });
    expect(response.statusCode).toBe(403);
  });
  it('returns hasPlexToken: false when no Plex servers connected', async () => {
    app = await buildTestApp(ownerUser);
    // No stored Plex servers -> no token available to query plex.tv with.
    mockDbSelectWhere([]);
    const response = await app.inject({
      method: 'GET',
      url: '/plex/available-servers',
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.hasPlexToken).toBe(false);
    expect(body.servers).toEqual([]);
  });
  it('returns empty servers when all owned servers are connected', async () => {
    app = await buildTestApp(ownerUser);
    // First call returns existing servers
    mockDbSelectWhere([mockExistingServer]);
    // Mock PlexClient.getServers to return only the existing server, matched by
    // clientIdentifier === machineIdentifier, so nothing new is offered.
    vi.mocked(PlexClient.getServers).mockResolvedValue([
      {
        ...mockPlexServer,
        clientIdentifier: mockExistingServer.machineIdentifier,
      },
    ]);
    const response = await app.inject({
      method: 'GET',
      url: '/plex/available-servers',
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.hasPlexToken).toBe(true);
    expect(body.servers).toEqual([]);
  });
  it('returns available servers with connection test results', async () => {
    app = await buildTestApp(ownerUser);
    mockDbSelectWhere([mockExistingServer]);
    // Return a new server not yet connected
    vi.mocked(PlexClient.getServers).mockResolvedValue([mockPlexServer]);
    // Mock fetch for connection testing - first succeeds, second fails
    mockFetch
      .mockResolvedValueOnce({ ok: true }) // Local connection succeeds
      .mockRejectedValueOnce(new Error('timeout')); // Remote connection fails
    const response = await app.inject({
      method: 'GET',
      url: '/plex/available-servers',
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.hasPlexToken).toBe(true);
    expect(body.servers).toHaveLength(1);
    expect(body.servers[0].name).toBe('New Plex Server');
    expect(body.servers[0].clientIdentifier).toBe('new-machine-id');
    expect(body.servers[0].connections).toHaveLength(2);
    // First connection should be reachable
    expect(body.servers[0].connections[0].reachable).toBe(true);
    // Second connection should be unreachable
    expect(body.servers[0].connections[1].reachable).toBe(false);
  });
});
// POST /plex/add-server — connecting an additional Plex server. The route makes
// up to three select(...).limit() queries (existing token, machineIdentifier
// duplicate, URL duplicate), so the mocks resolve limit() in that order.
describe('POST /plex/add-server', () => {
  it('returns 403 for non-owner users', async () => {
    app = await buildTestApp(viewerUser);
    const response = await app.inject({
      method: 'POST',
      url: '/plex/add-server',
      payload: {
        serverUri: 'http://192.168.1.100:32400',
        serverName: 'New Server',
        clientIdentifier: 'new-machine-id',
      },
    });
    expect(response.statusCode).toBe(403);
  });
  it('returns 400 when no Plex servers connected', async () => {
    app = await buildTestApp(ownerUser);
    // Mock the DB query with limit() returning empty — no token to reuse.
    mockDbSelectLimit([]);
    const response = await app.inject({
      method: 'POST',
      url: '/plex/add-server',
      payload: {
        serverUri: 'http://192.168.1.100:32400',
        serverName: 'New Server',
        clientIdentifier: 'new-machine-id',
      },
    });
    expect(response.statusCode).toBe(400);
    const body = response.json();
    expect(body.message).toContain('No Plex servers connected');
  });
  it('returns 409 when server is already connected', async () => {
    app = await buildTestApp(ownerUser);
    // Mock all three limit() calls:
    // 1. Get existing Plex server (has token)
    // 2. Check machineIdentifier duplicate (found - conflict!)
    const selectMock = {
      from: vi.fn().mockReturnThis(),
      where: vi.fn().mockReturnThis(),
      limit: vi.fn()
        .mockResolvedValueOnce([{ token: mockExistingServer.token }]) // First - get token
        .mockResolvedValueOnce([{ id: mockExistingServer.id }]) // Second - duplicate found
    };
    vi.mocked(db.select).mockReturnValue(selectMock as never);
    const response = await app.inject({
      method: 'POST',
      url: '/plex/add-server',
      payload: {
        serverUri: 'http://192.168.1.100:32400',
        serverName: 'New Server',
        clientIdentifier: mockExistingServer.machineIdentifier,
      },
    });
    expect(response.statusCode).toBe(409);
    const body = response.json();
    expect(body.message).toContain('already connected');
  });
  it('successfully adds a new server', async () => {
    app = await buildTestApp(ownerUser);
    const newServerId = randomUUID();
    // Row the insert mock will "return" for the newly connected server.
    const newServer = {
      id: newServerId,
      name: 'New Server',
      type: 'plex',
      url: 'http://192.168.1.100:32400',
      token: 'encrypted_test-token',
      machineIdentifier: 'new-machine-id',
      createdAt: new Date(),
      updatedAt: new Date(),
    };
    // Mock all three limit() calls:
    // 1. Get existing Plex server (has token)
    // 2. Check machineIdentifier duplicate (not found)
    // 3. Check URL duplicate (not found)
    const selectMock = {
      from: vi.fn().mockReturnThis(),
      where: vi.fn().mockReturnThis(),
      limit: vi.fn()
        .mockResolvedValueOnce([{ token: mockExistingServer.token }]) // First - get token
        .mockResolvedValueOnce([]) // Second - no machineIdentifier duplicate
        .mockResolvedValueOnce([]) // Third - no URL duplicate
    };
    vi.mocked(db.select).mockReturnValue(selectMock as never);
    // Mock admin verification
    vi.mocked(PlexClient.verifyServerAdmin).mockResolvedValue(true);
    // Mock insert
    mockDbInsert([newServer]);
    // Mock sync — the route kicks off a background sync after adding the server.
    vi.mocked(syncServer).mockResolvedValue({ usersAdded: 5, usersUpdated: 0, librariesSynced: 3, errors: [] });
    const response = await app.inject({
      method: 'POST',
      url: '/plex/add-server',
      payload: {
        serverUri: 'http://192.168.1.100:32400',
        serverName: 'New Server',
        clientIdentifier: 'new-machine-id',
      },
    });
    expect(response.statusCode).toBe(200);
    const body = response.json();
    expect(body.server.id).toBe(newServerId);
    expect(body.success).toBe(true);
  });
  it('returns 403 when not admin on server', async () => {
    app = await buildTestApp(ownerUser);
    // Mock all three limit() calls
    const selectMock = {
      from: vi.fn().mockReturnThis(),
      where: vi.fn().mockReturnThis(),
      limit: vi.fn()
        .mockResolvedValueOnce([{ token: mockExistingServer.token }]) // Get token
        .mockResolvedValueOnce([]) // No machineIdentifier duplicate
        .mockResolvedValueOnce([]) // No URL duplicate
    };
    vi.mocked(db.select).mockReturnValue(selectMock as never);
    // Mock admin verification - not admin
    vi.mocked(PlexClient.verifyServerAdmin).mockResolvedValue(false);
    const response = await app.inject({
      method: 'POST',
      url: '/plex/add-server',
      payload: {
        serverUri: 'http://192.168.1.100:32400',
        serverName: 'New Server',
        clientIdentifier: 'new-machine-id',
      },
    });
    expect(response.statusCode).toBe(403);
    const body = response.json();
    expect(body.message).toContain('admin');
  });
});
});

View File

@@ -0,0 +1,143 @@
/**
* Auth Route Utilities Tests
*
* Tests pure utility functions from routes/auth/utils.ts:
* - generateRefreshToken: Generate random refresh tokens
* - hashRefreshToken: Hash tokens for secure storage
* - generateTempToken: Generate temporary OAuth tokens
*/
import { describe, it, expect } from 'vitest';
import {
generateRefreshToken,
hashRefreshToken,
generateTempToken,
REFRESH_TOKEN_PREFIX,
PLEX_TEMP_TOKEN_PREFIX,
REFRESH_TOKEN_TTL,
PLEX_TEMP_TOKEN_TTL,
} from '../utils.js';
describe('generateRefreshToken', () => {
  it('should generate a 64 character hex string', () => {
    const token = generateRefreshToken();
    // 32 random bytes hex-encoded -> 64 lowercase hex characters.
    expect(token).toHaveLength(64);
    expect(token).toMatch(/^[a-f0-9]+$/);
  });
  it('should generate unique tokens each call', () => {
    const first = generateRefreshToken();
    const second = generateRefreshToken();
    const third = generateRefreshToken();
    expect(first).not.toBe(second);
    expect(second).not.toBe(third);
    expect(first).not.toBe(third);
  });
  it('should generate cryptographically random tokens', () => {
    // 100 draws with zero collisions.
    const seen = new Set(Array.from({ length: 100 }, () => generateRefreshToken()));
    expect(seen.size).toBe(100);
  });
});
describe('hashRefreshToken', () => {
  it('should return a 64 character SHA-256 hex hash', () => {
    const hash = hashRefreshToken('test-token');
    expect(hash).toHaveLength(64);
    expect(hash).toMatch(/^[a-f0-9]+$/);
  });
  it('should produce consistent hashes for the same input', () => {
    // Determinism is required so stored hashes can be matched on lookup.
    const token = 'my-refresh-token';
    const hash1 = hashRefreshToken(token);
    const hash2 = hashRefreshToken(token);
    expect(hash1).toBe(hash2);
  });
  it('should produce different hashes for different inputs', () => {
    const hash1 = hashRefreshToken('token-1');
    const hash2 = hashRefreshToken('token-2');
    expect(hash1).not.toBe(hash2);
  });
  it('should hash empty string without error', () => {
    const hash = hashRefreshToken('');
    expect(hash).toHaveLength(64);
    // SHA-256 of empty string
    expect(hash).toBe('e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855');
  });
  it('should be one-way (cannot derive original token)', () => {
    const token = generateRefreshToken();
    const hash = hashRefreshToken(token);
    // Hash should not contain the token
    expect(hash).not.toContain(token);
    // Same length by coincidence of encoding (32-byte token and 32-byte digest,
    // both hex), but the content is unrelated to the input.
    expect(hash.length).toBe(token.length); // Both 64 but different content
  });
});
describe('generateTempToken', () => {
  it('should generate a 48 character hex string', () => {
    const tempToken = generateTempToken();
    // 24 random bytes hex-encoded -> 48 lowercase hex characters.
    expect(tempToken).toHaveLength(48);
    expect(tempToken).toMatch(/^[a-f0-9]+$/);
  });
  it('should generate unique tokens each call', () => {
    expect(generateTempToken()).not.toBe(generateTempToken());
  });
});
describe('Constants', () => {
  describe('Redis key prefixes', () => {
    it('should have correct REFRESH_TOKEN_PREFIX', () => {
      expect(REFRESH_TOKEN_PREFIX).toBe('tracearr:refresh:');
    });
    it('should have correct PLEX_TEMP_TOKEN_PREFIX', () => {
      expect(PLEX_TEMP_TOKEN_PREFIX).toBe('tracearr:plex_temp:');
    });
  });
  describe('TTL values', () => {
    it('should have REFRESH_TOKEN_TTL of 30 days in seconds', () => {
      const thirtyDaysInSeconds = 30 * 24 * 60 * 60;
      expect(REFRESH_TOKEN_TTL).toBe(thirtyDaysInSeconds);
    });
    it('should have PLEX_TEMP_TOKEN_TTL of 10 minutes in seconds', () => {
      const tenMinutesInSeconds = 10 * 60;
      expect(PLEX_TEMP_TOKEN_TTL).toBe(tenMinutesInSeconds);
    });
  });
});
describe('Integration: Token workflow', () => {
  it('should support generate -> hash -> lookup workflow', () => {
    // Issue a token, store only its hash, then hash the presented token to look it up.
    const issuedToken = generateRefreshToken();
    const storedHash = hashRefreshToken(issuedToken);
    const lookupHash = hashRefreshToken(issuedToken);
    expect(lookupHash).toBe(storedHash);
  });
  it('should reject different token in lookup', () => {
    // A freshly generated token must not collide with the stored one.
    const storedHash = hashRefreshToken(generateRefreshToken());
    const presentedHash = hashRefreshToken(generateRefreshToken());
    expect(presentedHash).not.toBe(storedHash);
  });
});

View File

@@ -0,0 +1,105 @@
/**
* Emby Authentication Routes
*
* POST /emby/connect-api-key - Connect an Emby server with API key (requires authentication)
*/
import type { FastifyPluginAsync } from 'fastify';
import { eq, and } from 'drizzle-orm';
import { z } from 'zod';
import { db } from '../../db/client.js';
import { servers } from '../../db/schema.js';
import { EmbyClient } from '../../services/mediaServer/index.js';
// Token encryption removed - tokens now stored in plain text (DB is localhost-only)
import { generateTokens } from './utils.js';
import { syncServer } from '../../services/sync.js';
// Request body for POST /emby/connect-api-key.
const embyConnectApiKeySchema = z.object({
  serverUrl: z.url(), // Base URL of the Emby server
  serverName: z.string().min(1).max(100), // Display name for the server
  apiKey: z.string().min(1), // Emby API key; must belong to an administrator
});
/**
 * Emby auth route plugin.
 *
 * Registers POST /emby/connect-api-key: an authenticated owner supplies a
 * server URL + API key; the route verifies admin access, upserts the server
 * row, kicks off a background sync, and returns refreshed auth tokens that
 * include access to the new server.
 */
export const embyRoutes: FastifyPluginAsync = async (app) => {
  /**
   * POST /emby/connect-api-key - Connect an Emby server with API key (requires authentication)
   */
  app.post(
    '/emby/connect-api-key',
    { preHandler: [app.authenticate] },
    async (request, reply) => {
      const body = embyConnectApiKeySchema.safeParse(request.body);
      if (!body.success) {
        return reply.badRequest('serverUrl, serverName, and apiKey are required');
      }
      const authUser = request.user;
      // Only owners can add servers
      if (authUser.role !== 'owner') {
        return reply.forbidden('Only owners can add servers');
      }
      const { serverUrl, serverName, apiKey } = body.data;
      try {
        // Verify the API key has admin access before touching the DB.
        const isAdmin = await EmbyClient.verifyServerAdmin(apiKey, serverUrl);
        if (!isAdmin) {
          return reply.forbidden('API key does not have administrator access to this Emby server');
        }
        // Create or update server (matched by exact URL + type 'emby').
        let server = await db
          .select()
          .from(servers)
          .where(and(eq(servers.url, serverUrl), eq(servers.type, 'emby')))
          .limit(1);
        if (server.length === 0) {
          const inserted = await db
            .insert(servers)
            .values({
              name: serverName,
              type: 'emby',
              url: serverUrl,
              token: apiKey,
            })
            .returning();
          server = inserted;
        } else {
          const existingServer = server[0]!;
          // NOTE(review): after this update, server[0] still holds the stale
          // name/token from the select above — only its id is used below, so
          // this is currently safe; revisit if more fields are ever read.
          await db
            .update(servers)
            .set({
              name: serverName,
              token: apiKey,
              updatedAt: new Date(),
            })
            .where(eq(servers.id, existingServer.id));
        }
        const serverId = server[0]!.id;
        app.log.info({ userId: authUser.userId, serverId }, 'Emby server connected via API key');
        // Auto-sync server users and libraries in background — deliberately not
        // awaited so the HTTP response isn't blocked; failures are only logged.
        syncServer(serverId, { syncUsers: true, syncLibraries: true })
          .then((result) => {
            app.log.info({ serverId, usersAdded: result.usersAdded, librariesSynced: result.librariesSynced }, 'Auto-sync completed for Emby server');
          })
          .catch((error) => {
            app.log.error({ error, serverId }, 'Auto-sync failed for Emby server');
          });
        // Return updated tokens with new server access
        return generateTokens(app, authUser.userId, authUser.username, authUser.role);
      } catch (error) {
        app.log.error({ error }, 'Emby connect-api-key failed');
        return reply.internalServerError('Failed to connect Emby server');
      }
    }
  );
};

View File

@@ -0,0 +1,45 @@
/**
* Authentication Routes Module
*
* Orchestrates all auth-related routes and provides unified export.
*
* Auth Flow Options:
* 1. Local signup: POST /signup → Create account with username/password
* 2. Local login: POST /login (type=local) → Login with username/password
* 3. Plex OAuth: POST /login (type=plex) → Login/signup with Plex
*
* Server Connection (separate from auth):
* - POST /plex/connect → Connect a Plex server after login
* - POST /jellyfin/connect → Connect a Jellyfin server after login
* - POST /emby/connect → Connect an Emby server after login
*
* Session Management:
* - GET /me → Get current user info
* - POST /refresh → Refresh access token
* - POST /logout → Revoke refresh token
*/
import type { FastifyPluginAsync } from 'fastify';
import { localRoutes } from './local.js';
import { plexRoutes } from './plex.js';
import { jellyfinRoutes } from './jellyfin.js';
import { embyRoutes } from './emby.js';
import { sessionRoutes } from './session.js';
/**
 * Aggregate auth plugin: registers every auth sub-plugin in sequence.
 * Each sub-plugin declares its own route paths, so no extra prefix is applied.
 */
export const authRoutes: FastifyPluginAsync = async (app) => {
  for (const routePlugin of [localRoutes, plexRoutes, jellyfinRoutes, embyRoutes, sessionRoutes]) {
    await app.register(routePlugin);
  }
};
// Re-export utilities for potential use by other modules
export {
generateTokens,
generateRefreshToken,
hashRefreshToken,
getAllServerIds,
} from './utils.js';

View File

@@ -0,0 +1,190 @@
/**
* Jellyfin Authentication Routes
*
* POST /jellyfin/login - Login with Jellyfin username/password (checks all configured servers)
* POST /jellyfin/connect-api-key - Connect a Jellyfin server with API key (requires authentication)
*/
import type { FastifyPluginAsync } from 'fastify';
import { eq, and } from 'drizzle-orm';
import { z } from 'zod';
import { db } from '../../db/client.js';
import { servers, users } from '../../db/schema.js';
import { JellyfinClient } from '../../services/mediaServer/index.js';
// Token encryption removed - tokens now stored in plain text (DB is localhost-only)
import { generateTokens } from './utils.js';
import { syncServer } from '../../services/sync.js';
import { getUserByUsername, createUser } from '../../services/userService.js';
// Schema for Jellyfin login: plain username/password credentials.
const jellyfinLoginSchema = z.object({
  username: z.string().min(1),
  password: z.string().min(1),
});
// Schema for API key connection. The key is verified for admin access
// before the server is persisted (see /jellyfin/connect-api-key below).
const jellyfinConnectApiKeySchema = z.object({
  serverUrl: z.url(),
  serverName: z.string().min(1).max(100),
  apiKey: z.string().min(1),
});
export const jellyfinRoutes: FastifyPluginAsync = async (app) => {
  /**
   * POST /jellyfin/login - Login with Jellyfin username/password
   *
   * Checks all configured Jellyfin servers and authenticates if user is admin on any server.
   * Creates a new user with 'admin' role if user doesn't exist.
   */
  app.post('/jellyfin/login', async (request, reply) => {
    const body = jellyfinLoginSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Username and password are required');
    }
    const { username, password } = body.data;
    try {
      // Get all configured Jellyfin servers
      const jellyfinServers = await db
        .select()
        .from(servers)
        .where(eq(servers.type, 'jellyfin'));
      if (jellyfinServers.length === 0) {
        return reply.unauthorized('No Jellyfin servers configured. Please add a server first.');
      }
      // Try to authenticate with each server, in DB order; first server where
      // the credentials resolve to an admin account wins.
      for (const server of jellyfinServers) {
        try {
          const authResult = await JellyfinClient.authenticate(server.url, username, password);
          if (authResult?.isAdmin) {
            // User is admin on this server - proceed with login
            app.log.info({ username, serverId: server.id }, 'Jellyfin admin authentication successful');
            // Check if user already exists
            let user = await getUserByUsername(username);
            if (!user) {
              // Create new user with admin role
              user = await createUser({
                username,
                role: 'admin',
                email: undefined, // Jellyfin doesn't expose email in auth response
                thumbnail: undefined, // Can be populated later via sync
              });
              app.log.info({ userId: user.id, username }, 'Created new user from Jellyfin admin login');
            } else {
              // Update existing user role to admin if not already
              // (owners keep their higher role untouched)
              if (user.role !== 'admin' && user.role !== 'owner') {
                await db
                  .update(users)
                  .set({ role: 'admin', updatedAt: new Date() })
                  .where(eq(users.id, user.id));
                user.role = 'admin';
                app.log.info({ userId: user.id, username }, 'Updated user role to admin from Jellyfin login');
              }
            }
            // Generate and return tokens
            return generateTokens(app, user.id, user.username, user.role);
          }
        } catch (error) {
          // Authentication failed on this server, try next one.
          // NOTE(review): this catch also swallows failures that occur AFTER a
          // successful authenticate() (user creation, role update, token
          // generation), which would then be reported to the client as bad
          // credentials — confirm whether those should propagate instead.
          app.log.debug({ error, serverId: server.id, username }, 'Jellyfin authentication failed on server');
          continue;
        }
      }
      // Authentication failed on all servers or user is not admin
      app.log.warn({ username }, 'Jellyfin login failed: invalid credentials or not admin');
      return reply.unauthorized('Invalid username or password, or user is not an administrator on any configured Jellyfin server');
    } catch (error) {
      app.log.error({ error, username }, 'Jellyfin login error');
      return reply.internalServerError('Failed to authenticate with Jellyfin servers');
    }
  });
  /**
   * POST /jellyfin/connect-api-key - Connect a Jellyfin server with API key (requires authentication)
   *
   * Owner-only. Verifies the key has admin access, then inserts or updates the
   * server row (deduped by url + type), kicks off a background sync, and
   * returns fresh tokens that include the new server.
   */
  app.post(
    '/jellyfin/connect-api-key',
    { preHandler: [app.authenticate] },
    async (request, reply) => {
      const body = jellyfinConnectApiKeySchema.safeParse(request.body);
      if (!body.success) {
        return reply.badRequest('serverUrl, serverName, and apiKey are required');
      }
      const authUser = request.user;
      // Only owners can add servers
      if (authUser.role !== 'owner') {
        return reply.forbidden('Only owners can add servers');
      }
      const { serverUrl, serverName, apiKey } = body.data;
      try {
        // Verify the API key has admin access
        const isAdmin = await JellyfinClient.verifyServerAdmin(apiKey, serverUrl);
        if (!isAdmin) {
          return reply.forbidden('API key does not have administrator access to this Jellyfin server');
        }
        // Create or update server
        let server = await db
          .select()
          .from(servers)
          .where(and(eq(servers.url, serverUrl), eq(servers.type, 'jellyfin')))
          .limit(1);
        if (server.length === 0) {
          const inserted = await db
            .insert(servers)
            .values({
              name: serverName,
              type: 'jellyfin',
              url: serverUrl,
              token: apiKey,
            })
            .returning();
          server = inserted;
        } else {
          const existingServer = server[0]!;
          await db
            .update(servers)
            .set({
              name: serverName,
              token: apiKey,
              updatedAt: new Date(),
            })
            .where(eq(servers.id, existingServer.id));
        }
        // Only the id is read from the (possibly stale) select result below.
        const serverId = server[0]!.id;
        app.log.info({ userId: authUser.userId, serverId }, 'Jellyfin server connected via API key');
        // Auto-sync server users and libraries in background (fire-and-forget;
        // failures are logged, not surfaced to this request)
        syncServer(serverId, { syncUsers: true, syncLibraries: true })
          .then((result) => {
            app.log.info({ serverId, usersAdded: result.usersAdded, librariesSynced: result.librariesSynced }, 'Auto-sync completed for Jellyfin server');
          })
          .catch((error) => {
            app.log.error({ error, serverId }, 'Auto-sync failed for Jellyfin server');
          });
        // Return updated tokens with new server access
        return generateTokens(app, authUser.userId, authUser.username, authUser.role);
      } catch (error) {
        app.log.error({ error }, 'Jellyfin connect-api-key failed');
        return reply.internalServerError('Failed to connect Jellyfin server');
      }
    }
  );
};

View File

@@ -0,0 +1,137 @@
/**
* Local Authentication Routes
*
* POST /signup - Create a local account
* POST /login - Login with local credentials or initiate Plex OAuth
*/
import type { FastifyPluginAsync } from 'fastify';
import { eq, and, isNotNull } from 'drizzle-orm';
import { z } from 'zod';
import { db } from '../../db/client.js';
import { users } from '../../db/schema.js';
import { PlexClient } from '../../services/mediaServer/index.js';
import { hashPassword, verifyPassword } from '../../utils/password.js';
import { generateTokens } from './utils.js';
import { getUserByEmail, getOwnerUser } from '../../services/userService.js';
// Schemas
const signupSchema = z.object({
  username: z.string().min(3).max(50), // Display name
  email: z.email(),
  password: z.string().min(8).max(100),
});
// Local login branch of the discriminated union below
const localLoginSchema = z.object({
  type: z.literal('local'),
  email: z.email(),
  password: z.string().min(1),
});
// Plex login branch: only initiates OAuth; forwardUrl is an optional
// redirect target passed through to PlexClient.initiateOAuth
const plexLoginSchema = z.object({
  type: z.literal('plex'),
  forwardUrl: z.url().optional(),
});
// Note: Jellyfin login is handled at /auth/jellyfin/login, not here
const loginSchema = z.discriminatedUnion('type', [localLoginSchema, plexLoginSchema]);
export const localRoutes: FastifyPluginAsync = async (app) => {
  /**
   * POST /signup - Create a local account
   *
   * The very first account (no owner exists yet) becomes 'owner';
   * everyone after that is a 'viewer'. Returns access + refresh tokens.
   */
  app.post('/signup', async (request, reply) => {
    const body = signupSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid signup data: email, username (3-50 chars), password (8+ chars) required');
    }
    const { username, email, password } = body.data;
    // Check if email already exists
    const existing = await getUserByEmail(email);
    if (existing) {
      return reply.conflict('Email already registered');
    }
    // Check if this is the first user (will be owner)
    // NOTE(review): check-then-insert is not atomic — two concurrent first
    // signups could both observe "no owner" and both be created as owner.
    // Confirm whether a DB-level guard exists elsewhere.
    const owner = await getOwnerUser();
    const isFirstUser = !owner;
    // Create user with password hash
    // First user becomes owner, subsequent users are viewers
    const passwordHashValue = await hashPassword(password);
    const role = isFirstUser ? 'owner' : 'viewer';
    const [newUser] = await db
      .insert(users)
      .values({
        username,
        email,
        passwordHash: passwordHashValue,
        role,
      })
      .returning();
    if (!newUser) {
      return reply.internalServerError('Failed to create user');
    }
    app.log.info({ userId: newUser.id, role }, 'Local account created');
    return generateTokens(app, newUser.id, newUser.username, newUser.role);
  });
  /**
   * POST /login - Login with local credentials or initiate Plex OAuth
   *
   * type === 'local': verifies the password and returns tokens.
   * type === 'plex' : starts the Plex PIN flow and returns { pinId, authUrl };
   *                   the client completes it via /plex/check-pin.
   */
  app.post('/login', async (request, reply) => {
    const body = loginSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid login request');
    }
    const { type } = body.data;
    if (type === 'local') {
      const { email, password } = body.data;
      // Find user by email with password hash (users without a local
      // password — e.g. Plex-only accounts — are excluded by isNotNull)
      const userRows = await db
        .select()
        .from(users)
        .where(and(eq(users.email, email), isNotNull(users.passwordHash)))
        .limit(1);
      const user = userRows[0];
      // Same generic message for "no such user" and "wrong password"
      if (!user?.passwordHash) {
        return reply.unauthorized('Invalid email or password');
      }
      // Verify password
      const valid = await verifyPassword(password, user.passwordHash);
      if (!valid) {
        return reply.unauthorized('Invalid email or password');
      }
      app.log.info({ userId: user.id }, 'Local login successful');
      return generateTokens(app, user.id, user.username, user.role);
    }
    if (type === 'plex') {
      // Plex OAuth - initiate flow
      try {
        const forwardUrl = body.data.forwardUrl;
        const { pinId, authUrl } = await PlexClient.initiateOAuth(forwardUrl);
        return { pinId, authUrl };
      } catch (error) {
        app.log.error({ error }, 'Failed to initiate Plex OAuth');
        return reply.internalServerError('Failed to initiate Plex authentication');
      }
    }
    // This should not be reached due to discriminated union, but handle gracefully
    return reply.badRequest('Invalid login type');
  });
};

View File

@@ -0,0 +1,552 @@
/**
* Plex Authentication Routes
*
* POST /plex/check-pin - Check Plex PIN status
* POST /plex/connect - Complete Plex signup and connect a server
* GET /plex/available-servers - Discover available Plex servers for adding
* POST /plex/add-server - Add an additional Plex server
*/
import type { FastifyPluginAsync } from 'fastify';
import { eq, and } from 'drizzle-orm';
import { z } from 'zod';
import type { PlexAvailableServersResponse, PlexDiscoveredServer, PlexDiscoveredConnection } from '@tracearr/shared';
import { db } from '../../db/client.js';
import { servers, users, serverUsers } from '../../db/schema.js';
import { PlexClient } from '../../services/mediaServer/index.js';
// Token encryption removed - tokens now stored in plain text (DB is localhost-only)
import { plexHeaders } from '../../utils/http.js';
import {
generateTokens,
generateTempToken,
PLEX_TEMP_TOKEN_PREFIX,
PLEX_TEMP_TOKEN_TTL,
} from './utils.js';
import { syncServer } from '../../services/sync.js';
import { getUserByPlexAccountId, getOwnerUser, getUserById } from '../../services/userService.js';
// Schemas
const plexCheckPinSchema = z.object({
  pinId: z.string(),
});
// Completes signup after the PIN flow; tempToken references state stored in
// Redis by /plex/check-pin.
const plexConnectSchema = z.object({
  tempToken: z.string(),
  serverUri: z.url(),
  serverName: z.string().min(1).max(100),
  clientIdentifier: z.string().optional(), // For storing machineIdentifier
});
const plexAddServerSchema = z.object({
  serverUri: z.url(),
  serverName: z.string().min(1).max(100),
  clientIdentifier: z.string().min(1), // Required for dedup
});
// Connection testing timeout in milliseconds (per candidate connection,
// enforced via AbortSignal.timeout)
const CONNECTION_TEST_TIMEOUT = 3000;
export const plexRoutes: FastifyPluginAsync = async (app) => {
  /**
   * POST /plex/check-pin - Check Plex PIN status
   *
   * Returns:
   * - { authorized: false } if PIN not yet claimed
   * - { authorized: true, accessToken, refreshToken, user } if user found by plexAccountId
   * - { authorized: true, needsServerSelection: true, servers, tempToken } if new Plex user
   */
  app.post('/plex/check-pin', async (request, reply) => {
    const body = plexCheckPinSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('pinId is required');
    }
    const { pinId } = body.data;
    try {
      const authResult = await PlexClient.checkOAuthPin(pinId);
      if (!authResult) {
        return { authorized: false, message: 'PIN not yet authorized' };
      }
      // Check if user exists by Plex account ID (global Plex.tv ID)
      let existingUser = await getUserByPlexAccountId(authResult.id);
      // Fallback: Check by externalId in server_users (server-synced users may have Plex ID there)
      if (!existingUser) {
        const fallbackServerUsers = await db
          .select({ userId: serverUsers.userId })
          .from(serverUsers)
          .where(eq(serverUsers.externalId, authResult.id))
          .limit(1);
        if (fallbackServerUsers[0]) {
          existingUser = await getUserById(fallbackServerUsers[0].userId);
        }
      }
      if (existingUser) {
        // Returning Plex user - update their info and link plex_account_id
        const user = existingUser;
        await db
          .update(users)
          .set({
            username: authResult.username,
            email: authResult.email,
            thumbnail: authResult.thumb,
            plexAccountId: authResult.id, // Link the Plex account ID
            updatedAt: new Date(),
          })
          .where(eq(users.id, user.id));
        app.log.info({ userId: user.id }, 'Returning Plex user login');
        return {
          authorized: true,
          ...(await generateTokens(app, user.id, authResult.username, user.role)),
        };
      }
      // New Plex user - check if they own any servers
      const plexServers = await PlexClient.getServers(authResult.token);
      // Check if this is the first owner
      const owner = await getOwnerUser();
      const isFirstUser = !owner;
      // Store temp token for completing registration.
      // Created unconditionally; deleted again below in the no-servers path.
      const tempToken = generateTempToken();
      await app.redis.setex(
        `${PLEX_TEMP_TOKEN_PREFIX}${tempToken}`,
        PLEX_TEMP_TOKEN_TTL,
        JSON.stringify({
          plexAccountId: authResult.id,
          plexUsername: authResult.username,
          plexEmail: authResult.email,
          plexThumb: authResult.thumb,
          plexToken: authResult.token,
          isFirstUser,
        })
      );
      // If they have servers, let them select one to connect
      if (plexServers.length > 0) {
        const formattedServers = plexServers.map((s) => ({
          name: s.name,
          platform: s.platform,
          version: s.productVersion,
          clientIdentifier: s.clientIdentifier, // For storing machineIdentifier
          connections: s.connections.map((c) => ({
            uri: c.uri,
            local: c.local,
            address: c.address,
            port: c.port,
          })),
        }));
        return {
          authorized: true,
          needsServerSelection: true,
          servers: formattedServers,
          tempToken,
        };
      }
      // No servers - create account without server connection
      // First user becomes owner, subsequent users are viewers
      const role = isFirstUser ? 'owner' : 'viewer';
      const [newUser] = await db
        .insert(users)
        .values({
          username: authResult.username,
          email: authResult.email,
          thumbnail: authResult.thumb,
          plexAccountId: authResult.id,
          role,
        })
        .returning();
      if (!newUser) {
        return reply.internalServerError('Failed to create user');
      }
      // Clean up temp token
      await app.redis.del(`${PLEX_TEMP_TOKEN_PREFIX}${tempToken}`);
      app.log.info({ userId: newUser.id, role }, 'New Plex user created (no servers)');
      return {
        authorized: true,
        ...(await generateTokens(app, newUser.id, newUser.username, newUser.role)),
      };
    } catch (error) {
      app.log.error({ error }, 'Plex check-pin failed');
      return reply.internalServerError('Failed to check Plex authorization');
    }
  });
  /**
   * POST /plex/connect - Complete Plex signup and connect a server
   *
   * Consumes the single-use temp token written by /plex/check-pin, verifies
   * admin access on the chosen server, upserts the server row, creates the
   * user identity plus its server_user link, and returns tokens.
   */
  app.post('/plex/connect', async (request, reply) => {
    const body = plexConnectSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('tempToken, serverUri, and serverName are required');
    }
    const { tempToken, serverUri, serverName, clientIdentifier } = body.data;
    // Get stored Plex auth from temp token
    const stored = await app.redis.get(`${PLEX_TEMP_TOKEN_PREFIX}${tempToken}`);
    if (!stored) {
      return reply.unauthorized('Invalid or expired temp token. Please restart login.');
    }
    // Delete temp token (one-time use).
    // NOTE(review): deletion happens before admin verification below, so a
    // failed verification forces the user to restart the whole OAuth flow —
    // confirm this is intended.
    await app.redis.del(`${PLEX_TEMP_TOKEN_PREFIX}${tempToken}`);
    const { plexAccountId, plexUsername, plexEmail, plexThumb, plexToken, isFirstUser } = JSON.parse(
      stored
    ) as {
      plexAccountId: string;
      plexUsername: string;
      plexEmail: string;
      plexThumb: string;
      plexToken: string;
      isFirstUser: boolean;
    };
    try {
      // Verify user is admin on the selected server
      const isAdmin = await PlexClient.verifyServerAdmin(plexToken, serverUri);
      if (!isAdmin) {
        return reply.forbidden('You must be an admin on the selected Plex server');
      }
      // Create or update server
      let server = await db
        .select()
        .from(servers)
        .where(and(eq(servers.url, serverUri), eq(servers.type, 'plex')))
        .limit(1);
      if (server.length === 0) {
        const inserted = await db
          .insert(servers)
          .values({
            name: serverName,
            type: 'plex',
            url: serverUri,
            token: plexToken,
            machineIdentifier: clientIdentifier,
          })
          .returning();
        server = inserted;
      } else {
        const existingServer = server[0]!;
        await db
          .update(servers)
          .set({
            token: plexToken,
            updatedAt: new Date(),
            // Update machineIdentifier if not already set
            ...(clientIdentifier && !existingServer.machineIdentifier
              ? { machineIdentifier: clientIdentifier }
              : {}),
          })
          .where(eq(servers.id, existingServer.id));
      }
      // Only the id is read from the (possibly stale) select result below.
      const serverId = server[0]!.id;
      // Create user identity (no serverId on users table)
      // First user becomes owner, subsequent users are viewers
      const role = isFirstUser ? 'owner' : 'viewer';
      const [newUser] = await db
        .insert(users)
        .values({
          username: plexUsername,
          email: plexEmail,
          thumbnail: plexThumb,
          plexAccountId: plexAccountId,
          role,
        })
        .returning();
      if (!newUser) {
        return reply.internalServerError('Failed to create user');
      }
      // Create server_user linking the identity to this server
      await db.insert(serverUsers).values({
        userId: newUser.id,
        serverId,
        externalId: plexAccountId,
        username: plexUsername,
        email: plexEmail,
        thumbUrl: plexThumb,
        isServerAdmin: true, // They verified as admin
      });
      app.log.info({ userId: newUser.id, serverId, role }, 'New Plex user with server created');
      // Auto-sync server users and libraries in background (fire-and-forget)
      syncServer(serverId, { syncUsers: true, syncLibraries: true })
        .then((result) => {
          app.log.info({ serverId, usersAdded: result.usersAdded, librariesSynced: result.librariesSynced }, 'Auto-sync completed for Plex server');
        })
        .catch((error) => {
          app.log.error({ error, serverId }, 'Auto-sync failed for Plex server');
        });
      return generateTokens(app, newUser.id, newUser.username, newUser.role);
    } catch (error) {
      app.log.error({ error }, 'Plex connect failed');
      return reply.internalServerError('Failed to connect to Plex server');
    }
  });
  /**
   * GET /plex/available-servers - Discover available Plex servers for adding
   *
   * Requires authentication and owner role.
   * Returns list of user's owned Plex servers that aren't already connected,
   * with connection testing results.
   */
  app.get(
    '/plex/available-servers',
    { preHandler: [app.authenticate] },
    async (request, reply): Promise<PlexAvailableServersResponse> => {
      const authUser = request.user;
      // Only owners can add servers
      if (authUser.role !== 'owner') {
        return reply.forbidden('Only server owners can add servers');
      }
      // Get existing Plex servers to find a token
      const existingPlexServers = await db
        .select({
          id: servers.id,
          token: servers.token,
          machineIdentifier: servers.machineIdentifier,
        })
        .from(servers)
        .where(eq(servers.type, 'plex'));
      if (existingPlexServers.length === 0) {
        // No Plex servers connected - user needs to link their Plex account
        return { servers: [], hasPlexToken: false };
      }
      // Use the first server's token to query plex.tv
      const plexToken = existingPlexServers[0]!.token;
      // Get all servers the user owns from plex.tv
      let allServers;
      try {
        allServers = await PlexClient.getServers(plexToken);
      } catch (error) {
        app.log.error({ error }, 'Failed to fetch servers from plex.tv');
        return reply.internalServerError('Failed to fetch servers from Plex');
      }
      // Get list of already-connected machine identifiers
      const connectedMachineIds = new Set(
        existingPlexServers
          .map((s) => s.machineIdentifier)
          .filter((id): id is string => id !== null)
      );
      // Filter out already-connected servers
      const availableServers = allServers.filter(
        (s) => !connectedMachineIds.has(s.clientIdentifier)
      );
      if (availableServers.length === 0) {
        return { servers: [], hasPlexToken: true };
      }
      // Test connections for each server in parallel
      const testedServers: PlexDiscoveredServer[] = await Promise.all(
        availableServers.map(async (server) => {
          // Test all connections in parallel
          const connectionResults = await Promise.all(
            server.connections.map(async (conn): Promise<PlexDiscoveredConnection> => {
              const start = Date.now();
              try {
                const response = await fetch(`${conn.uri}/`, {
                  headers: plexHeaders(plexToken),
                  signal: AbortSignal.timeout(CONNECTION_TEST_TIMEOUT),
                });
                if (response.ok) {
                  return {
                    uri: conn.uri,
                    local: conn.local,
                    address: conn.address,
                    port: conn.port,
                    reachable: true,
                    latencyMs: Date.now() - start,
                  };
                }
              } catch {
                // Connection failed or timed out
              }
              // Fall-through: non-OK response, network error, or timeout
              return {
                uri: conn.uri,
                local: conn.local,
                address: conn.address,
                port: conn.port,
                reachable: false,
                latencyMs: null,
              };
            })
          );
          // Sort connections: reachable first, then by local preference, then by latency
          const sortedConnections = connectionResults.sort((a, b) => {
            // Reachable first
            if (a.reachable !== b.reachable) return a.reachable ? -1 : 1;
            // Then local preference (local before remote)
            if (a.local !== b.local) return a.local ? -1 : 1;
            // Then by latency (lower is better)
            if (a.latencyMs !== null && b.latencyMs !== null) {
              return a.latencyMs - b.latencyMs;
            }
            return 0;
          });
          // Pick the best connection as recommended
          const recommended = sortedConnections.find((c) => c.reachable);
          return {
            name: server.name,
            platform: server.platform,
            version: server.productVersion,
            clientIdentifier: server.clientIdentifier,
            recommendedUri: recommended?.uri ?? null,
            connections: sortedConnections,
          };
        })
      );
      return { servers: testedServers, hasPlexToken: true };
    }
  );
  /**
   * POST /plex/add-server - Add an additional Plex server
   *
   * Requires authentication and owner role.
   * Uses existing Plex token from another connected server.
   */
  app.post(
    '/plex/add-server',
    { preHandler: [app.authenticate] },
    async (request, reply) => {
      const body = plexAddServerSchema.safeParse(request.body);
      if (!body.success) {
        return reply.badRequest('serverUri, serverName, and clientIdentifier are required');
      }
      const { serverUri, serverName, clientIdentifier } = body.data;
      const authUser = request.user;
      // Only owners can add servers
      if (authUser.role !== 'owner') {
        return reply.forbidden('Only server owners can add servers');
      }
      // Get existing Plex server to retrieve token
      const existingPlexServer = await db
        .select({ token: servers.token })
        .from(servers)
        .where(eq(servers.type, 'plex'))
        .limit(1);
      if (existingPlexServer.length === 0) {
        return reply.badRequest('No Plex servers connected. Please link your Plex account first.');
      }
      const plexToken = existingPlexServer[0]!.token;
      // Check if server already exists (by machineIdentifier or URL)
      const existing = await db
        .select({ id: servers.id })
        .from(servers)
        .where(
          eq(servers.machineIdentifier, clientIdentifier)
        )
        .limit(1);
      if (existing.length > 0) {
        return reply.conflict('This server is already connected');
      }
      // Also check by URL
      const existingByUrl = await db
        .select({ id: servers.id })
        .from(servers)
        .where(eq(servers.url, serverUri))
        .limit(1);
      if (existingByUrl.length > 0) {
        return reply.conflict('A server with this URL is already connected');
      }
      try {
        // Verify admin access on the new server
        const isAdmin = await PlexClient.verifyServerAdmin(plexToken, serverUri);
        if (!isAdmin) {
          return reply.forbidden('You must be an admin on the selected Plex server');
        }
        // Create server record
        const [newServer] = await db
          .insert(servers)
          .values({
            name: serverName,
            type: 'plex',
            url: serverUri,
            token: plexToken,
            machineIdentifier: clientIdentifier,
          })
          .returning();
        if (!newServer) {
          return reply.internalServerError('Failed to create server');
        }
        app.log.info({ serverId: newServer.id, serverName }, 'Additional Plex server added');
        // Auto-sync server users and libraries in background (fire-and-forget)
        syncServer(newServer.id, { syncUsers: true, syncLibraries: true })
          .then((result) => {
            app.log.info(
              { serverId: newServer.id, usersAdded: result.usersAdded, librariesSynced: result.librariesSynced },
              'Auto-sync completed for new Plex server'
            );
          })
          .catch((error) => {
            app.log.error({ error, serverId: newServer.id }, 'Auto-sync failed for new Plex server');
          });
        return {
          success: true,
          server: {
            id: newServer.id,
            name: newServer.name,
            type: newServer.type,
            url: newServer.url,
          },
        };
      } catch (error) {
        app.log.error({ error }, 'Failed to add Plex server');
        return reply.internalServerError('Failed to add Plex server');
      }
    }
  );
};

View File

@@ -0,0 +1,132 @@
/**
* Session Management Routes
*
* POST /refresh - Refresh access token
* POST /logout - Revoke refresh token
* GET /me - Get current user info
*/
import type { FastifyPluginAsync } from 'fastify';
import { z } from 'zod';
import { JWT_CONFIG, canLogin, type AuthUser } from '@tracearr/shared';
import {
generateRefreshToken,
hashRefreshToken,
getAllServerIds,
REFRESH_TOKEN_PREFIX,
REFRESH_TOKEN_TTL,
} from './utils.js';
import { getUserById } from '../../services/userService.js';
// Schema
const refreshSchema = z.object({
  refreshToken: z.string(),
});
export const sessionRoutes: FastifyPluginAsync = async (app) => {
  /**
   * POST /refresh - Refresh access token
   *
   * Looks up the refresh token by its SHA-256 hash in Redis, re-validates
   * the user, and rotates the refresh token (old one is deleted, a new one
   * is issued with a fresh TTL).
   */
  app.post('/refresh', async (request, reply) => {
    const body = refreshSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid request body');
    }
    const { refreshToken } = body.data;
    const refreshTokenHash = hashRefreshToken(refreshToken);
    const stored = await app.redis.get(`${REFRESH_TOKEN_PREFIX}${refreshTokenHash}`);
    if (!stored) {
      return reply.unauthorized('Invalid or expired refresh token');
    }
    // Stored serverIds are ignored here; a fresh list is computed below.
    const { userId } = JSON.parse(stored) as { userId: string; serverIds: string[] };
    const user = await getUserById(userId);
    if (!user) {
      await app.redis.del(`${REFRESH_TOKEN_PREFIX}${refreshTokenHash}`);
      return reply.unauthorized('User not found');
    }
    // Check if user can still log in
    if (!canLogin(user.role)) {
      await app.redis.del(`${REFRESH_TOKEN_PREFIX}${refreshTokenHash}`);
      return reply.unauthorized('Account is not active');
    }
    // Get fresh server IDs (in case servers were added/removed)
    // TODO: Admins should get servers where they're isServerAdmin=true
    const serverIds = user.role === 'owner' ? await getAllServerIds() : [];
    const accessPayload: AuthUser = {
      userId,
      username: user.username,
      role: user.role,
      serverIds,
    };
    const accessToken = app.jwt.sign(accessPayload, {
      expiresIn: JWT_CONFIG.ACCESS_TOKEN_EXPIRY,
    });
    // Rotate refresh token
    const newRefreshToken = generateRefreshToken();
    const newRefreshTokenHash = hashRefreshToken(newRefreshToken);
    await app.redis.del(`${REFRESH_TOKEN_PREFIX}${refreshTokenHash}`);
    await app.redis.setex(
      `${REFRESH_TOKEN_PREFIX}${newRefreshTokenHash}`,
      REFRESH_TOKEN_TTL,
      JSON.stringify({ userId, serverIds })
    );
    return { accessToken, refreshToken: newRefreshToken };
  });
  /**
   * POST /logout - Revoke refresh token
   *
   * Best-effort: an invalid/missing body still clears the cookie and
   * reports success. Requires a valid access token (app.authenticate).
   */
  app.post('/logout', { preHandler: [app.authenticate] }, async (request, reply) => {
    const body = refreshSchema.safeParse(request.body);
    if (body.success) {
      const { refreshToken } = body.data;
      await app.redis.del(`${REFRESH_TOKEN_PREFIX}${hashRefreshToken(refreshToken)}`);
    }
    reply.clearCookie('token');
    return { success: true };
  });
  /**
   * GET /me - Get current user info
   *
   * Reads the user fresh from the database so role/profile changes made
   * after the access token was issued are reflected.
   */
  app.get('/me', { preHandler: [app.authenticate] }, async (request) => {
    const authUser = request.user;
    const user = await getUserById(authUser.userId);
    if (!user) {
      // User in JWT doesn't exist in database - token is invalid
      throw app.httpErrors.unauthorized('User no longer exists');
    }
    // Get fresh server IDs
    // TODO: Admins should get servers where they're isServerAdmin=true
    const serverIds = user.role === 'owner' ? await getAllServerIds() : [];
    return {
      userId: user.id,
      username: user.username,
      email: user.email,
      thumbnail: user.thumbnail,
      role: user.role,
      aggregateTrustScore: user.aggregateTrustScore,
      serverIds,
      hasPassword: !!user.passwordHash,
      hasPlexLinked: !!user.plexAccountId,
    };
  });
};

View File

@@ -0,0 +1,84 @@
/**
* Auth Route Utilities
*
* Shared helpers for authentication routes including token generation,
* hashing, and Redis key management.
*/
import { createHash, randomBytes } from 'crypto';
import type { FastifyInstance } from 'fastify';
import { JWT_CONFIG, type AuthUser, type UserRole } from '@tracearr/shared';
import { db } from '../../db/client.js';
import { servers } from '../../db/schema.js';
// Redis key prefixes (refresh tokens are stored under their SHA-256 hash,
// see hashRefreshToken below)
export const REFRESH_TOKEN_PREFIX = 'tracearr:refresh:';
export const PLEX_TEMP_TOKEN_PREFIX = 'tracearr:plex_temp:';
// TTLs are in seconds (passed to redis setex)
export const REFRESH_TOKEN_TTL = 30 * 24 * 60 * 60; // 30 days
export const PLEX_TEMP_TOKEN_TTL = 10 * 60; // 10 minutes for server selection
/**
 * Generate a cryptographically random refresh token.
 *
 * 32 random bytes rendered as a 64-character lowercase hex string.
 */
export function generateRefreshToken(): string {
  const entropy = randomBytes(32);
  return entropy.toString('hex');
}
/**
 * Hash a refresh token for secure storage.
 *
 * Tokens are keyed in Redis by their SHA-256 hex digest so the raw token
 * never needs to be persisted.
 */
export function hashRefreshToken(token: string): string {
  const hasher = createHash('sha256');
  hasher.update(token);
  return hasher.digest('hex');
}
/**
 * Generate a short-lived temporary token for the Plex OAuth flow.
 *
 * 24 random bytes rendered as a 48-character lowercase hex string.
 */
export function generateTempToken(): string {
  const entropy = randomBytes(24);
  return entropy.toString('hex');
}
/**
 * Collect the IDs of every configured media server.
 *
 * Owner tokens embed this list so the owner is authorized for all servers.
 */
export async function getAllServerIds(): Promise<string[]> {
  const rows = await db.select({ id: servers.id }).from(servers);
  const ids: string[] = [];
  for (const row of rows) {
    ids.push(row.id);
  }
  return ids;
}
/**
 * Generate access and refresh tokens for a user
 * Note: Caller must verify canLogin(role) before calling this function
 *
 * @param app - Fastify instance (provides `jwt` signing and `redis`)
 * @param userId - ID of the authenticated user
 * @param username - Username embedded in the access-token payload
 * @param role - User role; owners are granted every server ID
 * @returns Signed access token, opaque refresh token, and the access payload
 */
export async function generateTokens(
  app: FastifyInstance,
  userId: string,
  username: string,
  role: UserRole
) {
  // Owners get access to ALL servers
  // TODO: Admins should get servers where they're isServerAdmin=true
  const serverIds = role === 'owner' ? await getAllServerIds() : [];
  const accessPayload: AuthUser = {
    userId,
    username,
    role,
    serverIds,
  };
  const accessToken = app.jwt.sign(accessPayload, {
    expiresIn: JWT_CONFIG.ACCESS_TOKEN_EXPIRY,
  });
  // Refresh token is opaque; only its SHA-256 hash is stored in Redis,
  // with a TTL (seconds) after which it silently expires.
  const refreshToken = generateRefreshToken();
  const refreshTokenHash = hashRefreshToken(refreshToken);
  await app.redis.setex(
    `${REFRESH_TOKEN_PREFIX}${refreshTokenHash}`,
    REFRESH_TOKEN_TTL,
    JSON.stringify({ userId, serverIds })
  );
  return { accessToken, refreshToken, user: accessPayload };
}

View File

@@ -0,0 +1,256 @@
/**
* Notification Channel Routing routes - Controls which channels receive which events
*
* Web admin endpoints:
* - GET /settings/notifications/routing - Get all routing configuration
* - PATCH /settings/notifications/routing/:eventType - Update routing for specific event
*/
import type { FastifyPluginAsync } from 'fastify';
import { eq } from 'drizzle-orm';
import { z } from 'zod';
import type { NotificationChannelRouting, NotificationEventType } from '@tracearr/shared';
import { db } from '../db/client.js';
import { notificationChannelRouting, notificationEventTypeEnum } from '../db/schema.js';
// Valid event types for validation. Widened to readonly string[] so an
// arbitrary request param can be tested with .includes() without a cast.
const validEventTypes = notificationEventTypeEnum as readonly string[];
// Update routing schema: all flags optional, so a PATCH may toggle any
// subset of channels.
const updateRoutingSchema = z.object({
  discordEnabled: z.boolean().optional(),
  webhookEnabled: z.boolean().optional(),
  pushEnabled: z.boolean().optional(),
  webToastEnabled: z.boolean().optional(),
});
/**
 * Transform a notification_channel_routing DB row into the public API shape.
 */
function toApiResponse(
  row: typeof notificationChannelRouting.$inferSelect
): NotificationChannelRouting {
  const { id, eventType, discordEnabled, webhookEnabled, pushEnabled, webToastEnabled, createdAt, updatedAt } = row;
  return { id, eventType, discordEnabled, webhookEnabled, pushEnabled, webToastEnabled, createdAt, updatedAt };
}
export const channelRoutingRoutes: FastifyPluginAsync = async (app) => {
  // Event types considered too noisy to notify on by default: every channel
  // starts disabled for these. Previously this list was repeated once per
  // channel flag; it is now defined once.
  const LOW_PRIORITY_EVENTS: readonly string[] = [
    'stream_started',
    'stream_stopped',
    'trust_score_changed',
  ];

  /**
   * GET /settings/notifications/routing - Get all routing configuration
   *
   * Requires owner authentication. Returns routing configuration for all event types.
   * Seeds default rows if the table is empty (shouldn't happen due to seed).
   */
  app.get('/routing', { preHandler: [app.authenticate] }, async (request, reply) => {
    const authUser = request.user;
    // Only owners can view routing settings
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can view notification routing');
    }
    // Get all routing configuration
    const rows = await db
      .select()
      .from(notificationChannelRouting)
      .orderBy(notificationChannelRouting.eventType);
    // If no rows exist (shouldn't happen due to seed), create defaults
    if (rows.length === 0) {
      const defaultRouting = notificationEventTypeEnum.map((eventType) => {
        // Compute the shared default once per event instead of repeating the
        // low-priority membership test for every channel flag.
        const enabled = !LOW_PRIORITY_EVENTS.includes(eventType);
        return {
          eventType,
          discordEnabled: enabled,
          webhookEnabled: enabled,
          pushEnabled: enabled,
          webToastEnabled: enabled,
        };
      });
      const inserted = await db
        .insert(notificationChannelRouting)
        .values(defaultRouting)
        .returning();
      return inserted.map(toApiResponse);
    }
    return rows.map(toApiResponse);
  });

  /**
   * PATCH /settings/notifications/routing/:eventType - Update routing for specific event
   *
   * Requires owner authentication. Updates channel routing for a specific event type.
   * Creates the row on demand (unspecified channels default to enabled) or
   * applies a partial update containing only the provided flags.
   */
  app.patch<{ Params: { eventType: string } }>(
    '/routing/:eventType',
    { preHandler: [app.authenticate] },
    async (request, reply) => {
      const { eventType } = request.params;
      // Validate event type against the known enum values
      if (!validEventTypes.includes(eventType)) {
        return reply.badRequest(`Invalid event type: ${eventType}`);
      }
      const body = updateRoutingSchema.safeParse(request.body);
      if (!body.success) {
        return reply.badRequest('Invalid request body');
      }
      const authUser = request.user;
      // Only owners can update routing settings
      if (authUser.role !== 'owner') {
        return reply.forbidden('Only server owners can update notification routing');
      }
      // Find existing routing row for this event type
      const existing = await db
        .select()
        .from(notificationChannelRouting)
        .where(eq(notificationChannelRouting.eventType, eventType as NotificationEventType))
        .limit(1);
      let routingId: string;
      if (existing.length === 0) {
        // Create new routing record; channels not mentioned in the body
        // default to enabled.
        const inserted = await db
          .insert(notificationChannelRouting)
          .values({
            eventType: eventType as NotificationEventType,
            discordEnabled: body.data.discordEnabled ?? true,
            webhookEnabled: body.data.webhookEnabled ?? true,
            pushEnabled: body.data.pushEnabled ?? true,
            webToastEnabled: body.data.webToastEnabled ?? true,
          })
          .returning();
        if (!inserted[0]) {
          return reply.internalServerError('Failed to create routing configuration');
        }
        routingId = inserted[0].id;
      } else {
        // Guarded narrowing instead of a non-null assertion on existing[0].
        const current = existing[0];
        if (!current) {
          return reply.internalServerError('Failed to load routing configuration');
        }
        routingId = current.id;
        // Build a partial update containing only the flags the caller sent.
        const updateData: Partial<typeof notificationChannelRouting.$inferInsert> = {
          updatedAt: new Date(),
        };
        if (body.data.discordEnabled !== undefined) {
          updateData.discordEnabled = body.data.discordEnabled;
        }
        if (body.data.webhookEnabled !== undefined) {
          updateData.webhookEnabled = body.data.webhookEnabled;
        }
        if (body.data.pushEnabled !== undefined) {
          updateData.pushEnabled = body.data.pushEnabled;
        }
        if (body.data.webToastEnabled !== undefined) {
          updateData.webToastEnabled = body.data.webToastEnabled;
        }
        await db
          .update(notificationChannelRouting)
          .set(updateData)
          .where(eq(notificationChannelRouting.id, routingId));
      }
      // Re-read so the response reflects exactly what is stored.
      const updated = await db
        .select()
        .from(notificationChannelRouting)
        .where(eq(notificationChannelRouting.id, routingId))
        .limit(1);
      const row = updated[0];
      if (!row) {
        return reply.internalServerError('Failed to update routing configuration');
      }
      app.log.info(
        { userId: authUser.userId, eventType },
        'Notification channel routing updated'
      );
      return toApiResponse(row);
    }
  );
};
/**
 * Channel routing for a specific event type (internal use by notification services)
 */
export interface ChannelRoutingConfig {
  /** Deliver this event via Discord. */
  discordEnabled: boolean;
  /** Deliver this event via the custom webhook. */
  webhookEnabled: boolean;
  /** Deliver this event as a mobile push notification. */
  pushEnabled: boolean;
  /** Surface this event as an in-app web toast. */
  webToastEnabled: boolean;
}
/**
 * Get channel routing for a specific event type (internal use).
 *
 * Falls back to computed defaults when no row exists: most events default to
 * enabled on all channels, except the noisy stream started/stopped and
 * trust-score events, which default to disabled.
 */
export async function getChannelRouting(
  eventType: NotificationEventType
): Promise<ChannelRoutingConfig> {
  const rows = await db
    .select({
      discordEnabled: notificationChannelRouting.discordEnabled,
      webhookEnabled: notificationChannelRouting.webhookEnabled,
      pushEnabled: notificationChannelRouting.pushEnabled,
      webToastEnabled: notificationChannelRouting.webToastEnabled,
    })
    .from(notificationChannelRouting)
    .where(eq(notificationChannelRouting.eventType, eventType))
    .limit(1);
  const found = rows[0];
  if (found) {
    return found;
  }
  // No routing stored for this event — synthesize the default configuration.
  const defaultEnabled = !['stream_started', 'stream_stopped', 'trust_score_changed'].includes(
    eventType
  );
  return {
    discordEnabled: defaultEnabled,
    webhookEnabled: defaultEnabled,
    pushEnabled: defaultEnabled,
    webToastEnabled: defaultEnabled,
  };
}
/**
 * Get all channel routing configuration (internal use for caching).
 *
 * Returns a Map keyed by event type; events with no stored row are simply
 * absent (callers fall back via getChannelRouting).
 */
export async function getAllChannelRouting(): Promise<Map<NotificationEventType, ChannelRoutingConfig>> {
  const rows = await db
    .select({
      eventType: notificationChannelRouting.eventType,
      discordEnabled: notificationChannelRouting.discordEnabled,
      webhookEnabled: notificationChannelRouting.webhookEnabled,
      pushEnabled: notificationChannelRouting.pushEnabled,
      webToastEnabled: notificationChannelRouting.webToastEnabled,
    })
    .from(notificationChannelRouting);
  // Build the lookup map in a single pass via the Map constructor.
  return new Map<NotificationEventType, ChannelRoutingConfig>(
    rows.map(({ eventType, ...channels }) => [eventType, channels])
  );
}

View File

@@ -0,0 +1,275 @@
/**
* Debug Routes Security Tests
*
* Ensures debug routes are properly protected and only accessible by owners.
* These routes can cause significant data loss, so security is critical.
*/
import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import type { FastifyInstance } from 'fastify';
import {
createTestApp,
generateTestToken,
createOwnerPayload,
createViewerPayload,
} from '../test/helpers.js';
import { debugRoutes } from './debug.js';
// Mock the database module.
// Chainable stubs stand in for drizzle's query builder so the route handlers
// can execute without a live Postgres; every query resolves to empty/dummy data.
vi.mock('../db/client.js', () => ({
  db: {
    select: vi.fn().mockReturnValue({
      from: vi.fn().mockReturnValue([{ count: 0 }]),
    }),
    delete: vi.fn().mockReturnValue({
      where: vi.fn().mockReturnValue({
        returning: vi.fn().mockResolvedValue([]),
      }),
      returning: vi.fn().mockResolvedValue([]),
    }),
    update: vi.fn().mockReturnValue({
      set: vi.fn().mockReturnValue({
        where: vi.fn().mockResolvedValue(undefined),
      }),
    }),
    execute: vi.fn().mockResolvedValue({ rows: [{ size: '10 MB' }] }),
  },
}));
// Minimal schema stand-ins: the debug routes only hand these objects to the
// mocked db above, so a single dummy column per table is enough.
vi.mock('../db/schema.js', () => ({
  sessions: { id: 'id' },
  violations: { id: 'id' },
  users: { id: 'id' },
  servers: { id: 'id' },
  rules: { id: 'id' },
  settings: { id: 'id' },
}));
// Security suite: every debug endpoint must reject anonymous and non-owner
// callers, resist token tampering, and reject expired/malformed tokens.
describe('Debug Routes Security', () => {
  let app: FastifyInstance;
  beforeAll(async () => {
    app = await createTestApp();
    // Register debug routes
    await app.register(debugRoutes, { prefix: '/api/v1/debug' });
    await app.ready();
  });
  afterAll(async () => {
    await app.close();
  });
  // All debug endpoints that need testing — keep in sync with debug.ts
  const debugEndpoints = [
    { method: 'GET', url: '/api/v1/debug/stats' },
    { method: 'DELETE', url: '/api/v1/debug/sessions' },
    { method: 'DELETE', url: '/api/v1/debug/violations' },
    { method: 'DELETE', url: '/api/v1/debug/users' },
    { method: 'DELETE', url: '/api/v1/debug/servers' },
    { method: 'DELETE', url: '/api/v1/debug/rules' },
    { method: 'POST', url: '/api/v1/debug/reset' },
    { method: 'POST', url: '/api/v1/debug/refresh-aggregates' },
    { method: 'GET', url: '/api/v1/debug/env' },
  ];
  describe('Unauthenticated Access Prevention', () => {
    it.each(debugEndpoints)(
      'should reject unauthenticated requests to $method $url',
      async ({ method, url }) => {
        const res = await app.inject({ method: method as any, url });
        expect(res.statusCode).toBe(401);
        expect(res.json().message).toContain('Invalid or expired token');
      }
    );
  });
  describe('Guest User Access Prevention', () => {
    it.each(debugEndpoints)(
      'should reject guest users on $method $url',
      async ({ method, url }) => {
        const guestToken = generateTestToken(app, createViewerPayload());
        const res = await app.inject({
          method: method as any,
          url,
          headers: { Authorization: `Bearer ${guestToken}` },
        });
        expect(res.statusCode).toBe(403);
        expect(res.json().message).toContain('Owner access required');
      }
    );
  });
  describe('Owner Access Allowed', () => {
    it.each(debugEndpoints)(
      'should allow owner access to $method $url',
      async ({ method, url }) => {
        const ownerToken = generateTestToken(app, createOwnerPayload());
        const res = await app.inject({
          method: method as any,
          url,
          headers: { Authorization: `Bearer ${ownerToken}` },
        });
        // Owner should not get 401 or 403
        expect(res.statusCode).not.toBe(401);
        expect(res.statusCode).not.toBe(403);
        // Should get 200 or 500 (500 possible due to mocked DB)
        expect([200, 500]).toContain(res.statusCode);
      }
    );
  });
  describe('Privilege Escalation Prevention', () => {
    it('should not allow role manipulation to access debug routes', async () => {
      // Start with a guest token
      const guestPayload = createViewerPayload();
      const guestToken = generateTestToken(app, guestPayload);
      // Try to manipulate the token to have owner role.
      // Re-encoding the payload invalidates the signature, so the server
      // must reject it outright.
      const parts = guestToken.split('.');
      const payload = JSON.parse(Buffer.from(parts[1]!, 'base64url').toString());
      payload.role = 'owner'; // Try to escalate
      const tamperedPayload = Buffer.from(JSON.stringify(payload)).toString('base64url');
      const tamperedToken = `${parts[0]}.${tamperedPayload}.${parts[2]}`;
      const res = await app.inject({
        method: 'GET',
        url: '/api/v1/debug/stats',
        headers: { Authorization: `Bearer ${tamperedToken}` },
      });
      // Should be rejected - either invalid token (401) or still guest (403)
      expect([401, 403]).toContain(res.statusCode);
    });
    it('should not allow adding owner role to token claims', async () => {
      // Create a token with an extra claim trying to grant owner.
      // Authorization must key off the canonical `role` claim only.
      const payload = {
        ...createViewerPayload(),
        isOwner: true, // Extra claim that shouldn't work
        admin: true, // Another attempt
      };
      const token = generateTestToken(app, payload);
      const res = await app.inject({
        method: 'GET',
        url: '/api/v1/debug/stats',
        headers: { Authorization: `Bearer ${token}` },
      });
      // Role is still 'guest', so should be forbidden
      expect(res.statusCode).toBe(403);
    });
  });
  describe('Expired Token Handling', () => {
    it('should reject expired owner tokens on debug routes', async () => {
      const ownerPayload = createOwnerPayload();
      // Create a manually crafted expired token (signature will be invalid too)
      const validToken = generateTestToken(app, ownerPayload);
      const parts = validToken.split('.');
      const payload = JSON.parse(Buffer.from(parts[1]!, 'base64url').toString());
      payload.exp = Math.floor(Date.now() / 1000) - 3600; // Expired 1 hour ago
      const expiredPayload = Buffer.from(JSON.stringify(payload)).toString('base64url');
      const expiredToken = `${parts[0]}.${expiredPayload}.${parts[2]}`;
      const res = await app.inject({
        method: 'GET',
        url: '/api/v1/debug/stats',
        headers: { Authorization: `Bearer ${expiredToken}` },
      });
      expect(res.statusCode).toBe(401);
    });
  });
  describe('Invalid Token Formats', () => {
    // Malformed/garbage tokens, including the classic alg:none forgery
    const invalidTokens = [
      '',
      'invalid',
      'not.a.jwt',
      'Bearer ',
      'eyJhbGciOiJub25lIn0.eyJyb2xlIjoib3duZXIifQ.', // alg:none attack
      'null',
      'undefined',
      '{"role":"owner"}',
    ];
    it.each(invalidTokens)(
      'should reject invalid token format: %s',
      async (invalidToken) => {
        const res = await app.inject({
          method: 'GET',
          url: '/api/v1/debug/stats',
          headers: { Authorization: `Bearer ${invalidToken}` },
        });
        expect(res.statusCode).toBe(401);
      }
    );
  });
});
// Safeguard suite: even for an authenticated owner, debug responses must not
// leak secrets (credentials in /env) or internal details (paths in /stats).
describe('Debug Routes - Destructive Operation Safeguards', () => {
  let app: FastifyInstance;
  let ownerToken: string;
  beforeAll(async () => {
    app = await createTestApp();
    await app.register(debugRoutes, { prefix: '/api/v1/debug' });
    await app.ready();
    ownerToken = generateTestToken(app, createOwnerPayload());
  });
  afterAll(async () => {
    await app.close();
  });
  it('should not expose database credentials in /env', async () => {
    const res = await app.inject({
      method: 'GET',
      url: '/api/v1/debug/env',
      headers: { Authorization: `Bearer ${ownerToken}` },
    });
    // Even if there's an error, check the format
    if (res.statusCode === 200) {
      const body = res.json();
      const envString = JSON.stringify(body);
      // Should not contain actual secrets
      expect(envString).not.toContain('password');
      expect(envString).not.toMatch(/postgresql:\/\/[^:]+:[^@]+@/); // DB URL with password
      expect(envString).not.toMatch(/redis:\/\/:[^@]+@/); // Redis URL with password
    }
  });
  it('should return structured stats without exposing internals', async () => {
    const res = await app.inject({
      method: 'GET',
      url: '/api/v1/debug/stats',
      headers: { Authorization: `Bearer ${ownerToken}` },
    });
    // Check structure is correct even with mocked data
    if (res.statusCode === 200) {
      const body = res.json();
      // Should have expected structure
      expect(body).toHaveProperty('counts');
      expect(body).toHaveProperty('database');
      // Should not leak internal paths
      const bodyString = JSON.stringify(body);
      expect(bodyString).not.toContain('/Users/');
      expect(bodyString).not.toContain('node_modules');
    }
  });
});

View File

@@ -0,0 +1,252 @@
/**
* Debug routes - owner only
*
* Hidden utilities for development and troubleshooting.
* All routes require owner authentication.
*/
import type { FastifyPluginAsync } from 'fastify';
import { sql } from 'drizzle-orm';
import { db } from '../db/client.js';
import {
sessions,
violations,
users,
servers,
serverUsers,
rules,
settings,
mobileTokens,
mobileSessions,
notificationPreferences,
notificationChannelRouting,
terminationLogs,
} from '../db/schema.js';
export const debugRoutes: FastifyPluginAsync = async (app) => {
  // All debug routes require owner.
  // The hook authenticates first, then gates on role, so every route below
  // can assume an owner-authenticated request.
  app.addHook('preHandler', async (request, reply) => {
    await app.authenticate(request, reply);
    if (request.user?.role !== 'owner') {
      return reply.forbidden('Owner access required');
    }
  });
  /**
   * GET /debug/stats - Database statistics
   *
   * Row counts per core table plus database/table sizes from Postgres
   * catalog views.
   */
  app.get('/stats', async () => {
    // Counts are fetched in parallel; count(*) is cast to int so drizzle
    // returns a number rather than a bigint string.
    const [
      sessionCount,
      violationCount,
      userCount,
      serverCount,
      ruleCount,
    ] = await Promise.all([
      db.select({ count: sql<number>`count(*)::int` }).from(sessions),
      db.select({ count: sql<number>`count(*)::int` }).from(violations),
      db.select({ count: sql<number>`count(*)::int` }).from(users),
      db.select({ count: sql<number>`count(*)::int` }).from(servers),
      db.select({ count: sql<number>`count(*)::int` }).from(rules),
    ]);
    // Get database size
    const dbSize = await db.execute(sql`
      SELECT pg_size_pretty(pg_database_size(current_database())) as size
    `);
    // Get table sizes (top 10 by total relation size)
    const tableSizes = await db.execute(sql`
      SELECT
        relname as table_name,
        pg_size_pretty(pg_total_relation_size(relid)) as total_size
      FROM pg_catalog.pg_statio_user_tables
      ORDER BY pg_total_relation_size(relid) DESC
      LIMIT 10
    `);
    return {
      counts: {
        sessions: sessionCount[0]?.count ?? 0,
        violations: violationCount[0]?.count ?? 0,
        users: userCount[0]?.count ?? 0,
        servers: serverCount[0]?.count ?? 0,
        rules: ruleCount[0]?.count ?? 0,
      },
      database: {
        size: (dbSize.rows[0] as { size: string })?.size ?? 'unknown',
        tables: tableSizes.rows as { table_name: string; total_size: string }[],
      },
    };
  });
  /**
   * DELETE /debug/sessions - Clear all sessions
   *
   * Returns how many sessions and dependent violations were removed.
   */
  app.delete('/sessions', async () => {
    // Delete violations first (FK constraint)
    const violationsDeleted = await db.delete(violations).returning({ id: violations.id });
    const sessionsDeleted = await db.delete(sessions).returning({ id: sessions.id });
    return {
      success: true,
      deleted: {
        sessions: sessionsDeleted.length,
        violations: violationsDeleted.length,
      },
    };
  });
  /**
   * DELETE /debug/violations - Clear all violations
   */
  app.delete('/violations', async () => {
    const deleted = await db.delete(violations).returning({ id: violations.id });
    return {
      success: true,
      deleted: deleted.length,
    };
  });
  /**
   * DELETE /debug/users - Clear all non-owner users
   *
   * Also removes each non-owner user's sessions and violations first so the
   * FK constraints allow the user delete.
   */
  app.delete('/users', async () => {
    // Delete sessions and violations for non-owner users first
    const nonOwnerUsers = await db
      .select({ id: users.id })
      .from(users)
      .where(sql`is_owner = false`);
    const userIds = nonOwnerUsers.map((u) => u.id);
    if (userIds.length === 0) {
      return { success: true, deleted: 0 };
    }
    // Build explicit PostgreSQL array literal (Drizzle doesn't auto-convert JS arrays for ANY())
    // NOTE(review): the interpolated ids come from the DB's uuid column, so
    // raw interpolation is not attacker-controlled here — but drizzle's
    // inArray() operator would avoid raw SQL entirely; consider switching.
    const userIdArray = sql.raw(`ARRAY[${userIds.map(id => `'${id}'::uuid`).join(',')}]`);
    // Delete violations for these users
    await db.delete(violations).where(sql`user_id = ANY(${userIdArray})`);
    // Delete sessions for these users
    await db.delete(sessions).where(sql`user_id = ANY(${userIdArray})`);
    // Delete the users
    const deleted = await db
      .delete(users)
      .where(sql`is_owner = false`)
      .returning({ id: users.id });
    return {
      success: true,
      deleted: deleted.length,
    };
  });
  /**
   * DELETE /debug/servers - Clear all servers (cascades to users, sessions, violations)
   */
  app.delete('/servers', async () => {
    const deleted = await db.delete(servers).returning({ id: servers.id });
    return {
      success: true,
      deleted: deleted.length,
    };
  });
  /**
   * DELETE /debug/rules - Clear all rules
   */
  app.delete('/rules', async () => {
    // Delete violations first (FK constraint)
    await db.delete(violations);
    const deleted = await db.delete(rules).returning({ id: rules.id });
    return {
      success: true,
      deleted: deleted.length,
    };
  });
  /**
   * POST /debug/reset - Full factory reset (deletes everything including owner)
   *
   * Order matters: each table is cleared before the tables it references.
   */
  app.post('/reset', async () => {
    // Delete everything in order respecting FK constraints
    // Start with tables that have FK dependencies on other tables
    await db.delete(violations);
    await db.delete(terminationLogs);
    await db.delete(sessions);
    await db.delete(rules);
    await db.delete(notificationChannelRouting);
    await db.delete(notificationPreferences);
    await db.delete(mobileSessions);
    await db.delete(mobileTokens);
    await db.delete(serverUsers);
    await db.delete(users);
    await db.delete(servers);
    // Reset settings to defaults (settings is a singleton row with id = 1)
    await db
      .update(settings)
      .set({
        allowGuestAccess: false,
        discordWebhookUrl: null,
        customWebhookUrl: null,
        pollerEnabled: true,
        pollerIntervalMs: 15000,
        tautulliUrl: null,
        tautulliApiKey: null,
      })
      .where(sql`id = 1`);
    return {
      success: true,
      message: 'Factory reset complete. Please set up Tracearr again.',
    };
  });
  /**
   * POST /debug/refresh-aggregates - Refresh TimescaleDB continuous aggregates
   *
   * Best-effort: failure (e.g. aggregates not created yet) is reported in the
   * response body rather than thrown.
   */
  app.post('/refresh-aggregates', async () => {
    try {
      // Refresh all continuous aggregates
      await db.execute(sql`
        CALL refresh_continuous_aggregate('hourly_stats', NULL, NULL)
      `);
      await db.execute(sql`
        CALL refresh_continuous_aggregate('daily_stats', NULL, NULL)
      `);
      return { success: true, message: 'Aggregates refreshed' };
    } catch {
      // Aggregates might not exist yet
      return { success: false, message: 'Aggregates not configured or refresh failed' };
    }
  });
  /**
   * GET /debug/env - Safe environment info (no secrets)
   *
   * Secret-bearing variables are reported only as "[set]"/"[not set]".
   */
  app.get('/env', async () => {
    return {
      nodeVersion: process.version,
      platform: process.platform,
      arch: process.arch,
      uptime: Math.round(process.uptime()),
      memoryUsage: {
        heapUsed: `${Math.round(process.memoryUsage().heapUsed / 1024 / 1024)} MB`,
        heapTotal: `${Math.round(process.memoryUsage().heapTotal / 1024 / 1024)} MB`,
        rss: `${Math.round(process.memoryUsage().rss / 1024 / 1024)} MB`,
      },
      env: {
        NODE_ENV: process.env.NODE_ENV ?? 'development',
        DATABASE_URL: process.env.DATABASE_URL ? '[set]' : '[not set]',
        REDIS_URL: process.env.REDIS_URL ? '[set]' : '[not set]',
        ENCRYPTION_KEY: process.env.ENCRYPTION_KEY ? '[set]' : '[not set]',
        GEOIP_DB_PATH: process.env.GEOIP_DB_PATH ?? '[not set]',
      },
    };
  });
};

View File

@@ -0,0 +1,120 @@
/**
* Image proxy routes
*
* Provides a proxy endpoint for fetching images from Plex/Jellyfin servers.
* This solves CORS issues and allows resizing/caching of images.
*/
import type { FastifyPluginAsync } from 'fastify';
import { z } from 'zod';
import { proxyImage, type FallbackType } from '../services/imageProxy.js';
// Query schema for /images/proxy.
// Width/height are coerced from query strings and bounded (10-2000) so a
// caller cannot request abusive resize dimensions.
const proxyQuerySchema = z.object({
  server: z.uuid({ error: 'Invalid server ID' }),
  url: z.string().min(1, 'Image URL is required'),
  width: z.coerce.number().int().min(10).max(2000).optional().default(300),
  height: z.coerce.number().int().min(10).max(2000).optional().default(450),
  fallback: z.enum(['poster', 'avatar', 'art']).optional().default('poster'),
});
export const imageRoutes: FastifyPluginAsync = async (app) => {
  /**
   * GET /images/proxy - Proxy an image from a media server
   *
   * Note: No authentication required - images are public once you have
   * a valid server ID. This allows <img> tags to work without auth headers.
   * Server ID is validated in proxyImage service.
   *
   * Query params:
   * - server: UUID of the server to fetch from
   * - url: The image path (e.g., /library/metadata/123/thumb/456)
   * - width: Resize width (default 300)
   * - height: Resize height (default 450)
   * - fallback: Placeholder type if image fails (poster, avatar, art)
   */
  app.get(
    '/proxy',
    async (request, reply) => {
      const parseResult = proxyQuerySchema.safeParse(request.query);
      if (!parseResult.success) {
        return reply.status(400).send({
          error: 'Invalid query parameters',
          details: z.treeifyError(parseResult.error),
        });
      }
      const { server, url, width, height, fallback } = parseResult.data;
      const result = await proxyImage({
        serverId: server,
        imagePath: url,
        width,
        height,
        fallback: fallback as FallbackType,
      });
      // Surface cache status for debugging/monitoring
      if (result.cached) {
        reply.header('X-Cache', 'HIT');
      } else {
        reply.header('X-Cache', 'MISS');
      }
      // Cache for 1 hour in browser, allow CDN caching
      reply.header('Cache-Control', 'public, max-age=3600, stale-while-revalidate=86400');
      reply.header('Content-Type', result.contentType);
      return reply.send(result.data);
    }
  );
  /**
   * GET /images/avatar - Get a user avatar (with gravatar fallback)
   *
   * Note: No authentication required for same reason as /proxy
   *
   * Query params:
   * - server: UUID of the server (optional if using gravatar)
   * - url: The avatar path from server (optional)
   * - email: Email for gravatar fallback (optional)
   * - size: Avatar size (default 100, clamped to 10-2000)
   */
  app.get(
    '/avatar',
    async (request, reply) => {
      const query = request.query as Record<string, string | undefined>;
      const server = query.server;
      const url = query.url;
      // Fix: parseInt can yield NaN for garbage input, and an unbounded value
      // would let callers request arbitrarily large resizes. Clamp to the same
      // 10-2000 bounds enforced by the /proxy schema, defaulting to 100.
      const parsedSize = parseInt(query.size ?? '100', 10);
      const size = Number.isFinite(parsedSize)
        ? Math.min(Math.max(parsedSize, 10), 2000)
        : 100;
      // If we have server URL, try to fetch from media server
      if (server && url) {
        const result = await proxyImage({
          serverId: server,
          imagePath: url,
          width: size,
          height: size,
          fallback: 'avatar',
        });
        reply.header('Cache-Control', 'public, max-age=3600');
        reply.header('Content-Type', result.contentType);
        return reply.send(result.data);
      }
      // Return fallback avatar (longer cache: the placeholder never changes)
      const result = await proxyImage({
        serverId: 'fallback',
        imagePath: 'fallback',
        width: size,
        height: size,
        fallback: 'avatar',
      });
      reply.header('Cache-Control', 'public, max-age=86400');
      reply.header('Content-Type', result.contentType);
      return reply.send(result.data);
    }
  );
};

View File

@@ -0,0 +1,214 @@
/**
* Import routes - Data import from external sources
*/
import type { FastifyPluginAsync } from 'fastify';
import { tautulliImportSchema } from '@tracearr/shared';
import { TautulliService } from '../services/tautulli.js';
import { getPubSubService } from '../services/cache.js';
import { syncServer } from '../services/sync.js';
import {
enqueueImport,
getImportStatus,
cancelImport,
getImportQueueStats,
getActiveImportForServer,
} from '../jobs/importQueue.js';
export const importRoutes: FastifyPluginAsync = async (app) => {
  /**
   * POST /import/tautulli - Start Tautulli import (enqueues job)
   *
   * Owner only. Syncs server users first, then enqueues a background import
   * job; falls back to direct execution if the queue is unavailable.
   */
  app.post(
    '/tautulli',
    { preHandler: [app.authenticate] },
    async (request, reply) => {
      const body = tautulliImportSchema.safeParse(request.body);
      if (!body.success) {
        return reply.badRequest('Invalid request body: serverId is required');
      }
      const authUser = request.user;
      // Only owners can import data
      if (authUser.role !== 'owner') {
        return reply.forbidden('Only server owners can import data');
      }
      const { serverId } = body.data;
      // Sync server users first to ensure we have all users before importing history
      try {
        app.log.info({ serverId }, 'Syncing server before Tautulli import');
        await syncServer(serverId, { syncUsers: true, syncLibraries: false });
        app.log.info({ serverId }, 'Server sync completed, enqueueing import');
      } catch (error) {
        app.log.error({ error, serverId }, 'Failed to sync server before import');
        return reply.internalServerError('Failed to sync server users before import');
      }
      // Enqueue import job
      try {
        const jobId = await enqueueImport(serverId, authUser.userId);
        return {
          status: 'queued',
          jobId,
          message:
            'Import queued. Use jobId to track progress via WebSocket or GET /import/tautulli/:jobId',
        };
      } catch (error) {
        if (error instanceof Error && error.message.includes('already in progress')) {
          return reply.conflict(error.message);
        }
        // Fallback to direct execution if queue is not available
        app.log.warn({ error }, 'Import queue unavailable, falling back to direct execution');
        const pubSubService = getPubSubService();
        // Start import in background (non-blocking). Use the app's structured
        // logger (not console.*) for consistency with the rest of the routes.
        TautulliService.importHistory(serverId, pubSubService ?? undefined)
          .then((result) => {
            app.log.info({ serverId, result }, 'Tautulli import completed (direct execution)');
          })
          .catch((err: unknown) => {
            app.log.error({ serverId, err }, 'Tautulli import failed (direct execution)');
          });
        return {
          status: 'started',
          message: 'Import started (direct execution). Watch for progress updates via WebSocket.',
        };
      }
    }
  );
  /**
   * GET /import/tautulli/active/:serverId - Get active import for a server (if any)
   * Use this to recover import status after page refresh
   */
  app.get<{ Params: { serverId: string } }>(
    '/tautulli/active/:serverId',
    { preHandler: [app.authenticate] },
    async (request, _reply) => {
      const { serverId } = request.params;
      const jobId = await getActiveImportForServer(serverId);
      if (!jobId) {
        return { active: false };
      }
      const status = await getImportStatus(jobId);
      if (!status) {
        return { active: false };
      }
      return { active: true, ...status };
    }
  );
  /**
   * GET /import/tautulli/:jobId - Get import job status
   */
  app.get<{ Params: { jobId: string } }>(
    '/tautulli/:jobId',
    { preHandler: [app.authenticate] },
    async (request, reply) => {
      const { jobId } = request.params;
      const status = await getImportStatus(jobId);
      if (!status) {
        return reply.notFound('Import job not found');
      }
      return status;
    }
  );
  /**
   * DELETE /import/tautulli/:jobId - Cancel import job (owner only)
   */
  app.delete<{ Params: { jobId: string } }>(
    '/tautulli/:jobId',
    { preHandler: [app.authenticate] },
    async (request, reply) => {
      const authUser = request.user;
      if (authUser.role !== 'owner') {
        return reply.forbidden('Only server owners can cancel imports');
      }
      const { jobId } = request.params;
      const cancelled = await cancelImport(jobId);
      if (!cancelled) {
        return reply.badRequest('Cannot cancel job (may be active or not found)');
      }
      return { status: 'cancelled', jobId };
    }
  );
  /**
   * GET /import/stats - Get import queue statistics
   */
  app.get('/stats', { preHandler: [app.authenticate] }, async (_request, reply) => {
    const stats = await getImportQueueStats();
    if (!stats) {
      return reply.serviceUnavailable('Import queue not available');
    }
    return stats;
  });
  /**
   * POST /import/tautulli/test - Test Tautulli connection (owner only)
   *
   * Verifies connectivity and basic API access (user list + history count).
   * Errors are reported in the response body rather than thrown.
   */
  app.post(
    '/tautulli/test',
    { preHandler: [app.authenticate] },
    async (request, reply) => {
      const authUser = request.user;
      // Only owners can test connection
      if (authUser.role !== 'owner') {
        return reply.forbidden('Only server owners can test Tautulli connection');
      }
      const body = request.body as { url?: string; apiKey?: string } | undefined;
      if (!body?.url || !body?.apiKey) {
        return reply.badRequest('URL and API key are required');
      }
      try {
        const tautulli = new TautulliService(body.url, body.apiKey);
        const connected = await tautulli.testConnection();
        if (connected) {
          // Get user count to verify full access
          const users = await tautulli.getUsers();
          const { total } = await tautulli.getHistory(0, 1);
          return {
            success: true,
            message: 'Connection successful',
            users: users.length,
            historyRecords: total,
          };
        } else {
          return {
            success: false,
            message: 'Connection failed. Please check URL and API key.',
          };
        }
      } catch (error) {
        return {
          success: false,
          message: error instanceof Error ? error.message : 'Connection failed',
        };
      }
    }
  );
};

View File

@@ -0,0 +1,919 @@
/**
* Mobile routes - Mobile app pairing, authentication, and session management
*
* Settings endpoints (owner only):
* - GET /mobile - Get mobile config (enabled status, sessions)
* - POST /mobile/enable - Enable mobile access
* - POST /mobile/disable - Disable mobile access
* - POST /mobile/pair-token - Generate one-time pairing token
* - DELETE /mobile/sessions - Revoke all mobile sessions
* - DELETE /mobile/sessions/:id - Revoke single mobile session
*
* Auth endpoints (mobile app):
* - POST /mobile/pair - Exchange pairing token for JWT
* - POST /mobile/refresh - Refresh mobile JWT
* - POST /mobile/push-token - Register push token
*
* Stream management (admin/owner via mobile):
* - POST /mobile/streams/:id/terminate - Terminate a playback session
*/
import type { FastifyPluginAsync } from 'fastify';
import { createHash, randomBytes } from 'crypto';
import { eq, and, gt, isNull, sql } from 'drizzle-orm';
import { z } from 'zod';
import type { MobileConfig, MobileSession, MobilePairResponse, MobilePairTokenResponse } from '@tracearr/shared';
import { REDIS_KEYS, CACHE_TTL, sessionIdParamSchema, terminateSessionBodySchema } from '@tracearr/shared';
import { db } from '../db/client.js';
import { mobileTokens, mobileSessions, servers, users, settings, sessions } from '../db/schema.js';
import { terminateSession } from '../services/termination.js';
import { hasServerAccess } from '../utils/serverFiltering.js';
// Rate limits for mobile auth endpoints.
// These are attempt counts; the 15-minute window is enforced by whatever
// rate limiter consumes them — presumably a fastify rate-limit config, verify.
const MOBILE_PAIR_MAX_ATTEMPTS = 5; // 5 attempts per 15 minutes
const MOBILE_REFRESH_MAX_ATTEMPTS = 30; // 30 attempts per 15 minutes
// Beta mode: allows reusable tokens, no expiry, unlimited devices
// Useful for TestFlight/beta testing where you need to share a single token
// Using a function to allow dynamic checking (useful for testing)
function isBetaMode(): boolean {
  // Read the env var on every call so tests (and runtime config changes)
  // can flip the flag without re-importing the module.
  const flag = process.env.MOBILE_BETA_MODE;
  return flag === 'true';
}
// Limits
const MAX_PAIRED_DEVICES = 5; // cap on paired devices (lifted in beta mode per startup log)
const MAX_PENDING_TOKENS = 3; // max unexpired, unused pairing tokens at once
const TOKEN_EXPIRY_MINUTES = 15; // normal pairing-token lifetime
const BETA_TOKEN_EXPIRY_YEARS = 100; // Effectively never expires
const TOKEN_GEN_RATE_LIMIT = 3; // Max tokens per 5 minutes
const TOKEN_GEN_RATE_WINDOW = 5 * 60; // 5 minutes in seconds
// Token format: trr_mob_<32 random bytes as base64url>
const MOBILE_TOKEN_PREFIX = 'trr_mob_';
// Redis key prefixes for mobile refresh tokens
const MOBILE_REFRESH_PREFIX = 'tracearr:mobile_refresh:';
const MOBILE_REFRESH_TTL = 90 * 24 * 60 * 60; // 90 days
// Mobile JWT expiry (longer than web)
const MOBILE_ACCESS_EXPIRY = '7d';
// Schemas
// Request body for POST /mobile/pair: one-time token plus device identity.
const mobilePairSchema = z.object({
  token: z.string().min(1),
  deviceName: z.string().min(1).max(100),
  deviceId: z.string().min(1).max(100),
  platform: z.enum(['ios', 'android']),
  deviceSecret: z.string().min(32).max(64).optional(), // Base64-encoded device secret for push encryption
});
// Request body for POST /mobile/refresh.
const mobileRefreshSchema = z.object({
  refreshToken: z.string().min(1),
});
// Request body for POST /mobile/push-token; token must match Expo's format.
const pushTokenSchema = z.object({
  expoPushToken: z.string().min(1).regex(/^ExponentPushToken\[.+\]$/, 'Invalid Expo push token format'),
  deviceSecret: z.string().min(32).max(64).optional(), // Update device secret for push encryption
});
/**
 * Generate a new mobile access token: the trr_mob_ prefix followed by
 * 32 cryptographically random bytes encoded as base64url.
 */
function generateMobileToken(): string {
  const secret = randomBytes(32).toString('base64url');
  return MOBILE_TOKEN_PREFIX + secret;
}
/**
 * Hash a token with SHA-256 (hex output) so only digests — never raw
 * tokens — are persisted.
 */
function hashToken(token: string): string {
  const hasher = createHash('sha256');
  hasher.update(token);
  return hasher.digest('hex');
}
/**
 * Generate an opaque refresh token: 32 random bytes, hex-encoded (64 chars).
 */
function generateRefreshToken(): string {
  const tokenBytes = randomBytes(32);
  return tokenBytes.toString('hex');
}
/**
 * Mobile routes: owner-facing device management (enable/disable, pairing
 * tokens, session revocation) plus the mobile app's own pairing, token
 * refresh, push registration, and stream-termination endpoints.
 *
 * Security model: pairing tokens and refresh tokens are stored only as
 * SHA-256 hashes in Postgres; live refresh tokens are mirrored in Redis
 * under MOBILE_REFRESH_PREFIX with a TTL, and rate limiting uses atomic
 * INCR+EXPIRE Lua scripts so counters cannot leak without an expiry.
 */
export const mobileRoutes: FastifyPluginAsync = async (app) => {
  // Log beta mode status on startup
  if (isBetaMode()) {
    app.log.warn('MOBILE_BETA_MODE enabled: tokens are reusable, never expire, unlimited devices allowed');
  }
  // ============================================
  // Settings endpoints (owner only)
  // ============================================
  /**
   * GET /mobile - Get mobile config
   */
  app.get('/', { preHandler: [app.authenticate] }, async (request, reply) => {
    const authUser = request.user;
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can access mobile settings');
    }
    // Get mobile enabled status from settings
    const settingsRow = await db.select({ mobileEnabled: settings.mobileEnabled }).from(settings).limit(1);
    const isEnabled = settingsRow[0]?.mobileEnabled ?? false;
    // Get mobile sessions
    const sessionsRows = await db.select().from(mobileSessions);
    // Count pending tokens (unexpired and unused)
    const pendingTokensResult = await db
      .select({ count: sql<number>`count(*)::int` })
      .from(mobileTokens)
      .where(
        and(
          gt(mobileTokens.expiresAt, new Date()),
          isNull(mobileTokens.usedAt)
        )
      );
    const pendingTokens = pendingTokensResult[0]?.count ?? 0;
    // Get server name
    const serverRow = await db.select({ name: servers.name }).from(servers).limit(1);
    const serverName = serverRow[0]?.name || 'Tracearr';
    const sessions: MobileSession[] = sessionsRows.map((s) => ({
      id: s.id,
      deviceName: s.deviceName,
      deviceId: s.deviceId,
      platform: s.platform,
      expoPushToken: s.expoPushToken,
      lastSeenAt: s.lastSeenAt,
      createdAt: s.createdAt,
    }));
    const config: MobileConfig = {
      isEnabled,
      sessions,
      serverName,
      pendingTokens,
      maxDevices: MAX_PAIRED_DEVICES,
    };
    return config;
  });
  /**
   * POST /mobile/enable - Enable mobile access (no token generated)
   */
  app.post('/enable', { preHandler: [app.authenticate] }, async (request, reply) => {
    const authUser = request.user;
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can enable mobile access');
    }
    // Update settings to enable mobile
    await db
      .update(settings)
      .set({ mobileEnabled: true, updatedAt: new Date() })
      .where(eq(settings.id, 1));
    // Get current state for response
    const sessionsRows = await db.select().from(mobileSessions);
    const serverRow = await db.select({ name: servers.name }).from(servers).limit(1);
    const serverName = serverRow[0]?.name || 'Tracearr';
    const sessions: MobileSession[] = sessionsRows.map((s) => ({
      id: s.id,
      deviceName: s.deviceName,
      deviceId: s.deviceId,
      platform: s.platform,
      expoPushToken: s.expoPushToken,
      lastSeenAt: s.lastSeenAt,
      createdAt: s.createdAt,
    }));
    const config: MobileConfig = {
      isEnabled: true,
      sessions,
      serverName,
      pendingTokens: 0,
      maxDevices: MAX_PAIRED_DEVICES,
    };
    app.log.info({ userId: authUser.userId }, 'Mobile access enabled');
    return config;
  });
  /**
   * POST /mobile/pair-token - Generate a one-time pairing token
   *
   * Rate limited: 3 tokens per 5 minutes per user
   * Max pending tokens: 3
   * Max paired devices: 5
   */
  app.post('/pair-token', { preHandler: [app.authenticate] }, async (request, reply) => {
    const authUser = request.user;
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can generate pairing tokens');
    }
    // Check if mobile is enabled
    const settingsRow = await db.select({ mobileEnabled: settings.mobileEnabled }).from(settings).limit(1);
    if (!settingsRow[0]?.mobileEnabled) {
      return reply.badRequest('Mobile access is not enabled');
    }
    // Rate limiting: max 3 tokens per 5 minutes
    // Use Lua script for atomic INCR + EXPIRE operation
    const rateLimitKey = `mobile_token_gen:${authUser.userId}`;
    const luaScript = `
      local current = redis.call('INCR', KEYS[1])
      if current == 1 then
        redis.call('EXPIRE', KEYS[1], ARGV[1])
      end
      return current
    `;
    const currentCount = await app.redis.eval(luaScript, 1, rateLimitKey, TOKEN_GEN_RATE_WINDOW) as number;
    if (currentCount > TOKEN_GEN_RATE_LIMIT) {
      const ttl = await app.redis.ttl(rateLimitKey);
      reply.header('Retry-After', String(ttl > 0 ? ttl : TOKEN_GEN_RATE_WINDOW));
      return reply.tooManyRequests('Too many token generation attempts. Please try again later.');
    }
    // Use transaction to prevent race conditions on device and token limit checks
    let plainToken: string;
    let expiresAt: Date;
    try {
      const result = await db.transaction(async (tx) => {
        // Set serializable isolation level to prevent phantom reads
        await tx.execute(sql`SET TRANSACTION ISOLATION LEVEL SERIALIZABLE`);
        // Check max pending tokens (within transaction for consistency)
        const pendingTokensResult = await tx
          .select({ count: sql<number>`count(*)::int` })
          .from(mobileTokens)
          .where(
            and(
              gt(mobileTokens.expiresAt, new Date()),
              isNull(mobileTokens.usedAt)
            )
          );
        const pendingCount = pendingTokensResult[0]?.count ?? 0;
        if (pendingCount >= MAX_PENDING_TOKENS) {
          throw new Error('MAX_PENDING_TOKENS');
        }
        // Check max paired devices (within transaction to prevent race condition)
        const sessionsCount = await tx
          .select({ count: sql<number>`count(*)::int` })
          .from(mobileSessions);
        const deviceCount = sessionsCount[0]?.count ?? 0;
        // In beta mode, allow unlimited devices
        if (!isBetaMode() && deviceCount >= MAX_PAIRED_DEVICES) {
          throw new Error('MAX_PAIRED_DEVICES');
        }
        // Generate token
        const token = generateMobileToken();
        const tokenHash = hashToken(token);
        // In beta mode, tokens effectively never expire
        const expiryMs = isBetaMode()
          ? BETA_TOKEN_EXPIRY_YEARS * 365 * 24 * 60 * 60 * 1000
          : TOKEN_EXPIRY_MINUTES * 60 * 1000;
        const expires = new Date(Date.now() + expiryMs);
        await tx.insert(mobileTokens).values({
          tokenHash,
          expiresAt: expires,
          createdBy: authUser.userId,
        });
        return { token, expires };
      });
      plainToken = result.token;
      expiresAt = result.expires;
    } catch (err) {
      // Sentinel error messages thrown inside the transaction map to
      // user-facing 400s; anything else is an unexpected failure.
      const message = err instanceof Error ? err.message : 'Unknown error';
      if (message === 'MAX_PENDING_TOKENS') {
        return reply.badRequest(
          `Maximum of ${MAX_PENDING_TOKENS} pending tokens allowed. Wait for expiry or use an existing token.`
        );
      }
      if (message === 'MAX_PAIRED_DEVICES') {
        return reply.badRequest(
          `Maximum of ${MAX_PAIRED_DEVICES} devices allowed. Remove a device first.`
        );
      }
      app.log.error({ err }, 'Token generation transaction failed');
      return reply.internalServerError('Failed to generate token. Please try again.');
    }
    app.log.info({ userId: authUser.userId }, 'Mobile pairing token generated');
    const response: MobilePairTokenResponse = {
      token: plainToken,
      expiresAt,
    };
    return response;
  });
  /**
   * POST /mobile/disable - Disable mobile access
   */
  app.post('/disable', { preHandler: [app.authenticate] }, async (request, reply) => {
    const authUser = request.user;
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can disable mobile access');
    }
    // Disable in settings
    await db
      .update(settings)
      .set({ mobileEnabled: false, updatedAt: new Date() })
      .where(eq(settings.id, 1));
    // Revoke all mobile sessions (delete from DB and Redis)
    const sessionsRows = await db.select().from(mobileSessions);
    for (const session of sessionsRows) {
      await app.redis.del(`${MOBILE_REFRESH_PREFIX}${session.refreshTokenHash}`);
    }
    await db.delete(mobileSessions);
    // Delete all pending tokens
    await db.delete(mobileTokens);
    app.log.info({ userId: authUser.userId }, 'Mobile access disabled');
    return { success: true };
  });
  /**
   * DELETE /mobile/sessions - Revoke all mobile sessions
   */
  app.delete('/sessions', { preHandler: [app.authenticate] }, async (request, reply) => {
    const authUser = request.user;
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can revoke mobile sessions');
    }
    // Delete all sessions from Redis and DB
    const sessionsRows = await db.select().from(mobileSessions);
    for (const session of sessionsRows) {
      await app.redis.del(`${MOBILE_REFRESH_PREFIX}${session.refreshTokenHash}`);
    }
    await db.delete(mobileSessions);
    app.log.info({ userId: authUser.userId, count: sessionsRows.length }, 'All mobile sessions revoked');
    return { success: true, revokedCount: sessionsRows.length };
  });
  /**
   * DELETE /mobile/sessions/:id - Revoke a single mobile session
   */
  app.delete('/sessions/:id', { preHandler: [app.authenticate] }, async (request, reply) => {
    const authUser = request.user;
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can revoke mobile sessions');
    }
    const { id } = request.params as { id: string };
    // Validate UUID format
    const uuidRegex = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
    if (!uuidRegex.test(id)) {
      return reply.badRequest('Invalid session ID format');
    }
    // Find the session
    const sessionRow = await db
      .select()
      .from(mobileSessions)
      .where(eq(mobileSessions.id, id))
      .limit(1);
    if (sessionRow.length === 0) {
      return reply.notFound('Mobile session not found');
    }
    const session = sessionRow[0]!;
    // Delete refresh token from Redis
    await app.redis.del(`${MOBILE_REFRESH_PREFIX}${session.refreshTokenHash}`);
    // Delete session from DB (notification_preferences cascade-deleted via FK)
    await db.delete(mobileSessions).where(eq(mobileSessions.id, id));
    app.log.info(
      { userId: authUser.userId, sessionId: id, deviceName: session.deviceName },
      'Mobile session revoked'
    );
    return { success: true };
  });
  // ============================================
  // Auth endpoints (mobile app)
  // ============================================
  /**
   * POST /mobile/pair - Exchange pairing token for JWT
   *
   * Rate limited: 5 attempts per IP per 15 minutes to prevent brute force
   */
  app.post('/pair', async (request, reply) => {
    // Rate limiting check - use Lua script for atomic INCR + EXPIRE
    const clientIp = request.ip;
    const rateLimitKey = REDIS_KEYS.RATE_LIMIT_MOBILE_PAIR(clientIp);
    const luaScript = `
      local current = redis.call('INCR', KEYS[1])
      if current == 1 then
        redis.call('EXPIRE', KEYS[1], ARGV[1])
      end
      return current
    `;
    const currentCount = await app.redis.eval(luaScript, 1, rateLimitKey, CACHE_TTL.RATE_LIMIT) as number;
    if (currentCount > MOBILE_PAIR_MAX_ATTEMPTS) {
      const ttl = await app.redis.ttl(rateLimitKey);
      app.log.warn({ ip: clientIp, count: currentCount }, 'Mobile pair rate limit exceeded');
      reply.header('Retry-After', String(ttl > 0 ? ttl : CACHE_TTL.RATE_LIMIT));
      return reply.tooManyRequests('Too many pairing attempts. Please try again later.');
    }
    const body = mobilePairSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid pairing request');
    }
    const { token, deviceName, deviceId, platform, deviceSecret } = body.data;
    // Verify token starts with correct prefix
    if (!token.startsWith(MOBILE_TOKEN_PREFIX)) {
      return reply.unauthorized('Invalid mobile token');
    }
    const tokenHash = hashToken(token);
    // Check max devices before attempting pair
    // NOTE(review): this limit check runs outside the pairing transaction
    // below, so two simultaneous pair requests could briefly exceed
    // MAX_PAIRED_DEVICES — confirm whether that window is acceptable.
    const sessionsCount = await db
      .select({ count: sql<number>`count(*)::int` })
      .from(mobileSessions);
    const deviceCount = sessionsCount[0]?.count ?? 0;
    // Check if this device is already paired (would be an update, not new)
    const existingSession = await db
      .select()
      .from(mobileSessions)
      .where(eq(mobileSessions.deviceId, deviceId))
      .limit(1);
    // In beta mode, allow unlimited devices
    if (!isBetaMode() && existingSession.length === 0 && deviceCount >= MAX_PAIRED_DEVICES) {
      return reply.badRequest(
        `Maximum of ${MAX_PAIRED_DEVICES} devices allowed. Remove a device first.`
      );
    }
    // Use transaction with row-level locking to prevent race conditions
    let result: {
      accessToken: string;
      refreshToken: string;
      owner: { id: string; username: string };
      serverName: string;
      serverId: string;
      serverType: 'plex' | 'jellyfin' | 'emby';
      serverIds: string[];
      oldRefreshTokenHash?: string; // Track old hash for cleanup outside transaction
    };
    try {
      result = await db.transaction(async (tx) => {
        // Set serializable isolation level to prevent phantom reads
        await tx.execute(sql`SET TRANSACTION ISOLATION LEVEL SERIALIZABLE`);
        // Lock and validate token
        const tokenRows = await tx
          .select()
          .from(mobileTokens)
          .where(eq(mobileTokens.tokenHash, tokenHash))
          .for('update')
          .limit(1);
        if (tokenRows.length === 0) {
          throw new Error('INVALID_TOKEN');
        }
        const tokenRow = tokenRows[0]!;
        // In beta mode, allow tokens to be reused
        if (tokenRow.usedAt && !isBetaMode()) {
          throw new Error('TOKEN_ALREADY_USED');
        }
        if (tokenRow.expiresAt < new Date()) {
          throw new Error('TOKEN_EXPIRED');
        }
        // Get the owner user
        const ownerRow = await tx
          .select()
          .from(users)
          .where(eq(users.role, 'owner'))
          .limit(1);
        if (ownerRow.length === 0) {
          throw new Error('NO_OWNER');
        }
        const owner = ownerRow[0]!;
        // Get all server IDs for the JWT
        const allServers = await tx.select({ id: servers.id, name: servers.name, type: servers.type }).from(servers);
        const serverIds = allServers.map((s) => s.id);
        // Get primary server info for the response (first server)
        const primaryServer = allServers[0];
        const serverName = primaryServer?.name || 'Tracearr';
        const serverId = primaryServer?.id || '';
        const serverType = primaryServer?.type || 'plex';
        // Generate refresh token
        // Only the SHA-256 hash is persisted; the plaintext is returned to
        // the device exactly once in the pair response.
        const newRefreshToken = generateRefreshToken();
        const refreshTokenHash = hashToken(newRefreshToken);
        // Track old refresh token hash for cleanup (if updating existing session)
        let oldHash: string | undefined;
        // Create or update session
        if (existingSession.length > 0) {
          // Update existing session - save old hash for cleanup outside transaction
          oldHash = existingSession[0]!.refreshTokenHash;
          await tx
            .update(mobileSessions)
            .set({
              refreshTokenHash,
              deviceName,
              platform,
              deviceSecret: deviceSecret ?? null,
              lastSeenAt: new Date(),
              // Update userId in case the token creator changed
              userId: owner.id,
            })
            .where(eq(mobileSessions.id, existingSession[0]!.id));
        } else {
          // Create new session - link to the owner user who generated the pairing token
          await tx.insert(mobileSessions).values({
            refreshTokenHash,
            deviceName,
            deviceId,
            platform,
            deviceSecret: deviceSecret ?? null,
            userId: owner.id,
          });
        }
        // Mark token as used (not deleted - for audit trail)
        // In beta mode, don't mark as used so token can be reused
        if (!isBetaMode()) {
          await tx
            .update(mobileTokens)
            .set({ usedAt: new Date() })
            .where(eq(mobileTokens.id, tokenRow.id));
        }
        // Generate access token
        const accessToken = app.jwt.sign(
          {
            userId: owner.id,
            username: owner.username,
            role: 'owner',
            serverIds,
            mobile: true,
            deviceId,
          },
          { expiresIn: MOBILE_ACCESS_EXPIRY }
        );
        return {
          accessToken,
          refreshToken: newRefreshToken,
          owner: { id: owner.id, username: owner.username },
          serverName,
          serverId,
          serverType,
          serverIds,
          oldRefreshTokenHash: oldHash,
        };
      });
    } catch (err) {
      // Map sentinel errors thrown inside the transaction to HTTP responses.
      const message = err instanceof Error ? err.message : 'Unknown error';
      if (message === 'INVALID_TOKEN') {
        return reply.unauthorized('Invalid mobile token');
      }
      if (message === 'TOKEN_ALREADY_USED') {
        return reply.unauthorized('This pairing token has already been used');
      }
      if (message === 'TOKEN_EXPIRED') {
        return reply.unauthorized('This pairing token has expired');
      }
      if (message === 'NO_OWNER') {
        return reply.internalServerError('No owner account found');
      }
      app.log.error({ err }, 'Mobile pairing transaction failed');
      return reply.internalServerError('Pairing failed. Please try again.');
    }
    // Redis operations AFTER transaction commits (to prevent inconsistency on rollback)
    // Delete old refresh token from Redis if we updated an existing session
    if (result.oldRefreshTokenHash) {
      await app.redis.del(`${MOBILE_REFRESH_PREFIX}${result.oldRefreshTokenHash}`);
    }
    // Store new refresh token in Redis
    await app.redis.setex(
      `${MOBILE_REFRESH_PREFIX}${hashToken(result.refreshToken)}`,
      MOBILE_REFRESH_TTL,
      JSON.stringify({ userId: result.owner.id, deviceId })
    );
    app.log.info({ deviceName, platform, deviceId }, 'Mobile device paired');
    const response: MobilePairResponse = {
      accessToken: result.accessToken,
      refreshToken: result.refreshToken,
      server: {
        id: result.serverId,
        name: result.serverName,
        type: result.serverType,
      },
      user: {
        userId: result.owner.id,
        username: result.owner.username,
        role: 'owner',
      },
    };
    return response;
  });
  /**
   * POST /mobile/refresh - Refresh mobile JWT
   *
   * Rate limited: 30 attempts per IP per 15 minutes to prevent abuse
   */
  app.post('/refresh', async (request, reply) => {
    // Rate limiting check - use Lua script for atomic INCR + EXPIRE
    const clientIp = request.ip;
    const rateLimitKey = REDIS_KEYS.RATE_LIMIT_MOBILE_REFRESH(clientIp);
    const luaScript = `
      local current = redis.call('INCR', KEYS[1])
      if current == 1 then
        redis.call('EXPIRE', KEYS[1], ARGV[1])
      end
      return current
    `;
    const currentCount = await app.redis.eval(luaScript, 1, rateLimitKey, CACHE_TTL.RATE_LIMIT) as number;
    if (currentCount > MOBILE_REFRESH_MAX_ATTEMPTS) {
      const ttl = await app.redis.ttl(rateLimitKey);
      app.log.warn({ ip: clientIp, count: currentCount }, 'Mobile refresh rate limit exceeded');
      reply.header('Retry-After', String(ttl > 0 ? ttl : CACHE_TTL.RATE_LIMIT));
      return reply.tooManyRequests('Too many refresh attempts. Please try again later.');
    }
    const body = mobileRefreshSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid refresh request');
    }
    const { refreshToken } = body.data;
    const refreshTokenHash = hashToken(refreshToken);
    // Check Redis for valid refresh token
    const stored = await app.redis.get(`${MOBILE_REFRESH_PREFIX}${refreshTokenHash}`);
    if (!stored) {
      return reply.unauthorized('Invalid or expired refresh token');
    }
    const { userId, deviceId } = JSON.parse(stored) as { userId: string; deviceId: string };
    // Verify user still exists and is owner
    const userRow = await db.select().from(users).where(eq(users.id, userId)).limit(1);
    if (userRow.length === 0 || userRow[0]!.role !== 'owner') {
      await app.redis.del(`${MOBILE_REFRESH_PREFIX}${refreshTokenHash}`);
      return reply.unauthorized('User no longer valid');
    }
    const user = userRow[0]!;
    // Verify mobile session still exists
    const sessionRow = await db
      .select()
      .from(mobileSessions)
      .where(eq(mobileSessions.refreshTokenHash, refreshTokenHash))
      .limit(1);
    if (sessionRow.length === 0) {
      await app.redis.del(`${MOBILE_REFRESH_PREFIX}${refreshTokenHash}`);
      return reply.unauthorized('Session has been revoked');
    }
    // Get all server IDs
    const allServers = await db.select({ id: servers.id }).from(servers);
    const serverIds = allServers.map((s) => s.id);
    // Generate new access token
    const accessToken = app.jwt.sign(
      {
        userId: user.id,
        username: user.username,
        role: 'owner',
        serverIds,
        mobile: true,
        deviceId, // Device identifier for session targeting
      },
      { expiresIn: MOBILE_ACCESS_EXPIRY }
    );
    // Rotate refresh token
    const newRefreshToken = generateRefreshToken();
    const newRefreshTokenHash = hashToken(newRefreshToken);
    // Update session with new refresh token
    await db
      .update(mobileSessions)
      .set({
        refreshTokenHash: newRefreshTokenHash,
        lastSeenAt: new Date(),
      })
      .where(eq(mobileSessions.id, sessionRow[0]!.id));
    // Update Redis
    // NOTE(review): the DB update above and this Redis del/setex pair are
    // not atomic; a crash in between strands the session until the device
    // re-pairs. Confirm this failure mode is acceptable.
    await app.redis.del(`${MOBILE_REFRESH_PREFIX}${refreshTokenHash}`);
    await app.redis.setex(
      `${MOBILE_REFRESH_PREFIX}${newRefreshTokenHash}`,
      MOBILE_REFRESH_TTL,
      JSON.stringify({ userId, deviceId })
    );
    return {
      accessToken,
      refreshToken: newRefreshToken,
    };
  });
  /**
   * POST /mobile/push-token - Register/update Expo push token for notifications
   */
  app.post('/push-token', { preHandler: [app.requireMobile] }, async (request, reply) => {
    const body = pushTokenSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid push token format. Expected ExponentPushToken[...]');
    }
    const { expoPushToken, deviceSecret } = body.data;
    const authUser = request.user;
    // Ensure we have deviceId from JWT (required for mobile tokens)
    if (!authUser.deviceId) {
      return reply.badRequest('Invalid mobile token: missing deviceId. Please re-pair the device.');
    }
    // Build update object (only include deviceSecret if provided)
    const updateData: { expoPushToken: string; lastSeenAt: Date; deviceSecret?: string } = {
      expoPushToken,
      lastSeenAt: new Date(),
    };
    if (deviceSecret) {
      updateData.deviceSecret = deviceSecret;
    }
    // Update only the specific device session identified by deviceId
    const updated = await db
      .update(mobileSessions)
      .set(updateData)
      .where(eq(mobileSessions.deviceId, authUser.deviceId))
      .returning({ id: mobileSessions.id });
    if (updated.length === 0) {
      return reply.notFound('No mobile session found for this device. Please pair the device first.');
    }
    app.log.info(
      { userId: authUser.userId, deviceId: authUser.deviceId },
      'Push token registered for mobile session'
    );
    return { success: true, updatedSessions: updated.length };
  });
  // ============================================================================
  // Stream Management Endpoints (admin/owner via mobile)
  // ============================================================================
  /**
   * POST /mobile/streams/:id/terminate - Terminate a playback session
   *
   * Requires mobile authentication with admin/owner role.
   * Sends a stop command to the media server and logs the termination.
   */
  app.post(
    '/streams/:id/terminate',
    { preHandler: [app.requireMobile] },
    async (request, reply) => {
      const params = sessionIdParamSchema.safeParse(request.params);
      if (!params.success) {
        return reply.badRequest('Invalid session ID');
      }
      const body = terminateSessionBodySchema.safeParse(request.body);
      if (!body.success) {
        return reply.badRequest('Invalid request body');
      }
      const { id } = params.data;
      const { reason } = body.data;
      const authUser = request.user;
      // Only admins and owners can terminate sessions
      if (authUser.role !== 'owner' && authUser.role !== 'admin') {
        return reply.forbidden('Only administrators can terminate sessions');
      }
      // Verify the session exists and user has access to its server
      const session = await db
        .select({
          id: sessions.id,
          serverId: sessions.serverId,
          serverUserId: sessions.serverUserId,
          state: sessions.state,
        })
        .from(sessions)
        .where(eq(sessions.id, id))
        .limit(1);
      const sessionData = session[0];
      if (!sessionData) {
        return reply.notFound('Session not found');
      }
      if (!hasServerAccess(authUser, sessionData.serverId)) {
        return reply.forbidden('You do not have access to this server');
      }
      // Check if session is already stopped
      if (sessionData.state === 'stopped') {
        return reply.conflict('Session has already ended');
      }
      // Attempt termination
      const result = await terminateSession({
        sessionId: id,
        trigger: 'manual',
        triggeredByUserId: authUser.userId,
        reason,
      });
      if (!result.success) {
        app.log.error(
          { sessionId: id, error: result.error, terminationLogId: result.terminationLogId },
          'Failed to terminate session from mobile'
        );
        return reply.code(500).send({
          success: false,
          error: result.error,
          terminationLogId: result.terminationLogId,
        });
      }
      app.log.info(
        { sessionId: id, userId: authUser.userId, deviceId: authUser.deviceId },
        'Session terminated from mobile app'
      );
      return {
        success: true,
        terminationLogId: result.terminationLogId,
        message: 'Stream termination command sent successfully',
      };
    }
  );
};

View File

@@ -0,0 +1,328 @@
/**
* Notification Preferences routes - Per-device notification configuration
*
* Mobile device endpoints:
* - GET /notifications/preferences - Get preferences for current device
* - PATCH /notifications/preferences - Update preferences for current device
*/
import type { FastifyPluginAsync } from 'fastify';
import { eq, desc } from 'drizzle-orm';
import { z } from 'zod';
import type { NotificationPreferences, NotificationPreferencesWithStatus } from '@tracearr/shared';
import { db } from '../db/client.js';
import { mobileSessions, notificationPreferences } from '../db/schema.js';
import { getPushRateLimiter } from '../services/pushRateLimiter.js';
// Update preferences schema
// All fields optional: PATCH semantics — only keys present in the body are
// applied. quietHoursStart/quietHoursEnd additionally accept null so a
// client can clear the quiet-hours window; times are "HH:MM" strings.
const updatePreferencesSchema = z.object({
  pushEnabled: z.boolean().optional(),
  onViolationDetected: z.boolean().optional(),
  onStreamStarted: z.boolean().optional(),
  onStreamStopped: z.boolean().optional(),
  onConcurrentStreams: z.boolean().optional(),
  onNewDevice: z.boolean().optional(),
  onTrustScoreChanged: z.boolean().optional(),
  onServerDown: z.boolean().optional(),
  onServerUp: z.boolean().optional(),
  violationMinSeverity: z.number().int().min(1).max(3).optional(),
  violationRuleTypes: z.array(z.string()).optional(),
  maxPerMinute: z.number().int().min(1).max(60).optional(),
  maxPerHour: z.number().int().min(1).max(1000).optional(),
  quietHoursEnabled: z.boolean().optional(),
  quietHoursStart: z.string().regex(/^\d{2}:\d{2}$/).optional().nullable(),
  quietHoursEnd: z.string().regex(/^\d{2}:\d{2}$/).optional().nullable(),
  quietHoursTimezone: z.string().max(50).optional(),
  quietHoursOverrideCritical: z.boolean().optional(),
});
/**
 * Look up the mobile session bound to a deviceId from the JWT claims.
 * Mobile JWTs carry deviceId so notifications target the right device.
 *
 * @returns the session id, or null if the device has never paired.
 */
async function findMobileSessionByDeviceId(deviceId: string): Promise<{ id: string } | null> {
  const [match] = await db
    .select({ id: mobileSessions.id })
    .from(mobileSessions)
    .where(eq(mobileSessions.deviceId, deviceId))
    .limit(1);
  return match ?? null;
}
/**
 * Fallback lookup for legacy mobile tokens that carry no deviceId.
 *
 * NOTE(review): `_userId` is intentionally unused — the query returns the
 * most recently seen session regardless of user. Confirm this is acceptable
 * if multiple owner accounts ever exist.
 */
async function findMobileSessionForUserFallback(_userId: string): Promise<{ id: string } | null> {
  const [latest] = await db
    .select({ id: mobileSessions.id })
    .from(mobileSessions)
    .orderBy(desc(mobileSessions.lastSeenAt))
    .limit(1);
  return latest ?? null;
}
/**
 * Transform DB row to API response
 *
 * Field-for-field copy of the notification_preferences row, with nullable
 * columns normalized for clients: a missing rule-type filter becomes an
 * empty array and a missing timezone defaults to 'UTC'.
 */
function toApiResponse(row: typeof notificationPreferences.$inferSelect): NotificationPreferences {
  return {
    id: row.id,
    mobileSessionId: row.mobileSessionId,
    pushEnabled: row.pushEnabled,
    onViolationDetected: row.onViolationDetected,
    onStreamStarted: row.onStreamStarted,
    onStreamStopped: row.onStreamStopped,
    onConcurrentStreams: row.onConcurrentStreams,
    onNewDevice: row.onNewDevice,
    onTrustScoreChanged: row.onTrustScoreChanged,
    onServerDown: row.onServerDown,
    onServerUp: row.onServerUp,
    violationMinSeverity: row.violationMinSeverity,
    violationRuleTypes: row.violationRuleTypes ?? [], // null column -> empty filter
    maxPerMinute: row.maxPerMinute,
    maxPerHour: row.maxPerHour,
    quietHoursEnabled: row.quietHoursEnabled,
    quietHoursStart: row.quietHoursStart,
    quietHoursEnd: row.quietHoursEnd,
    quietHoursTimezone: row.quietHoursTimezone ?? 'UTC', // null column -> UTC
    quietHoursOverrideCritical: row.quietHoursOverrideCritical,
    createdAt: row.createdAt,
    updatedAt: row.updatedAt,
  };
}
/**
 * Per-device notification preference routes (mobile-authenticated).
 *
 * Both endpoints resolve the caller's mobile session via the JWT deviceId
 * (falling back to the most-recent session for legacy tokens) and lazily
 * create a default preferences row on first access.
 */
export const notificationPreferencesRoutes: FastifyPluginAsync = async (app) => {
  /**
   * GET /notifications/preferences - Get preferences for current device
   *
   * Requires mobile authentication. Returns preferences for the device's session,
   * or creates default preferences if none exist. When the push rate limiter is
   * available, live rate-limit status from Redis is attached to the response.
   */
  app.get('/preferences', { preHandler: [app.requireMobile] }, async (request, reply) => {
    const authUser = request.user;
    // Find mobile session using deviceId (preferred) or fallback to user lookup
    const mobileSession = authUser.deviceId
      ? await findMobileSessionByDeviceId(authUser.deviceId)
      : await findMobileSessionForUserFallback(authUser.userId);
    if (!mobileSession) {
      return reply.notFound('No mobile session found. Please pair the device first.');
    }
    // Get or create preferences
    // NOTE(review): two concurrent first requests could both attempt this
    // insert — confirm a unique constraint exists on mobileSessionId.
    let prefsRow = await db
      .select()
      .from(notificationPreferences)
      .where(eq(notificationPreferences.mobileSessionId, mobileSession.id))
      .limit(1);
    if (prefsRow.length === 0) {
      // Create default preferences (column defaults supply every value)
      const inserted = await db
        .insert(notificationPreferences)
        .values({
          mobileSessionId: mobileSession.id,
        })
        .returning();
      prefsRow = inserted;
    }
    const row = prefsRow[0];
    if (!row) {
      return reply.internalServerError('Failed to load notification preferences');
    }
    // Get live rate limit status from Redis (optional — limiter may be absent)
    const prefs = toApiResponse(row);
    const rateLimiter = getPushRateLimiter();
    if (rateLimiter) {
      const status = await rateLimiter.getStatus(mobileSession.id, {
        maxPerMinute: prefs.maxPerMinute,
        maxPerHour: prefs.maxPerHour,
      });
      const response: NotificationPreferencesWithStatus = {
        ...prefs,
        rateLimitStatus: {
          remainingMinute: status.remainingMinute,
          remainingHour: status.remainingHour,
          resetMinuteIn: status.resetMinuteIn,
          resetHourIn: status.resetHourIn,
        },
      };
      return response;
    }
    return prefs;
  });
  /**
   * PATCH /notifications/preferences - Update preferences for current device
   *
   * Requires mobile authentication. Applies only the keys present in the
   * request body (PATCH semantics); explicit nulls for the quiet-hours
   * bounds clear the window.
   */
  app.patch('/preferences', { preHandler: [app.requireMobile] }, async (request, reply) => {
    const body = updatePreferencesSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid request body');
    }
    const authUser = request.user;
    // Find mobile session using deviceId (preferred) or fallback to user lookup
    const mobileSession = authUser.deviceId
      ? await findMobileSessionByDeviceId(authUser.deviceId)
      : await findMobileSessionForUserFallback(authUser.userId);
    if (!mobileSession) {
      return reply.notFound('No mobile session found. Please pair the device first.');
    }
    // Ensure preferences row exists
    let existing = await db
      .select()
      .from(notificationPreferences)
      .where(eq(notificationPreferences.mobileSessionId, mobileSession.id))
      .limit(1);
    if (existing.length === 0) {
      // Create with defaults first
      const inserted = await db
        .insert(notificationPreferences)
        .values({
          mobileSessionId: mobileSession.id,
        })
        .returning();
      existing = inserted;
    }
    const prefsId = existing[0]!.id;
    // Build the partial update from the validated body. zod omits absent
    // optional keys, and any key explicitly set to undefined is skipped,
    // so only fields the client actually sent are written; explicit nulls
    // (quietHoursStart/quietHoursEnd) pass through to clear the window.
    const updateData: Partial<typeof notificationPreferences.$inferInsert> = {
      updatedAt: new Date(),
    };
    for (const [key, value] of Object.entries(body.data)) {
      if (value !== undefined) {
        (updateData as Record<string, unknown>)[key] = value;
      }
    }
    // Update preferences
    await db
      .update(notificationPreferences)
      .set(updateData)
      .where(eq(notificationPreferences.id, prefsId));
    // Return updated preferences (re-read so DB-side defaults/triggers show)
    const updated = await db
      .select()
      .from(notificationPreferences)
      .where(eq(notificationPreferences.id, prefsId))
      .limit(1);
    const row = updated[0];
    if (!row) {
      return reply.internalServerError('Failed to update notification preferences');
    }
    app.log.info(
      { userId: authUser.userId, mobileSessionId: mobileSession.id },
      'Notification preferences updated'
    );
    return toApiResponse(row);
  });
};
/**
 * Get notification preferences for a specific mobile session (internal use)
 *
 * @returns the raw preferences row, or null when none has been created yet.
 */
export async function getPreferencesForSession(
  mobileSessionId: string
): Promise<typeof notificationPreferences.$inferSelect | null> {
  const [row] = await db
    .select()
    .from(notificationPreferences)
    .where(eq(notificationPreferences.mobileSessionId, mobileSessionId))
    .limit(1);
  return row ?? null;
}
/**
 * Get notification preferences for a push token (internal use by push service)
 *
 * Resolves the mobile session that registered the given Expo push token,
 * then delegates to getPreferencesForSession. Returns null when no session
 * holds the token.
 */
export async function getPreferencesForPushToken(
  expoPushToken: string
): Promise<typeof notificationPreferences.$inferSelect | null> {
  // Find the mobile session with this push token
  const [session] = await db
    .select({ id: mobileSessions.id })
    .from(mobileSessions)
    .where(eq(mobileSessions.expoPushToken, expoPushToken))
    .limit(1);
  if (!session) {
    return null;
  }
  return getPreferencesForSession(session.id);
}

View File

@@ -0,0 +1,320 @@
/**
* Rule management routes - CRUD for sharing detection rules
*/
import type { FastifyPluginAsync } from 'fastify';
import { eq, sql } from 'drizzle-orm';
import {
createRuleSchema,
updateRuleSchema,
ruleIdParamSchema,
} from '@tracearr/shared';
import { db } from '../db/client.js';
import { rules, serverUsers, violations, servers } from '../db/schema.js';
import { hasServerAccess } from '../utils/serverFiltering.js';
export const ruleRoutes: FastifyPluginAsync = async (app) => {
  // Shared column projection for rule queries: rule fields joined with the
  // owning server user's username and the server's id/name. The joined
  // columns are null for global rules (serverUserId = null).
  const ruleSelection = {
    id: rules.id,
    name: rules.name,
    type: rules.type,
    params: rules.params,
    serverUserId: rules.serverUserId,
    username: serverUsers.username,
    serverId: serverUsers.serverId,
    serverName: servers.name,
    isActive: rules.isActive,
    createdAt: rules.createdAt,
    updatedAt: rules.updatedAt,
  };

  /**
   * Fetch the ownership info (id, serverUserId, serverId) needed for access
   * checks on a single rule. Shared by PATCH and DELETE, which previously
   * duplicated this query verbatim.
   *
   * @returns The ownership row, or undefined when the rule does not exist.
   */
  async function fetchRuleOwnership(id: string) {
    const rows = await db
      .select({
        id: rules.id,
        serverUserId: rules.serverUserId,
        serverId: serverUsers.serverId,
      })
      .from(rules)
      .leftJoin(serverUsers, eq(rules.serverUserId, serverUsers.id))
      .where(eq(rules.id, id))
      .limit(1);
    return rows[0];
  }

  /**
   * GET /rules - List all rules.
   *
   * Rules can be:
   * - Global (serverUserId = null) - applies to all servers, visible to all
   * - User-specific (serverUserId set) - only visible if the caller has
   *   access to that user's server
   */
  app.get('/', { preHandler: [app.authenticate] }, async (request) => {
    const authUser = request.user;

    // All rules with their (optional) server user and server info.
    const ruleList = await db
      .select(ruleSelection)
      .from(rules)
      .leftJoin(serverUsers, eq(rules.serverUserId, serverUsers.id))
      .leftJoin(servers, eq(serverUsers.serverId, servers.id))
      .orderBy(rules.name);

    // Global rules are visible to everyone; user-specific rules require
    // access to the owning server.
    const filteredRules = ruleList.filter((rule) => {
      if (!rule.serverUserId) return true;
      if (!rule.serverId) return false; // Shouldn't happen, but defensive
      return hasServerAccess(authUser, rule.serverId);
    });

    return { data: filteredRules };
  });

  /**
   * POST /rules - Create a new rule.
   *
   * Owner-only. When the rule targets a specific server user, that user must
   * exist and the owner must have access to the user's server.
   * Responds 201 with the created rule row.
   */
  app.post('/', { preHandler: [app.authenticate] }, async (request, reply) => {
    const body = createRuleSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid request body');
    }

    const authUser = request.user;
    // Only owners can create rules
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can create rules');
    }

    const { name, type, params, serverUserId, isActive } = body.data;

    // For user-specific rules, verify the target user exists and the owner
    // has access to that user's server.
    if (serverUserId) {
      const serverUserRows = await db
        .select({
          id: serverUsers.id,
          serverId: serverUsers.serverId,
        })
        .from(serverUsers)
        .where(eq(serverUsers.id, serverUserId))
        .limit(1);

      const serverUser = serverUserRows[0];
      if (!serverUser) {
        return reply.notFound('Server user not found');
      }
      if (!hasServerAccess(authUser, serverUser.serverId)) {
        return reply.forbidden('You do not have access to this server');
      }
    }

    const inserted = await db
      .insert(rules)
      .values({
        name,
        type,
        params,
        serverUserId,
        isActive,
      })
      .returning();

    const rule = inserted[0];
    if (!rule) {
      return reply.internalServerError('Failed to create rule');
    }

    return reply.status(201).send(rule);
  });

  /**
   * GET /rules/:id - Get a specific rule, including its violation count.
   *
   * User-specific rules require access to the owning server.
   */
  app.get('/:id', { preHandler: [app.authenticate] }, async (request, reply) => {
    const params = ruleIdParamSchema.safeParse(request.params);
    if (!params.success) {
      return reply.badRequest('Invalid rule ID');
    }

    const { id } = params.data;
    const authUser = request.user;

    const ruleRows = await db
      .select(ruleSelection)
      .from(rules)
      .leftJoin(serverUsers, eq(rules.serverUserId, serverUsers.id))
      .leftJoin(servers, eq(serverUsers.serverId, servers.id))
      .where(eq(rules.id, id))
      .limit(1);

    const rule = ruleRows[0];
    if (!rule) {
      return reply.notFound('Rule not found');
    }

    // Check access for user-specific rules
    if (rule.serverUserId && rule.serverId && !hasServerAccess(authUser, rule.serverId)) {
      return reply.forbidden('You do not have access to this rule');
    }

    // Include how many violations this rule has produced.
    const violationCount = await db
      .select({ count: sql<number>`count(*)::int` })
      .from(violations)
      .where(eq(violations.ruleId, id));

    return {
      ...rule,
      violationCount: violationCount[0]?.count ?? 0,
    };
  });

  /**
   * PATCH /rules/:id - Update a rule's name, params, and/or active flag.
   *
   * Owner-only; user-specific rules additionally require access to the
   * owning server. Returns the updated rule row.
   */
  app.patch('/:id', { preHandler: [app.authenticate] }, async (request, reply) => {
    const params = ruleIdParamSchema.safeParse(request.params);
    if (!params.success) {
      return reply.badRequest('Invalid rule ID');
    }
    const body = updateRuleSchema.safeParse(request.body);
    if (!body.success) {
      return reply.badRequest('Invalid request body');
    }

    const { id } = params.data;
    const authUser = request.user;
    // Only owners can update rules
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can update rules');
    }

    const existingRule = await fetchRuleOwnership(id);
    if (!existingRule) {
      return reply.notFound('Rule not found');
    }
    // Check access for user-specific rules
    if (existingRule.serverUserId && existingRule.serverId && !hasServerAccess(authUser, existingRule.serverId)) {
      return reply.forbidden('You do not have access to this rule');
    }

    // Only fields present in the request are updated; updatedAt is always
    // refreshed.
    const updateData: Partial<{
      name: string;
      params: Record<string, unknown>;
      isActive: boolean;
      updatedAt: Date;
    }> = {
      updatedAt: new Date(),
    };
    if (body.data.name !== undefined) {
      updateData.name = body.data.name;
    }
    if (body.data.params !== undefined) {
      updateData.params = body.data.params;
    }
    if (body.data.isActive !== undefined) {
      updateData.isActive = body.data.isActive;
    }

    const updated = await db
      .update(rules)
      .set(updateData)
      .where(eq(rules.id, id))
      .returning();

    const updatedRule = updated[0];
    if (!updatedRule) {
      return reply.internalServerError('Failed to update rule');
    }

    return updatedRule;
  });

  /**
   * DELETE /rules/:id - Delete a rule.
   *
   * Owner-only; user-specific rules additionally require access to the
   * owning server. Associated violations are removed by the FK cascade.
   */
  app.delete('/:id', { preHandler: [app.authenticate] }, async (request, reply) => {
    const params = ruleIdParamSchema.safeParse(request.params);
    if (!params.success) {
      return reply.badRequest('Invalid rule ID');
    }

    const { id } = params.data;
    const authUser = request.user;
    // Only owners can delete rules
    if (authUser.role !== 'owner') {
      return reply.forbidden('Only server owners can delete rules');
    }

    const existingRule = await fetchRuleOwnership(id);
    if (!existingRule) {
      return reply.notFound('Rule not found');
    }
    // Check access for user-specific rules
    if (existingRule.serverUserId && existingRule.serverId && !hasServerAccess(authUser, existingRule.serverId)) {
      return reply.forbidden('You do not have access to this rule');
    }

    // Delete rule (cascade will handle violations)
    await db.delete(rules).where(eq(rules.id, id));
    return { success: true };
  });
};

Some files were not shown because too many files have changed in this diff Show More