Initial Upload
Some checks failed
CI / Lint & Typecheck (push) Has been cancelled
CI / Test (routes) (push) Has been cancelled
CI / Test (security) (push) Has been cancelled
CI / Test (services) (push) Has been cancelled
CI / Test (unit) (push) Has been cancelled
CI / Test (integration) (push) Has been cancelled
CI / Test Coverage (push) Has been cancelled
CI / Build (push) Has been cancelled

This commit is contained in:
2025-12-17 12:32:50 +13:00
commit 3015f48118
471 changed files with 141143 additions and 0 deletions

View File

@@ -0,0 +1,50 @@
/**
 * Database client and connection pool
 *
 * Builds a singleton pg Pool from DATABASE_URL and wraps it with drizzle.
 * Importing this module fails fast when DATABASE_URL is unset.
 */
import { drizzle } from 'drizzle-orm/node-postgres';
import { migrate } from 'drizzle-orm/node-postgres/migrator';
import pg from 'pg';
import * as schema from './schema.js';
const { Pool } = pg;
if (!process.env.DATABASE_URL) {
throw new Error('DATABASE_URL environment variable is required');
}
const pool = new Pool({
connectionString: process.env.DATABASE_URL,
max: 20, // Maximum connections
idleTimeoutMillis: 20000, // Close idle connections after 20s
connectionTimeoutMillis: 10000, // Connection timeout (increased for complex queries)
maxUses: 7500, // Max queries per connection before refresh (prevents memory leaks)
allowExitOnIdle: false, // Keep pool alive during idle periods
});
// Log pool errors for debugging. Pass the full error object (not just
// err.message) so the stack trace and pg-specific fields (code, detail)
// are preserved in the log output.
pool.on('error', (err) => {
console.error('[DB Pool Error]', err);
});
// Drizzle instance shared by the whole app; typed against the full schema.
export const db = drizzle(pool, { schema });
/**
 * Close the shared connection pool. Call once during process shutdown;
 * `pool.end()` resolves after in-use clients are returned and closed.
 */
export async function closeDatabase(): Promise<void> {
await pool.end();
}
/**
 * Verify the database is reachable by running a trivial `SELECT 1`.
 *
 * @returns true when the query succeeds, false on any connect/query error
 *          (the error is logged, never thrown).
 */
export async function checkDatabaseConnection(): Promise<boolean> {
  try {
    const client = await pool.connect();
    try {
      await client.query('SELECT 1');
      return true;
    } finally {
      // Always return the client to the pool. The original released only on
      // the success path, leaking a pooled client whenever connect()
      // succeeded but the query itself failed.
      client.release();
    }
  } catch (error) {
    console.error('Database connection check failed:', error);
    return false;
  }
}
/**
 * Apply any pending drizzle migrations from the given folder.
 *
 * @param migrationsFolder - path to the drizzle migrations directory
 * @throws rethrows any migration error from drizzle's migrator
 */
export async function runMigrations(migrationsFolder: string): Promise<void> {
await migrate(db, { migrationsFolder });
}

View File

@@ -0,0 +1,154 @@
-- Initial schema: servers, users, per-server user identities, playback
-- sessions, detection rules, violations, and a singleton settings row.

-- Detection rules; server_user_id is nullable, so a rule may be global
-- or scoped to one server_user (cascade-deleted with it).
CREATE TABLE "rules" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"name" varchar(100) NOT NULL,
"type" varchar(50) NOT NULL,
"params" jsonb NOT NULL,
"server_user_id" uuid,
"is_active" boolean DEFAULT true NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- A user's identity on one server: links users <-> servers and carries
-- the server-local external_id/username plus per-server trust stats.
CREATE TABLE "server_users" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"user_id" uuid NOT NULL,
"server_id" uuid NOT NULL,
"external_id" varchar(255) NOT NULL,
"username" varchar(255) NOT NULL,
"email" varchar(255),
"thumb_url" text,
"is_server_admin" boolean DEFAULT false NOT NULL,
"trust_score" integer DEFAULT 100 NOT NULL,
"session_count" integer DEFAULT 0 NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Monitored media servers (connection URL and auth token stored as text).
CREATE TABLE "servers" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"name" varchar(100) NOT NULL,
"type" varchar(20) NOT NULL,
"url" text NOT NULL,
"token" text NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Observed playback sessions: media identification, timing/progress,
-- client geo/device info, and transcode/quality details.
CREATE TABLE "sessions" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"server_id" uuid NOT NULL,
"server_user_id" uuid NOT NULL,
"session_key" varchar(255) NOT NULL,
"state" varchar(20) NOT NULL,
"media_type" varchar(20) NOT NULL,
"media_title" text NOT NULL,
"grandparent_title" varchar(500),
"season_number" integer,
"episode_number" integer,
"year" integer,
"thumb_path" varchar(500),
"rating_key" varchar(255),
"external_session_id" varchar(255),
"started_at" timestamp with time zone DEFAULT now() NOT NULL,
"stopped_at" timestamp with time zone,
"duration_ms" integer,
"total_duration_ms" integer,
"progress_ms" integer,
"last_paused_at" timestamp with time zone,
"paused_duration_ms" integer DEFAULT 0 NOT NULL,
"reference_id" uuid,
"watched" boolean DEFAULT false NOT NULL,
"ip_address" varchar(45) NOT NULL,
"geo_city" varchar(255),
"geo_region" varchar(255),
"geo_country" varchar(100),
"geo_lat" real,
"geo_lon" real,
"player_name" varchar(255),
"device_id" varchar(255),
"product" varchar(255),
"device" varchar(255),
"platform" varchar(100),
"quality" varchar(100),
"is_transcode" boolean DEFAULT false NOT NULL,
"bitrate" integer
);
--> statement-breakpoint
-- Singleton application settings row (id defaults to the fixed value 1).
CREATE TABLE "settings" (
"id" integer PRIMARY KEY DEFAULT 1 NOT NULL,
"allow_guest_access" boolean DEFAULT false NOT NULL,
"discord_webhook_url" text,
"custom_webhook_url" text,
"notify_on_violation" boolean DEFAULT true NOT NULL,
"notify_on_session_start" boolean DEFAULT false NOT NULL,
"notify_on_session_stop" boolean DEFAULT false NOT NULL,
"notify_on_server_down" boolean DEFAULT true NOT NULL,
"poller_enabled" boolean DEFAULT true NOT NULL,
"poller_interval_ms" integer DEFAULT 15000 NOT NULL,
"tautulli_url" text,
"tautulli_api_key" text,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Global user identities; may map to several server_users rows.
CREATE TABLE "users" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"username" varchar(100) NOT NULL,
"name" varchar(255),
"thumbnail" text,
"email" varchar(255),
"password_hash" text,
"plex_account_id" varchar(255),
"role" varchar(20) DEFAULT 'member' NOT NULL,
"aggregate_trust_score" integer DEFAULT 100 NOT NULL,
"total_violations" integer DEFAULT 0 NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Rule violations raised against a specific session/server_user.
CREATE TABLE "violations" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"rule_id" uuid NOT NULL,
"server_user_id" uuid NOT NULL,
"session_id" uuid NOT NULL,
"severity" varchar(20) NOT NULL,
"data" jsonb NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"acknowledged_at" timestamp with time zone
);
--> statement-breakpoint
-- Foreign keys (all cascade on delete).
ALTER TABLE "rules" ADD CONSTRAINT "rules_server_user_id_server_users_id_fk" FOREIGN KEY ("server_user_id") REFERENCES "public"."server_users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "server_users" ADD CONSTRAINT "server_users_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "server_users" ADD CONSTRAINT "server_users_server_id_servers_id_fk" FOREIGN KEY ("server_id") REFERENCES "public"."servers"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "sessions" ADD CONSTRAINT "sessions_server_id_servers_id_fk" FOREIGN KEY ("server_id") REFERENCES "public"."servers"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "sessions" ADD CONSTRAINT "sessions_server_user_id_server_users_id_fk" FOREIGN KEY ("server_user_id") REFERENCES "public"."server_users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "violations" ADD CONSTRAINT "violations_rule_id_rules_id_fk" FOREIGN KEY ("rule_id") REFERENCES "public"."rules"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "violations" ADD CONSTRAINT "violations_server_user_id_server_users_id_fk" FOREIGN KEY ("server_user_id") REFERENCES "public"."server_users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "violations" ADD CONSTRAINT "violations_session_id_sessions_id_fk" FOREIGN KEY ("session_id") REFERENCES "public"."sessions"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
-- Indexes, including the two uniqueness guarantees on server_users
-- (one row per user per server; one row per external identity per server).
CREATE INDEX "rules_active_idx" ON "rules" USING btree ("is_active");--> statement-breakpoint
CREATE INDEX "rules_server_user_id_idx" ON "rules" USING btree ("server_user_id");--> statement-breakpoint
CREATE UNIQUE INDEX "server_users_user_server_unique" ON "server_users" USING btree ("user_id","server_id");--> statement-breakpoint
CREATE UNIQUE INDEX "server_users_server_external_unique" ON "server_users" USING btree ("server_id","external_id");--> statement-breakpoint
CREATE INDEX "server_users_user_idx" ON "server_users" USING btree ("user_id");--> statement-breakpoint
CREATE INDEX "server_users_server_idx" ON "server_users" USING btree ("server_id");--> statement-breakpoint
CREATE INDEX "server_users_username_idx" ON "server_users" USING btree ("username");--> statement-breakpoint
CREATE INDEX "sessions_server_user_time_idx" ON "sessions" USING btree ("server_user_id","started_at");--> statement-breakpoint
CREATE INDEX "sessions_server_time_idx" ON "sessions" USING btree ("server_id","started_at");--> statement-breakpoint
CREATE INDEX "sessions_state_idx" ON "sessions" USING btree ("state");--> statement-breakpoint
CREATE INDEX "sessions_external_session_idx" ON "sessions" USING btree ("server_id","external_session_id");--> statement-breakpoint
CREATE INDEX "sessions_device_idx" ON "sessions" USING btree ("server_user_id","device_id");--> statement-breakpoint
CREATE INDEX "sessions_reference_idx" ON "sessions" USING btree ("reference_id");--> statement-breakpoint
CREATE INDEX "sessions_server_user_rating_idx" ON "sessions" USING btree ("server_user_id","rating_key");--> statement-breakpoint
CREATE INDEX "sessions_geo_idx" ON "sessions" USING btree ("geo_lat","geo_lon");--> statement-breakpoint
CREATE INDEX "sessions_geo_time_idx" ON "sessions" USING btree ("started_at","geo_lat","geo_lon");--> statement-breakpoint
CREATE INDEX "sessions_media_type_idx" ON "sessions" USING btree ("media_type");--> statement-breakpoint
CREATE INDEX "sessions_transcode_idx" ON "sessions" USING btree ("is_transcode");--> statement-breakpoint
CREATE INDEX "sessions_platform_idx" ON "sessions" USING btree ("platform");--> statement-breakpoint
CREATE INDEX "sessions_top_movies_idx" ON "sessions" USING btree ("media_type","media_title","year");--> statement-breakpoint
CREATE INDEX "sessions_top_shows_idx" ON "sessions" USING btree ("media_type","grandparent_title");--> statement-breakpoint
CREATE UNIQUE INDEX "users_username_unique" ON "users" USING btree ("username");--> statement-breakpoint
CREATE UNIQUE INDEX "users_email_unique" ON "users" USING btree ("email");--> statement-breakpoint
CREATE INDEX "users_plex_account_id_idx" ON "users" USING btree ("plex_account_id");--> statement-breakpoint
CREATE INDEX "users_role_idx" ON "users" USING btree ("role");--> statement-breakpoint
CREATE INDEX "violations_server_user_id_idx" ON "violations" USING btree ("server_user_id");--> statement-breakpoint
CREATE INDEX "violations_rule_id_idx" ON "violations" USING btree ("rule_id");--> statement-breakpoint
CREATE INDEX "violations_created_at_idx" ON "violations" USING btree ("created_at");

View File

@@ -0,0 +1,2 @@
-- Relax the username uniqueness constraint: replace the unique index
-- with a plain btree index on users.username.
DROP INDEX "users_username_unique";--> statement-breakpoint
CREATE INDEX "users_username_idx" ON "users" USING btree ("username");

View File

@@ -0,0 +1,26 @@
-- Mobile support: device sessions (hashed refresh tokens) and API tokens
-- (hashed), plus external-access settings columns.
CREATE TABLE "mobile_sessions" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"refresh_token_hash" varchar(64) NOT NULL,
"device_name" varchar(100) NOT NULL,
"device_id" varchar(100) NOT NULL,
"platform" varchar(20) NOT NULL,
"expo_push_token" varchar(255),
"last_seen_at" timestamp with time zone DEFAULT now() NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
CONSTRAINT "mobile_sessions_refresh_token_hash_unique" UNIQUE("refresh_token_hash")
);
--> statement-breakpoint
CREATE TABLE "mobile_tokens" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"token_hash" varchar(64) NOT NULL,
"is_enabled" boolean DEFAULT true NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"rotated_at" timestamp with time zone,
CONSTRAINT "mobile_tokens_token_hash_unique" UNIQUE("token_hash")
);
--> statement-breakpoint
-- New settings for serving the app behind an external URL / proxy.
ALTER TABLE "settings" ADD COLUMN "external_url" text;--> statement-breakpoint
ALTER TABLE "settings" ADD COLUMN "base_path" varchar(100) DEFAULT '' NOT NULL;--> statement-breakpoint
ALTER TABLE "settings" ADD COLUMN "trust_proxy" boolean DEFAULT false NOT NULL;--> statement-breakpoint
CREATE INDEX "mobile_sessions_device_id_idx" ON "mobile_sessions" USING btree ("device_id");--> statement-breakpoint
CREATE INDEX "mobile_sessions_refresh_token_idx" ON "mobile_sessions" USING btree ("refresh_token_hash");

View File

@@ -0,0 +1,28 @@
-- Per-mobile-session push notification preferences: event toggles,
-- severity/rule-type filters, rate limits, and quiet-hours window
-- (HH:MM strings plus timezone). One row per mobile session.
CREATE TABLE "notification_preferences" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"mobile_session_id" uuid NOT NULL,
"push_enabled" boolean DEFAULT true NOT NULL,
"on_violation_detected" boolean DEFAULT true NOT NULL,
"on_stream_started" boolean DEFAULT false NOT NULL,
"on_stream_stopped" boolean DEFAULT false NOT NULL,
"on_concurrent_streams" boolean DEFAULT true NOT NULL,
"on_new_device" boolean DEFAULT true NOT NULL,
"on_trust_score_changed" boolean DEFAULT false NOT NULL,
"on_server_down" boolean DEFAULT true NOT NULL,
"on_server_up" boolean DEFAULT true NOT NULL,
"violation_min_severity" integer DEFAULT 1 NOT NULL,
"violation_rule_types" text[] DEFAULT '{}',
"max_per_minute" integer DEFAULT 10 NOT NULL,
"max_per_hour" integer DEFAULT 60 NOT NULL,
"quiet_hours_enabled" boolean DEFAULT false NOT NULL,
"quiet_hours_start" varchar(5),
"quiet_hours_end" varchar(5),
"quiet_hours_timezone" varchar(50) DEFAULT 'UTC',
"quiet_hours_override_critical" boolean DEFAULT true NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL,
CONSTRAINT "notification_preferences_mobile_session_id_unique" UNIQUE("mobile_session_id")
);
--> statement-breakpoint
ALTER TABLE "notification_preferences" ADD CONSTRAINT "notification_preferences_mobile_session_id_mobile_sessions_id_fk" FOREIGN KEY ("mobile_session_id") REFERENCES "public"."mobile_sessions"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "notification_prefs_mobile_session_idx" ON "notification_preferences" USING btree ("mobile_session_id");

View File

@@ -0,0 +1,25 @@
-- Per-event-type routing of notifications to the available channels
-- (Discord, generic webhook, push), with one seeded row per event type.
CREATE TABLE "notification_channel_routing" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"event_type" varchar(50) NOT NULL,
"discord_enabled" boolean DEFAULT true NOT NULL,
"webhook_enabled" boolean DEFAULT true NOT NULL,
"push_enabled" boolean DEFAULT true NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL,
CONSTRAINT "notification_channel_routing_event_type_unique" UNIQUE("event_type")
);
--> statement-breakpoint
CREATE INDEX "notification_channel_routing_event_type_idx" ON "notification_channel_routing" USING btree ("event_type");
--> statement-breakpoint
-- Seed default routing configuration for all event types
-- (ON CONFLICT DO NOTHING keeps the seed idempotent on re-run).
INSERT INTO "notification_channel_routing" ("event_type", "discord_enabled", "webhook_enabled", "push_enabled")
VALUES
('violation_detected', true, true, true),
('stream_started', false, false, false),
('stream_stopped', false, false, false),
('concurrent_streams', true, true, true),
('new_device', true, true, true),
('trust_score_changed', false, false, false),
('server_down', true, true, true),
('server_up', true, true, true)
ON CONFLICT ("event_type") DO NOTHING;

View File

@@ -0,0 +1 @@
-- Add nullable device_secret to mobile_sessions.
-- NOTE(review): presumably a per-device shared secret for mobile auth —
-- confirm against the pairing code before documenting further.
ALTER TABLE "mobile_sessions" ADD COLUMN "device_secret" varchar(64);

View File

@@ -0,0 +1,3 @@
-- Index expo push tokens for lookup, and enforce a 24h "HH:MM" format
-- (00:00–23:59, or NULL) on the quiet-hours bounds via CHECK constraints.
CREATE INDEX "mobile_sessions_expo_push_token_idx" ON "mobile_sessions" USING btree ("expo_push_token");--> statement-breakpoint
ALTER TABLE "notification_preferences" ADD CONSTRAINT "quiet_hours_start_format" CHECK ("notification_preferences"."quiet_hours_start" IS NULL OR "notification_preferences"."quiet_hours_start" ~ '^([01][0-9]|2[0-3]):[0-5][0-9]$');--> statement-breakpoint
ALTER TABLE "notification_preferences" ADD CONSTRAINT "quiet_hours_end_format" CHECK ("notification_preferences"."quiet_hours_end" IS NULL OR "notification_preferences"."quiet_hours_end" ~ '^([01][0-9]|2[0-3]):[0-5][0-9]$');

View File

@@ -0,0 +1 @@
-- Feature flag: mobile access is disabled until explicitly enabled.
ALTER TABLE "settings" ADD COLUMN "mobile_enabled" boolean DEFAULT false NOT NULL;

View File

@@ -0,0 +1,41 @@
-- Custom SQL migration file, put your code below! --
-- Update mobile_tokens schema for one-time pairing tokens
-- Remove old columns (is_enabled, rotated_at) and add new columns (expires_at, created_by, used_at)
-- Step 1: Clear existing tokens (breaking change - old schema incompatible)
DELETE FROM "mobile_tokens";
-- Step 2: Drop old columns
ALTER TABLE "mobile_tokens" DROP COLUMN IF EXISTS "is_enabled";
ALTER TABLE "mobile_tokens" DROP COLUMN IF EXISTS "rotated_at";
-- Step 3: Add new required column with temporary default (IF NOT EXISTS for idempotency)
DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'mobile_tokens' AND column_name = 'expires_at') THEN
ALTER TABLE "mobile_tokens" ADD COLUMN "expires_at" timestamp with time zone NOT NULL DEFAULT NOW() + INTERVAL '15 minutes';
END IF;
END $$;
-- Step 4: Add nullable columns (IF NOT EXISTS for idempotency)
DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'mobile_tokens' AND column_name = 'created_by') THEN
ALTER TABLE "mobile_tokens" ADD COLUMN "created_by" uuid;
END IF;
END $$;
DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'mobile_tokens' AND column_name = 'used_at') THEN
ALTER TABLE "mobile_tokens" ADD COLUMN "used_at" timestamp with time zone;
END IF;
END $$;
-- Step 5: Add foreign key constraint (IF NOT EXISTS for idempotency)
-- Fix: scope the existence check to this table. Constraint names are only
-- required to be unique per table in Postgres, so matching on the name
-- alone could false-positive on a same-named constraint of another table
-- and silently skip creating this foreign key.
DO $$ BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.table_constraints WHERE constraint_name = 'mobile_tokens_created_by_users_id_fk' AND table_name = 'mobile_tokens') THEN
ALTER TABLE "mobile_tokens" ADD CONSTRAINT "mobile_tokens_created_by_users_id_fk" FOREIGN KEY ("created_by") REFERENCES "users"("id") ON DELETE CASCADE ON UPDATE NO ACTION;
END IF;
END $$;
-- Step 6: Remove temporary default from expires_at
ALTER TABLE "mobile_tokens" ALTER COLUMN "expires_at" DROP DEFAULT;

View File

@@ -0,0 +1,20 @@
-- Session liveness tracking: add last_seen_at (backfilled), plus
-- force_stopped and short_session flags, and a stale-detection index.
-- TimescaleDB hypertables with columnstore don't allow non-constant defaults like now()
-- So we add columns as nullable first, backfill, then set NOT NULL
-- Add last_seen_at as nullable first
ALTER TABLE "sessions" ADD COLUMN "last_seen_at" timestamp with time zone;--> statement-breakpoint
-- Backfill existing rows: use started_at as the initial last_seen_at value
UPDATE "sessions" SET "last_seen_at" = "started_at" WHERE "last_seen_at" IS NULL;--> statement-breakpoint
-- Now set NOT NULL constraint (no default needed - app always provides value)
ALTER TABLE "sessions" ALTER COLUMN "last_seen_at" SET NOT NULL;--> statement-breakpoint
-- Add force_stopped column
ALTER TABLE "sessions" ADD COLUMN "force_stopped" boolean DEFAULT false NOT NULL;--> statement-breakpoint
-- Add short_session column
ALTER TABLE "sessions" ADD COLUMN "short_session" boolean DEFAULT false NOT NULL;--> statement-breakpoint
-- Create index for stale session detection
CREATE INDEX "sessions_stale_detection_idx" ON "sessions" USING btree ("last_seen_at","stopped_at");

View File

@@ -0,0 +1,18 @@
-- Multi-server support: Add user_id to mobile_sessions
-- BREAKING CHANGE: Clears existing mobile sessions - users must re-pair devices
-- Clear existing data (notification_preferences has FK to mobile_sessions)
DELETE FROM "notification_preferences";--> statement-breakpoint
DELETE FROM "mobile_sessions";--> statement-breakpoint
-- Unrelated schema drift fix from drizzle-kit
ALTER TABLE "sessions" ALTER COLUMN "last_seen_at" DROP DEFAULT;--> statement-breakpoint
-- Add user_id column (required for multi-user mobile support)
-- NOT NULL without a default is safe only because the table was just
-- cleared above; do not reorder these statements.
ALTER TABLE "mobile_sessions" ADD COLUMN "user_id" uuid NOT NULL;--> statement-breakpoint
-- Add foreign key constraint
ALTER TABLE "mobile_sessions" ADD CONSTRAINT "mobile_sessions_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
-- Add index for efficient user lookups
CREATE INDEX "mobile_sessions_user_idx" ON "mobile_sessions" USING btree ("user_id");

View File

@@ -0,0 +1,27 @@
-- Audit log of session terminations: who/what triggered the kill
-- (manual user vs rule/violation), whether it succeeded, and why.
-- Note: session_id has no FK constraint because sessions is a TimescaleDB hypertable
-- (hypertables don't support foreign key references to their primary key)
CREATE TABLE "termination_logs" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"session_id" uuid NOT NULL,
"server_id" uuid NOT NULL,
"server_user_id" uuid NOT NULL,
"trigger" varchar(20) NOT NULL,
"triggered_by_user_id" uuid,
"rule_id" uuid,
"violation_id" uuid,
"reason" text,
"success" boolean NOT NULL,
"error_message" text,
"created_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- FKs: cascade with the server/server_user; SET NULL keeps the audit row
-- when the triggering user, rule, or violation is deleted.
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_server_id_servers_id_fk" FOREIGN KEY ("server_id") REFERENCES "public"."servers"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_server_user_id_server_users_id_fk" FOREIGN KEY ("server_user_id") REFERENCES "public"."server_users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_triggered_by_user_id_users_id_fk" FOREIGN KEY ("triggered_by_user_id") REFERENCES "public"."users"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_rule_id_rules_id_fk" FOREIGN KEY ("rule_id") REFERENCES "public"."rules"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "termination_logs" ADD CONSTRAINT "termination_logs_violation_id_violations_id_fk" FOREIGN KEY ("violation_id") REFERENCES "public"."violations"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "termination_logs_session_idx" ON "termination_logs" USING btree ("session_id");--> statement-breakpoint
CREATE INDEX "termination_logs_server_user_idx" ON "termination_logs" USING btree ("server_user_id");--> statement-breakpoint
CREATE INDEX "termination_logs_triggered_by_idx" ON "termination_logs" USING btree ("triggered_by_user_id");--> statement-breakpoint
CREATE INDEX "termination_logs_rule_idx" ON "termination_logs" USING btree ("rule_id");--> statement-breakpoint
CREATE INDEX "termination_logs_created_at_idx" ON "termination_logs" USING btree ("created_at");

View File

@@ -0,0 +1 @@
-- Add nullable plex_session_id to sessions (stored alongside the existing
-- session_key / external_session_id identifiers).
ALTER TABLE "sessions" ADD COLUMN "plex_session_id" varchar(255);

View File

@@ -0,0 +1,3 @@
-- Drop the termination_logs -> sessions FK if present (sessions is a
-- hypertable; see 0011's note) and add a fallback dedup index on sessions.
ALTER TABLE "termination_logs" DROP CONSTRAINT IF EXISTS "termination_logs_session_id_sessions_id_fk";
--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "sessions_dedup_fallback_idx" ON "sessions" USING btree ("server_id","server_user_id","rating_key","started_at");

View File

@@ -0,0 +1,2 @@
-- Settings: webhook payload format selector and an ntfy topic.
ALTER TABLE "settings" ADD COLUMN "webhook_format" text;--> statement-breakpoint
ALTER TABLE "settings" ADD COLUMN "ntfy_topic" text;

View File

@@ -0,0 +1 @@
-- Add nullable machine_identifier to servers.
ALTER TABLE "servers" ADD COLUMN "machine_identifier" varchar(100);

View File

@@ -0,0 +1,4 @@
-- Drop the legacy per-event notify_* toggles from settings.
ALTER TABLE "settings" DROP COLUMN "notify_on_violation";--> statement-breakpoint
ALTER TABLE "settings" DROP COLUMN "notify_on_session_start";--> statement-breakpoint
ALTER TABLE "settings" DROP COLUMN "notify_on_session_stop";--> statement-breakpoint
ALTER TABLE "settings" DROP COLUMN "notify_on_server_down";

View File

@@ -0,0 +1 @@
-- Primary authentication method selector; defaults to 'local'.
ALTER TABLE "settings" ADD COLUMN "primary_auth_method" varchar(20) DEFAULT 'local' NOT NULL;

View File

@@ -0,0 +1,2 @@
-- New web-toast notification channel toggle and a unit-system preference.
ALTER TABLE "notification_channel_routing" ADD COLUMN "web_toast_enabled" boolean DEFAULT true NOT NULL;--> statement-breakpoint
ALTER TABLE "settings" ADD COLUMN "unit_system" varchar(20) DEFAULT 'metric' NOT NULL;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,139 @@
{
"version": "7",
"dialect": "postgresql",
"entries": [
{
"idx": 0,
"version": "7",
"when": 1764697096357,
"tag": "0000_lying_dorian_gray",
"breakpoints": true
},
{
"idx": 1,
"version": "7",
"when": 1764705329215,
"tag": "0001_graceful_starjammers",
"breakpoints": true
},
{
"idx": 2,
"version": "7",
"when": 1764788872702,
"tag": "0002_rainy_bishop",
"breakpoints": true
},
{
"idx": 3,
"version": "7",
"when": 1764799561925,
"tag": "0003_black_maginty",
"breakpoints": true
},
{
"idx": 4,
"version": "7",
"when": 1764800111127,
"tag": "0004_bent_unus",
"breakpoints": true
},
{
"idx": 5,
"version": "7",
"when": 1764801611462,
"tag": "0005_elite_wendell_vaughn",
"breakpoints": true
},
{
"idx": 6,
"version": "7",
"when": 1764806894704,
"tag": "0006_worthless_blue_shield",
"breakpoints": true
},
{
"idx": 7,
"version": "7",
"when": 1764865910195,
"tag": "0007_tense_pestilence",
"breakpoints": true
},
{
"idx": 8,
"version": "7",
"when": 1764871905960,
"tag": "0008_update_mobile_tokens_schema",
"breakpoints": true
},
{
"idx": 9,
"version": "7",
"when": 1764996689797,
"tag": "0009_quiet_vertigo",
"breakpoints": true
},
{
"idx": 10,
"version": "7",
"when": 1765214112454,
"tag": "0010_fair_zuras",
"breakpoints": true
},
{
"idx": 11,
"version": "7",
"when": 1765302271434,
"tag": "0011_breezy_ultron",
"breakpoints": true
},
{
"idx": 12,
"version": "7",
"when": 1765303740465,
"tag": "0012_strong_hannibal_king",
"breakpoints": true
},
{
"idx": 13,
"version": "7",
"when": 1765468947659,
"tag": "0013_same_rage",
"breakpoints": true
},
{
"idx": 14,
"version": "7",
"when": 1765479413354,
"tag": "0014_past_molly_hayes",
"breakpoints": true
},
{
"idx": 15,
"version": "7",
"when": 1765482999236,
"tag": "0015_gifted_the_liberteens",
"breakpoints": true
},
{
"idx": 16,
"version": "7",
"when": 1765571040426,
"tag": "0016_yummy_enchantress",
"breakpoints": true
},
{
"idx": 17,
"version": "7",
"when": 1765667812248,
"tag": "0017_broken_husk",
"breakpoints": true
},
{
"idx": 18,
"version": "7",
"when": 1765858132064,
"tag": "0018_robust_shotgun",
"breakpoints": true
}
]
}

View File

@@ -0,0 +1,325 @@
/**
* Prepared statements for hot-path queries
*
* Prepared statements optimize performance by allowing PostgreSQL to reuse
* query plans across executions. These are particularly valuable for:
* - Queries called on every page load (dashboard)
* - Queries called frequently during polling
* - Queries with predictable parameter patterns
*
* @see https://orm.drizzle.team/docs/perf-queries
*/
import { eq, gte, and, isNull, desc, sql } from 'drizzle-orm';
import { db } from './client.js';
import { sessions, violations, users, serverUsers, servers, rules } from './schema.js';
// ============================================================================
// Dashboard Stats Queries
// ============================================================================
/**
* Count unique plays (grouped by reference_id) since a given date.
* Sessions sharing a reference_id count once via
* COUNT(DISTINCT COALESCE(reference_id, id)).
* Used for: Dashboard "Today's Plays" metric
* Called: Every dashboard page load
* Placeholder: 'since' — Date lower bound on sessions.started_at (inclusive)
*/
export const playsCountSince = db
.select({
count: sql<number>`count(DISTINCT COALESCE(reference_id, id))::int`,
})
.from(sessions)
.where(gte(sessions.startedAt, sql.placeholder('since')))
.prepare('plays_count_since');
/**
* Sum total watch time since a given date.
* Used for: Dashboard "Watch Time" metric
* Called: Every dashboard page load
* Placeholder: 'since' — Date lower bound on sessions.started_at (inclusive)
* NOTE(review): the SQL casts to ::bigint but the TS type is sql<number>;
* node-postgres returns int8 as a string unless a type parser is
* registered — confirm totalMs is actually a number at runtime.
*/
export const watchTimeSince = db
.select({
totalMs: sql<number>`COALESCE(SUM(duration_ms), 0)::bigint`,
})
.from(sessions)
.where(gte(sessions.startedAt, sql.placeholder('since')))
.prepare('watch_time_since');
/**
* Count violations since a given date.
* Used for: Dashboard "Alerts" metric
* Called: Every dashboard page load
* Placeholder: 'since' — Date lower bound on violations.created_at (inclusive)
*/
export const violationsCountSince = db
.select({
count: sql<number>`count(*)::int`,
})
.from(violations)
.where(gte(violations.createdAt, sql.placeholder('since')))
.prepare('violations_count_since');
/**
* Count unique active users (distinct server_user_id) since a given date.
* Used for: Dashboard "Active Users Today" metric
* Called: Every dashboard page load
* Placeholder: 'since' — Date lower bound on sessions.started_at (inclusive)
*/
export const uniqueUsersSince = db
.select({
count: sql<number>`count(DISTINCT server_user_id)::int`,
})
.from(sessions)
.where(gte(sessions.startedAt, sql.placeholder('since')))
.prepare('unique_users_since');
/**
* Count unacknowledged violations (acknowledged_at IS NULL).
* Used for: Alert badge in navigation
* Called: On app load and after acknowledgment
* Takes no placeholders.
*/
export const unacknowledgedViolationsCount = db
.select({
count: sql<number>`count(*)::int`,
})
.from(violations)
.where(isNull(violations.acknowledgedAt))
.prepare('unacknowledged_violations_count');
// ============================================================================
// Polling Queries
// ============================================================================
/**
* Find server user by server ID and external ID.
* Used for: Server user lookup during session polling
* Called: Every poll cycle for each active session (potentially 10+ times per 15 seconds)
* Placeholders: 'serverId', 'externalId'
* Resolves to an array with at most one full server_users row (limit 1).
*/
export const serverUserByExternalId = db
.select()
.from(serverUsers)
.where(
and(
eq(serverUsers.serverId, sql.placeholder('serverId')),
eq(serverUsers.externalId, sql.placeholder('externalId'))
)
)
.limit(1)
.prepare('server_user_by_external_id');
/**
* Find session by server ID and session key.
* Used for: Session lookup during polling to check for existing sessions
* Called: Every poll cycle for each active session
* Placeholders: 'serverId', 'sessionKey'
* Resolves to an array with at most one full sessions row (limit 1).
*/
export const sessionByServerAndKey = db
.select()
.from(sessions)
.where(
and(
eq(sessions.serverId, sql.placeholder('serverId')),
eq(sessions.sessionKey, sql.placeholder('sessionKey'))
)
)
.limit(1)
.prepare('session_by_server_and_key');
// ============================================================================
// User Queries
// ============================================================================
/**
* Get server user by ID, projected to the basic display fields
* (id, userId, username, thumbUrl, trustScore).
* Used for: Server user details in violations, sessions
* Called: Frequently for UI enrichment
* Placeholder: 'id' — server_users primary key
*/
export const serverUserById = db
.select({
id: serverUsers.id,
userId: serverUsers.userId,
username: serverUsers.username,
thumbUrl: serverUsers.thumbUrl,
trustScore: serverUsers.trustScore,
})
.from(serverUsers)
.where(eq(serverUsers.id, sql.placeholder('id')))
.limit(1)
.prepare('server_user_by_id');
/**
* Get user identity by ID, projected to identity/display fields
* (id, name, thumbnail, email, role, aggregateTrustScore).
* Used for: User identity info (the real person)
* Called: When viewing user profile
* Placeholder: 'id' — users primary key
*/
export const userById = db
.select({
id: users.id,
name: users.name,
thumbnail: users.thumbnail,
email: users.email,
role: users.role,
aggregateTrustScore: users.aggregateTrustScore,
})
.from(users)
.where(eq(users.id, sql.placeholder('id')))
.limit(1)
.prepare('user_by_id');
// ============================================================================
// Session Queries
// ============================================================================
/**
* Get session by ID (full row, at most one result).
* Used for: Session detail page, violation context
* Called: When viewing session details
* Placeholder: 'id' — sessions primary key
*/
export const sessionById = db
.select()
.from(sessions)
.where(eq(sessions.id, sql.placeholder('id')))
.limit(1)
.prepare('session_by_id');
// ============================================================================
// Stats Queries (hot-path for dashboard and analytics pages)
// ============================================================================
/**
 * Plays by platform since a given date
 * Used for: Stats platform breakdown chart
 * Called: Every stats page load
 *
 * COALESCE(reference_id, id) collapses a resume chain (sessions that continue
 * an earlier one share a reference_id) into a single counted "play".
 */
export const playsByPlatformSince = db
  .select({
    platform: sessions.platform,
    count: sql<number>`count(DISTINCT COALESCE(reference_id, id))::int`,
  })
  .from(sessions)
  .where(gte(sessions.startedAt, sql.placeholder('since')))
  .groupBy(sessions.platform)
  .orderBy(sql`count(DISTINCT COALESCE(reference_id, id)) DESC`) // most-used platform first
  .prepare('plays_by_platform_since');
/**
 * Quality breakdown (direct vs transcode) since a given date
 * Used for: Stats quality chart
 * Called: Every stats page load
 *
 * Groups by the is_transcode flag; like the platform query, a resume chain
 * (shared reference_id) counts as one play.
 */
export const qualityStatsSince = db
  .select({
    isTranscode: sessions.isTranscode,
    count: sql<number>`count(DISTINCT COALESCE(reference_id, id))::int`,
  })
  .from(sessions)
  .where(gte(sessions.startedAt, sql.placeholder('since')))
  .groupBy(sessions.isTranscode)
  .prepare('quality_stats_since');
/**
 * Watch time by media type since a given date
 * Used for: Watch time breakdown by content type
 * Called: Stats page load
 *
 * NOTE: the sum is cast to double precision instead of bigint. node-postgres
 * returns int8 (bigint) columns as JS strings by default (unless a custom
 * type parser is registered), which would contradict the declared
 * `sql<number>` type and break arithmetic on the consumer side. Millisecond
 * totals fit comfortably within double precision's exact 2^53 integer range.
 */
export const watchTimeByTypeSince = db
  .select({
    mediaType: sessions.mediaType,
    totalMs: sql<number>`COALESCE(SUM(duration_ms), 0)::double precision`,
  })
  .from(sessions)
  .where(gte(sessions.startedAt, sql.placeholder('since')))
  .groupBy(sessions.mediaType)
  .prepare('watch_time_by_type_since');
// ============================================================================
// Rule Queries (hot-path for poller)
// ============================================================================
/**
 * Get all active rules
 * Used for: Rule evaluation during session polling
 * Called: Every poll cycle (~15 seconds per server)
 *
 * Backed by the `rules_active_idx` index on is_active; returns both global
 * rules (server_user_id IS NULL) and per-user rules.
 */
export const getActiveRules = db
  .select()
  .from(rules)
  .where(eq(rules.isActive, true))
  .prepare('get_active_rules');
/**
 * Get recent sessions for a user (for rule evaluation)
 * Used for: Evaluating device velocity, concurrent streams rules
 * Called: During rule evaluation for active sessions
 *
 * Selects only the columns rule evaluation needs (timing, IP, device, geo,
 * state). Hard-capped at the 100 most recent rows to bound poller work.
 */
export const getUserRecentSessions = db
  .select({
    id: sessions.id,
    startedAt: sessions.startedAt,
    stoppedAt: sessions.stoppedAt,
    ipAddress: sessions.ipAddress,
    deviceId: sessions.deviceId,
    geoLat: sessions.geoLat,
    geoLon: sessions.geoLon,
    geoCity: sessions.geoCity,
    geoCountry: sessions.geoCountry,
    state: sessions.state,
  })
  .from(sessions)
  .where(
    and(
      eq(sessions.serverUserId, sql.placeholder('serverUserId')),
      gte(sessions.startedAt, sql.placeholder('since'))
    )
  )
  .orderBy(desc(sessions.startedAt)) // newest first
  .limit(100)
  .prepare('get_user_recent_sessions');
// ============================================================================
// Violation Queries
// ============================================================================
/**
 * Get unacknowledged violations with pagination
 * Used for: Violation list in dashboard
 * Called: Frequently for alert displays
 *
 * "Unacknowledged" = acknowledged_at IS NULL. `limit` is a required
 * placeholder the caller must supply at execute() time.
 */
export const getUnackedViolations = db
  .select()
  .from(violations)
  .where(isNull(violations.acknowledgedAt))
  .orderBy(desc(violations.createdAt)) // newest violations first
  .limit(sql.placeholder('limit'))
  .prepare('get_unacked_violations');
// ============================================================================
// Server Queries
// ============================================================================
/**
 * Get server by ID
 * Used for: Server details, validation
 * Called: Frequently during API requests
 *
 * Returns the full row, including the (encrypted) token column — do not
 * serialize the result directly to clients.
 */
export const serverById = db
  .select()
  .from(servers)
  .where(eq(servers.id, sql.placeholder('id')))
  .limit(1) // id is the primary key, so at most one row
  .prepare('server_by_id');
// ============================================================================
// Type exports for execute results
// ============================================================================
// Row-array result types derived from each prepared statement's execute().
// Some referenced queries (playsCountSince, watchTimeSince, violationsCountSince,
// serverUserByExternalId) are defined earlier in this file, outside this section.
export type PlaysCountResult = Awaited<ReturnType<typeof playsCountSince.execute>>;
export type WatchTimeResult = Awaited<ReturnType<typeof watchTimeSince.execute>>;
export type ViolationsCountResult = Awaited<ReturnType<typeof violationsCountSince.execute>>;
export type ServerUserByExternalIdResult = Awaited<ReturnType<typeof serverUserByExternalId.execute>>;
export type ServerUserByIdResult = Awaited<ReturnType<typeof serverUserById.execute>>;
export type UserByIdResult = Awaited<ReturnType<typeof userById.execute>>;
export type SessionByIdResult = Awaited<ReturnType<typeof sessionById.execute>>;
export type PlaysByPlatformResult = Awaited<ReturnType<typeof playsByPlatformSince.execute>>;
export type QualityStatsResult = Awaited<ReturnType<typeof qualityStatsSince.execute>>;
export type WatchTimeByTypeResult = Awaited<ReturnType<typeof watchTimeByTypeSince.execute>>;
export type ActiveRulesResult = Awaited<ReturnType<typeof getActiveRules.execute>>;
export type UserRecentSessionsResult = Awaited<ReturnType<typeof getUserRecentSessions.execute>>;
export type UnackedViolationsResult = Awaited<ReturnType<typeof getUnackedViolations.execute>>;
export type ServerByIdResult = Awaited<ReturnType<typeof serverById.execute>>;

View File

@@ -0,0 +1,624 @@
/**
* Drizzle ORM schema definitions for Tracearr
*
* Multi-Server User Architecture:
* - `users` = Identity (the real human)
* - `server_users` = Account on a specific server (Plex/Jellyfin/Emby)
* - One user can have multiple server_users (accounts across servers)
* - Sessions and violations link to server_users (server-specific)
*/
import {
pgTable,
uuid,
varchar,
text,
timestamp,
boolean,
integer,
real,
jsonb,
index,
uniqueIndex,
check,
} from 'drizzle-orm/pg-core';
import { relations, sql } from 'drizzle-orm';
// NOTE: these "enums" are plain `as const` string tuples. Columns store them as
// varchar with a compile-time $type<> cast — no Postgres ENUM types are created,
// so adding a value needs no migration, but values are not DB-enforced.
// Server types enum
export const serverTypeEnum = ['plex', 'jellyfin', 'emby'] as const;
// Session state enum
export const sessionStateEnum = ['playing', 'paused', 'stopped'] as const;
// Media type enum
export const mediaTypeEnum = ['movie', 'episode', 'track'] as const;
// Rule type enum
export const ruleTypeEnum = [
  'impossible_travel',
  'simultaneous_locations',
  'device_velocity',
  'concurrent_streams',
  'geo_restriction',
] as const;
// Violation severity enum
export const violationSeverityEnum = ['low', 'warning', 'high'] as const;
// Media servers (Plex/Jellyfin/Emby instances)
// Deleting a server cascades to its server_users and sessions (see their FKs).
export const servers = pgTable('servers', {
  id: uuid('id').primaryKey().defaultRandom(),
  name: varchar('name', { length: 100 }).notNull(),
  type: varchar('type', { length: 20 }).notNull().$type<(typeof serverTypeEnum)[number]>(),
  url: text('url').notNull(),
  token: text('token').notNull(), // Encrypted — never log or serialize this column to clients
  machineIdentifier: varchar('machine_identifier', { length: 100 }), // Plex clientIdentifier for dedup
  createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
  updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(), // NOTE: no DB trigger visible here — presumably maintained by app code; confirm
});
/**
 * Users - Identity table representing real humans
 *
 * This is the "anchor" identity that can own multiple server accounts.
 * Stores authentication credentials and aggregated metrics.
 */
export const users = pgTable(
  'users',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    // Identity
    username: varchar('username', { length: 100 }).notNull(), // Display name from media server; indexed but NOT unique at the DB level (see users_username_idx) — TODO confirm login lookups don't assume uniqueness
    name: varchar('name', { length: 255 }), // Display name (optional, defaults to null)
    thumbnail: text('thumbnail'), // Custom avatar (nullable)
    email: varchar('email', { length: 255 }), // For identity matching (nullable)
    // Authentication (nullable - not all users authenticate directly)
    passwordHash: text('password_hash'), // bcrypt hash for local login
    plexAccountId: varchar('plex_account_id', { length: 255 }), // Plex.tv global account ID for OAuth
    // Access control - combined permission level and account status
    // Can log in: 'owner', 'admin', 'viewer'
    // Cannot log in: 'member' (default), 'disabled', 'pending'
    role: varchar('role', { length: 20 })
      .notNull()
      .$type<'owner' | 'admin' | 'viewer' | 'member' | 'disabled' | 'pending'>()
      .default('member'),
    // Aggregated metrics (cached, updated by triggers)
    aggregateTrustScore: integer('aggregate_trust_score').notNull().default(100),
    totalViolations: integer('total_violations').notNull().default(0),
    // Timestamps
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    // Username is display name from media server (not unique across servers)
    index('users_username_idx').on(table.username),
    // Unique when present; Postgres treats NULLs as distinct, so many users may have NULL email
    uniqueIndex('users_email_unique').on(table.email),
    index('users_plex_account_id_idx').on(table.plexAccountId),
    index('users_role_idx').on(table.role),
  ]
);
/**
 * Server Users - Account on a specific media server
 *
 * Represents a user's account on a Plex/Jellyfin/Emby server.
 * One user (identity) can have multiple server_users (accounts across servers).
 * Sessions and violations link here for per-server tracking.
 *
 * Deleting either the parent user or the server cascades to this row.
 */
export const serverUsers = pgTable(
  'server_users',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    // Relationships - always linked to both user and server
    userId: uuid('user_id')
      .notNull()
      .references(() => users.id, { onDelete: 'cascade' }),
    serverId: uuid('server_id')
      .notNull()
      .references(() => servers.id, { onDelete: 'cascade' }),
    // Server-specific identity
    externalId: varchar('external_id', { length: 255 }).notNull(), // Plex/Jellyfin user ID
    username: varchar('username', { length: 255 }).notNull(), // Username on this server
    email: varchar('email', { length: 255 }), // Email from server sync (may differ from users.email)
    thumbUrl: text('thumb_url'), // Avatar from server
    // Server-specific permissions
    isServerAdmin: boolean('is_server_admin').notNull().default(false),
    // Per-server trust
    trustScore: integer('trust_score').notNull().default(100), // starts at 100; adjusted elsewhere as violations accrue
    sessionCount: integer('session_count').notNull().default(0), // For aggregate weighting
    // Timestamps
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    // One account per user per server
    uniqueIndex('server_users_user_server_unique').on(table.userId, table.serverId),
    // Atomic upsert during sync
    uniqueIndex('server_users_server_external_unique').on(table.serverId, table.externalId),
    // Query optimization
    index('server_users_user_idx').on(table.userId),
    index('server_users_server_idx').on(table.serverId),
    index('server_users_username_idx').on(table.username),
  ]
);
// Session history (will be converted to hypertable)
// NOTE: at startup the TimescaleDB init converts this table to a hypertable
// partitioned on started_at; the primary key is then replaced by the composite
// (id, started_at), and other tables can no longer hold FK references into it.
export const sessions = pgTable(
  'sessions',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    serverId: uuid('server_id')
      .notNull()
      .references(() => servers.id, { onDelete: 'cascade' }),
    // Links to server_users for per-server tracking
    serverUserId: uuid('server_user_id')
      .notNull()
      .references(() => serverUsers.id, { onDelete: 'cascade' }),
    sessionKey: varchar('session_key', { length: 255 }).notNull(),
    // Plex Session.id - required for termination API (different from sessionKey)
    // For Jellyfin/Emby, sessionKey is used directly for termination
    plexSessionId: varchar('plex_session_id', { length: 255 }),
    state: varchar('state', { length: 20 }).notNull().$type<(typeof sessionStateEnum)[number]>(),
    mediaType: varchar('media_type', { length: 20 })
      .notNull()
      .$type<(typeof mediaTypeEnum)[number]>(),
    mediaTitle: text('media_title').notNull(),
    // Enhanced media metadata for episodes
    grandparentTitle: varchar('grandparent_title', { length: 500 }), // Show name (for episodes)
    seasonNumber: integer('season_number'), // Season number (for episodes)
    episodeNumber: integer('episode_number'), // Episode number (for episodes)
    year: integer('year'), // Release year
    thumbPath: varchar('thumb_path', { length: 500 }), // Poster path (e.g., /library/metadata/123/thumb)
    ratingKey: varchar('rating_key', { length: 255 }), // Plex/Jellyfin media identifier
    externalSessionId: varchar('external_session_id', { length: 255 }), // External reference for deduplication
    startedAt: timestamp('started_at', { withTimezone: true }).notNull().defaultNow(),
    stoppedAt: timestamp('stopped_at', { withTimezone: true }),
    lastSeenAt: timestamp('last_seen_at', { withTimezone: true }).notNull(), // Last time session was seen in poll (for stale detection) - no default, app always provides
    durationMs: integer('duration_ms'), // Actual watch duration (excludes paused time); int4 caps at ~24.8 days of ms
    totalDurationMs: integer('total_duration_ms'), // Total media length
    progressMs: integer('progress_ms'), // Current playback position
    // Pause tracking - accumulates total paused time across pause/resume cycles
    lastPausedAt: timestamp('last_paused_at', { withTimezone: true }), // When current pause started
    pausedDurationMs: integer('paused_duration_ms').notNull().default(0), // Accumulated pause time
    // Session grouping for "resume where left off" tracking
    referenceId: uuid('reference_id'), // Links to first session in resume chain; no FK — self-references into a hypertable are unsupported
    watched: boolean('watched').notNull().default(false), // True if user watched 85%+
    forceStopped: boolean('force_stopped').notNull().default(false), // True if session was force-stopped due to inactivity
    shortSession: boolean('short_session').notNull().default(false), // True if session duration < MIN_PLAY_TIME_MS (120s)
    ipAddress: varchar('ip_address', { length: 45 }).notNull(), // 45 chars fits a full IPv6 textual address
    geoCity: varchar('geo_city', { length: 255 }),
    geoRegion: varchar('geo_region', { length: 255 }), // State/province/subdivision
    geoCountry: varchar('geo_country', { length: 100 }),
    geoLat: real('geo_lat'),
    geoLon: real('geo_lon'),
    playerName: varchar('player_name', { length: 255 }), // Player title/friendly name
    deviceId: varchar('device_id', { length: 255 }), // Machine identifier (unique device UUID)
    product: varchar('product', { length: 255 }), // Product name (e.g., "Plex for iOS")
    device: varchar('device', { length: 255 }), // Device type (e.g., "iPhone", "Android TV")
    platform: varchar('platform', { length: 100 }),
    quality: varchar('quality', { length: 100 }),
    isTranscode: boolean('is_transcode').notNull().default(false),
    bitrate: integer('bitrate'),
  },
  (table) => [
    index('sessions_server_user_time_idx').on(table.serverUserId, table.startedAt),
    index('sessions_server_time_idx').on(table.serverId, table.startedAt),
    index('sessions_state_idx').on(table.state),
    index('sessions_external_session_idx').on(table.serverId, table.externalSessionId),
    index('sessions_device_idx').on(table.serverUserId, table.deviceId),
    index('sessions_reference_idx').on(table.referenceId), // For session grouping queries
    index('sessions_server_user_rating_idx').on(table.serverUserId, table.ratingKey), // For resume detection
    // Index for Tautulli import deduplication fallback (when externalSessionId not found)
    index('sessions_dedup_fallback_idx').on(
      table.serverId,
      table.serverUserId,
      table.ratingKey,
      table.startedAt
    ),
    // Indexes for stats queries
    index('sessions_geo_idx').on(table.geoLat, table.geoLon), // For /stats/locations basic geo lookup
    index('sessions_geo_time_idx').on(table.startedAt, table.geoLat, table.geoLon), // For time-filtered map queries
    index('sessions_media_type_idx').on(table.mediaType), // For media type aggregations
    index('sessions_transcode_idx').on(table.isTranscode), // For quality stats
    index('sessions_platform_idx').on(table.platform), // For platform stats
    // Indexes for top-content queries (movies and shows aggregation)
    index('sessions_top_movies_idx').on(table.mediaType, table.mediaTitle, table.year), // For top movies GROUP BY
    index('sessions_top_shows_idx').on(table.mediaType, table.grandparentTitle), // For top shows GROUP BY series
    // Index for stale session detection (active sessions that haven't been seen recently)
    index('sessions_stale_detection_idx').on(table.lastSeenAt, table.stoppedAt),
  ]
);
// Sharing detection rules
// A rule applies globally (serverUserId NULL) or to one specific server user.
// `params` is free-form JSON; its shape presumably depends on `type` — see the
// rule evaluators for the expected keys per rule type.
export const rules = pgTable(
  'rules',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    name: varchar('name', { length: 100 }).notNull(),
    type: varchar('type', { length: 50 }).notNull().$type<(typeof ruleTypeEnum)[number]>(),
    params: jsonb('params').notNull().$type<Record<string, unknown>>(),
    // Nullable: null = global rule, set = specific server user
    serverUserId: uuid('server_user_id').references(() => serverUsers.id, { onDelete: 'cascade' }),
    isActive: boolean('is_active').notNull().default(true),
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    index('rules_active_idx').on(table.isActive), // hot-path: poller fetches active rules each cycle
    index('rules_server_user_id_idx').on(table.serverUserId),
  ]
);
// Rule violations
// NOTE: the sessionId FK declared below is DROPPED at runtime when sessions is
// converted to a TimescaleDB hypertable (hypertables cannot be FK targets);
// after that, the link is app-maintained and backed by a plain index.
export const violations = pgTable(
  'violations',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    ruleId: uuid('rule_id')
      .notNull()
      .references(() => rules.id, { onDelete: 'cascade' }),
    // Links to server_users for per-server tracking
    serverUserId: uuid('server_user_id')
      .notNull()
      .references(() => serverUsers.id, { onDelete: 'cascade' }),
    sessionId: uuid('session_id')
      .notNull()
      .references(() => sessions.id, { onDelete: 'cascade' }),
    severity: varchar('severity', { length: 20 })
      .notNull()
      .$type<(typeof violationSeverityEnum)[number]>(),
    data: jsonb('data').notNull().$type<Record<string, unknown>>(), // rule-specific evidence payload
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    acknowledgedAt: timestamp('acknowledged_at', { withTimezone: true }), // NULL = unacknowledged
  },
  (table) => [
    index('violations_server_user_id_idx').on(table.serverUserId),
    index('violations_rule_id_idx').on(table.ruleId),
    index('violations_created_at_idx').on(table.createdAt),
  ]
);
// Mobile pairing tokens (one-time use, expire after 15 minutes)
// Only a SHA-256 hash of the token is stored; expiry/one-time-use are
// presumably enforced at validation time (expiresAt / usedAt checks), not by
// the database itself — confirm in the pairing handler.
export const mobileTokens = pgTable('mobile_tokens', {
  id: uuid('id').primaryKey().defaultRandom(),
  tokenHash: varchar('token_hash', { length: 64 }).notNull().unique(), // SHA-256 of trr_mob_xxx token
  expiresAt: timestamp('expires_at', { withTimezone: true }).notNull(),
  createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
  createdBy: uuid('created_by').references(() => users.id, { onDelete: 'cascade' }),
  usedAt: timestamp('used_at', { withTimezone: true }), // Set when token is used, null = unused
});
// Mobile sessions (paired devices)
// One row per paired mobile device; the refresh token is stored only as a
// SHA-256 hash and is uniquely indexed for O(1) lookup during token refresh.
export const mobileSessions = pgTable(
  'mobile_sessions',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    // Link to user identity for multi-user support
    userId: uuid('user_id')
      .notNull()
      .references(() => users.id, { onDelete: 'cascade' }),
    refreshTokenHash: varchar('refresh_token_hash', { length: 64 }).notNull().unique(), // SHA-256
    deviceName: varchar('device_name', { length: 100 }).notNull(),
    deviceId: varchar('device_id', { length: 100 }).notNull(),
    platform: varchar('platform', { length: 20 }).notNull().$type<'ios' | 'android'>(),
    expoPushToken: varchar('expo_push_token', { length: 255 }), // For push notifications
    deviceSecret: varchar('device_secret', { length: 64 }), // For push payload encryption (base64)
    lastSeenAt: timestamp('last_seen_at', { withTimezone: true }).notNull().defaultNow(),
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    index('mobile_sessions_user_idx').on(table.userId),
    index('mobile_sessions_device_id_idx').on(table.deviceId),
    index('mobile_sessions_refresh_token_idx').on(table.refreshTokenHash),
    index('mobile_sessions_expo_push_token_idx').on(table.expoPushToken),
  ]
);
// Notification preferences per mobile device
// Exactly one row per mobile session (mobileSessionId is unique); the boolean
// event toggles mirror notificationEventTypeEnum one-to-one.
export const notificationPreferences = pgTable(
  'notification_preferences',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    mobileSessionId: uuid('mobile_session_id')
      .notNull()
      .unique()
      .references(() => mobileSessions.id, { onDelete: 'cascade' }),
    // Global toggles
    pushEnabled: boolean('push_enabled').notNull().default(true),
    // Event type toggles
    onViolationDetected: boolean('on_violation_detected').notNull().default(true),
    onStreamStarted: boolean('on_stream_started').notNull().default(false),
    onStreamStopped: boolean('on_stream_stopped').notNull().default(false),
    onConcurrentStreams: boolean('on_concurrent_streams').notNull().default(true),
    onNewDevice: boolean('on_new_device').notNull().default(true),
    onTrustScoreChanged: boolean('on_trust_score_changed').notNull().default(false),
    onServerDown: boolean('on_server_down').notNull().default(true),
    onServerUp: boolean('on_server_up').notNull().default(true),
    // Severity filtering (violations only)
    violationMinSeverity: integer('violation_min_severity').notNull().default(1), // 1=low, 2=warning, 3=high (maps onto violationSeverityEnum order)
    violationRuleTypes: text('violation_rule_types').array().default([]), // Empty = all types
    // Rate limiting
    maxPerMinute: integer('max_per_minute').notNull().default(10),
    maxPerHour: integer('max_per_hour').notNull().default(60),
    // Quiet hours
    quietHoursEnabled: boolean('quiet_hours_enabled').notNull().default(false),
    quietHoursStart: varchar('quiet_hours_start', { length: 5 }), // HH:MM format
    quietHoursEnd: varchar('quiet_hours_end', { length: 5 }), // HH:MM format
    quietHoursTimezone: varchar('quiet_hours_timezone', { length: 50 }).default('UTC'),
    quietHoursOverrideCritical: boolean('quiet_hours_override_critical').notNull().default(true),
    // Timestamps
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    index('notification_prefs_mobile_session_idx').on(table.mobileSessionId),
    // Validate quiet hours format: HH:MM where HH is 00-23 and MM is 00-59
    check(
      'quiet_hours_start_format',
      sql`${table.quietHoursStart} IS NULL OR ${table.quietHoursStart} ~ '^([01][0-9]|2[0-3]):[0-5][0-9]$'`
    ),
    check(
      'quiet_hours_end_format',
      sql`${table.quietHoursEnd} IS NULL OR ${table.quietHoursEnd} ~ '^([01][0-9]|2[0-3]):[0-5][0-9]$'`
    ),
  ]
);
// Notification event type enum
// Values correspond one-to-one with the on* toggles in notificationPreferences
// and key the per-channel routing rows in notificationChannelRouting.
export const notificationEventTypeEnum = [
  'violation_detected',
  'stream_started',
  'stream_stopped',
  'concurrent_streams',
  'new_device',
  'trust_score_changed',
  'server_down',
  'server_up',
] as const;
// Notification channel routing configuration
// Controls which channels receive which event types (web admin configurable)
// One row per event type (eventType is unique); all channels default to enabled.
export const notificationChannelRouting = pgTable(
  'notification_channel_routing',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    eventType: varchar('event_type', { length: 50 })
      .notNull()
      .unique()
      .$type<(typeof notificationEventTypeEnum)[number]>(),
    // Channel toggles
    discordEnabled: boolean('discord_enabled').notNull().default(true),
    webhookEnabled: boolean('webhook_enabled').notNull().default(true),
    pushEnabled: boolean('push_enabled').notNull().default(true),
    webToastEnabled: boolean('web_toast_enabled').notNull().default(true),
    // Timestamps
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
    updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [index('notification_channel_routing_event_type_idx').on(table.eventType)]
);
// Termination trigger type enum
export const terminationTriggerEnum = ['manual', 'rule'] as const;
// Stream termination audit log
// Append-only audit trail; exactly one of (triggeredByUserId) or (ruleId/violationId)
// is expected depending on `trigger` — not enforced by a DB constraint.
export const terminationLogs = pgTable(
  'termination_logs',
  {
    id: uuid('id').primaryKey().defaultRandom(),
    // What was terminated
    // Note: No FK constraint because sessions is a TimescaleDB hypertable
    // (hypertables don't support foreign key references to their primary key)
    // The relationship is maintained via Drizzle ORM relations
    sessionId: uuid('session_id').notNull(),
    serverId: uuid('server_id')
      .notNull()
      .references(() => servers.id, { onDelete: 'cascade' }),
    // The user whose stream was terminated
    serverUserId: uuid('server_user_id')
      .notNull()
      .references(() => serverUsers.id, { onDelete: 'cascade' }),
    // How it was triggered
    trigger: varchar('trigger', { length: 20 })
      .notNull()
      .$type<(typeof terminationTriggerEnum)[number]>(),
    // Who triggered it (for manual) - nullable for rule-triggered
    triggeredByUserId: uuid('triggered_by_user_id').references(() => users.id, {
      onDelete: 'set null', // keep the audit row even if the admin account is deleted
    }),
    // What rule triggered it (for rule-triggered) - nullable for manual
    ruleId: uuid('rule_id').references(() => rules.id, { onDelete: 'set null' }),
    violationId: uuid('violation_id').references(() => violations.id, { onDelete: 'set null' }),
    // Message shown to user (Plex only)
    reason: text('reason'),
    // Result
    success: boolean('success').notNull(),
    errorMessage: text('error_message'), // If success=false
    // Timestamp
    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
  },
  (table) => [
    index('termination_logs_session_idx').on(table.sessionId),
    index('termination_logs_server_user_idx').on(table.serverUserId),
    index('termination_logs_triggered_by_idx').on(table.triggeredByUserId),
    index('termination_logs_rule_idx').on(table.ruleId),
    index('termination_logs_created_at_idx').on(table.createdAt),
  ]
);
// Unit system enum for display preferences
export const unitSystemEnum = ['metric', 'imperial'] as const;
// Application settings (single row)
// Keyed by a constant id (default 1). The single-row invariant is a convention
// enforced by application code — there is no CHECK constraint guaranteeing it.
export const settings = pgTable('settings', {
  id: integer('id').primaryKey().default(1),
  allowGuestAccess: boolean('allow_guest_access').notNull().default(false),
  // Display preferences
  unitSystem: varchar('unit_system', { length: 20 })
    .notNull()
    .$type<(typeof unitSystemEnum)[number]>()
    .default('metric'),
  discordWebhookUrl: text('discord_webhook_url'),
  customWebhookUrl: text('custom_webhook_url'),
  webhookFormat: text('webhook_format').$type<'json' | 'ntfy' | 'apprise'>(), // Format for custom webhook payloads
  ntfyTopic: text('ntfy_topic'), // Topic for ntfy notifications (required when webhookFormat is 'ntfy')
  // Poller settings
  pollerEnabled: boolean('poller_enabled').notNull().default(true),
  pollerIntervalMs: integer('poller_interval_ms').notNull().default(15000),
  // Tautulli integration
  tautulliUrl: text('tautulli_url'),
  tautulliApiKey: text('tautulli_api_key'), // Encrypted
  // Network/access settings for self-hosted deployments
  externalUrl: text('external_url'), // Public URL for mobile/external access (e.g., https://tracearr.example.com)
  basePath: varchar('base_path', { length: 100 }).notNull().default(''), // For subfolder proxies (e.g., /tracearr)
  trustProxy: boolean('trust_proxy').notNull().default(false), // Trust X-Forwarded-* headers from reverse proxy
  // Mobile access
  mobileEnabled: boolean('mobile_enabled').notNull().default(false),
  // Authentication settings
  primaryAuthMethod: varchar('primary_auth_method', { length: 20 })
    .$type<'jellyfin' | 'local'>()
    .notNull()
    .default('local'), // Default to local auth
  updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
});
// ============================================================================
// Relations
// ============================================================================
// Drizzle relation metadata for the query API (db.query.*). These are purely
// ORM-level; they do not create or alter any database constraints.
// A server hosts many per-server accounts and sessions.
export const serversRelations = relations(servers, ({ many }) => ({
  serverUsers: many(serverUsers),
  sessions: many(sessions),
}));
// One identity owns many server accounts, mobile sessions, and pairing tokens.
export const usersRelations = relations(users, ({ many }) => ({
  serverUsers: many(serverUsers),
  mobileSessions: many(mobileSessions),
  mobileTokens: many(mobileTokens),
}));
// A server account joins exactly one user to one server.
export const serverUsersRelations = relations(serverUsers, ({ one, many }) => ({
  user: one(users, {
    fields: [serverUsers.userId],
    references: [users.id],
  }),
  server: one(servers, {
    fields: [serverUsers.serverId],
    references: [servers.id],
  }),
  sessions: many(sessions),
  rules: many(rules),
  violations: many(violations),
}));
export const sessionsRelations = relations(sessions, ({ one, many }) => ({
  server: one(servers, {
    fields: [sessions.serverId],
    references: [servers.id],
  }),
  serverUser: one(serverUsers, {
    fields: [sessions.serverUserId],
    references: [serverUsers.id],
  }),
  violations: many(violations),
}));
export const rulesRelations = relations(rules, ({ one, many }) => ({
  serverUser: one(serverUsers, {
    fields: [rules.serverUserId],
    references: [serverUsers.id],
  }),
  violations: many(violations),
}));
export const violationsRelations = relations(violations, ({ one }) => ({
  rule: one(rules, {
    fields: [violations.ruleId],
    references: [rules.id],
  }),
  serverUser: one(serverUsers, {
    fields: [violations.serverUserId],
    references: [serverUsers.id],
  }),
  // ORM-level link only once sessions becomes a hypertable (FK dropped at runtime)
  session: one(sessions, {
    fields: [violations.sessionId],
    references: [sessions.id],
  }),
}));
export const mobileSessionsRelations = relations(mobileSessions, ({ one }) => ({
  user: one(users, {
    fields: [mobileSessions.userId],
    references: [users.id],
  }),
  notificationPreferences: one(notificationPreferences, {
    fields: [mobileSessions.id],
    references: [notificationPreferences.mobileSessionId],
  }),
}));
export const notificationPreferencesRelations = relations(notificationPreferences, ({ one }) => ({
  mobileSession: one(mobileSessions, {
    fields: [notificationPreferences.mobileSessionId],
    references: [mobileSessions.id],
  }),
}));
export const mobileTokensRelations = relations(mobileTokens, ({ one }) => ({
  createdByUser: one(users, {
    fields: [mobileTokens.createdBy],
    references: [users.id],
  }),
}));
// Audit-log joins; sessionId has no FK, so this relation is the only link.
export const terminationLogsRelations = relations(terminationLogs, ({ one }) => ({
  session: one(sessions, {
    fields: [terminationLogs.sessionId],
    references: [sessions.id],
  }),
  server: one(servers, {
    fields: [terminationLogs.serverId],
    references: [servers.id],
  }),
  serverUser: one(serverUsers, {
    fields: [terminationLogs.serverUserId],
    references: [serverUsers.id],
  }),
  triggeredByUser: one(users, {
    fields: [terminationLogs.triggeredByUserId],
    references: [users.id],
  }),
  rule: one(rules, {
    fields: [terminationLogs.ruleId],
    references: [rules.id],
  }),
  violation: one(violations, {
    fields: [terminationLogs.violationId],
    references: [violations.id],
  }),
}));

View File

@@ -0,0 +1,649 @@
/**
* TimescaleDB initialization and setup
*
* This module ensures TimescaleDB features are properly configured for the sessions table.
* It runs on every server startup and is idempotent - safe to run multiple times.
*/
import { db } from './client.js';
import { sql } from 'drizzle-orm';
/** Snapshot of the TimescaleDB configuration state for the sessions table. */
export interface TimescaleStatus {
  extensionInstalled: boolean; // timescaledb extension present in pg_extension
  sessionsIsHypertable: boolean; // sessions has been converted to a hypertable
  compressionEnabled: boolean; // native compression configured on the hypertable
  continuousAggregates: string[]; // continuous-aggregate view names over sessions
  chunkCount: number; // number of chunks backing the hypertable
}
/**
 * Check whether the TimescaleDB extension is installed.
 *
 * Probes pg_extension; any query failure is treated the same as "not
 * installed" so startup can proceed without the extension.
 */
async function isTimescaleInstalled(): Promise<boolean> {
  try {
    const { rows } = await db.execute(sql`
      SELECT EXISTS(
        SELECT 1 FROM pg_extension WHERE extname = 'timescaledb'
      ) as installed
    `);
    const first = rows[0] as { installed: boolean } | undefined;
    return first?.installed ?? false;
  } catch {
    return false;
  }
}
/**
 * Check whether the sessions table has already been converted to a hypertable.
 *
 * Queries the timescaledb_information catalog; when that schema is absent
 * (extension not installed) the query throws and we report false.
 */
async function isSessionsHypertable(): Promise<boolean> {
  try {
    const { rows } = await db.execute(sql`
      SELECT EXISTS(
        SELECT 1 FROM timescaledb_information.hypertables
        WHERE hypertable_name = 'sessions'
      ) as is_hypertable
    `);
    const row = rows[0] as { is_hypertable: boolean } | undefined;
    return row?.is_hypertable ?? false;
  } catch {
    return false;
  }
}
/**
 * List the continuous aggregates defined over the sessions hypertable.
 *
 * Returns an empty list when the TimescaleDB information schema is missing
 * or the query fails for any reason.
 */
async function getContinuousAggregates(): Promise<string[]> {
  try {
    const { rows } = await db.execute(sql`
      SELECT view_name
      FROM timescaledb_information.continuous_aggregates
      WHERE hypertable_name = 'sessions'
    `);
    const names: string[] = [];
    for (const row of rows as { view_name: string }[]) {
      names.push(row.view_name);
    }
    return names;
  } catch {
    return [];
  }
}
/**
 * Check whether native compression is enabled on the sessions hypertable.
 *
 * Reports false when sessions is not a hypertable (no matching row) or the
 * TimescaleDB catalog is unavailable.
 */
async function isCompressionEnabled(): Promise<boolean> {
  try {
    const { rows } = await db.execute(sql`
      SELECT compression_enabled
      FROM timescaledb_information.hypertables
      WHERE hypertable_name = 'sessions'
    `);
    const hypertable = rows[0] as { compression_enabled: boolean } | undefined;
    return hypertable?.compression_enabled ?? false;
  } catch {
    return false;
  }
}
/**
 * Count the chunks backing the sessions hypertable.
 *
 * Returns 0 when the TimescaleDB catalog is unavailable or the query fails.
 */
async function getChunkCount(): Promise<number> {
  try {
    const { rows } = await db.execute(sql`
      SELECT count(*)::int as count
      FROM timescaledb_information.chunks
      WHERE hypertable_name = 'sessions'
    `);
    const first = rows[0] as { count: number } | undefined;
    return first?.count ?? 0;
  } catch {
    return 0;
  }
}
/**
 * Convert sessions table to hypertable, partitioned on started_at.
 * This is idempotent - if_not_exists ensures it won't fail if already a hypertable.
 *
 * Side effects when started_at is not yet part of the primary key:
 * - drops the violations -> sessions FK (FKs cannot reference a hypertable)
 * - replaces the sessions PK with a composite ("id", "started_at")
 * - adds a plain index on violations.session_id as the FK substitute
 * Also creates expression indexes on COALESCE(reference_id, id), the "play id".
 */
async function convertToHypertable(): Promise<void> {
  // First, we need to handle the primary key change
  // TimescaleDB requires the partition column (started_at) in the primary key
  // Check if we need to modify the primary key
  const pkResult = await db.execute(sql`
    SELECT constraint_name
    FROM information_schema.table_constraints
    WHERE table_name = 'sessions'
    AND constraint_type = 'PRIMARY KEY'
  `);
  const pkName = (pkResult.rows[0] as { constraint_name: string })?.constraint_name;
  // Check if started_at is already in the primary key
  // NOTE(review): neither information_schema query filters by table_schema —
  // assumes only one 'sessions' table is visible; confirm for multi-schema setups.
  const pkColsResult = await db.execute(sql`
    SELECT column_name
    FROM information_schema.key_column_usage
    WHERE table_name = 'sessions'
    AND constraint_name = ${pkName}
  `);
  const pkColumns = (pkColsResult.rows as { column_name: string }[]).map((r) => r.column_name);
  if (!pkColumns.includes('started_at')) {
    // Need to modify primary key for hypertable conversion
    // Drop FK constraint from violations if it exists
    await db.execute(sql`
      ALTER TABLE "violations" DROP CONSTRAINT IF EXISTS "violations_session_id_sessions_id_fk"
    `);
    // Drop existing primary key
    if (pkName) {
      // sql.raw: identifier names cannot be bind parameters; pkName comes
      // from information_schema, not from user input
      await db.execute(sql.raw(`ALTER TABLE "sessions" DROP CONSTRAINT IF EXISTS "${pkName}"`));
    }
    // Add composite primary key
    await db.execute(sql`
      ALTER TABLE "sessions" ADD PRIMARY KEY ("id", "started_at")
    `);
    // Add index for violations session lookup (since we can't have FK to hypertable)
    await db.execute(sql`
      CREATE INDEX IF NOT EXISTS "violations_session_lookup_idx" ON "violations" ("session_id")
    `);
  }
  // Convert to hypertable
  await db.execute(sql`
    SELECT create_hypertable('sessions', 'started_at',
      chunk_time_interval => INTERVAL '7 days',
      migrate_data => true,
      if_not_exists => true
    )
  `);
  // Create expression indexes for COALESCE(reference_id, id) pattern
  // This pattern is used throughout the codebase for play grouping
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_play_id
    ON sessions ((COALESCE(reference_id, id)))
  `);
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_time_play_id
    ON sessions (started_at DESC, (COALESCE(reference_id, id)))
  `);
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_user_play_id
    ON sessions (server_user_id, (COALESCE(reference_id, id)))
  `);
}
/**
 * Create partial indexes for common filtered queries
 * These reduce scan size by excluding irrelevant rows
 *
 * All statements use IF NOT EXISTS, so re-running is safe.
 */
async function createPartialIndexes(): Promise<void> {
  // Partial index for geo queries (excludes NULL rows - ~20% savings)
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_geo_partial
    ON sessions (geo_lat, geo_lon, started_at DESC)
    WHERE geo_lat IS NOT NULL AND geo_lon IS NOT NULL
  `);
  // Partial index for unacknowledged violations by user (hot path for user-specific alerts)
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_violations_unacked_partial
    ON violations (server_user_id, created_at DESC)
    WHERE acknowledged_at IS NULL
  `);
  // Partial index for unacknowledged violations list (hot path for main violations list)
  // This index is optimized for the common query: ORDER BY created_at DESC WHERE acknowledged_at IS NULL
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_violations_unacked_list
    ON violations (created_at DESC)
    WHERE acknowledged_at IS NULL
  `);
  // Partial index for active/playing sessions
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_active_partial
    ON sessions (server_id, server_user_id, started_at DESC)
    WHERE state = 'playing'
  `);
  // Partial index for transcoded sessions (quality analysis)
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_transcode_partial
    ON sessions (started_at DESC, quality, bitrate)
    WHERE is_transcode = true
  `);
}
/**
 * Create optimized indexes for top content queries
 * Time-prefixed indexes enable efficient time-filtered aggregations
 *
 * All statements use IF NOT EXISTS, so re-running is safe.
 */
async function createContentIndexes(): Promise<void> {
  // Time-prefixed index for media title queries
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_media_time
    ON sessions (started_at DESC, media_type, media_title)
  `);
  // Time-prefixed index for show/episode queries (excludes NULLs)
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_show_time
    ON sessions (started_at DESC, grandparent_title, season_number, episode_number)
    WHERE grandparent_title IS NOT NULL
  `);
  // Covering index for top content query (includes frequently accessed columns)
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_top_content_covering
    ON sessions (started_at DESC, media_title, media_type)
    INCLUDE (duration_ms, server_user_id)
  `);
  // Device tracking index for device velocity rule
  await db.execute(sql`
    CREATE INDEX IF NOT EXISTS idx_sessions_device_tracking
    ON sessions (server_user_id, started_at DESC, device_id, ip_address)
  `);
}
/**
 * Check whether the TimescaleDB Toolkit extension (provides hyperloglog)
 * is installed in this database.
 */
async function isToolkitInstalled(): Promise<boolean> {
  try {
    const res = await db.execute(sql`
      SELECT EXISTS(
        SELECT 1 FROM pg_extension WHERE extname = 'timescaledb_toolkit'
      ) as installed
    `);
    const row = res.rows[0] as { installed: boolean } | undefined;
    return row?.installed === true;
  } catch {
    return false;
  }
}
/**
 * Check whether the TimescaleDB Toolkit extension can be installed on this
 * PostgreSQL server (i.e. appears in pg_available_extensions).
 */
async function isToolkitAvailableOnSystem(): Promise<boolean> {
  try {
    const { rows } = await db.execute(sql`
      SELECT EXISTS(
        SELECT 1 FROM pg_available_extensions WHERE name = 'timescaledb_toolkit'
      ) as available
    `);
    const first = rows[0] as { available: boolean } | undefined;
    return Boolean(first?.available);
  } catch {
    return false;
  }
}
/**
 * Create continuous aggregates for dashboard performance
 *
 * Uses HyperLogLog from TimescaleDB Toolkit for approximate distinct counts
 * (99.5% accuracy) since TimescaleDB doesn't support COUNT(DISTINCT) in
 * continuous aggregates. Falls back to COUNT(*) if Toolkit unavailable.
 *
 * All views are created WITH NO DATA; setupRefreshPolicies() and
 * refreshAggregates() populate them afterwards.
 */
async function createContinuousAggregates(): Promise<void> {
  const hasToolkit = await isToolkitInstalled();
  // Drop old unused aggregates
  // daily_plays_by_platform: platform stats use prepared statement instead
  // daily_play_patterns/hourly_play_patterns: never wired up, missing server_id for multi-server filtering
  await db.execute(sql`DROP MATERIALIZED VIEW IF EXISTS daily_plays_by_platform CASCADE`);
  await db.execute(sql`DROP MATERIALIZED VIEW IF EXISTS daily_play_patterns CASCADE`);
  await db.execute(sql`DROP MATERIALIZED VIEW IF EXISTS hourly_play_patterns CASCADE`);
  if (hasToolkit) {
    // Use HyperLogLog for accurate distinct play counting
    // hyperloglog(32768, ...) gives ~0.4% error rate
    // Daily plays by user with HyperLogLog
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_plays_by_user
      WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        server_user_id,
        hyperloglog(32768, COALESCE(reference_id, id)) AS plays_hll,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms
      FROM sessions
      GROUP BY day, server_user_id
      WITH NO DATA
    `);
    // Daily plays by server with HyperLogLog
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_plays_by_server
      WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        server_id,
        hyperloglog(32768, COALESCE(reference_id, id)) AS plays_hll,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms
      FROM sessions
      GROUP BY day, server_id
      WITH NO DATA
    `);
    // Daily stats summary (main dashboard aggregate) with HyperLogLog
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_stats_summary
      WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        hyperloglog(32768, COALESCE(reference_id, id)) AS plays_hll,
        hyperloglog(32768, server_user_id) AS users_hll,
        hyperloglog(32768, server_id) AS servers_hll,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms,
        AVG(COALESCE(duration_ms, 0))::bigint AS avg_duration_ms
      FROM sessions
      GROUP BY day
      WITH NO DATA
    `);
    // Hourly concurrent streams (used by /concurrent endpoint)
    // Note: This uses COUNT(*) since concurrent streams isn't about unique plays
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS hourly_concurrent_streams
      WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
      SELECT
        time_bucket('1 hour', started_at) AS hour,
        server_id,
        COUNT(*) AS stream_count
      FROM sessions
      WHERE state IN ('playing', 'paused')
      GROUP BY hour, server_id
      WITH NO DATA
    `);
  } else {
    // Fallback: Standard aggregates without HyperLogLog
    // Note: These use COUNT(*) which overcounts resumed sessions
    console.warn('TimescaleDB Toolkit not available - using COUNT(*) aggregates');
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_plays_by_user
      WITH (timescaledb.continuous) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        server_user_id,
        COUNT(*) AS play_count,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms
      FROM sessions
      GROUP BY day, server_user_id
      WITH NO DATA
    `);
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_plays_by_server
      WITH (timescaledb.continuous) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        server_id,
        COUNT(*) AS play_count,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms
      FROM sessions
      GROUP BY day, server_id
      WITH NO DATA
    `);
    // NOTE(review): the view below uses COUNT(DISTINCT ...) inside a continuous
    // aggregate, which the doc comment on this function says TimescaleDB does
    // not support — this CREATE likely fails at runtime and, since the caller
    // does not catch errors from this function, would abort setup. Confirm
    // against the installed TimescaleDB version.
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS daily_stats_summary
      WITH (timescaledb.continuous) AS
      SELECT
        time_bucket('1 day', started_at) AS day,
        COUNT(DISTINCT COALESCE(reference_id, id)) AS play_count,
        COUNT(DISTINCT server_user_id) AS user_count,
        COUNT(DISTINCT server_id) AS server_count,
        SUM(COALESCE(duration_ms, 0)) AS total_duration_ms,
        AVG(COALESCE(duration_ms, 0))::bigint AS avg_duration_ms
      FROM sessions
      GROUP BY day
      WITH NO DATA
    `);
    // Hourly concurrent streams (used by /concurrent endpoint)
    await db.execute(sql`
      CREATE MATERIALIZED VIEW IF NOT EXISTS hourly_concurrent_streams
      WITH (timescaledb.continuous) AS
      SELECT
        time_bucket('1 hour', started_at) AS hour,
        server_id,
        COUNT(*) AS stream_count
      FROM sessions
      WHERE state IN ('playing', 'paused')
      GROUP BY hour, server_id
      WITH NO DATA
    `);
  }
}
/**
 * Set up refresh policies for continuous aggregates
 * Refreshes every 5 minutes with 1 hour lag for real-time dashboard
 *
 * Daily aggregates re-materialize a trailing 3-day window; the hourly
 * concurrent-streams view a trailing 1-day window. if_not_exists => true
 * makes re-runs safe.
 */
async function setupRefreshPolicies(): Promise<void> {
  await db.execute(sql`
    SELECT add_continuous_aggregate_policy('daily_plays_by_user',
      start_offset => INTERVAL '3 days',
      end_offset => INTERVAL '1 hour',
      schedule_interval => INTERVAL '5 minutes',
      if_not_exists => true
    )
  `);
  await db.execute(sql`
    SELECT add_continuous_aggregate_policy('daily_plays_by_server',
      start_offset => INTERVAL '3 days',
      end_offset => INTERVAL '1 hour',
      schedule_interval => INTERVAL '5 minutes',
      if_not_exists => true
    )
  `);
  await db.execute(sql`
    SELECT add_continuous_aggregate_policy('daily_stats_summary',
      start_offset => INTERVAL '3 days',
      end_offset => INTERVAL '1 hour',
      schedule_interval => INTERVAL '5 minutes',
      if_not_exists => true
    )
  `);
  await db.execute(sql`
    SELECT add_continuous_aggregate_policy('hourly_concurrent_streams',
      start_offset => INTERVAL '1 day',
      end_offset => INTERVAL '1 hour',
      schedule_interval => INTERVAL '5 minutes',
      if_not_exists => true
    )
  `);
}
/**
 * Enable compression on sessions hypertable
 *
 * Segments compressed chunks by (server_user_id, server_id) and schedules a
 * background policy that compresses chunks older than 7 days.
 */
async function enableCompression(): Promise<void> {
  // Enable compression settings
  // NOTE(review): the caller (initTimescaleDB) only invokes this when
  // compression is not yet enabled; the ALTER itself has no IF NOT EXISTS
  // guard — confirm it is safe to re-run on your TimescaleDB version.
  await db.execute(sql`
    ALTER TABLE sessions SET (
      timescaledb.compress,
      timescaledb.compress_segmentby = 'server_user_id, server_id'
    )
  `);
  // Add compression policy (compress chunks older than 7 days)
  await db.execute(sql`
    SELECT add_compression_policy('sessions', INTERVAL '7 days', if_not_exists => true)
  `);
}
/**
 * Manually refresh all continuous aggregates
 * Call this after bulk data imports (e.g., Tautulli import) to make the data immediately available
 *
 * No-op when the TimescaleDB extension is not installed. Individual refresh
 * failures are logged and skipped so one bad aggregate doesn't abort the rest.
 */
export async function refreshAggregates(): Promise<void> {
  const hasExtension = await isTimescaleInstalled();
  if (!hasExtension) return;
  const aggregates = await getContinuousAggregates();
  for (const aggregate of aggregates) {
    try {
      // CALL cannot take the view name as a bind parameter, so the name is
      // interpolated into raw SQL. Escape single quotes as defense-in-depth:
      // names come from the TimescaleDB catalog, but sql.raw bypasses binding.
      const quotedName = aggregate.replace(/'/g, "''");
      // Refresh the entire aggregate (no time bounds = full refresh)
      await db.execute(
        sql.raw(`CALL refresh_continuous_aggregate('${quotedName}', NULL, NULL)`)
      );
    } catch (err) {
      // Log but don't fail - aggregate might not have data yet
      console.warn(`Failed to refresh aggregate ${aggregate}:`, err);
    }
  }
}
/**
* Get current TimescaleDB status
*/
export async function getTimescaleStatus(): Promise<TimescaleStatus> {
const extensionInstalled = await isTimescaleInstalled();
if (!extensionInstalled) {
return {
extensionInstalled: false,
sessionsIsHypertable: false,
compressionEnabled: false,
continuousAggregates: [],
chunkCount: 0,
};
}
return {
extensionInstalled: true,
sessionsIsHypertable: await isSessionsHypertable(),
compressionEnabled: await isCompressionEnabled(),
continuousAggregates: await getContinuousAggregates(),
chunkCount: await getChunkCount(),
};
}
/**
 * Initialize TimescaleDB for the sessions table
 *
 * This function is idempotent and safe to run on:
 * - Fresh installs (sets everything up)
 * - Existing installs with TimescaleDB already configured (no-op)
 * - Partially configured installs (completes setup)
 * - Installs without TimescaleDB extension (graceful skip)
 *
 * @returns success flag, final status snapshot, and a human-readable list of
 *   the actions taken (or skipped) during this run
 */
export async function initTimescaleDB(): Promise<{
  success: boolean;
  status: TimescaleStatus;
  actions: string[];
}> {
  const actions: string[] = [];
  // Check if TimescaleDB extension is available
  const hasExtension = await isTimescaleInstalled();
  if (!hasExtension) {
    return {
      success: true, // Not a failure - just no TimescaleDB
      status: {
        extensionInstalled: false,
        sessionsIsHypertable: false,
        compressionEnabled: false,
        continuousAggregates: [],
        chunkCount: 0,
      },
      actions: ['TimescaleDB extension not installed - skipping setup'],
    };
  }
  actions.push('TimescaleDB extension found');
  // Enable TimescaleDB Toolkit for HyperLogLog (approximate distinct counts)
  // Check if available first to avoid noisy PostgreSQL errors in logs
  const toolkitAvailable = await isToolkitAvailableOnSystem();
  if (toolkitAvailable) {
    const toolkitInstalled = await isToolkitInstalled();
    if (!toolkitInstalled) {
      await db.execute(sql`CREATE EXTENSION IF NOT EXISTS timescaledb_toolkit`);
      actions.push('TimescaleDB Toolkit extension enabled');
    } else {
      actions.push('TimescaleDB Toolkit extension already enabled');
    }
  } else {
    actions.push('TimescaleDB Toolkit not available (optional - using standard aggregates)');
  }
  // Check if sessions is already a hypertable
  const isHypertable = await isSessionsHypertable();
  if (!isHypertable) {
    await convertToHypertable();
    actions.push('Converted sessions table to hypertable');
  } else {
    actions.push('Sessions already a hypertable');
  }
  // Check and create continuous aggregates
  // If ANY expected aggregate is missing, recreate the whole set (creation is
  // guarded by IF NOT EXISTS) and (re)apply refresh policies.
  const existingAggregates = await getContinuousAggregates();
  const expectedAggregates = [
    'daily_plays_by_user',
    'daily_plays_by_server',
    'daily_stats_summary',
    'hourly_concurrent_streams',
  ];
  const missingAggregates = expectedAggregates.filter(
    (agg) => !existingAggregates.includes(agg)
  );
  if (missingAggregates.length > 0) {
    await createContinuousAggregates();
    await setupRefreshPolicies();
    actions.push(`Created continuous aggregates: ${missingAggregates.join(', ')}`);
  } else {
    actions.push('All continuous aggregates exist');
  }
  // Check and enable compression
  const hasCompression = await isCompressionEnabled();
  if (!hasCompression) {
    await enableCompression();
    actions.push('Enabled compression on sessions');
  } else {
    actions.push('Compression already enabled');
  }
  // Create partial indexes for optimized filtered queries
  // (index creation is best-effort: failures are logged, not fatal)
  try {
    await createPartialIndexes();
    actions.push('Created partial indexes (geo, violations, active, transcode)');
  } catch (err) {
    console.warn('Failed to create some partial indexes:', err);
    actions.push('Partial indexes: some may already exist');
  }
  // Create content and device tracking indexes
  try {
    await createContentIndexes();
    actions.push('Created content and device tracking indexes');
  } catch (err) {
    console.warn('Failed to create some content indexes:', err);
    actions.push('Content indexes: some may already exist');
  }
  // Get final status
  const status = await getTimescaleStatus();
  return {
    success: true,
    status,
    actions,
  };
}