From 80368a811bcd1ebe4be9b241f6adf32ae85688b7 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 12 Mar 2026 07:02:05 +0800 Subject: [PATCH 1/9] sql script and related changes --- .../graphile-settings/src/upload-resolver.ts | 203 +++++- graphql/server/src/middleware/upload.ts | 12 +- migrations/object_store.sql | 607 ++++++++++++++++++ pnpm-lock.yaml | 423 ++++++++++++ uploads/s3-streamer/package.json | 1 + uploads/s3-streamer/src/index.ts | 1 + uploads/s3-streamer/src/storage-provider.ts | 198 ++++++ 7 files changed, 1417 insertions(+), 28 deletions(-) create mode 100644 migrations/object_store.sql create mode 100644 uploads/s3-streamer/src/storage-provider.ts diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index 90df46a61..c744ee11f 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -7,6 +7,16 @@ * Lazily initializes the S3 streamer on first upload to avoid requiring * env vars at module load time. 
* + * V2 mode (UPLOAD_V2_ENABLED=true): + * - Key format: {database_id}/{bucket_key}/{uuid}_origin + * - INSERT into object_store_public.files after S3 upload + * - Returns { key, url, mime, filename } for image/upload types + * + * Legacy mode (UPLOAD_V2_ENABLED=false, default): + * - Key format: {random24hex}-{sanitized-filename} + * - No files table INSERT + * - Returns { url, mime, filename } for image/upload types + * * ENV VARS: * BUCKET_PROVIDER - 'minio' | 's3' (default: 'minio') * BUCKET_NAME - bucket name (default: 'test-bucket') @@ -14,13 +24,17 @@ * AWS_ACCESS_KEY - access key (default: 'minioadmin') * AWS_SECRET_KEY - secret key (default: 'minioadmin') * MINIO_ENDPOINT - MinIO endpoint (default: 'http://localhost:9000') + * UPLOAD_V2_ENABLED - enable v2 upload with files index (default: 'false') */ import Streamer from '@constructive-io/s3-streamer'; +import { S3StorageProvider } from '@constructive-io/s3-streamer'; +import type { StorageProvider } from '@constructive-io/s3-streamer'; import uploadNames from '@constructive-io/upload-names'; import { getEnvOptions } from '@constructive-io/graphql-env'; import { Logger } from '@pgpmjs/logger'; -import { randomBytes } from 'crypto'; +import { randomBytes, randomUUID } from 'crypto'; +import { Pool } from 'pg'; import type { Readable } from 'stream'; import type { FileUpload, @@ -32,73 +46,178 @@ const log = new Logger('upload-resolver'); const DEFAULT_IMAGE_MIME_TYPES = ['image/jpeg', 'image/png', 'image/svg+xml']; let streamer: Streamer | null = null; +let storageProvider: StorageProvider | null = null; let bucketName: string; +let pgPool: Pool | null = null; -function getStreamer(): Streamer { - if (streamer) return streamer; +const isV2Enabled = (): boolean => + process.env.UPLOAD_V2_ENABLED === 'true' || process.env.UPLOAD_V2_ENABLED === '1'; +function getCdnConfig() { const opts = getEnvOptions(); const cdn = opts.cdn || {}; + return { + provider: (cdn.provider || 'minio') as 'minio' | 's3', + 
bucketName: cdn.bucketName || 'test-bucket', + awsRegion: cdn.awsRegion || 'us-east-1', + awsAccessKey: cdn.awsAccessKey || 'minioadmin', + awsSecretKey: cdn.awsSecretKey || 'minioadmin', + minioEndpoint: cdn.minioEndpoint || 'http://localhost:9000', + }; +} - const provider = cdn.provider || 'minio'; - bucketName = cdn.bucketName || 'test-bucket'; - const awsRegion = cdn.awsRegion || 'us-east-1'; - const awsAccessKey = cdn.awsAccessKey || 'minioadmin'; - const awsSecretKey = cdn.awsSecretKey || 'minioadmin'; - const minioEndpoint = cdn.minioEndpoint || 'http://localhost:9000'; +function getStreamer(): Streamer { + if (streamer) return streamer; + + const cdn = getCdnConfig(); + bucketName = cdn.bucketName; if (process.env.NODE_ENV === 'production') { - if (!cdn.awsAccessKey || !cdn.awsSecretKey) { + if (cdn.awsAccessKey === 'minioadmin' || cdn.awsSecretKey === 'minioadmin') { log.warn('[upload-resolver] WARNING: Using default credentials in production.'); } } log.info( - `[upload-resolver] Initializing: provider=${provider} bucket=${bucketName}`, + `[upload-resolver] Initializing: provider=${cdn.provider} bucket=${bucketName}`, ); streamer = new Streamer({ defaultBucket: bucketName, - awsRegion, - awsSecretKey, - awsAccessKey, - minioEndpoint, - provider, + awsRegion: cdn.awsRegion, + awsSecretKey: cdn.awsSecretKey, + awsAccessKey: cdn.awsAccessKey, + minioEndpoint: cdn.minioEndpoint, + provider: cdn.provider, }); return streamer; } +function getStorageProvider(): StorageProvider { + if (storageProvider) return storageProvider; + + const cdn = getCdnConfig(); + bucketName = cdn.bucketName; + + storageProvider = new S3StorageProvider({ + bucket: cdn.bucketName, + awsRegion: cdn.awsRegion, + awsAccessKey: cdn.awsAccessKey, + awsSecretKey: cdn.awsSecretKey, + minioEndpoint: cdn.minioEndpoint, + provider: cdn.provider, + }); + + return storageProvider; +} + +function getPgPool(): Pool { + if (pgPool) return pgPool; + pgPool = new Pool({ + host: process.env.PGHOST || 
'localhost', + port: Number(process.env.PGPORT || 5432), + database: process.env.PGDATABASE || 'constructive', + user: process.env.PGUSER || 'postgres', + password: process.env.PGPASSWORD || 'password', + max: 3, + }); + return pgPool; +} + /** - * Generates a randomized storage key from a filename. - * Format: {random10chars}-{sanitized-filename} + * Generates a randomized storage key from a filename (legacy format). + * Format: {random24hex}-{sanitized-filename} */ -function generateKey(filename: string): string { +function generateLegacyKey(filename: string): string { const rand = randomBytes(12).toString('hex'); return `${rand}-${uploadNames(filename)}`; } +/** + * Generates a v2 storage key. + * Format: {database_id}/{bucket_key}/{uuid}_origin + */ +function generateV2Key(databaseId: string, bucketKey: string): { key: string; fileId: string } { + const fileId = randomUUID(); + return { key: `${databaseId}/${bucketKey}/${fileId}_origin`, fileId }; +} + +/** + * INSERTs a row into object_store_public.files. + * Fires the AFTER INSERT trigger which enqueues a process-image job. + */ +async function insertFileRecord( + fileId: string, + databaseId: string, + bucketKey: string, + key: string, + etag: string, + createdBy: string | null, +): Promise { + const pool = getPgPool(); + await pool.query( + `INSERT INTO object_store_public.files + (id, database_id, bucket_key, key, etag, created_by) + VALUES ($1, $2, $3, $4, $5, $6)`, + [fileId, Number(databaseId), bucketKey, key, etag, createdBy], + ); +} + +/** + * Extracts databaseId and userId from the GraphQL context. + * In PostGraphile, context contains the Express request. 
+ */ +function extractContextInfo(context: any): { databaseId: string | null; userId: string | null } { + // PostGraphile v5 stores the request on context + const req = context?.req || context?.request; + const databaseId = req?.api?.databaseId || req?.databaseId || null; + const userId = req?.token?.user_id || null; + return { databaseId, userId }; +} + /** * Streams a file to S3/MinIO storage and returns the URL and metadata. * * Reusable by both the GraphQL upload resolver and REST /upload endpoint. + * + * When UPLOAD_V2_ENABLED, uses the new key format and INSERTs a files row. */ export async function streamToStorage( readStream: Readable, filename: string, -): Promise<{ url: string; filename: string; mime: string }> { + opts?: { databaseId?: string; userId?: string; bucketKey?: string }, +): Promise<{ url: string; filename: string; mime: string; key?: string }> { + if (isV2Enabled() && opts?.databaseId) { + const storage = getStorageProvider(); + const bucketKey = opts.bucketKey || 'default'; + const { key, fileId } = generateV2Key(opts.databaseId, bucketKey); + + const s3 = getStreamer(); + const detected = await s3.detectContentType({ readStream, filename }); + const contentType = detected.contentType; + + const result = await storage.upload(key, detected.stream, { contentType }); + + await insertFileRecord(fileId, opts.databaseId, bucketKey, key, result.etag, opts.userId || null); + + const url = await storage.presignGet(key, 3600); + return { key, url, filename, mime: contentType }; + } + + // Legacy path const s3 = getStreamer(); - const key = generateKey(filename); - const result = await s3.upload({ + const key = generateLegacyKey(filename); + const uploadResult = await s3.upload({ readStream, filename, key, bucket: bucketName, }); return { - url: result.upload.Location, + url: uploadResult.upload.Location, filename, - mime: result.contentType, + mime: uploadResult.contentType, }; } @@ -106,7 +225,7 @@ export async function streamToStorage( * Upload 
resolver that streams files to S3/MinIO. * * Returns different shapes based on the column's type hint: - * - 'image' / 'upload' → { filename, mime, url } (for jsonb domain columns) + * - 'image' / 'upload' → { key, url, mime, filename } (v2) or { url, mime, filename } (legacy) * - 'attachment' / default → url string (for text domain columns) * * MIME validation happens before persistence: content type is detected from @@ -121,7 +240,6 @@ async function uploadResolver( const { tags, type } = info.uploadPlugin; const s3 = getStreamer(); const { filename } = upload; - const key = generateKey(filename); // MIME type validation from smart tags const typ = type || tags?.type; @@ -147,6 +265,39 @@ async function uploadResolver( throw new Error('UPLOAD_MIMETYPE'); } + // V2 path: new key format + files table INSERT + if (isV2Enabled()) { + const { databaseId, userId } = extractContextInfo(_context); + + if (databaseId) { + const storage = getStorageProvider(); + const bucketKey = 'default'; + const { key, fileId } = generateV2Key(databaseId, bucketKey); + + const result = await storage.upload(key, detected.stream, { + contentType: detectedContentType, + }); + + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, userId); + + const url = await storage.presignGet(key, 3600); + + switch (typ) { + case 'image': + case 'upload': + return { key, filename, mime: detectedContentType, url }; + case 'attachment': + default: + return url; + } + } + + log.warn('[upload-resolver] V2 enabled but no databaseId in context, falling back to legacy'); + } + + // Legacy path + const key = generateLegacyKey(filename); + const result = await s3.uploadWithContentType({ readStream: detected.stream, contentType: detectedContentType, diff --git a/graphql/server/src/middleware/upload.ts b/graphql/server/src/middleware/upload.ts index 89c513115..71ab851b1 100644 --- a/graphql/server/src/middleware/upload.ts +++ b/graphql/server/src/middleware/upload.ts @@ -266,8 +266,11 @@ export 
const createUploadAuthenticateMiddleware = ( * Accepts a single file via multipart/form-data, streams it to S3/MinIO, * and returns file metadata. The frontend uses this in a two-step flow: * - * 1. POST /upload -> { url, filename, mime, size } + * 1. POST /upload -> { key?, url, filename, mime, size } * 2. GraphQL mutation -> patch row with the returned metadata + * + * When UPLOAD_V2_ENABLED=true, passes databaseId and userId to streamToStorage + * so it can use the new key format and INSERT into object_store_public.files. */ export const uploadRoute: RequestHandler[] = [ parseFileWithErrors, @@ -287,13 +290,18 @@ export const uploadRoute: RequestHandler[] = [ try { const readStream = fs.createReadStream(req.file.path); - const result = await streamToStorage(readStream, req.file.originalname); + const result = await streamToStorage(readStream, req.file.originalname, { + databaseId: req.api?.databaseId, + userId: req.token.user_id, + bucketKey: 'default', + }); uploadLog.debug( `[upload] Uploaded file for user=${req.token.user_id} filename=${req.file.originalname} mime=${result.mime} size=${req.file.size}`, ); res.json({ + ...(result.key ? 
{ key: result.key } : {}), url: result.url, filename: result.filename, mime: result.mime, diff --git a/migrations/object_store.sql b/migrations/object_store.sql new file mode 100644 index 000000000..b370ffdca --- /dev/null +++ b/migrations/object_store.sql @@ -0,0 +1,607 @@ +-- ============================================================================= +-- Constructive Upload System -- object_store_public schema +-- ============================================================================= +-- Run: psql -h localhost -U postgres -d constructive < migrations/object_store.sql +-- ============================================================================= + +BEGIN; + +-- Ensure required roles exist (idempotent for dev environments) +DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticated') THEN + CREATE ROLE authenticated NOLOGIN; + END IF; + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'service_role') THEN + CREATE ROLE service_role NOLOGIN; + END IF; +END $$; + +-- Ensure app_jobs schema + stub add_job exist (required by trigger functions). +-- In production, app_jobs is deployed by the database-jobs pgpm module. +-- This stub is a no-op that prevents trigger creation from failing in dev. +CREATE SCHEMA IF NOT EXISTS app_jobs; + +CREATE OR REPLACE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL +) RETURNS void AS $$ +BEGIN + -- Stub: in production this is provided by database-jobs pgpm module. + -- In dev, jobs are enqueued but not processed unless the job worker is running. 
+ RAISE NOTICE 'app_jobs.add_job stub called: % %', identifier, payload; +END; +$$ LANGUAGE plpgsql; + +-- Ensure schema exists +CREATE SCHEMA IF NOT EXISTS object_store_public; + +-- --------------------------------------------------------------------------- +-- 1. Status ENUM +-- --------------------------------------------------------------------------- + +CREATE TYPE object_store_public.file_status AS ENUM ( + 'pending', + 'processing', + 'ready', + 'error', + 'deleting' +); + +COMMENT ON TYPE object_store_public.file_status IS + 'Lifecycle states for managed files. Transitions: pending->{processing,error}, processing->{ready,error,deleting}, ready->deleting, error->{deleting,pending(retry)}.'; + +-- --------------------------------------------------------------------------- +-- 2. Files Table +-- --------------------------------------------------------------------------- + +CREATE TABLE object_store_public.files ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + bucket_key text NOT NULL DEFAULT 'default', + key text NOT NULL, + status object_store_public.file_status NOT NULL DEFAULT 'pending', + status_reason text, + etag text, + source_table text, + source_column text, + source_id uuid, + processing_started_at timestamptz, + created_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + + CONSTRAINT files_pkey PRIMARY KEY (id, database_id), + CONSTRAINT files_key_unique UNIQUE (key, database_id), + CONSTRAINT files_key_not_empty CHECK (key <> ''), + CONSTRAINT files_key_max_length CHECK (length(key) <= 1024), + CONSTRAINT files_bucket_key_format CHECK (bucket_key ~ '^[a-z][a-z0-9_-]*$'), + CONSTRAINT files_source_table_format CHECK ( + source_table IS NULL OR source_table ~ '^[a-z_]+\.[a-z_]+$' + ), + CONSTRAINT files_source_complete CHECK ( + (source_table IS NULL AND source_column IS NULL AND source_id IS NULL) + OR (source_table IS NOT NULL AND source_column IS NOT NULL 
AND source_id IS NOT NULL) + ) +); + +COMMENT ON TABLE object_store_public.files IS + 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; +COMMENT ON COLUMN object_store_public.files.key IS + 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. Origin files use _origin suffix.'; +COMMENT ON COLUMN object_store_public.files.etag IS + 'S3 ETag for reconciliation and cache validation.'; +COMMENT ON COLUMN object_store_public.files.status_reason IS + 'Human-readable reason for current status (error details, deletion reason).'; +COMMENT ON COLUMN object_store_public.files.processing_started_at IS + 'Timestamp when processing began. Used to detect stuck jobs (alert at 15 min).'; +COMMENT ON COLUMN object_store_public.files.source_table IS + 'Schema-qualified table name referencing this file (e.g. constructive_users_public.users). NULL until the domain trigger populates it. Free text -- no FK possible.'; +COMMENT ON COLUMN object_store_public.files.source_column IS + 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; +COMMENT ON COLUMN object_store_public.files.source_id IS + 'Primary key of the row in the source table. NULL until domain trigger populates it.'; + +-- --------------------------------------------------------------------------- +-- 3. 
Buckets Table +-- --------------------------------------------------------------------------- + +CREATE TABLE object_store_public.buckets ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + key text NOT NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT false, + config jsonb NOT NULL DEFAULT '{}'::jsonb, + created_by uuid, + updated_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + + CONSTRAINT buckets_pkey PRIMARY KEY (id, database_id), + CONSTRAINT buckets_key_unique UNIQUE (key, database_id), + CONSTRAINT buckets_key_format CHECK (key ~ '^[a-z][a-z0-9_-]*$') +); + +COMMENT ON TABLE object_store_public.buckets IS + 'Logical bucket configuration per tenant. The bucket key maps to the S3 key prefix segment. is_public controls RLS policy for anonymous reads.'; + +-- --------------------------------------------------------------------------- +-- 4. Indexes +-- --------------------------------------------------------------------------- + +-- Tenant queries +CREATE INDEX files_database_id_idx + ON object_store_public.files (database_id); + +-- Bucket + tenant queries +CREATE INDEX files_bucket_database_id_idx + ON object_store_public.files (bucket_key, database_id); + +-- "My uploads" queries +CREATE INDEX files_created_by_database_id_created_at_idx + ON object_store_public.files (created_by, database_id, created_at DESC); + +-- Back-reference lookups (cleanup worker, attachment queries) +CREATE INDEX files_source_ref_idx + ON object_store_public.files (source_table, source_column, source_id); + +-- Pending file reaper (hourly cron) +CREATE INDEX files_pending_created_at_idx + ON object_store_public.files (created_at) + WHERE status = 'pending'; + +-- Stuck processing detection +CREATE INDEX files_processing_idx + ON object_store_public.files (processing_started_at) + WHERE status = 'processing'; + +-- Deletion job queue +CREATE INDEX files_deleting_idx + ON 
object_store_public.files (updated_at) + WHERE status = 'deleting'; + +-- Time-range scans on large tables +CREATE INDEX files_created_at_brin_idx + ON object_store_public.files USING brin (created_at); + +-- --------------------------------------------------------------------------- +-- 5. Triggers +-- --------------------------------------------------------------------------- + +-- 5a. AFTER INSERT -- enqueue process-image job +-- NOTE: Version rows are inserted with status = 'ready', which intentionally +-- bypasses this trigger (condition: NEW.status = 'pending'). Only origin +-- uploads (status = 'pending') need processing. + +CREATE OR REPLACE FUNCTION object_store_public.files_after_insert_queue_processing() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'process-image', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id + ), + job_key := 'file:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_insert_queue_processing + AFTER INSERT ON object_store_public.files + FOR EACH ROW + WHEN (NEW.status = 'pending') + EXECUTE FUNCTION object_store_public.files_after_insert_queue_processing(); + +COMMENT ON TRIGGER files_after_insert_queue_processing ON object_store_public.files IS + 'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.'; + +-- 5b. 
BEFORE UPDATE -- timestamp + state machine + +CREATE OR REPLACE FUNCTION object_store_public.files_before_update_timestamp() +RETURNS trigger AS $$ +BEGIN + -- Always update timestamp + NEW.updated_at := now(); + + -- State machine validation (only when status changes) + IF OLD.status IS DISTINCT FROM NEW.status THEN + IF NOT ( + (OLD.status = 'pending' AND NEW.status IN ('processing', 'error')) + OR (OLD.status = 'processing' AND NEW.status IN ('ready', 'error', 'deleting')) + OR (OLD.status = 'ready' AND NEW.status = 'deleting') + OR (OLD.status = 'error' AND NEW.status IN ('deleting', 'pending')) + ) THEN + RAISE EXCEPTION 'Invalid status transition from % to %', OLD.status, NEW.status; + END IF; + + -- Track processing start/end + IF NEW.status = 'processing' THEN + NEW.processing_started_at := now(); + ELSIF OLD.status = 'processing' AND NEW.status <> 'processing' THEN + NEW.processing_started_at := NULL; + END IF; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_before_update_timestamp + BEFORE UPDATE ON object_store_public.files + FOR EACH ROW + EXECUTE FUNCTION object_store_public.files_before_update_timestamp(); + +COMMENT ON TRIGGER files_before_update_timestamp ON object_store_public.files IS + 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; + +-- 5c. 
AFTER UPDATE -- enqueue delete_s3_object job + +CREATE OR REPLACE FUNCTION object_store_public.files_after_update_queue_deletion() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'delete_s3_object', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id, + 'key', NEW.key + ), + job_key := 'delete:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_update_queue_deletion + AFTER UPDATE ON object_store_public.files + FOR EACH ROW + WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting') + EXECUTE FUNCTION object_store_public.files_after_update_queue_deletion(); + +COMMENT ON TRIGGER files_after_update_queue_deletion ON object_store_public.files IS + 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; + +-- 5d. AFTER UPDATE -- re-enqueue process-image on error->pending retry + +CREATE OR REPLACE FUNCTION object_store_public.files_after_update_queue_retry() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'process-image', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id + ), + job_key := 'file:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_update_queue_retry + AFTER UPDATE ON object_store_public.files + FOR EACH ROW + WHEN (OLD.status = 'error' AND NEW.status = 'pending') + EXECUTE FUNCTION object_store_public.files_after_update_queue_retry(); + +COMMENT ON TRIGGER files_after_update_queue_retry ON object_store_public.files IS + 'Re-enqueues process-image job when a file is retried (error->pending). Without this trigger, the retry would change status but never re-enqueue the processing job.'; + +-- --------------------------------------------------------------------------- +-- 6. 
RLS Policies & Grants +-- --------------------------------------------------------------------------- + +ALTER TABLE object_store_public.files ENABLE ROW LEVEL SECURITY; +ALTER TABLE object_store_public.files FORCE ROW LEVEL SECURITY; + +-- Policy 1: Tenant isolation (all operations, all authenticated roles) +CREATE POLICY files_tenant_isolation ON object_store_public.files + FOR ALL + USING (database_id = current_setting('app.database_id')::integer) + WITH CHECK (database_id = current_setting('app.database_id')::integer); + +-- Policy 2: Creator-only for non-ready files (SELECT) +CREATE POLICY files_visibility ON object_store_public.files + FOR SELECT + USING ( + status = 'ready' + OR created_by = current_setting('app.user_id')::uuid + ); + +-- Policy 3: Public bucket read (SELECT, for anonymous access) +CREATE POLICY files_public_bucket_read ON object_store_public.files + FOR SELECT + USING ( + EXISTS ( + SELECT 1 FROM object_store_public.buckets b + WHERE b.key = bucket_key + AND b.database_id = files.database_id + AND b.is_public = true + ) + AND status = 'ready' + ); + +-- Policy 4: Admin override (all operations) +CREATE POLICY files_admin_override ON object_store_public.files + FOR ALL + USING (current_setting('app.role', true) = 'administrator') + WITH CHECK (current_setting('app.role', true) = 'administrator'); + +-- Grants +GRANT SELECT, INSERT, UPDATE ON object_store_public.files TO authenticated; +GRANT SELECT, INSERT, UPDATE, DELETE ON object_store_public.files TO service_role; + +COMMENT ON POLICY files_tenant_isolation ON object_store_public.files IS + 'Every query is scoped to the current tenant via app.database_id session variable.'; +COMMENT ON POLICY files_visibility ON object_store_public.files IS + 'Users see all ready files in their tenant. 
Non-ready files visible only to the uploader.'; +COMMENT ON POLICY files_public_bucket_read ON object_store_public.files IS + 'Allows unauthenticated reads on ready files in public buckets.'; +COMMENT ON POLICY files_admin_override ON object_store_public.files IS + 'Administrators can see and modify all files in the tenant regardless of status or creator.'; + +-- --------------------------------------------------------------------------- +-- 7. Domain Table Triggers +-- --------------------------------------------------------------------------- + +-- 7a. Generic trigger function: back-reference population +-- +-- When a domain table's image/upload/attachment column is updated with an S3 key, +-- find the files row by key and populate source_table, source_column, source_id. +-- Also finds version rows by key prefix and populates the same back-reference. +-- +-- Parameters (passed via TG_ARGV): +-- TG_ARGV[0] = column name (e.g. 'profile_picture') +-- TG_ARGV[1] = schema-qualified table name (e.g. 
'constructive_users_public.users') + +CREATE OR REPLACE FUNCTION object_store_public.populate_file_back_reference() +RETURNS trigger AS $$ +DECLARE + col_name text := TG_ARGV[0]; + table_name text := TG_ARGV[1]; + new_val jsonb; + old_val jsonb; + new_key text; + old_key text; + base_key text; + db_id integer; +BEGIN + -- Get the database_id from session context + db_id := current_setting('app.database_id')::integer; + + -- Extract the jsonb value from the specified column (dynamic) + EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW; + EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO old_val USING OLD; + + -- Extract the key from the new and old values + new_key := new_val ->> 'key'; + old_key := old_val ->> 'key'; + + -- If no key change, nothing to do + IF new_key IS NOT DISTINCT FROM old_key THEN + RETURN NEW; + END IF; + + -- Handle file replacement: mark old files as deleting + IF old_key IS NOT NULL AND old_key <> '' THEN + -- Derive base key for the old file (strip version suffix) + base_key := regexp_replace(old_key, '_[^_]+$', ''); + + -- Mark old origin + all versions as deleting + UPDATE object_store_public.files + SET status = 'deleting', status_reason = 'replaced by new file' + WHERE database_id = db_id + AND (key = old_key OR key LIKE base_key || '_%') + AND status NOT IN ('deleting'); + END IF; + + -- Populate back-reference on new file (origin + versions) + IF new_key IS NOT NULL AND new_key <> '' THEN + -- Derive base key for the new file + base_key := regexp_replace(new_key, '_[^_]+$', ''); + + -- Set back-reference on origin + all version rows + UPDATE object_store_public.files + SET source_table = table_name, + source_column = col_name, + source_id = NEW.id + WHERE database_id = db_id + AND (key = new_key OR key LIKE base_key || '_%'); + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +COMMENT ON FUNCTION object_store_public.populate_file_back_reference() IS + 'Generic trigger function for domain tables. 
Populates source_table/source_column/source_id on files rows when image/upload/attachment columns are updated. Handles file replacement by marking old files as deleting.'; + +-- 7b. Generic trigger function: source row deletion +-- +-- When a domain row is deleted, mark all associated files as deleting. + +CREATE OR REPLACE FUNCTION object_store_public.mark_files_deleting_on_source_delete() +RETURNS trigger AS $$ +DECLARE + col_name text := TG_ARGV[0]; + table_name text := TG_ARGV[1]; + db_id integer; +BEGIN + db_id := current_setting('app.database_id')::integer; + + -- Mark all files for this source row + column as deleting + UPDATE object_store_public.files + SET status = 'deleting', status_reason = 'source row deleted' + WHERE database_id = db_id + AND source_table = table_name + AND source_column = col_name + AND source_id = OLD.id + AND status NOT IN ('deleting'); + + RETURN OLD; +END; +$$ LANGUAGE plpgsql; + +COMMENT ON FUNCTION object_store_public.mark_files_deleting_on_source_delete() IS + 'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.'; + +-- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns +-- +-- Each domain column gets two triggers: +-- - AFTER UPDATE: back-reference population + file replacement +-- - BEFORE DELETE: mark files deleting on source row deletion +-- +-- These are wrapped in a DO block so they gracefully skip tables that +-- don't exist yet (e.g. in fresh dev environments). In production, +-- domain tables will exist before this migration runs. 
+ +DO $domain_triggers$ +DECLARE + _tbl text; +BEGIN + -- constructive_users_public.users.profile_picture + SELECT 'constructive_users_public.users' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'constructive_users_public' AND table_name = 'users'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER users_profile_picture_file_ref + AFTER UPDATE OF profile_picture ON constructive_users_public.users + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')'; + EXECUTE 'CREATE TRIGGER users_profile_picture_file_delete + BEFORE DELETE ON constructive_users_public.users + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')'; + RAISE NOTICE 'Created triggers for constructive_users_public.users.profile_picture'; + ELSE + RAISE NOTICE 'Skipped triggers for constructive_users_public.users (table not found)'; + END IF; + + -- constructive_status_public.app_levels.image + SELECT 'constructive_status_public.app_levels' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'constructive_status_public' AND table_name = 'app_levels'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER app_levels_image_file_ref + AFTER UPDATE OF image ON constructive_status_public.app_levels + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')'; + EXECUTE 'CREATE TRIGGER app_levels_image_file_delete + BEFORE DELETE ON constructive_status_public.app_levels + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')'; + RAISE NOTICE 'Created triggers for constructive_status_public.app_levels.image'; + ELSE + RAISE NOTICE 'Skipped triggers for constructive_status_public.app_levels (table not found)'; + END IF; + + -- services_public.sites 
(og_image, apple_touch_icon, logo, favicon) + SELECT 'services_public.sites' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'sites'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER sites_og_image_file_ref + AFTER UPDATE OF og_image ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_og_image_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_ref + AFTER UPDATE OF apple_touch_icon ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_logo_file_ref + AFTER UPDATE OF logo ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_logo_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')'; + + EXECUTE 'CREATE TRIGGER sites_favicon_file_ref + AFTER UPDATE OF favicon ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')'; + EXECUTE 'CREATE TRIGGER sites_favicon_file_delete + BEFORE DELETE ON services_public.sites + FOR EACH ROW + EXECUTE FUNCTION 
object_store_public.mark_files_deleting_on_source_delete(''favicon'', ''services_public.sites'')'; + RAISE NOTICE 'Created triggers for services_public.sites (og_image, apple_touch_icon, logo, favicon)'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.sites (table not found)'; + END IF; + + -- services_public.apps.app_image + SELECT 'services_public.apps' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'apps'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER apps_app_image_file_ref + AFTER UPDATE OF app_image ON services_public.apps + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')'; + EXECUTE 'CREATE TRIGGER apps_app_image_file_delete + BEFORE DELETE ON services_public.apps + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')'; + RAISE NOTICE 'Created triggers for services_public.apps.app_image'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.apps (table not found)'; + END IF; + + -- services_public.site_metadata.og_image + SELECT 'services_public.site_metadata' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'services_public' AND table_name = 'site_metadata'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_ref + AFTER UPDATE OF og_image ON services_public.site_metadata + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')'; + EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_delete + BEFORE DELETE ON services_public.site_metadata + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')'; + RAISE NOTICE 'Created triggers for services_public.site_metadata.og_image'; + ELSE + RAISE NOTICE 'Skipped triggers for services_public.site_metadata (table 
not found)'; + END IF; + + -- db_migrate.migrate_files.upload + SELECT 'db_migrate.migrate_files' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'db_migrate' AND table_name = 'migrate_files'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER migrate_files_upload_file_ref + AFTER UPDATE OF upload ON db_migrate.migrate_files + FOR EACH ROW + EXECUTE FUNCTION object_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')'; + EXECUTE 'CREATE TRIGGER migrate_files_upload_file_delete + BEFORE DELETE ON db_migrate.migrate_files + FOR EACH ROW + EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')'; + RAISE NOTICE 'Created triggers for db_migrate.migrate_files.upload'; + ELSE + RAISE NOTICE 'Skipped triggers for db_migrate.migrate_files (table not found)'; + END IF; +END +$domain_triggers$; + +COMMIT; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index efe3dee61..6ea1ca196 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -2628,6 +2628,9 @@ importers: '@aws-sdk/lib-storage': specifier: ^3.1001.0 version: 3.1001.0(@aws-sdk/client-s3@3.1001.0) + '@aws-sdk/s3-request-presigner': + specifier: ^3.1001.0 + version: 3.1007.0 '@constructive-io/content-type-stream': specifier: workspace:^ version: link:../content-type-stream/dist @@ -2749,6 +2752,10 @@ packages: resolution: {integrity: sha512-Nasoyb5K4jfvncTKQyA13q55xHoz9as01NVYP05B0Kzux/X5UhMn3qXsZDyWOSXkfSCAIrMBKmVVWbI0vUapdQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/core@3.973.19': + resolution: {integrity: sha512-56KePyOcZnKTWCd89oJS1G6j3HZ9Kc+bh/8+EbvtaCCXdP6T7O7NzCiPuHRhFLWnzXIaXX3CxAz0nI5My9spHQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/crc64-nvme@3.972.3': resolution: {integrity: sha512-UExeK+EFiq5LAcbHm96CQLSia+5pvpUVSAsVApscBzayb7/6dJBJKwV4/onsk4VbWSmqxDMcfuTD+pC4RxgZHg==} engines: {node: '>=20.0.0'} @@ -2823,6 +2830,10 @@ packages: resolution: {integrity: 
sha512-U4K1rqyJYvT/zgTI3+rN+MToa51dFnnq1VSsVJuJWPNEKcEnuZVqf7yTpkJJMkYixVW5TTi1dgupd+nmJ0JyWw==} engines: {node: '>=20.0.0'} + '@aws-sdk/middleware-sdk-s3@3.972.19': + resolution: {integrity: sha512-/CtOHHVFg4ZuN6CnLnYkrqWgVEnbOBC4kNiKa+4fldJ9cioDt3dD/f5vpq0cWLOXwmGL2zgVrVxNhjxWpxNMkg==} + engines: {node: '>=20.0.0'} + '@aws-sdk/middleware-ssec@3.972.6': resolution: {integrity: sha512-acvMUX9jF4I2Ew+Z/EA6gfaFaz9ehci5wxBmXCZeulLuv8m+iGf6pY9uKz8TPjg39bdAz3hxoE0eLP8Qz+IYlA==} engines: {node: '>=20.0.0'} @@ -2839,10 +2850,18 @@ packages: resolution: {integrity: sha512-Aa5PusHLXAqLTX1UKDvI3pHQJtIsF7Q+3turCHqfz/1F61/zDMWfbTC8evjhrrYVAtz9Vsv3SJ/waSUeu7B6gw==} engines: {node: '>=20.0.0'} + '@aws-sdk/s3-request-presigner@3.1007.0': + resolution: {integrity: sha512-TZmNzomZxwmIlyi+h8i0j561j4ryDNazUnoEszJTYOuk57RA7NUKQzNvRYUoKOChbFfvDzTy6PR5SRXfu0vaVw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/signature-v4-multi-region@3.996.4': resolution: {integrity: sha512-MGa8ro0onekYIiesHX60LwKdkxK3Kd61p7TTbLwZemBqlnD9OLrk9sXZdFOIxXanJ+3AaJnV/jiX866eD/4PDg==} engines: {node: '>=20.0.0'} + '@aws-sdk/signature-v4-multi-region@3.996.7': + resolution: {integrity: sha512-mYhh7FY+7OOqjkYkd6+6GgJOsXK1xBWmuR+c5mxJPj2kr5TBNeZq+nUvE9kANWAux5UxDVrNOSiEM/wlHzC3Lg==} + engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.1001.0': resolution: {integrity: sha512-09XAq/uIYgeZhohuGRrR/R+ek3+ljFNdzWCXdqb9rlIERDjSfNiLjTtpHgSK1xTPmC5G4yWoEAyMfTXiggS6wA==} engines: {node: '>=20.0.0'} @@ -2851,14 +2870,26 @@ packages: resolution: {integrity: sha512-RW60aH26Bsc016Y9B98hC0Plx6fK5P2v/iQYwMzrSjiDh1qRMUCP6KrXHYEHe3uFvKiOC93Z9zk4BJsUi6Tj1Q==} engines: {node: '>=20.0.0'} + '@aws-sdk/types@3.973.5': + resolution: {integrity: sha512-hl7BGwDCWsjH8NkZfx+HgS7H2LyM2lTMAI7ba9c8O0KqdBLTdNJivsHpqjg9rNlAlPyREb6DeDRXUl0s8uFdmQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-arn-parser@3.972.2': resolution: {integrity: sha512-VkykWbqMjlSgBFDyrY3nOSqupMc6ivXuGmvci6Q3NnLq5kC+mKQe2QBZ4nrWRE/jqOxeFP2uYzLtwncYYcvQDg==} 
engines: {node: '>=20.0.0'} + '@aws-sdk/util-arn-parser@3.972.3': + resolution: {integrity: sha512-HzSD8PMFrvgi2Kserxuff5VitNq2sgf3w9qxmskKDiDTThWfVteJxuCS9JXiPIPtmCrp+7N9asfIaVhBFORllA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-endpoints@3.996.3': resolution: {integrity: sha512-yWIQSNiCjykLL+ezN5A+DfBb1gfXTytBxm57e64lYmwxDHNmInYHRJYYRAGWG1o77vKEiWaw4ui28e3yb1k5aQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/util-format-url@3.972.7': + resolution: {integrity: sha512-V+PbnWfUl93GuFwsOHsAq7hY/fnm9kElRqR8IexIJr5Rvif9e614X5sGSyz3mVSf1YAZ+VTy63W1/pGdA55zyA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-locate-window@3.965.4': resolution: {integrity: sha512-H1onv5SkgPBK2P6JR2MjGgbOnttoNzSPIRoeZTNPZYyaplwGg50zS3amXvXqF0/qfXpWEC9rLWU564QTB9bSog==} engines: {node: '>=20.0.0'} @@ -2875,6 +2906,10 @@ packages: aws-crt: optional: true + '@aws-sdk/xml-builder@3.972.10': + resolution: {integrity: sha512-OnejAIVD+CxzyAUrVic7lG+3QRltyja9LoNqCE/1YVs8ichoTbJlVSaZ9iSMcnHLyzrSNtvaOGjSDRP+d/ouFA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/xml-builder@3.972.9': resolution: {integrity: sha512-ItnlMgSqkPrUfJs7EsvU/01zw5UeIb2tNPhD09LBLHbg+g+HDiKibSLwpkuz/ZIlz4F2IMn+5XgE4AK/pfPuog==} engines: {node: '>=20.0.0'} @@ -3906,24 +3941,28 @@ packages: engines: {node: '>= 10'} cpu: [arm64] os: [linux] + libc: [glibc] '@nx/nx-linux-arm64-musl@20.8.3': resolution: {integrity: sha512-LTTGzI8YVPlF1v0YlVf+exM+1q7rpsiUbjTTHJcfHFRU5t4BsiZD54K19Y1UBg1XFx5cwhEaIomSmJ88RwPPVQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] + libc: [musl] '@nx/nx-linux-x64-gnu@20.8.3': resolution: {integrity: sha512-SlA4GtXvQbSzSIWLgiIiLBOjdINPOUR/im+TUbaEMZ8wiGrOY8cnk0PVt95TIQJVBeXBCeb5HnoY0lHJpMOODg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [glibc] '@nx/nx-linux-x64-musl@20.8.3': resolution: {integrity: sha512-MNzkEwPktp5SQH9dJDH2wP9hgG9LsBDhKJXJfKw6sUI/6qz5+/aAjFziKy+zBnhU4AO1yXt5qEWzR8lDcIriVQ==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [musl] '@nx/nx-win32-arm64-msvc@20.8.3': 
resolution: {integrity: sha512-qUV7CyXKwRCM/lkvyS6Xa1MqgAuK5da6w27RAehh7LATBUKn1I4/M7DGn6L7ERCxpZuh1TrDz9pUzEy0R+Ekkg==} @@ -4042,48 +4081,56 @@ packages: engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-arm64-musl@0.36.0': resolution: {integrity: sha512-SPGLJkOIHSIC6ABUQ5V8NqJpvYhMJueJv26NYqfCnwi/Mn6A61amkpJJ9Suy0Nmvs+OWESJpcebrBUbXPGZyQQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] + libc: [musl] '@oxfmt/binding-linux-ppc64-gnu@0.36.0': resolution: {integrity: sha512-3EuoyB8x9x8ysYJjbEO/M9fkSk72zQKnXCvpZMDHXlnY36/1qMp55Nm0PrCwjGO/1pen5hdOVkz9WmP3nAp2IQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-riscv64-gnu@0.36.0': resolution: {integrity: sha512-MpY3itLwpGh8dnywtrZtaZ604T1m715SydCKy0+qTxetv+IHzuA+aO/AGzrlzUNYZZmtWtmDBrChZGibvZxbRQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-riscv64-musl@0.36.0': resolution: {integrity: sha512-mmDhe4Vtx+XwQPRPn/V25+APnkApYgZ23q+6GVsNYY98pf3aU0aI3Me96pbRs/AfJ1jIiGC+/6q71FEu8dHcHw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] + libc: [musl] '@oxfmt/binding-linux-s390x-gnu@0.36.0': resolution: {integrity: sha512-AYXhU+DmNWLSnvVwkHM92fuYhogtVHab7UQrPNaDf1sxadugg9gWVmcgJDlIwxJdpk5CVW/TFvwUKwI432zhhA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-x64-gnu@0.36.0': resolution: {integrity: sha512-H16QhhQ3usoakMleiAAQ2mg0NsBDAdyE9agUgfC8IHHh3jZEbr0rIKwjEqwbOHK5M0EmfhJmr+aGO/MgZPsneA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] + libc: [glibc] '@oxfmt/binding-linux-x64-musl@0.36.0': resolution: {integrity: sha512-EFFGkixA39BcmHiCe2ECdrq02D6FCve5ka6ObbvrheXl4V+R0U/E+/uLyVx1X65LW8TA8QQHdnbdDallRekohw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] + libc: [musl] '@oxfmt/binding-openharmony-arm64@0.36.0': resolution: {integrity: 
sha512-zr/t369wZWFOj1qf06Z5gGNjFymfUNDrxKMmr7FKiDRVI1sNsdKRCuRL4XVjtcptKQ+ao3FfxLN1vrynivmCYg==} @@ -4599,66 +4646,79 @@ packages: resolution: {integrity: sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==} cpu: [arm] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm-musleabihf@4.57.1': resolution: {integrity: sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==} cpu: [arm] os: [linux] + libc: [musl] '@rollup/rollup-linux-arm64-gnu@4.57.1': resolution: {integrity: sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==} cpu: [arm64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-arm64-musl@4.57.1': resolution: {integrity: sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==} cpu: [arm64] os: [linux] + libc: [musl] '@rollup/rollup-linux-loong64-gnu@4.57.1': resolution: {integrity: sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==} cpu: [loong64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-loong64-musl@4.57.1': resolution: {integrity: sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==} cpu: [loong64] os: [linux] + libc: [musl] '@rollup/rollup-linux-ppc64-gnu@4.57.1': resolution: {integrity: sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==} cpu: [ppc64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-ppc64-musl@4.57.1': resolution: {integrity: sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==} cpu: [ppc64] os: [linux] + libc: [musl] '@rollup/rollup-linux-riscv64-gnu@4.57.1': resolution: {integrity: sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==} cpu: [riscv64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-riscv64-musl@4.57.1': resolution: {integrity: 
sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==} cpu: [riscv64] os: [linux] + libc: [musl] '@rollup/rollup-linux-s390x-gnu@4.57.1': resolution: {integrity: sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==} cpu: [s390x] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-gnu@4.57.1': resolution: {integrity: sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==} cpu: [x64] os: [linux] + libc: [glibc] '@rollup/rollup-linux-x64-musl@4.57.1': resolution: {integrity: sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==} cpu: [x64] os: [linux] + libc: [musl] '@rollup/rollup-openbsd-x64@4.57.1': resolution: {integrity: sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==} @@ -4730,6 +4790,10 @@ packages: resolution: {integrity: sha512-qocxM/X4XGATqQtUkbE9SPUB6wekBi+FyJOMbPj0AhvyvFGYEmOlz6VB22iMePCQsFmMIvFSeViDvA7mZJG47g==} engines: {node: '>=18.0.0'} + '@smithy/abort-controller@4.2.11': + resolution: {integrity: sha512-Hj4WoYWMJnSpM6/kchsm4bUNTL9XiSyhvoMb2KIq4VJzyDt7JpGHUZHkVNPZVC7YE1tf8tPeVauxpFBKGW4/KQ==} + engines: {node: '>=18.0.0'} + '@smithy/chunked-blob-reader-native@4.2.2': resolution: {integrity: sha512-QzzYIlf4yg0w5TQaC9VId3B3ugSk1MI/wb7tgcHtd7CBV9gNRKZrhc2EPSxSZuDy10zUZ0lomNMgkc6/VVe8xg==} engines: {node: '>=18.0.0'} @@ -4746,6 +4810,10 @@ packages: resolution: {integrity: sha512-/+ldRdtiO5Cb26afAZOG1FZM0x7D4AYdjpyOv2OScJw+4C7X+OLdRnNKF5UyUE0VpPgSKr3rnF/kvprRA4h2kg==} engines: {node: '>=18.0.0'} + '@smithy/core@3.23.9': + resolution: {integrity: sha512-1Vcut4LEL9HZsdpI0vFiRYIsaoPwZLjAxnVQDUMQK8beMS+EYPLDQCXtbzfxmM5GzSgjfe2Q9M7WaXwIMQllyQ==} + engines: {node: '>=18.0.0'} + '@smithy/credential-provider-imds@4.2.10': resolution: {integrity: sha512-3bsMLJJLTZGZqVGGeBVFfLzuRulVsGTj12BzRKODTHqUABpIr0jMN1vN3+u6r2OfyhAQ2pXaMZWX/swBK5I6PQ==} engines: 
{node: '>=18.0.0'} @@ -4774,6 +4842,10 @@ packages: resolution: {integrity: sha512-muS5tFw+A/uo+U+yig06vk1776UFM+aAp9hFM8efI4ZcHhTcgv6NTeK4x7ltHeMPBwnhEjcf0MULTyxNkSNxDw==} engines: {node: '>=18.0.0'} + '@smithy/fetch-http-handler@5.3.13': + resolution: {integrity: sha512-U2Hcfl2s3XaYjikN9cT4mPu8ybDbImV3baXR0PkVlC0TTx808bRP3FaPGAzPtB8OByI+JqJ1kyS+7GEgae7+qQ==} + engines: {node: '>=18.0.0'} + '@smithy/hash-blob-browser@4.2.11': resolution: {integrity: sha512-DrcAx3PM6AEbWZxsKl6CWAGnVwiz28Wp1ZhNu+Hi4uI/6C1PIZBIaPM2VoqBDAsOWbM6ZVzOEQMxFLLdmb4eBQ==} engines: {node: '>=18.0.0'} @@ -4798,6 +4870,10 @@ packages: resolution: {integrity: sha512-Yfu664Qbf1B4IYIsYgKoABt010daZjkaCRvdU/sPnZG6TtHOB0md0RjNdLGzxe5UIdn9js4ftPICzmkRa9RJ4Q==} engines: {node: '>=18.0.0'} + '@smithy/is-array-buffer@4.2.2': + resolution: {integrity: sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow==} + engines: {node: '>=18.0.0'} + '@smithy/md5-js@4.2.10': resolution: {integrity: sha512-Op+Dh6dPLWTjWITChFayDllIaCXRofOed8ecpggTC5fkh8yXes0vAEX7gRUfjGK+TlyxoCAA05gHbZW/zB9JwQ==} engines: {node: '>=18.0.0'} @@ -4810,6 +4886,10 @@ packages: resolution: {integrity: sha512-CoVGZaqIC0tEjz0ga3ciwCMA5fd/4lIOwO2wx0fH+cTi1zxSFZnMJbIiIF9G1d4vRSDyTupDrpS3FKBBJGkRZg==} engines: {node: '>=18.0.0'} + '@smithy/middleware-endpoint@4.4.23': + resolution: {integrity: sha512-UEFIejZy54T1EJn2aWJ45voB7RP2T+IRzUqocIdM6GFFa5ClZncakYJfcYnoXt3UsQrZZ9ZRauGm77l9UCbBLw==} + engines: {node: '>=18.0.0'} + '@smithy/middleware-retry@4.4.38': resolution: {integrity: sha512-WdHvdhjE6Fj78vxFwDKFDwlqGOGRUWrwGeuENUbTVE46Su9mnQM+dXHtbnCaQvwuSYrRsjpe8zUsFpwUp/azlA==} engines: {node: '>=18.0.0'} @@ -4818,34 +4898,66 @@ packages: resolution: {integrity: sha512-STQdONGPwbbC7cusL60s7vOa6He6A9w2jWhoapL0mgVjmR19pr26slV+yoSP76SIssMTX/95e5nOZ6UQv6jolg==} engines: {node: '>=18.0.0'} + '@smithy/middleware-serde@4.2.12': + resolution: {integrity: 
sha512-W9g1bOLui7Xn5FABRVS0o3rXL0gfN37d/8I/W7i0N7oxjx9QecUmXEMSUMADTODwdtka9cN43t5BI2CodLJpng==} + engines: {node: '>=18.0.0'} + '@smithy/middleware-stack@4.2.10': resolution: {integrity: sha512-pmts/WovNcE/tlyHa8z/groPeOtqtEpp61q3W0nW1nDJuMq/x+hWa/OVQBtgU0tBqupeXq0VBOLA4UZwE8I0YA==} engines: {node: '>=18.0.0'} + '@smithy/middleware-stack@4.2.11': + resolution: {integrity: sha512-s+eenEPW6RgliDk2IhjD2hWOxIx1NKrOHxEwNUaUXxYBxIyCcDfNULZ2Mu15E3kwcJWBedTET/kEASPV1A1Akg==} + engines: {node: '>=18.0.0'} + '@smithy/node-config-provider@4.3.10': resolution: {integrity: sha512-UALRbJtVX34AdP2VECKVlnNgidLHA2A7YgcJzwSBg1hzmnO/bZBHl/LDQQyYifzUwp1UOODnl9JJ3KNawpUJ9w==} engines: {node: '>=18.0.0'} + '@smithy/node-config-provider@4.3.11': + resolution: {integrity: sha512-xD17eE7kaLgBBGf5CZQ58hh2YmwK1Z0O8YhffwB/De2jsL0U3JklmhVYJ9Uf37OtUDLF2gsW40Xwwag9U869Gg==} + engines: {node: '>=18.0.0'} + '@smithy/node-http-handler@4.4.13': resolution: {integrity: sha512-o8CP8w6tlUA0lk+Qfwm6Ed0jCWk3bEY6iBOJjdBaowbXKCSClk8zIHQvUL6RUZMvuNafF27cbRCMYqw6O1v4aA==} engines: {node: '>=18.0.0'} + '@smithy/node-http-handler@4.4.14': + resolution: {integrity: sha512-DamSqaU8nuk0xTJDrYnRzZndHwwRnyj/n/+RqGGCcBKB4qrQem0mSDiWdupaNWdwxzyMU91qxDmHOCazfhtO3A==} + engines: {node: '>=18.0.0'} + '@smithy/property-provider@4.2.10': resolution: {integrity: sha512-5jm60P0CU7tom0eNrZ7YrkgBaoLFXzmqB0wVS+4uK8PPGmosSrLNf6rRd50UBvukztawZ7zyA8TxlrKpF5z9jw==} engines: {node: '>=18.0.0'} + '@smithy/property-provider@4.2.11': + resolution: {integrity: sha512-14T1V64o6/ndyrnl1ze1ZhyLzIeYNN47oF/QU6P5m82AEtyOkMJTb0gO1dPubYjyyKuPD6OSVMPDKe+zioOnCg==} + engines: {node: '>=18.0.0'} + '@smithy/protocol-http@5.3.10': resolution: {integrity: sha512-2NzVWpYY0tRdfeCJLsgrR89KE3NTWT2wGulhNUxYlRmtRmPwLQwKzhrfVaiNlA9ZpJvbW7cjTVChYKgnkqXj1A==} engines: {node: '>=18.0.0'} + '@smithy/protocol-http@5.3.11': + resolution: {integrity: sha512-hI+barOVDJBkNt4y0L2mu3Ugc0w7+BpJ2CZuLwXtSltGAAwCb3IvnalGlbDV/UCS6a9ZuT3+exd1WxNdLb5IlQ==} + engines: 
{node: '>=18.0.0'} + '@smithy/querystring-builder@4.2.10': resolution: {integrity: sha512-HeN7kEvuzO2DmAzLukE9UryiUvejD3tMp9a1D1NJETerIfKobBUCLfviP6QEk500166eD2IATaXM59qgUI+YDA==} engines: {node: '>=18.0.0'} + '@smithy/querystring-builder@4.2.11': + resolution: {integrity: sha512-7spdikrYiljpket6u0up2Ck2mxhy7dZ0+TDd+S53Dg2DHd6wg+YNJrTCHiLdgZmEXZKI7LJZcwL3721ZRDFiqA==} + engines: {node: '>=18.0.0'} + '@smithy/querystring-parser@4.2.10': resolution: {integrity: sha512-4Mh18J26+ao1oX5wXJfWlTT+Q1OpDR8ssiC9PDOuEgVBGloqg18Fw7h5Ct8DyT9NBYwJgtJ2nLjKKFU6RP1G1Q==} engines: {node: '>=18.0.0'} + '@smithy/querystring-parser@4.2.11': + resolution: {integrity: sha512-nE3IRNjDltvGcoThD2abTozI1dkSy8aX+a2N1Rs55en5UsdyyIXgGEmevUL3okZFoJC77JgRGe99xYohhsjivQ==} + engines: {node: '>=18.0.0'} + '@smithy/service-error-classification@4.2.10': resolution: {integrity: sha512-0R/+/Il5y8nB/By90o8hy/bWVYptbIfvoTYad0igYQO5RefhNCDmNzqxaMx7K1t/QWo0d6UynqpqN5cCQt1MCg==} engines: {node: '>=18.0.0'} @@ -4854,14 +4966,26 @@ packages: resolution: {integrity: sha512-pHgASxl50rrtOztgQCPmOXFjRW+mCd7ALr/3uXNzRrRoGV5G2+78GOsQ3HlQuBVHCh9o6xqMNvlIKZjWn4Euug==} engines: {node: '>=18.0.0'} + '@smithy/shared-ini-file-loader@4.4.6': + resolution: {integrity: sha512-IB/M5I8G0EeXZTHsAxpx51tMQ5R719F3aq+fjEB6VtNcCHDc0ajFDIGDZw+FW9GxtEkgTduiPpjveJdA/CX7sw==} + engines: {node: '>=18.0.0'} + '@smithy/signature-v4@5.3.10': resolution: {integrity: sha512-Wab3wW8468WqTKIxI+aZe3JYO52/RYT/8sDOdzkUhjnLakLe9qoQqIcfih/qxcF4qWEFoWBszY0mj5uxffaVXA==} engines: {node: '>=18.0.0'} + '@smithy/signature-v4@5.3.11': + resolution: {integrity: sha512-V1L6N9aKOBAN4wEHLyqjLBnAz13mtILU0SeDrjOaIZEeN6IFa6DxwRt1NNpOdmSpQUfkBj0qeD3m6P77uzMhgQ==} + engines: {node: '>=18.0.0'} + '@smithy/smithy-client@4.12.1': resolution: {integrity: sha512-Xf9UFHlAihewfkmLNZ6I/Ek6kcYBKoU3cbRS9Z4q++9GWoW0YFbAHs7wMbuXm+nGuKHZ5OKheZMuDdaWPv8DJw==} engines: {node: '>=18.0.0'} + '@smithy/smithy-client@4.12.3': + resolution: {integrity: 
sha512-7k4UxjSpHmPN2AxVhvIazRSzFQjWnud3sOsXcFStzagww17j1cFQYqTSiQ8xuYK3vKLR1Ni8FzuT3VlKr3xCNw==} + engines: {node: '>=18.0.0'} + '@smithy/types@4.13.0': resolution: {integrity: sha512-COuLsZILbbQsdrwKQpkkpyep7lCsByxwj7m0Mg5v66/ZTyenlfBc40/QFQ5chO0YN/PNEH1Bi3fGtfXPnYNeDw==} engines: {node: '>=18.0.0'} @@ -4870,14 +4994,26 @@ packages: resolution: {integrity: sha512-uypjF7fCDsRk26u3qHmFI/ePL7bxxB9vKkE+2WKEciHhz+4QtbzWiHRVNRJwU3cKhrYDYQE3b0MRFtqfLYdA4A==} engines: {node: '>=18.0.0'} + '@smithy/url-parser@4.2.11': + resolution: {integrity: sha512-oTAGGHo8ZYc5VZsBREzuf5lf2pAurJQsccMusVZ85wDkX66ojEc/XauiGjzCj50A61ObFTPe6d7Pyt6UBYaing==} + engines: {node: '>=18.0.0'} + '@smithy/util-base64@4.3.1': resolution: {integrity: sha512-BKGuawX4Doq/bI/uEmg+Zyc36rJKWuin3py89PquXBIBqmbnJwBBsmKhdHfNEp0+A4TDgLmT/3MSKZ1SxHcR6w==} engines: {node: '>=18.0.0'} + '@smithy/util-base64@4.3.2': + resolution: {integrity: sha512-XRH6b0H/5A3SgblmMa5ErXQ2XKhfbQB+Fm/oyLZ2O2kCUrwgg55bU0RekmzAhuwOjA9qdN5VU2BprOvGGUkOOQ==} + engines: {node: '>=18.0.0'} + '@smithy/util-body-length-browser@4.2.1': resolution: {integrity: sha512-SiJeLiozrAoCrgDBUgsVbmqHmMgg/2bA15AzcbcW+zan7SuyAVHN4xTSbq0GlebAIwlcaX32xacnrG488/J/6g==} engines: {node: '>=18.0.0'} + '@smithy/util-body-length-browser@4.2.2': + resolution: {integrity: sha512-JKCrLNOup3OOgmzeaKQwi4ZCTWlYR5H4Gm1r2uTMVBXoemo1UEghk5vtMi1xSu2ymgKVGW631e2fp9/R610ZjQ==} + engines: {node: '>=18.0.0'} + '@smithy/util-body-length-node@4.2.2': resolution: {integrity: sha512-4rHqBvxtJEBvsZcFQSPQqXP2b/yy/YlB66KlcEgcH2WNoOKCKB03DSLzXmOsXjbl8dJ4OEYTn31knhdznwk7zw==} engines: {node: '>=18.0.0'} @@ -4890,10 +5026,18 @@ packages: resolution: {integrity: sha512-/swhmt1qTiVkaejlmMPPDgZhEaWb/HWMGRBheaxwuVkusp/z+ErJyQxO6kaXumOciZSWlmq6Z5mNylCd33X7Ig==} engines: {node: '>=18.0.0'} + '@smithy/util-buffer-from@4.2.2': + resolution: {integrity: sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q==} + engines: {node: '>=18.0.0'} + 
'@smithy/util-config-provider@4.2.1': resolution: {integrity: sha512-462id/00U8JWFw6qBuTSWfN5TxOHvDu4WliI97qOIOnuC/g+NDAknTU8eoGXEPlLkRVgWEr03jJBLV4o2FL8+A==} engines: {node: '>=18.0.0'} + '@smithy/util-config-provider@4.2.2': + resolution: {integrity: sha512-dWU03V3XUprJwaUIFVv4iOnS1FC9HnMHDfUrlNDSh4315v0cWyaIErP8KiqGVbf5z+JupoVpNM7ZB3jFiTejvQ==} + engines: {node: '>=18.0.0'} + '@smithy/util-defaults-mode-browser@4.3.37': resolution: {integrity: sha512-JlPZhV1kQCGNJgofRTU6E8kHrjCKsb6cps8gco8QDVaFl7biFYzHg0p1x89ytIWyVyCkY3nOpO8tJPM47Vqlww==} engines: {node: '>=18.0.0'} @@ -4910,10 +5054,18 @@ packages: resolution: {integrity: sha512-c1hHtkgAWmE35/50gmdKajgGAKV3ePJ7t6UtEmpfCWJmQE9BQAQPz0URUVI89eSkcDqCtzqllxzG28IQoZPvwA==} engines: {node: '>=18.0.0'} + '@smithy/util-hex-encoding@4.2.2': + resolution: {integrity: sha512-Qcz3W5vuHK4sLQdyT93k/rfrUwdJ8/HZ+nMUOyGdpeGA1Wxt65zYwi3oEl9kOM+RswvYq90fzkNDahPS8K0OIg==} + engines: {node: '>=18.0.0'} + '@smithy/util-middleware@4.2.10': resolution: {integrity: sha512-LxaQIWLp4y0r72eA8mwPNQ9va4h5KeLM0I3M/HV9klmFaY2kN766wf5vsTzmaOpNNb7GgXAd9a25P3h8T49PSA==} engines: {node: '>=18.0.0'} + '@smithy/util-middleware@4.2.11': + resolution: {integrity: sha512-r3dtF9F+TpSZUxpOVVtPfk09Rlo4lT6ORBqEvX3IBT6SkQAdDSVKR5GcfmZbtl7WKhKnmb3wbDTQ6ibR2XHClw==} + engines: {node: '>=18.0.0'} + '@smithy/util-retry@4.2.10': resolution: {integrity: sha512-HrBzistfpyE5uqTwiyLsFHscgnwB0kgv8vySp7q5kZ0Eltn/tjosaSGGDj/jJ9ys7pWzIP/icE2d+7vMKXLv7A==} engines: {node: '>=18.0.0'} @@ -4922,10 +5074,18 @@ packages: resolution: {integrity: sha512-c7awZV6cxY0czgDDSr+Bz0XfRtg8AwW2BWhrHhLJISrpmwv8QzA2qzTllWyMVNdy1+UJr9vCm29hzuh3l8TTFw==} engines: {node: '>=18.0.0'} + '@smithy/util-stream@4.5.17': + resolution: {integrity: sha512-793BYZ4h2JAQkNHcEnyFxDTcZbm9bVybD0UV/LEWmZ5bkTms7JqjfrLMi2Qy0E5WFcCzLwCAPgcvcvxoeALbAQ==} + engines: {node: '>=18.0.0'} + '@smithy/util-uri-escape@4.2.1': resolution: {integrity: 
sha512-YmiUDn2eo2IOiWYYvGQkgX5ZkBSiTQu4FlDo5jNPpAxng2t6Sjb6WutnZV9l6VR4eJul1ABmCrnWBC9hKHQa6Q==} engines: {node: '>=18.0.0'} + '@smithy/util-uri-escape@4.2.2': + resolution: {integrity: sha512-2kAStBlvq+lTXHyAZYfJRb/DfS3rsinLiwb+69SstC9Vb0s9vNWkRwpnj918Pfi85mzi42sOqdV72OLxWAISnw==} + engines: {node: '>=18.0.0'} + '@smithy/util-utf8@2.3.0': resolution: {integrity: sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A==} engines: {node: '>=14.0.0'} @@ -4934,6 +5094,10 @@ packages: resolution: {integrity: sha512-DSIwNaWtmzrNQHv8g7DBGR9mulSit65KSj5ymGEIAknmIN8IpbZefEep10LaMG/P/xquwbmJ1h9ectz8z6mV6g==} engines: {node: '>=18.0.0'} + '@smithy/util-utf8@4.2.2': + resolution: {integrity: sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw==} + engines: {node: '>=18.0.0'} + '@smithy/util-waiter@4.2.10': resolution: {integrity: sha512-4eTWph/Lkg1wZEDAyObwme0kmhEb7J/JjibY2znJdrYRgKbKqB7YoEhhJVJ4R1g/SYih4zuwX7LpJaM8RsnTVg==} engines: {node: '>=18.0.0'} @@ -4942,6 +5106,10 @@ packages: resolution: {integrity: sha512-dSfDCeihDmZlV2oyr0yWPTUfh07suS+R5OB+FZGiv/hHyK3hrFBW5rR1UYjfa57vBsrP9lciFkRPzebaV1Qujw==} engines: {node: '>=18.0.0'} + '@smithy/uuid@1.1.2': + resolution: {integrity: sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g==} + engines: {node: '>=18.0.0'} + '@styled-system/background@5.1.2': resolution: {integrity: sha512-jtwH2C/U6ssuGSvwTN3ri/IyjdHb8W9X/g8Y0JLcrH02G+BW3OS8kZdHphF1/YyRklnrKrBT2ngwGUK6aqqV3A==} @@ -5322,41 +5490,49 @@ packages: resolution: {integrity: sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ==} cpu: [arm64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-arm64-musl@1.11.1': resolution: {integrity: sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w==} cpu: [arm64] os: [linux] + libc: [musl] 
'@unrs/resolver-binding-linux-ppc64-gnu@1.11.1': resolution: {integrity: sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA==} cpu: [ppc64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-riscv64-gnu@1.11.1': resolution: {integrity: sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ==} cpu: [riscv64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-riscv64-musl@1.11.1': resolution: {integrity: sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew==} cpu: [riscv64] os: [linux] + libc: [musl] '@unrs/resolver-binding-linux-s390x-gnu@1.11.1': resolution: {integrity: sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg==} cpu: [s390x] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-x64-gnu@1.11.1': resolution: {integrity: sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w==} cpu: [x64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-x64-musl@1.11.1': resolution: {integrity: sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA==} cpu: [x64] os: [linux] + libc: [musl] '@unrs/resolver-binding-wasm32-wasi@1.11.1': resolution: {integrity: sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ==} @@ -9873,6 +10049,22 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@aws-sdk/core@3.973.19': + dependencies: + '@aws-sdk/types': 3.973.5 + '@aws-sdk/xml-builder': 3.972.10 + '@smithy/core': 3.23.9 + '@smithy/node-config-provider': 4.3.11 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@aws-sdk/crc64-nvme@3.972.3': 
dependencies: '@smithy/types': 4.13.0 @@ -10071,6 +10263,23 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@aws-sdk/middleware-sdk-s3@3.972.19': + dependencies: + '@aws-sdk/core': 3.973.19 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-arn-parser': 3.972.3 + '@smithy/core': 3.23.9 + '@smithy/node-config-provider': 4.3.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-config-provider': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-stream': 4.5.17 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@aws-sdk/middleware-ssec@3.972.6': dependencies: '@aws-sdk/types': 3.973.4 @@ -10138,6 +10347,17 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/s3-request-presigner@3.1007.0': + dependencies: + '@aws-sdk/signature-v4-multi-region': 3.996.7 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-format-url': 3.972.7 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/protocol-http': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/signature-v4-multi-region@3.996.4': dependencies: '@aws-sdk/middleware-sdk-s3': 3.972.16 @@ -10147,6 +10367,15 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/signature-v4-multi-region@3.996.7': + dependencies: + '@aws-sdk/middleware-sdk-s3': 3.972.19 + '@aws-sdk/types': 3.973.5 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/token-providers@3.1001.0': dependencies: '@aws-sdk/core': 3.973.16 @@ -10164,10 +10393,19 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/types@3.973.5': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/util-arn-parser@3.972.2': dependencies: tslib: 2.8.1 + '@aws-sdk/util-arn-parser@3.972.3': + dependencies: + tslib: 2.8.1 + '@aws-sdk/util-endpoints@3.996.3': dependencies: '@aws-sdk/types': 3.973.4 @@ -10176,6 +10414,13 @@ snapshots: 
'@smithy/util-endpoints': 3.3.1 tslib: 2.8.1 + '@aws-sdk/util-format-url@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/querystring-builder': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@aws-sdk/util-locate-window@3.965.4': dependencies: tslib: 2.8.1 @@ -10195,6 +10440,12 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/xml-builder@3.972.10': + dependencies: + '@smithy/types': 4.13.0 + fast-xml-parser: 5.4.1 + tslib: 2.8.1 + '@aws-sdk/xml-builder@3.972.9': dependencies: '@smithy/types': 4.13.0 @@ -12212,6 +12463,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/abort-controller@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/chunked-blob-reader-native@4.2.2': dependencies: '@smithy/util-base64': 4.3.1 @@ -12243,6 +12499,19 @@ snapshots: '@smithy/uuid': 1.1.1 tslib: 2.8.1 + '@smithy/core@3.23.9': + dependencies: + '@smithy/middleware-serde': 4.2.12 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-stream': 4.5.17 + '@smithy/util-utf8': 4.2.2 + '@smithy/uuid': 1.1.2 + tslib: 2.8.1 + '@smithy/credential-provider-imds@4.2.10': dependencies: '@smithy/node-config-provider': 4.3.10 @@ -12289,6 +12558,14 @@ snapshots: '@smithy/util-base64': 4.3.1 tslib: 2.8.1 + '@smithy/fetch-http-handler@5.3.13': + dependencies: + '@smithy/protocol-http': 5.3.11 + '@smithy/querystring-builder': 4.2.11 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + tslib: 2.8.1 + '@smithy/hash-blob-browser@4.2.11': dependencies: '@smithy/chunked-blob-reader': 5.2.1 @@ -12322,6 +12599,10 @@ snapshots: dependencies: tslib: 2.8.1 + '@smithy/is-array-buffer@4.2.2': + dependencies: + tslib: 2.8.1 + '@smithy/md5-js@4.2.10': dependencies: '@smithy/types': 4.13.0 @@ -12345,6 +12626,17 @@ snapshots: '@smithy/util-middleware': 4.2.10 tslib: 2.8.1 + '@smithy/middleware-endpoint@4.4.23': + 
dependencies: + '@smithy/core': 3.23.9 + '@smithy/middleware-serde': 4.2.12 + '@smithy/node-config-provider': 4.3.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + '@smithy/util-middleware': 4.2.11 + tslib: 2.8.1 + '@smithy/middleware-retry@4.4.38': dependencies: '@smithy/node-config-provider': 4.3.10 @@ -12363,11 +12655,22 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/middleware-serde@4.2.12': + dependencies: + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/middleware-stack@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/middleware-stack@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/node-config-provider@4.3.10': dependencies: '@smithy/property-provider': 4.2.10 @@ -12375,6 +12678,13 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/node-config-provider@4.3.11': + dependencies: + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/node-http-handler@4.4.13': dependencies: '@smithy/abort-controller': 4.2.10 @@ -12383,27 +12693,56 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/node-http-handler@4.4.14': + dependencies: + '@smithy/abort-controller': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/querystring-builder': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/property-provider@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/property-provider@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/protocol-http@5.3.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/protocol-http@5.3.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/querystring-builder@4.2.10': dependencies: '@smithy/types': 4.13.0 '@smithy/util-uri-escape': 4.2.1 tslib: 2.8.1 + '@smithy/querystring-builder@4.2.11': + dependencies: + 
'@smithy/types': 4.13.0 + '@smithy/util-uri-escape': 4.2.2 + tslib: 2.8.1 + '@smithy/querystring-parser@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/querystring-parser@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/service-error-classification@4.2.10': dependencies: '@smithy/types': 4.13.0 @@ -12413,6 +12752,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/shared-ini-file-loader@4.4.6': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/signature-v4@5.3.10': dependencies: '@smithy/is-array-buffer': 4.2.1 @@ -12424,6 +12768,17 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@smithy/signature-v4@5.3.11': + dependencies: + '@smithy/is-array-buffer': 4.2.2 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-hex-encoding': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-uri-escape': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@smithy/smithy-client@4.12.1': dependencies: '@smithy/core': 3.23.7 @@ -12434,6 +12789,16 @@ snapshots: '@smithy/util-stream': 4.5.16 tslib: 2.8.1 + '@smithy/smithy-client@4.12.3': + dependencies: + '@smithy/core': 3.23.9 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/middleware-stack': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-stream': 4.5.17 + tslib: 2.8.1 + '@smithy/types@4.13.0': dependencies: tslib: 2.8.1 @@ -12444,16 +12809,32 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/url-parser@4.2.11': + dependencies: + '@smithy/querystring-parser': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/util-base64@4.3.1': dependencies: '@smithy/util-buffer-from': 4.2.1 '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@smithy/util-base64@4.3.2': + dependencies: + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@smithy/util-body-length-browser@4.2.1': dependencies: tslib: 2.8.1 + '@smithy/util-body-length-browser@4.2.2': + 
dependencies: + tslib: 2.8.1 + '@smithy/util-body-length-node@4.2.2': dependencies: tslib: 2.8.1 @@ -12468,10 +12849,19 @@ snapshots: '@smithy/is-array-buffer': 4.2.1 tslib: 2.8.1 + '@smithy/util-buffer-from@4.2.2': + dependencies: + '@smithy/is-array-buffer': 4.2.2 + tslib: 2.8.1 + '@smithy/util-config-provider@4.2.1': dependencies: tslib: 2.8.1 + '@smithy/util-config-provider@4.2.2': + dependencies: + tslib: 2.8.1 + '@smithy/util-defaults-mode-browser@4.3.37': dependencies: '@smithy/property-provider': 4.2.10 @@ -12499,11 +12889,20 @@ snapshots: dependencies: tslib: 2.8.1 + '@smithy/util-hex-encoding@4.2.2': + dependencies: + tslib: 2.8.1 + '@smithy/util-middleware@4.2.10': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@smithy/util-middleware@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + '@smithy/util-retry@4.2.10': dependencies: '@smithy/service-error-classification': 4.2.10 @@ -12521,10 +12920,25 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 + '@smithy/util-stream@4.5.17': + dependencies: + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/node-http-handler': 4.4.14 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-hex-encoding': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + '@smithy/util-uri-escape@4.2.1': dependencies: tslib: 2.8.1 + '@smithy/util-uri-escape@4.2.2': + dependencies: + tslib: 2.8.1 + '@smithy/util-utf8@2.3.0': dependencies: '@smithy/util-buffer-from': 2.2.0 @@ -12535,6 +12949,11 @@ snapshots: '@smithy/util-buffer-from': 4.2.1 tslib: 2.8.1 + '@smithy/util-utf8@4.2.2': + dependencies: + '@smithy/util-buffer-from': 4.2.2 + tslib: 2.8.1 + '@smithy/util-waiter@4.2.10': dependencies: '@smithy/abort-controller': 4.2.10 @@ -12545,6 +12964,10 @@ snapshots: dependencies: tslib: 2.8.1 + '@smithy/uuid@1.1.2': + dependencies: + tslib: 2.8.1 + '@styled-system/background@5.1.2': dependencies: '@styled-system/core': 5.1.2 diff --git 
a/uploads/s3-streamer/package.json b/uploads/s3-streamer/package.json index dbe965c73..520faf5f1 100644 --- a/uploads/s3-streamer/package.json +++ b/uploads/s3-streamer/package.json @@ -37,6 +37,7 @@ "dependencies": { "@aws-sdk/client-s3": "^3.1001.0", "@aws-sdk/lib-storage": "^3.1001.0", + "@aws-sdk/s3-request-presigner": "^3.1001.0", "@constructive-io/content-type-stream": "workspace:^", "@pgpmjs/types": "workspace:^" }, diff --git a/uploads/s3-streamer/src/index.ts b/uploads/s3-streamer/src/index.ts index 74c9f3389..90cf965e1 100644 --- a/uploads/s3-streamer/src/index.ts +++ b/uploads/s3-streamer/src/index.ts @@ -2,6 +2,7 @@ import getClient from './s3'; import Streamer from './streamer'; export * from './utils'; +export * from './storage-provider'; export { getClient }; export { Streamer }; diff --git a/uploads/s3-streamer/src/storage-provider.ts b/uploads/s3-streamer/src/storage-provider.ts new file mode 100644 index 000000000..336bc4e41 --- /dev/null +++ b/uploads/s3-streamer/src/storage-provider.ts @@ -0,0 +1,198 @@ +/** + * StorageProvider interface and S3 implementation. + * + * The StorageProvider interface abstracts storage operations so that + * future implementations (GCS, Azure, local filesystem) can be swapped + * in without changing consumers. + * + * S3StorageProvider is the only implementation for now. It is + * MinIO-compatible (forcePathStyle: true, configurable endpoint). 
+ */ + +import { + DeleteObjectCommand, + DeleteObjectsCommand, + GetObjectCommand, + HeadObjectCommand, + ListObjectsV2Command, + PutObjectCommand, + S3Client, +} from '@aws-sdk/client-s3'; +import { getSignedUrl } from '@aws-sdk/s3-request-presigner'; +import { Upload } from '@aws-sdk/lib-storage'; +import type { Readable } from 'stream'; + +import getS3 from './s3'; + +// -- Interfaces -- + +export interface UploadOpts { + contentType: string; + size?: number; + metadata?: Record; +} + +export interface StorageUploadResult { + etag: string; + versionId?: string; +} + +export interface ObjectMeta { + key: string; + size: number; + etag: string; + lastModified: Date; + contentType?: string; +} + +export interface StorageProvider { + upload(key: string, stream: Readable, opts: UploadOpts): Promise; + download(key: string): Promise; + delete(key: string): Promise; + deleteMany(keys: string[]): Promise; + head(key: string): Promise; + presignGet(key: string, expiresIn: number): Promise; + presignPut(key: string, expiresIn: number, contentType: string): Promise; + listPrefix(prefix: string): AsyncIterable; +} + +// -- S3 Implementation -- + +export interface S3StorageProviderOptions { + bucket: string; + awsRegion: string; + awsAccessKey: string; + awsSecretKey: string; + minioEndpoint?: string; + provider?: 'minio' | 's3'; +} + +export class S3StorageProvider implements StorageProvider { + private client: S3Client; + private bucket: string; + + constructor(opts: S3StorageProviderOptions) { + this.bucket = opts.bucket; + this.client = getS3({ + awsRegion: opts.awsRegion, + awsAccessKey: opts.awsAccessKey, + awsSecretKey: opts.awsSecretKey, + minioEndpoint: opts.minioEndpoint, + provider: opts.provider, + }); + } + + async upload(key: string, stream: Readable, opts: UploadOpts): Promise { + const upload = new Upload({ + client: this.client, + params: { + Bucket: this.bucket, + Key: key, + Body: stream, + ContentType: opts.contentType, + ...(opts.metadata ? 
{ Metadata: opts.metadata } : {}), + }, + }); + + const result = await upload.done(); + return { + etag: result.ETag?.replace(/"/g, '') || '', + versionId: result.VersionId, + }; + } + + async download(key: string): Promise { + const result = await this.client.send( + new GetObjectCommand({ Bucket: this.bucket, Key: key }) + ); + return result.Body as Readable; + } + + async delete(key: string): Promise { + await this.client.send( + new DeleteObjectCommand({ Bucket: this.bucket, Key: key }) + ); + } + + async deleteMany(keys: string[]): Promise { + if (keys.length === 0) return; + + // DeleteObjectsCommand supports max 1000 keys per request + for (let i = 0; i < keys.length; i += 1000) { + const batch = keys.slice(i, i + 1000); + await this.client.send( + new DeleteObjectsCommand({ + Bucket: this.bucket, + Delete: { + Objects: batch.map((key) => ({ Key: key })), + Quiet: true, + }, + }) + ); + } + } + + async head(key: string): Promise { + const result = await this.client.send( + new HeadObjectCommand({ Bucket: this.bucket, Key: key }) + ); + return { + key, + size: result.ContentLength || 0, + etag: result.ETag?.replace(/"/g, '') || '', + lastModified: result.LastModified || new Date(), + contentType: result.ContentType, + }; + } + + async presignGet(key: string, expiresIn: number): Promise { + return getSignedUrl( + this.client as any, + new GetObjectCommand({ Bucket: this.bucket, Key: key }), + { expiresIn } + ); + } + + async presignPut(key: string, expiresIn: number, contentType: string): Promise { + return getSignedUrl( + this.client as any, + new PutObjectCommand({ + Bucket: this.bucket, + Key: key, + ContentType: contentType, + }), + { expiresIn } + ); + } + + async *listPrefix(prefix: string): AsyncIterable { + let continuationToken: string | undefined; + + do { + const result = await this.client.send( + new ListObjectsV2Command({ + Bucket: this.bucket, + Prefix: prefix, + ContinuationToken: continuationToken, + }) + ); + + for (const obj of 
result.Contents || []) { + yield { + key: obj.Key || '', + size: obj.Size || 0, + etag: obj.ETag?.replace(/"/g, '') || '', + lastModified: obj.LastModified || new Date(), + }; + } + + continuationToken = result.IsTruncated + ? result.NextContinuationToken + : undefined; + } while (continuationToken); + } + + destroy(): void { + this.client.destroy(); + } +} From a24f5a14bc4665abc5fed200c224d964240ebd3d Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 12 Mar 2026 09:41:45 +0800 Subject: [PATCH 2/9] e2e tests --- .../__tests__/object-store-lifecycle.test.ts | 875 ++++++++++++++++++ migrations/__tests__/object-store-rls.test.ts | 733 +++++++++++++++ migrations/jest.config.js | 18 + migrations/object_store.sql | 42 +- migrations/package.json | 12 + pnpm-lock.yaml | 6 + pnpm-workspace.yaml | 1 + 7 files changed, 1682 insertions(+), 5 deletions(-) create mode 100644 migrations/__tests__/object-store-lifecycle.test.ts create mode 100644 migrations/__tests__/object-store-rls.test.ts create mode 100644 migrations/jest.config.js create mode 100644 migrations/package.json diff --git a/migrations/__tests__/object-store-lifecycle.test.ts b/migrations/__tests__/object-store-lifecycle.test.ts new file mode 100644 index 000000000..be2b7ac80 --- /dev/null +++ b/migrations/__tests__/object-store-lifecycle.test.ts @@ -0,0 +1,875 @@ +jest.setTimeout(60000); + +import { resolve } from 'path'; + +import { getConnections, PgTestClient, seed } from 'pgsql-test'; + +const MIGRATION_PATH = resolve(__dirname, '../object_store.sql'); + +const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; + +let pg: PgTestClient; +let teardown: () => Promise; + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +async function switchRole( + role: string, + context: Record = {} +) { + await pg.query(`SET LOCAL ROLE ${role}`); + for (const [key, value] of Object.entries(context)) { + 
await pg.query('SELECT set_config($1, $2, true)', [key, value]); + } +} + +/** Read all recorded jobs from the job_log table */ +async function getJobLog() { + const result = await pg.query( + 'SELECT identifier, payload, job_key FROM _test_job_log ORDER BY logged_at' + ); + return result.rows; +} + +async function clearJobLog() { + await pg.query('DELETE FROM _test_job_log'); +} + +// --------------------------------------------------------------------------- +// Setup +// --------------------------------------------------------------------------- + +beforeAll(async () => { + ({ pg, teardown } = await getConnections( + {}, + [seed.sqlfile([MIGRATION_PATH])] + )); + + // Ensure anonymous role exists + await pg.query(` + DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'anonymous') THEN + CREATE ROLE anonymous NOLOGIN; + END IF; + END $$ + `); + + // Grants needed for isolated test (normally from pgpm extension deploy) + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO service_role'); + await pg.query('GRANT SELECT ON object_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON object_store_public.buckets TO service_role'); + + // Replace the app_jobs.add_job stub with one that records calls + await pg.query(` + CREATE TABLE _test_job_log ( + logged_at timestamptz NOT NULL DEFAULT now(), + identifier text NOT NULL, + payload json, + job_key text + ) + `); + + await pg.query(` + CREATE OR REPLACE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL + ) RETURNS void AS $$ + BEGIN + INSERT INTO _test_job_log (identifier, payload, job_key) + VALUES (identifier, payload, job_key); + END; + $$ LANGUAGE plpgsql + `); + + // 
Grant app_jobs access to roles that trigger job-enqueuing functions. + // In production, the database-jobs pgpm module handles these grants. + await pg.query('GRANT USAGE ON SCHEMA app_jobs TO authenticated, service_role'); + await pg.query('GRANT EXECUTE ON FUNCTION app_jobs.add_job(text, json, text, timestamptz, integer, text, integer, text[]) TO authenticated, service_role'); + await pg.query('GRANT INSERT ON _test_job_log TO authenticated, service_role'); + + // Seed a default bucket + await pg.query(` + INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + VALUES (1, 'default', 'Default Bucket', false, '{}') + `); +}); + +afterAll(async () => { + await teardown(); +}); + +// ========================================================================== +// E2E-01: Full Upload Lifecycle (happy path) +// ========================================================================== + +describe('E2E-01: Upload Lifecycle -- happy path', () => { + const ORIGIN_ID = '10000000-0000-0000-0000-000000000001'; + const ORIGIN_KEY = '1/default/abc123_origin'; + const VERSION_THUMB_KEY = '1/default/abc123_thumb'; + const VERSION_LARGE_KEY = '1/default/abc123_large'; + + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('step 1: user uploads file → status=pending, process-image job queued', async () => { + // Authenticated user inserts a file (simulates upload endpoint) + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag) + VALUES ($1, 1, $2, 'default', $3, 'etag-origin') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + // Verify file exists with pending status + const file = await pg.query( + 'SELECT * FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rowCount).toBe(1); + 
expect(file.rows[0].status).toBe('pending'); + expect(file.rows[0].created_by).toBe(USER_A); + + // Verify process-image job was queued (read job log as superuser) + await pg.query('RESET ROLE'); + const jobs = await getJobLog(); + expect(jobs).toEqual([ + expect.objectContaining({ + identifier: 'process-image', + job_key: `file:${ORIGIN_ID}`, + }), + ]); + const payload = jobs[0].payload; + expect(payload.file_id).toBe(ORIGIN_ID); + expect(payload.database_id).toBe(1); + }); + + it('step 2: service_role transitions pending → processing', async () => { + // Insert as superuser first + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'pending') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + // Service role picks up the job + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, + [ORIGIN_ID] + ); + + // Verify processing_started_at is set + await pg.query('RESET ROLE'); + const file = await pg.query( + 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rows[0].status).toBe('processing'); + expect(file.rows[0].processing_started_at).not.toBeNull(); + }); + + it('step 3: service_role inserts version rows (status=ready, bypasses job trigger)', async () => { + // Setup: origin in processing state + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'processing') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + // Service role creates version rows with status='ready' + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await 
pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag, status) + VALUES + (1, $1, 'default', $2, 'etag-thumb', 'ready'), + (1, $3, 'default', $2, 'etag-large', 'ready') + `, [VERSION_THUMB_KEY, USER_A, VERSION_LARGE_KEY]); + + // Version rows with status='ready' should NOT trigger process-image + await pg.query('RESET ROLE'); + const jobs = await getJobLog(); + expect(jobs).toHaveLength(0); + + // Verify all three rows exist + const files = await pg.query( + `SELECT key, status FROM object_store_public.files WHERE database_id = 1 ORDER BY key` + ); + expect(files.rowCount).toBe(3); + expect(files.rows.map((r: any) => ({ key: r.key, status: r.status }))).toEqual([ + { key: VERSION_LARGE_KEY, status: 'ready' }, + { key: ORIGIN_KEY, status: 'processing' }, + { key: VERSION_THUMB_KEY, status: 'ready' }, + ]); + }); + + it('step 4: service_role transitions origin processing → ready', async () => { + // Setup: origin in processing state + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'processing') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, + [ORIGIN_ID] + ); + + // Verify status and processing_started_at cleared + await pg.query('RESET ROLE'); + const file = await pg.query( + 'SELECT status, processing_started_at, updated_at, created_at FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rows[0].status).toBe('ready'); + expect(file.rows[0].processing_started_at).toBeNull(); + // updated_at should be refreshed + expect(new Date(file.rows[0].updated_at).getTime()) + .toBeGreaterThanOrEqual(new Date(file.rows[0].created_at).getTime()); + }); + + it('step 5: user sees origin 
+ versions after processing completes', async () => { + // Setup: origin ready + 2 version rows ready + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES + ($1, 1, $2, 'default', $4, 'etag-origin', 'ready'), + (gen_random_uuid(), 1, $3, 'default', $4, 'etag-thumb', 'ready'), + (gen_random_uuid(), 1, $5, 'default', $4, 'etag-large', 'ready') + `, [ORIGIN_ID, ORIGIN_KEY, VERSION_THUMB_KEY, USER_A, VERSION_LARGE_KEY]); + + // Authenticated user queries + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const files = await pg.query( + `SELECT key, status FROM object_store_public.files WHERE key LIKE '1/default/abc123%' ORDER BY key` + ); + expect(files.rowCount).toBe(3); + expect(files.rows.every((r: any) => r.status === 'ready')).toBe(true); + }); +}); + +// ========================================================================== +// E2E-02: Error + Retry Path +// ========================================================================== + +describe('E2E-02: Error + Retry Path', () => { + const ORIGIN_ID = '20000000-0000-0000-0000-000000000001'; + const ORIGIN_KEY = '1/default/err123_origin'; + + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('processing → error stores status_reason', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'processing') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files + SET status = 'error', status_reason = 'sharp: unsupported image format' + WHERE id = $1 AND database_id = 1`, + [ORIGIN_ID] + ); + + await pg.query('RESET ROLE'); + const file = await 
pg.query( + 'SELECT status, status_reason, processing_started_at FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rows[0].status).toBe('error'); + expect(file.rows[0].status_reason).toBe('sharp: unsupported image format'); + // processing_started_at cleared on exit from processing + expect(file.rows[0].processing_started_at).toBeNull(); + }); + + it('error → pending (retry) re-queues process-image job', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'error') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'pending' WHERE id = $1 AND database_id = 1`, + [ORIGIN_ID] + ); + + // Verify retry job queued + await pg.query('RESET ROLE'); + const jobs = await getJobLog(); + expect(jobs).toEqual([ + expect.objectContaining({ + identifier: 'process-image', + job_key: `file:${ORIGIN_ID}`, + }), + ]); + }); + + it('full retry cycle: pending → processing → error → pending → processing → ready', async () => { + // Step 1: upload (pending) + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag) + VALUES ($1, 1, $2, 'default', $3, 'etag') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + await clearJobLog(); + + // Step 2: processing + await pg.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1`, + [ORIGIN_ID] + ); + let file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + expect(file.rows[0].status).toBe('processing'); + expect(file.rows[0].processing_started_at).not.toBeNull(); + + // Step 3: error + await pg.query( + `UPDATE object_store_public.files SET status = 'error', status_reason = 'timeout' WHERE id = $1`, + 
[ORIGIN_ID] + ); + file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + expect(file.rows[0].status).toBe('error'); + expect(file.rows[0].processing_started_at).toBeNull(); + + // Step 4: retry (error → pending) — should re-queue job + await pg.query( + `UPDATE object_store_public.files SET status = 'pending' WHERE id = $1`, + [ORIGIN_ID] + ); + let jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('process-image'); + + // Step 5: processing again + await clearJobLog(); + await pg.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1`, + [ORIGIN_ID] + ); + + // Step 6: ready + await pg.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1`, + [ORIGIN_ID] + ); + file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + expect(file.rows[0].status).toBe('ready'); + expect(file.rows[0].processing_started_at).toBeNull(); + }); +}); + +// ========================================================================== +// E2E-03: Deletion Flow +// ========================================================================== + +describe('E2E-03: Deletion Flow', () => { + const ORIGIN_ID = '30000000-0000-0000-0000-000000000001'; + const ORIGIN_KEY = '1/default/del123_origin'; + const VERSION_KEY = '1/default/del123_thumb'; + + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('ready → deleting queues delete_s3_object job', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'ready') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + await pg.query( + `UPDATE object_store_public.files SET status = 'deleting' WHERE id = $1`, + [ORIGIN_ID] + ); + + const jobs = await getJobLog(); + 
expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('delete_s3_object'); + expect(jobs[0].job_key).toBe(`delete:${ORIGIN_ID}`); + expect(jobs[0].payload.key).toBe(ORIGIN_KEY); + }); + + it('deleting origin + version rows each queue separate jobs', async () => { + const VERSION_ID = '30000000-0000-0000-0000-000000000002'; + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES + ($1, 1, $2, 'default', $4, 'etag-origin', 'ready'), + ($3, 1, $5, 'default', $4, 'etag-thumb', 'ready') + `, [ORIGIN_ID, ORIGIN_KEY, VERSION_ID, USER_A, VERSION_KEY]); + await clearJobLog(); + + // Delete both + await pg.query( + `UPDATE object_store_public.files SET status = 'deleting' + WHERE database_id = 1 AND key LIKE '1/default/del123%'` + ); + + const jobs = await getJobLog(); + expect(jobs).toHaveLength(2); + const keys = jobs.map((j: any) => j.payload.key).sort(); + expect(keys).toEqual([ORIGIN_KEY, VERSION_KEY]); + }); + + it('error → deleting is valid (skip processing on permanent failure)', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'error') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + await clearJobLog(); + + await pg.query( + `UPDATE object_store_public.files SET status = 'deleting', status_reason = 'user cancelled' + WHERE id = $1`, + [ORIGIN_ID] + ); + + const file = await pg.query('SELECT status, status_reason FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + expect(file.rows[0].status).toBe('deleting'); + expect(file.rows[0].status_reason).toBe('user cancelled'); + + const jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('delete_s3_object'); + }); + + it('service_role can hard-DELETE after marking as deleting', async () => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, 
bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', 'deleting') + `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query( + 'DELETE FROM object_store_public.files WHERE id = $1 AND database_id = 1', + [ORIGIN_ID] + ); + expect(result.rowCount).toBe(1); + + // Verify gone + await pg.query('RESET ROLE'); + const check = await pg.query( + 'SELECT * FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(check.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// E2E-04: State Machine Validation +// ========================================================================== + +describe('E2E-04: State Machine Validation', () => { + const ORIGIN_ID = '40000000-0000-0000-0000-000000000001'; + const ORIGIN_KEY = '1/default/sm123_origin'; + + beforeEach(async () => { + await pg.beforeEach(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + const invalidTransitions = [ + ['pending', 'ready'], + ['pending', 'deleting'], + ['processing', 'pending'], + ['ready', 'pending'], + ['ready', 'processing'], + ['ready', 'error'], + ['error', 'processing'], + ['error', 'ready'], + ]; + + it.each(invalidTransitions)( + 'rejects %s → %s', + async (from, to) => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', $4) + `, [ORIGIN_ID, ORIGIN_KEY, USER_A, from]); + + await expect( + pg.query( + `UPDATE object_store_public.files SET status = $1 WHERE id = $2`, + [to, ORIGIN_ID] + ) + ).rejects.toThrow(/Invalid status transition/); + } + ); + + const validTransitions = [ + ['pending', 'processing'], + ['pending', 'error'], + ['processing', 'ready'], + ['processing', 'error'], + ['processing', 'deleting'], + ['ready', 'deleting'], + ['error', 
'deleting'], + ['error', 'pending'], + ]; + + it.each(validTransitions)( + 'allows %s → %s', + async (from, to) => { + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES ($1, 1, $2, 'default', $3, 'etag', $4) + `, [ORIGIN_ID, ORIGIN_KEY, USER_A, from]); + + await pg.query( + `UPDATE object_store_public.files SET status = $1 WHERE id = $2`, + [to, ORIGIN_ID] + ); + + const file = await pg.query( + 'SELECT status FROM object_store_public.files WHERE id = $1', + [ORIGIN_ID] + ); + expect(file.rows[0].status).toBe(to); + } + ); +}); + +// ========================================================================== +// E2E-05: Constraints +// ========================================================================== + +describe('E2E-05: Constraints', () => { + beforeEach(async () => { + await pg.beforeEach(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('rejects empty key', async () => { + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '', 'default', 'x') + `) + ).rejects.toThrow(/files_key_not_empty/); + }); + + it('rejects key exceeding 1024 chars', async () => { + const longKey = '1/default/' + 'a'.repeat(1020); + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, $1, 'default', 'x') + `, [longKey]) + ).rejects.toThrow(/files_key_max_length/); + }); + + it('rejects invalid bucket_key format', async () => { + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/BAD/test_origin', 'BAD-BUCKET', 'x') + `) + ).rejects.toThrow(/files_bucket_key_format/); + }); + + it('rejects partial source reference (source_table without source_column)', async () => { + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag, source_table) + 
VALUES (1, '1/default/partial_origin', 'default', 'x', 'some_schema.some_table') + `) + ).rejects.toThrow(/files_source_complete/); + }); + + it('accepts complete source reference', async () => { + const result = await pg.query(` + INSERT INTO object_store_public.files + (database_id, key, bucket_key, etag, source_table, source_column, source_id) + VALUES (1, '1/default/ref_origin', 'default', 'x', + 'some_schema.some_table', 'image', gen_random_uuid()) + RETURNING source_table, source_column, source_id + `); + expect(result.rowCount).toBe(1); + expect(result.rows[0].source_table).toBe('some_schema.some_table'); + }); + + it('enforces unique key per tenant', async () => { + await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/default/dup_origin', 'default', 'e1') + `); + + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/default/dup_origin', 'default', 'e2') + `) + ).rejects.toThrow(/files_key_unique/); + }); + + it('allows same key in different tenants', async () => { + await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/default/shared_origin', 'default', 'e1') + `); + + const result = await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (2, '1/default/shared_origin', 'default', 'e2') + RETURNING * + `); + expect(result.rowCount).toBe(1); + }); +}); + +// ========================================================================== +// E2E-06: Full E2E -- upload through versions processed (under RLS) +// ========================================================================== + +describe('E2E-06: Full lifecycle under RLS', () => { + const ORIGIN_KEY = '1/default/full_e2e_origin'; + const THUMB_KEY = '1/default/full_e2e_thumb'; + const LARGE_KEY = '1/default/full_e2e_large'; + + beforeEach(async () => { + await pg.beforeEach(); + 
await pg.query(` + INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + VALUES (1, 'default', 'Default Bucket', false, '{}') + ON CONFLICT DO NOTHING + `); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('complete upload → process → versions → ready → visible → delete', async () => { + // --------------------------------------------------------------- + // 1. User uploads an image (INSERT as authenticated) + // --------------------------------------------------------------- + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag) + VALUES (1, $1, 'default', $2, 'etag-origin') + `, [ORIGIN_KEY, USER_A]); + + // Verify: user sees their pending file + let myFiles = await pg.query( + `SELECT key, status FROM object_store_public.files WHERE key = $1`, + [ORIGIN_KEY] + ); + expect(myFiles.rowCount).toBe(1); + expect(myFiles.rows[0].status).toBe('pending'); + + // Verify: process-image job queued + await pg.query('RESET ROLE'); + let jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('process-image'); + + // Get the origin ID for later + const originRow = await pg.query( + `SELECT id FROM object_store_public.files WHERE key = $1 AND database_id = 1`, + [ORIGIN_KEY] + ); + const originId = originRow.rows[0].id; + + // --------------------------------------------------------------- + // 2. 
Job worker picks up → pending → processing (as service_role) + // --------------------------------------------------------------- + await clearJobLog(); + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, + [originId] + ); + + await pg.query('RESET ROLE'); + let origin = await pg.query( + 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + [originId] + ); + expect(origin.rows[0].status).toBe('processing'); + expect(origin.rows[0].processing_started_at).not.toBeNull(); + + // --------------------------------------------------------------- + // 3. Processor creates version rows (thumb + large) + // --------------------------------------------------------------- + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag, status) + VALUES + (1, $1, 'default', $2, 'etag-thumb', 'ready'), + (1, $3, 'default', $2, 'etag-large', 'ready') + `, [THUMB_KEY, USER_A, LARGE_KEY]); + + // No additional jobs should be queued (version rows are ready, not pending) + await pg.query('RESET ROLE'); + jobs = await getJobLog(); + expect(jobs).toHaveLength(0); + + // --------------------------------------------------------------- + // 4. 
Processor marks origin as ready + // --------------------------------------------------------------- + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, + [originId] + ); + + await pg.query('RESET ROLE'); + origin = await pg.query( + 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + [originId] + ); + expect(origin.rows[0].status).toBe('ready'); + expect(origin.rows[0].processing_started_at).toBeNull(); + + // --------------------------------------------------------------- + // 5. User can see all 3 files (origin + 2 versions) + // --------------------------------------------------------------- + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const allFiles = await pg.query( + `SELECT key, status FROM object_store_public.files + WHERE key LIKE '1/default/full_e2e%' + ORDER BY key` + ); + expect(allFiles.rowCount).toBe(3); + expect(allFiles.rows).toEqual([ + { key: LARGE_KEY, status: 'ready' }, + { key: ORIGIN_KEY, status: 'ready' }, + { key: THUMB_KEY, status: 'ready' }, + ]); + + // --------------------------------------------------------------- + // 6. 
Deletion: mark all as deleting (as service_role) + // --------------------------------------------------------------- + await pg.query('RESET ROLE'); + await clearJobLog(); + + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await pg.query( + `UPDATE object_store_public.files SET status = 'deleting' + WHERE key LIKE '1/default/full_e2e%' AND database_id = 1` + ); + + // All 3 deletion jobs queued + await pg.query('RESET ROLE'); + jobs = await getJobLog(); + expect(jobs).toHaveLength(3); + expect(jobs.every((j: any) => j.identifier === 'delete_s3_object')).toBe(true); + const deletedKeys = jobs.map((j: any) => j.payload.key).sort(); + expect(deletedKeys).toEqual([LARGE_KEY, ORIGIN_KEY, THUMB_KEY]); + + // --------------------------------------------------------------- + // 7. Cleanup worker hard-deletes rows + // --------------------------------------------------------------- + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const deleted = await pg.query( + `DELETE FROM object_store_public.files + WHERE key LIKE '1/default/full_e2e%' AND database_id = 1` + ); + expect(deleted.rowCount).toBe(3); + + // Verify: no files remain + await pg.query('RESET ROLE'); + const remaining = await pg.query( + `SELECT * FROM object_store_public.files WHERE key LIKE '1/default/full_e2e%'` + ); + expect(remaining.rowCount).toBe(0); + }); +}); diff --git a/migrations/__tests__/object-store-rls.test.ts b/migrations/__tests__/object-store-rls.test.ts new file mode 100644 index 000000000..d6b2d9a85 --- /dev/null +++ b/migrations/__tests__/object-store-rls.test.ts @@ -0,0 +1,733 @@ +jest.setTimeout(60000); + +import { resolve } from 'path'; + +import { getConnections, PgTestClient, seed } from 'pgsql-test'; + +const MIGRATION_PATH = resolve(__dirname, '../object_store.sql'); + +const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; +const USER_B = 
'bbbbbbbb-0000-0000-0000-000000000002';
+const USER_C = 'cccccccc-0000-0000-0000-000000000003';
+
+let pg: PgTestClient;
+let teardown: () => Promise<void>;
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+async function switchRole(
+  role: string,
+  context: Record<string, string> = {}
+) {
+  await pg.query(`SET LOCAL ROLE ${role}`);
+  for (const [key, value] of Object.entries(context)) {
+    await pg.query('SELECT set_config($1, $2, true)', [key, value]);
+  }
+}
+
+async function insertBuckets() {
+  await pg.query(`
+    INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config)
+    VALUES
+      (1, 'default', 'Default Bucket', false, '{}'),
+      (1, 'public-assets', 'Public Assets', true, '{}'),
+      (2, 'default', 'Default Bucket (Tenant 2)', false, '{}')
+  `);
+}
+
+async function insertFixtures() {
+  await pg.query(`
+    INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag)
+    VALUES
+      ('11111111-0000-0000-0000-000000000001', 1, 'default', '1/default/aaa_origin', 'ready', $1, 'etag1'),
+      ('11111111-0000-0000-0000-000000000002', 1, 'default', '1/default/bbb_origin', 'pending', $1, 'etag2'),
+      ('11111111-0000-0000-0000-000000000003', 1, 'default', '1/default/ccc_origin', 'processing', $1, 'etag3'),
+      ('11111111-0000-0000-0000-000000000004', 1, 'default', '1/default/ddd_origin', 'error', $1, 'etag4')
+  `, [USER_A]);
+
+  await pg.query(`
+    INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag)
+    VALUES
+      ('22222222-0000-0000-0000-000000000001', 1, 'default', '1/default/eee_origin', 'ready', $1, 'etag5'),
+      ('22222222-0000-0000-0000-000000000002', 1, 'default', '1/default/fff_origin', 'pending', $1, 'etag6')
+  `, [USER_B]);
+
+  await pg.query(`
+    INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag)
+    VALUES
+      
('33333333-0000-0000-0000-000000000001', 1, 'public-assets', '1/public-assets/ggg_origin', 'ready', $1, 'etag7'), + ('33333333-0000-0000-0000-000000000002', 1, 'public-assets', '1/public-assets/hhh_origin', 'pending', $1, 'etag8') + `, [USER_A]); + + await pg.query(` + INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + VALUES + ('44444444-0000-0000-0000-000000000001', 2, 'default', '2/default/iii_origin', 'ready', $1, 'etag9') + `, [USER_C]); +} + +// --------------------------------------------------------------------------- +// Setup +// --------------------------------------------------------------------------- + +beforeAll(async () => { + ({ pg, teardown } = await getConnections( + {}, + [seed.sqlfile([MIGRATION_PATH])] + )); + + // Ensure anonymous role exists (cluster-wide, idempotent) + await pg.query(` + DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'anonymous') THEN + CREATE ROLE anonymous NOLOGIN; + END IF; + END $$ + `); + + // The migration assumes object_store_public schema USAGE is already granted + // (from the original object-store pgpm extension). In isolation, grant explicitly. + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO service_role'); + await pg.query('GRANT USAGE ON SCHEMA object_store_public TO anonymous'); + + // Grant SELECT on buckets to roles that need it for the public_bucket_read policy subquery. + // Without this, the EXISTS subquery in files_public_bucket_read fails with + // "permission denied for table buckets". 
+ await pg.query('GRANT SELECT ON object_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON object_store_public.buckets TO service_role'); + await pg.query('GRANT SELECT ON object_store_public.buckets TO anonymous'); +}); + +afterAll(async () => { + await teardown(); +}); + +// ========================================================================== +// RLS-07: Superuser Bypass (negative control -- run first) +// ========================================================================== + +describe('RLS-07: Superuser Bypass', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-07a: superuser sees all tenants', async () => { + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBe(9); + }); + + it('RLS-07b: superuser can INSERT into any tenant', async () => { + const result = await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (999, '999/default/su_origin', 'default', 'su-etag') + RETURNING id + `); + expect(result.rowCount).toBe(1); + }); + + it('RLS-07c: superuser can DELETE any row', async () => { + const result = await pg.query( + 'DELETE FROM object_store_public.files WHERE database_id = 2' + ); + expect(result.rowCount).toBeGreaterThan(0); + }); +}); + +// ========================================================================== +// RLS-01: Tenant Isolation (authenticated) +// ========================================================================== + +describe('RLS-01: Tenant Isolation', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + // RLS-01d runs FIRST so app.database_id has never been set in this session. 
+ // current_setting('app.database_id') without missing_ok raises "unrecognized". + it('RLS-01d: missing app.database_id raises error', async () => { + await switchRole('authenticated'); + + await expect( + pg.query('SELECT * FROM object_store_public.files') + ).rejects.toThrow(/app\.database_id|invalid input syntax for type integer/); + }); + + it('RLS-01a: SELECT scoped to own tenant', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); + expect(result.rows.find((r: any) => r.database_id === 2)).toBeUndefined(); + }); + + it('RLS-01b: INSERT rejected for wrong tenant', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, bucket_key, key, created_by, etag) + VALUES (2, 'default', '2/default/bad_origin', $1, 'bad-etag') + `, [USER_A]) + ).rejects.toThrow(/row-level security/i); + }); + + it('RLS-01c: UPDATE rejected for wrong tenant (0 rows)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status_reason = 'test' + WHERE id = '44444444-0000-0000-0000-000000000001' AND database_id = 2 + `); + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// RLS-02: Visibility (authenticated) +// ========================================================================== + +describe('RLS-02: Visibility', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-02a: User A sees own files in all 
statuses', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query( + 'SELECT * FROM object_store_public.files WHERE created_by = $1', + [USER_A] + ); + expect(result.rowCount).toBe(6); + }); + + it('RLS-02b: User A sees other users\' ready files only', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query( + 'SELECT * FROM object_store_public.files WHERE created_by = $1', + [USER_B] + ); + expect(result.rowCount).toBe(1); + expect(result.rows[0].status).toBe('ready'); + }); + + it('RLS-02c: User B sees own pending files', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_B, + }); + + const result = await pg.query( + 'SELECT * FROM object_store_public.files WHERE created_by = $1', + [USER_B] + ); + expect(result.rowCount).toBe(2); + }); + + it('RLS-02d: User B cannot see User A\'s non-ready files', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_B, + }); + + const result = await pg.query( + `SELECT * FROM object_store_public.files + WHERE created_by = $1 AND status != 'ready'`, + [USER_A] + ); + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// RLS-03: INSERT/UPDATE Permissions (authenticated) +// ========================================================================== + +describe('RLS-03: INSERT/UPDATE Permissions', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-03a: INSERT succeeds with correct tenant', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + INSERT INTO 
object_store_public.files (database_id, bucket_key, key, created_by, etag) + VALUES (1, 'default', '1/default/new_origin', $1, 'newtag') + RETURNING * + `, [USER_A]); + expect(result.rowCount).toBe(1); + expect(result.rows[0].status).toBe('pending'); + }); + + it('RLS-03b: UPDATE own file succeeds', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status_reason = 'user note' + WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 + `); + expect(result.rowCount).toBe(1); + }); + + it('RLS-03c: DELETE denied (no DELETE grant)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await expect( + pg.query(` + DELETE FROM object_store_public.files + WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 + `) + ).rejects.toThrow(/permission denied/i); + }); + + it('RLS-03d: UPDATE invisible file (other user\'s pending) -- 0 rows', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status_reason = 'hacked' + WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 + `); + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// RLS-04: Anonymous -- No Access +// ========================================================================== + +describe('RLS-04: Anonymous -- No Access', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-04a: SELECT denied', async () => { + await switchRole('anonymous', { 'app.database_id': '1' }); + + await expect( + pg.query('SELECT * FROM object_store_public.files') + 
).rejects.toThrow(/permission denied/i); + }); + + it('RLS-04b: INSERT denied', async () => { + await switchRole('anonymous', { 'app.database_id': '1' }); + + await expect( + pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + VALUES (1, '1/default/anon_origin', 'default', 'x') + `) + ).rejects.toThrow(/permission denied/i); + }); + + it('RLS-04c: public bucket policy works with temporary GRANT', async () => { + // Temporarily grant SELECT to anonymous (rolled back in afterEach) + await pg.query('GRANT SELECT ON object_store_public.files TO anonymous'); + + await switchRole('anonymous', { 'app.database_id': '1' }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + + // Anonymous only has files_public_bucket_read (files_visibility is TO authenticated). + // Should see only public-assets bucket + ready status. + expect(result.rows.length).toBe(1); + expect(result.rows[0].id).toBe('33333333-0000-0000-0000-000000000001'); + expect(result.rows[0].bucket_key).toBe('public-assets'); + expect(result.rows[0].status).toBe('ready'); + }); +}); + +// ========================================================================== +// RLS-05: Administrator Override +// ========================================================================== + +describe('RLS-05: Administrator Override', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-05a: admin sees all files in tenant regardless of status/creator', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBe(8); + expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); + }); + + it('RLS-05b: admin sees other users\' pending/error files', async () => { + 
await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query(` + SELECT * FROM object_store_public.files + WHERE status IN ('pending', 'error') + `); + expect(result.rowCount).toBe(4); + }); + + it('RLS-05c: admin can UPDATE any file in tenant', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status_reason = 'admin override' + WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 + `); + expect(result.rowCount).toBe(1); + }); + + it('RLS-05d: admin still cannot access other tenants', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query( + 'SELECT * FROM object_store_public.files WHERE database_id = 2' + ); + expect(result.rowCount).toBe(0); + }); + + it('RLS-05e: admin DELETE still denied (no DELETE grant on authenticated)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + await expect( + pg.query(` + DELETE FROM object_store_public.files + WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 + `) + ).rejects.toThrow(/permission denied/i); + }); +}); + +// ========================================================================== +// RLS-06: service_role -- Full Access Including DELETE +// ========================================================================== + +describe('RLS-06: service_role', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-06a: service_role sees all files in tenant (with admin override)', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + 
}); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBeGreaterThanOrEqual(8); + expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); + }); + + it('RLS-06b: service_role with app.role=administrator sees all', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBe(8); + }); + + it('RLS-06c: service_role without app.role sees only ready (visibility gap)', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + // Without app.role and without app.user_id, visibility policy reduces to + // status = 'ready' (NULLIF on empty user_id → NULL → created_by check is NULL). + // Expect ready files in tenant 1: 111...01, 222...01, 333...01 = 3 + expect(result.rowCount).toBe(3); + expect(result.rows.every((r: any) => r.status === 'ready')).toBe(true); + }); + + it('RLS-06d: service_role can DELETE files', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query(` + DELETE FROM object_store_public.files + WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 + `); + expect(result.rowCount).toBe(1); + }); + + it('RLS-06e: service_role cannot DELETE cross-tenant', async () => { + await switchRole('service_role', { + 'app.database_id': '1', + 'app.role': 'administrator', + }); + + const result = await pg.query(` + DELETE FROM object_store_public.files + WHERE id = '44444444-0000-0000-0000-000000000001' AND database_id = 2 + `); + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// RLS-08: Buckets Table Access +// 
========================================================================== + +describe('RLS-08: Buckets Table Access', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-08a: authenticated role can read buckets (GRANT added for policy subquery)', async () => { + await switchRole('authenticated', { 'app.database_id': '1' }); + + const result = await pg.query('SELECT * FROM object_store_public.buckets'); + expect(result.rowCount).toBeGreaterThan(0); + }); + + it('RLS-08b: service_role can read buckets (GRANT added for policy subquery)', async () => { + await switchRole('service_role', { 'app.database_id': '1' }); + + const result = await pg.query('SELECT * FROM object_store_public.buckets'); + expect(result.rowCount).toBeGreaterThan(0); + }); +}); + +// ========================================================================== +// RLS-09: Edge Cases +// ========================================================================== + +describe('RLS-09: Edge Cases', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-09a: app.database_id type mismatch', async () => { + await switchRole('authenticated', { + 'app.database_id': 'not-a-number', + 'app.user_id': USER_A, + }); + + await expect( + pg.query('SELECT * FROM object_store_public.files') + ).rejects.toThrow(/invalid input syntax for type integer/); + }); + + it('RLS-09b: app.user_id type mismatch', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': 'not-a-uuid', + }); + + await expect( + pg.query('SELECT * FROM object_store_public.files') + ).rejects.toThrow(/invalid input syntax for type uuid/); + }); + + it('RLS-09c: empty tenant (no files for database_id=999)', async () => { + await switchRole('authenticated', { + 'app.database_id': 
'999', + 'app.user_id': USER_A, + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + expect(result.rowCount).toBe(0); + }); + + it('RLS-09d: INSERT with mismatched created_by (spoofing)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + // created_by is NOT enforced by RLS -- application layer must set it correctly. + // Note: RETURNING * would fail here because SELECT policies block reading + // the row back (created_by=USER_B != app.user_id=USER_A and status='pending'). + const result = await pg.query(` + INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag) + VALUES (1, '1/default/spoof_origin', 'default', $1, 'x') + `, [USER_B]); + expect(result.rowCount).toBe(1); + + // Verify the spoofed created_by was persisted by reading as superuser + await pg.query('RESET ROLE'); + const verify = await pg.query( + `SELECT created_by FROM object_store_public.files WHERE key = '1/default/spoof_origin'` + ); + expect(verify.rows[0].created_by).toBe(USER_B); + }); + + it('RLS-09e: multiple policies combine with OR for SELECT', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + 'app.role': 'authenticated', + }); + + const result = await pg.query('SELECT * FROM object_store_public.files'); + // Policies: RESTRICTIVE(tenant_isolation) AND PERMISSIVE(visibility OR public_bucket_read OR admin_override) + // User A sees: own files (all 6) + User B's ready file (1) = 7 + // (User B's pending file is invisible; admin_override is false) + expect(result.rowCount).toBe(7); + }); +}); + +// ========================================================================== +// RLS-10: State Machine with RLS +// ========================================================================== + +describe('RLS-10: State Machine with RLS', () => { + beforeEach(async () => { + await pg.beforeEach(); + await insertBuckets(); + 
await insertFixtures(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('RLS-10a: authenticated user can transition own file pending->processing', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status = 'processing' + WHERE id = '11111111-0000-0000-0000-000000000002' AND database_id = 1 + RETURNING * + `); + expect(result.rowCount).toBe(1); + expect(result.rows[0].processing_started_at).not.toBeNull(); + }); + + it('RLS-10b: authenticated user cannot transition other\'s pending file (invisible)', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + const result = await pg.query(` + UPDATE object_store_public.files + SET status = 'processing' + WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 + `); + expect(result.rowCount).toBe(0); + }); + + it('RLS-10c: invalid transition still raises under RLS', async () => { + await switchRole('authenticated', { + 'app.database_id': '1', + 'app.user_id': USER_A, + }); + + await expect( + pg.query(` + UPDATE object_store_public.files + SET status = 'deleting' + WHERE id = '11111111-0000-0000-0000-000000000002' AND database_id = 1 + `) + ).rejects.toThrow(/Invalid status transition from pending to deleting/); + }); +}); diff --git a/migrations/jest.config.js b/migrations/jest.config.js new file mode 100644 index 000000000..230290906 --- /dev/null +++ b/migrations/jest.config.js @@ -0,0 +1,18 @@ +/** @type {import('ts-jest').JestConfigWithTsJest} */ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + transform: { + '^.+\\.tsx?$': [ + 'ts-jest', + { + babelConfig: false, + tsconfig: '../tsconfig.json', + }, + ], + }, + transformIgnorePatterns: [`/node_modules/*`], + testRegex: '(/__tests__/.*|(\\.|/)(test|spec))\\.(jsx?|tsx?)$', + moduleFileExtensions: ['ts', 'tsx', 'js', 
'jsx', 'json', 'node'], + modulePathIgnorePatterns: ['dist/*'], +}; diff --git a/migrations/object_store.sql b/migrations/object_store.sql index b370ffdca..658b7169e 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -302,21 +302,28 @@ COMMENT ON TRIGGER files_after_update_queue_retry ON object_store_public.files I ALTER TABLE object_store_public.files ENABLE ROW LEVEL SECURITY; ALTER TABLE object_store_public.files FORCE ROW LEVEL SECURITY; --- Policy 1: Tenant isolation (all operations, all authenticated roles) +-- Policy 1: Tenant isolation (RESTRICTIVE -- always ANDed with all other policies) +-- Without this being RESTRICTIVE, permissive policies would OR together and +-- allow cross-tenant access (e.g. a ready file in tenant 2 visible via files_visibility). CREATE POLICY files_tenant_isolation ON object_store_public.files + AS RESTRICTIVE FOR ALL USING (database_id = current_setting('app.database_id')::integer) WITH CHECK (database_id = current_setting('app.database_id')::integer); --- Policy 2: Creator-only for non-ready files (SELECT) +-- Policy 2: Visibility for SELECT (authenticated + service_role only) +-- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling +-- when app.user_id is missing or empty (returns NULL instead of cast error). +-- Scoped to authenticated/service_role so anonymous only gets public_bucket_read. 
CREATE POLICY files_visibility ON object_store_public.files FOR SELECT + TO authenticated, service_role USING ( status = 'ready' - OR created_by = current_setting('app.user_id')::uuid + OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid ); --- Policy 3: Public bucket read (SELECT, for anonymous access) +-- Policy 3: Public bucket read for SELECT (all roles including anonymous) CREATE POLICY files_public_bucket_read ON object_store_public.files FOR SELECT USING ( @@ -329,12 +336,37 @@ CREATE POLICY files_public_bucket_read ON object_store_public.files AND status = 'ready' ); --- Policy 4: Admin override (all operations) +-- Policy 4: Admin override (all operations, authenticated + service_role) CREATE POLICY files_admin_override ON object_store_public.files FOR ALL + TO authenticated, service_role USING (current_setting('app.role', true) = 'administrator') WITH CHECK (current_setting('app.role', true) = 'administrator'); +-- Policy 5: INSERT access (permissive base so non-admin users can insert) +CREATE POLICY files_insert_access ON object_store_public.files + FOR INSERT + TO authenticated, service_role + WITH CHECK (true); + +-- Policy 6: UPDATE access (replicates visibility for row targeting) +-- Non-admin users can only update rows they can see (ready or own). +-- Admin override policy covers admin UPDATE access separately. 
+CREATE POLICY files_update_access ON object_store_public.files + FOR UPDATE + TO authenticated, service_role + USING ( + status = 'ready' + OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid + ) + WITH CHECK (true); + +-- Policy 7: DELETE access (service_role only, grants already restrict authenticated) +CREATE POLICY files_delete_access ON object_store_public.files + FOR DELETE + TO service_role + USING (true); + -- Grants GRANT SELECT, INSERT, UPDATE ON object_store_public.files TO authenticated; GRANT SELECT, INSERT, UPDATE, DELETE ON object_store_public.files TO service_role; diff --git a/migrations/package.json b/migrations/package.json new file mode 100644 index 000000000..a5e831e03 --- /dev/null +++ b/migrations/package.json @@ -0,0 +1,12 @@ +{ + "name": "@constructive/migrations", + "version": "0.0.1", + "private": true, + "scripts": { + "test": "jest", + "test:watch": "jest --watch" + }, + "devDependencies": { + "pgsql-test": "workspace:^" + } +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 6ea1ca196..a915c7516 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1647,6 +1647,12 @@ importers: version: link:../../postgres/pgsql-test/dist publishDirectory: dist + migrations: + devDependencies: + pgsql-test: + specifier: workspace:^ + version: link:../postgres/pgsql-test/dist + packages/12factor-env: dependencies: envalid: diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index fcc12b909..0c0b8d497 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -9,3 +9,4 @@ packages: - 'graphile/*' - 'jobs/*' - 'functions/*' + - 'migrations' From 6d845c6f46b3fd5aa009a29276760570ec7b5cd3 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 12 Mar 2026 09:46:55 +0800 Subject: [PATCH 3/9] fixed tests --- migrations/__tests__/object-store-rls.test.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/migrations/__tests__/object-store-rls.test.ts b/migrations/__tests__/object-store-rls.test.ts index 
d6b2d9a85..2c8c7ab86 100644 --- a/migrations/__tests__/object-store-rls.test.ts +++ b/migrations/__tests__/object-store-rls.test.ts @@ -174,6 +174,8 @@ describe('RLS-01: Tenant Isolation', () => { }); const result = await pg.query('SELECT * FROM object_store_public.files'); + // Must return rows (prevents vacuous pass on empty result from Array.every) + expect(result.rowCount).toBeGreaterThan(0); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); expect(result.rows.find((r: any) => r.database_id === 2)).toBeUndefined(); }); @@ -503,7 +505,7 @@ describe('RLS-06: service_role', () => { }); const result = await pg.query('SELECT * FROM object_store_public.files'); - expect(result.rowCount).toBeGreaterThanOrEqual(8); + expect(result.rowCount).toBe(8); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); }); @@ -575,14 +577,15 @@ describe('RLS-08: Buckets Table Access', () => { await switchRole('authenticated', { 'app.database_id': '1' }); const result = await pg.query('SELECT * FROM object_store_public.buckets'); - expect(result.rowCount).toBeGreaterThan(0); + // Buckets has no RLS -- all 3 seeded buckets visible (2 tenant 1 + 1 tenant 2) + expect(result.rowCount).toBe(3); }); it('RLS-08b: service_role can read buckets (GRANT added for policy subquery)', async () => { await switchRole('service_role', { 'app.database_id': '1' }); const result = await pg.query('SELECT * FROM object_store_public.buckets'); - expect(result.rowCount).toBeGreaterThan(0); + expect(result.rowCount).toBe(3); }); }); From f82bd177f757641e5a82f365b2df13a48796bdc6 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Thu, 12 Mar 2026 11:48:20 +0800 Subject: [PATCH 4/9] added tests and debug --- .../__tests__/upload-resolver.e2e.test.ts | 240 ++++++++++++++++++ .../graphile-settings/src/upload-resolver.ts | 20 ++ 2 files changed, 260 insertions(+) create mode 100644 graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts diff --git 
a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts new file mode 100644 index 000000000..5965f7460 --- /dev/null +++ b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts @@ -0,0 +1,240 @@ +import { S3StorageProvider } from '@constructive-io/s3-streamer'; +import { Client as PgClient } from 'pg'; +import { Readable } from 'stream'; + +jest.setTimeout(60000); + +const SCHEMA = 'object_store_public'; +const TABLE = 'files'; +const BUCKET = 'test-bucket'; +const USER_ID = 'aaaaaaaa-0000-0000-0000-000000000001'; +const MINIMAL_PNG = Buffer.from( + 'iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMCAO6xM4cAAAAASUVORK5CYII=', + 'base64', +); + +type UploadResolverModule = typeof import('../src/upload-resolver'); + +function makePg(): PgClient { + return new PgClient({ + host: 'localhost', + port: 5432, + user: 'postgres', + password: 'password', + database: 'constructive', + }); +} + +function makeStorage(): S3StorageProvider { + return new S3StorageProvider({ + bucket: BUCKET, + awsRegion: 'us-east-1', + awsAccessKey: 'minioadmin', + awsSecretKey: 'minioadmin', + minioEndpoint: 'http://localhost:9000', + provider: 'minio', + }); +} + +async function setupObjectStoreSchema(pg: PgClient): Promise { + await pg.query('CREATE EXTENSION IF NOT EXISTS pgcrypto'); + await pg.query(`CREATE SCHEMA IF NOT EXISTS ${SCHEMA}`); + await pg.query(` + DO $$ BEGIN + CREATE TYPE ${SCHEMA}.file_status AS ENUM ( + 'pending', 'processing', 'ready', 'error', 'deleting' + ); + EXCEPTION WHEN duplicate_object THEN NULL; + END $$ + `); + await pg.query(` + CREATE TABLE IF NOT EXISTS ${SCHEMA}.${TABLE} ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + bucket_key text NOT NULL DEFAULT 'default', + key text NOT NULL, + status ${SCHEMA}.file_status NOT NULL DEFAULT 'pending', + status_reason text, + etag text, + source_table text, + source_column text, + 
source_id uuid, + processing_started_at timestamptz, + created_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + CONSTRAINT graphile_settings_object_store_files_pkey PRIMARY KEY (id, database_id) + ) + `); +} + +async function cleanupObjectStoreRows(pg: PgClient): Promise { + await pg.query(`DELETE FROM ${SCHEMA}.${TABLE}`); +} + +async function objectExists(storage: S3StorageProvider, key: string): Promise { + try { + await storage.head(key); + return true; + } catch { + return false; + } +} + +async function loadUploadResolverModule(): Promise { + jest.resetModules(); + return import('../src/upload-resolver'); +} + +function makeUpload(filename: string, body: Buffer) { + return { + filename, + createReadStream: () => Readable.from(body), + }; +} + +describe('upload-resolver e2e', () => { + let pg: PgClient; + let storage: S3StorageProvider; + let uploadResolverModule: UploadResolverModule | null = null; + const originalEnv = { ...process.env }; + const uploadedKeys = new Set(); + + beforeAll(async () => { + process.env.UPLOAD_V2_ENABLED = 'true'; + process.env.BUCKET_PROVIDER = 'minio'; + process.env.BUCKET_NAME = BUCKET; + process.env.AWS_REGION = 'us-east-1'; + process.env.AWS_ACCESS_KEY = 'minioadmin'; + process.env.AWS_SECRET_KEY = 'minioadmin'; + process.env.MINIO_ENDPOINT = 'http://localhost:9000'; + process.env.PGHOST = 'localhost'; + process.env.PGPORT = '5432'; + process.env.PGUSER = 'postgres'; + process.env.PGPASSWORD = 'password'; + process.env.PGDATABASE = 'constructive'; + + pg = makePg(); + await pg.connect(); + storage = makeStorage(); + await setupObjectStoreSchema(pg); + }); + + afterEach(async () => { + if (uploadResolverModule) { + await uploadResolverModule.__resetUploadResolverForTests(); + uploadResolverModule = null; + } + + for (const key of uploadedKeys) { + try { + await storage.delete(key); + } catch { + // ignore cleanup failures for already-deleted objects + } + } + 
uploadedKeys.clear(); + + await cleanupObjectStoreRows(pg); + }); + + afterAll(async () => { + process.env = originalEnv; + await pg.end(); + storage.destroy(); + }); + + it('streams a REST upload to storage and inserts a pending files row', async () => { + uploadResolverModule = await loadUploadResolverModule(); + + const result = await uploadResolverModule.streamToStorage( + Readable.from(MINIMAL_PNG), + 'avatar.png', + { + databaseId: '1', + userId: USER_ID, + bucketKey: 'default', + } + ); + + expect(result.mime).toBe('image/png'); + expect(result.filename).toBe('avatar.png'); + expect(result.key).toMatch(/^1\/default\/[0-9a-f-]+_origin$/); + + uploadedKeys.add(result.key as string); + expect(await objectExists(storage, result.key as string)).toBe(true); + + const dbResult = await pg.query( + `SELECT database_id, bucket_key, key, status, created_by, etag + FROM ${SCHEMA}.${TABLE} + WHERE key = $1`, + [result.key] + ); + + expect(dbResult.rowCount).toBe(1); + expect(dbResult.rows[0]).toEqual( + expect.objectContaining({ + database_id: 1, + bucket_key: 'default', + key: result.key, + status: 'pending', + created_by: USER_ID, + }) + ); + expect(dbResult.rows[0].etag).toEqual(expect.any(String)); + expect(dbResult.rows[0].etag.length).toBeGreaterThan(0); + }); + + it('handles inline image uploads and inserts the same pending files row shape', async () => { + uploadResolverModule = await loadUploadResolverModule(); + + const imageUploadDefinition = uploadResolverModule.constructiveUploadFieldDefinitions.find( + (definition) => 'name' in definition && definition.name === 'image' + ); + + if (!imageUploadDefinition) { + throw new Error('Missing image upload definition'); + } + + const result = await imageUploadDefinition.resolve( + makeUpload('inline.png', MINIMAL_PNG) as any, + {}, + { + req: { + api: { databaseId: '1' }, + token: { user_id: USER_ID }, + }, + }, + { uploadPlugin: { tags: {}, type: 'image' } } as any + ); + + expect(result).toEqual( + 
expect.objectContaining({ + filename: 'inline.png', + mime: 'image/png', + key: expect.stringMatching(/^1\/default\/[0-9a-f-]+_origin$/), + url: expect.any(String), + }) + ); + + const key = (result as { key: string }).key; + uploadedKeys.add(key); + expect(await objectExists(storage, key)).toBe(true); + + const dbResult = await pg.query( + `SELECT database_id, bucket_key, key, status, created_by + FROM ${SCHEMA}.${TABLE} + WHERE key = $1`, + [key] + ); + + expect(dbResult.rowCount).toBe(1); + expect(dbResult.rows[0]).toEqual({ + database_id: 1, + bucket_key: 'default', + key, + status: 'pending', + created_by: USER_ID, + }); + }); +}); diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index c744ee11f..fb9754565 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -221,6 +221,26 @@ export async function streamToStorage( }; } +export async function __resetUploadResolverForTests(): Promise { + if (streamer && typeof (streamer as { destroy?: () => void }).destroy === 'function') { + streamer.destroy(); + } + streamer = null; + + if ( + storageProvider + && typeof (storageProvider as StorageProvider & { destroy?: () => void }).destroy === 'function' + ) { + (storageProvider as StorageProvider & { destroy: () => void }).destroy(); + } + storageProvider = null; + + if (pgPool) { + await pgPool.end(); + } + pgPool = null; +} + /** * Upload resolver that streams files to S3/MinIO. 
* From ce9b8c4b770aa7566a180b8b161e0adb7b3b5718 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Fri, 13 Mar 2026 16:31:17 +0800 Subject: [PATCH 5/9] change schema names --- .../__tests__/upload-resolver.e2e.test.ts | 12 +- .../graphile-settings/src/upload-resolver.ts | 6 +- graphql/server/src/middleware/upload.ts | 2 +- .../__tests__/object-store-lifecycle.test.ts | 140 ++-- migrations/__tests__/object-store-rls.test.ts | 106 +-- migrations/files_store.sql | 639 ++++++++++++++++++ migrations/object_store.sql | 164 ++--- 7 files changed, 854 insertions(+), 215 deletions(-) create mode 100644 migrations/files_store.sql diff --git a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts index 5965f7460..a06dd603e 100644 --- a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts +++ b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts @@ -4,7 +4,7 @@ import { Readable } from 'stream'; jest.setTimeout(60000); -const SCHEMA = 'object_store_public'; +const SCHEMA = 'files_store_public'; const TABLE = 'files'; const BUCKET = 'test-bucket'; const USER_ID = 'aaaaaaaa-0000-0000-0000-000000000001'; @@ -36,7 +36,7 @@ function makeStorage(): S3StorageProvider { }); } -async function setupObjectStoreSchema(pg: PgClient): Promise { +async function setupFilesStoreSchema(pg: PgClient): Promise { await pg.query('CREATE EXTENSION IF NOT EXISTS pgcrypto'); await pg.query(`CREATE SCHEMA IF NOT EXISTS ${SCHEMA}`); await pg.query(` @@ -63,12 +63,12 @@ async function setupObjectStoreSchema(pg: PgClient): Promise { created_by uuid, created_at timestamptz NOT NULL DEFAULT now(), updated_at timestamptz NOT NULL DEFAULT now(), - CONSTRAINT graphile_settings_object_store_files_pkey PRIMARY KEY (id, database_id) + CONSTRAINT graphile_settings_files_store_files_pkey PRIMARY KEY (id, database_id) ) `); } -async function cleanupObjectStoreRows(pg: PgClient): Promise { +async function 
cleanupFilesStoreRows(pg: PgClient): Promise { await pg.query(`DELETE FROM ${SCHEMA}.${TABLE}`); } @@ -117,7 +117,7 @@ describe('upload-resolver e2e', () => { pg = makePg(); await pg.connect(); storage = makeStorage(); - await setupObjectStoreSchema(pg); + await setupFilesStoreSchema(pg); }); afterEach(async () => { @@ -135,7 +135,7 @@ describe('upload-resolver e2e', () => { } uploadedKeys.clear(); - await cleanupObjectStoreRows(pg); + await cleanupFilesStoreRows(pg); }); afterAll(async () => { diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index fb9754565..2e5d08647 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -9,7 +9,7 @@ * * V2 mode (UPLOAD_V2_ENABLED=true): * - Key format: {database_id}/{bucket_key}/{uuid}_origin - * - INSERT into object_store_public.files after S3 upload + * - INSERT into files_store_public.files after S3 upload * - Returns { key, url, mime, filename } for image/upload types * * Legacy mode (UPLOAD_V2_ENABLED=false, default): @@ -144,7 +144,7 @@ function generateV2Key(databaseId: string, bucketKey: string): { key: string; fi } /** - * INSERTs a row into object_store_public.files. + * INSERTs a row into files_store_public.files. * Fires the AFTER INSERT trigger which enqueues a process-image job. 
*/ async function insertFileRecord( @@ -157,7 +157,7 @@ async function insertFileRecord( ): Promise { const pool = getPgPool(); await pool.query( - `INSERT INTO object_store_public.files + `INSERT INTO files_store_public.files (id, database_id, bucket_key, key, etag, created_by) VALUES ($1, $2, $3, $4, $5, $6)`, [fileId, Number(databaseId), bucketKey, key, etag, createdBy], diff --git a/graphql/server/src/middleware/upload.ts b/graphql/server/src/middleware/upload.ts index 71ab851b1..0d34775c4 100644 --- a/graphql/server/src/middleware/upload.ts +++ b/graphql/server/src/middleware/upload.ts @@ -270,7 +270,7 @@ export const createUploadAuthenticateMiddleware = ( * 2. GraphQL mutation -> patch row with the returned metadata * * When UPLOAD_V2_ENABLED=true, passes databaseId and userId to streamToStorage - * so it can use the new key format and INSERT into object_store_public.files. + * so it can use the new key format and INSERT into files_store_public.files. */ export const uploadRoute: RequestHandler[] = [ parseFileWithErrors, diff --git a/migrations/__tests__/object-store-lifecycle.test.ts b/migrations/__tests__/object-store-lifecycle.test.ts index be2b7ac80..e18cf0aad 100644 --- a/migrations/__tests__/object-store-lifecycle.test.ts +++ b/migrations/__tests__/object-store-lifecycle.test.ts @@ -4,7 +4,7 @@ import { resolve } from 'path'; import { getConnections, PgTestClient, seed } from 'pgsql-test'; -const MIGRATION_PATH = resolve(__dirname, '../object_store.sql'); +const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; @@ -57,10 +57,10 @@ beforeAll(async () => { `); // Grants needed for isolated test (normally from pgpm extension deploy) - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO authenticated'); - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO service_role'); - await pg.query('GRANT SELECT ON object_store_public.buckets TO authenticated'); - await pg.query('GRANT 
SELECT ON object_store_public.buckets TO service_role'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO service_role'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO service_role'); // Replace the app_jobs.add_job stub with one that records calls await pg.query(` @@ -98,7 +98,7 @@ beforeAll(async () => { // Seed a default bucket await pg.query(` - INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + INSERT INTO files_store_public.buckets (database_id, key, name, is_public, config) VALUES (1, 'default', 'Default Bucket', false, '{}') `); }); @@ -134,13 +134,13 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag) VALUES ($1, 1, $2, 'default', $3, 'etag-origin') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); // Verify file exists with pending status const file = await pg.query( - 'SELECT * FROM object_store_public.files WHERE id = $1', + 'SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rowCount).toBe(1); @@ -164,7 +164,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { it('step 2: service_role transitions pending → processing', async () => { // Insert as superuser first await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'pending') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); @@ -176,14 +176,14 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); await pg.query( 
- `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, [ORIGIN_ID] ); // Verify processing_started_at is set await pg.query('RESET ROLE'); const file = await pg.query( - 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, processing_started_at FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rows[0].status).toBe('processing'); @@ -193,7 +193,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { it('step 3: service_role inserts version rows (status=ready, bypasses job trigger)', async () => { // Setup: origin in processing state await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'processing') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); @@ -205,7 +205,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (database_id, key, bucket_key, created_by, etag, status) VALUES (1, $1, 'default', $2, 'etag-thumb', 'ready'), (1, $3, 'default', $2, 'etag-large', 'ready') @@ -218,7 +218,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { // Verify all three rows exist const files = await pg.query( - `SELECT key, status FROM object_store_public.files WHERE database_id = 1 ORDER BY key` + `SELECT key, status FROM files_store_public.files WHERE database_id = 1 ORDER BY key` ); expect(files.rowCount).toBe(3); expect(files.rows.map((r: any) => ({ key: r.key, status: r.status }))).toEqual([ @@ -231,7 +231,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () 
=> { it('step 4: service_role transitions origin processing → ready', async () => { // Setup: origin in processing state await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag-origin', 'processing') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); @@ -241,14 +241,14 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, [ORIGIN_ID] ); // Verify status and processing_started_at cleared await pg.query('RESET ROLE'); const file = await pg.query( - 'SELECT status, processing_started_at, updated_at, created_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, processing_started_at, updated_at, created_at FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rows[0].status).toBe('ready'); @@ -261,7 +261,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { it('step 5: user sees origin + versions after processing completes', async () => { // Setup: origin ready + 2 version rows ready await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $4, 'etag-origin', 'ready'), (gen_random_uuid(), 1, $3, 'default', $4, 'etag-thumb', 'ready'), @@ -275,7 +275,7 @@ describe('E2E-01: Upload Lifecycle -- happy path', () => { }); const files = await pg.query( - `SELECT key, status FROM object_store_public.files WHERE key LIKE '1/default/abc123%' ORDER BY key` + `SELECT key, status FROM files_store_public.files WHERE key LIKE '1/default/abc123%' ORDER BY key` ); 
expect(files.rowCount).toBe(3); expect(files.rows.every((r: any) => r.status === 'ready')).toBe(true); @@ -301,7 +301,7 @@ describe('E2E-02: Error + Retry Path', () => { it('processing → error stores status_reason', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'processing') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); @@ -311,7 +311,7 @@ describe('E2E-02: Error + Retry Path', () => { }); await pg.query( - `UPDATE object_store_public.files + `UPDATE files_store_public.files SET status = 'error', status_reason = 'sharp: unsupported image format' WHERE id = $1 AND database_id = 1`, [ORIGIN_ID] @@ -319,7 +319,7 @@ describe('E2E-02: Error + Retry Path', () => { await pg.query('RESET ROLE'); const file = await pg.query( - 'SELECT status, status_reason, processing_started_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, status_reason, processing_started_at FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rows[0].status).toBe('error'); @@ -330,7 +330,7 @@ describe('E2E-02: Error + Retry Path', () => { it('error → pending (retry) re-queues process-image job', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'error') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); @@ -341,7 +341,7 @@ describe('E2E-02: Error + Retry Path', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'pending' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'pending' WHERE id = $1 AND database_id = 1`, [ORIGIN_ID] ); @@ -359,7 +359,7 @@ 
describe('E2E-02: Error + Retry Path', () => { it('full retry cycle: pending → processing → error → pending → processing → ready', async () => { // Step 1: upload (pending) await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag) VALUES ($1, 1, $2, 'default', $3, 'etag') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); @@ -367,25 +367,25 @@ describe('E2E-02: Error + Retry Path', () => { // Step 2: processing await pg.query( - `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1`, [ORIGIN_ID] ); - let file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + let file = await pg.query('SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID]); expect(file.rows[0].status).toBe('processing'); expect(file.rows[0].processing_started_at).not.toBeNull(); // Step 3: error await pg.query( - `UPDATE object_store_public.files SET status = 'error', status_reason = 'timeout' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'error', status_reason = 'timeout' WHERE id = $1`, [ORIGIN_ID] ); - file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + file = await pg.query('SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID]); expect(file.rows[0].status).toBe('error'); expect(file.rows[0].processing_started_at).toBeNull(); // Step 4: retry (error → pending) — should re-queue job await pg.query( - `UPDATE object_store_public.files SET status = 'pending' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'pending' WHERE id = $1`, [ORIGIN_ID] ); let jobs = await getJobLog(); @@ -395,16 +395,16 @@ describe('E2E-02: Error + Retry Path', () => { // Step 5: processing again await clearJobLog(); await pg.query( - `UPDATE 
object_store_public.files SET status = 'processing' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1`, [ORIGIN_ID] ); // Step 6: ready await pg.query( - `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1`, [ORIGIN_ID] ); - file = await pg.query('SELECT * FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + file = await pg.query('SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID]); expect(file.rows[0].status).toBe('ready'); expect(file.rows[0].processing_started_at).toBeNull(); }); @@ -430,13 +430,13 @@ describe('E2E-03: Deletion Flow', () => { it('ready → deleting queues delete_s3_object job', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'ready') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); await pg.query( - `UPDATE object_store_public.files SET status = 'deleting' WHERE id = $1`, + `UPDATE files_store_public.files SET status = 'deleting' WHERE id = $1`, [ORIGIN_ID] ); @@ -450,7 +450,7 @@ describe('E2E-03: Deletion Flow', () => { it('deleting origin + version rows each queue separate jobs', async () => { const VERSION_ID = '30000000-0000-0000-0000-000000000002'; await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $4, 'etag-origin', 'ready'), ($3, 1, $5, 'default', $4, 'etag-thumb', 'ready') @@ -459,7 +459,7 @@ describe('E2E-03: Deletion Flow', () => { // Delete both await pg.query( - `UPDATE object_store_public.files SET status = 'deleting' + `UPDATE files_store_public.files SET 
status = 'deleting' WHERE database_id = 1 AND key LIKE '1/default/del123%'` ); @@ -471,18 +471,18 @@ describe('E2E-03: Deletion Flow', () => { it('error → deleting is valid (skip processing on permanent failure)', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'error') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); await clearJobLog(); await pg.query( - `UPDATE object_store_public.files SET status = 'deleting', status_reason = 'user cancelled' + `UPDATE files_store_public.files SET status = 'deleting', status_reason = 'user cancelled' WHERE id = $1`, [ORIGIN_ID] ); - const file = await pg.query('SELECT status, status_reason FROM object_store_public.files WHERE id = $1', [ORIGIN_ID]); + const file = await pg.query('SELECT status, status_reason FROM files_store_public.files WHERE id = $1', [ORIGIN_ID]); expect(file.rows[0].status).toBe('deleting'); expect(file.rows[0].status_reason).toBe('user cancelled'); @@ -493,7 +493,7 @@ describe('E2E-03: Deletion Flow', () => { it('service_role can hard-DELETE after marking as deleting', async () => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'deleting') `, [ORIGIN_ID, ORIGIN_KEY, USER_A]); @@ -503,7 +503,7 @@ describe('E2E-03: Deletion Flow', () => { }); const result = await pg.query( - 'DELETE FROM object_store_public.files WHERE id = $1 AND database_id = 1', + 'DELETE FROM files_store_public.files WHERE id = $1 AND database_id = 1', [ORIGIN_ID] ); expect(result.rowCount).toBe(1); @@ -511,7 +511,7 @@ describe('E2E-03: Deletion Flow', () => { // Verify gone await pg.query('RESET ROLE'); const check = 
await pg.query( - 'SELECT * FROM object_store_public.files WHERE id = $1', + 'SELECT * FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(check.rowCount).toBe(0); @@ -549,13 +549,13 @@ describe('E2E-04: State Machine Validation', () => { 'rejects %s → %s', async (from, to) => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', $4) `, [ORIGIN_ID, ORIGIN_KEY, USER_A, from]); await expect( pg.query( - `UPDATE object_store_public.files SET status = $1 WHERE id = $2`, + `UPDATE files_store_public.files SET status = $1 WHERE id = $2`, [to, ORIGIN_ID] ) ).rejects.toThrow(/Invalid status transition/); @@ -577,17 +577,17 @@ describe('E2E-04: State Machine Validation', () => { 'allows %s → %s', async (from, to) => { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', $4) `, [ORIGIN_ID, ORIGIN_KEY, USER_A, from]); await pg.query( - `UPDATE object_store_public.files SET status = $1 WHERE id = $2`, + `UPDATE files_store_public.files SET status = $1 WHERE id = $2`, [to, ORIGIN_ID] ); const file = await pg.query( - 'SELECT status FROM object_store_public.files WHERE id = $1', + 'SELECT status FROM files_store_public.files WHERE id = $1', [ORIGIN_ID] ); expect(file.rows[0].status).toBe(to); @@ -611,7 +611,7 @@ describe('E2E-05: Constraints', () => { it('rejects empty key', async () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '', 'default', 'x') `) ).rejects.toThrow(/files_key_not_empty/); @@ -621,7 +621,7 
@@ describe('E2E-05: Constraints', () => { const longKey = '1/default/' + 'a'.repeat(1020); await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, $1, 'default', 'x') `, [longKey]) ).rejects.toThrow(/files_key_max_length/); @@ -630,7 +630,7 @@ describe('E2E-05: Constraints', () => { it('rejects invalid bucket_key format', async () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/BAD/test_origin', 'BAD-BUCKET', 'x') `) ).rejects.toThrow(/files_bucket_key_format/); @@ -639,7 +639,7 @@ describe('E2E-05: Constraints', () => { it('rejects partial source reference (source_table without source_column)', async () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag, source_table) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag, source_table) VALUES (1, '1/default/partial_origin', 'default', 'x', 'some_schema.some_table') `) ).rejects.toThrow(/files_source_complete/); @@ -647,7 +647,7 @@ describe('E2E-05: Constraints', () => { it('accepts complete source reference', async () => { const result = await pg.query(` - INSERT INTO object_store_public.files + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag, source_table, source_column, source_id) VALUES (1, '1/default/ref_origin', 'default', 'x', 'some_schema.some_table', 'image', gen_random_uuid()) @@ -659,13 +659,13 @@ describe('E2E-05: Constraints', () => { it('enforces unique key per tenant', async () => { await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/default/dup_origin', 'default', 'e1') `); await expect( 
pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/default/dup_origin', 'default', 'e2') `) ).rejects.toThrow(/files_key_unique/); @@ -673,12 +673,12 @@ describe('E2E-05: Constraints', () => { it('allows same key in different tenants', async () => { await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/default/shared_origin', 'default', 'e1') `); const result = await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (2, '1/default/shared_origin', 'default', 'e2') RETURNING * `); @@ -698,7 +698,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { beforeEach(async () => { await pg.beforeEach(); await pg.query(` - INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + INSERT INTO files_store_public.buckets (database_id, key, name, is_public, config) VALUES (1, 'default', 'Default Bucket', false, '{}') ON CONFLICT DO NOTHING `); @@ -719,13 +719,13 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, created_by, etag) VALUES (1, $1, 'default', $2, 'etag-origin') `, [ORIGIN_KEY, USER_A]); // Verify: user sees their pending file let myFiles = await pg.query( - `SELECT key, status FROM object_store_public.files WHERE key = $1`, + `SELECT key, status FROM files_store_public.files WHERE key = $1`, [ORIGIN_KEY] ); expect(myFiles.rowCount).toBe(1); @@ -739,7 +739,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { // Get the origin ID for later const originRow = await pg.query( - 
`SELECT id FROM object_store_public.files WHERE key = $1 AND database_id = 1`, + `SELECT id FROM files_store_public.files WHERE key = $1 AND database_id = 1`, [ORIGIN_KEY] ); const originId = originRow.rows[0].id; @@ -754,13 +754,13 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = 1`, [originId] ); await pg.query('RESET ROLE'); let origin = await pg.query( - 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, processing_started_at FROM files_store_public.files WHERE id = $1', [originId] ); expect(origin.rows[0].status).toBe('processing'); @@ -775,7 +775,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag, status) + INSERT INTO files_store_public.files (database_id, key, bucket_key, created_by, etag, status) VALUES (1, $1, 'default', $2, 'etag-thumb', 'ready'), (1, $3, 'default', $2, 'etag-large', 'ready') @@ -795,13 +795,13 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, + `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = 1`, [originId] ); await pg.query('RESET ROLE'); origin = await pg.query( - 'SELECT status, processing_started_at FROM object_store_public.files WHERE id = $1', + 'SELECT status, processing_started_at FROM files_store_public.files WHERE id = $1', [originId] ); expect(origin.rows[0].status).toBe('ready'); @@ -816,7 +816,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); const allFiles = await pg.query( - `SELECT key, status FROM object_store_public.files + `SELECT key, status FROM files_store_public.files 
WHERE key LIKE '1/default/full_e2e%' ORDER BY key` ); @@ -839,7 +839,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); await pg.query( - `UPDATE object_store_public.files SET status = 'deleting' + `UPDATE files_store_public.files SET status = 'deleting' WHERE key LIKE '1/default/full_e2e%' AND database_id = 1` ); @@ -860,7 +860,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { }); const deleted = await pg.query( - `DELETE FROM object_store_public.files + `DELETE FROM files_store_public.files WHERE key LIKE '1/default/full_e2e%' AND database_id = 1` ); expect(deleted.rowCount).toBe(3); @@ -868,7 +868,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { // Verify: no files remain await pg.query('RESET ROLE'); const remaining = await pg.query( - `SELECT * FROM object_store_public.files WHERE key LIKE '1/default/full_e2e%'` + `SELECT * FROM files_store_public.files WHERE key LIKE '1/default/full_e2e%'` ); expect(remaining.rowCount).toBe(0); }); diff --git a/migrations/__tests__/object-store-rls.test.ts b/migrations/__tests__/object-store-rls.test.ts index 2c8c7ab86..62b7ed40b 100644 --- a/migrations/__tests__/object-store-rls.test.ts +++ b/migrations/__tests__/object-store-rls.test.ts @@ -4,7 +4,7 @@ import { resolve } from 'path'; import { getConnections, PgTestClient, seed } from 'pgsql-test'; -const MIGRATION_PATH = resolve(__dirname, '../object_store.sql'); +const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; const USER_B = 'bbbbbbbb-0000-0000-0000-000000000002'; @@ -29,7 +29,7 @@ async function switchRole( async function insertBuckets() { await pg.query(` - INSERT INTO object_store_public.buckets (database_id, key, name, is_public, config) + INSERT INTO files_store_public.buckets (database_id, key, name, is_public, config) VALUES (1, 'default', 'Default Bucket', false, '{}'), (1, 'public-assets', 'Public Assets', true, '{}'), @@ -39,7 +39,7 @@ async function 
insertBuckets() { async function insertFixtures() { await pg.query(` - INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) VALUES ('11111111-0000-0000-0000-000000000001', 1, 'default', '1/default/aaa_origin', 'ready', $1, 'etag1'), ('11111111-0000-0000-0000-000000000002', 1, 'default', '1/default/bbb_origin', 'pending', $1, 'etag2'), @@ -48,21 +48,21 @@ async function insertFixtures() { `, [USER_A]); await pg.query(` - INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) VALUES ('22222222-0000-0000-0000-000000000001', 1, 'default', '1/default/eee_origin', 'ready', $1, 'etag5'), ('22222222-0000-0000-0000-000000000002', 1, 'default', '1/default/fff_origin', 'pending', $1, 'etag6') `, [USER_B]); await pg.query(` - INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) VALUES ('33333333-0000-0000-0000-000000000001', 1, 'public-assets', '1/public-assets/ggg_origin', 'ready', $1, 'etag7'), ('33333333-0000-0000-0000-000000000002', 1, 'public-assets', '1/public-assets/hhh_origin', 'pending', $1, 'etag8') `, [USER_A]); await pg.query(` - INSERT INTO object_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) + INSERT INTO files_store_public.files (id, database_id, bucket_key, key, status, created_by, etag) VALUES ('44444444-0000-0000-0000-000000000001', 2, 'default', '2/default/iii_origin', 'ready', $1, 'etag9') `, [USER_C]); @@ -87,18 +87,18 @@ beforeAll(async () => { END $$ `); - // The migration assumes object_store_public schema USAGE is already granted + // The migration assumes files_store_public schema USAGE is 
already granted // (from the original object-store pgpm extension). In isolation, grant explicitly. - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO authenticated'); - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO service_role'); - await pg.query('GRANT USAGE ON SCHEMA object_store_public TO anonymous'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO service_role'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO anonymous'); // Grant SELECT on buckets to roles that need it for the public_bucket_read policy subquery. // Without this, the EXISTS subquery in files_public_bucket_read fails with // "permission denied for table buckets". - await pg.query('GRANT SELECT ON object_store_public.buckets TO authenticated'); - await pg.query('GRANT SELECT ON object_store_public.buckets TO service_role'); - await pg.query('GRANT SELECT ON object_store_public.buckets TO anonymous'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO service_role'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO anonymous'); }); afterAll(async () => { @@ -121,13 +121,13 @@ describe('RLS-07: Superuser Bypass', () => { }); it('RLS-07a: superuser sees all tenants', async () => { - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(9); }); it('RLS-07b: superuser can INSERT into any tenant', async () => { const result = await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (999, '999/default/su_origin', 'default', 'su-etag') RETURNING id `); @@ -136,7 +136,7 @@ describe('RLS-07: Superuser Bypass', () => { 
it('RLS-07c: superuser can DELETE any row', async () => { const result = await pg.query( - 'DELETE FROM object_store_public.files WHERE database_id = 2' + 'DELETE FROM files_store_public.files WHERE database_id = 2' ); expect(result.rowCount).toBeGreaterThan(0); }); @@ -163,7 +163,7 @@ describe('RLS-01: Tenant Isolation', () => { await switchRole('authenticated'); await expect( - pg.query('SELECT * FROM object_store_public.files') + pg.query('SELECT * FROM files_store_public.files') ).rejects.toThrow(/app\.database_id|invalid input syntax for type integer/); }); @@ -173,7 +173,7 @@ describe('RLS-01: Tenant Isolation', () => { 'app.user_id': USER_A, }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); // Must return rows (prevents vacuous pass on empty result from Array.every) expect(result.rowCount).toBeGreaterThan(0); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); @@ -188,7 +188,7 @@ describe('RLS-01: Tenant Isolation', () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, bucket_key, key, created_by, etag) + INSERT INTO files_store_public.files (database_id, bucket_key, key, created_by, etag) VALUES (2, 'default', '2/default/bad_origin', $1, 'bad-etag') `, [USER_A]) ).rejects.toThrow(/row-level security/i); @@ -201,7 +201,7 @@ describe('RLS-01: Tenant Isolation', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status_reason = 'test' WHERE id = '44444444-0000-0000-0000-000000000001' AND database_id = 2 `); @@ -231,7 +231,7 @@ describe('RLS-02: Visibility', () => { }); const result = await pg.query( - 'SELECT * FROM object_store_public.files WHERE created_by = $1', + 'SELECT * FROM files_store_public.files WHERE created_by = $1', [USER_A] ); expect(result.rowCount).toBe(6); @@ -244,7 +244,7 @@ describe('RLS-02: Visibility', () => { }); const 
result = await pg.query( - 'SELECT * FROM object_store_public.files WHERE created_by = $1', + 'SELECT * FROM files_store_public.files WHERE created_by = $1', [USER_B] ); expect(result.rowCount).toBe(1); @@ -258,7 +258,7 @@ describe('RLS-02: Visibility', () => { }); const result = await pg.query( - 'SELECT * FROM object_store_public.files WHERE created_by = $1', + 'SELECT * FROM files_store_public.files WHERE created_by = $1', [USER_B] ); expect(result.rowCount).toBe(2); @@ -271,7 +271,7 @@ describe('RLS-02: Visibility', () => { }); const result = await pg.query( - `SELECT * FROM object_store_public.files + `SELECT * FROM files_store_public.files WHERE created_by = $1 AND status != 'ready'`, [USER_A] ); @@ -301,7 +301,7 @@ describe('RLS-03: INSERT/UPDATE Permissions', () => { }); const result = await pg.query(` - INSERT INTO object_store_public.files (database_id, bucket_key, key, created_by, etag) + INSERT INTO files_store_public.files (database_id, bucket_key, key, created_by, etag) VALUES (1, 'default', '1/default/new_origin', $1, 'newtag') RETURNING * `, [USER_A]); @@ -316,7 +316,7 @@ describe('RLS-03: INSERT/UPDATE Permissions', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status_reason = 'user note' WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 `); @@ -331,7 +331,7 @@ describe('RLS-03: INSERT/UPDATE Permissions', () => { await expect( pg.query(` - DELETE FROM object_store_public.files + DELETE FROM files_store_public.files WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 `) ).rejects.toThrow(/permission denied/i); @@ -344,7 +344,7 @@ describe('RLS-03: INSERT/UPDATE Permissions', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status_reason = 'hacked' WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 `); @@ -371,7 +371,7 @@ describe('RLS-04: Anonymous -- No 
Access', () => { await switchRole('anonymous', { 'app.database_id': '1' }); await expect( - pg.query('SELECT * FROM object_store_public.files') + pg.query('SELECT * FROM files_store_public.files') ).rejects.toThrow(/permission denied/i); }); @@ -380,7 +380,7 @@ describe('RLS-04: Anonymous -- No Access', () => { await expect( pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, etag) VALUES (1, '1/default/anon_origin', 'default', 'x') `) ).rejects.toThrow(/permission denied/i); @@ -388,11 +388,11 @@ describe('RLS-04: Anonymous -- No Access', () => { it('RLS-04c: public bucket policy works with temporary GRANT', async () => { // Temporarily grant SELECT to anonymous (rolled back in afterEach) - await pg.query('GRANT SELECT ON object_store_public.files TO anonymous'); + await pg.query('GRANT SELECT ON files_store_public.files TO anonymous'); await switchRole('anonymous', { 'app.database_id': '1' }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); // Anonymous only has files_public_bucket_read (files_visibility is TO authenticated). // Should see only public-assets bucket + ready status. 
@@ -424,7 +424,7 @@ describe('RLS-05: Administrator Override', () => { 'app.role': 'administrator', }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(8); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); }); @@ -436,7 +436,7 @@ describe('RLS-05: Administrator Override', () => { }); const result = await pg.query(` - SELECT * FROM object_store_public.files + SELECT * FROM files_store_public.files WHERE status IN ('pending', 'error') `); expect(result.rowCount).toBe(4); @@ -449,7 +449,7 @@ describe('RLS-05: Administrator Override', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status_reason = 'admin override' WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 `); @@ -463,7 +463,7 @@ describe('RLS-05: Administrator Override', () => { }); const result = await pg.query( - 'SELECT * FROM object_store_public.files WHERE database_id = 2' + 'SELECT * FROM files_store_public.files WHERE database_id = 2' ); expect(result.rowCount).toBe(0); }); @@ -476,7 +476,7 @@ describe('RLS-05: Administrator Override', () => { await expect( pg.query(` - DELETE FROM object_store_public.files + DELETE FROM files_store_public.files WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 `) ).rejects.toThrow(/permission denied/i); @@ -504,7 +504,7 @@ describe('RLS-06: service_role', () => { 'app.role': 'administrator', }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(8); expect(result.rows.every((r: any) => r.database_id === 1)).toBe(true); }); @@ -515,7 +515,7 @@ describe('RLS-06: service_role', () => { 'app.role': 'administrator', }); - const result = await pg.query('SELECT * FROM 
object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(8); }); @@ -524,7 +524,7 @@ describe('RLS-06: service_role', () => { 'app.database_id': '1', }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); // Without app.role and without app.user_id, visibility policy reduces to // status = 'ready' (NULLIF on empty user_id → NULL → created_by check is NULL). // Expect ready files in tenant 1: 111...01, 222...01, 333...01 = 3 @@ -539,7 +539,7 @@ describe('RLS-06: service_role', () => { }); const result = await pg.query(` - DELETE FROM object_store_public.files + DELETE FROM files_store_public.files WHERE id = '11111111-0000-0000-0000-000000000001' AND database_id = 1 `); expect(result.rowCount).toBe(1); @@ -552,7 +552,7 @@ describe('RLS-06: service_role', () => { }); const result = await pg.query(` - DELETE FROM object_store_public.files + DELETE FROM files_store_public.files WHERE id = '44444444-0000-0000-0000-000000000001' AND database_id = 2 `); expect(result.rowCount).toBe(0); @@ -576,7 +576,7 @@ describe('RLS-08: Buckets Table Access', () => { it('RLS-08a: authenticated role can read buckets (GRANT added for policy subquery)', async () => { await switchRole('authenticated', { 'app.database_id': '1' }); - const result = await pg.query('SELECT * FROM object_store_public.buckets'); + const result = await pg.query('SELECT * FROM files_store_public.buckets'); // Buckets has no RLS -- all 3 seeded buckets visible (2 tenant 1 + 1 tenant 2) expect(result.rowCount).toBe(3); }); @@ -584,7 +584,7 @@ describe('RLS-08: Buckets Table Access', () => { it('RLS-08b: service_role can read buckets (GRANT added for policy subquery)', async () => { await switchRole('service_role', { 'app.database_id': '1' }); - const result = await pg.query('SELECT * FROM object_store_public.buckets'); + const result = await 
pg.query('SELECT * FROM files_store_public.buckets'); expect(result.rowCount).toBe(3); }); }); @@ -611,7 +611,7 @@ describe('RLS-09: Edge Cases', () => { }); await expect( - pg.query('SELECT * FROM object_store_public.files') + pg.query('SELECT * FROM files_store_public.files') ).rejects.toThrow(/invalid input syntax for type integer/); }); @@ -622,7 +622,7 @@ describe('RLS-09: Edge Cases', () => { }); await expect( - pg.query('SELECT * FROM object_store_public.files') + pg.query('SELECT * FROM files_store_public.files') ).rejects.toThrow(/invalid input syntax for type uuid/); }); @@ -632,7 +632,7 @@ describe('RLS-09: Edge Cases', () => { 'app.user_id': USER_A, }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); expect(result.rowCount).toBe(0); }); @@ -646,7 +646,7 @@ describe('RLS-09: Edge Cases', () => { // Note: RETURNING * would fail here because SELECT policies block reading // the row back (created_by=USER_B != app.user_id=USER_A and status='pending'). 
const result = await pg.query(` - INSERT INTO object_store_public.files (database_id, key, bucket_key, created_by, etag) + INSERT INTO files_store_public.files (database_id, key, bucket_key, created_by, etag) VALUES (1, '1/default/spoof_origin', 'default', $1, 'x') `, [USER_B]); expect(result.rowCount).toBe(1); @@ -654,7 +654,7 @@ describe('RLS-09: Edge Cases', () => { // Verify the spoofed created_by was persisted by reading as superuser await pg.query('RESET ROLE'); const verify = await pg.query( - `SELECT created_by FROM object_store_public.files WHERE key = '1/default/spoof_origin'` + `SELECT created_by FROM files_store_public.files WHERE key = '1/default/spoof_origin'` ); expect(verify.rows[0].created_by).toBe(USER_B); }); @@ -666,7 +666,7 @@ describe('RLS-09: Edge Cases', () => { 'app.role': 'authenticated', }); - const result = await pg.query('SELECT * FROM object_store_public.files'); + const result = await pg.query('SELECT * FROM files_store_public.files'); // Policies: RESTRICTIVE(tenant_isolation) AND PERMISSIVE(visibility OR public_bucket_read OR admin_override) // User A sees: own files (all 6) + User B's ready file (1) = 7 // (User B's pending file is invisible; admin_override is false) @@ -696,7 +696,7 @@ describe('RLS-10: State Machine with RLS', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'processing' WHERE id = '11111111-0000-0000-0000-000000000002' AND database_id = 1 RETURNING * @@ -712,7 +712,7 @@ describe('RLS-10: State Machine with RLS', () => { }); const result = await pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'processing' WHERE id = '22222222-0000-0000-0000-000000000002' AND database_id = 1 `); @@ -727,7 +727,7 @@ describe('RLS-10: State Machine with RLS', () => { await expect( pg.query(` - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'deleting' WHERE id = 
'11111111-0000-0000-0000-000000000002' AND database_id = 1 `) diff --git a/migrations/files_store.sql b/migrations/files_store.sql new file mode 100644 index 000000000..169ec8f99 --- /dev/null +++ b/migrations/files_store.sql @@ -0,0 +1,639 @@ +-- ============================================================================= +-- Constructive Upload System -- files_store_public schema +-- ============================================================================= +-- Run: psql -h localhost -U postgres -d constructive < migrations/files_store.sql +-- ============================================================================= + +BEGIN; + +-- Ensure required roles exist (idempotent for dev environments) +DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticated') THEN + CREATE ROLE authenticated NOLOGIN; + END IF; + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'service_role') THEN + CREATE ROLE service_role NOLOGIN; + END IF; +END $$; + +-- Ensure app_jobs schema + stub add_job exist (required by trigger functions). +-- In production, app_jobs is deployed by the database-jobs pgpm module. +-- This stub is a no-op that prevents trigger creation from failing in dev. +CREATE SCHEMA IF NOT EXISTS app_jobs; + +CREATE OR REPLACE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL +) RETURNS void AS $$ +BEGIN + -- Stub: in production this is provided by database-jobs pgpm module. + -- In dev, jobs are enqueued but not processed unless the job worker is running. 
+ RAISE NOTICE 'app_jobs.add_job stub called: % %', identifier, payload; +END; +$$ LANGUAGE plpgsql; + +-- Ensure schema exists +CREATE SCHEMA IF NOT EXISTS files_store_public; + +-- --------------------------------------------------------------------------- +-- 1. Status ENUM +-- --------------------------------------------------------------------------- + +CREATE TYPE files_store_public.file_status AS ENUM ( + 'pending', + 'processing', + 'ready', + 'error', + 'deleting' +); + +COMMENT ON TYPE files_store_public.file_status IS + 'Lifecycle states for managed files. Transitions: pending->{processing,error}, processing->{ready,error,deleting}, ready->deleting, error->{deleting,pending(retry)}.'; + +-- --------------------------------------------------------------------------- +-- 2. Files Table +-- --------------------------------------------------------------------------- + +CREATE TABLE files_store_public.files ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + bucket_key text NOT NULL DEFAULT 'default', + key text NOT NULL, + status files_store_public.file_status NOT NULL DEFAULT 'pending', + status_reason text, + etag text, + source_table text, + source_column text, + source_id uuid, + processing_started_at timestamptz, + created_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + + CONSTRAINT files_pkey PRIMARY KEY (id, database_id), + CONSTRAINT files_key_unique UNIQUE (key, database_id), + CONSTRAINT files_key_not_empty CHECK (key <> ''), + CONSTRAINT files_key_max_length CHECK (length(key) <= 1024), + CONSTRAINT files_bucket_key_format CHECK (bucket_key ~ '^[a-z][a-z0-9_-]*$'), + CONSTRAINT files_source_table_format CHECK ( + source_table IS NULL OR source_table ~ '^[a-z_]+\.[a-z_]+$' + ), + CONSTRAINT files_source_complete CHECK ( + (source_table IS NULL AND source_column IS NULL AND source_id IS NULL) + OR (source_table IS NOT NULL AND source_column IS NOT NULL AND 
source_id IS NOT NULL) + ) +); + +COMMENT ON TABLE files_store_public.files IS + 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; +COMMENT ON COLUMN files_store_public.files.key IS + 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. Origin files use _origin suffix.'; +COMMENT ON COLUMN files_store_public.files.etag IS + 'S3 ETag for reconciliation and cache validation.'; +COMMENT ON COLUMN files_store_public.files.status_reason IS + 'Human-readable reason for current status (error details, deletion reason).'; +COMMENT ON COLUMN files_store_public.files.processing_started_at IS + 'Timestamp when processing began. Used to detect stuck jobs (alert at 15 min).'; +COMMENT ON COLUMN files_store_public.files.source_table IS + 'Schema-qualified table name referencing this file (e.g. constructive_users_public.users). NULL until the domain trigger populates it. Free text -- no FK possible.'; +COMMENT ON COLUMN files_store_public.files.source_column IS + 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; +COMMENT ON COLUMN files_store_public.files.source_id IS + 'Primary key of the row in the source table. NULL until domain trigger populates it.'; + +-- --------------------------------------------------------------------------- +-- 3. 
Buckets Table +-- --------------------------------------------------------------------------- + +CREATE TABLE files_store_public.buckets ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + database_id integer NOT NULL, + key text NOT NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT false, + config jsonb NOT NULL DEFAULT '{}'::jsonb, + created_by uuid, + updated_by uuid, + created_at timestamptz NOT NULL DEFAULT now(), + updated_at timestamptz NOT NULL DEFAULT now(), + + CONSTRAINT buckets_pkey PRIMARY KEY (id, database_id), + CONSTRAINT buckets_key_unique UNIQUE (key, database_id), + CONSTRAINT buckets_key_format CHECK (key ~ '^[a-z][a-z0-9_-]*$') +); + +COMMENT ON TABLE files_store_public.buckets IS + 'Logical bucket configuration per tenant. The bucket key maps to the S3 key prefix segment. is_public controls RLS policy for anonymous reads.'; + +-- --------------------------------------------------------------------------- +-- 4. Indexes +-- --------------------------------------------------------------------------- + +-- Tenant queries +CREATE INDEX files_database_id_idx + ON files_store_public.files (database_id); + +-- Bucket + tenant queries +CREATE INDEX files_bucket_database_id_idx + ON files_store_public.files (bucket_key, database_id); + +-- "My uploads" queries +CREATE INDEX files_created_by_database_id_created_at_idx + ON files_store_public.files (created_by, database_id, created_at DESC); + +-- Back-reference lookups (cleanup worker, attachment queries) +CREATE INDEX files_source_ref_idx + ON files_store_public.files (source_table, source_column, source_id); + +-- Pending file reaper (hourly cron) +CREATE INDEX files_pending_created_at_idx + ON files_store_public.files (created_at) + WHERE status = 'pending'; + +-- Stuck processing detection +CREATE INDEX files_processing_idx + ON files_store_public.files (processing_started_at) + WHERE status = 'processing'; + +-- Deletion job queue +CREATE INDEX files_deleting_idx + ON 
files_store_public.files (updated_at) + WHERE status = 'deleting'; + +-- Time-range scans on large tables +CREATE INDEX files_created_at_brin_idx + ON files_store_public.files USING brin (created_at); + +-- --------------------------------------------------------------------------- +-- 5. Triggers +-- --------------------------------------------------------------------------- + +-- 5a. AFTER INSERT -- enqueue process-image job +-- NOTE: Version rows are inserted with status = 'ready', which intentionally +-- bypasses this trigger (condition: NEW.status = 'pending'). Only origin +-- uploads (status = 'pending') need processing. + +CREATE OR REPLACE FUNCTION files_store_public.files_after_insert_queue_processing() +RETURNS trigger AS $$ +BEGIN + PERFORM app_jobs.add_job( + 'process-image', + json_build_object( + 'file_id', NEW.id, + 'database_id', NEW.database_id + ), + job_key := 'file:' || NEW.id::text + ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER files_after_insert_queue_processing + AFTER INSERT ON files_store_public.files + FOR EACH ROW + WHEN (NEW.status = 'pending') + EXECUTE FUNCTION files_store_public.files_after_insert_queue_processing(); + +COMMENT ON TRIGGER files_after_insert_queue_processing ON files_store_public.files IS + 'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.'; + +-- 5b. 
-- 5b. BEFORE UPDATE -- timestamp + state machine

CREATE OR REPLACE FUNCTION files_store_public.files_before_update_timestamp()
RETURNS trigger AS $$
DECLARE
  transition_ok boolean;
BEGIN
  -- Touch the timestamp on every update, regardless of which columns changed.
  NEW.updated_at := now();

  -- Validate the status state machine only when the status actually changes.
  IF OLD.status IS DISTINCT FROM NEW.status THEN
    transition_ok := CASE OLD.status
      WHEN 'pending'    THEN NEW.status IN ('processing', 'error')
      WHEN 'processing' THEN NEW.status IN ('ready', 'error', 'deleting')
      WHEN 'ready'      THEN NEW.status = 'deleting'
      WHEN 'error'      THEN NEW.status IN ('deleting', 'pending')
      ELSE false  -- 'deleting' is terminal: no transitions out
    END;

    IF NOT transition_ok THEN
      RAISE EXCEPTION 'Invalid status transition from % to %', OLD.status, NEW.status;
    END IF;

    -- Maintain the processing-window marker used for stuck-job detection.
    IF NEW.status = 'processing' THEN
      NEW.processing_started_at := now();
    ELSIF OLD.status = 'processing' AND NEW.status <> 'processing' THEN
      NEW.processing_started_at := NULL;
    END IF;
  END IF;

  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER files_before_update_timestamp
  BEFORE UPDATE ON files_store_public.files
  FOR EACH ROW
  EXECUTE FUNCTION files_store_public.files_before_update_timestamp();

COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS
  'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.';
-- 5c. AFTER UPDATE -- enqueue delete_s3_object job

CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion()
RETURNS trigger AS $$
BEGIN
  -- Payload carries the S3 key so the worker can delete the object without
  -- re-reading the row (which may be gone by the time the job runs).
  PERFORM app_jobs.add_job(
    'delete_s3_object',
    json_build_object(
      'file_id', NEW.id,
      'database_id', NEW.database_id,
      'key', NEW.key
    ),
    job_key := 'delete:' || NEW.id::text
  );
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- WHEN clause fires only on the edge into 'deleting', never on re-saves of an
-- already-deleting row.
CREATE TRIGGER files_after_update_queue_deletion
  AFTER UPDATE ON files_store_public.files
  FOR EACH ROW
  WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting')
  EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion();

COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS
  'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.';

-- 5d. AFTER UPDATE -- re-enqueue process-image on error->pending retry

CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_retry()
RETURNS trigger AS $$
BEGIN
  -- Same payload shape and job_key ('file:<id>') as the 5a insert-time enqueue --
  -- presumably so a retry reuses the same logical job slot; confirm add_job semantics.
  PERFORM app_jobs.add_job(
    'process-image',
    json_build_object(
      'file_id', NEW.id,
      'database_id', NEW.database_id
    ),
    job_key := 'file:' || NEW.id::text
  );
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER files_after_update_queue_retry
  AFTER UPDATE ON files_store_public.files
  FOR EACH ROW
  WHEN (OLD.status = 'error' AND NEW.status = 'pending')
  EXECUTE FUNCTION files_store_public.files_after_update_queue_retry();

COMMENT ON TRIGGER files_after_update_queue_retry ON files_store_public.files IS
  'Re-enqueues process-image job when a file is retried (error->pending). Without this trigger, the retry would change status but never re-enqueue the processing job.';

-- ---------------------------------------------------------------------------
-- 6. RLS Policies & Grants
-- ---------------------------------------------------------------------------

ALTER TABLE files_store_public.files ENABLE ROW LEVEL SECURITY;
-- FORCE makes RLS apply even to the table owner.
ALTER TABLE files_store_public.files FORCE ROW LEVEL SECURITY;

-- Policy 1: Tenant isolation (RESTRICTIVE -- always ANDed with all other policies)
-- Without this being RESTRICTIVE, permissive policies would OR together and
-- allow cross-tenant access (e.g. a ready file in tenant 2 visible via files_visibility).
-- NOTE(review): current_setting('app.database_id') is called WITHOUT the
-- missing_ok flag, so a session with no tenant context errors out rather than
-- seeing nothing -- confirm that fail-closed behavior is intended.
CREATE POLICY files_tenant_isolation ON files_store_public.files
  AS RESTRICTIVE
  FOR ALL
  USING (database_id = current_setting('app.database_id')::integer)
  WITH CHECK (database_id = current_setting('app.database_id')::integer);

-- Policy 2: Visibility for SELECT (authenticated + service_role only)
-- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling
-- when app.user_id is missing or empty (returns NULL instead of cast error).
-- Scoped to authenticated/service_role so anonymous only gets public_bucket_read.
CREATE POLICY files_visibility ON files_store_public.files
  FOR SELECT
  TO authenticated, service_role
  USING (
    status = 'ready'
    OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid
  );

-- Policy 3: Public bucket read for SELECT (all roles including anonymous)
-- Joins to buckets to honor the per-bucket is_public flag.
CREATE POLICY files_public_bucket_read ON files_store_public.files
  FOR SELECT
  USING (
    EXISTS (
      SELECT 1 FROM files_store_public.buckets b
      WHERE b.key = bucket_key
        AND b.database_id = files.database_id
        AND b.is_public = true
    )
    AND status = 'ready'
  );

-- Policy 4: Admin override (all operations, authenticated + service_role)
CREATE POLICY files_admin_override ON files_store_public.files
  FOR ALL
  TO authenticated, service_role
  USING (current_setting('app.role', true) = 'administrator')
  WITH CHECK (current_setting('app.role', true) = 'administrator');

-- Policy 5: INSERT access (permissive base so non-admin users can insert)
-- Tenant isolation (policy 1) still restricts database_id on the inserted row.
CREATE POLICY files_insert_access ON files_store_public.files
  FOR INSERT
  TO authenticated, service_role
  WITH CHECK (true);

-- Policy 6: UPDATE access (replicates visibility for row targeting)
-- Non-admin users can only update rows they can see (ready or own).
-- Admin override policy covers admin UPDATE access separately.
CREATE POLICY files_update_access ON files_store_public.files
  FOR UPDATE
  TO authenticated, service_role
  USING (
    status = 'ready'
    OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid
  )
  WITH CHECK (true);

-- Policy 7: DELETE access (service_role only, grants already restrict authenticated)
CREATE POLICY files_delete_access ON files_store_public.files
  FOR DELETE
  TO service_role
  USING (true);

-- Grants (row access is still filtered by the policies above)
GRANT SELECT, INSERT, UPDATE ON files_store_public.files TO authenticated;
GRANT SELECT, INSERT, UPDATE, DELETE ON files_store_public.files TO service_role;

COMMENT ON POLICY files_tenant_isolation ON files_store_public.files IS
  'Every query is scoped to the current tenant via app.database_id session variable.';
COMMENT ON POLICY files_visibility ON files_store_public.files IS
  'Users see all ready files in their tenant. Non-ready files visible only to the uploader.';
COMMENT ON POLICY files_public_bucket_read ON files_store_public.files IS
  'Allows unauthenticated reads on ready files in public buckets.';
COMMENT ON POLICY files_admin_override ON files_store_public.files IS
  'Administrators can see and modify all files in the tenant regardless of status or creator.';

-- ---------------------------------------------------------------------------
-- 7. Domain Table Triggers
-- ---------------------------------------------------------------------------

-- 7a. Generic trigger function: back-reference population
--
-- When a domain table's image/upload/attachment column is updated with an S3 key,
-- find the files row by key and populate source_table, source_column, source_id.
-- Also finds version rows by key prefix and populates the same back-reference.
--
-- Parameters (passed via TG_ARGV):
--   TG_ARGV[0] = column name (e.g. 'profile_picture')
--   TG_ARGV[1] = schema-qualified table name (e.g. 'constructive_users_public.users')
CREATE OR REPLACE FUNCTION files_store_public.populate_file_back_reference()
RETURNS trigger AS $$
DECLARE
  col_name text := TG_ARGV[0];    -- domain column holding the file jsonb
  table_name text := TG_ARGV[1];  -- schema-qualified domain table name
  new_val jsonb;
  old_val jsonb;
  new_key text;
  old_key text;
  base_key text;
  db_id integer;
BEGIN
  -- Get the database_id from session context (raises if unset -- fail closed).
  db_id := current_setting('app.database_id')::integer;

  -- Extract the jsonb value from the specified column (dynamic column access).
  EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW;
  EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO old_val USING OLD;

  -- Extract the S3 key from the new and old values.
  new_key := new_val ->> 'key';
  old_key := old_val ->> 'key';

  -- If no key change, nothing to do.
  IF new_key IS NOT DISTINCT FROM old_key THEN
    RETURN NEW;
  END IF;

  -- Handle file replacement: mark the old origin + all its versions as deleting.
  IF old_key IS NOT NULL AND old_key <> '' THEN
    -- Derive base key for the old file (strip the trailing _version suffix).
    base_key := regexp_replace(old_key, '_[^_]+$', '');

    -- FIX: the previous pattern (key LIKE base_key || '_%') treated '_' as the
    -- single-character LIKE wildcard, and any '_' inside base_key itself
    -- (bucket keys may legally contain underscores per buckets_key_format)
    -- wildcarded too, so unrelated keys could match. Compare a literal prefix
    -- instead of using LIKE.
    -- NOTE(review): a replaced file still in 'pending' would hit the 5b state
    -- machine (pending->deleting is not an allowed transition) and raise --
    -- confirm replacement can only occur after processing has started.
    UPDATE files_store_public.files
    SET status = 'deleting', status_reason = 'replaced by new file'
    WHERE database_id = db_id
      AND (key = old_key OR left(key, length(base_key) + 1) = base_key || '_')
      AND status NOT IN ('deleting');
  END IF;

  -- Populate back-reference on the new file (origin + versions).
  IF new_key IS NOT NULL AND new_key <> '' THEN
    -- Derive base key for the new file.
    base_key := regexp_replace(new_key, '_[^_]+$', '');

    -- NOTE(review): NEW.id assumes every wired-up domain table has an 'id'
    -- primary key -- confirm for all tables registered in section 7c.
    UPDATE files_store_public.files
    SET source_table = table_name,
        source_column = col_name,
        source_id = NEW.id
    WHERE database_id = db_id
      AND (key = new_key OR left(key, length(base_key) + 1) = base_key || '_');
  END IF;

  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION files_store_public.populate_file_back_reference() IS
  'Generic trigger function for domain tables. Populates source_table/source_column/source_id on files rows when image/upload/attachment columns are updated. Handles file replacement by marking old files as deleting.';

-- 7b. Generic trigger function: source row deletion
--
-- When a domain row is deleted, mark all associated files as deleting.

CREATE OR REPLACE FUNCTION files_store_public.mark_files_deleting_on_source_delete()
RETURNS trigger AS $$
DECLARE
  col_name text := TG_ARGV[0];    -- domain column the files were attached through
  table_name text := TG_ARGV[1];  -- schema-qualified domain table name
  db_id integer;
BEGIN
  db_id := current_setting('app.database_id')::integer;

  -- Mark all files for this source row + column as deleting (origin + versions,
  -- matched via the back-reference columns populated by 7a).
  UPDATE files_store_public.files
  SET status = 'deleting', status_reason = 'source row deleted'
  WHERE database_id = db_id
    AND source_table = table_name
    AND source_column = col_name
    AND source_id = OLD.id
    AND status NOT IN ('deleting');

  RETURN OLD;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION files_store_public.mark_files_deleting_on_source_delete() IS
  'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.';

-- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns
--
-- Each domain column gets two triggers:
--   - AFTER UPDATE: back-reference population + file replacement
--   - BEFORE DELETE: mark files deleting on source row deletion
--
-- These are wrapped in a DO block so they gracefully skip tables that
-- don't exist yet (e.g. in fresh dev environments). In production,
-- domain tables will exist before this migration runs.
-- Pattern per table: SELECT ... INTO sets FOUND, which gates trigger creation;
-- _tbl itself is assigned only to drive FOUND and is never read afterwards.

DO $domain_triggers$
DECLARE
  _tbl text;
BEGIN
  -- constructive_users_public.users.profile_picture
  SELECT 'constructive_users_public.users' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'constructive_users_public' AND table_name = 'users';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER users_profile_picture_file_ref
      AFTER UPDATE OF profile_picture ON constructive_users_public.users
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')';
    EXECUTE 'CREATE TRIGGER users_profile_picture_file_delete
      BEFORE DELETE ON constructive_users_public.users
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')';
    RAISE NOTICE 'Created triggers for constructive_users_public.users.profile_picture';
  ELSE
    RAISE NOTICE 'Skipped triggers for constructive_users_public.users (table not found)';
  END IF;

  -- constructive_status_public.app_levels.image
  SELECT 'constructive_status_public.app_levels' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'constructive_status_public' AND table_name = 'app_levels';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER app_levels_image_file_ref
      AFTER UPDATE OF image ON constructive_status_public.app_levels
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')';
    EXECUTE 'CREATE TRIGGER app_levels_image_file_delete
      BEFORE DELETE ON constructive_status_public.app_levels
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')';
    RAISE NOTICE 'Created triggers for constructive_status_public.app_levels.image';
  ELSE
    RAISE NOTICE 'Skipped triggers for constructive_status_public.app_levels (table not found)';
  END IF;

  -- services_public.sites (og_image, apple_touch_icon, logo, favicon)
  SELECT 'services_public.sites' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'services_public' AND table_name = 'sites';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER sites_og_image_file_ref
      AFTER UPDATE OF og_image ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')';
    EXECUTE 'CREATE TRIGGER sites_og_image_file_delete
      BEFORE DELETE ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')';

    EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_ref
      AFTER UPDATE OF apple_touch_icon ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')';
    EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_delete
      BEFORE DELETE ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')';

    EXECUTE 'CREATE TRIGGER sites_logo_file_ref
      AFTER UPDATE OF logo ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')';
    EXECUTE 'CREATE TRIGGER sites_logo_file_delete
      BEFORE DELETE ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')';

    EXECUTE 'CREATE TRIGGER sites_favicon_file_ref
      AFTER UPDATE OF favicon ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')';
    EXECUTE 'CREATE TRIGGER sites_favicon_file_delete
      BEFORE DELETE ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''favicon'', ''services_public.sites'')';
    RAISE NOTICE 'Created triggers for services_public.sites (og_image, apple_touch_icon, logo, favicon)';
  ELSE
    RAISE NOTICE 'Skipped triggers for services_public.sites (table not found)';
  END IF;

  -- services_public.apps.app_image
  SELECT 'services_public.apps' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'services_public' AND table_name = 'apps';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER apps_app_image_file_ref
      AFTER UPDATE OF app_image ON services_public.apps
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')';
    EXECUTE 'CREATE TRIGGER apps_app_image_file_delete
      BEFORE DELETE ON services_public.apps
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')';
    RAISE NOTICE 'Created triggers for services_public.apps.app_image';
  ELSE
    RAISE NOTICE 'Skipped triggers for services_public.apps (table not found)';
  END IF;

  -- services_public.site_metadata.og_image
  SELECT 'services_public.site_metadata' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'services_public' AND table_name = 'site_metadata';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_ref
      AFTER UPDATE OF og_image ON services_public.site_metadata
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')';
    EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_delete
      BEFORE DELETE ON services_public.site_metadata
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')';
    RAISE NOTICE 'Created triggers for services_public.site_metadata.og_image';
  ELSE
    RAISE NOTICE 'Skipped triggers for services_public.site_metadata (table not found)';
  END IF;

  -- db_migrate.migrate_files.upload
  SELECT
'db_migrate.migrate_files' INTO _tbl + FROM information_schema.tables + WHERE table_schema = 'db_migrate' AND table_name = 'migrate_files'; + IF FOUND THEN + EXECUTE 'CREATE TRIGGER migrate_files_upload_file_ref + AFTER UPDATE OF upload ON db_migrate.migrate_files + FOR EACH ROW + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')'; + EXECUTE 'CREATE TRIGGER migrate_files_upload_file_delete + BEFORE DELETE ON db_migrate.migrate_files + FOR EACH ROW + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')'; + RAISE NOTICE 'Created triggers for db_migrate.migrate_files.upload'; + ELSE + RAISE NOTICE 'Skipped triggers for db_migrate.migrate_files (table not found)'; + END IF; +END +$domain_triggers$; + +COMMIT; diff --git a/migrations/object_store.sql b/migrations/object_store.sql index 658b7169e..169ec8f99 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -1,7 +1,7 @@ -- ============================================================================= --- Constructive Upload System -- object_store_public schema +-- Constructive Upload System -- files_store_public schema -- ============================================================================= --- Run: psql -h localhost -U postgres -d constructive < migrations/object_store.sql +-- Run: psql -h localhost -U postgres -d constructive < migrations/files_store.sql -- ============================================================================= BEGIN; @@ -39,13 +39,13 @@ END; $$ LANGUAGE plpgsql; -- Ensure schema exists -CREATE SCHEMA IF NOT EXISTS object_store_public; +CREATE SCHEMA IF NOT EXISTS files_store_public; -- --------------------------------------------------------------------------- -- 1. 
Status ENUM -- --------------------------------------------------------------------------- -CREATE TYPE object_store_public.file_status AS ENUM ( +CREATE TYPE files_store_public.file_status AS ENUM ( 'pending', 'processing', 'ready', @@ -53,19 +53,19 @@ CREATE TYPE object_store_public.file_status AS ENUM ( 'deleting' ); -COMMENT ON TYPE object_store_public.file_status IS +COMMENT ON TYPE files_store_public.file_status IS 'Lifecycle states for managed files. Transitions: pending->{processing,error}, processing->{ready,error,deleting}, ready->deleting, error->{deleting,pending(retry)}.'; -- --------------------------------------------------------------------------- -- 2. Files Table -- --------------------------------------------------------------------------- -CREATE TABLE object_store_public.files ( +CREATE TABLE files_store_public.files ( id uuid NOT NULL DEFAULT gen_random_uuid(), database_id integer NOT NULL, bucket_key text NOT NULL DEFAULT 'default', key text NOT NULL, - status object_store_public.file_status NOT NULL DEFAULT 'pending', + status files_store_public.file_status NOT NULL DEFAULT 'pending', status_reason text, etag text, source_table text, @@ -90,28 +90,28 @@ CREATE TABLE object_store_public.files ( ) ); -COMMENT ON TABLE object_store_public.files IS +COMMENT ON TABLE files_store_public.files IS 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; -COMMENT ON COLUMN object_store_public.files.key IS +COMMENT ON COLUMN files_store_public.files.key IS 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. 
Origin files use _origin suffix.'; -COMMENT ON COLUMN object_store_public.files.etag IS +COMMENT ON COLUMN files_store_public.files.etag IS 'S3 ETag for reconciliation and cache validation.'; -COMMENT ON COLUMN object_store_public.files.status_reason IS +COMMENT ON COLUMN files_store_public.files.status_reason IS 'Human-readable reason for current status (error details, deletion reason).'; -COMMENT ON COLUMN object_store_public.files.processing_started_at IS +COMMENT ON COLUMN files_store_public.files.processing_started_at IS 'Timestamp when processing began. Used to detect stuck jobs (alert at 15 min).'; -COMMENT ON COLUMN object_store_public.files.source_table IS +COMMENT ON COLUMN files_store_public.files.source_table IS 'Schema-qualified table name referencing this file (e.g. constructive_users_public.users). NULL until the domain trigger populates it. Free text -- no FK possible.'; -COMMENT ON COLUMN object_store_public.files.source_column IS +COMMENT ON COLUMN files_store_public.files.source_column IS 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; -COMMENT ON COLUMN object_store_public.files.source_id IS +COMMENT ON COLUMN files_store_public.files.source_id IS 'Primary key of the row in the source table. NULL until domain trigger populates it.'; -- --------------------------------------------------------------------------- -- 3. Buckets Table -- --------------------------------------------------------------------------- -CREATE TABLE object_store_public.buckets ( +CREATE TABLE files_store_public.buckets ( id uuid NOT NULL DEFAULT gen_random_uuid(), database_id integer NOT NULL, key text NOT NULL, @@ -128,7 +128,7 @@ CREATE TABLE object_store_public.buckets ( CONSTRAINT buckets_key_format CHECK (key ~ '^[a-z][a-z0-9_-]*$') ); -COMMENT ON TABLE object_store_public.buckets IS +COMMENT ON TABLE files_store_public.buckets IS 'Logical bucket configuration per tenant. 
The bucket key maps to the S3 key prefix segment. is_public controls RLS policy for anonymous reads.'; -- --------------------------------------------------------------------------- @@ -137,38 +137,38 @@ COMMENT ON TABLE object_store_public.buckets IS -- Tenant queries CREATE INDEX files_database_id_idx - ON object_store_public.files (database_id); + ON files_store_public.files (database_id); -- Bucket + tenant queries CREATE INDEX files_bucket_database_id_idx - ON object_store_public.files (bucket_key, database_id); + ON files_store_public.files (bucket_key, database_id); -- "My uploads" queries CREATE INDEX files_created_by_database_id_created_at_idx - ON object_store_public.files (created_by, database_id, created_at DESC); + ON files_store_public.files (created_by, database_id, created_at DESC); -- Back-reference lookups (cleanup worker, attachment queries) CREATE INDEX files_source_ref_idx - ON object_store_public.files (source_table, source_column, source_id); + ON files_store_public.files (source_table, source_column, source_id); -- Pending file reaper (hourly cron) CREATE INDEX files_pending_created_at_idx - ON object_store_public.files (created_at) + ON files_store_public.files (created_at) WHERE status = 'pending'; -- Stuck processing detection CREATE INDEX files_processing_idx - ON object_store_public.files (processing_started_at) + ON files_store_public.files (processing_started_at) WHERE status = 'processing'; -- Deletion job queue CREATE INDEX files_deleting_idx - ON object_store_public.files (updated_at) + ON files_store_public.files (updated_at) WHERE status = 'deleting'; -- Time-range scans on large tables CREATE INDEX files_created_at_brin_idx - ON object_store_public.files USING brin (created_at); + ON files_store_public.files USING brin (created_at); -- --------------------------------------------------------------------------- -- 5. 
Triggers @@ -179,7 +179,7 @@ CREATE INDEX files_created_at_brin_idx -- bypasses this trigger (condition: NEW.status = 'pending'). Only origin -- uploads (status = 'pending') need processing. -CREATE OR REPLACE FUNCTION object_store_public.files_after_insert_queue_processing() +CREATE OR REPLACE FUNCTION files_store_public.files_after_insert_queue_processing() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( @@ -195,17 +195,17 @@ END; $$ LANGUAGE plpgsql; CREATE TRIGGER files_after_insert_queue_processing - AFTER INSERT ON object_store_public.files + AFTER INSERT ON files_store_public.files FOR EACH ROW WHEN (NEW.status = 'pending') - EXECUTE FUNCTION object_store_public.files_after_insert_queue_processing(); + EXECUTE FUNCTION files_store_public.files_after_insert_queue_processing(); -COMMENT ON TRIGGER files_after_insert_queue_processing ON object_store_public.files IS +COMMENT ON TRIGGER files_after_insert_queue_processing ON files_store_public.files IS 'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.'; -- 5b. BEFORE UPDATE -- timestamp + state machine -CREATE OR REPLACE FUNCTION object_store_public.files_before_update_timestamp() +CREATE OR REPLACE FUNCTION files_store_public.files_before_update_timestamp() RETURNS trigger AS $$ BEGIN -- Always update timestamp @@ -235,16 +235,16 @@ END; $$ LANGUAGE plpgsql; CREATE TRIGGER files_before_update_timestamp - BEFORE UPDATE ON object_store_public.files + BEFORE UPDATE ON files_store_public.files FOR EACH ROW - EXECUTE FUNCTION object_store_public.files_before_update_timestamp(); + EXECUTE FUNCTION files_store_public.files_before_update_timestamp(); -COMMENT ON TRIGGER files_before_update_timestamp ON object_store_public.files IS +COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; -- 5c. 
AFTER UPDATE -- enqueue delete_s3_object job -CREATE OR REPLACE FUNCTION object_store_public.files_after_update_queue_deletion() +CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( @@ -261,17 +261,17 @@ END; $$ LANGUAGE plpgsql; CREATE TRIGGER files_after_update_queue_deletion - AFTER UPDATE ON object_store_public.files + AFTER UPDATE ON files_store_public.files FOR EACH ROW WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting') - EXECUTE FUNCTION object_store_public.files_after_update_queue_deletion(); + EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); -COMMENT ON TRIGGER files_after_update_queue_deletion ON object_store_public.files IS +COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; -- 5d. AFTER UPDATE -- re-enqueue process-image on error->pending retry -CREATE OR REPLACE FUNCTION object_store_public.files_after_update_queue_retry() +CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_retry() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( @@ -287,25 +287,25 @@ END; $$ LANGUAGE plpgsql; CREATE TRIGGER files_after_update_queue_retry - AFTER UPDATE ON object_store_public.files + AFTER UPDATE ON files_store_public.files FOR EACH ROW WHEN (OLD.status = 'error' AND NEW.status = 'pending') - EXECUTE FUNCTION object_store_public.files_after_update_queue_retry(); + EXECUTE FUNCTION files_store_public.files_after_update_queue_retry(); -COMMENT ON TRIGGER files_after_update_queue_retry ON object_store_public.files IS +COMMENT ON TRIGGER files_after_update_queue_retry ON files_store_public.files IS 'Re-enqueues process-image job when a file is retried (error->pending). 
Without this trigger, the retry would change status but never re-enqueue the processing job.'; -- --------------------------------------------------------------------------- -- 6. RLS Policies & Grants -- --------------------------------------------------------------------------- -ALTER TABLE object_store_public.files ENABLE ROW LEVEL SECURITY; -ALTER TABLE object_store_public.files FORCE ROW LEVEL SECURITY; +ALTER TABLE files_store_public.files ENABLE ROW LEVEL SECURITY; +ALTER TABLE files_store_public.files FORCE ROW LEVEL SECURITY; -- Policy 1: Tenant isolation (RESTRICTIVE -- always ANDed with all other policies) -- Without this being RESTRICTIVE, permissive policies would OR together and -- allow cross-tenant access (e.g. a ready file in tenant 2 visible via files_visibility). -CREATE POLICY files_tenant_isolation ON object_store_public.files +CREATE POLICY files_tenant_isolation ON files_store_public.files AS RESTRICTIVE FOR ALL USING (database_id = current_setting('app.database_id')::integer) @@ -315,7 +315,7 @@ CREATE POLICY files_tenant_isolation ON object_store_public.files -- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling -- when app.user_id is missing or empty (returns NULL instead of cast error). -- Scoped to authenticated/service_role so anonymous only gets public_bucket_read. 
-CREATE POLICY files_visibility ON object_store_public.files +CREATE POLICY files_visibility ON files_store_public.files FOR SELECT TO authenticated, service_role USING ( @@ -324,11 +324,11 @@ CREATE POLICY files_visibility ON object_store_public.files ); -- Policy 3: Public bucket read for SELECT (all roles including anonymous) -CREATE POLICY files_public_bucket_read ON object_store_public.files +CREATE POLICY files_public_bucket_read ON files_store_public.files FOR SELECT USING ( EXISTS ( - SELECT 1 FROM object_store_public.buckets b + SELECT 1 FROM files_store_public.buckets b WHERE b.key = bucket_key AND b.database_id = files.database_id AND b.is_public = true @@ -337,14 +337,14 @@ CREATE POLICY files_public_bucket_read ON object_store_public.files ); -- Policy 4: Admin override (all operations, authenticated + service_role) -CREATE POLICY files_admin_override ON object_store_public.files +CREATE POLICY files_admin_override ON files_store_public.files FOR ALL TO authenticated, service_role USING (current_setting('app.role', true) = 'administrator') WITH CHECK (current_setting('app.role', true) = 'administrator'); -- Policy 5: INSERT access (permissive base so non-admin users can insert) -CREATE POLICY files_insert_access ON object_store_public.files +CREATE POLICY files_insert_access ON files_store_public.files FOR INSERT TO authenticated, service_role WITH CHECK (true); @@ -352,7 +352,7 @@ CREATE POLICY files_insert_access ON object_store_public.files -- Policy 6: UPDATE access (replicates visibility for row targeting) -- Non-admin users can only update rows they can see (ready or own). -- Admin override policy covers admin UPDATE access separately. 
-CREATE POLICY files_update_access ON object_store_public.files +CREATE POLICY files_update_access ON files_store_public.files FOR UPDATE TO authenticated, service_role USING ( @@ -362,22 +362,22 @@ CREATE POLICY files_update_access ON object_store_public.files WITH CHECK (true); -- Policy 7: DELETE access (service_role only, grants already restrict authenticated) -CREATE POLICY files_delete_access ON object_store_public.files +CREATE POLICY files_delete_access ON files_store_public.files FOR DELETE TO service_role USING (true); -- Grants -GRANT SELECT, INSERT, UPDATE ON object_store_public.files TO authenticated; -GRANT SELECT, INSERT, UPDATE, DELETE ON object_store_public.files TO service_role; +GRANT SELECT, INSERT, UPDATE ON files_store_public.files TO authenticated; +GRANT SELECT, INSERT, UPDATE, DELETE ON files_store_public.files TO service_role; -COMMENT ON POLICY files_tenant_isolation ON object_store_public.files IS +COMMENT ON POLICY files_tenant_isolation ON files_store_public.files IS 'Every query is scoped to the current tenant via app.database_id session variable.'; -COMMENT ON POLICY files_visibility ON object_store_public.files IS +COMMENT ON POLICY files_visibility ON files_store_public.files IS 'Users see all ready files in their tenant. Non-ready files visible only to the uploader.'; -COMMENT ON POLICY files_public_bucket_read ON object_store_public.files IS +COMMENT ON POLICY files_public_bucket_read ON files_store_public.files IS 'Allows unauthenticated reads on ready files in public buckets.'; -COMMENT ON POLICY files_admin_override ON object_store_public.files IS +COMMENT ON POLICY files_admin_override ON files_store_public.files IS 'Administrators can see and modify all files in the tenant regardless of status or creator.'; -- --------------------------------------------------------------------------- @@ -394,7 +394,7 @@ COMMENT ON POLICY files_admin_override ON object_store_public.files IS -- TG_ARGV[0] = column name (e.g. 
'profile_picture') -- TG_ARGV[1] = schema-qualified table name (e.g. 'constructive_users_public.users') -CREATE OR REPLACE FUNCTION object_store_public.populate_file_back_reference() +CREATE OR REPLACE FUNCTION files_store_public.populate_file_back_reference() RETURNS trigger AS $$ DECLARE col_name text := TG_ARGV[0]; @@ -428,7 +428,7 @@ BEGIN base_key := regexp_replace(old_key, '_[^_]+$', ''); -- Mark old origin + all versions as deleting - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'deleting', status_reason = 'replaced by new file' WHERE database_id = db_id AND (key = old_key OR key LIKE base_key || '_%') @@ -441,7 +441,7 @@ BEGIN base_key := regexp_replace(new_key, '_[^_]+$', ''); -- Set back-reference on origin + all version rows - UPDATE object_store_public.files + UPDATE files_store_public.files SET source_table = table_name, source_column = col_name, source_id = NEW.id @@ -453,14 +453,14 @@ BEGIN END; $$ LANGUAGE plpgsql; -COMMENT ON FUNCTION object_store_public.populate_file_back_reference() IS +COMMENT ON FUNCTION files_store_public.populate_file_back_reference() IS 'Generic trigger function for domain tables. Populates source_table/source_column/source_id on files rows when image/upload/attachment columns are updated. Handles file replacement by marking old files as deleting.'; -- 7b. Generic trigger function: source row deletion -- -- When a domain row is deleted, mark all associated files as deleting. 
-CREATE OR REPLACE FUNCTION object_store_public.mark_files_deleting_on_source_delete() +CREATE OR REPLACE FUNCTION files_store_public.mark_files_deleting_on_source_delete() RETURNS trigger AS $$ DECLARE col_name text := TG_ARGV[0]; @@ -470,7 +470,7 @@ BEGIN db_id := current_setting('app.database_id')::integer; -- Mark all files for this source row + column as deleting - UPDATE object_store_public.files + UPDATE files_store_public.files SET status = 'deleting', status_reason = 'source row deleted' WHERE database_id = db_id AND source_table = table_name @@ -482,7 +482,7 @@ BEGIN END; $$ LANGUAGE plpgsql; -COMMENT ON FUNCTION object_store_public.mark_files_deleting_on_source_delete() IS +COMMENT ON FUNCTION files_store_public.mark_files_deleting_on_source_delete() IS 'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.'; -- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns @@ -507,11 +507,11 @@ BEGIN EXECUTE 'CREATE TRIGGER users_profile_picture_file_ref AFTER UPDATE OF profile_picture ON constructive_users_public.users FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')'; EXECUTE 'CREATE TRIGGER users_profile_picture_file_delete BEFORE DELETE ON constructive_users_public.users FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')'; RAISE NOTICE 'Created triggers for constructive_users_public.users.profile_picture'; ELSE RAISE NOTICE 'Skipped triggers for constructive_users_public.users (table not found)'; @@ -525,11 +525,11 @@ BEGIN EXECUTE 'CREATE TRIGGER 
app_levels_image_file_ref AFTER UPDATE OF image ON constructive_status_public.app_levels FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')'; EXECUTE 'CREATE TRIGGER app_levels_image_file_delete BEFORE DELETE ON constructive_status_public.app_levels FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')'; RAISE NOTICE 'Created triggers for constructive_status_public.app_levels.image'; ELSE RAISE NOTICE 'Skipped triggers for constructive_status_public.app_levels (table not found)'; @@ -543,38 +543,38 @@ BEGIN EXECUTE 'CREATE TRIGGER sites_og_image_file_ref AFTER UPDATE OF og_image ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_og_image_file_delete BEFORE DELETE ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_ref AFTER UPDATE OF apple_touch_icon ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER 
sites_apple_touch_icon_file_delete BEFORE DELETE ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_logo_file_ref AFTER UPDATE OF logo ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_logo_file_delete BEFORE DELETE ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_favicon_file_ref AFTER UPDATE OF favicon ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')'; EXECUTE 'CREATE TRIGGER sites_favicon_file_delete BEFORE DELETE ON services_public.sites FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''favicon'', ''services_public.sites'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''favicon'', ''services_public.sites'')'; RAISE NOTICE 'Created triggers for services_public.sites (og_image, apple_touch_icon, logo, favicon)'; ELSE RAISE NOTICE 'Skipped triggers for services_public.sites (table not found)'; @@ -588,11 +588,11 @@ BEGIN EXECUTE 'CREATE TRIGGER apps_app_image_file_ref AFTER UPDATE OF app_image ON services_public.apps FOR EACH ROW - EXECUTE FUNCTION 
object_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')'; EXECUTE 'CREATE TRIGGER apps_app_image_file_delete BEFORE DELETE ON services_public.apps FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')'; RAISE NOTICE 'Created triggers for services_public.apps.app_image'; ELSE RAISE NOTICE 'Skipped triggers for services_public.apps (table not found)'; @@ -606,11 +606,11 @@ BEGIN EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_ref AFTER UPDATE OF og_image ON services_public.site_metadata FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')'; EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_delete BEFORE DELETE ON services_public.site_metadata FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')'; RAISE NOTICE 'Created triggers for services_public.site_metadata.og_image'; ELSE RAISE NOTICE 'Skipped triggers for services_public.site_metadata (table not found)'; @@ -624,11 +624,11 @@ BEGIN EXECUTE 'CREATE TRIGGER migrate_files_upload_file_ref AFTER UPDATE OF upload ON db_migrate.migrate_files FOR EACH ROW - EXECUTE FUNCTION object_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')'; + EXECUTE FUNCTION files_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')'; EXECUTE 
'CREATE TRIGGER migrate_files_upload_file_delete BEFORE DELETE ON db_migrate.migrate_files FOR EACH ROW - EXECUTE FUNCTION object_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')'; + EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')'; RAISE NOTICE 'Created triggers for db_migrate.migrate_files.upload'; ELSE RAISE NOTICE 'Skipped triggers for db_migrate.migrate_files (table not found)'; From f5ddd9adcfa016350fbccb466aaa5cb18ccb64b2 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Sat, 14 Mar 2026 13:26:40 +0800 Subject: [PATCH 6/9] app jobs fix --- migrations/__tests__/app-jobs-stub.sql | 20 ++++++++++++ .../__tests__/object-store-lifecycle.test.ts | 3 +- migrations/__tests__/object-store-rls.test.ts | 3 +- migrations/files_store.sql | 32 ++++++++----------- migrations/object_store.sql | 32 ++++++++----------- 5 files changed, 50 insertions(+), 40 deletions(-) create mode 100644 migrations/__tests__/app-jobs-stub.sql diff --git a/migrations/__tests__/app-jobs-stub.sql b/migrations/__tests__/app-jobs-stub.sql new file mode 100644 index 000000000..29989606c --- /dev/null +++ b/migrations/__tests__/app-jobs-stub.sql @@ -0,0 +1,20 @@ +-- Test-only stub for app_jobs.add_job. +-- In production, this is provided by pgpm-database-jobs. +-- This file must be loaded BEFORE files_store.sql in test seeds. 
+ +CREATE SCHEMA IF NOT EXISTS app_jobs; + +CREATE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL +) RETURNS void AS $$ +BEGIN + RAISE NOTICE '[TEST STUB] app_jobs.add_job: % %', identifier, payload; +END; +$$ LANGUAGE plpgsql; diff --git a/migrations/__tests__/object-store-lifecycle.test.ts b/migrations/__tests__/object-store-lifecycle.test.ts index e18cf0aad..49f91a01c 100644 --- a/migrations/__tests__/object-store-lifecycle.test.ts +++ b/migrations/__tests__/object-store-lifecycle.test.ts @@ -4,6 +4,7 @@ import { resolve } from 'path'; import { getConnections, PgTestClient, seed } from 'pgsql-test'; +const APP_JOBS_STUB_PATH = resolve(__dirname, 'app-jobs-stub.sql'); const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; @@ -44,7 +45,7 @@ async function clearJobLog() { beforeAll(async () => { ({ pg, teardown } = await getConnections( {}, - [seed.sqlfile([MIGRATION_PATH])] + [seed.sqlfile([APP_JOBS_STUB_PATH, MIGRATION_PATH])] )); // Ensure anonymous role exists diff --git a/migrations/__tests__/object-store-rls.test.ts b/migrations/__tests__/object-store-rls.test.ts index 62b7ed40b..f06df2883 100644 --- a/migrations/__tests__/object-store-rls.test.ts +++ b/migrations/__tests__/object-store-rls.test.ts @@ -4,6 +4,7 @@ import { resolve } from 'path'; import { getConnections, PgTestClient, seed } from 'pgsql-test'; +const APP_JOBS_STUB_PATH = resolve(__dirname, 'app-jobs-stub.sql'); const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; @@ -75,7 +76,7 @@ async function insertFixtures() { beforeAll(async () => { ({ pg, teardown } = await getConnections( {}, - [seed.sqlfile([MIGRATION_PATH])] + 
[seed.sqlfile([APP_JOBS_STUB_PATH, MIGRATION_PATH])] )); // Ensure anonymous role exists (cluster-wide, idempotent) diff --git a/migrations/files_store.sql b/migrations/files_store.sql index 169ec8f99..c3f522200 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -16,27 +16,21 @@ DO $$ BEGIN END IF; END $$; --- Ensure app_jobs schema + stub add_job exist (required by trigger functions). --- In production, app_jobs is deployed by the database-jobs pgpm module. --- This stub is a no-op that prevents trigger creation from failing in dev. +-- Require app_jobs.add_job to exist (provided by pgpm-database-jobs). +-- Deploy pgpm-database-jobs BEFORE running this migration. +-- DO NOT stub this function here -- CREATE OR REPLACE would silently overwrite +-- the production implementation, causing all trigger-enqueued jobs to be lost. CREATE SCHEMA IF NOT EXISTS app_jobs; -CREATE OR REPLACE FUNCTION app_jobs.add_job( - identifier text, - payload json DEFAULT '{}'::json, - queue_name text DEFAULT NULL, - run_at timestamptz DEFAULT NULL, - max_attempts integer DEFAULT NULL, - job_key text DEFAULT NULL, - priority integer DEFAULT NULL, - flags text[] DEFAULT NULL -) RETURNS void AS $$ -BEGIN - -- Stub: in production this is provided by database-jobs pgpm module. - -- In dev, jobs are enqueued but not processed unless the job worker is running. - RAISE NOTICE 'app_jobs.add_job stub called: % %', identifier, payload; -END; -$$ LANGUAGE plpgsql; +DO $$ BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_proc p + JOIN pg_namespace n ON p.pronamespace = n.oid + WHERE n.nspname = 'app_jobs' AND p.proname = 'add_job' + ) THEN + RAISE EXCEPTION 'app_jobs.add_job not found. 
Deploy pgpm-database-jobs before running this migration.'; + END IF; +END $$; -- Ensure schema exists CREATE SCHEMA IF NOT EXISTS files_store_public; diff --git a/migrations/object_store.sql b/migrations/object_store.sql index 169ec8f99..c3f522200 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -16,27 +16,21 @@ DO $$ BEGIN END IF; END $$; --- Ensure app_jobs schema + stub add_job exist (required by trigger functions). --- In production, app_jobs is deployed by the database-jobs pgpm module. --- This stub is a no-op that prevents trigger creation from failing in dev. +-- Require app_jobs.add_job to exist (provided by pgpm-database-jobs). +-- Deploy pgpm-database-jobs BEFORE running this migration. +-- DO NOT stub this function here -- CREATE OR REPLACE would silently overwrite +-- the production implementation, causing all trigger-enqueued jobs to be lost. CREATE SCHEMA IF NOT EXISTS app_jobs; -CREATE OR REPLACE FUNCTION app_jobs.add_job( - identifier text, - payload json DEFAULT '{}'::json, - queue_name text DEFAULT NULL, - run_at timestamptz DEFAULT NULL, - max_attempts integer DEFAULT NULL, - job_key text DEFAULT NULL, - priority integer DEFAULT NULL, - flags text[] DEFAULT NULL -) RETURNS void AS $$ -BEGIN - -- Stub: in production this is provided by database-jobs pgpm module. - -- In dev, jobs are enqueued but not processed unless the job worker is running. - RAISE NOTICE 'app_jobs.add_job stub called: % %', identifier, payload; -END; -$$ LANGUAGE plpgsql; +DO $$ BEGIN + IF NOT EXISTS ( + SELECT 1 FROM pg_proc p + JOIN pg_namespace n ON p.pronamespace = n.oid + WHERE n.nspname = 'app_jobs' AND p.proname = 'add_job' + ) THEN + RAISE EXCEPTION 'app_jobs.add_job not found. 
Deploy pgpm-database-jobs before running this migration.'; + END IF; +END $$; -- Ensure schema exists CREATE SCHEMA IF NOT EXISTS files_store_public; From f6a70413639cfe3259bf290cec62c653a1357bb5 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Sat, 14 Mar 2026 13:38:55 +0800 Subject: [PATCH 7/9] Job identifier mismatch --- migrations/__tests__/object-store-lifecycle.test.ts | 8 ++++---- migrations/files_store.sql | 6 +++--- migrations/object_store.sql | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/migrations/__tests__/object-store-lifecycle.test.ts b/migrations/__tests__/object-store-lifecycle.test.ts index 49f91a01c..83d827c5f 100644 --- a/migrations/__tests__/object-store-lifecycle.test.ts +++ b/migrations/__tests__/object-store-lifecycle.test.ts @@ -429,7 +429,7 @@ describe('E2E-03: Deletion Flow', () => { await pg.afterEach(); }); - it('ready → deleting queues delete_s3_object job', async () => { + it('ready → deleting queues delete-s3-object job', async () => { await pg.query(` INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) VALUES ($1, 1, $2, 'default', $3, 'etag', 'ready') @@ -443,7 +443,7 @@ describe('E2E-03: Deletion Flow', () => { const jobs = await getJobLog(); expect(jobs).toHaveLength(1); - expect(jobs[0].identifier).toBe('delete_s3_object'); + expect(jobs[0].identifier).toBe('delete-s3-object'); expect(jobs[0].job_key).toBe(`delete:${ORIGIN_ID}`); expect(jobs[0].payload.key).toBe(ORIGIN_KEY); }); @@ -489,7 +489,7 @@ describe('E2E-03: Deletion Flow', () => { const jobs = await getJobLog(); expect(jobs).toHaveLength(1); - expect(jobs[0].identifier).toBe('delete_s3_object'); + expect(jobs[0].identifier).toBe('delete-s3-object'); }); it('service_role can hard-DELETE after marking as deleting', async () => { @@ -848,7 +848,7 @@ describe('E2E-06: Full lifecycle under RLS', () => { await pg.query('RESET ROLE'); jobs = await getJobLog(); expect(jobs).toHaveLength(3); - 
expect(jobs.every((j: any) => j.identifier === 'delete_s3_object')).toBe(true); + expect(jobs.every((j: any) => j.identifier === 'delete-s3-object')).toBe(true); const deletedKeys = jobs.map((j: any) => j.payload.key).sort(); expect(deletedKeys).toEqual([LARGE_KEY, ORIGIN_KEY, THUMB_KEY]); diff --git a/migrations/files_store.sql b/migrations/files_store.sql index c3f522200..76a6a8026 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -236,13 +236,13 @@ CREATE TRIGGER files_before_update_timestamp COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; --- 5c. AFTER UPDATE -- enqueue delete_s3_object job +-- 5c. AFTER UPDATE -- enqueue delete-s3-object job CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( - 'delete_s3_object', + 'delete-s3-object', json_build_object( 'file_id', NEW.id, 'database_id', NEW.database_id, @@ -261,7 +261,7 @@ CREATE TRIGGER files_after_update_queue_deletion EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS - 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; + 'Enqueues delete-s3-object job when a file transitions to deleting status. Each version row gets its own deletion job.'; -- 5d. 
AFTER UPDATE -- re-enqueue process-image on error->pending retry diff --git a/migrations/object_store.sql b/migrations/object_store.sql index c3f522200..76a6a8026 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -236,13 +236,13 @@ CREATE TRIGGER files_before_update_timestamp COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS 'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.'; --- 5c. AFTER UPDATE -- enqueue delete_s3_object job +-- 5c. AFTER UPDATE -- enqueue delete-s3-object job CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion() RETURNS trigger AS $$ BEGIN PERFORM app_jobs.add_job( - 'delete_s3_object', + 'delete-s3-object', json_build_object( 'file_id', NEW.id, 'database_id', NEW.database_id, @@ -261,7 +261,7 @@ CREATE TRIGGER files_after_update_queue_deletion EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion(); COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS - 'Enqueues delete_s3_object job when a file transitions to deleting status. Each version row gets its own deletion job.'; + 'Enqueues delete-s3-object job when a file transitions to deleting status. Each version row gets its own deletion job.'; -- 5d. 
AFTER UPDATE -- re-enqueue process-image on error->pending retry From 443a56bd16df6564f46ca62f87b68cb14b4ed2e7 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Sat, 14 Mar 2026 14:03:00 +0800 Subject: [PATCH 8/9] Cron scheduling implemented --- migrations/__tests__/file-cleanup.test.ts | 370 ++++++++++++++++++++++ migrations/files_store.sql | 75 +++++ migrations/object_store.sql | 75 +++++ 3 files changed, 520 insertions(+) create mode 100644 migrations/__tests__/file-cleanup.test.ts diff --git a/migrations/__tests__/file-cleanup.test.ts b/migrations/__tests__/file-cleanup.test.ts new file mode 100644 index 000000000..95502ac47 --- /dev/null +++ b/migrations/__tests__/file-cleanup.test.ts @@ -0,0 +1,370 @@ +jest.setTimeout(60000); + +import { resolve } from 'path'; + +import { getConnections, PgTestClient, seed } from 'pgsql-test'; + +const APP_JOBS_STUB_PATH = resolve(__dirname, 'app-jobs-stub.sql'); +const MIGRATION_PATH = resolve(__dirname, '../files_store.sql'); + +const USER_A = 'aaaaaaaa-0000-0000-0000-000000000001'; + +let pg: PgTestClient; +let teardown: () => Promise; + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/** Read all recorded jobs from the job_log table */ +async function getJobLog() { + const result = await pg.query( + 'SELECT identifier, payload, job_key FROM _test_job_log ORDER BY logged_at' + ); + return result.rows; +} + +async function clearJobLog() { + await pg.query('DELETE FROM _test_job_log'); +} + +// --------------------------------------------------------------------------- +// Setup +// --------------------------------------------------------------------------- + +beforeAll(async () => { + ({ pg, teardown } = await getConnections( + {}, + [seed.sqlfile([APP_JOBS_STUB_PATH, MIGRATION_PATH])] + )); + + // Ensure anonymous role exists + await pg.query(` + DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE 
rolname = 'anonymous') THEN + CREATE ROLE anonymous NOLOGIN; + END IF; + END $$ + `); + + // Grants needed for isolated test + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO authenticated'); + await pg.query('GRANT USAGE ON SCHEMA files_store_public TO service_role'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO authenticated'); + await pg.query('GRANT SELECT ON files_store_public.buckets TO service_role'); + + // Replace the app_jobs.add_job stub with one that records calls + await pg.query(` + CREATE TABLE _test_job_log ( + logged_at timestamptz NOT NULL DEFAULT now(), + identifier text NOT NULL, + payload json, + job_key text + ) + `); + + await pg.query(` + CREATE OR REPLACE FUNCTION app_jobs.add_job( + identifier text, + payload json DEFAULT '{}'::json, + queue_name text DEFAULT NULL, + run_at timestamptz DEFAULT NULL, + max_attempts integer DEFAULT NULL, + job_key text DEFAULT NULL, + priority integer DEFAULT NULL, + flags text[] DEFAULT NULL + ) RETURNS void AS $$ + BEGIN + INSERT INTO _test_job_log (identifier, payload, job_key) + VALUES (identifier, payload, job_key); + END; + $$ LANGUAGE plpgsql + `); + + await pg.query('GRANT USAGE ON SCHEMA app_jobs TO authenticated, service_role'); + await pg.query('GRANT EXECUTE ON FUNCTION app_jobs.add_job(text, json, text, timestamptz, integer, text, integer, text[]) TO authenticated, service_role'); + await pg.query('GRANT INSERT ON _test_job_log TO authenticated, service_role'); + + // Seed a default bucket + await pg.query(` + INSERT INTO files_store_public.buckets (database_id, key, name, is_public, config) + VALUES (1, 'default', 'Default Bucket', false, '{}') + `); +}); + +afterAll(async () => { + await teardown(); +}); + +// ========================================================================== +// Cleanup-01: pending_reaper -- pending → error (valid transition) +// ========================================================================== + +describe('Cleanup-01: 
pending_reaper', () => { + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('marks stale pending files as error', async () => { + // Insert a pending file with created_at older than 24 hours + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, created_at) + VALUES + ('c1000000-0000-0000-0000-000000000001', 1, '1/default/stale_pending', 'default', $1, 'etag1', 'pending', now() - interval '25 hours') + `, [USER_A]); + await clearJobLog(); + + // Run the cleanup query directly (simulates what the handler does) + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'error', status_reason = 'upload timeout' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'pending' AND created_at < now() - interval '24 hours' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(1); + + // Verify the file is now in error status + const file = await pg.query( + "SELECT status, status_reason FROM files_store_public.files WHERE id = 'c1000000-0000-0000-0000-000000000001'" + ); + expect(file.rows[0].status).toBe('error'); + expect(file.rows[0].status_reason).toBe('upload timeout'); + }); + + it('does not affect recent pending files', async () => { + // Insert a pending file with recent created_at + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES + ('c1000000-0000-0000-0000-000000000002', 1, '1/default/recent_pending', 'default', $1, 'etag2', 'pending') + `, [USER_A]); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'error', status_reason = 'upload timeout' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'pending' AND created_at < now() - interval '24 hours' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(0); + + // File should 
still be pending + const file = await pg.query( + "SELECT status FROM files_store_public.files WHERE id = 'c1000000-0000-0000-0000-000000000002'" + ); + expect(file.rows[0].status).toBe('pending'); + }); +}); + +// ========================================================================== +// Cleanup-02: error_cleanup -- error → deleting (valid transition) +// ========================================================================== + +describe('Cleanup-02: error_cleanup', () => { + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('marks old error files as deleting', async () => { + // Insert an error file with updated_at older than 30 days + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, updated_at) + VALUES + ('c2000000-0000-0000-0000-000000000001', 1, '1/default/old_error', 'default', $1, 'etag1', 'error', now() - interval '31 days') + `, [USER_A]); + await clearJobLog(); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'expired error' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'error' AND updated_at < now() - interval '30 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(1); + + const file = await pg.query( + "SELECT status, status_reason FROM files_store_public.files WHERE id = 'c2000000-0000-0000-0000-000000000001'" + ); + expect(file.rows[0].status).toBe('deleting'); + expect(file.rows[0].status_reason).toBe('expired error'); + + // Verify the delete-s3-object job was auto-enqueued by the trigger + const jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('delete-s3-object'); + }); + + it('does not affect recent error files', async () => { + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, 
status) + VALUES + ('c2000000-0000-0000-0000-000000000002', 1, '1/default/recent_error', 'default', $1, 'etag2', 'error') + `, [USER_A]); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'expired error' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'error' AND updated_at < now() - interval '30 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// Cleanup-03: unattached_cleanup -- ready → deleting (valid transition) +// This is the ISSUE-006 fix regression test. +// ========================================================================== + +describe('Cleanup-03: unattached_cleanup', () => { + beforeEach(async () => { + await pg.beforeEach(); + await clearJobLog(); + }); + + afterEach(async () => { + await pg.afterEach(); + }); + + it('marks unattached ready files as deleting (not error)', async () => { + // Insert a ready file with no source_table, older than 7 days + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, created_at) + VALUES + ('c3000000-0000-0000-0000-000000000001', 1, '1/default/unattached', 'default', $1, 'etag1', 'ready', now() - interval '8 days') + `, [USER_A]); + await clearJobLog(); + + // Run the FIXED cleanup query (ready → deleting, NOT ready → error) + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'never attached' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'ready' AND source_table IS NULL AND created_at < now() - interval '7 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(1); + + const file = await pg.query( + "SELECT status, status_reason FROM files_store_public.files WHERE id = 'c3000000-0000-0000-0000-000000000001'" + ); + 
expect(file.rows[0].status).toBe('deleting'); + expect(file.rows[0].status_reason).toBe('never attached'); + + // Verify the delete-s3-object job was auto-enqueued + const jobs = await getJobLog(); + expect(jobs).toHaveLength(1); + expect(jobs[0].identifier).toBe('delete-s3-object'); + }); + + it('ready → error is rejected by state machine (regression for ISSUE-006)', async () => { + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, created_at) + VALUES + ('c3000000-0000-0000-0000-000000000002', 1, '1/default/unattached2', 'default', $1, 'etag2', 'ready', now() - interval '8 days') + `, [USER_A]); + + // The OLD buggy query (ready → error) should be rejected + await expect( + pg.query(` + UPDATE files_store_public.files + SET status = 'error', status_reason = 'never attached' + WHERE id = 'c3000000-0000-0000-0000-000000000002' + `) + ).rejects.toThrow(/Invalid status transition from ready to error/); + }); + + it('does not affect attached ready files', async () => { + // Insert a ready file WITH source_table (attached) + await pg.query(` + INSERT INTO files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status, created_at, + source_table, source_column, source_id) + VALUES + ('c3000000-0000-0000-0000-000000000003', 1, '1/default/attached', 'default', $1, 'etag3', 'ready', + now() - interval '8 days', 'some_schema.some_table', 'image', gen_random_uuid()) + `, [USER_A]); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'never attached' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'ready' AND source_table IS NULL AND created_at < now() - interval '7 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(0); + }); + + it('does not affect recent unattached files', async () => { + // Insert a ready file with no source_table but recent created_at + await pg.query(` + INSERT INTO 
files_store_public.files (id, database_id, key, bucket_key, created_by, etag, status) + VALUES + ('c3000000-0000-0000-0000-000000000004', 1, '1/default/recent_unattached', 'default', $1, 'etag4', 'ready') + `, [USER_A]); + + const result = await pg.query(` + UPDATE files_store_public.files + SET status = 'deleting', status_reason = 'never attached' + WHERE id IN ( + SELECT id FROM files_store_public.files + WHERE status = 'ready' AND source_table IS NULL AND created_at < now() - interval '7 days' + LIMIT 1000 + ) + `); + + expect(result.rowCount).toBe(0); + }); +}); + +// ========================================================================== +// Cleanup-04: Scheduled job registration +// ========================================================================== + +describe('Cleanup-04: Scheduled job registration', () => { + it('migration registers file-cleanup scheduled jobs when metaschema is present', async () => { + // The migration's cron block looks up metaschema_public.database. + // In isolated test DBs this table doesn't exist, so scheduled jobs + // are not registered (the block skips silently). This test verifies + // the skip path doesn't error. + // + // To test actual registration, we'd need to deploy metaschema first. + // Instead, we verify the schedule SQL is syntactically valid by checking + // it didn't abort the migration transaction. + const result = await pg.query( + "SELECT COUNT(*) as cnt FROM files_store_public.files WHERE 1=0" + ); + // If migration committed successfully, table exists + expect(result.rows[0].cnt).toBe('0'); + }); +}); diff --git a/migrations/files_store.sql b/migrations/files_store.sql index 76a6a8026..d98a2c042 100644 --- a/migrations/files_store.sql +++ b/migrations/files_store.sql @@ -630,4 +630,79 @@ BEGIN END $domain_triggers$; +-- --------------------------------------------------------------------------- +-- 8. 
Scheduled cleanup jobs (requires pgpm-database-jobs with scheduling) +-- --------------------------------------------------------------------------- +-- Register recurring file-cleanup jobs via app_jobs.add_scheduled_job. +-- The scheduler (knative-job-service) picks these up and spawns one-shot jobs +-- on the configured schedule. Each job calls the file-cleanup function with +-- the appropriate cleanup type. +-- +-- Schedules: +-- pending_reaper: every hour (clear stale pending uploads) +-- error_cleanup: daily at 03:00 UTC (expire old error files) +-- unattached_cleanup: daily at 04:00 UTC (clean unattached ready files) +-- --------------------------------------------------------------------------- + +DO $cron$ +DECLARE + v_db_id uuid; +BEGIN + -- Look up the database ID for the current database. + -- If metaschema_public.database is not deployed yet, skip silently. + BEGIN + SELECT id INTO v_db_id + FROM metaschema_public.database + ORDER BY created_at + LIMIT 1; + EXCEPTION WHEN undefined_table THEN + RAISE NOTICE 'metaschema_public.database not found, skipping scheduled job registration.'; + RETURN; + END; + + IF v_db_id IS NULL THEN + RAISE NOTICE 'No database row found, skipping scheduled job registration.'; + RETURN; + END IF; + + -- pending_reaper: every hour (minute 0) + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"pending_reaper"}'::json, + schedule_info := '{"minute": 0}'::json, + job_key := 'file-cleanup:pending_reaper', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + -- error_cleanup: daily at 03:00 UTC + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"error_cleanup"}'::json, + schedule_info := '{"hour": 3, "minute": 0}'::json, + job_key := 'file-cleanup:error_cleanup', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + -- unattached_cleanup: daily at 04:00 UTC + 
PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"unattached_cleanup"}'::json, + schedule_info := '{"hour": 4, "minute": 0}'::json, + job_key := 'file-cleanup:unattached_cleanup', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + RAISE NOTICE 'Registered 3 file-cleanup scheduled jobs for database %', v_db_id; +END +$cron$; + COMMIT; diff --git a/migrations/object_store.sql b/migrations/object_store.sql index 76a6a8026..d98a2c042 100644 --- a/migrations/object_store.sql +++ b/migrations/object_store.sql @@ -630,4 +630,79 @@ BEGIN END $domain_triggers$; +-- --------------------------------------------------------------------------- +-- 8. Scheduled cleanup jobs (requires pgpm-database-jobs with scheduling) +-- --------------------------------------------------------------------------- +-- Register recurring file-cleanup jobs via app_jobs.add_scheduled_job. +-- The scheduler (knative-job-service) picks these up and spawns one-shot jobs +-- on the configured schedule. Each job calls the file-cleanup function with +-- the appropriate cleanup type. +-- +-- Schedules: +-- pending_reaper: every hour (clear stale pending uploads) +-- error_cleanup: daily at 03:00 UTC (expire old error files) +-- unattached_cleanup: daily at 04:00 UTC (clean unattached ready files) +-- --------------------------------------------------------------------------- + +DO $cron$ +DECLARE + v_db_id uuid; +BEGIN + -- Look up the database ID for the current database. + -- If metaschema_public.database is not deployed yet, skip silently. 
+ BEGIN + SELECT id INTO v_db_id + FROM metaschema_public.database + ORDER BY created_at + LIMIT 1; + EXCEPTION WHEN undefined_table THEN + RAISE NOTICE 'metaschema_public.database not found, skipping scheduled job registration.'; + RETURN; + END; + + IF v_db_id IS NULL THEN + RAISE NOTICE 'No database row found, skipping scheduled job registration.'; + RETURN; + END IF; + + -- pending_reaper: every hour (minute 0) + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"pending_reaper"}'::json, + schedule_info := '{"minute": 0}'::json, + job_key := 'file-cleanup:pending_reaper', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + -- error_cleanup: daily at 03:00 UTC + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"error_cleanup"}'::json, + schedule_info := '{"hour": 3, "minute": 0}'::json, + job_key := 'file-cleanup:error_cleanup', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + -- unattached_cleanup: daily at 04:00 UTC + PERFORM app_jobs.add_scheduled_job( + db_id := v_db_id, + identifier := 'file-cleanup', + payload := '{"type":"unattached_cleanup"}'::json, + schedule_info := '{"hour": 4, "minute": 0}'::json, + job_key := 'file-cleanup:unattached_cleanup', + queue_name := 'maintenance', + max_attempts := 3, + priority := 100 + ); + + RAISE NOTICE 'Registered 3 file-cleanup scheduled jobs for database %', v_db_id; +END +$cron$; + COMMIT; From 3a80ebcd0c33b29b3eed63591c092117a61c5b4a Mon Sep 17 00:00:00 2001 From: zetazzz Date: Sat, 14 Mar 2026 14:33:07 +0800 Subject: [PATCH 9/9] remove misleading flag --- .../__tests__/upload-resolver.e2e.test.ts | 1 - .../__tests__/upload-resolver.test.ts | 121 +-- .../graphile-settings/src/upload-resolver.ts | 159 +--- graphql/explorer/src/resolvers/uploads.ts | 32 +- migrations/object_store.sql | 708 ------------------ uploads/s3-streamer/src/index.ts
| 1 + 6 files changed, 129 insertions(+), 893 deletions(-) delete mode 100644 migrations/object_store.sql diff --git a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts index a06dd603e..e9b6c30fd 100644 --- a/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts +++ b/graphile/graphile-settings/__tests__/upload-resolver.e2e.test.ts @@ -101,7 +101,6 @@ describe('upload-resolver e2e', () => { const uploadedKeys = new Set(); beforeAll(async () => { - process.env.UPLOAD_V2_ENABLED = 'true'; process.env.BUCKET_PROVIDER = 'minio'; process.env.BUCKET_NAME = BUCKET; process.env.AWS_REGION = 'us-east-1'; diff --git a/graphile/graphile-settings/__tests__/upload-resolver.test.ts b/graphile/graphile-settings/__tests__/upload-resolver.test.ts index d7f0d1caa..349b4a36f 100644 --- a/graphile/graphile-settings/__tests__/upload-resolver.test.ts +++ b/graphile/graphile-settings/__tests__/upload-resolver.test.ts @@ -1,31 +1,29 @@ import { Readable } from 'stream'; -interface MockUploadResult { - upload: { Location: string }; - contentType: string; -} - async function loadUploadResolverModule(opts: { detectedContentType: string; - uploadResultContentType?: string; }) { jest.resetModules(); - const mockDetectContentType = jest.fn().mockResolvedValue({ + const mockStreamContentType = jest.fn().mockResolvedValue({ stream: Readable.from([Buffer.alloc(16)]), magic: { type: opts.detectedContentType, charset: 'binary' }, contentType: opts.detectedContentType, }); - const mockUploadWithContentType = jest.fn().mockResolvedValue({ - upload: { Location: 'https://cdn.example.com/uploaded-file' }, - contentType: opts.uploadResultContentType ?? 
opts.detectedContentType, - } as MockUploadResult); + const mockUpload = jest.fn().mockResolvedValue({ etag: 'test-etag' }); + const mockPresignGet = jest.fn().mockResolvedValue('https://cdn.example.com/signed-url'); - const mockUpload = jest.fn().mockResolvedValue({ - upload: { Location: 'https://cdn.example.com/storage-upload' }, - contentType: 'application/octet-stream', - } as MockUploadResult); + const MockS3StorageProvider = jest.fn().mockImplementation(() => ({ + upload: mockUpload, + presignGet: mockPresignGet, + })); + + const mockPoolQuery = jest.fn().mockResolvedValue({ rows: [], rowCount: 0 }); + const MockPool = jest.fn().mockImplementation(() => ({ + query: mockPoolQuery, + end: jest.fn(), + })); jest.doMock('@constructive-io/graphql-env', () => ({ getEnvOptions: jest.fn(() => ({ @@ -40,25 +38,24 @@ async function loadUploadResolverModule(opts: { })), })); - jest.doMock('@constructive-io/s3-streamer', () => { - const StreamerMock = jest.fn().mockImplementation(() => ({ - upload: mockUpload, - uploadWithContentType: mockUploadWithContentType, - detectContentType: mockDetectContentType, - })); - return { - __esModule: true, - default: StreamerMock, - }; - }); + jest.doMock('@constructive-io/s3-streamer', () => ({ + __esModule: true, + S3StorageProvider: MockS3StorageProvider, + streamContentType: mockStreamContentType, + })); + + jest.doMock('pg', () => ({ + Pool: MockPool, + })); const mod = await import('../src/upload-resolver'); return { ...mod, - mockDetectContentType, - mockUploadWithContentType, + mockStreamContentType, mockUpload, + mockPresignGet, + mockPoolQuery, }; } @@ -69,12 +66,21 @@ function makeFakeUpload(filename: string) { }; } +function makeFakeContext(databaseId?: string, userId?: string) { + return { + req: { + api: { databaseId }, + token: { user_id: userId }, + }, + }; +} + describe('uploadResolver MIME validation', () => { it('rejects disallowed MIME before uploading to storage', async () => { const { 
constructiveUploadFieldDefinitions, - mockDetectContentType, - mockUploadWithContentType, + mockStreamContentType, + mockUpload, } = await loadUploadResolverModule({ detectedContentType: 'application/pdf', }); @@ -92,23 +98,23 @@ describe('uploadResolver MIME validation', () => { imageDef.resolve( fakeUpload as any, {}, - {}, + makeFakeContext('1'), { uploadPlugin: { tags: {}, type: 'image' } }, ), ).rejects.toThrow('UPLOAD_MIMETYPE'); - expect(mockDetectContentType).toHaveBeenCalledTimes(1); - expect(mockUploadWithContentType).not.toHaveBeenCalled(); + expect(mockStreamContentType).toHaveBeenCalledTimes(1); + expect(mockUpload).not.toHaveBeenCalled(); }); it('uploads and returns image metadata when MIME is allowed', async () => { const { constructiveUploadFieldDefinitions, - mockDetectContentType, - mockUploadWithContentType, + mockStreamContentType, + mockUpload, + mockPresignGet, } = await loadUploadResolverModule({ detectedContentType: 'image/png', - uploadResultContentType: 'image/png', }); const imageDef = constructiveUploadFieldDefinitions.find( @@ -123,21 +129,44 @@ describe('uploadResolver MIME validation', () => { const result = await imageDef.resolve( fakeUpload as any, {}, - {}, + makeFakeContext('1', 'user-123'), { uploadPlugin: { tags: {}, type: 'image' } }, ); - expect(result).toEqual({ - filename: 'photo.png', - mime: 'image/png', - url: 'https://cdn.example.com/uploaded-file', - }); - expect(mockDetectContentType).toHaveBeenCalledTimes(1); - expect(mockUploadWithContentType).toHaveBeenCalledTimes(1); - expect(mockUploadWithContentType).toHaveBeenCalledWith( + expect(result).toEqual( expect.objectContaining({ - contentType: 'image/png', + filename: 'photo.png', + mime: 'image/png', + url: 'https://cdn.example.com/signed-url', + key: expect.stringMatching(/^1\/default\/[0-9a-f-]+_origin$/), }), ); + expect(mockStreamContentType).toHaveBeenCalledTimes(1); + expect(mockUpload).toHaveBeenCalledTimes(1); + expect(mockPresignGet).toHaveBeenCalledTimes(1); 
+ }); + + it('throws when databaseId is missing', async () => { + const { constructiveUploadFieldDefinitions } = await loadUploadResolverModule({ + detectedContentType: 'image/png', + }); + + const imageDef = constructiveUploadFieldDefinitions.find( + (def) => 'name' in def && def.name === 'image', + ); + if (!imageDef) { + throw new Error('Missing image upload field definition'); + } + + const fakeUpload = makeFakeUpload('photo.png'); + + await expect( + imageDef.resolve( + fakeUpload as any, + {}, + {}, // no databaseId + { uploadPlugin: { tags: {}, type: 'image' } }, + ), + ).rejects.toThrow('databaseId is required'); }); }); diff --git a/graphile/graphile-settings/src/upload-resolver.ts b/graphile/graphile-settings/src/upload-resolver.ts index 2e5d08647..a60a1704b 100644 --- a/graphile/graphile-settings/src/upload-resolver.ts +++ b/graphile/graphile-settings/src/upload-resolver.ts @@ -4,18 +4,16 @@ * Reads CDN/S3/MinIO configuration from environment variables (via getEnvOptions) * and streams uploaded files to the configured storage backend. * - * Lazily initializes the S3 streamer on first upload to avoid requiring + * Lazily initializes the S3 storage provider on first upload to avoid requiring * env vars at module load time. * - * V2 mode (UPLOAD_V2_ENABLED=true): - * - Key format: {database_id}/{bucket_key}/{uuid}_origin - * - INSERT into files_store_public.files after S3 upload - * - Returns { key, url, mime, filename } for image/upload types + * Key format: {database_id}/{bucket_key}/{uuid}_origin + * INSERTs into files_store_public.files after S3 upload. + * The AFTER INSERT trigger enqueues a process-image job automatically. 
* - * Legacy mode (UPLOAD_V2_ENABLED=false, default): - * - Key format: {random24hex}-{sanitized-filename} - * - No files table INSERT - * - Returns { url, mime, filename } for image/upload types + * Callers must associate the returned metadata with a domain table row via a + * GraphQL mutation; the domain trigger automatically populates source_* fields; + * files not associated within 7 days are cleaned up by unattached_cleanup cron. * * ENV VARS: * BUCKET_PROVIDER - 'minio' | 's3' (default: 'minio') @@ -24,16 +22,14 @@ * AWS_ACCESS_KEY - access key (default: 'minioadmin') * AWS_SECRET_KEY - secret key (default: 'minioadmin') * MINIO_ENDPOINT - MinIO endpoint (default: 'http://localhost:9000') - * UPLOAD_V2_ENABLED - enable v2 upload with files index (default: 'false') */ -import Streamer from '@constructive-io/s3-streamer'; -import { S3StorageProvider } from '@constructive-io/s3-streamer'; +import { S3StorageProvider, streamContentType } from '@constructive-io/s3-streamer'; import type { StorageProvider } from '@constructive-io/s3-streamer'; import uploadNames from '@constructive-io/upload-names'; import { getEnvOptions } from '@constructive-io/graphql-env'; import { Logger } from '@pgpmjs/logger'; -import { randomBytes, randomUUID } from 'crypto'; +import { randomUUID } from 'crypto'; import { Pool } from 'pg'; import type { Readable } from 'stream'; import type { @@ -45,14 +41,10 @@ import type { const log = new Logger('upload-resolver'); const DEFAULT_IMAGE_MIME_TYPES = ['image/jpeg', 'image/png', 'image/svg+xml']; -let streamer: Streamer | null = null; let storageProvider: StorageProvider | null = null; let bucketName: string; let pgPool: Pool | null = null; -const isV2Enabled = (): boolean => - process.env.UPLOAD_V2_ENABLED === 'true' || process.env.UPLOAD_V2_ENABLED === '1'; - function getCdnConfig() { const opts = getEnvOptions(); const cdn = opts.cdn || {}; @@ -66,8 +58,8 @@ function getCdnConfig() { }; } -function getStreamer(): Streamer { - if 
(streamer) return streamer; +function getStorageProvider(): StorageProvider { + if (storageProvider) return storageProvider; const cdn = getCdnConfig(); bucketName = cdn.bucketName; @@ -82,24 +74,6 @@ function getStreamer(): Streamer { `[upload-resolver] Initializing: provider=${cdn.provider} bucket=${bucketName}`, ); - streamer = new Streamer({ - defaultBucket: bucketName, - awsRegion: cdn.awsRegion, - awsSecretKey: cdn.awsSecretKey, - awsAccessKey: cdn.awsAccessKey, - minioEndpoint: cdn.minioEndpoint, - provider: cdn.provider, - }); - - return streamer; -} - -function getStorageProvider(): StorageProvider { - if (storageProvider) return storageProvider; - - const cdn = getCdnConfig(); - bucketName = cdn.bucketName; - storageProvider = new S3StorageProvider({ bucket: cdn.bucketName, awsRegion: cdn.awsRegion, @@ -125,15 +99,6 @@ function getPgPool(): Pool { return pgPool; } -/** - * Generates a randomized storage key from a filename (legacy format). - * Format: {random24hex}-{sanitized-filename} - */ -function generateLegacyKey(filename: string): string { - const rand = randomBytes(12).toString('hex'); - return `${rand}-${uploadNames(filename)}`; -} - /** * Generates a v2 storage key. * Format: {database_id}/{bucket_key}/{uuid}_origin @@ -169,7 +134,6 @@ async function insertFileRecord( * In PostGraphile, context contains the Express request. */ function extractContextInfo(context: any): { databaseId: string | null; userId: string | null } { - // PostGraphile v5 stores the request on context const req = context?.req || context?.request; const databaseId = req?.api?.databaseId || req?.databaseId || null; const userId = req?.token?.user_id || null; @@ -180,53 +144,34 @@ function extractContextInfo(context: any): { databaseId: string | null; userId: * Streams a file to S3/MinIO storage and returns the URL and metadata. * * Reusable by both the GraphQL upload resolver and REST /upload endpoint. 
- * - * When UPLOAD_V2_ENABLED, uses the new key format and INSERTs a files row. */ export async function streamToStorage( readStream: Readable, filename: string, opts?: { databaseId?: string; userId?: string; bucketKey?: string }, ): Promise<{ url: string; filename: string; mime: string; key?: string }> { - if (isV2Enabled() && opts?.databaseId) { - const storage = getStorageProvider(); - const bucketKey = opts.bucketKey || 'default'; - const { key, fileId } = generateV2Key(opts.databaseId, bucketKey); + const storage = getStorageProvider(); + const bucketKey = opts?.bucketKey || 'default'; + const databaseId = opts?.databaseId; - const s3 = getStreamer(); - const detected = await s3.detectContentType({ readStream, filename }); - const contentType = detected.contentType; + if (!databaseId) { + throw new Error('[upload-resolver] databaseId is required for file uploads'); + } - const result = await storage.upload(key, detected.stream, { contentType }); + const { key, fileId } = generateV2Key(databaseId, bucketKey); - await insertFileRecord(fileId, opts.databaseId, bucketKey, key, result.etag, opts.userId || null); + const detected = await streamContentType({ readStream, filename }); + const contentType = detected.contentType; - const url = await storage.presignGet(key, 3600); - return { key, url, filename, mime: contentType }; - } + const result = await storage.upload(key, detected.stream, { contentType }); - // Legacy path - const s3 = getStreamer(); - const key = generateLegacyKey(filename); - const uploadResult = await s3.upload({ - readStream, - filename, - key, - bucket: bucketName, - }); - return { - url: uploadResult.upload.Location, - filename, - mime: uploadResult.contentType, - }; + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, opts?.userId || null); + + const url = await storage.presignGet(key, 3600); + return { key, url, filename, mime: contentType }; } export async function __resetUploadResolverForTests(): Promise { - if 
(streamer && typeof (streamer as { destroy?: () => void }).destroy === 'function') { - streamer.destroy(); - } - streamer = null; - if ( storageProvider && typeof (storageProvider as StorageProvider & { destroy?: () => void }).destroy === 'function' @@ -245,7 +190,7 @@ export async function __resetUploadResolverForTests(): Promise { * Upload resolver that streams files to S3/MinIO. * * Returns different shapes based on the column's type hint: - * - 'image' / 'upload' → { key, url, mime, filename } (v2) or { url, mime, filename } (legacy) + * - 'image' / 'upload' → { key, url, mime, filename } * - 'attachment' / default → url string (for text domain columns) * * MIME validation happens before persistence: content type is detected from @@ -258,7 +203,6 @@ async function uploadResolver( info: { uploadPlugin: UploadPluginInfo }, ): Promise { const { tags, type } = info.uploadPlugin; - const s3 = getStreamer(); const { filename } = upload; // MIME type validation from smart tags @@ -274,7 +218,7 @@ async function uploadResolver( ? 
DEFAULT_IMAGE_MIME_TYPES : []; - const detected = await s3.detectContentType({ + const detected = await streamContentType({ readStream: upload.createReadStream(), filename, }); @@ -285,54 +229,29 @@ async function uploadResolver( throw new Error('UPLOAD_MIMETYPE'); } - // V2 path: new key format + files table INSERT - if (isV2Enabled()) { - const { databaseId, userId } = extractContextInfo(_context); - - if (databaseId) { - const storage = getStorageProvider(); - const bucketKey = 'default'; - const { key, fileId } = generateV2Key(databaseId, bucketKey); - - const result = await storage.upload(key, detected.stream, { - contentType: detectedContentType, - }); - - await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, userId); + const { databaseId, userId } = extractContextInfo(_context); - const url = await storage.presignGet(key, 3600); - - switch (typ) { - case 'image': - case 'upload': - return { key, filename, mime: detectedContentType, url }; - case 'attachment': - default: - return url; - } - } - - log.warn('[upload-resolver] V2 enabled but no databaseId in context, falling back to legacy'); + if (!databaseId) { + detected.stream.destroy(); + throw new Error('[upload-resolver] databaseId is required for file uploads'); } - // Legacy path - const key = generateLegacyKey(filename); + const storage = getStorageProvider(); + const bucketKey = 'default'; + const { key, fileId } = generateV2Key(databaseId, bucketKey); - const result = await s3.uploadWithContentType({ - readStream: detected.stream, + const result = await storage.upload(key, detected.stream, { contentType: detectedContentType, - magic: detected.magic, - key, - bucket: bucketName, }); - const url = result.upload.Location; - const { contentType } = result; + await insertFileRecord(fileId, databaseId, bucketKey, key, result.etag, userId); + + const url = await storage.presignGet(key, 3600); switch (typ) { case 'image': case 'upload': - return { filename, mime: contentType, url }; + 
return { key, filename, mime: detectedContentType, url }; case 'attachment': default: return url; diff --git a/graphql/explorer/src/resolvers/uploads.ts b/graphql/explorer/src/resolvers/uploads.ts index b06aed646..1561c7b71 100644 --- a/graphql/explorer/src/resolvers/uploads.ts +++ b/graphql/explorer/src/resolvers/uploads.ts @@ -1,4 +1,4 @@ -import Streamer from '@constructive-io/s3-streamer'; +import { S3StorageProvider, streamContentType } from '@constructive-io/s3-streamer'; import uploadNames from '@constructive-io/upload-names'; import { ReadStream } from 'fs'; import type { GraphQLResolveInfo } from 'graphql'; @@ -26,16 +26,18 @@ interface UploadPluginInfo { } export class UploadHandler { - private streamer: Streamer; + private storage: S3StorageProvider; + private bucketName: string; constructor(private options: UploaderOptions) { - this.streamer = new Streamer({ - defaultBucket: options.bucketName, + this.bucketName = options.bucketName; + this.storage = new S3StorageProvider({ + bucket: options.bucketName, awsRegion: options.awsRegion, awsSecretKey: options.awsSecretKey, awsAccessKey: options.awsAccessKey, minioEndpoint: options.minioEndpoint, - provider: options.provider + provider: options.provider, }); } @@ -50,25 +52,15 @@ export class UploadHandler { } = info; const readStream = upload.createReadStream() as ReadStream; - const { filename, mimetype } = upload; + const { filename } = upload; const rand = Math.random().toString(36).substring(2, 7) + Math.random().toString(36).substring(2, 7); const key = rand + '-' + uploadNames(filename); - const result = await this.streamer.upload({ - readStream, - filename, - key, - bucket: this.options.bucketName - }); - - const url = result.upload.Location; - const { - contentType, - magic: { charset } - } = result; + const detected = await streamContentType({ readStream, filename }); + const { contentType } = detected; const typ = type || tags.type; @@ -79,9 +71,13 @@ export class UploadHandler { : []; if 
(mim.length && !mim.includes(contentType)) { + detected.stream.destroy(); throw new Error(`UPLOAD_MIMETYPE ${mim.join(',')}`); } + await this.storage.upload(key, detected.stream, { contentType }); + const url = await this.storage.presignGet(key, 3600); + switch (typ) { case 'image': case 'upload': diff --git a/migrations/object_store.sql b/migrations/object_store.sql deleted file mode 100644 index d98a2c042..000000000 --- a/migrations/object_store.sql +++ /dev/null @@ -1,708 +0,0 @@ --- ============================================================================= --- Constructive Upload System -- files_store_public schema --- ============================================================================= --- Run: psql -h localhost -U postgres -d constructive < migrations/files_store.sql --- ============================================================================= - -BEGIN; - --- Ensure required roles exist (idempotent for dev environments) -DO $$ BEGIN - IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'authenticated') THEN - CREATE ROLE authenticated NOLOGIN; - END IF; - IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'service_role') THEN - CREATE ROLE service_role NOLOGIN; - END IF; -END $$; - --- Require app_jobs.add_job to exist (provided by pgpm-database-jobs). --- Deploy pgpm-database-jobs BEFORE running this migration. --- DO NOT stub this function here -- CREATE OR REPLACE would silently overwrite --- the production implementation, causing all trigger-enqueued jobs to be lost. -CREATE SCHEMA IF NOT EXISTS app_jobs; - -DO $$ BEGIN - IF NOT EXISTS ( - SELECT 1 FROM pg_proc p - JOIN pg_namespace n ON p.pronamespace = n.oid - WHERE n.nspname = 'app_jobs' AND p.proname = 'add_job' - ) THEN - RAISE EXCEPTION 'app_jobs.add_job not found. 
Deploy pgpm-database-jobs before running this migration.'; - END IF; -END $$; - --- Ensure schema exists -CREATE SCHEMA IF NOT EXISTS files_store_public; - --- --------------------------------------------------------------------------- --- 1. Status ENUM --- --------------------------------------------------------------------------- - -CREATE TYPE files_store_public.file_status AS ENUM ( - 'pending', - 'processing', - 'ready', - 'error', - 'deleting' -); - -COMMENT ON TYPE files_store_public.file_status IS - 'Lifecycle states for managed files. Transitions: pending->{processing,error}, processing->{ready,error,deleting}, ready->deleting, error->{deleting,pending(retry)}.'; - --- --------------------------------------------------------------------------- --- 2. Files Table --- --------------------------------------------------------------------------- - -CREATE TABLE files_store_public.files ( - id uuid NOT NULL DEFAULT gen_random_uuid(), - database_id integer NOT NULL, - bucket_key text NOT NULL DEFAULT 'default', - key text NOT NULL, - status files_store_public.file_status NOT NULL DEFAULT 'pending', - status_reason text, - etag text, - source_table text, - source_column text, - source_id uuid, - processing_started_at timestamptz, - created_by uuid, - created_at timestamptz NOT NULL DEFAULT now(), - updated_at timestamptz NOT NULL DEFAULT now(), - - CONSTRAINT files_pkey PRIMARY KEY (id, database_id), - CONSTRAINT files_key_unique UNIQUE (key, database_id), - CONSTRAINT files_key_not_empty CHECK (key <> ''), - CONSTRAINT files_key_max_length CHECK (length(key) <= 1024), - CONSTRAINT files_bucket_key_format CHECK (bucket_key ~ '^[a-z][a-z0-9_-]*$'), - CONSTRAINT files_source_table_format CHECK ( - source_table IS NULL OR source_table ~ '^[a-z_]+\.[a-z_]+$' - ), - CONSTRAINT files_source_complete CHECK ( - (source_table IS NULL AND source_column IS NULL AND source_id IS NULL) - OR (source_table IS NOT NULL AND source_column IS NOT NULL AND source_id IS NOT NULL) - ) 
-); - -COMMENT ON TABLE files_store_public.files IS - 'Operational index for S3 objects. Each row = one physical S3 object (including generated versions). NOT a source of truth for file metadata -- domain tables own that.'; -COMMENT ON COLUMN files_store_public.files.key IS - 'Full S3 object key. Format: {database_id}/{bucket_key}/{uuid}_{version_name}. Origin files use _origin suffix.'; -COMMENT ON COLUMN files_store_public.files.etag IS - 'S3 ETag for reconciliation and cache validation.'; -COMMENT ON COLUMN files_store_public.files.status_reason IS - 'Human-readable reason for current status (error details, deletion reason).'; -COMMENT ON COLUMN files_store_public.files.processing_started_at IS - 'Timestamp when processing began. Used to detect stuck jobs (alert at 15 min).'; -COMMENT ON COLUMN files_store_public.files.source_table IS - 'Schema-qualified table name referencing this file (e.g. constructive_users_public.users). NULL until the domain trigger populates it. Free text -- no FK possible.'; -COMMENT ON COLUMN files_store_public.files.source_column IS - 'Column name on the source table (e.g. profile_picture). NULL until domain trigger populates it.'; -COMMENT ON COLUMN files_store_public.files.source_id IS - 'Primary key of the row in the source table. NULL until domain trigger populates it.'; - --- --------------------------------------------------------------------------- --- 3. 
-- Buckets Table (continuation of the section 3 header)
-- ---------------------------------------------------------------------------

CREATE TABLE files_store_public.buckets (
  id uuid NOT NULL DEFAULT gen_random_uuid(),
  -- Tenant id; matched against the app.database_id session variable by RLS.
  database_id integer NOT NULL,
  -- Logical bucket key; becomes the {bucket_key} segment of S3 object keys.
  key text NOT NULL,
  name text NOT NULL,
  -- Drives the anonymous-read RLS policy (files_public_bucket_read).
  is_public boolean NOT NULL DEFAULT false,
  config jsonb NOT NULL DEFAULT '{}'::jsonb,
  created_by uuid,
  updated_by uuid,
  created_at timestamptz NOT NULL DEFAULT now(),
  updated_at timestamptz NOT NULL DEFAULT now(),

  -- Bucket identity is scoped per tenant, hence the composite keys.
  CONSTRAINT buckets_pkey PRIMARY KEY (id, database_id),
  CONSTRAINT buckets_key_unique UNIQUE (key, database_id),
  -- Lowercase, must start with a letter; digits, underscore and hyphen allowed.
  CONSTRAINT buckets_key_format CHECK (key ~ '^[a-z][a-z0-9_-]*$')
);

COMMENT ON TABLE files_store_public.buckets IS
  'Logical bucket configuration per tenant. The bucket key maps to the S3 key prefix segment. is_public controls RLS policy for anonymous reads.';

-- ---------------------------------------------------------------------------
-- 4. Indexes
-- ---------------------------------------------------------------------------

-- Tenant queries
CREATE INDEX files_database_id_idx
  ON files_store_public.files (database_id);

-- Bucket + tenant queries
CREATE INDEX files_bucket_database_id_idx
  ON files_store_public.files (bucket_key, database_id);

-- "My uploads" queries
CREATE INDEX files_created_by_database_id_created_at_idx
  ON files_store_public.files (created_by, database_id, created_at DESC);

-- Back-reference lookups (cleanup worker, attachment queries)
CREATE INDEX files_source_ref_idx
  ON files_store_public.files (source_table, source_column, source_id);

-- Pending file reaper (hourly cron) -- partial index keeps it small
CREATE INDEX files_pending_created_at_idx
  ON files_store_public.files (created_at)
  WHERE status = 'pending';

-- Stuck processing detection
CREATE INDEX files_processing_idx
  ON files_store_public.files (processing_started_at)
  WHERE status = 'processing';

-- Deletion job queue
CREATE INDEX files_deleting_idx
  ON files_store_public.files (updated_at)
  WHERE status = 'deleting';

-- Time-range scans on large tables
CREATE INDEX files_created_at_brin_idx
  ON files_store_public.files USING brin (created_at);

-- ---------------------------------------------------------------------------
-- 5. Triggers
-- ---------------------------------------------------------------------------

-- 5a. AFTER INSERT -- enqueue process-image job
-- NOTE: Version rows are inserted with status = 'ready', which intentionally
-- bypasses this trigger (condition: NEW.status = 'pending'). Only origin
-- uploads (status = 'pending') need processing.

CREATE OR REPLACE FUNCTION files_store_public.files_after_insert_queue_processing()
RETURNS trigger AS $$
BEGIN
  -- NOTE(review): job_key presumably dedupes concurrent enqueues for the same
  -- file -- confirm against app_jobs.add_job semantics.
  PERFORM app_jobs.add_job(
    'process-image',
    json_build_object(
      'file_id', NEW.id,
      'database_id', NEW.database_id
    ),
    job_key := 'file:' || NEW.id::text
  );
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER files_after_insert_queue_processing
  AFTER INSERT ON files_store_public.files
  FOR EACH ROW
  WHEN (NEW.status = 'pending')
  EXECUTE FUNCTION files_store_public.files_after_insert_queue_processing();

COMMENT ON TRIGGER files_after_insert_queue_processing ON files_store_public.files IS
  'Enqueues process-image job for new origin uploads. Version rows inserted as ready intentionally bypass this trigger -- they do not need processing.';

-- 5b.
-- BEFORE UPDATE -- timestamp + state machine
--
-- Maintains updated_at on every UPDATE and validates status transitions.
-- Allowed transitions:
--   pending    -> processing | error | deleting
--   processing -> ready | error | deleting
--   ready      -> deleting
--   error      -> deleting | pending   (pending = retry)
--
-- FIX(review): pending -> deleting was previously rejected, but the domain
-- trigger functions (populate_file_back_reference on file replacement, and
-- mark_files_deleting_on_source_delete on source-row deletion) set
-- status = 'deleting' on ANY non-deleting row, including still-pending
-- uploads. Without this transition, replacing or deleting a not-yet-processed
-- file raised 'Invalid status transition from pending to deleting' and
-- aborted the user's domain operation.

CREATE OR REPLACE FUNCTION files_store_public.files_before_update_timestamp()
RETURNS trigger AS $$
BEGIN
  -- Always update timestamp
  NEW.updated_at := now();

  -- State machine validation (only when status changes)
  IF OLD.status IS DISTINCT FROM NEW.status THEN
    IF NOT (
      (OLD.status = 'pending' AND NEW.status IN ('processing', 'error', 'deleting'))
      OR (OLD.status = 'processing' AND NEW.status IN ('ready', 'error', 'deleting'))
      OR (OLD.status = 'ready' AND NEW.status = 'deleting')
      OR (OLD.status = 'error' AND NEW.status IN ('deleting', 'pending'))
    ) THEN
      RAISE EXCEPTION 'Invalid status transition from % to %', OLD.status, NEW.status;
    END IF;

    -- Track processing start/end: stamp when entering processing, clear when
    -- leaving it (used by the stuck-job detector via files_processing_idx).
    IF NEW.status = 'processing' THEN
      NEW.processing_started_at := now();
    ELSIF OLD.status = 'processing' AND NEW.status <> 'processing' THEN
      NEW.processing_started_at := NULL;
    END IF;
  END IF;

  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER files_before_update_timestamp
  BEFORE UPDATE ON files_store_public.files
  FOR EACH ROW
  EXECUTE FUNCTION files_store_public.files_before_update_timestamp();

COMMENT ON TRIGGER files_before_update_timestamp ON files_store_public.files IS
  'Enforces status transition rules and maintains updated_at / processing_started_at timestamps.';

-- 5c.
-- AFTER UPDATE -- enqueue delete-s3-object job
--
-- Fires once per row when status transitions into 'deleting'; the WHEN
-- clause below guarantees the job is enqueued only on that edge.

CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_deletion()
RETURNS trigger AS $$
BEGIN
  -- Payload carries the S3 key so the worker can delete the object without
  -- re-reading this row (it may already be gone by the time the job runs).
  PERFORM app_jobs.add_job(
    'delete-s3-object',
    json_build_object(
      'file_id', NEW.id,
      'database_id', NEW.database_id,
      'key', NEW.key
    ),
    job_key := 'delete:' || NEW.id::text
  );
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER files_after_update_queue_deletion
  AFTER UPDATE ON files_store_public.files
  FOR EACH ROW
  WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting')
  EXECUTE FUNCTION files_store_public.files_after_update_queue_deletion();

COMMENT ON TRIGGER files_after_update_queue_deletion ON files_store_public.files IS
  'Enqueues delete-s3-object job when a file transitions to deleting status. Each version row gets its own deletion job.';

-- 5d. AFTER UPDATE -- re-enqueue process-image on error->pending retry

CREATE OR REPLACE FUNCTION files_store_public.files_after_update_queue_retry()
RETURNS trigger AS $$
BEGIN
  -- NOTE(review): reuses the same job_key ('file:{id}') as the initial
  -- AFTER INSERT enqueue -- assumes app_jobs.add_job allows re-adding a key
  -- once the prior job completed/failed; confirm its dedup semantics.
  PERFORM app_jobs.add_job(
    'process-image',
    json_build_object(
      'file_id', NEW.id,
      'database_id', NEW.database_id
    ),
    job_key := 'file:' || NEW.id::text
  );
  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER files_after_update_queue_retry
  AFTER UPDATE ON files_store_public.files
  FOR EACH ROW
  WHEN (OLD.status = 'error' AND NEW.status = 'pending')
  EXECUTE FUNCTION files_store_public.files_after_update_queue_retry();

COMMENT ON TRIGGER files_after_update_queue_retry ON files_store_public.files IS
  'Re-enqueues process-image job when a file is retried (error->pending). Without this trigger, the retry would change status but never re-enqueue the processing job.';

-- ---------------------------------------------------------------------------
-- 6.
-- RLS Policies & Grants
-- ---------------------------------------------------------------------------

-- FORCE makes RLS apply even to the table owner, so superuser-adjacent
-- connections cannot accidentally bypass tenant isolation.
ALTER TABLE files_store_public.files ENABLE ROW LEVEL SECURITY;
ALTER TABLE files_store_public.files FORCE ROW LEVEL SECURITY;

-- Policy 1: Tenant isolation (RESTRICTIVE -- always ANDed with all other policies)
-- Without this being RESTRICTIVE, permissive policies would OR together and
-- allow cross-tenant access (e.g. a ready file in tenant 2 visible via files_visibility).
CREATE POLICY files_tenant_isolation ON files_store_public.files
  AS RESTRICTIVE
  FOR ALL
  USING (database_id = current_setting('app.database_id')::integer)
  WITH CHECK (database_id = current_setting('app.database_id')::integer);

-- Policy 2: Visibility for SELECT (authenticated + service_role only)
-- Non-ready files visible only to the uploader. Uses NULLIF for safe uuid handling
-- when app.user_id is missing or empty (returns NULL instead of cast error).
-- Scoped to authenticated/service_role so anonymous only gets public_bucket_read.
CREATE POLICY files_visibility ON files_store_public.files
  FOR SELECT
  TO authenticated, service_role
  USING (
    status = 'ready'
    OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid
  );

-- Policy 3: Public bucket read for SELECT (all roles including anonymous)
-- No TO clause, so this policy applies to every role; the RESTRICTIVE tenant
-- policy above still ANDs in, keeping anonymous reads tenant-scoped.
CREATE POLICY files_public_bucket_read ON files_store_public.files
  FOR SELECT
  USING (
    EXISTS (
      SELECT 1 FROM files_store_public.buckets b
      WHERE b.key = bucket_key
        AND b.database_id = files.database_id
        AND b.is_public = true
    )
    AND status = 'ready'
  );

-- Policy 4: Admin override (all operations, authenticated + service_role)
CREATE POLICY files_admin_override ON files_store_public.files
  FOR ALL
  TO authenticated, service_role
  USING (current_setting('app.role', true) = 'administrator')
  WITH CHECK (current_setting('app.role', true) = 'administrator');

-- Policy 5: INSERT access (permissive base so non-admin users can insert)
-- WITH CHECK (true) here is still constrained by the RESTRICTIVE tenant policy.
CREATE POLICY files_insert_access ON files_store_public.files
  FOR INSERT
  TO authenticated, service_role
  WITH CHECK (true);

-- Policy 6: UPDATE access (replicates visibility for row targeting)
-- Non-admin users can only update rows they can see (ready or own).
-- Admin override policy covers admin UPDATE access separately.
CREATE POLICY files_update_access ON files_store_public.files
  FOR UPDATE
  TO authenticated, service_role
  USING (
    status = 'ready'
    OR created_by = NULLIF(current_setting('app.user_id', true), '')::uuid
  )
  WITH CHECK (true);

-- Policy 7: DELETE access (service_role only, grants already restrict authenticated)
CREATE POLICY files_delete_access ON files_store_public.files
  FOR DELETE
  TO service_role
  USING (true);

-- Grants
-- Note: authenticated deliberately gets no DELETE -- deletion happens via the
-- status = 'deleting' state machine, not via row deletion.
GRANT SELECT, INSERT, UPDATE ON files_store_public.files TO authenticated;
GRANT SELECT, INSERT, UPDATE, DELETE ON files_store_public.files TO service_role;

COMMENT ON POLICY files_tenant_isolation ON files_store_public.files IS
  'Every query is scoped to the current tenant via app.database_id session variable.';
COMMENT ON POLICY files_visibility ON files_store_public.files IS
  'Users see all ready files in their tenant. Non-ready files visible only to the uploader.';
COMMENT ON POLICY files_public_bucket_read ON files_store_public.files IS
  'Allows unauthenticated reads on ready files in public buckets.';
COMMENT ON POLICY files_admin_override ON files_store_public.files IS
  'Administrators can see and modify all files in the tenant regardless of status or creator.';

-- ---------------------------------------------------------------------------
-- 7. Domain Table Triggers
-- ---------------------------------------------------------------------------

-- 7a. Generic trigger function: back-reference population
--
-- When a domain table's image/upload/attachment column is updated with an S3 key,
-- find the files row by key and populate source_table, source_column, source_id.
-- Also finds version rows by key prefix and populates the same back-reference.
--
-- Parameters (passed via TG_ARGV):
--   TG_ARGV[0] = column name (e.g. 'profile_picture')
--   TG_ARGV[1] = schema-qualified table name (e.g.
-- 'constructive_users_public.users')

-- FIX(review): the version-row matching previously used
--   key LIKE base_key || '_%'
-- which is wrong in two ways: the appended '_' is a single-character LIKE
-- wildcard (matching ANY character after base_key), and base_key itself may
-- contain '_' (bucket keys permit it per buckets_key_format), each of which
-- acts as a wildcard too. Replaced with starts_with() for an exact,
-- wildcard-free prefix comparison.

CREATE OR REPLACE FUNCTION files_store_public.populate_file_back_reference()
RETURNS trigger AS $$
DECLARE
  col_name text := TG_ARGV[0];
  table_name text := TG_ARGV[1];
  new_val jsonb;
  old_val jsonb;
  new_key text;
  old_key text;
  base_key text;
  db_id integer;
BEGIN
  -- Get the database_id from session context
  db_id := current_setting('app.database_id')::integer;

  -- Extract the jsonb value from the specified column (dynamic, since the
  -- column name arrives via TG_ARGV and differs per attached trigger)
  EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW;
  EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO old_val USING OLD;

  -- Extract the key from the new and old values
  new_key := new_val ->> 'key';
  old_key := old_val ->> 'key';

  -- If no key change, nothing to do
  IF new_key IS NOT DISTINCT FROM old_key THEN
    RETURN NEW;
  END IF;

  -- Handle file replacement: mark old files as deleting
  IF old_key IS NOT NULL AND old_key <> '' THEN
    -- Derive base key for the old file (strip the trailing _version suffix)
    base_key := regexp_replace(old_key, '_[^_]+$', '');

    -- Mark old origin + all versions as deleting.
    -- starts_with() is an exact prefix test: it matches every version key of
    -- this file ({base_key}_{version_name}) without LIKE wildcard semantics.
    UPDATE files_store_public.files
    SET status = 'deleting', status_reason = 'replaced by new file'
    WHERE database_id = db_id
      AND (key = old_key OR starts_with(key, base_key || '_'))
      AND status NOT IN ('deleting');
  END IF;

  -- Populate back-reference on new file (origin + versions)
  IF new_key IS NOT NULL AND new_key <> '' THEN
    -- Derive base key for the new file
    base_key := regexp_replace(new_key, '_[^_]+$', '');

    -- Set back-reference on origin + all version rows
    UPDATE files_store_public.files
    SET source_table = table_name,
        source_column = col_name,
        source_id = NEW.id
    WHERE database_id = db_id
      AND (key = new_key OR starts_with(key, base_key || '_'));
  END IF;

  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION files_store_public.populate_file_back_reference() IS
  'Generic trigger function for domain tables. Populates source_table/source_column/source_id on files rows when image/upload/attachment columns are updated. Handles file replacement by marking old files as deleting.';

-- 7b. Generic trigger function: source row deletion
--
-- When a domain row is deleted, mark all associated files as deleting.

CREATE OR REPLACE FUNCTION files_store_public.mark_files_deleting_on_source_delete()
RETURNS trigger AS $$
DECLARE
  col_name text := TG_ARGV[0];
  table_name text := TG_ARGV[1];
  db_id integer;
BEGIN
  db_id := current_setting('app.database_id')::integer;

  -- Mark all files for this source row + column as deleting. Matches by the
  -- back-reference columns, so it also catches version rows.
  UPDATE files_store_public.files
  SET status = 'deleting', status_reason = 'source row deleted'
  WHERE database_id = db_id
    AND source_table = table_name
    AND source_column = col_name
    AND source_id = OLD.id
    AND status NOT IN ('deleting');

  RETURN OLD;
END;
$$ LANGUAGE plpgsql;

COMMENT ON FUNCTION files_store_public.mark_files_deleting_on_source_delete() IS
  'Generic trigger function for domain tables. Marks all associated files as deleting when a domain row is deleted.';

-- 7c. CREATE TRIGGER statements for all 6 tables, 9 columns
--
-- Each domain column gets two triggers:
--   - AFTER UPDATE: back-reference population + file replacement
--   - BEFORE DELETE: mark files deleting on source row deletion
--
-- NOTE(review): only AFTER UPDATE OF <col> triggers are attached, so a key
-- set on INSERT of the domain row is never back-referenced -- presumably the
-- flow always INSERTs then UPDATEs the column; confirm against the resolver.
--
-- These are wrapped in a DO block so they gracefully skip tables that
-- don't exist yet (e.g. in fresh dev environments). In production,
-- domain tables will exist before this migration runs.
-- Attaches the two generic trigger functions to every known domain column.
-- Each table is probed via information_schema and skipped (with a NOTICE)
-- when absent, so the migration is safe on partially-deployed databases.

DO $domain_triggers$
DECLARE
  _tbl text;
BEGIN
  -- constructive_users_public.users.profile_picture
  SELECT 'constructive_users_public.users' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'constructive_users_public' AND table_name = 'users';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER users_profile_picture_file_ref
      AFTER UPDATE OF profile_picture ON constructive_users_public.users
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''profile_picture'', ''constructive_users_public.users'')';
    EXECUTE 'CREATE TRIGGER users_profile_picture_file_delete
      BEFORE DELETE ON constructive_users_public.users
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''profile_picture'', ''constructive_users_public.users'')';
    RAISE NOTICE 'Created triggers for constructive_users_public.users.profile_picture';
  ELSE
    RAISE NOTICE 'Skipped triggers for constructive_users_public.users (table not found)';
  END IF;

  -- constructive_status_public.app_levels.image
  SELECT 'constructive_status_public.app_levels' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'constructive_status_public' AND table_name = 'app_levels';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER app_levels_image_file_ref
      AFTER UPDATE OF image ON constructive_status_public.app_levels
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''image'', ''constructive_status_public.app_levels'')';
    EXECUTE 'CREATE TRIGGER app_levels_image_file_delete
      BEFORE DELETE ON constructive_status_public.app_levels
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''image'', ''constructive_status_public.app_levels'')';
    RAISE NOTICE 'Created triggers for constructive_status_public.app_levels.image';
  ELSE
    RAISE NOTICE 'Skipped triggers for constructive_status_public.app_levels (table not found)';
  END IF;

  -- services_public.sites (og_image, apple_touch_icon, logo, favicon)
  SELECT 'services_public.sites' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'services_public' AND table_name = 'sites';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER sites_og_image_file_ref
      AFTER UPDATE OF og_image ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.sites'')';
    EXECUTE 'CREATE TRIGGER sites_og_image_file_delete
      BEFORE DELETE ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.sites'')';

    EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_ref
      AFTER UPDATE OF apple_touch_icon ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''apple_touch_icon'', ''services_public.sites'')';
    EXECUTE 'CREATE TRIGGER sites_apple_touch_icon_file_delete
      BEFORE DELETE ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''apple_touch_icon'', ''services_public.sites'')';

    EXECUTE 'CREATE TRIGGER sites_logo_file_ref
      AFTER UPDATE OF logo ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''logo'', ''services_public.sites'')';
    EXECUTE 'CREATE TRIGGER sites_logo_file_delete
      BEFORE DELETE ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''logo'', ''services_public.sites'')';

    EXECUTE 'CREATE TRIGGER sites_favicon_file_ref
      AFTER UPDATE OF favicon ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''favicon'', ''services_public.sites'')';
    EXECUTE 'CREATE TRIGGER sites_favicon_file_delete
      BEFORE DELETE ON services_public.sites
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''favicon'', ''services_public.sites'')';
    RAISE NOTICE 'Created triggers for services_public.sites (og_image, apple_touch_icon, logo, favicon)';
  ELSE
    RAISE NOTICE 'Skipped triggers for services_public.sites (table not found)';
  END IF;

  -- services_public.apps.app_image
  SELECT 'services_public.apps' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'services_public' AND table_name = 'apps';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER apps_app_image_file_ref
      AFTER UPDATE OF app_image ON services_public.apps
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''app_image'', ''services_public.apps'')';
    EXECUTE 'CREATE TRIGGER apps_app_image_file_delete
      BEFORE DELETE ON services_public.apps
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''app_image'', ''services_public.apps'')';
    RAISE NOTICE 'Created triggers for services_public.apps.app_image';
  ELSE
    RAISE NOTICE 'Skipped triggers for services_public.apps (table not found)';
  END IF;

  -- services_public.site_metadata.og_image
  SELECT 'services_public.site_metadata' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'services_public' AND table_name = 'site_metadata';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_ref
      AFTER UPDATE OF og_image ON services_public.site_metadata
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''og_image'', ''services_public.site_metadata'')';
    EXECUTE 'CREATE TRIGGER site_metadata_og_image_file_delete
      BEFORE DELETE ON services_public.site_metadata
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''og_image'', ''services_public.site_metadata'')';
    RAISE NOTICE 'Created triggers for services_public.site_metadata.og_image';
  ELSE
    RAISE NOTICE 'Skipped triggers for services_public.site_metadata (table not found)';
  END IF;

  -- db_migrate.migrate_files.upload
  SELECT 'db_migrate.migrate_files' INTO _tbl
  FROM information_schema.tables
  WHERE table_schema = 'db_migrate' AND table_name = 'migrate_files';
  IF FOUND THEN
    EXECUTE 'CREATE TRIGGER migrate_files_upload_file_ref
      AFTER UPDATE OF upload ON db_migrate.migrate_files
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.populate_file_back_reference(''upload'', ''db_migrate.migrate_files'')';
    EXECUTE 'CREATE TRIGGER migrate_files_upload_file_delete
      BEFORE DELETE ON db_migrate.migrate_files
      FOR EACH ROW
      EXECUTE FUNCTION files_store_public.mark_files_deleting_on_source_delete(''upload'', ''db_migrate.migrate_files'')';
    RAISE NOTICE 'Created triggers for db_migrate.migrate_files.upload';
  ELSE
    RAISE NOTICE 'Skipped triggers for db_migrate.migrate_files (table not found)';
  END IF;
END
$domain_triggers$;

-- ---------------------------------------------------------------------------
-- 8. Scheduled cleanup jobs (requires pgpm-database-jobs with scheduling)
-- ---------------------------------------------------------------------------
-- Register recurring file-cleanup jobs via app_jobs.add_scheduled_job.
-- The scheduler (knative-job-service) picks these up and spawns one-shot jobs
-- on the configured schedule. Each job calls the file-cleanup function with
-- the appropriate cleanup type.
--
-- Schedules:
--   pending_reaper:     every hour (clear stale pending uploads)
--   error_cleanup:      daily at 03:00 UTC (expire old error files)
--   unattached_cleanup: daily at 04:00 UTC (clean unattached ready files)
-- ---------------------------------------------------------------------------

DO $cron$
DECLARE
  -- NOTE(review): v_db_id is a uuid from metaschema_public.database, a
  -- different id space than files.database_id (integer) -- confirm
  -- add_scheduled_job expects the metaschema uuid.
  v_db_id uuid;
BEGIN
  -- Look up the database ID for the current database.
  -- If metaschema_public.database is not deployed yet, skip silently.
  -- Nested block so undefined_table is caught without aborting the migration.
  -- NOTE(review): ORDER BY created_at LIMIT 1 picks the oldest database row;
  -- assumes a single-database deployment here -- confirm for multi-tenant.
  BEGIN
    SELECT id INTO v_db_id
    FROM metaschema_public.database
    ORDER BY created_at
    LIMIT 1;
  EXCEPTION WHEN undefined_table THEN
    RAISE NOTICE 'metaschema_public.database not found, skipping scheduled job registration.';
    RETURN;
  END;

  IF v_db_id IS NULL THEN
    RAISE NOTICE 'No database row found, skipping scheduled job registration.';
    RETURN;
  END IF;

  -- pending_reaper: every hour (minute 0)
  PERFORM app_jobs.add_scheduled_job(
    db_id := v_db_id,
    identifier := 'file-cleanup',
    payload := '{"type":"pending_reaper"}'::json,
    schedule_info := '{"minute": 0}'::json,
    job_key := 'file-cleanup:pending_reaper',
    queue_name := 'maintenance',
    max_attempts := 3,
    priority := 100
  );

  -- error_cleanup: daily at 03:00 UTC
  PERFORM app_jobs.add_scheduled_job(
    db_id := v_db_id,
    identifier := 'file-cleanup',
    payload := '{"type":"error_cleanup"}'::json,
    schedule_info := '{"hour": 3, "minute": 0}'::json,
    job_key := 'file-cleanup:error_cleanup',
    queue_name := 'maintenance',
    max_attempts := 3,
    priority := 100
  );

  -- unattached_cleanup: daily at 04:00 UTC
  PERFORM app_jobs.add_scheduled_job(
    db_id := v_db_id,
    identifier := 'file-cleanup',
    payload := '{"type":"unattached_cleanup"}'::json,
    schedule_info := '{"hour": 4, "minute": 0}'::json,
    job_key := 'file-cleanup:unattached_cleanup',
    queue_name := 'maintenance',
    max_attempts := 3,
    priority := 100
  );

  RAISE NOTICE 'Registered 3 file-cleanup scheduled jobs for database %', v_db_id;
END
$cron$;

-- Closes the transaction opened at the top of this migration file.
COMMIT;
diff --git a/uploads/s3-streamer/src/index.ts b/uploads/s3-streamer/src/index.ts
index 90cf965e1..8cd5fa38f 100644
--- a/uploads/s3-streamer/src/index.ts
+++ b/uploads/s3-streamer/src/index.ts
@@ -3,6 +3,7 @@ import Streamer from './streamer';
 export * from './utils';
 export * from './storage-provider';
+export { streamContentType } from '@constructive-io/content-type-stream';
 export { getClient };
 export { Streamer };