From 01ac2e5947a67b1f8589200bc4beaf615dca75b9 Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Mon, 9 Mar 2026 14:50:37 +0800
Subject: [PATCH 01/13] img process cloud functions

---
 docker-compose.yml                   |   31 +
 functions/process-image/handler.json |   12 +
 functions/process-image/handler.ts   |  395 +++++++
 pnpm-lock.yaml                       | 1634 +++++++++++++++++++++++++-
 scripts/dev.ts                       |   15 +-
 5 files changed, 2058 insertions(+), 29 deletions(-)
 create mode 100644 functions/process-image/handler.json
 create mode 100644 functions/process-image/handler.ts

diff --git a/docker-compose.yml b/docker-compose.yml
index 594bf62..50ba57f 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -87,6 +87,36 @@ services:
     ports:
       - "3002:3000"
 
+  minio:
+    image: minio/minio:latest
+    command: server /data --console-address ":9001"
+    environment:
+      MINIO_ROOT_USER: minioadmin
+      MINIO_ROOT_PASSWORD: minioadmin
+    ports:
+      - "9000:9000" # S3 API
+      - "9001:9001" # Console UI
+    volumes:
+      - miniodata:/data
+    healthcheck:
+      test: ["CMD", "mc", "ready", "local"]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+
+  minio-setup:
+    image: minio/mc:latest
+    depends_on:
+      minio:
+        condition: service_healthy
+    entrypoint: ["sh", "-c"]
+    command:
+      - |
+        mc alias set local http://minio:9000 minioadmin minioadmin
+        mc mb --ignore-existing local/test-bucket
+        mc anonymous set download local/test-bucket
+        echo "MinIO bucket ready"
+
   mailpit:
     image: axllent/mailpit:latest
     ports:
@@ -95,3 +125,4 @@
 
 volumes:
   pgdata:
+  miniodata:
diff --git a/functions/process-image/handler.json b/functions/process-image/handler.json
new file mode 100644
index 0000000..e76b43f
--- /dev/null
+++ b/functions/process-image/handler.json
@@ -0,0 +1,12 @@
+{
+  "name": "process-image",
+  "version": "1.0.0",
+  "type": "node-graphql",
+  "description": "Downloads images from MinIO, generates resized versions via sharp, uploads back, and updates the database record",
+  "dependencies": {
+    "@aws-sdk/client-s3": "^3.1001.0",
+    "@aws-sdk/lib-storage": "^3.1001.0",
+    "pg-cache": "^3.1.0",
+    "sharp": "^0.33.0"
+  }
+}
diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
new file mode 100644
index 0000000..cbeeac5
--- /dev/null
+++ b/functions/process-image/handler.ts
@@ -0,0 +1,395 @@
+import { DeleteObjectCommand, GetObjectCommand, S3Client } from '@aws-sdk/client-s3';
+import { Upload } from '@aws-sdk/lib-storage';
+import type { FunctionHandler } from '@constructive-io/fn-runtime';
+import { getPgPool } from 'pg-cache';
+import { extname } from 'path';
+import sharp from 'sharp';
+import { Readable } from 'stream';
+
+// ---------------------------------------------------------------------------
+// Types
+// ---------------------------------------------------------------------------
+
+interface VersionConfig {
+  name: string;
+  maxWidth: number;
+  maxHeight: number;
+}
+
+interface ProcessImageParams {
+  schema: string;
+  table: string;
+  idFields: string[];
+  idValues: (string | number)[];
+  fields: string[];
+  versions?: VersionConfig[];
+}
+
+interface ImageFieldValue {
+  url?: string;
+  id?: string;
+  key?: string;
+  bucket?: string;
+  provider?: string;
+  mime?: string;
+  filename?: string;
+  versions?: ImageVersion[];
+}
+
+interface ImageVersion {
+  name: string;
+  key: string;
+  bucket: string;
+  url: string;
+  width: number;
+  height: number;
+  mime: string;
+}
+
+const DEFAULT_VERSIONS: VersionConfig[] = [
+  { name: 'thumbnail', maxWidth: 150, maxHeight: 150 },
+  { name: 'medium', maxWidth: 600, maxHeight: 600 },
+  { name: 'large', maxWidth: 1200, maxHeight: 1200 },
+];
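+
+// Example invocation payload (hypothetical schema/table/field names), matching
+// ProcessImageParams above; omitting "versions" falls back to DEFAULT_VERSIONS:
+//
+//   {
+//     "schema": "app",
+//     "table": "posts",
+//     "idFields": ["id"],
+//     "idValues": [42],
+//     "fields": ["cover"],
+//     "versions": [{ "name": "thumbnail", "maxWidth": 150, "maxHeight": 150 }]
+//   }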
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+function validateIdentifier(name: string): string {
+  if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(name)) {
+    throw new Error(`Invalid SQL identifier: "${name}"`);
+  }
+  return name;
+}
+
+function createS3Client(env: Record<string, string | undefined>): S3Client {
+  const provider = env.BUCKET_PROVIDER || 'minio';
+  const isMinio = provider === 'minio';
+
+  return new S3Client({
+    region: env.AWS_REGION || 'us-east-1',
+    credentials: {
+      accessKeyId: env.AWS_ACCESS_KEY || 'minioadmin',
+      secretAccessKey: env.AWS_SECRET_KEY || 'minioadmin',
+    },
+    ...(isMinio
+      ? {
+          endpoint: env.MINIO_ENDPOINT || 'http://localhost:9000',
+          forcePathStyle: true,
+        }
+      : {}),
+  });
+}
+
+function parseS3Url(url: string): { bucket: string; key: string } | null {
+  try {
+    const parsed = new URL(url);
+    const parts = parsed.pathname.split('/').filter(Boolean);
+
+    const host = parsed.hostname;
+    if (host.endsWith('.s3.amazonaws.com')) {
+      const bucket = host.replace('.s3.amazonaws.com', '');
+      return { bucket, key: parts.join('/') };
+    }
+
+    if (parts.length >= 2) {
+      return { bucket: parts[0], key: parts.slice(1).join('/') };
+    }
+  } catch {
+    // not a valid URL
+  }
+  return null;
+}
+
+function deriveVersionKey(originalKey: string, versionName: string): string {
+  const ext = extname(originalKey);
+  const base = ext ? originalKey.slice(0, -ext.length) : originalKey;
+  return `${base}_${versionName}${ext}`;
+}
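+// e.g. deriveVersionKey('uploads/cat.jpg', 'thumbnail') -> 'uploads/cat_thumbnail.jpg';
+// keys without an extension get a bare suffix: 'uploads/cat' -> 'uploads/cat_thumbnail'.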
+
+function buildObjectUrl(
+  env: Record<string, string | undefined>,
+  bucket: string,
+  key: string,
+): string {
+  const provider = env.BUCKET_PROVIDER || 'minio';
+  if (provider === 'minio') {
+    const endpoint = env.MINIO_ENDPOINT || 'http://localhost:9000';
+    return `${endpoint.replace(/\/$/, '')}/${bucket}/${key}`;
+  }
+  const region = env.AWS_REGION || 'us-east-1';
+  return `https://${bucket}.s3.${region}.amazonaws.com/${key}`;
+}
+
+const PROCESSABLE_FORMATS = new Set([
+  'jpeg', 'png', 'webp', 'gif', 'tiff', 'avif', 'heif', 'jp2',
+]);
+
+async function streamToBuffer(stream: Readable): Promise<Buffer> {
+  const chunks: Buffer[] = [];
+  for await (const chunk of stream) {
+    chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
+  }
+  return Buffer.concat(chunks);
+}
+
+async function deleteS3Objects(
+  s3: S3Client,
+  objects: { bucket: string; key: string }[],
+  log: { info: (...args: unknown[]) => void; error: (...args: unknown[]) => void },
+): Promise<void> {
+  for (const obj of objects) {
+    try {
+      await s3.send(new DeleteObjectCommand({ Bucket: obj.bucket, Key: obj.key }));
+      log.info(`[process-image] rolled back: deleted ${obj.key}`);
+    } catch (err) {
+      log.error(`[process-image] rollback failed for ${obj.key}`, err);
+    }
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Handler
+// ---------------------------------------------------------------------------
+
+const handler: FunctionHandler<ProcessImageParams> = async (params, context) => {
+  const { log, env } = context;
+  const {
+    schema,
+    table,
+    idFields,
+    idValues,
+    fields,
+    versions = DEFAULT_VERSIONS,
+  } = params;
+
+  // --- Validation ---
+
+  if (!schema || !table) return { error: 'Missing schema or table' };
+  if (!idFields?.length || !idValues?.length)
+    return { error: 'Missing idFields or idValues' };
+  if (idFields.length !== idValues.length)
+    return { error: 'idFields and idValues must have same length' };
+  if (!fields?.length) return { error: 'Missing fields' };
+
+  validateIdentifier(schema);
+  validateIdentifier(table);
+  idFields.forEach(validateIdentifier);
+  fields.forEach(validateIdentifier);
+
+  const defaultBucket = env.BUCKET_NAME || 'test-bucket';
+
+  log.info('[process-image] starting', {
+    schema,
+    table,
+    idFields,
+    fields,
+    versionCount: versions.length,
+  });
+
+  const s3 = createS3Client(env);
+  const pool = getPgPool({
+    host: env.PGHOST || 'localhost',
+    port: Number(env.PGPORT || 5432),
+    database: env.PGDATABASE || 'constructive',
+    user: env.PGUSER || 'postgres',
+    password: env.PGPASSWORD || 'password',
+  });
+
+  // --- Query the record ---
+
+  const fieldList = fields.map((f: string) => `"${f}"`).join(', ');
+  const whereClauses = idFields
+    .map((f: string, i: number) => `"${f}" = $${i + 1}`)
+    .join(' AND ');
+  const selectSql = `SELECT ${fieldList} FROM "${schema}"."${table}" WHERE ${whereClauses}`;
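+  // e.g. schema "app", table "posts", idFields ["id"], fields ["cover"]
+  // (hypothetical names) yield
+  //   SELECT "cover" FROM "app"."posts" WHERE "id" = $1
+  // with idValues bound as query parameters; identifiers were regex-validated
+  // above, so they are safe to interpolate.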
+
+  log.info('[process-image] querying record');
+  const { rows } = await pool.query(selectSql, idValues);
+
+  if (rows.length === 0) {
+    s3.destroy();
+    return { error: 'Record not found' };
+  }
+
+  const record = rows[0];
+  const results: Record<string, unknown> = {};
+
+  try {
+    for (const field of fields) {
+      const fieldValue: ImageFieldValue | null =
+        typeof record[field] === 'string'
+          ? JSON.parse(record[field])
+          : record[field];
+
+      if (!fieldValue) {
+        log.info(`[process-image] field "${field}" is null, skipping`);
+        results[field] = { skipped: true, reason: 'null_value' };
+        continue;
+      }
+
+      if (fieldValue.versions && fieldValue.versions.length > 0) {
+        log.info(
+          `[process-image] field "${field}" already has ${fieldValue.versions.length} versions, skipping`,
+        );
+        results[field] = { skipped: true, reason: 'versions_exist' };
+        continue;
+      }
+
+      // Resolve bucket + key
+      let key = fieldValue.key;
+      let bucket = fieldValue.bucket || defaultBucket;
+
+      if (!key && fieldValue.url) {
+        const parsed = parseS3Url(fieldValue.url);
+        if (parsed) {
+          key = parsed.key;
+          bucket = parsed.bucket;
+        }
+      }
+
+      if (!key) {
+        log.warn(
+          `[process-image] field "${field}" has no resolvable key, skipping`,
+        );
+        results[field] = { skipped: true, reason: 'no_key' };
+        continue;
+      }
+
+      log.info(`[process-image] processing "${field}"`, { key, bucket });
+
+      // --- Download original to buffer ---
+
+      const response = await s3.send(
+        new GetObjectCommand({ Bucket: bucket, Key: key }),
+      );
+
+      if (!(response.Body instanceof Readable)) {
+        throw new Error(`S3 response body is not a readable stream for key=${key}`);
+      }
+
+      const originalBuffer = await streamToBuffer(response.Body);
+
+      // --- Gate: verify this is a processable image ---
+
+      let metadata: sharp.Metadata;
+      try {
+        metadata = await sharp(originalBuffer).metadata();
+      } catch {
+        log.warn(`[process-image] field "${field}" is not a valid image, skipping`);
+        results[field] = { skipped: true, reason: 'not_an_image' };
+        continue;
+      }
+
+      if (!metadata.format || !PROCESSABLE_FORMATS.has(metadata.format)) {
+        log.warn(
+          `[process-image] field "${field}" has unsupported format "${metadata.format || 'unknown'}", skipping`,
+        );
+        results[field] = { skipped: true, reason: 'unsupported_format', format: metadata.format };
+        continue;
+      }
+
+      const originalWidth = metadata.width || 0;
+      const originalHeight = metadata.height || 0;
+
+      if (!originalWidth || !originalHeight) {
+        log.warn(`[process-image] field "${field}" has no dimensions, skipping`);
+        results[field] = { skipped: true, reason: 'no_dimensions' };
+        continue;
+      }
+
+      log.info(`[process-image] original: ${originalWidth}x${originalHeight} (${metadata.format})`);
+
+      // --- Generate versions ---
+
+      const generatedVersions: ImageVersion[] = [];
+
+      for (const ver of versions) {
+        if (originalWidth <= ver.maxWidth && originalHeight <= ver.maxHeight) {
+          log.info(
+            `[process-image] original (${originalWidth}x${originalHeight}) fits within ${ver.name} (${ver.maxWidth}x${ver.maxHeight}), skipping`,
+          );
+          continue;
+        }
+
+        const resized = await sharp(originalBuffer)
+          .resize(ver.maxWidth, ver.maxHeight, {
+            fit: 'inside',
+            withoutEnlargement: true,
+          })
+          .toBuffer({ resolveWithObject: true });
+
+        const vKey = deriveVersionKey(key, ver.name);
+        const vUrl = buildObjectUrl(env, bucket, vKey);
+        const mime = fieldValue.mime || 'image/jpeg';
+
+        const uploadResult = await new Upload({
+          client: s3,
+          params: {
+            Bucket: bucket,
+            Key: vKey,
+            Body: resized.data,
+            ContentType: mime,
+          },
+        }).done();
+
+        generatedVersions.push({
+          name: ver.name,
+          key: vKey,
+          bucket,
+          url: uploadResult.Location || vUrl,
+          width: resized.info.width,
+          height: resized.info.height,
+          mime,
+        });
+
+        log.info(
+          `[process-image] uploaded ${ver.name}: ${resized.info.width}x${resized.info.height}`,
+        );
+      }
+
+      // --- Update database (rollback uploads on failure) ---
+
+      if
(generatedVersions.length > 0) { + const updatedValue: ImageFieldValue = { + ...fieldValue, + versions: generatedVersions, + }; + + const updateWhere = idFields + .map((f: string, i: number) => `"${f}" = $${i + 2}`) + .join(' AND '); + const updateSql = `UPDATE "${schema}"."${table}" SET "${field}" = $1::jsonb WHERE ${updateWhere}`; + const updateValues = [JSON.stringify(updatedValue), ...idValues]; + + try { + await pool.query(updateSql, updateValues); + log.info( + `[process-image] updated "${field}" with ${generatedVersions.length} versions`, + ); + } catch (err) { + log.error(`[process-image] DB update failed for "${field}", rolling back uploads`, err); + await deleteS3Objects( + s3, + generatedVersions.map((v) => ({ bucket: v.bucket, key: v.key })), + log, + ); + throw err; + } + } + + results[field] = { + original: { width: originalWidth, height: originalHeight }, + versions: generatedVersions, + }; + } + } finally { + s3.destroy(); + } + + log.info('[process-image] complete'); + return { success: true, results }; +}; + +export default handler; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7df5653..f2368d2 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -64,6 +64,34 @@ importers: specifier: ^5.1.6 version: 5.9.3 + generated/process-image: + dependencies: + '@aws-sdk/client-s3': + specifier: ^3.1001.0 + version: 3.1004.0 + '@aws-sdk/lib-storage': + specifier: ^3.1001.0 + version: 3.1004.0(@aws-sdk/client-s3@3.1004.0) + '@constructive-io/fn-runtime': + specifier: workspace:^ + version: link:../../packages/fn-runtime + pg-cache: + specifier: ^3.1.0 + version: 3.1.1 + sharp: + specifier: ^0.33.0 + version: 0.33.5 + devDependencies: + '@types/node': + specifier: ^22.10.4 + version: 22.19.3 + makage: + specifier: ^0.1.10 + version: 0.1.12 + typescript: + specifier: ^5.1.6 + version: 5.9.3 + generated/send-email-link: dependencies: '@constructive-io/fn-runtime': @@ -316,6 +344,171 @@ packages: 12factor-env@1.1.0: resolution: {integrity: sha512-LQDEquBFj5Ndqm+7kNxB8fBjxg4VrOtM+Uwf71uyY4Anxy77DH0SzyBmLIW0A+fpB3MTcg7BEFreD+tM2nm9mw==} + '@aws-crypto/crc32@5.2.0': + resolution: {integrity: sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg==} + engines: {node: '>=16.0.0'} + + '@aws-crypto/crc32c@5.2.0': + resolution: {integrity: sha512-+iWb8qaHLYKrNvGRbiYRHSdKRWhto5XlZUEBwDjYNf+ly5SVYG6zEoYIdxvf5R3zyeP16w4PLBn3rH1xc74Rag==} + + '@aws-crypto/sha1-browser@5.2.0': + resolution: {integrity: sha512-OH6lveCFfcDjX4dbAvCFSYUjJZjDr/3XJ3xHtjn3Oj5b9RjojQo8npoLeA/bNwkOkrSQ0wgrHzXk4tDRxGKJeg==} + + '@aws-crypto/sha256-browser@5.2.0': + resolution: {integrity: sha512-AXfN/lGotSQwu6HNcEsIASo7kWXZ5HYWvfOmSNKDsEqC4OashTp8alTmaz+F7TC2L083SFv5RdB+qU3Vs1kZqw==} + + '@aws-crypto/sha256-js@5.2.0': + resolution: {integrity: sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA==} + engines: {node: '>=16.0.0'} + + '@aws-crypto/supports-web-crypto@5.2.0': + resolution: {integrity: sha512-iAvUotm021kM33eCdNfwIN//F77/IADDSs58i+MDaOqFrVjZo9bAal0NK7HurRuWLLpF1iLX7gbWrjHjeo+YFg==} + + '@aws-crypto/util@5.2.0': + resolution: {integrity: sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==} + + '@aws-sdk/client-s3@3.1004.0': + resolution: {integrity: sha512-m0zNfpsona9jQdX1cHtHArOiuvSGZPsgp/KRZS2YjJhKah96G2UN3UNGZQ6aVjXIQjCY6UanCJo0uW9Xf2U41w==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/core@3.973.18': + resolution: {integrity: 
sha512-GUIlegfcK2LO1J2Y98sCJy63rQSiLiDOgVw7HiHPRqfI2vb3XozTVqemwO0VSGXp54ngCnAQz0Lf0YPCBINNxA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/crc64-nvme@3.972.4': + resolution: {integrity: sha512-HKZIZLbRyvzo/bXZU7Zmk6XqU+1C9DjI56xd02vwuDIxedxBEqP17t9ExhbP9QFeNq/a3l9GOcyirFXxmbDhmw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-env@3.972.16': + resolution: {integrity: sha512-HrdtnadvTGAQUr18sPzGlE5El3ICphnH6SU7UQOMOWFgRKbTRNN8msTxM4emzguUso9CzaHU2xy5ctSrmK5YNA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-http@3.972.18': + resolution: {integrity: sha512-NyB6smuZAixND5jZumkpkunQ0voc4Mwgkd+SZ6cvAzIB7gK8HV8Zd4rS8Kn5MmoGgusyNfVGG+RLoYc4yFiw+A==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-ini@3.972.17': + resolution: {integrity: sha512-dFqh7nfX43B8dO1aPQHOcjC0SnCJ83H3F+1LoCh3X1P7E7N09I+0/taID0asU6GCddfDExqnEvQtDdkuMe5tKQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-login@3.972.17': + resolution: {integrity: sha512-gf2E5b7LpKb+JX2oQsRIDxdRZjBFZt2olCGlWCdb3vBERbXIPgm2t1R5mEnwd4j0UEO/Tbg5zN2KJbHXttJqwA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-node@3.972.18': + resolution: {integrity: sha512-ZDJa2gd1xiPg/nBDGhUlat02O8obaDEnICBAVS8qieZ0+nDfaB0Z3ec6gjZj27OqFTjnB/Q5a0GwQwb7rMVViw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-process@3.972.16': + resolution: {integrity: sha512-n89ibATwnLEg0ZdZmUds5bq8AfBAdoYEDpqP3uzPLaRuGelsKlIvCYSNNvfgGLi8NaHPNNhs1HjJZYbqkW9b+g==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-sso@3.972.17': + resolution: {integrity: sha512-wGtte+48xnhnhHMl/MsxzacBPs5A+7JJedjiP452IkHY7vsbYKcvQBqFye8LwdTJVeHtBHv+JFeTscnwepoWGg==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/credential-provider-web-identity@3.972.17': + resolution: {integrity: sha512-8aiVJh6fTdl8gcyL+sVNcNwTtWpmoFa1Sh7xlj6Z7L/cZ/tYMEBHq44wTYG8Kt0z/PpGNopD89nbj3FHl9QmTA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/lib-storage@3.1004.0': + resolution: {integrity: sha512-4W6UkeLVd/1FyXFvD9PHMw5FSOY7tsf6+I52jmgdZwDZ9gJcJBx6wF9IhaVp1AXhScZGY9HqHiqYt0qlrSHrGw==} + engines: {node: '>=20.0.0'} + peerDependencies: + '@aws-sdk/client-s3': ^3.1004.0 + + '@aws-sdk/middleware-bucket-endpoint@3.972.7': + resolution: {integrity: sha512-goX+axlJ6PQlRnzE2bQisZ8wVrlm6dXJfBzMJhd8LhAIBan/w1Kl73fJnalM/S+18VnpzIHumyV6DtgmvqG5IA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-expect-continue@3.972.7': + resolution: {integrity: sha512-mvWqvm61bmZUKmmrtl2uWbokqpenY3Mc3Jf4nXB/Hse6gWxLPaCQThmhPBDzsPSV8/Odn8V6ovWt3pZ7vy4BFQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-flexible-checksums@3.973.4': + resolution: {integrity: sha512-7CH2jcGmkvkHc5Buz9IGbdjq1729AAlgYJiAvGq7qhCHqYleCsriWdSnmsqWTwdAfXHMT+pkxX3w6v5tJNcSug==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-host-header@3.972.7': + resolution: {integrity: sha512-aHQZgztBFEpDU1BB00VWCIIm85JjGjQW1OG9+98BdmaOpguJvzmXBGbnAiYcciCd+IS4e9BEq664lhzGnWJHgQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-location-constraint@3.972.7': + resolution: {integrity: sha512-vdK1LJfffBp87Lj0Bw3WdK1rJk9OLDYdQpqoKgmpIZPe+4+HawZ6THTbvjhJt4C4MNnRrHTKHQjkwBiIpDBoig==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-logger@3.972.7': + resolution: {integrity: sha512-LXhiWlWb26txCU1vcI9PneESSeRp/RYY/McuM4SpdrimQR5NgwaPb4VJCadVeuGWgh6QmqZ6rAKSoL1ob16W6w==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-recursion-detection@3.972.7': + resolution: {integrity: 
sha512-l2VQdcBcYLzIzykCHtXlbpiVCZ94/xniLIkAj0jpnpjY4xlgZx7f56Ypn+uV1y3gG0tNVytJqo3K9bfMFee7SQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-sdk-s3@3.972.18': + resolution: {integrity: sha512-5E3XxaElrdyk6ZJ0TjH7Qm6ios4b/qQCiLr6oQ8NK7e4Kn6JBTJCaYioQCQ65BpZ1+l1mK5wTAac2+pEz0Smpw==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-ssec@3.972.7': + resolution: {integrity: sha512-G9clGVuAml7d8DYzY6DnRi7TIIDRvZ3YpqJPz/8wnWS5fYx/FNWNmkO6iJVlVkQg9BfeMzd+bVPtPJOvC4B+nQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/middleware-user-agent@3.972.19': + resolution: {integrity: sha512-Km90fcXt3W/iqujHzuM6IaDkYCj73gsYufcuWXApWdzoTy6KGk8fnchAjePMARU0xegIR3K4N3yIo1vy7OVe8A==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/nested-clients@3.996.7': + resolution: {integrity: sha512-MlGWA8uPaOs5AiTZ5JLM4uuWDm9EEAnm9cqwvqQIc6kEgel/8s1BaOWm9QgUcfc9K8qd7KkC3n43yDbeXOA2tg==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/region-config-resolver@3.972.7': + resolution: {integrity: sha512-/Ev/6AI8bvt4HAAptzSjThGUMjcWaX3GX8oERkB0F0F9x2dLSBdgFDiyrRz3i0u0ZFZFQ1b28is4QhyqXTUsVA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/signature-v4-multi-region@3.996.6': + resolution: {integrity: sha512-NnsOQsVmJXy4+IdPFUjRCWPn9qNH1TzS/f7MiWgXeoHs903tJpAWQWQtoFvLccyPoBgomKP9L89RRr2YsT/L0g==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/token-providers@3.1004.0': + resolution: {integrity: sha512-j9BwZZId9sFp+4GPhf6KrwO8Tben2sXibZA8D1vv2I1zBdvkUHcBA2g4pkqIpTRalMTLC0NPkBPX0gERxfy/iA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/types@3.973.5': + resolution: {integrity: sha512-hl7BGwDCWsjH8NkZfx+HgS7H2LyM2lTMAI7ba9c8O0KqdBLTdNJivsHpqjg9rNlAlPyREb6DeDRXUl0s8uFdmQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/util-arn-parser@3.972.3': + resolution: {integrity: sha512-HzSD8PMFrvgi2Kserxuff5VitNq2sgf3w9qxmskKDiDTThWfVteJxuCS9JXiPIPtmCrp+7N9asfIaVhBFORllA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/util-endpoints@3.996.4': + resolution: {integrity: sha512-Hek90FBmd4joCFj+Vc98KLJh73Zqj3s2W56gjAcTkrNLMDI5nIFkG9YpfcJiVI1YlE2Ne1uOQNe+IgQ/Vz2XRA==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/util-locate-window@3.965.5': + resolution: {integrity: sha512-WhlJNNINQB+9qtLtZJcpQdgZw3SCDCpXdUJP7cToGwHbCWCnRckGlc6Bx/OhWwIYFNAn+FIydY8SZ0QmVu3xTQ==} + engines: {node: '>=20.0.0'} + + '@aws-sdk/util-user-agent-browser@3.972.7': + resolution: {integrity: sha512-7SJVuvhKhMF/BkNS1n0QAJYgvEwYbK2QLKBrzDiwQGiTRU6Yf1f3nehTzm/l21xdAOtWSfp2uWSddPnP2ZtsVw==} + + '@aws-sdk/util-user-agent-node@3.973.4': + resolution: {integrity: sha512-uqKeLqZ9D3nQjH7HGIERNXK9qnSpUK08l4MlJ5/NZqSSdeJsVANYp437EM9sEzwU28c2xfj2V6qlkqzsgtKs6Q==} + engines: {node: '>=20.0.0'} + peerDependencies: + aws-crt: '>=1.0.0' + peerDependenciesMeta: + aws-crt: + optional: true + + '@aws-sdk/xml-builder@3.972.10': + resolution: {integrity: sha512-OnejAIVD+CxzyAUrVic7lG+3QRltyja9LoNqCE/1YVs8ichoTbJlVSaZ9iSMcnHLyzrSNtvaOGjSDRP+d/ouFA==} + engines: {node: '>=20.0.0'} + + '@aws/lambda-invoke-store@0.2.3': + resolution: {integrity: sha512-oLvsaPMTBejkkmHhjf09xTgk71mOqyr/409NKhRIL08If7AhVfUsJhVsx386uJaqNd42v9kWamQ9lFbkoC2dYw==} + engines: {node: '>=18.0.0'} + '@babel/code-frame@7.27.1': resolution: {integrity: sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==} engines: {node: '>=6.9.0'} @@ -585,6 +778,111 @@ packages: resolution: {integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==} engines: {node: '>=18.18'} + '@img/sharp-darwin-arm64@0.33.5': + resolution: {integrity: 
sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [darwin] + + '@img/sharp-darwin-x64@0.33.5': + resolution: {integrity: sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-darwin-arm64@1.0.4': + resolution: {integrity: sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==} + cpu: [arm64] + os: [darwin] + + '@img/sharp-libvips-darwin-x64@1.0.4': + resolution: {integrity: sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-linux-arm64@1.0.4': + resolution: {integrity: sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linux-arm@1.0.5': + resolution: {integrity: sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==} + cpu: [arm] + os: [linux] + + '@img/sharp-libvips-linux-s390x@1.0.4': + resolution: {integrity: sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==} + cpu: [s390x] + os: [linux] + + '@img/sharp-libvips-linux-x64@1.0.4': + resolution: {integrity: sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==} + cpu: [x64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-arm64@1.0.4': + resolution: {integrity: sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-x64@1.0.4': + resolution: {integrity: sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==} + cpu: [x64] + os: [linux] + + '@img/sharp-linux-arm64@0.33.5': + resolution: {integrity: sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linux-arm@0.33.5': + resolution: {integrity: sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm] + os: [linux] + + '@img/sharp-linux-s390x@0.33.5': + resolution: {integrity: sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [s390x] + os: [linux] + + '@img/sharp-linux-x64@0.33.5': + resolution: {integrity: sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-linuxmusl-arm64@0.33.5': + resolution: {integrity: sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linuxmusl-x64@0.33.5': + resolution: {integrity: sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-wasm32@0.33.5': + resolution: {integrity: 
sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [wasm32] + + '@img/sharp-win32-ia32@0.33.5': + resolution: {integrity: sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ia32] + os: [win32] + + '@img/sharp-win32-x64@0.33.5': + resolution: {integrity: sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [win32] + '@isaacs/balanced-match@4.0.1': resolution: {integrity: sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==} engines: {node: 20 || >=22} @@ -731,9 +1029,15 @@ packages: '@pgpmjs/logger@2.1.0': resolution: {integrity: sha512-AQHt6BMnb+0iv8MXmb9kuQfe7/PDBqkkiIaGtzV6WFH4i0oNB43rQBwX4oKc6cSGl6bRXH03o4KFutS3duWCaA==} + '@pgpmjs/logger@2.2.1': + resolution: {integrity: sha512-9mnh7iGaBYiwWwlUwcmV7S7zKyMsZWaPtcEpGPowM8HFSnj0E55F/H6x8HULay3VEpsXRf0EduxoAdFm15+Nlw==} + '@pgpmjs/types@2.16.0': resolution: {integrity: sha512-be/RIFg2TYB2X9LAVZ4mFkhu3ZZMpzBCBR9umvQUDEfMcb7aUYDFdEw+mc7CHBgifXNliUswXmllZVsrurh6TQ==} + '@pgpmjs/types@2.17.0': + resolution: {integrity: sha512-qMIi67ZNWkzV/oOWf9BvR3aat2hNLCqsCP4YkMOcPYi42WsIsch9mev3K+jRPkTPKrUAVkkEAnTbnCx3vW8y7w==} + '@pkgjs/parseargs@0.11.0': resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} engines: {node: '>=14'} @@ -751,6 +1055,222 @@ packages: '@sinonjs/fake-timers@13.0.5': resolution: {integrity: sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==} + '@smithy/abort-controller@4.2.11': + resolution: {integrity: sha512-Hj4WoYWMJnSpM6/kchsm4bUNTL9XiSyhvoMb2KIq4VJzyDt7JpGHUZHkVNPZVC7YE1tf8tPeVauxpFBKGW4/KQ==} + engines: {node: '>=18.0.0'} + + '@smithy/chunked-blob-reader-native@4.2.3': + resolution: {integrity: sha512-jA5k5Udn7Y5717L86h4EIv06wIr3xn8GM1qHRi/Nf31annXcXHJjBKvgztnbn2TxH3xWrPBfgwHsOwZf0UmQWw==} + engines: {node: '>=18.0.0'} + + '@smithy/chunked-blob-reader@5.2.2': + resolution: {integrity: sha512-St+kVicSyayWQca+I1rGitaOEH6uKgE8IUWoYnnEX26SWdWQcL6LvMSD19Lg+vYHKdT9B2Zuu7rd3i6Wnyb/iw==} + engines: {node: '>=18.0.0'} + + '@smithy/config-resolver@4.4.10': + resolution: {integrity: sha512-IRTkd6ps0ru+lTWnfnsbXzW80A8Od8p3pYiZnW98K2Hb20rqfsX7VTlfUwhrcOeSSy68Gn9WBofwPuw3e5CCsg==} + engines: {node: '>=18.0.0'} + + '@smithy/core@3.23.9': + resolution: {integrity: sha512-1Vcut4LEL9HZsdpI0vFiRYIsaoPwZLjAxnVQDUMQK8beMS+EYPLDQCXtbzfxmM5GzSgjfe2Q9M7WaXwIMQllyQ==} + engines: {node: '>=18.0.0'} + + '@smithy/credential-provider-imds@4.2.11': + resolution: {integrity: sha512-lBXrS6ku0kTj3xLmsJW0WwqWbGQ6ueooYyp/1L9lkyT0M02C+DWwYwc5aTyXFbRaK38ojALxNixg+LxKSHZc0g==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-codec@4.2.11': + resolution: {integrity: sha512-Sf39Ml0iVX+ba/bgMPxaXWAAFmHqYLTmbjAPfLPLY8CrYkRDEqZdUsKC1OwVMCdJXfAt0v4j49GIJ8DoSYAe6w==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-browser@4.2.11': + resolution: {integrity: sha512-3rEpo3G6f/nRS7fQDsZmxw/ius6rnlIpz4UX6FlALEzz8JoSxFmdBt0SZnthis+km7sQo6q5/3e+UJcuQivoXA==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-config-resolver@4.3.11': + resolution: {integrity: sha512-XeNIA8tcP/GDWnnKkO7qEm/bg0B/bP9lvIXZBXcGZwZ+VYM8h8k9wuDvUODtdQ2Wcp2RcBkPTCSMmaniVHrMlA==} + engines: {node: 
'>=18.0.0'} + + '@smithy/eventstream-serde-node@4.2.11': + resolution: {integrity: sha512-fzbCh18rscBDTQSCrsp1fGcclLNF//nJyhjldsEl/5wCYmgpHblv5JSppQAyQI24lClsFT0wV06N1Porn0IsEw==} + engines: {node: '>=18.0.0'} + + '@smithy/eventstream-serde-universal@4.2.11': + resolution: {integrity: sha512-MJ7HcI+jEkqoWT5vp+uoVaAjBrmxBtKhZTeynDRG/seEjJfqyg3SiqMMqyPnAMzmIfLaeJ/uiuSDP/l9AnMy/Q==} + engines: {node: '>=18.0.0'} + + '@smithy/fetch-http-handler@5.3.13': + resolution: {integrity: sha512-U2Hcfl2s3XaYjikN9cT4mPu8ybDbImV3baXR0PkVlC0TTx808bRP3FaPGAzPtB8OByI+JqJ1kyS+7GEgae7+qQ==} + engines: {node: '>=18.0.0'} + + '@smithy/hash-blob-browser@4.2.12': + resolution: {integrity: sha512-1wQE33DsxkM/waftAhCH9VtJbUGyt1PJ9YRDpOu+q9FUi73LLFUZ2fD8A61g2mT1UY9k7b99+V1xZ41Rz4SHRQ==} + engines: {node: '>=18.0.0'} + + '@smithy/hash-node@4.2.11': + resolution: {integrity: sha512-T+p1pNynRkydpdL015ruIoyPSRw9e/SQOWmSAMmmprfswMrd5Ow5igOWNVlvyVFZlxXqGmyH3NQwfwy8r5Jx0A==} + engines: {node: '>=18.0.0'} + + '@smithy/hash-stream-node@4.2.11': + resolution: {integrity: sha512-hQsTjwPCRY8w9GK07w1RqJi3e+myh0UaOWBBhZ1UMSDgofH/Q1fEYzU1teaX6HkpX/eWDdm7tAGR0jBPlz9QEQ==} + engines: {node: '>=18.0.0'} + + '@smithy/invalid-dependency@4.2.11': + resolution: {integrity: sha512-cGNMrgykRmddrNhYy1yBdrp5GwIgEkniS7k9O1VLB38yxQtlvrxpZtUVvo6T4cKpeZsriukBuuxfJcdZQc/f/g==} + engines: {node: '>=18.0.0'} + + '@smithy/is-array-buffer@2.2.0': + resolution: {integrity: sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA==} + engines: {node: '>=14.0.0'} + + '@smithy/is-array-buffer@4.2.2': + resolution: {integrity: sha512-n6rQ4N8Jj4YTQO3YFrlgZuwKodf4zUFs7EJIWH86pSCWBaAtAGBFfCM7Wx6D2bBJ2xqFNxGBSrUWswT3M0VJow==} + engines: {node: '>=18.0.0'} + + '@smithy/md5-js@4.2.11': + resolution: {integrity: sha512-350X4kGIrty0Snx2OWv7rPM6p6vM7RzryvFs6B/56Cux3w3sChOb3bymo5oidXJlPcP9fIRxGUCk7GqpiSOtng==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-content-length@4.2.11': + resolution: {integrity: sha512-UvIfKYAKhCzr4p6jFevPlKhQwyQwlJ6IeKLDhmV1PlYfcW3RL4ROjNEDtSik4NYMi9kDkH7eSwyTP3vNJ/u/Dw==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-endpoint@4.4.23': + resolution: {integrity: sha512-UEFIejZy54T1EJn2aWJ45voB7RP2T+IRzUqocIdM6GFFa5ClZncakYJfcYnoXt3UsQrZZ9ZRauGm77l9UCbBLw==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-retry@4.4.40': + resolution: {integrity: sha512-YhEMakG1Ae57FajERdHNZ4ShOPIY7DsgV+ZoAxo/5BT0KIe+f6DDU2rtIymNNFIj22NJfeeI6LWIifrwM0f+rA==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-serde@4.2.12': + resolution: {integrity: sha512-W9g1bOLui7Xn5FABRVS0o3rXL0gfN37d/8I/W7i0N7oxjx9QecUmXEMSUMADTODwdtka9cN43t5BI2CodLJpng==} + engines: {node: '>=18.0.0'} + + '@smithy/middleware-stack@4.2.11': + resolution: {integrity: sha512-s+eenEPW6RgliDk2IhjD2hWOxIx1NKrOHxEwNUaUXxYBxIyCcDfNULZ2Mu15E3kwcJWBedTET/kEASPV1A1Akg==} + engines: {node: '>=18.0.0'} + + '@smithy/node-config-provider@4.3.11': + resolution: {integrity: sha512-xD17eE7kaLgBBGf5CZQ58hh2YmwK1Z0O8YhffwB/De2jsL0U3JklmhVYJ9Uf37OtUDLF2gsW40Xwwag9U869Gg==} + engines: {node: '>=18.0.0'} + + '@smithy/node-http-handler@4.4.14': + resolution: {integrity: sha512-DamSqaU8nuk0xTJDrYnRzZndHwwRnyj/n/+RqGGCcBKB4qrQem0mSDiWdupaNWdwxzyMU91qxDmHOCazfhtO3A==} + engines: {node: '>=18.0.0'} + + '@smithy/property-provider@4.2.11': + resolution: {integrity: sha512-14T1V64o6/ndyrnl1ze1ZhyLzIeYNN47oF/QU6P5m82AEtyOkMJTb0gO1dPubYjyyKuPD6OSVMPDKe+zioOnCg==} + engines: {node: '>=18.0.0'} + + '@smithy/protocol-http@5.3.11': + resolution: {integrity: 
sha512-hI+barOVDJBkNt4y0L2mu3Ugc0w7+BpJ2CZuLwXtSltGAAwCb3IvnalGlbDV/UCS6a9ZuT3+exd1WxNdLb5IlQ==} + engines: {node: '>=18.0.0'} + + '@smithy/querystring-builder@4.2.11': + resolution: {integrity: sha512-7spdikrYiljpket6u0up2Ck2mxhy7dZ0+TDd+S53Dg2DHd6wg+YNJrTCHiLdgZmEXZKI7LJZcwL3721ZRDFiqA==} + engines: {node: '>=18.0.0'} + + '@smithy/querystring-parser@4.2.11': + resolution: {integrity: sha512-nE3IRNjDltvGcoThD2abTozI1dkSy8aX+a2N1Rs55en5UsdyyIXgGEmevUL3okZFoJC77JgRGe99xYohhsjivQ==} + engines: {node: '>=18.0.0'} + + '@smithy/service-error-classification@4.2.11': + resolution: {integrity: sha512-HkMFJZJUhzU3HvND1+Yw/kYWXp4RPDLBWLcK1n+Vqw8xn4y2YiBhdww8IxhkQjP/QlZun5bwm3vcHc8AqIU3zw==} + engines: {node: '>=18.0.0'} + + '@smithy/shared-ini-file-loader@4.4.6': + resolution: {integrity: sha512-IB/M5I8G0EeXZTHsAxpx51tMQ5R719F3aq+fjEB6VtNcCHDc0ajFDIGDZw+FW9GxtEkgTduiPpjveJdA/CX7sw==} + engines: {node: '>=18.0.0'} + + '@smithy/signature-v4@5.3.11': + resolution: {integrity: sha512-V1L6N9aKOBAN4wEHLyqjLBnAz13mtILU0SeDrjOaIZEeN6IFa6DxwRt1NNpOdmSpQUfkBj0qeD3m6P77uzMhgQ==} + engines: {node: '>=18.0.0'} + + '@smithy/smithy-client@4.12.3': + resolution: {integrity: sha512-7k4UxjSpHmPN2AxVhvIazRSzFQjWnud3sOsXcFStzagww17j1cFQYqTSiQ8xuYK3vKLR1Ni8FzuT3VlKr3xCNw==} + engines: {node: '>=18.0.0'} + + '@smithy/types@4.13.0': + resolution: {integrity: sha512-COuLsZILbbQsdrwKQpkkpyep7lCsByxwj7m0Mg5v66/ZTyenlfBc40/QFQ5chO0YN/PNEH1Bi3fGtfXPnYNeDw==} + engines: {node: '>=18.0.0'} + + '@smithy/url-parser@4.2.11': + resolution: {integrity: sha512-oTAGGHo8ZYc5VZsBREzuf5lf2pAurJQsccMusVZ85wDkX66ojEc/XauiGjzCj50A61ObFTPe6d7Pyt6UBYaing==} + engines: {node: '>=18.0.0'} + + '@smithy/util-base64@4.3.2': + resolution: {integrity: sha512-XRH6b0H/5A3SgblmMa5ErXQ2XKhfbQB+Fm/oyLZ2O2kCUrwgg55bU0RekmzAhuwOjA9qdN5VU2BprOvGGUkOOQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-body-length-browser@4.2.2': + resolution: {integrity: sha512-JKCrLNOup3OOgmzeaKQwi4ZCTWlYR5H4Gm1r2uTMVBXoemo1UEghk5vtMi1xSu2ymgKVGW631e2fp9/R610ZjQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-body-length-node@4.2.3': + resolution: {integrity: sha512-ZkJGvqBzMHVHE7r/hcuCxlTY8pQr1kMtdsVPs7ex4mMU+EAbcXppfo5NmyxMYi2XU49eqaz56j2gsk4dHHPG/g==} + engines: {node: '>=18.0.0'} + + '@smithy/util-buffer-from@2.2.0': + resolution: {integrity: sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA==} + engines: {node: '>=14.0.0'} + + '@smithy/util-buffer-from@4.2.2': + resolution: {integrity: sha512-FDXD7cvUoFWwN6vtQfEta540Y/YBe5JneK3SoZg9bThSoOAC/eGeYEua6RkBgKjGa/sz6Y+DuBZj3+YEY21y4Q==} + engines: {node: '>=18.0.0'} + + '@smithy/util-config-provider@4.2.2': + resolution: {integrity: sha512-dWU03V3XUprJwaUIFVv4iOnS1FC9HnMHDfUrlNDSh4315v0cWyaIErP8KiqGVbf5z+JupoVpNM7ZB3jFiTejvQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-defaults-mode-browser@4.3.39': + resolution: {integrity: sha512-ui7/Ho/+VHqS7Km2wBw4/Ab4RktoiSshgcgpJzC4keFPs6tLJS4IQwbeahxQS3E/w98uq6E1mirCH/id9xIXeQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-defaults-mode-node@4.2.42': + resolution: {integrity: sha512-QDA84CWNe8Akpj15ofLO+1N3Rfg8qa2K5uX0y6HnOp4AnRYRgWrKx/xzbYNbVF9ZsyJUYOfcoaN3y93wA/QJ2A==} + engines: {node: '>=18.0.0'} + + '@smithy/util-endpoints@3.3.2': + resolution: {integrity: sha512-+4HFLpE5u29AbFlTdlKIT7jfOzZ8PDYZKTb3e+AgLz986OYwqTourQ5H+jg79/66DB69Un1+qKecLnkZdAsYcA==} + engines: {node: '>=18.0.0'} + + '@smithy/util-hex-encoding@4.2.2': + resolution: {integrity: 
sha512-Qcz3W5vuHK4sLQdyT93k/rfrUwdJ8/HZ+nMUOyGdpeGA1Wxt65zYwi3oEl9kOM+RswvYq90fzkNDahPS8K0OIg==} + engines: {node: '>=18.0.0'} + + '@smithy/util-middleware@4.2.11': + resolution: {integrity: sha512-r3dtF9F+TpSZUxpOVVtPfk09Rlo4lT6ORBqEvX3IBT6SkQAdDSVKR5GcfmZbtl7WKhKnmb3wbDTQ6ibR2XHClw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-retry@4.2.11': + resolution: {integrity: sha512-XSZULmL5x6aCTTii59wJqKsY1l3eMIAomRAccW7Tzh9r8s7T/7rdo03oektuH5jeYRlJMPcNP92EuRDvk9aXbw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-stream@4.5.17': + resolution: {integrity: sha512-793BYZ4h2JAQkNHcEnyFxDTcZbm9bVybD0UV/LEWmZ5bkTms7JqjfrLMi2Qy0E5WFcCzLwCAPgcvcvxoeALbAQ==} + engines: {node: '>=18.0.0'} + + '@smithy/util-uri-escape@4.2.2': + resolution: {integrity: sha512-2kAStBlvq+lTXHyAZYfJRb/DfS3rsinLiwb+69SstC9Vb0s9vNWkRwpnj918Pfi85mzi42sOqdV72OLxWAISnw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-utf8@2.3.0': + resolution: {integrity: sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A==} + engines: {node: '>=14.0.0'} + + '@smithy/util-utf8@4.2.2': + resolution: {integrity: sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw==} + engines: {node: '>=18.0.0'} + + '@smithy/util-waiter@4.2.11': + resolution: {integrity: sha512-x7Rh2azQPs3XxbvCzcttRErKKvLnbZfqRf/gOjw2pb+ZscX88e5UkRPCB67bVnsFHxayvMvmePfKTqsRb+is1A==} + engines: {node: '>=18.0.0'} + + '@smithy/uuid@1.1.2': + resolution: {integrity: sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g==} + engines: {node: '>=18.0.0'} + '@styled-system/background@5.1.2': resolution: {integrity: sha512-jtwH2C/U6ssuGSvwTN3ri/IyjdHb8W9X/g8Y0JLcrH02G+BW3OS8kZdHphF1/YyRklnrKrBT2ngwGUK6aqqV3A==} @@ -1157,6 +1677,9 @@ packages: base-64@1.0.0: resolution: {integrity: sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg==} + base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + baseline-browser-mapping@2.9.11: resolution: {integrity: sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==} hasBin: true @@ -1179,6 +1702,9 @@ packages: boolbase@1.0.0: resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + bowser@2.14.1: + resolution: {integrity: sha512-tzPjzCxygAKWFOJP011oxFHs57HzIhOEracIgAePE4pqB3LikALKnSzUyU4MGs9/iCEUuHlAJTjTc5M+u7YEGg==} + brace-expansion@1.1.12: resolution: {integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==} @@ -1204,6 +1730,9 @@ packages: buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + buffer@5.6.0: + resolution: {integrity: sha512-/gDYp/UtU0eA1ys8bOs9J6a+E/KWIY+DZ+Q2WESNUA0jFRsJOc0SNUO6xJ5SGA1xueg3NL65W6s+NY5l9cunuw==} + bytes@3.1.0: resolution: {integrity: sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==} engines: {node: '>= 0.8'} @@ -1303,6 +1832,13 @@ packages: color-name@1.1.4: resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + color-string@1.9.1: + resolution: {integrity: sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==} + + color@4.2.3: + resolution: {integrity: 
sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==} + engines: {node: '>=12.5.0'} + combined-stream@1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -1441,6 +1977,10 @@ packages: resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} engines: {node: '>= 0.8'} + detect-libc@2.1.2: + resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} + engines: {node: '>=8'} + detect-newline@3.1.0: resolution: {integrity: sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==} engines: {node: '>=8'} @@ -1655,6 +2195,10 @@ packages: resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} engines: {node: '>= 0.6'} + events@3.3.0: + resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} + engines: {node: '>=0.8.x'} + execa@5.1.1: resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} engines: {node: '>=10'} @@ -1687,6 +2231,13 @@ packages: fast-levenshtein@2.0.6: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + fast-xml-builder@1.0.0: + resolution: {integrity: sha512-fpZuDogrAgnyt9oDDz+5DBz0zgPdPZz6D4IR7iESxRXElrlGTRkHJ9eEt+SACRJwT0FNFrt71DFQIUFBJfX/uQ==} + + fast-xml-parser@5.4.1: + resolution: {integrity: sha512-BQ30U1mKkvXQXXkAGcuyUA/GA26oEB7NzOtsxCDtyu62sjGw5QraKFhx2Em3WQNjPw9PG6MQ9yuIIgkSDfGu5A==} + hasBin: true + fb-watchman@2.0.2: resolution: {integrity: sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==} @@ -1941,6 +2492,9 @@ packages: resolution: {integrity: sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw==} engines: {node: '>=0.10.0'} + ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} + ignore@5.3.2: resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} engines: {node: '>= 4'} @@ -1982,6 +2536,9 @@ packages: is-arrayish@0.2.1: resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + is-arrayish@0.3.4: + resolution: {integrity: sha512-m6UrgzFVUYawGBh1dUsWR5M2Clqic9RVXC/9f8ceNlv2IcO9j9J/z8UoCLPqtsPBFNzEpfR3xftohbfqDx8EQA==} + is-binary-path@2.1.0: resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} engines: {node: '>=8'} @@ -2289,6 +2846,10 @@ packages: resolution: {integrity: sha512-vFrFJkWtJvJnD5hg+hJvVE8Lh/TcMzKnTgCWmtBipwI5yLX/iX+5UB2tfuyODF5E7k9xEzMdYgGqaSb1c0c5Yw==} engines: {node: 20 || >=22} + lru-cache@11.2.6: + resolution: {integrity: sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==} + engines: {node: 20 || >=22} + lru-cache@5.1.1: resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} @@ -2670,15 +3231,24 @@ packages: pg-cache@2.1.0: resolution: {integrity: 
sha512-r3cMPc62l2EHZwbCPS20X0gJPp/wjz66wknN38eiTYzQE7CShXHGAKbS96xDvWxVAcDGEDhiRJrx5eV1Qu+sUA==} + pg-cache@3.1.1: + resolution: {integrity: sha512-ulDWLOP9qd33CtBaUudCXFQH8ceDrUVY0LjuTZcN6DcmgEvVyAYu//6q6HtqrfX2nb9Srh8bwNBNfJb3haoqxg==} + pg-cloudflare@1.3.0: resolution: {integrity: sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==} pg-connection-string@2.11.0: resolution: {integrity: sha512-kecgoJwhOpxYU21rZjULrmrBJ698U2RxXofKVzOn5UDj61BPj/qMb7diYUR1nLScCDbrztQFl1TaQZT0t1EtzQ==} + pg-connection-string@2.12.0: + resolution: {integrity: sha512-U7qg+bpswf3Cs5xLzRqbXbQl85ng0mfSV/J0nnA31MCLgvEaAo7CIhmeyrmJpOr7o+zm0rXK+hNnT5l9RHkCkQ==} + pg-env@1.4.0: resolution: {integrity: sha512-Xl56AT5Gs/38ubNXSekW02n9USfA+UkIrsl/T0jhES/oKLQccsWPYm+tPeXHc0asdwnFhWxoqbDr2K1vvMv5mA==} + pg-env@1.5.0: + resolution: {integrity: sha512-VHtDiIj5ha8+m0WowxOPuKfPqm4srt+/VOFhFdyqXwSpsXu0TKFmkWrmzsypveUXtsASVlCFa7MDWSgezCyExQ==} + pg-int8@1.0.1: resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} engines: {node: '>=4.0.0'} @@ -2688,9 +3258,17 @@ packages: peerDependencies: pg: '>=8.0' + pg-pool@3.13.0: + resolution: {integrity: sha512-gB+R+Xud1gLFuRD/QgOIgGOBE2KCQPaPwkzBBGC9oG69pHTkhQeIuejVIk3/cnDyX39av2AxomQiyPT13WKHQA==} + peerDependencies: + pg: '>=8.0' + pg-protocol@1.11.0: resolution: {integrity: sha512-pfsxk2M9M3BuGgDOfuy37VNRRX3jmKgMjcvAcWqNDpZSf4cUmv8HSOl5ViRQFsfARFn0KuUQTgLxVMbNq5NW3g==} + pg-protocol@1.13.0: + resolution: {integrity: sha512-zzdvXfS6v89r6v7OcFCHfHlyG/wvry1ALxZo4LqgUoy7W9xhBDMaqOuMiF3qEV45VqsN6rdlcehHrfDtlCPc8w==} + pg-types@2.2.0: resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} engines: {node: '>=4'} @@ -2704,6 +3282,15 @@ packages: pg-native: optional: true + pg@8.20.0: + resolution: {integrity: sha512-ldhMxz2r8fl/6QkXnBD3CR9/xg694oT6DZQ2s6c/RI28OjtSOpxnPrUCGOBJ46RCUxcWdx3p6kw/xnDHjKvaRA==} + engines: {node: '>= 16.0.0'} + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + pgpass@1.0.5: resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} @@ -2909,6 +3496,10 @@ packages: shallowequal@1.1.0: resolution: {integrity: sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==} + sharp@0.33.5: + resolution: {integrity: sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + shebang-command@2.0.0: resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} engines: {node: '>=8'} @@ -2943,6 +3534,9 @@ packages: simple-smtp-server@0.3.0: resolution: {integrity: sha512-5jKWG+G/Jl1I3pxquaAO6GZrQQmXE2e/9vNGWKECinWVBWvx/Hv8Y7oKjneCboN/+aXvFG9E0PSD8Fh84GjSog==} + simple-swizzle@0.2.4: + resolution: {integrity: sha512-nAu1WFPQSMNr2Zn9PGSZK9AGn4t/y97lEm+MXTtUDwfP0ksAIX4nO+6ruD9Jwut4C49SB1Ws+fbXsm/yScWOHw==} + slash@3.0.0: resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} engines: {node: '>=8'} @@ -2984,6 +3578,9 @@ packages: resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} engines: {node: '>= 0.8'} + stream-browserify@3.0.0: + resolution: {integrity: 
sha512-H73RAHsVBapbim0tU2JwwOiXUj+fikfiaoYAKHF3VJfA0pe2BCzkhAHBlLG6REzE+2WNZcxOXjK7lkso+9euLA==} + string-length@4.0.2: resolution: {integrity: sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==} engines: {node: '>=10'} @@ -3019,6 +3616,9 @@ packages: resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} + strnum@2.2.0: + resolution: {integrity: sha512-Y7Bj8XyJxnPAORMZj/xltsfo55uOiyHcU2tnAVzHUnSJR/KsEX+9RoDeXEnsXtl/CX4fAcrt64gZ13aGaWPeBg==} + styled-components@5.3.11: resolution: {integrity: sha512-uuzIIfnVkagcVHv9nE0VPlHPSCmXIUGKfJ42LNjxCCTDTL5sgnJ8Z7GZBq0EnLYGln77tPpEpExt2+qa+cZqSw==} engines: {node: '>=10'} @@ -3319,6 +3919,460 @@ snapshots: dependencies: envalid: 8.1.1 + '@aws-crypto/crc32@5.2.0': + dependencies: + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.5 + tslib: 2.8.1 + + '@aws-crypto/crc32c@5.2.0': + dependencies: + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.5 + tslib: 2.8.1 + + '@aws-crypto/sha1-browser@5.2.0': + dependencies: + '@aws-crypto/supports-web-crypto': 5.2.0 + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-locate-window': 3.965.5 + '@smithy/util-utf8': 2.3.0 + tslib: 2.8.1 + + '@aws-crypto/sha256-browser@5.2.0': + dependencies: + '@aws-crypto/sha256-js': 5.2.0 + '@aws-crypto/supports-web-crypto': 5.2.0 + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-locate-window': 3.965.5 + '@smithy/util-utf8': 2.3.0 + tslib: 2.8.1 + + '@aws-crypto/sha256-js@5.2.0': + dependencies: + '@aws-crypto/util': 5.2.0 + '@aws-sdk/types': 3.973.5 + tslib: 2.8.1 + + '@aws-crypto/supports-web-crypto@5.2.0': + dependencies: + tslib: 2.8.1 + + '@aws-crypto/util@5.2.0': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/util-utf8': 2.3.0 + tslib: 2.8.1 + + '@aws-sdk/client-s3@3.1004.0': + dependencies: + '@aws-crypto/sha1-browser': 5.2.0 + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.18 + '@aws-sdk/credential-provider-node': 3.972.18 + '@aws-sdk/middleware-bucket-endpoint': 3.972.7 + '@aws-sdk/middleware-expect-continue': 3.972.7 + '@aws-sdk/middleware-flexible-checksums': 3.973.4 + '@aws-sdk/middleware-host-header': 3.972.7 + '@aws-sdk/middleware-location-constraint': 3.972.7 + '@aws-sdk/middleware-logger': 3.972.7 + '@aws-sdk/middleware-recursion-detection': 3.972.7 + '@aws-sdk/middleware-sdk-s3': 3.972.18 + '@aws-sdk/middleware-ssec': 3.972.7 + '@aws-sdk/middleware-user-agent': 3.972.19 + '@aws-sdk/region-config-resolver': 3.972.7 + '@aws-sdk/signature-v4-multi-region': 3.996.6 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-endpoints': 3.996.4 + '@aws-sdk/util-user-agent-browser': 3.972.7 + '@aws-sdk/util-user-agent-node': 3.973.4 + '@smithy/config-resolver': 4.4.10 + '@smithy/core': 3.23.9 + '@smithy/eventstream-serde-browser': 4.2.11 + '@smithy/eventstream-serde-config-resolver': 4.3.11 + '@smithy/eventstream-serde-node': 4.2.11 + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/hash-blob-browser': 4.2.12 + '@smithy/hash-node': 4.2.11 + '@smithy/hash-stream-node': 4.2.11 + '@smithy/invalid-dependency': 4.2.11 + '@smithy/md5-js': 4.2.11 + '@smithy/middleware-content-length': 4.2.11 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/middleware-retry': 4.4.40 + '@smithy/middleware-serde': 4.2.12 + '@smithy/middleware-stack': 4.2.11 + '@smithy/node-config-provider': 4.3.11 + '@smithy/node-http-handler': 4.4.14 + '@smithy/protocol-http': 5.3.11 + 
'@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.39 + '@smithy/util-defaults-mode-node': 4.2.42 + '@smithy/util-endpoints': 3.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-retry': 4.2.11 + '@smithy/util-stream': 4.5.17 + '@smithy/util-utf8': 4.2.2 + '@smithy/util-waiter': 4.2.11 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/core@3.973.18': + dependencies: + '@aws-sdk/types': 3.973.5 + '@aws-sdk/xml-builder': 3.972.10 + '@smithy/core': 3.23.9 + '@smithy/node-config-provider': 4.3.11 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/crc64-nvme@3.972.4': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-env@3.972.16': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-http@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/types': 3.973.5 + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/node-http-handler': 4.4.14 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-stream': 4.5.17 + tslib: 2.8.1 + + '@aws-sdk/credential-provider-ini@3.972.17': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/credential-provider-env': 3.972.16 + '@aws-sdk/credential-provider-http': 3.972.18 + '@aws-sdk/credential-provider-login': 3.972.17 + '@aws-sdk/credential-provider-process': 3.972.16 + '@aws-sdk/credential-provider-sso': 3.972.17 + '@aws-sdk/credential-provider-web-identity': 3.972.17 + '@aws-sdk/nested-clients': 3.996.7 + '@aws-sdk/types': 3.973.5 + '@smithy/credential-provider-imds': 4.2.11 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/credential-provider-login@3.972.17': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/nested-clients': 3.996.7 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/credential-provider-node@3.972.18': + dependencies: + '@aws-sdk/credential-provider-env': 3.972.16 + '@aws-sdk/credential-provider-http': 3.972.18 + '@aws-sdk/credential-provider-ini': 3.972.17 + '@aws-sdk/credential-provider-process': 3.972.16 + '@aws-sdk/credential-provider-sso': 3.972.17 + '@aws-sdk/credential-provider-web-identity': 3.972.17 + '@aws-sdk/types': 3.973.5 + '@smithy/credential-provider-imds': 4.2.11 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/credential-provider-process@3.972.16': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + 
'@aws-sdk/credential-provider-sso@3.972.17': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/nested-clients': 3.996.7 + '@aws-sdk/token-providers': 3.1004.0 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/credential-provider-web-identity@3.972.17': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/nested-clients': 3.996.7 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/lib-storage@3.1004.0(@aws-sdk/client-s3@3.1004.0)': + dependencies: + '@aws-sdk/client-s3': 3.1004.0 + '@smithy/abort-controller': 4.2.11 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/smithy-client': 4.12.3 + buffer: 5.6.0 + events: 3.3.0 + stream-browserify: 3.0.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-bucket-endpoint@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-arn-parser': 3.972.3 + '@smithy/node-config-provider': 4.3.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-config-provider': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/middleware-expect-continue@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-flexible-checksums@3.973.4': + dependencies: + '@aws-crypto/crc32': 5.2.0 + '@aws-crypto/crc32c': 5.2.0 + '@aws-crypto/util': 5.2.0 + '@aws-sdk/core': 3.973.18 + '@aws-sdk/crc64-nvme': 3.972.4 + '@aws-sdk/types': 3.973.5 + '@smithy/is-array-buffer': 4.2.2 + '@smithy/node-config-provider': 4.3.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-stream': 4.5.17 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/middleware-host-header@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-location-constraint@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-logger@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-recursion-detection@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@aws/lambda-invoke-store': 0.2.3 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-sdk-s3@3.972.18': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-arn-parser': 3.972.3 + '@smithy/core': 3.23.9 + '@smithy/node-config-provider': 4.3.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-config-provider': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-stream': 4.5.17 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@aws-sdk/middleware-ssec@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/middleware-user-agent@3.972.19': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-endpoints': 3.996.4 + '@smithy/core': 3.23.9 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-retry': 4.2.11 + tslib: 2.8.1 + + '@aws-sdk/nested-clients@3.996.7': + dependencies: + 
'@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.18 + '@aws-sdk/middleware-host-header': 3.972.7 + '@aws-sdk/middleware-logger': 3.972.7 + '@aws-sdk/middleware-recursion-detection': 3.972.7 + '@aws-sdk/middleware-user-agent': 3.972.19 + '@aws-sdk/region-config-resolver': 3.972.7 + '@aws-sdk/types': 3.973.5 + '@aws-sdk/util-endpoints': 3.996.4 + '@aws-sdk/util-user-agent-browser': 3.972.7 + '@aws-sdk/util-user-agent-node': 3.973.4 + '@smithy/config-resolver': 4.4.10 + '@smithy/core': 3.23.9 + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/hash-node': 4.2.11 + '@smithy/invalid-dependency': 4.2.11 + '@smithy/middleware-content-length': 4.2.11 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/middleware-retry': 4.4.40 + '@smithy/middleware-serde': 4.2.12 + '@smithy/middleware-stack': 4.2.11 + '@smithy/node-config-provider': 4.3.11 + '@smithy/node-http-handler': 4.4.14 + '@smithy/protocol-http': 5.3.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.39 + '@smithy/util-defaults-mode-node': 4.2.42 + '@smithy/util-endpoints': 3.3.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-retry': 4.2.11 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/region-config-resolver@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/config-resolver': 4.4.10 + '@smithy/node-config-provider': 4.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/signature-v4-multi-region@3.996.6': + dependencies: + '@aws-sdk/middleware-sdk-s3': 3.972.18 + '@aws-sdk/types': 3.973.5 + '@smithy/protocol-http': 5.3.11 + '@smithy/signature-v4': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/token-providers@3.1004.0': + dependencies: + '@aws-sdk/core': 3.973.18 + '@aws-sdk/nested-clients': 3.996.7 + '@aws-sdk/types': 3.973.5 + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + + '@aws-sdk/types@3.973.5': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/util-arn-parser@3.972.3': + dependencies: + tslib: 2.8.1 + + '@aws-sdk/util-endpoints@3.996.4': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + '@smithy/util-endpoints': 3.3.2 + tslib: 2.8.1 + + '@aws-sdk/util-locate-window@3.965.5': + dependencies: + tslib: 2.8.1 + + '@aws-sdk/util-user-agent-browser@3.972.7': + dependencies: + '@aws-sdk/types': 3.973.5 + '@smithy/types': 4.13.0 + bowser: 2.14.1 + tslib: 2.8.1 + + '@aws-sdk/util-user-agent-node@3.973.4': + dependencies: + '@aws-sdk/middleware-user-agent': 3.972.19 + '@aws-sdk/types': 3.973.5 + '@smithy/node-config-provider': 4.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@aws-sdk/xml-builder@3.972.10': + dependencies: + '@smithy/types': 4.13.0 + fast-xml-parser: 5.4.1 + tslib: 2.8.1 + + '@aws/lambda-invoke-store@0.2.3': {} + '@babel/code-frame@7.27.1': dependencies: '@babel/helper-validator-identifier': 7.28.5 @@ -3600,43 +4654,118 @@ snapshots: dependencies: '@types/json-schema': 7.0.15 - '@eslint/eslintrc@3.3.3': - dependencies: - ajv: 6.12.6 - debug: 4.4.3(supports-color@5.5.0) - espree: 10.4.0 - globals: 14.0.0 - ignore: 5.3.2 - import-fresh: 3.3.1 - js-yaml: 4.1.1 - minimatch: 3.1.2 - strip-json-comments: 
3.1.1 - transitivePeerDependencies: - - supports-color + '@eslint/eslintrc@3.3.3': + dependencies: + ajv: 6.12.6 + debug: 4.4.3(supports-color@5.5.0) + espree: 10.4.0 + globals: 14.0.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.1 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@9.39.2': {} + + '@eslint/object-schema@2.1.7': {} + + '@eslint/plugin-kit@0.4.1': + dependencies: + '@eslint/core': 0.17.0 + levn: 0.4.1 + + '@graphql-typed-document-node/core@3.2.0(graphql@16.12.0)': + dependencies: + graphql: 16.12.0 + + '@humanfs/core@0.19.1': {} + + '@humanfs/node@0.16.7': + dependencies: + '@humanfs/core': 0.19.1 + '@humanwhocodes/retry': 0.4.3 + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/retry@0.4.3': {} + + '@img/sharp-darwin-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-darwin-arm64': 1.0.4 + optional: true + + '@img/sharp-darwin-x64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-darwin-x64': 1.0.4 + optional: true + + '@img/sharp-libvips-darwin-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-darwin-x64@1.0.4': + optional: true + + '@img/sharp-libvips-linux-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-linux-arm@1.0.5': + optional: true + + '@img/sharp-libvips-linux-s390x@1.0.4': + optional: true + + '@img/sharp-libvips-linux-x64@1.0.4': + optional: true + + '@img/sharp-libvips-linuxmusl-arm64@1.0.4': + optional: true + + '@img/sharp-libvips-linuxmusl-x64@1.0.4': + optional: true - '@eslint/js@9.39.2': {} + '@img/sharp-linux-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm64': 1.0.4 + optional: true - '@eslint/object-schema@2.1.7': {} + '@img/sharp-linux-arm@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-arm': 1.0.5 + optional: true - '@eslint/plugin-kit@0.4.1': - dependencies: - '@eslint/core': 0.17.0 - levn: 0.4.1 + '@img/sharp-linux-s390x@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-s390x': 1.0.4 + optional: true - '@graphql-typed-document-node/core@3.2.0(graphql@16.12.0)': - dependencies: - graphql: 16.12.0 + '@img/sharp-linux-x64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linux-x64': 1.0.4 + optional: true - '@humanfs/core@0.19.1': {} + '@img/sharp-linuxmusl-arm64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.0.4 + optional: true - '@humanfs/node@0.16.7': + '@img/sharp-linuxmusl-x64@0.33.5': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-x64': 1.0.4 + optional: true + + '@img/sharp-wasm32@0.33.5': dependencies: - '@humanfs/core': 0.19.1 - '@humanwhocodes/retry': 0.4.3 + '@emnapi/runtime': 1.8.1 + optional: true - '@humanwhocodes/module-importer@1.0.1': {} + '@img/sharp-win32-ia32@0.33.5': + optional: true - '@humanwhocodes/retry@0.4.3': {} + '@img/sharp-win32-x64@0.33.5': + optional: true '@isaacs/balanced-match@4.0.1': {} @@ -3908,10 +5037,18 @@ snapshots: dependencies: yanse: 0.2.1 + '@pgpmjs/logger@2.2.1': + dependencies: + yanse: 0.2.1 + '@pgpmjs/types@2.16.0': dependencies: pg-env: 1.4.0 + '@pgpmjs/types@2.17.0': + dependencies: + pg-env: 1.5.0 + '@pkgjs/parseargs@0.11.0': optional: true @@ -3927,6 +5064,344 @@ snapshots: dependencies: '@sinonjs/commons': 3.0.1 + '@smithy/abort-controller@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/chunked-blob-reader-native@4.2.3': + dependencies: + '@smithy/util-base64': 4.3.2 + tslib: 2.8.1 + + '@smithy/chunked-blob-reader@5.2.2': + dependencies: + tslib: 2.8.1 + + 
'@smithy/config-resolver@4.4.10': + dependencies: + '@smithy/node-config-provider': 4.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-config-provider': 4.2.2 + '@smithy/util-endpoints': 3.3.2 + '@smithy/util-middleware': 4.2.11 + tslib: 2.8.1 + + '@smithy/core@3.23.9': + dependencies: + '@smithy/middleware-serde': 4.2.12 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-stream': 4.5.17 + '@smithy/util-utf8': 4.2.2 + '@smithy/uuid': 1.1.2 + tslib: 2.8.1 + + '@smithy/credential-provider-imds@4.2.11': + dependencies: + '@smithy/node-config-provider': 4.3.11 + '@smithy/property-provider': 4.2.11 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + tslib: 2.8.1 + + '@smithy/eventstream-codec@4.2.11': + dependencies: + '@aws-crypto/crc32': 5.2.0 + '@smithy/types': 4.13.0 + '@smithy/util-hex-encoding': 4.2.2 + tslib: 2.8.1 + + '@smithy/eventstream-serde-browser@4.2.11': + dependencies: + '@smithy/eventstream-serde-universal': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/eventstream-serde-config-resolver@4.3.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/eventstream-serde-node@4.2.11': + dependencies: + '@smithy/eventstream-serde-universal': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/eventstream-serde-universal@4.2.11': + dependencies: + '@smithy/eventstream-codec': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/fetch-http-handler@5.3.13': + dependencies: + '@smithy/protocol-http': 5.3.11 + '@smithy/querystring-builder': 4.2.11 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + tslib: 2.8.1 + + '@smithy/hash-blob-browser@4.2.12': + dependencies: + '@smithy/chunked-blob-reader': 5.2.2 + '@smithy/chunked-blob-reader-native': 4.2.3 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/hash-node@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/hash-stream-node@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/invalid-dependency@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/is-array-buffer@2.2.0': + dependencies: + tslib: 2.8.1 + + '@smithy/is-array-buffer@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/md5-js@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/middleware-content-length@4.2.11': + dependencies: + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/middleware-endpoint@4.4.23': + dependencies: + '@smithy/core': 3.23.9 + '@smithy/middleware-serde': 4.2.12 + '@smithy/node-config-provider': 4.3.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + '@smithy/url-parser': 4.2.11 + '@smithy/util-middleware': 4.2.11 + tslib: 2.8.1 + + '@smithy/middleware-retry@4.4.40': + dependencies: + '@smithy/node-config-provider': 4.3.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/service-error-classification': 4.2.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-retry': 4.2.11 + '@smithy/uuid': 1.1.2 + tslib: 2.8.1 + + '@smithy/middleware-serde@4.2.12': + dependencies: + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/middleware-stack@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + 
tslib: 2.8.1 + + '@smithy/node-config-provider@4.3.11': + dependencies: + '@smithy/property-provider': 4.2.11 + '@smithy/shared-ini-file-loader': 4.4.6 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/node-http-handler@4.4.14': + dependencies: + '@smithy/abort-controller': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/querystring-builder': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/property-provider@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/protocol-http@5.3.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/querystring-builder@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + '@smithy/util-uri-escape': 4.2.2 + tslib: 2.8.1 + + '@smithy/querystring-parser@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/service-error-classification@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + + '@smithy/shared-ini-file-loader@4.4.6': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/signature-v4@5.3.11': + dependencies: + '@smithy/is-array-buffer': 4.2.2 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-hex-encoding': 4.2.2 + '@smithy/util-middleware': 4.2.11 + '@smithy/util-uri-escape': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/smithy-client@4.12.3': + dependencies: + '@smithy/core': 3.23.9 + '@smithy/middleware-endpoint': 4.4.23 + '@smithy/middleware-stack': 4.2.11 + '@smithy/protocol-http': 5.3.11 + '@smithy/types': 4.13.0 + '@smithy/util-stream': 4.5.17 + tslib: 2.8.1 + + '@smithy/types@4.13.0': + dependencies: + tslib: 2.8.1 + + '@smithy/url-parser@4.2.11': + dependencies: + '@smithy/querystring-parser': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/util-base64@4.3.2': + dependencies: + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + '@smithy/util-body-length-browser@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/util-body-length-node@4.2.3': + dependencies: + tslib: 2.8.1 + + '@smithy/util-buffer-from@2.2.0': + dependencies: + '@smithy/is-array-buffer': 2.2.0 + tslib: 2.8.1 + + '@smithy/util-buffer-from@4.2.2': + dependencies: + '@smithy/is-array-buffer': 4.2.2 + tslib: 2.8.1 + + '@smithy/util-config-provider@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/util-defaults-mode-browser@4.3.39': + dependencies: + '@smithy/property-provider': 4.2.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/util-defaults-mode-node@4.2.42': + dependencies: + '@smithy/config-resolver': 4.4.10 + '@smithy/credential-provider-imds': 4.2.11 + '@smithy/node-config-provider': 4.3.11 + '@smithy/property-provider': 4.2.11 + '@smithy/smithy-client': 4.12.3 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/util-endpoints@3.3.2': + dependencies: + '@smithy/node-config-provider': 4.3.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/util-hex-encoding@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/util-middleware@4.2.11': + dependencies: + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/util-retry@4.2.11': + dependencies: + '@smithy/service-error-classification': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/util-stream@4.5.17': + dependencies: + '@smithy/fetch-http-handler': 5.3.13 + '@smithy/node-http-handler': 4.4.14 + '@smithy/types': 4.13.0 + '@smithy/util-base64': 4.3.2 + '@smithy/util-buffer-from': 4.2.2 + '@smithy/util-hex-encoding': 4.2.2 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + + 
'@smithy/util-uri-escape@4.2.2': + dependencies: + tslib: 2.8.1 + + '@smithy/util-utf8@2.3.0': + dependencies: + '@smithy/util-buffer-from': 2.2.0 + tslib: 2.8.1 + + '@smithy/util-utf8@4.2.2': + dependencies: + '@smithy/util-buffer-from': 4.2.2 + tslib: 2.8.1 + + '@smithy/util-waiter@4.2.11': + dependencies: + '@smithy/abort-controller': 4.2.11 + '@smithy/types': 4.13.0 + tslib: 2.8.1 + + '@smithy/uuid@1.1.2': + dependencies: + tslib: 2.8.1 + '@styled-system/background@5.1.2': dependencies: '@styled-system/core': 5.1.2 @@ -4393,6 +5868,8 @@ snapshots: base-64@1.0.0: {} + base64-js@1.5.1: {} + baseline-browser-mapping@2.9.11: {} bcrypt-pbkdf@1.0.2: @@ -4432,6 +5909,8 @@ snapshots: boolbase@1.0.0: {} + bowser@2.14.1: {} + brace-expansion@1.1.12: dependencies: balanced-match: 1.0.2 @@ -4463,6 +5942,11 @@ snapshots: buffer-from@1.1.2: {} + buffer@5.6.0: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + bytes@3.1.0: {} bytes@3.1.2: {} @@ -4582,6 +6066,16 @@ snapshots: color-name@1.1.4: {} + color-string@1.9.1: + dependencies: + color-name: 1.1.4 + simple-swizzle: 0.2.4 + + color@4.2.3: + dependencies: + color-convert: 2.0.1 + color-string: 1.9.1 + combined-stream@1.0.8: dependencies: delayed-stream: 1.0.0 @@ -4693,6 +6187,8 @@ snapshots: depd@2.0.0: {} + detect-libc@2.1.2: {} + detect-newline@3.1.0: {} dom-serializer@0.1.1: @@ -4921,6 +6417,8 @@ snapshots: etag@1.8.1: {} + events@3.3.0: {} + execa@5.1.1: dependencies: cross-spawn: 7.0.6 @@ -4987,6 +6485,13 @@ snapshots: fast-levenshtein@2.0.6: {} + fast-xml-builder@1.0.0: {} + + fast-xml-parser@5.4.1: + dependencies: + fast-xml-builder: 1.0.0 + strnum: 2.2.0 + fb-watchman@2.0.2: dependencies: bser: 2.1.1 @@ -5270,6 +6775,8 @@ snapshots: dependencies: safer-buffer: 2.1.2 + ieee754@1.2.1: {} + ignore@5.3.2: {} ignore@7.0.5: {} @@ -5301,6 +6808,8 @@ snapshots: is-arrayish@0.2.1: {} + is-arrayish@0.3.4: {} + is-binary-path@2.1.0: dependencies: binary-extensions: 2.3.0 @@ -5779,6 +7288,8 @@ snapshots: lru-cache@11.2.5: {} + lru-cache@11.2.6: {} + lru-cache@5.1.1: dependencies: yallist: 3.1.1 @@ -6325,21 +7836,41 @@ snapshots: transitivePeerDependencies: - pg-native + pg-cache@3.1.1: + dependencies: + '@pgpmjs/logger': 2.2.1 + '@pgpmjs/types': 2.17.0 + lru-cache: 11.2.6 + pg: 8.20.0 + pg-env: 1.5.0 + transitivePeerDependencies: + - pg-native + pg-cloudflare@1.3.0: optional: true pg-connection-string@2.11.0: {} + pg-connection-string@2.12.0: {} + pg-env@1.4.0: {} + pg-env@1.5.0: {} + pg-int8@1.0.1: {} pg-pool@3.11.0(pg@8.17.1): dependencies: pg: 8.17.1 + pg-pool@3.13.0(pg@8.20.0): + dependencies: + pg: 8.20.0 + pg-protocol@1.11.0: {} + pg-protocol@1.13.0: {} + pg-types@2.2.0: dependencies: pg-int8: 1.0.1 @@ -6358,6 +7889,16 @@ snapshots: optionalDependencies: pg-cloudflare: 1.3.0 + pg@8.20.0: + dependencies: + pg-connection-string: 2.12.0 + pg-pool: 3.13.0(pg@8.20.0) + pg-protocol: 1.13.0 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.3.0 + pgpass@1.0.5: dependencies: split2: 4.2.0 @@ -6577,6 +8118,32 @@ snapshots: shallowequal@1.1.0: {} + sharp@0.33.5: + dependencies: + color: 4.2.3 + detect-libc: 2.1.2 + semver: 7.7.3 + optionalDependencies: + '@img/sharp-darwin-arm64': 0.33.5 + '@img/sharp-darwin-x64': 0.33.5 + '@img/sharp-libvips-darwin-arm64': 1.0.4 + '@img/sharp-libvips-darwin-x64': 1.0.4 + '@img/sharp-libvips-linux-arm': 1.0.5 + '@img/sharp-libvips-linux-arm64': 1.0.4 + '@img/sharp-libvips-linux-s390x': 1.0.4 + '@img/sharp-libvips-linux-x64': 1.0.4 + '@img/sharp-libvips-linuxmusl-arm64': 1.0.4 + 
'@img/sharp-libvips-linuxmusl-x64': 1.0.4 + '@img/sharp-linux-arm': 0.33.5 + '@img/sharp-linux-arm64': 0.33.5 + '@img/sharp-linux-s390x': 0.33.5 + '@img/sharp-linux-x64': 0.33.5 + '@img/sharp-linuxmusl-arm64': 0.33.5 + '@img/sharp-linuxmusl-x64': 0.33.5 + '@img/sharp-wasm32': 0.33.5 + '@img/sharp-win32-ia32': 0.33.5 + '@img/sharp-win32-x64': 0.33.5 + shebang-command@2.0.0: dependencies: shebang-regex: 3.0.0 @@ -6621,6 +8188,10 @@ snapshots: '@pgpmjs/types': 2.16.0 nodemailer: 6.10.1 + simple-swizzle@0.2.4: + dependencies: + is-arrayish: 0.3.4 + slash@3.0.0: {} slick@1.12.2: {} @@ -6658,6 +8229,11 @@ snapshots: statuses@2.0.2: {} + stream-browserify@3.0.0: + dependencies: + inherits: 2.0.4 + readable-stream: 3.6.2 + string-length@4.0.2: dependencies: char-regex: 1.0.2 @@ -6693,6 +8269,8 @@ snapshots: strip-json-comments@3.1.1: {} + strnum@2.2.0: {} + styled-components@5.3.11(@babel/core@7.28.5)(react-dom@16.14.0(react@16.14.0))(react-is@18.3.1)(react@16.14.0): dependencies: '@babel/helper-module-imports': 7.27.1(supports-color@5.5.0) diff --git a/scripts/dev.ts b/scripts/dev.ts index f2ed187..25dd45f 100644 --- a/scripts/dev.ts +++ b/scripts/dev.ts @@ -38,6 +38,13 @@ const sharedEnv: Record = { SMTP_PORT: '1025', LOCAL_APP_PORT: '3000', SEND_EMAIL_LINK_DRY_RUN: 'true', + // MinIO / S3 (for process-image and other upload functions) + BUCKET_PROVIDER: 'minio', + BUCKET_NAME: 'test-bucket', + AWS_ACCESS_KEY: 'minioadmin', + AWS_SECRET_KEY: 'minioadmin', + AWS_REGION: 'us-east-1', + MINIO_ENDPOINT: 'http://localhost:9000', }; // --- Process definitions --- @@ -64,6 +71,11 @@ const allProcesses: ProcessDef[] = [ script: path.resolve(ROOT, 'generated/send-email-link/dist/index.js'), port: 8082, }, + { + name: 'process-image', + script: path.resolve(ROOT, 'generated/process-image/dist/index.js'), + port: 8083, + }, ]; // --- CLI args --- @@ -78,7 +90,7 @@ function getJobServiceEnv(): Record { return { JOBS_SCHEMA: 'app_jobs', JOBS_SUPPORT_ANY: 'false', - JOBS_SUPPORTED: 'send-email-link', + JOBS_SUPPORTED: 'send-email-link,process-image', HOSTNAME: 'knative-job-service-local', INTERNAL_JOBS_CALLBACK_PORT: '8080', INTERNAL_JOBS_CALLBACK_URL: 'http://localhost:8080/callback', @@ -87,6 +99,7 @@ function getJobServiceEnv(): Record { INTERNAL_GATEWAY_DEVELOPMENT_MAP: JSON.stringify({ 'send-email-link': 'http://localhost:8082', 'simple-email': 'http://localhost:8081', + 'process-image': 'http://localhost:8083', }), }; } From e0f97e4999a79a38a22218c295acd2c7694361f1 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Mon, 9 Mar 2026 16:41:03 +0800 Subject: [PATCH 02/13] add e2e test and fixed review items --- .../__tests__/handler.e2e.test.ts | 312 ++++++++++++++++++ functions/process-image/handler.ts | 111 ++++--- package.json | 8 +- pnpm-lock.yaml | 96 ++++-- tests/__mocks__/@pgpmjs/logger.ts | 8 + 5 files changed, 457 insertions(+), 78 deletions(-) create mode 100644 functions/process-image/__tests__/handler.e2e.test.ts diff --git a/functions/process-image/__tests__/handler.e2e.test.ts b/functions/process-image/__tests__/handler.e2e.test.ts new file mode 100644 index 0000000..3518fcd --- /dev/null +++ b/functions/process-image/__tests__/handler.e2e.test.ts @@ -0,0 +1,312 @@ +/** + * E2E tests for process-image handler. 
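+ * Covers version generation, idempotency, the size guard, MIME derivation, and validation.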
+ *
+ * Requires: Postgres on :5432, MinIO on :9000 (docker compose up -d)
+ * Run: pnpm test:unit -- handler.e2e
+ */
+
+import { S3Client, PutObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';
+import { Client as PgClient } from 'pg';
+import sharp from 'sharp';
+
+import handler from '../handler';
+import { createMockContext } from '../../../tests/helpers/mock-context';
+
+// ---------------------------------------------------------------------------
+// Infra helpers
+// ---------------------------------------------------------------------------
+
+const TEST_SCHEMA = 'public';
+const TEST_TABLE = 'test_process_images';
+const BUCKET = 'test-bucket';
+
+const ENV: Record<string, string> = {
+  PGHOST: 'localhost',
+  PGPORT: '5432',
+  PGUSER: 'postgres',
+  PGPASSWORD: 'password',
+  PGDATABASE: 'constructive',
+  BUCKET_PROVIDER: 'minio',
+  BUCKET_NAME: BUCKET,
+  AWS_ACCESS_KEY: 'minioadmin',
+  AWS_SECRET_KEY: 'minioadmin',
+  AWS_REGION: 'us-east-1',
+  MINIO_ENDPOINT: 'http://localhost:9000',
+};
+
+function makeS3(): S3Client {
+  return new S3Client({
+    region: 'us-east-1',
+    credentials: { accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin' },
+    endpoint: 'http://localhost:9000',
+    forcePathStyle: true,
+  });
+}
+
+function makePg(): PgClient {
+  return new PgClient({
+    host: 'localhost',
+    port: 5432,
+    user: 'postgres',
+    password: 'password',
+    database: 'constructive',
+  });
+}
+
+async function generateTestImage(
+  width: number,
+  height: number,
+  format: 'jpeg' | 'png' = 'jpeg',
+): Promise<Buffer> {
+  const raw = Buffer.alloc(width * height * 3, 0);
+  // paint a simple gradient so it's not blank
+  for (let i = 0; i < raw.length; i += 3) {
+    raw[i] = (i / 3) % 256; // R
+    raw[i + 1] = ((i / 3) >> 8) % 256; // G
+    raw[i + 2] = 128; // B
+  }
+  return sharp(raw, { raw: { width, height, channels: 3 } })
+    [format]()
+    .toBuffer();
+}
+
+// ---------------------------------------------------------------------------
+// Suite
+// ---------------------------------------------------------------------------
+
+describe('process-image handler e2e', () => {
+  let pg: PgClient;
+  let s3: S3Client;
+  const s3Keys: string[] = [];
+
+  beforeAll(async () => {
+    pg = makePg();
+    await pg.connect();
+    s3 = makeS3();
+
+    // Create test table
+    await pg.query(`
+      CREATE TABLE IF NOT EXISTS ${TEST_SCHEMA}.${TEST_TABLE} (
+        id serial PRIMARY KEY,
+        image jsonb
+      )
+    `);
+  });
+
+  afterAll(async () => {
+    // Cleanup S3 objects
+    for (const key of s3Keys) {
+      try {
+        await s3.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: key }));
+      } catch { /* ignore */ }
+    }
+
+    // Drop test table
+    await pg.query(`DROP TABLE IF EXISTS ${TEST_SCHEMA}.${TEST_TABLE}`);
+    await pg.end();
+    s3.destroy();
+
+    // Teardown pg-cache pools created by the handler
+    const { teardownPgPools } = require('pg-cache');
+    if (teardownPgPools) await teardownPgPools();
+  });
+
+  // Helper: upload image to MinIO and insert DB row, return row id + key
+  async function setupTestRow(
+    imageBuffer: Buffer,
+    mime: string,
+    filename: string,
+  ): Promise<{ id: number; key: string }> {
+    const key = `e2e-test-${Date.now()}-${filename}`;
+    s3Keys.push(key);
+
+    await s3.send(new PutObjectCommand({
+      Bucket: BUCKET,
+      Key: key,
+      Body: imageBuffer,
+      ContentType: mime,
+    }));
+
+    const url = `http://localhost:9000/${BUCKET}/${key}`;
+    const imageValue = JSON.stringify({ url, mime, filename });
+
+    const res = await pg.query(
+      `INSERT INTO ${TEST_SCHEMA}.${TEST_TABLE} (image) VALUES ($1::jsonb) RETURNING id`,
+      [imageValue],
+    );
+    return { 
id: res.rows[0].id, key };
+  }
+
+  function callHandler(
+    id: number,
+    envOverrides: Record<string, string> = {},
+  ) {
+    const ctx = createMockContext({ env: { ...ENV, ...envOverrides } });
+    return handler(
+      {
+        schema: TEST_SCHEMA,
+        table: TEST_TABLE,
+        idFields: ['id'],
+        idValues: [id],
+        fields: ['image'],
+      },
+      ctx as any,
+    );
+  }
+
+  // -----------------------------------------------------------------------
+  // Test 1: Happy path — generates versions with correct MIME
+  // -----------------------------------------------------------------------
+
+  it('generates thumbnail + medium versions for an 800x600 JPEG', async () => {
+    const jpegBuf = await generateTestImage(800, 600, 'jpeg');
+    const { id } = await setupTestRow(jpegBuf, 'image/jpeg', 'test.jpg');
+
+    const result: any = await callHandler(id);
+
+    expect(result.success).toBe(true);
+    const imageResult = result.results.image;
+    expect(imageResult.versions).toBeDefined();
+    expect(imageResult.versions.length).toBeGreaterThanOrEqual(1);
+
+    // thumbnail should exist (800 > 150)
+    const thumb = imageResult.versions.find((v: any) => v.name === 'thumbnail');
+    expect(thumb).toBeDefined();
+    expect(thumb.width).toBeLessThanOrEqual(150);
+    expect(thumb.height).toBeLessThanOrEqual(150);
+
+    // MIME should be derived from format, not hardcoded
+    expect(thumb.mime).toBe('image/jpeg');
+
+    // medium should exist (800 > 600)
+    const medium = imageResult.versions.find((v: any) => v.name === 'medium');
+    expect(medium).toBeDefined();
+    expect(medium.width).toBeLessThanOrEqual(600);
+
+    // large should NOT exist (800 < 1200)
+    const large = imageResult.versions.find((v: any) => v.name === 'large');
+    expect(large).toBeUndefined();
+
+    // Track version keys for cleanup
+    for (const v of imageResult.versions) {
+      s3Keys.push(v.key);
+    }
+
+    // Verify DB was updated
+    const dbRow = await pg.query(
+      `SELECT image FROM ${TEST_SCHEMA}.${TEST_TABLE} WHERE id = $1`,
+      [id],
+    );
+    const dbImage = dbRow.rows[0].image;
+    expect(dbImage.versions.length).toBe(imageResult.versions.length);
+  });
+
+  // -----------------------------------------------------------------------
+  // Test 2: Idempotency — second call skips
+  // -----------------------------------------------------------------------
+
+  it('skips processing when versions already exist', async () => {
+    const jpegBuf = await generateTestImage(800, 600, 'jpeg');
+    const { id } = await setupTestRow(jpegBuf, 'image/jpeg', 'idem.jpg');
+
+    // First call: process
+    await callHandler(id);
+
+    // Second call: should skip
+    const result: any = await callHandler(id);
+    expect(result.success).toBe(true);
+    const imageResult = result.results.image;
+    expect(imageResult.skipped).toBe(true);
+    expect(imageResult.reason).toBe('versions_exist');
+  });
+
+  // -----------------------------------------------------------------------
+  // Test 3: File too large — MAX_IMAGE_SIZE guard
+  // -----------------------------------------------------------------------
+
+  it('skips files exceeding MAX_IMAGE_SIZE', async () => {
+    const jpegBuf = await generateTestImage(400, 300, 'jpeg');
+    const { id } = await setupTestRow(jpegBuf, 'image/jpeg', 'big.jpg');
+
+    // Set MAX_IMAGE_SIZE absurdly low so even our small test image exceeds it
+    const result: any = await callHandler(id, { MAX_IMAGE_SIZE: '100' });
+
+    expect(result.success).toBe(true);
+    const imageResult = result.results.image;
+    expect(imageResult.skipped).toBe(true);
+    expect(imageResult.reason).toBe('file_too_large');
+    expect(imageResult.size).toBeGreaterThan(100);
});
+
+  // -----------------------------------------------------------------------
+  // Test 4: MIME derivation — PNG gets image/png, not image/jpeg
+  // -----------------------------------------------------------------------
+
+  it('derives MIME from detected format (PNG → image/png)', async () => {
+    const pngBuf = await generateTestImage(800, 600, 'png');
+    const { id } = await setupTestRow(pngBuf, 'image/png', 'test.png');
+
+    const result: any = await callHandler(id);
+
+    expect(result.success).toBe(true);
+    const imageResult = result.results.image;
+    expect(imageResult.versions.length).toBeGreaterThanOrEqual(1);
+
+    // All generated versions should have image/png
+    for (const v of imageResult.versions) {
+      expect(v.mime).toBe('image/png');
+      s3Keys.push(v.key);
+    }
+  });
+
+  // -----------------------------------------------------------------------
+  // Test 5: MIME derivation — mismatched stored MIME corrected
+  // -----------------------------------------------------------------------
+
+  it('corrects wrong stored MIME using detected format', async () => {
+    // Upload a real JPEG but store it with wrong MIME (image/png)
+    const jpegBuf = await generateTestImage(800, 600, 'jpeg');
+    const key = `e2e-test-${Date.now()}-wrong-mime.jpg`;
+    s3Keys.push(key);
+
+    await s3.send(new PutObjectCommand({
+      Bucket: BUCKET,
+      Key: key,
+      Body: jpegBuf,
+      ContentType: 'image/jpeg',
+    }));
+
+    const url = `http://localhost:9000/${BUCKET}/${key}`;
+    // Deliberately store wrong MIME in DB
+    const imageValue = JSON.stringify({ url, mime: 'image/png', filename: 'wrong-mime.jpg' });
+    const res = await pg.query(
+      `INSERT INTO ${TEST_SCHEMA}.${TEST_TABLE} (image) VALUES ($1::jsonb) RETURNING id`,
+      [imageValue],
+    );
+    const id = res.rows[0].id;
+
+    const result: any = await callHandler(id);
+
+    expect(result.success).toBe(true);
+    const imageResult = result.results.image;
+
+    // Versions should have image/jpeg (detected), NOT image/png (stored)
+    for (const v of imageResult.versions) {
+      expect(v.mime).toBe('image/jpeg');
+      s3Keys.push(v.key);
+    }
+  });
+
+  // -----------------------------------------------------------------------
+  // Test 6: Validation — missing params
+  // -----------------------------------------------------------------------
+
+  it('returns error for missing schema', async () => {
+    const ctx = createMockContext({ env: ENV });
+    const result: any = await handler(
+      { schema: '', table: 'foo', idFields: ['id'], idValues: [1], fields: ['image'] },
+      ctx as any,
+    );
+    expect(result.error).toMatch(/Missing/);
+  });
+});
diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
index cbeeac5..99d3a7f 100644
--- a/functions/process-image/handler.ts
+++ b/functions/process-image/handler.ts
@@ -1,6 +1,6 @@
 import { DeleteObjectCommand, GetObjectCommand, S3Client } from '@aws-sdk/client-s3';
 import { Upload } from '@aws-sdk/lib-storage';
-import type { FunctionHandler } from '@constructive-io/fn-runtime';
+import type { FunctionContext, FunctionHandler } from '@constructive-io/fn-runtime';
 import { getPgPool } from 'pg-cache';
 import { extname } from 'path';
 import sharp from 'sharp';
@@ -137,7 +137,7 @@ async function streamToBuffer(stream: Readable): Promise<Buffer> {
 async function deleteS3Objects(
   s3: S3Client,
   objects: { bucket: string; key: string }[],
-  log: { info: (...args: unknown[]) => void; error: (...args: unknown[]) => void },
+  log: FunctionContext['log'],
 ): Promise<void> {
   for (const obj of objects) {
     try {
@@ -265,12 +265,29 @@ const handler: 
FunctionHandler = async (params, context) => new GetObjectCommand({ Bucket: bucket, Key: key }), ); + const maxImageSize = Number(env.MAX_IMAGE_SIZE) || 52_428_800; // 50MB + if (response.ContentLength && response.ContentLength > maxImageSize) { + log.warn( + `[process-image] field "${field}" file too large (${response.ContentLength} bytes, max ${maxImageSize}), skipping`, + ); + results[field] = { skipped: true, reason: 'file_too_large', size: response.ContentLength }; + continue; + } + if (!(response.Body instanceof Readable)) { throw new Error(`S3 response body is not a readable stream for key=${key}`); } const originalBuffer = await streamToBuffer(response.Body); + if (originalBuffer.length > maxImageSize) { + log.warn( + `[process-image] field "${field}" buffer too large (${originalBuffer.length} bytes, max ${maxImageSize}), skipping`, + ); + results[field] = { skipped: true, reason: 'file_too_large', size: originalBuffer.length }; + continue; + } + // --- Gate: verify this is a processable image --- let metadata: sharp.Metadata; @@ -304,49 +321,63 @@ const handler: FunctionHandler = async (params, context) => // --- Generate versions --- const generatedVersions: ImageVersion[] = []; + const uploadedObjects: { bucket: string; key: string }[] = []; + + try { + for (const ver of versions) { + if (originalWidth <= ver.maxWidth && originalHeight <= ver.maxHeight) { + log.info( + `[process-image] original (${originalWidth}x${originalHeight}) fits within ${ver.name} (${ver.maxWidth}x${ver.maxHeight}), skipping`, + ); + continue; + } + + const resized = await sharp(originalBuffer) + .resize(ver.maxWidth, ver.maxHeight, { + fit: 'inside', + withoutEnlargement: true, + }) + .toBuffer({ resolveWithObject: true }); + + const vKey = deriveVersionKey(key, ver.name); + const vUrl = buildObjectUrl(env, bucket, vKey); + const mime = resized.info.format + ? 
`image/${resized.info.format}` + : (fieldValue.mime || 'image/jpeg'); + + const uploadResult = await new Upload({ + client: s3, + params: { + Bucket: bucket, + Key: vKey, + Body: resized.data, + ContentType: mime, + }, + }).done(); + + uploadedObjects.push({ bucket, key: vKey }); + + generatedVersions.push({ + name: ver.name, + key: vKey, + bucket, + url: uploadResult.Location || vUrl, + width: resized.info.width, + height: resized.info.height, + mime, + }); - for (const ver of versions) { - if (originalWidth <= ver.maxWidth && originalHeight <= ver.maxHeight) { log.info( - `[process-image] original (${originalWidth}x${originalHeight}) fits within ${ver.name} (${ver.maxWidth}x${ver.maxHeight}), skipping`, + `[process-image] uploaded ${ver.name}: ${resized.info.width}x${resized.info.height}`, ); - continue; } - - const resized = await sharp(originalBuffer) - .resize(ver.maxWidth, ver.maxHeight, { - fit: 'inside', - withoutEnlargement: true, - }) - .toBuffer({ resolveWithObject: true }); - - const vKey = deriveVersionKey(key, ver.name); - const vUrl = buildObjectUrl(env, bucket, vKey); - const mime = fieldValue.mime || 'image/jpeg'; - - const uploadResult = await new Upload({ - client: s3, - params: { - Bucket: bucket, - Key: vKey, - Body: resized.data, - ContentType: mime, - }, - }).done(); - - generatedVersions.push({ - name: ver.name, - key: vKey, - bucket, - url: uploadResult.Location || vUrl, - width: resized.info.width, - height: resized.info.height, - mime, - }); - - log.info( - `[process-image] uploaded ${ver.name}: ${resized.info.width}x${resized.info.height}`, + } catch (err) { + log.error( + `[process-image] version generation failed for "${field}", rolling back ${uploadedObjects.length} uploads`, + err, ); + await deleteS3Objects(s3, uploadedObjects, log); + throw err; } // --- Update database (rollback uploads on failure) --- diff --git a/package.json b/package.json index 61ff782..a433c50 100644 --- a/package.json +++ b/package.json @@ -30,19 +30,23 @@ "test:integration": "jest --testPathPatterns='tests/integration'" }, "devDependencies": { + "@aws-sdk/client-s3": "^3.1004.0", + "@aws-sdk/lib-storage": "^3.1004.0", "@eslint/js": "^9.39.2", "@types/jest": "^30.0.0", "@types/node": "^22.10.4", + "@types/pg": "^8.16.0", "eslint": "^9.39.2", "eslint-config-prettier": "^10.1.8", "eslint-plugin-simple-import-sort": "^12.1.0", "eslint-plugin-unused-imports": "^4.0.0", "globals": "^16.5.0", "jest": "^30.2.0", + "pg": "8.17.1", "prettier": "^3.7.4", + "sharp": "^0.33.5", "ts-jest": "^29.4.0", "typescript": "^5.1.6", "typescript-eslint": "^8.33.0" - }, - "dependencies": {} + } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f2368d2..9a1e190 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -8,6 +8,12 @@ importers: .: devDependencies: + '@aws-sdk/client-s3': + specifier: ^3.1004.0 + version: 3.1004.0 + '@aws-sdk/lib-storage': + specifier: ^3.1004.0 + version: 3.1004.0(@aws-sdk/client-s3@3.1004.0) '@eslint/js': specifier: ^9.39.2 version: 9.39.2 @@ -17,6 +23,9 @@ importers: '@types/node': specifier: ^22.10.4 version: 22.19.3 + '@types/pg': + specifier: ^8.16.0 + version: 8.16.0 eslint: specifier: ^9.39.2 version: 9.39.2 @@ -35,9 +44,15 @@ importers: jest: specifier: ^30.2.0 version: 30.2.0(@types/node@22.19.3) + pg: + specifier: 8.17.1 + version: 8.17.1 prettier: specifier: ^3.7.4 version: 3.7.4 + sharp: + specifier: ^0.33.5 + version: 0.33.5 ts-jest: specifier: ^29.4.0 version: 
29.4.6(@babel/core@7.28.5)(@jest/transform@30.2.0)(@jest/types@30.2.0)(babel-jest@30.2.0(@babel/core@7.28.5))(jest-util@30.2.0)(jest@30.2.0(@types/node@22.19.3))(typescript@5.9.3) @@ -2361,6 +2376,7 @@ packages: glob@10.5.0: resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==} + deprecated: Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me hasBin: true glob@11.1.0: @@ -3237,9 +3253,6 @@ packages: pg-cloudflare@1.3.0: resolution: {integrity: sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==} - pg-connection-string@2.11.0: - resolution: {integrity: sha512-kecgoJwhOpxYU21rZjULrmrBJ698U2RxXofKVzOn5UDj61BPj/qMb7diYUR1nLScCDbrztQFl1TaQZT0t1EtzQ==} - pg-connection-string@2.12.0: resolution: {integrity: sha512-U7qg+bpswf3Cs5xLzRqbXbQl85ng0mfSV/J0nnA31MCLgvEaAo7CIhmeyrmJpOr7o+zm0rXK+hNnT5l9RHkCkQ==} @@ -3253,19 +3266,11 @@ packages: resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} engines: {node: '>=4.0.0'} - pg-pool@3.11.0: - resolution: {integrity: sha512-MJYfvHwtGp870aeusDh+hg9apvOe2zmpZJpyt+BMtzUWlVqbhFmMK6bOBXLBUPd7iRtIF9fZplDc7KrPN3PN7w==} - peerDependencies: - pg: '>=8.0' - pg-pool@3.13.0: resolution: {integrity: sha512-gB+R+Xud1gLFuRD/QgOIgGOBE2KCQPaPwkzBBGC9oG69pHTkhQeIuejVIk3/cnDyX39av2AxomQiyPT13WKHQA==} peerDependencies: pg: '>=8.0' - pg-protocol@1.11.0: - resolution: {integrity: sha512-pfsxk2M9M3BuGgDOfuy37VNRRX3jmKgMjcvAcWqNDpZSf4cUmv8HSOl5ViRQFsfARFn0KuUQTgLxVMbNq5NW3g==} - pg-protocol@1.13.0: resolution: {integrity: sha512-zzdvXfS6v89r6v7OcFCHfHlyG/wvry1ALxZo4LqgUoy7W9xhBDMaqOuMiF3qEV45VqsN6rdlcehHrfDtlCPc8w==} @@ -4390,11 +4395,11 @@ snapshots: '@babel/helpers': 7.28.4 '@babel/parser': 7.28.5 '@babel/template': 7.27.2 - '@babel/traverse': 7.28.5(supports-color@5.5.0) + '@babel/traverse': 7.28.5 '@babel/types': 7.28.5 '@jridgewell/remapping': 2.3.5 convert-source-map: 2.0.0 - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 gensync: 1.0.0-beta.2 json5: 2.2.3 semver: 6.3.1 @@ -4423,6 +4428,13 @@ snapshots: '@babel/helper-globals@7.28.0': {} + '@babel/helper-module-imports@7.27.1': + dependencies: + '@babel/traverse': 7.28.5 + '@babel/types': 7.28.5 + transitivePeerDependencies: + - supports-color + '@babel/helper-module-imports@7.27.1(supports-color@5.5.0)': dependencies: '@babel/traverse': 7.28.5(supports-color@5.5.0) @@ -4433,9 +4445,9 @@ snapshots: '@babel/helper-module-transforms@7.28.3(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 - '@babel/helper-module-imports': 7.27.1(supports-color@5.5.0) + '@babel/helper-module-imports': 7.27.1 '@babel/helper-validator-identifier': 7.28.5 - '@babel/traverse': 7.28.5(supports-color@5.5.0) + '@babel/traverse': 7.28.5 transitivePeerDependencies: - supports-color @@ -4551,6 +4563,18 @@ snapshots: '@babel/parser': 7.28.5 '@babel/types': 7.28.5 + '@babel/traverse@7.28.5': + dependencies: + '@babel/code-frame': 7.27.1 + '@babel/generator': 7.28.5 + '@babel/helper-globals': 7.28.0 + '@babel/parser': 7.28.5 + '@babel/template': 7.27.2 + '@babel/types': 7.28.5 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + '@babel/traverse@7.28.5(supports-color@5.5.0)': dependencies: '@babel/code-frame': 7.27.1 @@ -4573,7 +4597,7 @@ snapshots: 
'@constructive-io/job-pg@1.1.0': dependencies: '@constructive-io/job-utils': 1.1.0 - '@pgpmjs/logger': 2.1.0 + '@pgpmjs/logger': 2.2.1 pg: 8.17.1 transitivePeerDependencies: - pg-native @@ -4582,7 +4606,7 @@ snapshots: dependencies: '@constructive-io/job-pg': 1.1.0 '@constructive-io/job-utils': 1.1.0 - '@pgpmjs/logger': 2.1.0 + '@pgpmjs/logger': 2.2.1 node-schedule: 1.3.2 transitivePeerDependencies: - pg-native @@ -4590,7 +4614,7 @@ snapshots: '@constructive-io/job-utils@1.1.0': dependencies: '@pgpmjs/env': 2.11.0 - '@pgpmjs/logger': 2.1.0 + '@pgpmjs/logger': 2.2.1 '@pgpmjs/types': 2.16.0 pg-cache: 2.1.0 pg-env: 1.4.0 @@ -4641,7 +4665,7 @@ snapshots: '@eslint/config-array@0.21.1': dependencies: '@eslint/object-schema': 2.1.7 - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -4657,7 +4681,7 @@ snapshots: '@eslint/eslintrc@3.3.3': dependencies: ajv: 6.12.6 - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 espree: 10.4.0 globals: 14.0.0 ignore: 5.3.2 @@ -5535,7 +5559,7 @@ snapshots: '@types/pg@8.16.0': dependencies: '@types/node': 22.19.3 - pg-protocol: 1.11.0 + pg-protocol: 1.13.0 pg-types: 2.2.0 '@types/qs@6.14.0': {} @@ -5592,7 +5616,7 @@ snapshots: '@typescript-eslint/types': 8.55.0 '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3) '@typescript-eslint/visitor-keys': 8.55.0 - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 eslint: 9.39.2 typescript: 5.9.3 transitivePeerDependencies: @@ -5602,7 +5626,7 @@ snapshots: dependencies: '@typescript-eslint/tsconfig-utils': 8.55.0(typescript@5.9.3) '@typescript-eslint/types': 8.55.0 - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -5621,7 +5645,7 @@ snapshots: '@typescript-eslint/types': 8.55.0 '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3) '@typescript-eslint/utils': 8.55.0(eslint@9.39.2)(typescript@5.9.3) - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 eslint: 9.39.2 ts-api-utils: 2.4.0(typescript@5.9.3) typescript: 5.9.3 @@ -5636,7 +5660,7 @@ snapshots: '@typescript-eslint/tsconfig-utils': 8.55.0(typescript@5.9.3) '@typescript-eslint/types': 8.55.0 '@typescript-eslint/visitor-keys': 8.55.0 - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 minimatch: 9.0.5 semver: 7.7.3 tinyglobby: 0.2.15 @@ -6155,6 +6179,10 @@ snapshots: dependencies: ms: 2.0.0 + debug@4.4.3: + dependencies: + ms: 2.1.3 + debug@4.4.3(supports-color@5.5.0): dependencies: ms: 2.1.3 @@ -6373,7 +6401,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.6 - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 escape-string-regexp: 4.0.0 eslint-scope: 8.4.0 eslint-visitor-keys: 4.2.1 @@ -6862,7 +6890,7 @@ snapshots: istanbul-lib-source-maps@5.0.6: dependencies: '@jridgewell/trace-mapping': 0.3.31 - debug: 4.4.3(supports-color@5.5.0) + debug: 4.4.3 istanbul-lib-coverage: 3.2.2 transitivePeerDependencies: - supports-color @@ -7828,7 +7856,7 @@ snapshots: pg-cache@2.1.0: dependencies: - '@pgpmjs/logger': 2.1.0 + '@pgpmjs/logger': 2.2.1 '@pgpmjs/types': 2.16.0 lru-cache: 11.2.5 pg: 8.17.1 @@ -7849,8 +7877,6 @@ snapshots: pg-cloudflare@1.3.0: optional: true - pg-connection-string@2.11.0: {} - pg-connection-string@2.12.0: {} pg-env@1.4.0: {} @@ -7859,7 +7885,7 @@ snapshots: pg-int8@1.0.1: {} - pg-pool@3.11.0(pg@8.17.1): + pg-pool@3.13.0(pg@8.17.1): dependencies: pg: 8.17.1 @@ -7867,8 +7893,6 @@ snapshots: dependencies: pg: 8.20.0 - pg-protocol@1.11.0: {} - pg-protocol@1.13.0: {} pg-types@2.2.0: @@ -7881,9 +7905,9 
@@ snapshots: pg@8.17.1: dependencies: - pg-connection-string: 2.11.0 - pg-pool: 3.11.0(pg@8.17.1) - pg-protocol: 1.11.0 + pg-connection-string: 2.12.0 + pg-pool: 3.13.0(pg@8.17.1) + pg-protocol: 1.13.0 pg-types: 2.2.0 pgpass: 1.0.5 optionalDependencies: diff --git a/tests/__mocks__/@pgpmjs/logger.ts b/tests/__mocks__/@pgpmjs/logger.ts index 1ad9b2e..98cd25e 100644 --- a/tests/__mocks__/@pgpmjs/logger.ts +++ b/tests/__mocks__/@pgpmjs/logger.ts @@ -4,3 +4,11 @@ export const createLogger = jest.fn(() => ({ warn: jest.fn(), debug: jest.fn() })); + +export class Logger { + info = jest.fn(); + error = jest.fn(); + warn = jest.fn(); + debug = jest.fn(); + constructor(_name?: string) {} +} From ca44b06e268a8e8ce3bce4acc78f4439b5ae6a9c Mon Sep 17 00:00:00 2001 From: zetazzz Date: Wed, 11 Mar 2026 23:01:01 +0800 Subject: [PATCH 03/13] added cloud fns --- .../__tests__/handler.e2e.test.ts | 221 ++++++++++++ functions/delete-s3-object/handler.json | 10 + functions/delete-s3-object/handler.ts | 57 ++++ functions/file-cleanup/handler.json | 9 + functions/file-cleanup/handler.ts | 89 +++++ functions/process-image/handler.json | 2 +- functions/process-image/handler.ts | 322 ++++++++++++++++-- pnpm-lock.yaml | 88 +++-- scripts/dev.ts | 14 +- tests/helpers/object-store-schema.ts | 101 ++++++ 10 files changed, 851 insertions(+), 62 deletions(-) create mode 100644 functions/delete-s3-object/__tests__/handler.e2e.test.ts create mode 100644 functions/delete-s3-object/handler.json create mode 100644 functions/delete-s3-object/handler.ts create mode 100644 functions/file-cleanup/handler.json create mode 100644 functions/file-cleanup/handler.ts create mode 100644 tests/helpers/object-store-schema.ts diff --git a/functions/delete-s3-object/__tests__/handler.e2e.test.ts b/functions/delete-s3-object/__tests__/handler.e2e.test.ts new file mode 100644 index 0000000..d442c0d --- /dev/null +++ b/functions/delete-s3-object/__tests__/handler.e2e.test.ts @@ -0,0 +1,221 @@ +/** + * E2E tests for delete-s3-object handler. 
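+ * Covers the happy path plus idempotency when the S3 object, the DB row, or both are already gone.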
+ *
+ * Requires: Postgres on :5432, MinIO on :9000 (docker compose up -d)
+ * Run: npx jest --runInBand functions/delete-s3-object
+ */
+
+import { S3Client, PutObjectCommand, GetObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';
+import { Client as PgClient } from 'pg';
+
+import handler from '../handler';
+import { createMockContext } from '../../../tests/helpers/mock-context';
+import {
+  makePgClient,
+  setupObjectStoreSchema,
+  teardownObjectStoreSchema,
+  cleanObjectStoreRows,
+} from '../../../tests/helpers/object-store-schema';
+
+// ---------------------------------------------------------------------------
+// Infra helpers
+// ---------------------------------------------------------------------------
+
+const SCHEMA = 'object_store_public';
+const BUCKET = 'test-bucket';
+
+const ENV: Record<string, string> = {
+  PGHOST: 'localhost',
+  PGPORT: '5432',
+  PGUSER: 'postgres',
+  PGPASSWORD: 'password',
+  PGDATABASE: 'constructive',
+  AWS_REGION: 'us-east-1',
+  AWS_ACCESS_KEY_ID: 'minioadmin',
+  AWS_SECRET_ACCESS_KEY: 'minioadmin',
+  S3_ENDPOINT: 'http://localhost:9000',
+  S3_BUCKET: BUCKET,
+};
+
+function makeS3(): S3Client {
+  return new S3Client({
+    region: 'us-east-1',
+    credentials: { accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin' },
+    endpoint: 'http://localhost:9000',
+    forcePathStyle: true,
+  });
+}
+
+// ---------------------------------------------------------------------------
+// Suite
+// ---------------------------------------------------------------------------
+
+describe('delete-s3-object handler e2e', () => {
+  let pg: PgClient;
+  let s3: S3Client;
+  const s3Keys: string[] = [];
+
+  beforeAll(async () => {
+    pg = makePgClient();
+    await pg.connect();
+    s3 = makeS3();
+    await setupObjectStoreSchema(pg);
+  });
+
+  afterAll(async () => {
+    for (const key of s3Keys) {
+      try {
+        await s3.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: key }));
+      } catch { /* ignore */ }
+    }
+    await teardownObjectStoreSchema(pg);
+    await pg.end();
+    s3.destroy();
+    try {
+      const pgCache = require('pg-cache');
+      if (pgCache.close) await pgCache.close();
+    } catch { /* ignore */ }
+  });
+
+  afterEach(async () => {
+    await cleanObjectStoreRows(pg);
+  });
+
+  function callHandler(file_id: string, database_id: number, key: string) {
+    const ctx = createMockContext({ env: ENV });
+    return handler({ file_id, database_id, key }, ctx as any);
+  }
+
+  async function insertFile(opts: {
+    s3Key: string;
+    body: Buffer;
+    status?: string;
+    databaseId?: number;
+  }): Promise<{ id: string; database_id: number }> {
+    const databaseId = opts.databaseId ?? 1;
+    s3Keys.push(opts.s3Key);
+
+    await s3.send(new PutObjectCommand({
+      Bucket: BUCKET,
+      Key: opts.s3Key,
+      Body: opts.body,
+      ContentType: 'application/octet-stream',
+    }));
+
+    const res = await pg.query(
+      `INSERT INTO ${SCHEMA}.files
+         (database_id, key, bucket_key, status)
+       VALUES ($1, $2, 'default', $3::${SCHEMA}.file_status)
+       RETURNING id, database_id`,
+      [databaseId, opts.s3Key, opts.status ?? 
'deleting']
+    );
+    return res.rows[0];
+  }
+
+  async function s3ObjectExists(key: string): Promise<boolean> {
+    try {
+      await s3.send(new GetObjectCommand({ Bucket: BUCKET, Key: key }));
+      return true;
+    } catch (err: any) {
+      if (err.name === 'NoSuchKey' || err.$metadata?.httpStatusCode === 404) {
+        return false;
+      }
+      throw err;
+    }
+  }
+
+  // -----------------------------------------------------------------------
+  // Test 1: Happy path — deletes S3 object and DB row
+  // -----------------------------------------------------------------------
+
+  it('deletes S3 object and DB row successfully', async () => {
+    const key = `e2e-del-${Date.now()}-test.bin`;
+    const body = Buffer.from('test file content');
+
+    const { id, database_id } = await insertFile({
+      s3Key: key,
+      body,
+      status: 'deleting',
+    });
+
+    expect(await s3ObjectExists(key)).toBe(true);
+
+    const result: any = await callHandler(id, database_id, key);
+
+    expect(result.success).toBe(true);
+    expect(result.key).toBe(key);
+    expect(await s3ObjectExists(key)).toBe(false);
+
+    const dbRes = await pg.query(
+      `SELECT * FROM ${SCHEMA}.files WHERE id = $1 AND database_id = $2`,
+      [id, database_id]
+    );
+    expect(dbRes.rows.length).toBe(0);
+  });
+
+  // -----------------------------------------------------------------------
+  // Test 2: Idempotency — S3 object already deleted
+  // -----------------------------------------------------------------------
+
+  it('succeeds when S3 object already deleted (idempotent)', async () => {
+    const key = `e2e-del-gone-${Date.now()}-test.bin`;
+
+    const res = await pg.query(
+      `INSERT INTO ${SCHEMA}.files
+         (database_id, key, bucket_key, status)
+       VALUES (1, $1, 'default', 'deleting')
+       RETURNING id, database_id`,
+      [key]
+    );
+    const { id, database_id } = res.rows[0];
+
+    const result: any = await callHandler(id, database_id, key);
+
+    expect(result.success).toBe(true);
+
+    const dbRes = await pg.query(
+      `SELECT * FROM ${SCHEMA}.files WHERE id = $1 AND database_id = $2`,
+      [id, database_id]
+    );
+    expect(dbRes.rows.length).toBe(0);
+  });
+
+  // -----------------------------------------------------------------------
+  // Test 3: Idempotency — DB row already deleted
+  // -----------------------------------------------------------------------
+
+  it('succeeds when DB row already deleted (idempotent)', async () => {
+    const key = `e2e-del-norow-${Date.now()}-test.bin`;
+
+    s3Keys.push(key);
+    await s3.send(new PutObjectCommand({
+      Bucket: BUCKET,
+      Key: key,
+      Body: Buffer.from('orphan'),
+      ContentType: 'application/octet-stream',
+    }));
+
+    const result: any = await callHandler(
+      '00000000-0000-0000-0000-000000000000',
+      1,
+      key
+    );
+
+    expect(result.success).toBe(true);
+    expect(await s3ObjectExists(key)).toBe(false);
+  });
+
+  // -----------------------------------------------------------------------
+  // Test 4: Both already deleted — fully idempotent
+  // -----------------------------------------------------------------------
+
+  it('succeeds when both S3 and DB are already gone', async () => {
+    const result: any = await callHandler(
+      '00000000-0000-0000-0000-000000000000',
+      999,
+      `nonexistent-key-${Date.now()}`
+    );
+
+    expect(result.success).toBe(true);
+  });
+});
diff --git a/functions/delete-s3-object/handler.json b/functions/delete-s3-object/handler.json
new file mode 100644
index 0000000..02d3510
--- /dev/null
+++ b/functions/delete-s3-object/handler.json
@@ -0,0 +1,10 @@
+{
+  "name": "delete-s3-object",
+  "version": "1.0.0",
+  "type": "node-graphql",
+  "description": "Deletes S3 objects 
and removes corresponding files table rows",
+  "dependencies": {
+    "@aws-sdk/client-s3": "^3.700.0",
+    "pg-cache": "^3.1.0"
+  }
+}
diff --git a/functions/delete-s3-object/handler.ts b/functions/delete-s3-object/handler.ts
new file mode 100644
index 0000000..962ac51
--- /dev/null
+++ b/functions/delete-s3-object/handler.ts
@@ -0,0 +1,57 @@
+import type { FunctionContext, FunctionHandler } from '@constructive-io/fn-runtime';
+import { S3Client, DeleteObjectCommand } from '@aws-sdk/client-s3';
+import { getPgPool } from 'pg-cache';
+
+type DeleteParams = {
+  file_id: string;
+  database_id: number;
+  key: string;
+};
+
+const handler: FunctionHandler = async (
+  params: DeleteParams,
+  context: FunctionContext
+) => {
+  const { log, env } = context;
+
+  log.info('[delete-s3-object] deleting', { key: params.key });
+
+  const s3 = new S3Client({
+    region: env.AWS_REGION || 'us-east-1',
+    endpoint: env.S3_ENDPOINT,
+    forcePathStyle: true,
+    credentials: {
+      accessKeyId: env.AWS_ACCESS_KEY_ID!,
+      secretAccessKey: env.AWS_SECRET_ACCESS_KEY!,
+    },
+  });
+
+  const pool = getPgPool({
+    host: env.PGHOST,
+    port: Number(env.PGPORT || 5432),
+    database: env.PGDATABASE || 'constructive',
+    user: env.PGUSER,
+    password: env.PGPASSWORD,
+  });
+
+  // Step 1: Delete from S3 (idempotent -- delete ignores missing keys)
+  await s3.send(new DeleteObjectCommand({
+    Bucket: env.S3_BUCKET!,
+    Key: params.key,
+  }));
+
+  // Step 2: Delete the DB row
+  const result = await pool.query(
+    'DELETE FROM object_store_public.files WHERE id = $1 AND database_id = $2',
+    [params.file_id, params.database_id]
+  );
+
+  log.info('[delete-s3-object] complete', {
+    key: params.key,
+    rowsDeleted: result.rowCount,
+  });
+
+  return { success: true, key: params.key };
+};
+
+export default handler;
diff --git a/functions/file-cleanup/handler.json b/functions/file-cleanup/handler.json
new file mode 100644
index 0000000..5f7a48f
--- /dev/null
+++ b/functions/file-cleanup/handler.json
@@ -0,0 +1,9 @@
+{
+  "name": "file-cleanup",
+  "version": "1.0.0",
+  "type": "node-graphql",
+  "description": "Scheduled cleanup: pending reaper, error cleanup, unattached file cleanup",
+  "dependencies": {
+    "pg-cache": "^3.1.0"
+  }
+}
diff --git a/functions/file-cleanup/handler.ts b/functions/file-cleanup/handler.ts
new file mode 100644
index 0000000..653c762
--- /dev/null
+++ b/functions/file-cleanup/handler.ts
@@ -0,0 +1,89 @@
+import type { FunctionContext, FunctionHandler } from '@constructive-io/fn-runtime';
+import { getPgPool } from 'pg-cache';
+
+type CleanupType = 'pending_reaper' | 'error_cleanup' | 'unattached_cleanup';
+
+type CleanupParams = {
+  type: CleanupType;
+};
+
+const BATCH_SIZE = 1000;
+
+const CLEANUP_QUERIES: Record<CleanupType, { description: string; query: string }> = {
+  pending_reaper: {
+    description: 'Mark stale pending files as error (upload timeout after 24h)',
+    query: `
+      UPDATE object_store_public.files
+      SET status = 'error', status_reason = 'upload timeout'
+      WHERE id IN (
+        SELECT id FROM object_store_public.files
+        WHERE status = 'pending' AND created_at < now() - interval '24 hours'
+        LIMIT ${BATCH_SIZE}
+      )
+    `,
+  },
+  error_cleanup: {
+    description: 'Mark old error files as deleting (expired after 30 days)',
+    query: `
+      UPDATE object_store_public.files
+      SET status = 'deleting', status_reason = 'expired error'
+      WHERE id IN (
+        SELECT id FROM object_store_public.files
+        WHERE status = 'error' AND updated_at < now() - interval '30 days'
+        LIMIT ${BATCH_SIZE}
+      )
+    `,
+  },
+  unattached_cleanup: {
+    description: 'Mark unattached ready files as error (never attached 
after 7 days)', + query: ` + UPDATE object_store_public.files + SET status = 'error', status_reason = 'never attached' + WHERE id IN ( + SELECT id FROM object_store_public.files + WHERE status = 'ready' AND source_table IS NULL AND created_at < now() - interval '7 days' + LIMIT ${BATCH_SIZE} + ) + `, + }, +}; + +const handler: FunctionHandler = async ( + params: CleanupParams, + context: FunctionContext +) => { + const { log, env } = context; + + if (!params.type || !CLEANUP_QUERIES[params.type]) { + return { error: `Invalid cleanup type: ${params.type}. Must be one of: ${Object.keys(CLEANUP_QUERIES).join(', ')}` }; + } + + const cleanup = CLEANUP_QUERIES[params.type]; + log.info(`[file-cleanup] running ${params.type}: ${cleanup.description}`); + + const pool = getPgPool({ + host: env.PGHOST || 'localhost', + port: Number(env.PGPORT || 5432), + database: env.PGDATABASE || 'constructive', + user: env.PGUSER || 'postgres', + password: env.PGPASSWORD || 'password', + }); + + const result = await pool.query(cleanup.query); + const rowsAffected = result.rowCount ?? 0; + + log.info(`[file-cleanup] ${params.type} complete`, { rowsAffected }); + + // If we processed a full batch, re-enqueue to handle remaining rows + if (rowsAffected >= BATCH_SIZE) { + log.info(`[file-cleanup] batch full (${BATCH_SIZE}), re-enqueuing for next batch`); + await pool.query( + `SELECT app_jobs.add_job('file-cleanup', $1::json)`, + [JSON.stringify({ type: params.type })] + ); + } + + return { success: true, type: params.type, rowsAffected }; +}; + +export default handler; diff --git a/functions/process-image/handler.json b/functions/process-image/handler.json index e76b43f..70c8196 100644 --- a/functions/process-image/handler.json +++ b/functions/process-image/handler.json @@ -2,7 +2,7 @@ "name": "process-image", "version": "1.0.0", "type": "node-graphql", - "description": "Downloads images from MinIO, generates resized versions via sharp, uploads back, and updates the database record", + "description": "Processes uploaded files and images: generates resized versions via sharp, manages object_store_public.files status, and updates database records", "dependencies": { "@aws-sdk/client-s3": "^3.1001.0", "@aws-sdk/lib-storage": "^3.1001.0", diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts index 99d3a7f..91b75b9 100644 --- a/functions/process-image/handler.ts +++ b/functions/process-image/handler.ts @@ -1,4 +1,4 @@ -import { DeleteObjectCommand, GetObjectCommand, S3Client } from '@aws-sdk/client-s3'; +import { DeleteObjectCommand, GetObjectCommand, PutObjectCommand, S3Client } from '@aws-sdk/client-s3'; import { Upload } from '@aws-sdk/lib-storage'; import type { FunctionContext, FunctionHandler } from '@constructive-io/fn-runtime'; import { getPgPool } from 'pg-cache'; @@ -16,7 +16,14 @@ interface VersionConfig { maxHeight: number; } -interface ProcessImageParams { +/** File mode: process a file from object_store_public.files */ +interface ProcessFileParams { + file_id: string; + database_id: number; +} + +/** Image mode: process JSONB image fields on an arbitrary table */ +interface ProcessImageFieldParams { schema: string; table: string; idFields: string[]; @@ -25,6 +32,8 @@ interface ProcessImageParams { versions?: VersionConfig[]; } +type ProcessParams = ProcessFileParams | ProcessImageFieldParams; + interface ImageFieldValue { url?: string; id?: string; @@ -52,10 +61,18 @@ const DEFAULT_VERSIONS: VersionConfig[] = [ { name: 'large', maxWidth: 1200, maxHeight: 1200 }, ]; +const 
PROCESSABLE_FORMATS = new Set([
+  'jpeg', 'png', 'webp', 'gif', 'tiff', 'avif', 'heif', 'jp2',
+]);
+
 // ---------------------------------------------------------------------------
 // Helpers
 // ---------------------------------------------------------------------------
+function isFileMode(params: ProcessParams): params is ProcessFileParams {
+  return 'file_id' in params;
+}
+
 function validateIdentifier(name: string): string {
   if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(name)) {
     throw new Error(`Invalid SQL identifier: "${name}"`);
@@ -63,6 +80,15 @@ function validateIdentifier(name: string): string {
   return name;
 }
 
+function validateQualifiedName(name: string): string {
+  const parts = name.split('.');
+  if (parts.length < 1 || parts.length > 3) {
+    throw new Error(`Invalid qualified name: "${name}"`);
+  }
+  parts.forEach(validateIdentifier);
+  return name;
+}
+
 function createS3Client(env: Record<string, string | undefined>): S3Client {
   const provider = env.BUCKET_PROVIDER || 'minio';
   const isMinio = provider === 'minio';
@@ -82,6 +108,16 @@ function createS3Client(env: Record<string, string | undefined>): S3Client {
   });
 }
+function createPgPool(env: Record<string, string | undefined>) {
+  return getPgPool({
+    host: env.PGHOST || 'localhost',
+    port: Number(env.PGPORT || 5432),
+    database: env.PGDATABASE || 'constructive',
+    user: env.PGUSER || 'postgres',
+    password: env.PGPASSWORD || 'password',
+  });
+}
+
 function parseS3Url(url: string): { bucket: string; key: string } | null {
   try {
     const parsed = new URL(url);
@@ -122,10 +158,6 @@ function buildObjectUrl(
   return `https://${bucket}.s3.${region}.amazonaws.com/${key}`;
 }
 
-const PROCESSABLE_FORMATS = new Set([
-  'jpeg', 'png', 'webp', 'gif', 'tiff', 'avif', 'heif', 'jp2',
-]);
-
 async function streamToBuffer(stream: Readable): Promise<Buffer> {
   const chunks: Buffer[] = [];
   for await (const chunk of stream) {
@@ -136,24 +168,258 @@ async function streamToBuffer(stream: Readable): Promise<Buffer> {
 
 async function deleteS3Objects(
   s3: S3Client,
-  objects: { bucket: string; key: string }[],
+  bucket: string,
+  keys: string[],
   log: FunctionContext['log'],
 ): Promise<void> {
-  for (const obj of objects) {
+  for (const key of keys) {
     try {
-      await s3.send(new DeleteObjectCommand({ Bucket: obj.bucket, Key: obj.key }));
-      log.info(`[process-image] rolled back: deleted ${obj.key}`);
+      await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: key }));
+      log.info(`[process-image] rolled back: deleted ${key}`);
     } catch (err) {
-      log.error(`[process-image] rollback failed for ${obj.key}`, err);
+      log.error(`[process-image] rollback failed for ${key}`, err);
+    }
+  }
+}
+
+// ---------------------------------------------------------------------------
+// File Mode: process a file from object_store_public.files
+//
+// Locks the row with FOR UPDATE SKIP LOCKED, transitions status through
+// pending -> processing -> ready, generates thumbnail + medium versions,
+// inserts version rows, and writes back-references to the domain table.
+// ---------------------------------------------------------------------------
+
+async function handleFileMode(
+  params: ProcessFileParams,
+  context: FunctionContext,
+): Promise<unknown> {
+  const { log, env } = context;
+  const pool = createPgPool(env);
+  const s3 = createS3Client(env);
+  const bucket = env.BUCKET_NAME || 'test-bucket';
+
+  // ---------------------------------------------------------------
+  // Step 1: SELECT ... FOR UPDATE SKIP LOCKED
+  // Prevents concurrent workers from processing the same file.
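+  // A minimal sketch of the race this avoids, assuming two workers pick up
+  // the same job (hypothetical timeline, not from a real trace):
+  //   worker A: BEGIN; SELECT ... FOR UPDATE SKIP LOCKED  -> locks the row
+  //   worker B: BEGIN; SELECT ... FOR UPDATE SKIP LOCKED  -> 0 rows back,
+  //             because SKIP LOCKED skips A's locked row instead of waiting
+  //   worker B returns { skipped: true } below rather than double-processing.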
+ // --------------------------------------------------------------- + const client = await pool.connect(); + try { + await client.query('BEGIN'); + + const { rows } = await client.query( + `SELECT * FROM object_store_public.files + WHERE id = $1 AND database_id = $2 AND status = 'pending' + FOR UPDATE SKIP LOCKED`, + [params.file_id, params.database_id] + ); + + if (rows.length === 0) { + await client.query('ROLLBACK'); + log.info('[process-image] skipped: row not found, not pending, or locked by another worker'); + return { skipped: true, reason: 'not_pending_or_locked' }; + } + + const file = rows[0]; + + // Transition to processing + await client.query( + `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = $2`, + [file.id, file.database_id] + ); + + await client.query('COMMIT'); + + // --------------------------------------------------------------- + // Step 2: Get MIME type from S3 (not stored in files table) + // --------------------------------------------------------------- + const headResult = await s3.send(new GetObjectCommand({ + Bucket: bucket, + Key: file.key, + })); + const mimeType = headResult.ContentType ?? 'application/octet-stream'; + + if (!mimeType.startsWith('image/')) { + // Non-image: mark as ready immediately, no versions to generate + await pool.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`, + [file.id, file.database_id] + ); + return { success: true, mime: mimeType, versions: 0 }; + } + + // --------------------------------------------------------------- + // Step 3: Download original and generate versions + // --------------------------------------------------------------- + const baseKey = file.key.replace(/_origin$/, ''); + const originalBuffer = await streamToBuffer(headResult.Body as Readable); + const image = sharp(originalBuffer); + const metadata = await image.metadata(); + + const uploadedS3Keys: string[] = []; + const versionRows: Array<{ + key: string; + etag: string; + mime: string; + width: number; + height: number; + }> = []; + + try { + // Generate thumbnail (150x150, skip if original is smaller) + if ((metadata.width ?? 0) > 150 || (metadata.height ?? 0) > 150) { + const thumbKey = `${baseKey}_thumbnail`; + const thumbBuffer = await image.clone() + .resize(150, 150, { fit: 'cover' }).jpeg().toBuffer(); + + const putResult = await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: thumbKey, + Body: thumbBuffer, + ContentType: 'image/jpeg', + })); + + uploadedS3Keys.push(thumbKey); + versionRows.push({ + key: thumbKey, + etag: putResult.ETag ?? '', + mime: 'image/jpeg', + width: 150, + height: 150, + }); + } + + // Generate medium (max 1200px wide, skip if original is smaller) + if ((metadata.width ?? 0) > 1200) { + const medKey = `${baseKey}_medium`; + const medResult = await image.clone() + .resize(1200, null, { withoutEnlargement: true }).jpeg() + .toBuffer({ resolveWithObject: true }); + + const putResult = await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: medKey, + Body: medResult.data, + ContentType: 'image/jpeg', + })); + + uploadedS3Keys.push(medKey); + versionRows.push({ + key: medKey, + etag: putResult.ETag ?? '', + mime: 'image/jpeg', + width: 1200, + height: medResult.info.height, + }); + } + + // --------------------------------------------------------------- + // Step 4: Transactional batch commit + // All version row INSERTs + origin status update in single transaction. 
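+  // Sketch of the failure this protects against (assumed, for illustration):
+  //   INSERT thumbnail version row  -> ok
+  //   INSERT medium version row     -> throws
+  //   ROLLBACK then discards the thumbnail row too, so the files table never
+  //   exposes a partial version set alongside a half-updated origin status.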
+ // --------------------------------------------------------------- + const txClient = await pool.connect(); + try { + await txClient.query('BEGIN'); + + for (const ver of versionRows) { + await txClient.query( + `INSERT INTO object_store_public.files + (database_id, bucket_key, key, status, etag, created_by, + source_table, source_column, source_id) + VALUES ($1, $2, $3, 'ready', $4, $5, $6, $7, $8)`, + [ + file.database_id, file.bucket_key, ver.key, ver.etag, + file.created_by, file.source_table, file.source_column, file.source_id, + ] + ); + } + + // Mark origin as ready + await txClient.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`, + [file.id, file.database_id] + ); + + await txClient.query('COMMIT'); + } catch (txErr: any) { + await txClient.query('ROLLBACK'); + + // --------------------------------------------------------------- + // Graceful deleting handling: + // If the file was marked 'deleting' during processing (source row + // deleted), the state machine rejects processing->ready. This is + // correct behavior -- the file is already marked for deletion. + // --------------------------------------------------------------- + if (txErr.message?.includes('Invalid status transition')) { + log.info('[process-image] file transitioned to deleting during processing, exiting gracefully'); + await deleteS3Objects(s3, bucket, uploadedS3Keys, log); + return { success: true, reason: 'file_marked_deleting_during_processing' }; + } + throw txErr; + } finally { + txClient.release(); + } + + // --------------------------------------------------------------- + // Step 5: Write version info to domain table (if back-reference populated) + // --------------------------------------------------------------- + if (file.source_table && file.source_column && file.source_id && versionRows.length > 0) { + validateQualifiedName(file.source_table); + validateIdentifier(file.source_column); + + const versionsArray = versionRows.map((v) => ({ + key: v.key, + mime: v.mime, + width: v.width, + height: v.height, + })); + + await pool.query( + `UPDATE ${file.source_table} + SET ${file.source_column} = jsonb_set( + ${file.source_column}::jsonb, + '{versions}', + $1::jsonb + ) + WHERE id = $2`, + [JSON.stringify(versionsArray), file.source_id] + ); + } + + return { success: true, versions: versionRows.length }; + } catch (processingErr) { + // --------------------------------------------------------------- + // Partial failure recovery: + // If any S3 upload or DB insert fails, explicitly delete any S3 + // objects that were uploaded before the failure. + // --------------------------------------------------------------- + await deleteS3Objects(s3, bucket, uploadedS3Keys, log); + + await pool.query( + `UPDATE object_store_public.files SET status = 'error', status_reason = $3 + WHERE id = $1 AND database_id = $2`, + [file.id, file.database_id, (processingErr as Error).message] + ); + + throw processingErr; } + } finally { + client.release(); } } // --------------------------------------------------------------------------- -// Handler +// Image Mode: process JSONB image fields on an arbitrary table +// +// For each specified field, downloads the original image from S3, generates +// resized versions (thumbnail, medium, large by default), uploads them back, +// and updates the JSONB field with version metadata. 
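+//
+// Example job payload (illustrative values only; shape follows
+// ProcessImageFieldParams above):
+//   {
+//     "schema": "public", "table": "products",
+//     "idFields": ["id"], "idValues": ["<uuid>"],
+//     "fields": ["image"],
+//     "versions": [{ "name": "thumbnail", "maxWidth": 150, "maxHeight": 150 }]
+//   }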
// ---------------------------------------------------------------------------
-const handler: FunctionHandler = async (params, context) => {
+async function handleImageMode(
+  params: ProcessImageFieldParams,
+  context: FunctionContext,
+): Promise<unknown> {
   const { log, env } = context;
   const {
     schema,
@@ -189,13 +455,7 @@ const handler: FunctionHandler = async (params, context) =>
   });
 
   const s3 = createS3Client(env);
-  const pool = getPgPool({
-    host: env.PGHOST || 'localhost',
-    port: Number(env.PGPORT || 5432),
-    database: env.PGDATABASE || 'constructive',
-    user: env.PGUSER || 'postgres',
-    password: env.PGPASSWORD || 'password',
-  });
+  const pool = createPgPool(env);
 
   // --- Query the record ---
 
@@ -321,7 +581,7 @@ const handler: FunctionHandler = async (params, context) =>
   // --- Generate versions ---
 
   const generatedVersions: ImageVersion[] = [];
-  const uploadedObjects: { bucket: string; key: string }[] = [];
+  const uploadedKeys: string[] = [];
 
   try {
     for (const ver of versions) {
@@ -355,7 +615,7 @@ const handler: FunctionHandler = async (params, context) =>
         },
       }).done();
 
-      uploadedObjects.push({ bucket, key: vKey });
+      uploadedKeys.push(vKey);
 
       generatedVersions.push({
         name: ver.name,
@@ -373,10 +633,10 @@ const handler: FunctionHandler = async (params, context) =>
     }
   } catch (err) {
     log.error(
-      `[process-image] version generation failed for "${field}", rolling back ${uploadedObjects.length} uploads`,
+      `[process-image] version generation failed for "${field}", rolling back ${uploadedKeys.length} uploads`,
       err,
     );
-    await deleteS3Objects(s3, uploadedObjects, log);
+    await deleteS3Objects(s3, bucket, uploadedKeys, log);
     throw err;
   }
@@ -403,7 +663,8 @@ const handler: FunctionHandler = async (params, context) =>
       log.error(`[process-image] DB update failed for "${field}", rolling back uploads`, err);
       await deleteS3Objects(
         s3,
-        generatedVersions.map((v) => ({ bucket: v.bucket, key: v.key })),
+        bucket,
+        generatedVersions.map((v) => v.key),
         log,
       );
       throw err;
@@ -421,6 +682,17 @@ const handler: FunctionHandler = async (params, context) =>
   log.info('[process-image] complete');
 
   return { success: true, results };
+}
+
+// ---------------------------------------------------------------------------
+// Main Handler
+// ---------------------------------------------------------------------------
+
+const handler: FunctionHandler = async (params, context) => {
+  if (isFileMode(params)) {
+    return handleFileMode(params, context);
+  }
+  return handleImageMode(params as ProcessImageFieldParams, context);
 };
 
 export default handler;
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 9a1e190..5dd5ef4 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -63,6 +63,28 @@ importers:
         specifier: ^8.33.0
         version: 8.55.0(eslint@9.39.2)(typescript@5.9.3)
 
+  generated/delete-s3-object:
+    dependencies:
+      '@aws-sdk/client-s3':
+        specifier: ^3.700.0
+        version: 3.1004.0
+      '@constructive-io/fn-runtime':
+        specifier: workspace:^
+        version: link:../../packages/fn-runtime
+      pg-cache:
+        specifier: ^3.1.0
+        version: 3.1.1
+    devDependencies:
+      '@types/node':
+        specifier: ^22.10.4
+        version: 22.19.3
+      makage:
+        specifier: ^0.1.10
+        version: 0.1.12
+      typescript:
+        specifier: ^5.1.6
+        version: 5.9.3
+
   generated/example:
     dependencies:
       '@constructive-io/fn-runtime':
@@ -79,6 +101,25 @@ importers:
         specifier: ^5.1.6
         version: 5.9.3
 
+  generated/file-cleanup:
+    dependencies:
+      '@constructive-io/fn-runtime':
+        specifier: workspace:^
+        version: link:../../packages/fn-runtime
+      pg-cache:
+        specifier: ^3.1.0
+        version: 3.1.1
+    
devDependencies: + '@types/node': + specifier: ^22.10.4 + version: 22.19.3 + makage: + specifier: ^0.1.10 + version: 0.1.12 + typescript: + specifier: ^5.1.6 + version: 5.9.3 + generated/process-image: dependencies: '@aws-sdk/client-s3': @@ -4395,11 +4436,11 @@ snapshots: '@babel/helpers': 7.28.4 '@babel/parser': 7.28.5 '@babel/template': 7.27.2 - '@babel/traverse': 7.28.5 + '@babel/traverse': 7.28.5(supports-color@5.5.0) '@babel/types': 7.28.5 '@jridgewell/remapping': 2.3.5 convert-source-map: 2.0.0 - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) gensync: 1.0.0-beta.2 json5: 2.2.3 semver: 6.3.1 @@ -4428,13 +4469,6 @@ snapshots: '@babel/helper-globals@7.28.0': {} - '@babel/helper-module-imports@7.27.1': - dependencies: - '@babel/traverse': 7.28.5 - '@babel/types': 7.28.5 - transitivePeerDependencies: - - supports-color - '@babel/helper-module-imports@7.27.1(supports-color@5.5.0)': dependencies: '@babel/traverse': 7.28.5(supports-color@5.5.0) @@ -4445,9 +4479,9 @@ snapshots: '@babel/helper-module-transforms@7.28.3(@babel/core@7.28.5)': dependencies: '@babel/core': 7.28.5 - '@babel/helper-module-imports': 7.27.1 + '@babel/helper-module-imports': 7.27.1(supports-color@5.5.0) '@babel/helper-validator-identifier': 7.28.5 - '@babel/traverse': 7.28.5 + '@babel/traverse': 7.28.5(supports-color@5.5.0) transitivePeerDependencies: - supports-color @@ -4563,18 +4597,6 @@ snapshots: '@babel/parser': 7.28.5 '@babel/types': 7.28.5 - '@babel/traverse@7.28.5': - dependencies: - '@babel/code-frame': 7.27.1 - '@babel/generator': 7.28.5 - '@babel/helper-globals': 7.28.0 - '@babel/parser': 7.28.5 - '@babel/template': 7.27.2 - '@babel/types': 7.28.5 - debug: 4.4.3 - transitivePeerDependencies: - - supports-color - '@babel/traverse@7.28.5(supports-color@5.5.0)': dependencies: '@babel/code-frame': 7.27.1 @@ -4665,7 +4687,7 @@ snapshots: '@eslint/config-array@0.21.1': dependencies: '@eslint/object-schema': 2.1.7 - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -4681,7 +4703,7 @@ snapshots: '@eslint/eslintrc@3.3.3': dependencies: ajv: 6.12.6 - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) espree: 10.4.0 globals: 14.0.0 ignore: 5.3.2 @@ -5616,7 +5638,7 @@ snapshots: '@typescript-eslint/types': 8.55.0 '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3) '@typescript-eslint/visitor-keys': 8.55.0 - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) eslint: 9.39.2 typescript: 5.9.3 transitivePeerDependencies: @@ -5626,7 +5648,7 @@ snapshots: dependencies: '@typescript-eslint/tsconfig-utils': 8.55.0(typescript@5.9.3) '@typescript-eslint/types': 8.55.0 - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -5645,7 +5667,7 @@ snapshots: '@typescript-eslint/types': 8.55.0 '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3) '@typescript-eslint/utils': 8.55.0(eslint@9.39.2)(typescript@5.9.3) - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) eslint: 9.39.2 ts-api-utils: 2.4.0(typescript@5.9.3) typescript: 5.9.3 @@ -5660,7 +5682,7 @@ snapshots: '@typescript-eslint/tsconfig-utils': 8.55.0(typescript@5.9.3) '@typescript-eslint/types': 8.55.0 '@typescript-eslint/visitor-keys': 8.55.0 - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) minimatch: 9.0.5 semver: 7.7.3 tinyglobby: 0.2.15 @@ -6179,10 +6201,6 @@ snapshots: dependencies: ms: 2.0.0 - debug@4.4.3: - dependencies: - ms: 2.1.3 - debug@4.4.3(supports-color@5.5.0): dependencies: ms: 2.1.3 @@ -6401,7 +6419,7 @@ 
snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.6 - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) escape-string-regexp: 4.0.0 eslint-scope: 8.4.0 eslint-visitor-keys: 4.2.1 @@ -6890,7 +6908,7 @@ snapshots: istanbul-lib-source-maps@5.0.6: dependencies: '@jridgewell/trace-mapping': 0.3.31 - debug: 4.4.3 + debug: 4.4.3(supports-color@5.5.0) istanbul-lib-coverage: 3.2.2 transitivePeerDependencies: - supports-color diff --git a/scripts/dev.ts b/scripts/dev.ts index 25dd45f..7308d41 100644 --- a/scripts/dev.ts +++ b/scripts/dev.ts @@ -76,6 +76,16 @@ const allProcesses: ProcessDef[] = [ script: path.resolve(ROOT, 'generated/process-image/dist/index.js'), port: 8083, }, + { + name: 'delete-s3-object', + script: path.resolve(ROOT, 'generated/delete-s3-object/dist/index.js'), + port: 8084, + }, + { + name: 'file-cleanup', + script: path.resolve(ROOT, 'generated/file-cleanup/dist/index.js'), + port: 8085, + }, ]; // --- CLI args --- @@ -90,7 +100,7 @@ function getJobServiceEnv(): Record { return { JOBS_SCHEMA: 'app_jobs', JOBS_SUPPORT_ANY: 'false', - JOBS_SUPPORTED: 'send-email-link,process-image', + JOBS_SUPPORTED: 'send-email-link,process-image,delete-s3-object,file-cleanup', HOSTNAME: 'knative-job-service-local', INTERNAL_JOBS_CALLBACK_PORT: '8080', INTERNAL_JOBS_CALLBACK_URL: 'http://localhost:8080/callback', @@ -100,6 +110,8 @@ function getJobServiceEnv(): Record { 'send-email-link': 'http://localhost:8082', 'simple-email': 'http://localhost:8081', 'process-image': 'http://localhost:8083', + 'delete-s3-object': 'http://localhost:8084', + 'file-cleanup': 'http://localhost:8085', }), }; } diff --git a/tests/helpers/object-store-schema.ts b/tests/helpers/object-store-schema.ts new file mode 100644 index 0000000..c91be43 --- /dev/null +++ b/tests/helpers/object-store-schema.ts @@ -0,0 +1,101 @@ +/** + * Shared setup/teardown for the object_store_public schema used by + * process-file and delete-s3-object e2e tests. + * + * Creates the schema, enum, table, and triggers once. Multiple test + * suites can safely call setup() concurrently — CREATE IF NOT EXISTS + * prevents duplication. Teardown only drops if explicitly requested + * (e.g., from a global teardown hook). 
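+ *
+ * A typical wiring in a suite looks like (sketch, names from this module):
+ *   const pg = makePgClient(); await pg.connect();  // beforeAll
+ *   await setupObjectStoreSchema(pg);               // beforeAll
+ *   await cleanObjectStoreRows(pg);                 // afterEach
+ *   await teardownObjectStoreSchema(pg);            // global teardown only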
+ */
+import { Client as PgClient } from 'pg';
+
+const SCHEMA = 'object_store_public';
+const TABLE = 'files';
+
+export function makePgClient(): PgClient {
+  return new PgClient({
+    host: 'localhost',
+    port: 5432,
+    user: 'postgres',
+    password: 'password',
+    database: 'constructive',
+  });
+}
+
+export async function setupObjectStoreSchema(pg: PgClient): Promise<void> {
+  await pg.query(`CREATE SCHEMA IF NOT EXISTS ${SCHEMA}`);
+
+  await pg.query(`
+    DO $$ BEGIN
+      CREATE TYPE ${SCHEMA}.file_status AS ENUM (
+        'pending', 'processing', 'ready', 'error', 'deleting'
+      );
+    EXCEPTION WHEN duplicate_object THEN NULL;
+    END $$
+  `);
+
+  await pg.query(`
+    CREATE TABLE IF NOT EXISTS ${SCHEMA}.${TABLE} (
+      id uuid NOT NULL DEFAULT gen_random_uuid(),
+      database_id integer NOT NULL,
+      bucket_key text NOT NULL DEFAULT 'default',
+      key text NOT NULL,
+      status ${SCHEMA}.file_status NOT NULL DEFAULT 'pending',
+      status_reason text,
+      etag text,
+      source_table text,
+      source_column text,
+      source_id uuid,
+      processing_started_at timestamptz,
+      created_by uuid,
+      created_at timestamptz NOT NULL DEFAULT now(),
+      updated_at timestamptz NOT NULL DEFAULT now(),
+      CONSTRAINT object_store_files_pkey PRIMARY KEY (id, database_id)
+    )
+  `);
+
+  // State machine trigger
+  await pg.query(`
+    CREATE OR REPLACE FUNCTION ${SCHEMA}.files_before_update_timestamp()
+    RETURNS trigger AS $fn$
+    BEGIN
+      NEW.updated_at := now();
+      IF OLD.status IS DISTINCT FROM NEW.status THEN
+        IF NOT (
+          (OLD.status = 'pending' AND NEW.status IN ('processing', 'error'))
+          OR (OLD.status = 'processing' AND NEW.status IN ('ready', 'error', 'deleting'))
+          OR (OLD.status = 'ready' AND NEW.status = 'deleting')
+          OR (OLD.status = 'error' AND NEW.status IN ('deleting', 'pending'))
+        ) THEN
+          RAISE EXCEPTION 'Invalid status transition from % to %', OLD.status, NEW.status;
+        END IF;
+        IF NEW.status = 'processing' THEN
+          NEW.processing_started_at := now();
+        ELSIF OLD.status = 'processing' AND NEW.status <> 'processing' THEN
+          NEW.processing_started_at := NULL;
+        END IF;
+      END IF;
+      RETURN NEW;
+    END;
+    $fn$ LANGUAGE plpgsql
+  `);
+
+  await pg.query(`
+    DROP TRIGGER IF EXISTS files_before_update_timestamp ON ${SCHEMA}.${TABLE};
+    CREATE TRIGGER files_before_update_timestamp
+    BEFORE UPDATE ON ${SCHEMA}.${TABLE}
+    FOR EACH ROW
+    EXECUTE FUNCTION ${SCHEMA}.files_before_update_timestamp()
+  `);
+}
+
+export async function teardownObjectStoreSchema(pg: PgClient): Promise<void> {
+  await pg.query(`DROP TABLE IF EXISTS ${SCHEMA}.${TABLE} CASCADE`);
+  await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.files_before_update_timestamp CASCADE`);
+  await pg.query(`DROP TYPE IF EXISTS ${SCHEMA}.file_status CASCADE`);
+  await pg.query(`DROP SCHEMA IF EXISTS ${SCHEMA} CASCADE`);
+}
+
+export async function cleanObjectStoreRows(pg: PgClient): Promise<void> {
+  await pg.query(`DELETE FROM ${SCHEMA}.${TABLE}`);
+}
From cc14b180ce670f68e56e592e6517589df74fdb69 Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Thu, 12 Mar 2026 11:48:02 +0800
Subject: [PATCH 04/13] added test and debug

---
 .../__tests__/handler.file-mode.e2e.test.ts   | 301 +++++++++++++
 .../__tests__/upload-flow.e2e.test.ts         | 408 ++++++++++++++++++
 functions/process-image/handler.ts            |  49 ++-
 tests/helpers/object-store-schema.ts          |  77 ++++
 4 files changed, 820 insertions(+), 15 deletions(-)
 create mode 100644 functions/process-image/__tests__/handler.file-mode.e2e.test.ts
 create mode 100644 functions/process-image/__tests__/upload-flow.e2e.test.ts

diff --git a/functions/process-image/__tests__/handler.file-mode.e2e.test.ts 
b/functions/process-image/__tests__/handler.file-mode.e2e.test.ts
new file mode 100644
index 0000000..abb7f25
--- /dev/null
+++ b/functions/process-image/__tests__/handler.file-mode.e2e.test.ts
@@ -0,0 +1,301 @@
+import {
+  DeleteObjectCommand,
+  HeadObjectCommand,
+  PutObjectCommand,
+  S3Client,
+} from '@aws-sdk/client-s3';
+import { randomUUID } from 'crypto';
+import { Client as PgClient } from 'pg';
+import sharp from 'sharp';
+
+import handler from '../handler';
+import { createMockContext } from '../../../tests/helpers/mock-context';
+import {
+  cleanObjectStoreRows,
+  makePgClient,
+  setupObjectStoreSchema,
+  teardownObjectStoreSchema,
+} from '../../../tests/helpers/object-store-schema';
+
+jest.setTimeout(60000);
+
+const OBJECT_STORE_SCHEMA = 'object_store_public';
+const SOURCE_SCHEMA = 'public';
+const SOURCE_TABLE = 'test_process_file_uploads';
+const BUCKET = 'test-bucket';
+const USER_ID = 'aaaaaaaa-0000-0000-0000-000000000001';
+
+const ENV: Record<string, string> = {
+  PGHOST: 'localhost',
+  PGPORT: '5432',
+  PGUSER: 'postgres',
+  PGPASSWORD: 'password',
+  PGDATABASE: 'constructive',
+  BUCKET_PROVIDER: 'minio',
+  BUCKET_NAME: BUCKET,
+  AWS_ACCESS_KEY: 'minioadmin',
+  AWS_SECRET_KEY: 'minioadmin',
+  AWS_REGION: 'us-east-1',
+  MINIO_ENDPOINT: 'http://localhost:9000',
+};
+
+function makeS3(): S3Client {
+  return new S3Client({
+    region: 'us-east-1',
+    credentials: { accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin' },
+    endpoint: 'http://localhost:9000',
+    forcePathStyle: true,
+  });
+}
+
+async function generateTestImage(width: number, height: number): Promise<Buffer> {
+  const raw = Buffer.alloc(width * height * 3, 0);
+
+  for (let i = 0; i < raw.length; i += 3) {
+    raw[i] = (i / 3) % 256;
+    raw[i + 1] = ((i / 3) >> 8) % 256;
+    raw[i + 2] = 96;
+  }
+
+  return sharp(raw, { raw: { width, height, channels: 3 } })
+    .jpeg()
+    .toBuffer();
+}
+
+async function objectExists(s3: S3Client, key: string): Promise<boolean> {
+  try {
+    await s3.send(new HeadObjectCommand({ Bucket: BUCKET, Key: key }));
+    return true;
+  } catch {
+    return false;
+  }
+}
+
+describe('process-image handler file mode e2e', () => {
+  let pg: PgClient;
+  let s3: S3Client;
+  const s3Keys = new Set<string>();
+
+  beforeAll(async () => {
+    pg = makePgClient();
+    await pg.connect();
+    s3 = makeS3();
+
+    await setupObjectStoreSchema(pg);
+    await pg.query(`
+      CREATE TABLE IF NOT EXISTS ${SOURCE_SCHEMA}.${SOURCE_TABLE} (
+        id uuid PRIMARY KEY,
+        image jsonb
+      )
+    `);
+  });
+
+  afterEach(async () => {
+    for (const key of s3Keys) {
+      try {
+        await s3.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: key }));
+      } catch {
+        // ignore cleanup failures for already-deleted objects
+      }
+    }
+    s3Keys.clear();
+
+    await pg.query(`DELETE FROM ${SOURCE_SCHEMA}.${SOURCE_TABLE}`);
+    await cleanObjectStoreRows(pg);
+  });
+
+  afterAll(async () => {
+    await pg.query(`DROP TABLE IF EXISTS ${SOURCE_SCHEMA}.${SOURCE_TABLE}`);
+    await teardownObjectStoreSchema(pg);
+    await pg.end();
+    s3.destroy();
+
+    try {
+      const pgCache = require('pg-cache');
+      if (pgCache.close) await pgCache.close();
+      if (pgCache.teardownPgPools) await pgCache.teardownPgPools();
+    } catch {
+      // ignore pg-cache teardown issues in tests
+    }
+  });
+
+  async function putOriginImage(key: string, body: Buffer): Promise<void> {
+    s3Keys.add(key);
+    await s3.send(new PutObjectCommand({
+      Bucket: BUCKET,
+      Key: key,
+      Body: body,
+      ContentType: 'image/jpeg',
+    }));
+  }
+
+  async function insertFileRow(opts: {
+    fileId: string;
+    key: string;
+    sourceId?: string;
+  }): Promise<void> {
+    if (opts.sourceId) {
+      
await pg.query( + `INSERT INTO ${OBJECT_STORE_SCHEMA}.files + (id, database_id, bucket_key, key, status, etag, created_by, + source_table, source_column, source_id) + VALUES ($1, 1, 'default', $2, 'pending', 'etag-origin', $3, $4, 'image', $5)`, + [ + opts.fileId, + opts.key, + USER_ID, + `${SOURCE_SCHEMA}.${SOURCE_TABLE}`, + opts.sourceId, + ] + ); + return; + } + + await pg.query( + `INSERT INTO ${OBJECT_STORE_SCHEMA}.files + (id, database_id, bucket_key, key, status, etag, created_by) + VALUES ($1, 1, 'default', $2, 'pending', 'etag-origin', $3)`, + [opts.fileId, opts.key, USER_ID] + ); + } + + async function callHandler(fileId: string) { + const ctx = createMockContext({ env: ENV }); + return handler({ file_id: fileId, database_id: 1 }, ctx as any); + } + + it('processes an attached image into ready thumbnail and medium versions', async () => { + const fileId = randomUUID(); + const sourceId = randomUUID(); + const baseId = randomUUID(); + const originKey = `1/default/${baseId}_origin`; + const thumbKey = `1/default/${baseId}_thumbnail`; + const mediumKey = `1/default/${baseId}_medium`; + const imageBuffer = await generateTestImage(1600, 900); + + await putOriginImage(originKey, imageBuffer); + await pg.query( + `INSERT INTO ${SOURCE_SCHEMA}.${SOURCE_TABLE} (id, image) + VALUES ($1, $2::jsonb)`, + [ + sourceId, + JSON.stringify({ + key: originKey, + mime: 'image/jpeg', + filename: 'attached.jpg', + url: `http://localhost:9000/${BUCKET}/${originKey}`, + }), + ] + ); + await insertFileRow({ fileId, key: originKey, sourceId }); + + const result: any = await callHandler(fileId); + + expect(result).toEqual({ success: true, versions: 2 }); + expect(await objectExists(s3, thumbKey)).toBe(true); + expect(await objectExists(s3, mediumKey)).toBe(true); + s3Keys.add(thumbKey); + s3Keys.add(mediumKey); + + const files = await pg.query( + `SELECT key, status, source_table, source_column, source_id + FROM ${OBJECT_STORE_SCHEMA}.files + WHERE key LIKE $1 + ORDER BY key`, + [`1/default/${baseId}%`] + ); + + expect(files.rows).toEqual([ + { + key: mediumKey, + status: 'ready', + source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`, + source_column: 'image', + source_id: sourceId, + }, + { + key: originKey, + status: 'ready', + source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`, + source_column: 'image', + source_id: sourceId, + }, + { + key: thumbKey, + status: 'ready', + source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`, + source_column: 'image', + source_id: sourceId, + }, + ]); + + const sourceRow = await pg.query( + `SELECT image FROM ${SOURCE_SCHEMA}.${SOURCE_TABLE} WHERE id = $1`, + [sourceId] + ); + const versions = sourceRow.rows[0].image.versions; + + expect(versions).toHaveLength(2); + expect(versions).toEqual( + expect.arrayContaining([ + expect.objectContaining({ key: thumbKey, mime: 'image/jpeg', width: 150 }), + expect.objectContaining({ key: mediumKey, mime: 'image/jpeg', width: 1200 }), + ]) + ); + + const secondRun: any = await callHandler(fileId); + expect(secondRun).toEqual({ skipped: true, reason: 'not_pending_or_locked' }); + }); + + it('processes an unattached image without writing domain metadata', async () => { + const fileId = randomUUID(); + const baseId = randomUUID(); + const originKey = `1/default/${baseId}_origin`; + const thumbKey = `1/default/${baseId}_thumbnail`; + const mediumKey = `1/default/${baseId}_medium`; + const imageBuffer = await generateTestImage(1600, 900); + + await putOriginImage(originKey, imageBuffer); + await insertFileRow({ fileId, key: originKey }); + + 
const result: any = await callHandler(fileId); + + expect(result).toEqual({ success: true, versions: 2 }); + expect(await objectExists(s3, thumbKey)).toBe(true); + expect(await objectExists(s3, mediumKey)).toBe(true); + s3Keys.add(thumbKey); + s3Keys.add(mediumKey); + + const files = await pg.query( + `SELECT key, status, source_table, source_column, source_id + FROM ${OBJECT_STORE_SCHEMA}.files + WHERE key LIKE $1 + ORDER BY key`, + [`1/default/${baseId}%`] + ); + + expect(files.rows).toEqual([ + { + key: mediumKey, + status: 'ready', + source_table: null, + source_column: null, + source_id: null, + }, + { + key: originKey, + status: 'ready', + source_table: null, + source_column: null, + source_id: null, + }, + { + key: thumbKey, + status: 'ready', + source_table: null, + source_column: null, + source_id: null, + }, + ]); + }); +}); diff --git a/functions/process-image/__tests__/upload-flow.e2e.test.ts b/functions/process-image/__tests__/upload-flow.e2e.test.ts new file mode 100644 index 0000000..f4af8b7 --- /dev/null +++ b/functions/process-image/__tests__/upload-flow.e2e.test.ts @@ -0,0 +1,408 @@ +import { + DeleteObjectCommand, + HeadObjectCommand, + S3Client, +} from '@aws-sdk/client-s3'; +import { randomUUID } from 'crypto'; +import { readFileSync } from 'fs'; +import path from 'path'; +import { Client as PgClient } from 'pg'; +import { Readable } from 'stream'; + +import handler from '../handler'; +import { + cleanObjectStoreRows, + makePgClient, + setupObjectStoreSchema, + teardownObjectStoreSchema, +} from '../../../tests/helpers/object-store-schema'; + +jest.setTimeout(60000); + +jest.mock('@constructive-io/graphql-env', () => ({ + getEnvOptions: () => ({ + cdn: { + provider: process.env.BUCKET_PROVIDER || 'minio', + bucketName: process.env.BUCKET_NAME || 'test-bucket', + awsRegion: process.env.AWS_REGION || 'us-east-1', + awsAccessKey: process.env.AWS_ACCESS_KEY || 'minioadmin', + awsSecretKey: process.env.AWS_SECRET_KEY || 'minioadmin', + minioEndpoint: process.env.MINIO_ENDPOINT || 'http://localhost:9000', + }, + }), +}), { virtual: true }); + +const OBJECT_STORE_SCHEMA = 'object_store_public'; +const SOURCE_SCHEMA = 'public'; +const SOURCE_TABLE = 'test_upload_flow_images'; +const SOURCE_COLUMN = 'image'; +const BUCKET = 'test-bucket'; +const USER_ID = 'aaaaaaaa-0000-0000-0000-000000000001'; +const DATABASE_ID = 1; +const LARGE_JPEG = readFileSync( + path.resolve( + __dirname, + '../../../../constructive/uploads/etag-stream/__fixtures__/deadman.jpg', + ), +); + +type UploadResolverModule = typeof import('../../../../constructive/graphile/graphile-settings/src/upload-resolver'); + +type UploadResult = { + key?: string; + url: string; + mime: string; + filename: string; +}; + +const ENV: Record = { + PGHOST: 'localhost', + PGPORT: '5432', + PGUSER: 'postgres', + PGPASSWORD: 'password', + PGDATABASE: 'constructive', + BUCKET_PROVIDER: 'minio', + BUCKET_NAME: BUCKET, + AWS_ACCESS_KEY: 'minioadmin', + AWS_SECRET_KEY: 'minioadmin', + AWS_REGION: 'us-east-1', + MINIO_ENDPOINT: 'http://localhost:9000', +}; + +function makeS3(): S3Client { + return new S3Client({ + region: 'us-east-1', + credentials: { accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin' }, + endpoint: 'http://localhost:9000', + forcePathStyle: true, + }); +} + +function makeContext(envOverrides: Record = {}) { + return { + log: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + }, + env: { + ...ENV, + ...envOverrides, + }, + } as any; +} + +function makeUpload(filename: string, body: Buffer) { + 
return {
+    filename,
+    createReadStream: () => Readable.from(body),
+  };
+}
+
+async function objectExists(s3: S3Client, key: string): Promise<boolean> {
+  try {
+    await s3.send(new HeadObjectCommand({ Bucket: BUCKET, Key: key }));
+    return true;
+  } catch {
+    return false;
+  }
+}
+
+async function loadUploadResolverModule(): Promise<UploadResolverModule> {
+  jest.resetModules();
+  return import('../../../../constructive/graphile/graphile-settings/src/upload-resolver');
+}
+
+describe('upload to process-image flow e2e', () => {
+  let pg: PgClient;
+  let s3: S3Client;
+  let uploadResolverModule: UploadResolverModule | null = null;
+  const originalEnv = { ...process.env };
+
+  beforeAll(async () => {
+    process.env.UPLOAD_V2_ENABLED = 'true';
+    process.env.BUCKET_PROVIDER = ENV.BUCKET_PROVIDER;
+    process.env.BUCKET_NAME = ENV.BUCKET_NAME;
+    process.env.AWS_REGION = ENV.AWS_REGION;
+    process.env.AWS_ACCESS_KEY = ENV.AWS_ACCESS_KEY;
+    process.env.AWS_SECRET_KEY = ENV.AWS_SECRET_KEY;
+    process.env.MINIO_ENDPOINT = ENV.MINIO_ENDPOINT;
+    process.env.PGHOST = ENV.PGHOST;
+    process.env.PGPORT = ENV.PGPORT;
+    process.env.PGUSER = ENV.PGUSER;
+    process.env.PGPASSWORD = ENV.PGPASSWORD;
+    process.env.PGDATABASE = ENV.PGDATABASE;
+
+    pg = makePgClient();
+    await pg.connect();
+    s3 = makeS3();
+
+    await setupObjectStoreSchema(pg);
+    await pg.query(`
+      CREATE TABLE IF NOT EXISTS ${SOURCE_SCHEMA}.${SOURCE_TABLE} (
+        id uuid PRIMARY KEY,
+        ${SOURCE_COLUMN} jsonb
+      )
+    `);
+    await pg.query(`
+      DROP TRIGGER IF EXISTS ${SOURCE_TABLE}_${SOURCE_COLUMN}_file_ref
+      ON ${SOURCE_SCHEMA}.${SOURCE_TABLE}
+    `);
+    await pg.query(`
+      CREATE TRIGGER ${SOURCE_TABLE}_${SOURCE_COLUMN}_file_ref
+      AFTER UPDATE OF ${SOURCE_COLUMN} ON ${SOURCE_SCHEMA}.${SOURCE_TABLE}
+      FOR EACH ROW
+      EXECUTE FUNCTION ${OBJECT_STORE_SCHEMA}.populate_file_back_reference(
+        '${SOURCE_COLUMN}',
+        '${SOURCE_SCHEMA}.${SOURCE_TABLE}'
+      )
+    `);
+  });
+
+  afterEach(async () => {
+    if (uploadResolverModule) {
+      await uploadResolverModule.__resetUploadResolverForTests();
+      uploadResolverModule = null;
+    }
+
+    const keysResult = await pg.query(
+      `SELECT key FROM ${OBJECT_STORE_SCHEMA}.files ORDER BY key`
+    );
+    for (const row of keysResult.rows) {
+      try {
+        await s3.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: row.key }));
+      } catch {
+        // ignore cleanup failures for already-deleted objects
+      }
+    }
+
+    await pg.query(`DELETE FROM ${SOURCE_SCHEMA}.${SOURCE_TABLE}`);
+    await cleanObjectStoreRows(pg);
+  });
+
+  afterAll(async () => {
+    process.env = originalEnv;
+
+    await pg.query(
+      `DROP TABLE IF EXISTS ${SOURCE_SCHEMA}.${SOURCE_TABLE} CASCADE`
+    );
+    await teardownObjectStoreSchema(pg);
+    await pg.end();
+    s3.destroy();
+
+    try {
+      const pgCache = require('pg-cache');
+      if (pgCache.close) await pgCache.close();
+      if (pgCache.teardownPgPools) await pgCache.teardownPgPools();
+    } catch {
+      // ignore pg-cache teardown issues in tests
+    }
+  });
+
+  async function insertSourceRow(id: string): Promise<void> {
+    await pg.query(
+      `INSERT INTO ${SOURCE_SCHEMA}.${SOURCE_TABLE} (id, ${SOURCE_COLUMN})
+       VALUES ($1, NULL)`,
+      [id]
+    );
+  }
+
+  async function attachUploadToSourceRow(sourceId: string, upload: UploadResult): Promise<void> {
+    await pg.query('BEGIN');
+    try {
+      await pg.query(`SELECT set_config('app.database_id', $1, true)`, [
+        String(DATABASE_ID),
+      ]);
+      await pg.query(
+        `UPDATE ${SOURCE_SCHEMA}.${SOURCE_TABLE}
+         SET ${SOURCE_COLUMN} = $2::jsonb
+         WHERE id = $1`,
+        [sourceId, JSON.stringify(upload)]
+      );
+      await pg.query('COMMIT');
+    } catch (err) {
+      await pg.query('ROLLBACK');
+      throw 
err; + } + } + + async function getFileRowByKey(key: string) { + const result = await pg.query( + `SELECT id, database_id, key, status, source_table, source_column, source_id + FROM ${OBJECT_STORE_SCHEMA}.files + WHERE key = $1`, + [key] + ); + if (result.rowCount !== 1) { + throw new Error(`Expected one file row for key=${key}, got ${result.rowCount}`); + } + return result.rows[0]; + } + + async function runProcessImage(fileId: string) { + return handler( + { file_id: fileId, database_id: DATABASE_ID }, + makeContext(), + ) as Promise; + } + + async function expectProcessedFlow(upload: UploadResult): Promise { + if (!upload.key) { + throw new Error('Expected upload result to include a durable key'); + } + + const sourceId = randomUUID(); + await insertSourceRow(sourceId); + await attachUploadToSourceRow(sourceId, upload); + + const fileRow = await getFileRowByKey(upload.key); + expect(fileRow).toEqual( + expect.objectContaining({ + database_id: DATABASE_ID, + key: upload.key, + status: 'pending', + source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`, + source_column: SOURCE_COLUMN, + source_id: sourceId, + }) + ); + + const result = await runProcessImage(fileRow.id); + expect(result).toEqual({ success: true, versions: 2 }); + + const baseKey = upload.key.replace(/_origin$/, ''); + const thumbnailKey = `${baseKey}_thumbnail`; + const mediumKey = `${baseKey}_medium`; + + expect(await objectExists(s3, upload.key)).toBe(true); + expect(await objectExists(s3, thumbnailKey)).toBe(true); + expect(await objectExists(s3, mediumKey)).toBe(true); + + const files = await pg.query( + `SELECT key, status, source_table, source_column, source_id + FROM ${OBJECT_STORE_SCHEMA}.files + WHERE key LIKE $1 + ORDER BY key`, + [`${baseKey}%`] + ); + + expect(files.rows).toEqual([ + { + key: mediumKey, + status: 'ready', + source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`, + source_column: SOURCE_COLUMN, + source_id: sourceId, + }, + { + key: upload.key, + status: 'ready', + source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`, + source_column: SOURCE_COLUMN, + source_id: sourceId, + }, + { + key: thumbnailKey, + status: 'ready', + source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`, + source_column: SOURCE_COLUMN, + source_id: sourceId, + }, + ]); + + const sourceResult = await pg.query( + `SELECT ${SOURCE_COLUMN} + FROM ${SOURCE_SCHEMA}.${SOURCE_TABLE} + WHERE id = $1`, + [sourceId] + ); + const imageValue = sourceResult.rows[0][SOURCE_COLUMN]; + + expect(imageValue).toEqual( + expect.objectContaining({ + key: upload.key, + filename: upload.filename, + mime: upload.mime, + }) + ); + expect(imageValue.versions).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + key: thumbnailKey, + mime: 'image/jpeg', + width: 150, + height: 150, + }), + expect.objectContaining({ + key: mediumKey, + mime: 'image/jpeg', + width: 1200, + }), + ]) + ); + expect(imageValue.versions).toHaveLength(2); + + const secondRun = await runProcessImage(fileRow.id); + expect(secondRun).toEqual({ skipped: true, reason: 'not_pending_or_locked' }); + } + + it('covers REST-style upload through processed versions', async () => { + uploadResolverModule = await loadUploadResolverModule(); + + const upload = await uploadResolverModule.streamToStorage( + Readable.from(LARGE_JPEG), + 'rest-flow.jpg', + { + databaseId: String(DATABASE_ID), + userId: USER_ID, + bucketKey: 'default', + } + ); + + expect(upload).toEqual( + expect.objectContaining({ + key: expect.stringMatching(/^1\/default\/[0-9a-f-]+_origin$/), + filename: 'rest-flow.jpg', + mime: 
'image/jpeg', + url: expect.any(String), + }) + ); + + await expectProcessedFlow(upload); + }); + + it('covers inline GraphQL upload through processed versions', async () => { + uploadResolverModule = await loadUploadResolverModule(); + + const imageUploadDefinition = uploadResolverModule.constructiveUploadFieldDefinitions.find( + (definition) => 'name' in definition && definition.name === 'image' + ); + + if (!imageUploadDefinition) { + throw new Error('Missing image upload definition'); + } + + const upload = await imageUploadDefinition.resolve( + makeUpload('inline-flow.jpg', LARGE_JPEG) as any, + {}, + { + req: { + api: { databaseId: String(DATABASE_ID) }, + token: { user_id: USER_ID }, + }, + }, + { uploadPlugin: { tags: {}, type: 'image' } } as any + ) as UploadResult; + + expect(upload).toEqual( + expect.objectContaining({ + key: expect.stringMatching(/^1\/default\/[0-9a-f-]+_origin$/), + filename: 'inline-flow.jpg', + mime: 'image/jpeg', + url: expect.any(String), + }) + ); + + await expectProcessedFlow(upload); + }); +}); diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts index 91b75b9..21aaafe 100644 --- a/functions/process-image/handler.ts +++ b/functions/process-image/handler.ts @@ -374,16 +374,30 @@ async function handleFileMode( height: v.height, })); - await pool.query( - `UPDATE ${file.source_table} - SET ${file.source_column} = jsonb_set( - ${file.source_column}::jsonb, - '{versions}', - $1::jsonb - ) - WHERE id = $2`, - [JSON.stringify(versionsArray), file.source_id] - ); + const sourceClient = await pool.connect(); + try { + await sourceClient.query('BEGIN'); + await sourceClient.query( + `SELECT set_config('app.database_id', $1, true)`, + [String(file.database_id)] + ); + await sourceClient.query( + `UPDATE ${file.source_table} + SET ${file.source_column} = jsonb_set( + ${file.source_column}::jsonb, + '{versions}', + $1::jsonb + ) + WHERE id = $2`, + [JSON.stringify(versionsArray), file.source_id] + ); + await sourceClient.query('COMMIT'); + } catch (domainUpdateErr) { + await sourceClient.query('ROLLBACK'); + throw domainUpdateErr; + } finally { + sourceClient.release(); + } } return { success: true, versions: versionRows.length }; @@ -395,16 +409,21 @@ async function handleFileMode( // --------------------------------------------------------------- await deleteS3Objects(s3, bucket, uploadedS3Keys, log); - await pool.query( - `UPDATE object_store_public.files SET status = 'error', status_reason = $3 - WHERE id = $1 AND database_id = $2`, - [file.id, file.database_id, (processingErr as Error).message] - ); + try { + await pool.query( + `UPDATE object_store_public.files SET status = 'error', status_reason = $3 + WHERE id = $1 AND database_id = $2`, + [file.id, file.database_id, (processingErr as Error).message] + ); + } catch (statusErr) { + log.error('[process-image] failed to mark file as error', statusErr); + } throw processingErr; } } finally { client.release(); + s3.destroy(); } } diff --git a/tests/helpers/object-store-schema.ts b/tests/helpers/object-store-schema.ts index c91be43..542f2ca 100644 --- a/tests/helpers/object-store-schema.ts +++ b/tests/helpers/object-store-schema.ts @@ -23,6 +23,7 @@ export function makePgClient(): PgClient { } export async function setupObjectStoreSchema(pg: PgClient): Promise { + await pg.query('CREATE EXTENSION IF NOT EXISTS pgcrypto'); await pg.query(`CREATE SCHEMA IF NOT EXISTS ${SCHEMA}`); await pg.query(` @@ -54,6 +55,80 @@ export async function setupObjectStoreSchema(pg: PgClient): Promise { 
) `); + await pg.query(` + CREATE OR REPLACE FUNCTION ${SCHEMA}.populate_file_back_reference() + RETURNS trigger AS $fn$ + DECLARE + col_name text := TG_ARGV[0]; + table_name text := TG_ARGV[1]; + new_val jsonb; + old_val jsonb; + new_key text; + old_key text; + base_key text; + db_id integer; + BEGIN + db_id := current_setting('app.database_id')::integer; + + EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW; + EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO old_val USING OLD; + + new_key := new_val ->> 'key'; + old_key := old_val ->> 'key'; + + IF new_key IS NOT DISTINCT FROM old_key THEN + RETURN NEW; + END IF; + + IF old_key IS NOT NULL AND old_key <> '' THEN + base_key := regexp_replace(old_key, '_[^_]+$', ''); + + UPDATE ${SCHEMA}.${TABLE} + SET status = 'deleting', status_reason = 'replaced by new file' + WHERE database_id = db_id + AND (key = old_key OR key LIKE base_key || '_%') + AND status <> 'deleting'; + END IF; + + IF new_key IS NOT NULL AND new_key <> '' THEN + base_key := regexp_replace(new_key, '_[^_]+$', ''); + + UPDATE ${SCHEMA}.${TABLE} + SET source_table = table_name, + source_column = col_name, + source_id = NEW.id + WHERE database_id = db_id + AND (key = new_key OR key LIKE base_key || '_%'); + END IF; + + RETURN NEW; + END; + $fn$ LANGUAGE plpgsql + `); + + await pg.query(` + CREATE OR REPLACE FUNCTION ${SCHEMA}.mark_files_deleting_on_source_delete() + RETURNS trigger AS $fn$ + DECLARE + col_name text := TG_ARGV[0]; + table_name text := TG_ARGV[1]; + db_id integer; + BEGIN + db_id := current_setting('app.database_id')::integer; + + UPDATE ${SCHEMA}.${TABLE} + SET status = 'deleting', status_reason = 'source row deleted' + WHERE database_id = db_id + AND source_table = table_name + AND source_column = col_name + AND source_id = OLD.id + AND status <> 'deleting'; + + RETURN OLD; + END; + $fn$ LANGUAGE plpgsql + `); + // State machine trigger await pg.query(` CREATE OR REPLACE FUNCTION ${SCHEMA}.files_before_update_timestamp() @@ -91,6 +166,8 @@ export async function setupObjectStoreSchema(pg: PgClient): Promise { export async function teardownObjectStoreSchema(pg: PgClient): Promise { await pg.query(`DROP TABLE IF EXISTS ${SCHEMA}.${TABLE} CASCADE`); + await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.mark_files_deleting_on_source_delete CASCADE`); + await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.populate_file_back_reference CASCADE`); await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.files_before_update_timestamp CASCADE`); await pg.query(`DROP TYPE IF EXISTS ${SCHEMA}.file_status CASCADE`); await pg.query(`DROP SCHEMA IF EXISTS ${SCHEMA} CASCADE`); From 3e043c04522144a8aad9cdee9e8f5dc5d479933a Mon Sep 17 00:00:00 2001 From: zetazzz Date: Fri, 13 Mar 2026 13:16:56 +0800 Subject: [PATCH 05/13] refined code by code review doc --- .../__tests__/handler.e2e.test.ts | 9 +- functions/delete-s3-object/handler.ts | 83 +++++++++++-------- functions/process-image/handler.ts | 15 +++- 3 files changed, 68 insertions(+), 39 deletions(-) diff --git a/functions/delete-s3-object/__tests__/handler.e2e.test.ts b/functions/delete-s3-object/__tests__/handler.e2e.test.ts index d442c0d..25451bb 100644 --- a/functions/delete-s3-object/__tests__/handler.e2e.test.ts +++ b/functions/delete-s3-object/__tests__/handler.e2e.test.ts @@ -30,11 +30,12 @@ const ENV: Record = { PGUSER: 'postgres', PGPASSWORD: 'password', PGDATABASE: 'constructive', + BUCKET_PROVIDER: 'minio', + BUCKET_NAME: BUCKET, + AWS_ACCESS_KEY: 'minioadmin', + AWS_SECRET_KEY: 'minioadmin', 
AWS_REGION: 'us-east-1',
-  AWS_ACCESS_KEY_ID: 'minioadmin',
-  AWS_SECRET_ACCESS_KEY: 'minioadmin',
-  S3_ENDPOINT: 'http://localhost:9000',
-  S3_BUCKET: BUCKET,
+  MINIO_ENDPOINT: 'http://localhost:9000',
 };
 
 function makeS3(): S3Client {
diff --git a/functions/delete-s3-object/handler.ts b/functions/delete-s3-object/handler.ts
index 962ac51..c3fc746 100644
--- a/functions/delete-s3-object/handler.ts
+++ b/functions/delete-s3-object/handler.ts
@@ -8,6 +8,34 @@ type DeleteParams = {
   key: string;
 };
 
+function createS3Client(env: Record<string, string | undefined>): S3Client {
+  const provider = env.BUCKET_PROVIDER || 'minio';
+  const isMinio = provider === 'minio';
+  return new S3Client({
+    region: env.AWS_REGION || 'us-east-1',
+    credentials: {
+      accessKeyId: env.AWS_ACCESS_KEY || 'minioadmin',
+      secretAccessKey: env.AWS_SECRET_KEY || 'minioadmin',
+    },
+    ...(isMinio
+      ? {
+          endpoint: env.MINIO_ENDPOINT || 'http://localhost:9000',
+          forcePathStyle: true,
+        }
+      : {}),
+  });
+}
+
+function createPgPool(env: Record<string, string | undefined>) {
+  return getPgPool({
+    host: env.PGHOST || 'localhost',
+    port: Number(env.PGPORT || 5432),
+    database: env.PGDATABASE || 'constructive',
+    user: env.PGUSER || 'postgres',
+    password: env.PGPASSWORD || 'password',
+  });
+}
+
 const handler: FunctionHandler = async (
   params: DeleteParams,
   context: FunctionContext
@@ -16,42 +44,31 @@ const handler: FunctionHandler = async (
 
   log.info('[delete-s3-object] deleting', { key: params.key });
 
-  const s3 = new S3Client({
-    region: env.AWS_REGION || 'us-east-1',
-    endpoint: env.S3_ENDPOINT,
-    forcePathStyle: true,
-    credentials: {
-      accessKeyId: env.AWS_ACCESS_KEY_ID!,
-      secretAccessKey: env.AWS_SECRET_ACCESS_KEY!,
-    },
-  });
+  const s3 = createS3Client(env);
+  const pool = createPgPool(env);
 
-  const pool = getPgPool({
-    host: env.PGHOST,
-    port: Number(env.PGPORT || 5432),
-    database: env.PGDATABASE || 'constructive',
-    user: env.PGUSER,
-    password: env.PGPASSWORD,
-  });
+  try {
+    // Step 1: Delete from S3 (idempotent -- delete ignores missing keys)
+    await s3.send(new DeleteObjectCommand({
+      Bucket: env.BUCKET_NAME || 'test-bucket',
+      Key: params.key,
+    }));
 
-  // Step 1: Delete from S3 (idempotent -- delete ignores missing keys)
-  await s3.send(new DeleteObjectCommand({
-    Bucket: env.S3_BUCKET!,
-    Key: params.key,
-  }));
-
-  // Step 2: Delete the DB row
-  const result = await pool.query(
-    'DELETE FROM object_store_public.files WHERE id = $1 AND database_id = $2',
-    [params.file_id, params.database_id]
-  );
-
-  log.info('[delete-s3-object] complete', {
-    key: params.key,
-    rowsDeleted: result.rowCount,
-  });
+    // Step 2: Delete the DB row
+    const result = await pool.query(
+      'DELETE FROM object_store_public.files WHERE id = $1 AND database_id = $2',
+      [params.file_id, params.database_id]
+    );
+
+    log.info('[delete-s3-object] complete', {
+      key: params.key,
+      rowsDeleted: result.rowCount,
+    });
 
-  return { success: true, key: params.key };
+    return { success: true, key: params.key };
+  } finally {
+    s3.destroy();
+  }
 };
 
 export default handler;
diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
index 21aaafe..59b4fe2 100644
--- a/functions/process-image/handler.ts
+++ b/functions/process-image/handler.ts
@@ -239,6 +239,17 @@ async function handleFileMode(
   const mimeType = headResult.ContentType ?? 
'application/octet-stream'; + // Check file size before downloading + const maxFileSize = Number(env.MAX_FILE_SIZE) || Number(env.MAX_IMAGE_SIZE) || 52_428_800; // 50MB + if (headResult.ContentLength && headResult.ContentLength > maxFileSize) { + log.warn(`[process-image] file too large (${headResult.ContentLength} bytes, max ${maxFileSize}), marking as ready`); + await pool.query( + `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`, + [file.id, file.database_id] + ); + return { success: true, mime: mimeType, skipped: true, reason: 'file_too_large' }; + } + if (!mimeType.startsWith('image/')) { // Non-image: mark as ready immediately, no versions to generate await pool.query( @@ -289,8 +300,8 @@ async function handleFileMode( }); } - // Generate medium (max 1200px wide, skip if original is smaller) - if ((metadata.width ?? 0) > 1200) { + // Generate medium (max 1200px, skip if original is smaller) + if ((metadata.width ?? 0) > 1200 || (metadata.height ?? 0) > 1200) { const medKey = `${baseKey}_medium`; const medResult = await image.clone() .resize(1200, null, { withoutEnlargement: true }).jpeg() From 3d036b1936d1c915bc81e7c9bf3c9770354ece02 Mon Sep 17 00:00:00 2001 From: zetazzz Date: Fri, 13 Mar 2026 16:31:38 +0800 Subject: [PATCH 06/13] change schema names --- .../__tests__/handler.e2e.test.ts | 14 +++++++------- functions/delete-s3-object/handler.ts | 2 +- functions/file-cleanup/handler.ts | 12 ++++++------ .../__tests__/handler.file-mode.e2e.test.ts | 14 +++++++------- .../__tests__/upload-flow.e2e.test.ts | 14 +++++++------- functions/process-image/handler.json | 2 +- functions/process-image/handler.ts | 18 +++++++++--------- tests/helpers/object-store-schema.ts | 12 ++++++------ 8 files changed, 44 insertions(+), 44 deletions(-) diff --git a/functions/delete-s3-object/__tests__/handler.e2e.test.ts b/functions/delete-s3-object/__tests__/handler.e2e.test.ts index 25451bb..28b7fb0 100644 --- a/functions/delete-s3-object/__tests__/handler.e2e.test.ts +++ b/functions/delete-s3-object/__tests__/handler.e2e.test.ts @@ -12,16 +12,16 @@ import handler from '../handler'; import { createMockContext } from '../../../tests/helpers/mock-context'; import { makePgClient, - setupObjectStoreSchema, - teardownObjectStoreSchema, - cleanObjectStoreRows, + setupFilesStoreSchema, + teardownFilesStoreSchema, + cleanFilesStoreRows, } from '../../../tests/helpers/object-store-schema'; // --------------------------------------------------------------------------- // Infra helpers // --------------------------------------------------------------------------- -const SCHEMA = 'object_store_public'; +const SCHEMA = 'files_store_public'; const BUCKET = 'test-bucket'; const ENV: Record = { @@ -60,7 +60,7 @@ describe('delete-s3-object handler e2e', () => { pg = makePgClient(); await pg.connect(); s3 = makeS3(); - await setupObjectStoreSchema(pg); + await setupFilesStoreSchema(pg); }); afterAll(async () => { @@ -69,7 +69,7 @@ describe('delete-s3-object handler e2e', () => { await s3.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: key })); } catch { /* ignore */ } } - await teardownObjectStoreSchema(pg); + await teardownFilesStoreSchema(pg); await pg.end(); s3.destroy(); try { @@ -79,7 +79,7 @@ describe('delete-s3-object handler e2e', () => { }); afterEach(async () => { - await cleanObjectStoreRows(pg); + await cleanFilesStoreRows(pg); }); function callHandler(file_id: string, database_id: number, key: string) { diff --git a/functions/delete-s3-object/handler.ts 
diff --git a/functions/delete-s3-object/handler.ts b/functions/delete-s3-object/handler.ts
index c3fc746..e166822 100644
--- a/functions/delete-s3-object/handler.ts
+++ b/functions/delete-s3-object/handler.ts
@@ -56,7 +56,7 @@ const handler: FunctionHandler = async (
 
     // Step 2: Delete the DB row
     const result = await pool.query(
-      'DELETE FROM object_store_public.files WHERE id = $1 AND database_id = $2',
+      'DELETE FROM files_store_public.files WHERE id = $1 AND database_id = $2',
       [params.file_id, params.database_id]
     );
 
diff --git a/functions/file-cleanup/handler.ts b/functions/file-cleanup/handler.ts
index 653c762..82b092c 100644
--- a/functions/file-cleanup/handler.ts
+++ b/functions/file-cleanup/handler.ts
@@ -13,10 +13,10 @@ const CLEANUP_QUERIES: Record
 {
     await pg.connect();
     s3 = makeS3();
-    await setupObjectStoreSchema(pg);
+    await setupFilesStoreSchema(pg);
     await pg.query(`
       CREATE TABLE IF NOT EXISTS ${SOURCE_SCHEMA}.${SOURCE_TABLE} (
         id uuid PRIMARY KEY,
@@ -101,12 +101,12 @@ describe('process-image handler file mode e2e', () => {
     s3Keys.clear();
 
     await pg.query(`DELETE FROM ${SOURCE_SCHEMA}.${SOURCE_TABLE}`);
-    await cleanObjectStoreRows(pg);
+    await cleanFilesStoreRows(pg);
   });
 
   afterAll(async () => {
     await pg.query(`DROP TABLE IF EXISTS ${SOURCE_SCHEMA}.${SOURCE_TABLE}`);
-    await teardownObjectStoreSchema(pg);
+    await teardownFilesStoreSchema(pg);
     await pg.end();
     s3.destroy();
 
diff --git a/functions/process-image/__tests__/upload-flow.e2e.test.ts b/functions/process-image/__tests__/upload-flow.e2e.test.ts
index f4af8b7..af27305 100644
--- a/functions/process-image/__tests__/upload-flow.e2e.test.ts
+++ b/functions/process-image/__tests__/upload-flow.e2e.test.ts
@@ -11,10 +11,10 @@ import { Readable } from 'stream';
 
 import handler from '../handler';
 import {
-  cleanObjectStoreRows,
+  cleanFilesStoreRows,
   makePgClient,
-  setupObjectStoreSchema,
-  teardownObjectStoreSchema,
+  setupFilesStoreSchema,
+  teardownFilesStoreSchema,
 } from '../../../tests/helpers/object-store-schema';
 
 jest.setTimeout(60000);
@@ -32,7 +32,7 @@ jest.mock('@constructive-io/graphql-env', () => ({
   }),
 }), { virtual: true });
 
-const OBJECT_STORE_SCHEMA = 'object_store_public';
+const OBJECT_STORE_SCHEMA = 'files_store_public';
 const SOURCE_SCHEMA = 'public';
 const SOURCE_TABLE = 'test_upload_flow_images';
 const SOURCE_COLUMN = 'image';
@@ -137,7 +137,7 @@ describe('upload to process-image flow e2e', () => {
     await pg.connect();
     s3 = makeS3();
 
-    await setupObjectStoreSchema(pg);
+    await setupFilesStoreSchema(pg);
     await pg.query(`
       CREATE TABLE IF NOT EXISTS ${SOURCE_SCHEMA}.${SOURCE_TABLE} (
         id uuid PRIMARY KEY,
@@ -177,7 +177,7 @@ describe('upload to process-image flow e2e', () => {
     }
 
     await pg.query(`DELETE FROM ${SOURCE_SCHEMA}.${SOURCE_TABLE}`);
-    await cleanObjectStoreRows(pg);
+    await cleanFilesStoreRows(pg);
   });
 
   afterAll(async () => {
@@ -186,7 +186,7 @@ describe('upload to process-image flow e2e', () => {
     await pg.query(
       `DROP TABLE IF EXISTS ${SOURCE_SCHEMA}.${SOURCE_TABLE} CASCADE`
     );
-    await teardownObjectStoreSchema(pg);
+    await teardownFilesStoreSchema(pg);
     await pg.end();
     s3.destroy();
 
diff --git a/functions/process-image/handler.json b/functions/process-image/handler.json
index 70c8196..6895001 100644
--- a/functions/process-image/handler.json
+++ b/functions/process-image/handler.json
@@ -2,7 +2,7 @@
   "name": "process-image",
   "version": "1.0.0",
   "type": "node-graphql",
-  "description": "Processes uploaded files and images: generates resized versions via sharp, manages object_store_public.files status, and updates database records",
+  "description": "Processes uploaded files and images: generates resized versions via sharp, manages files_store_public.files status, and updates database records",
   "dependencies": {
     "@aws-sdk/client-s3": "^3.1001.0",
     "@aws-sdk/lib-storage": "^3.1001.0",
diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
index 59b4fe2..353bb75 100644
--- a/functions/process-image/handler.ts
+++ b/functions/process-image/handler.ts
@@ -16,7 +16,7 @@ interface VersionConfig {
   maxHeight: number;
 }
 
-/** File mode: process a file from object_store_public.files */
+/** File mode: process a file from files_store_public.files */
 interface ProcessFileParams {
   file_id: string;
   database_id: number;
 }
@@ -183,7 +183,7 @@
 }
 
 // ---------------------------------------------------------------------------
-// File Mode: process a file from object_store_public.files
+// File Mode: process a file from files_store_public.files
 //
 // Locks the row with FOR UPDATE SKIP LOCKED, transitions status through
 // pending -> processing -> ready, generates thumbnail + medium versions,
@@ -208,7 +208,7 @@ async function handleFileMode(
     await client.query('BEGIN');
 
     const { rows } = await client.query(
-      `SELECT * FROM object_store_public.files
+      `SELECT * FROM files_store_public.files
        WHERE id = $1 AND database_id = $2 AND status = 'pending'
        FOR UPDATE SKIP LOCKED`,
       [params.file_id, params.database_id]
@@ -224,7 +224,7 @@ async function handleFileMode(
 
     // Transition to processing
     await client.query(
-      `UPDATE object_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = $2`,
+      `UPDATE files_store_public.files SET status = 'processing' WHERE id = $1 AND database_id = $2`,
       [file.id, file.database_id]
     );
@@ -244,7 +244,7 @@ async function handleFileMode(
     if (headResult.ContentLength && headResult.ContentLength > maxFileSize) {
       log.warn(`[process-image] file too large (${headResult.ContentLength} bytes, max ${maxFileSize}), marking as ready`);
       await pool.query(
-        `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`,
+        `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`,
         [file.id, file.database_id]
       );
       return { success: true, mime: mimeType, skipped: true, reason: 'file_too_large' };
@@ -253,7 +253,7 @@ async function handleFileMode(
     if (!mimeType.startsWith('image/')) {
       // Non-image: mark as ready immediately, no versions to generate
       await pool.query(
-        `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`,
+        `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`,
         [file.id, file.database_id]
       );
       return { success: true, mime: mimeType, versions: 0 };
@@ -334,7 +334,7 @@ async function handleFileMode(
 
       for (const ver of versionRows) {
         await txClient.query(
-          `INSERT INTO object_store_public.files
+          `INSERT INTO files_store_public.files
            (database_id, bucket_key, key, status, etag, created_by,
             source_table, source_column, source_id)
            VALUES ($1, $2, $3, 'ready', $4, $5, $6, $7, $8)`,
@@ -347,7 +347,7 @@ async function handleFileMode(
 
       // Mark origin as ready
       await txClient.query(
-        `UPDATE object_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`,
+        `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`,
        [file.id, file.database_id]
      );
 
@@ -422,7 +422,7 @@ async function handleFileMode(
 
     try {
       await pool.query(
-        `UPDATE object_store_public.files SET status = 'error', status_reason = $3
+        `UPDATE files_store_public.files SET status = 'error', status_reason = $3
          WHERE id = $1 AND database_id = $2`,
         [file.id, file.database_id, (processingErr as Error).message]
       );
diff --git a/tests/helpers/object-store-schema.ts b/tests/helpers/object-store-schema.ts
index 542f2ca..9a38318 100644
--- a/tests/helpers/object-store-schema.ts
+++ b/tests/helpers/object-store-schema.ts
@@ -1,5 +1,5 @@
 /**
- * Shared setup/teardown for the object_store_public schema used by
+ * Shared setup/teardown for the files_store_public schema used by
  * process-file and delete-s3-object e2e tests.
  *
  * Creates the schema, enum, table, and triggers once. Multiple test
@@ -9,7 +9,7 @@
  */
 import { Client as PgClient } from 'pg';
 
-const SCHEMA = 'object_store_public';
+const SCHEMA = 'files_store_public';
 const TABLE = 'files';
 
 export function makePgClient(): PgClient {
@@ -22,7 +22,7 @@ export function makePgClient(): PgClient {
   });
 }
 
-export async function setupObjectStoreSchema(pg: PgClient): Promise<void> {
+export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
   await pg.query('CREATE EXTENSION IF NOT EXISTS pgcrypto');
   await pg.query(`CREATE SCHEMA IF NOT EXISTS ${SCHEMA}`);
 
@@ -51,7 +51,7 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       created_by uuid,
       created_at timestamptz NOT NULL DEFAULT now(),
       updated_at timestamptz NOT NULL DEFAULT now(),
-      CONSTRAINT object_store_files_pkey PRIMARY KEY (id, database_id)
+      CONSTRAINT files_store_files_pkey PRIMARY KEY (id, database_id)
     )
   `);
 
@@ -164,7 +164,7 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
   `);
 }
 
-export async function teardownObjectStoreSchema(pg: PgClient): Promise<void> {
+export async function teardownFilesStoreSchema(pg: PgClient): Promise<void> {
   await pg.query(`DROP TABLE IF EXISTS ${SCHEMA}.${TABLE} CASCADE`);
   await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.mark_files_deleting_on_source_delete CASCADE`);
   await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.populate_file_back_reference CASCADE`);
@@ -173,6 +173,6 @@ export async function teardownFilesStoreSchema(pg: PgClient): Promise<void> {
   await pg.query(`DROP SCHEMA IF EXISTS ${SCHEMA} CASCADE`);
 }
 
-export async function cleanObjectStoreRows(pg: PgClient): Promise<void> {
+export async function cleanFilesStoreRows(pg: PgClient): Promise<void> {
   await pg.query(`DELETE FROM ${SCHEMA}.${TABLE}`);
 }

From 208af4fbd26486a206121d6830556f378b445eb4 Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Sat, 14 Mar 2026 13:46:24 +0800
Subject: [PATCH 07/13] fix ready → deleting transition
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 functions/file-cleanup/handler.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/functions/file-cleanup/handler.ts b/functions/file-cleanup/handler.ts
index 82b092c..b4fa0aa 100644
--- a/functions/file-cleanup/handler.ts
+++ b/functions/file-cleanup/handler.ts
@@ -35,10 +35,10 @@ const CLEANUP_QUERIES: Record
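PATCH 07's subject points at the files.status state machine the whole series leans on (PATCH 11's error handling matches on 'Invalid status transition'). A compact illustration of the transitions implied across these patches — the status values come from the series itself, but the guard below is a hypothetical sketch; the real enforcement appears to live in the database:

type FileStatus = 'pending' | 'processing' | 'ready' | 'error' | 'deleting';

// Assumed transition table; 'ready' -> 'deleting' is the edge PATCH 07 fixes.
const ALLOWED: Record<FileStatus, FileStatus[]> = {
  pending: ['processing', 'deleting'],
  processing: ['ready', 'error', 'deleting'],
  ready: ['deleting'],
  error: ['deleting'],
  deleting: [],
};

function assertTransition(from: FileStatus, to: FileStatus): void {
  if (!ALLOWED[from].includes(to)) {
    throw new Error(`Invalid status transition: ${from} -> ${to}`);
  }
}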
Date: Sat, 14 Mar 2026 14:32:33 +0800
Subject: [PATCH 08/13] remove misleading flag

---
 functions/process-image/handler.ts | 376 +----------------------------
 1 file changed, 4 insertions(+), 372 deletions(-)

diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
index 353bb75..2062b6b 100644
--- a/functions/process-image/handler.ts
+++ b/functions/process-image/handler.ts
@@ -1,8 +1,6 @@
 import { DeleteObjectCommand, GetObjectCommand, PutObjectCommand, S3Client } from '@aws-sdk/client-s3';
-import { Upload } from '@aws-sdk/lib-storage';
 import type { FunctionContext, FunctionHandler } from '@constructive-io/fn-runtime';
 import { getPgPool } from 'pg-cache';
-import { extname } from 'path';
 import sharp from 'sharp';
 import { Readable } from 'stream';
 
@@ -16,63 +14,16 @@ interface VersionConfig {
   maxHeight: number;
 }
 
-/** File mode: process a file from files_store_public.files */
+/** Process a file from files_store_public.files */
 interface ProcessFileParams {
   file_id: string;
   database_id: number;
 }
 
-/** Image mode: process JSONB image fields on an arbitrary table */
-interface ProcessImageFieldParams {
-  schema: string;
-  table: string;
-  idFields: string[];
-  idValues: (string | number)[];
-  fields: string[];
-  versions?: VersionConfig[];
-}
-
-type ProcessParams = ProcessFileParams | ProcessImageFieldParams;
-
-interface ImageFieldValue {
-  url?: string;
-  id?: string;
-  key?: string;
-  bucket?: string;
-  provider?: string;
-  mime?: string;
-  filename?: string;
-  versions?: ImageVersion[];
-}
-
-interface ImageVersion {
-  name: string;
-  key: string;
-  bucket: string;
-  url: string;
-  width: number;
-  height: number;
-  mime: string;
-}
-
-const DEFAULT_VERSIONS: VersionConfig[] = [
-  { name: 'thumbnail', maxWidth: 150, maxHeight: 150 },
-  { name: 'medium', maxWidth: 600, maxHeight: 600 },
-  { name: 'large', maxWidth: 1200, maxHeight: 1200 },
-];
-
-const PROCESSABLE_FORMATS = new Set([
-  'jpeg', 'png', 'webp', 'gif', 'tiff', 'avif', 'heif', 'jp2',
-]);
-
 // ---------------------------------------------------------------------------
 // Helpers
 // ---------------------------------------------------------------------------
 
-function isFileMode(params: ProcessParams): params is ProcessFileParams {
-  return 'file_id' in params;
-}
-
 function validateIdentifier(name: string): string {
   if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(name)) {
     throw new Error(`Invalid SQL identifier: "${name}"`);
@@ -118,46 +69,6 @@ function createPgPool(env: Record<string, string>) {
   });
 }
 
-function parseS3Url(url: string): { bucket: string; key: string } | null {
-  try {
-    const parsed = new URL(url);
-    const parts = parsed.pathname.split('/').filter(Boolean);
-
-    const host = parsed.hostname;
-    if (host.endsWith('.s3.amazonaws.com')) {
-      const bucket = host.replace('.s3.amazonaws.com', '');
-      return { bucket, key: parts.join('/') };
-    }
-
-    if (parts.length >= 2) {
-      return { bucket: parts[0], key: parts.slice(1).join('/') };
-    }
-  } catch {
-    // not a valid URL
-  }
-  return null;
-}
-
-function deriveVersionKey(originalKey: string, versionName: string): string {
-  const ext = extname(originalKey);
-  const base = ext ? originalKey.slice(0, -ext.length) : originalKey;
-  return `${base}_${versionName}${ext}`;
-}
-
-function buildObjectUrl(
-  env: Record<string, string>,
-  bucket: string,
-  key: string,
-): string {
-  const provider = env.BUCKET_PROVIDER || 'minio';
-  if (provider === 'minio') {
-    const endpoint = env.MINIO_ENDPOINT || 'http://localhost:9000';
-    return `${endpoint.replace(/\/$/, '')}/${bucket}/${key}`;
-  }
-  const region = env.AWS_REGION || 'us-east-1';
-  return `https://${bucket}.s3.${region}.amazonaws.com/${key}`;
-}
-
 async function streamToBuffer(stream: Readable): Promise<Buffer> {
   const chunks: Buffer[] = [];
   for await (const chunk of stream) {
@@ -183,7 +94,7 @@ async function deleteS3Objects(
 }
 
 // ---------------------------------------------------------------------------
-// File Mode: process a file from files_store_public.files
+// Process a file from files_store_public.files
 //
 // Locks the row with FOR UPDATE SKIP LOCKED, transitions status through
 // pending -> processing -> ready, generates thumbnail + medium versions,
@@ -438,291 +349,12 @@ async function handleFileMode(
   }
 }
 
-// ---------------------------------------------------------------------------
-// Image Mode: process JSONB image fields on an arbitrary table
-//
-// For each specified field, downloads the original image from S3, generates
-// resized versions (thumbnail, medium, large by default), uploads them back,
-// and updates the JSONB field with version metadata.
-// ---------------------------------------------------------------------------
-
-async function handleImageMode(
-  params: ProcessImageFieldParams,
-  context: FunctionContext,
-): Promise {
-  const { log, env } = context;
-  const {
-    schema,
-    table,
-    idFields,
-    idValues,
-    fields,
-    versions = DEFAULT_VERSIONS,
-  } = params;
-
-  // --- Validation ---
-
-  if (!schema || !table) return { error: 'Missing schema or table' };
-  if (!idFields?.length || !idValues?.length)
-    return { error: 'Missing idFields or idValues' };
-  if (idFields.length !== idValues.length)
-    return { error: 'idFields and idValues must have same length' };
-  if (!fields?.length) return { error: 'Missing fields' };
-
-  validateIdentifier(schema);
-  validateIdentifier(table);
-  idFields.forEach(validateIdentifier);
-  fields.forEach(validateIdentifier);
-
-  const defaultBucket = env.BUCKET_NAME || 'test-bucket';
-
-  log.info('[process-image] starting', {
-    schema,
-    table,
-    idFields,
-    fields,
-    versionCount: versions.length,
-  });
-
-  const s3 = createS3Client(env);
-  const pool = createPgPool(env);
-
-  // --- Query the record ---
-
-  const fieldList = fields.map((f: string) => `"${f}"`).join(', ');
-  const whereClauses = idFields
-    .map((f: string, i: number) => `"${f}" = $${i + 1}`)
-    .join(' AND ');
-  const selectSql = `SELECT ${fieldList} FROM "${schema}"."${table}" WHERE ${whereClauses}`;
-
-  log.info('[process-image] querying record');
-  const { rows } = await pool.query(selectSql, idValues);
-
-  if (rows.length === 0) {
-    s3.destroy();
-    return { error: 'Record not found' };
-  }
-
-  const record = rows[0];
-  const results: Record = {};
-
-  try {
-    for (const field of fields) {
-      const fieldValue: ImageFieldValue | null =
-        typeof record[field] === 'string'
-          ? JSON.parse(record[field])
-          : record[field];
-
-      if (!fieldValue) {
-        log.info(`[process-image] field "${field}" is null, skipping`);
-        results[field] = { skipped: true, reason: 'null_value' };
-        continue;
-      }
-
-      if (fieldValue.versions && fieldValue.versions.length > 0) {
-        log.info(
-          `[process-image] field "${field}" already has ${fieldValue.versions.length} versions, skipping`,
-        );
-        results[field] = { skipped: true, reason: 'versions_exist' };
-        continue;
-      }
-
-      // Resolve bucket + key
-      let key = fieldValue.key;
-      let bucket = fieldValue.bucket || defaultBucket;
-
-      if (!key && fieldValue.url) {
-        const parsed = parseS3Url(fieldValue.url);
-        if (parsed) {
-          key = parsed.key;
-          bucket = parsed.bucket;
-        }
-      }
-
-      if (!key) {
-        log.warn(
-          `[process-image] field "${field}" has no resolvable key, skipping`,
-        );
-        results[field] = { skipped: true, reason: 'no_key' };
-        continue;
-      }
-
-      log.info(`[process-image] processing "${field}"`, { key, bucket });
-
-      // --- Download original to buffer ---
-
-      const response = await s3.send(
-        new GetObjectCommand({ Bucket: bucket, Key: key }),
-      );
-
-      const maxImageSize = Number(env.MAX_IMAGE_SIZE) || 52_428_800; // 50MB
-      if (response.ContentLength && response.ContentLength > maxImageSize) {
-        log.warn(
-          `[process-image] field "${field}" file too large (${response.ContentLength} bytes, max ${maxImageSize}), skipping`,
-        );
-        results[field] = { skipped: true, reason: 'file_too_large', size: response.ContentLength };
-        continue;
-      }
-
-      if (!(response.Body instanceof Readable)) {
-        throw new Error(`S3 response body is not a readable stream for key=${key}`);
-      }
-
-      const originalBuffer = await streamToBuffer(response.Body);
-
-      if (originalBuffer.length > maxImageSize) {
-        log.warn(
-          `[process-image] field "${field}" buffer too large (${originalBuffer.length} bytes, max ${maxImageSize}), skipping`,
-        );
-        results[field] = { skipped: true, reason: 'file_too_large', size: originalBuffer.length };
-        continue;
-      }
-
-      // --- Gate: verify this is a processable image ---
-
-      let metadata: sharp.Metadata;
-      try {
-        metadata = await sharp(originalBuffer).metadata();
-      } catch {
-        log.warn(`[process-image] field "${field}" is not a valid image, skipping`);
-        results[field] = { skipped: true, reason: 'not_an_image' };
-        continue;
-      }
-
-      if (!metadata.format || !PROCESSABLE_FORMATS.has(metadata.format)) {
-        log.warn(
-          `[process-image] field "${field}" has unsupported format "${metadata.format || 'unknown'}", skipping`,
-        );
-        results[field] = { skipped: true, reason: 'unsupported_format', format: metadata.format };
-        continue;
-      }
-
-      const originalWidth = metadata.width || 0;
-      const originalHeight = metadata.height || 0;
-
-      if (!originalWidth || !originalHeight) {
-        log.warn(`[process-image] field "${field}" has no dimensions, skipping`);
-        results[field] = { skipped: true, reason: 'no_dimensions' };
-        continue;
-      }
-
-      log.info(`[process-image] original: ${originalWidth}x${originalHeight} (${metadata.format})`);
-
-      // --- Generate versions ---
-
-      const generatedVersions: ImageVersion[] = [];
-      const uploadedKeys: string[] = [];
-
-      try {
-        for (const ver of versions) {
-          if (originalWidth <= ver.maxWidth && originalHeight <= ver.maxHeight) {
-            log.info(
-              `[process-image] original (${originalWidth}x${originalHeight}) fits within ${ver.name} (${ver.maxWidth}x${ver.maxHeight}), skipping`,
-            );
-            continue;
-          }
-
-          const resized = await sharp(originalBuffer)
-            .resize(ver.maxWidth, ver.maxHeight, {
-              fit: 'inside',
-              withoutEnlargement: true,
-            })
-            .toBuffer({ resolveWithObject: true });
-
-          const vKey = deriveVersionKey(key, ver.name);
-          const vUrl = buildObjectUrl(env, bucket, vKey);
-          const mime = resized.info.format
-            ? `image/${resized.info.format}`
-            : (fieldValue.mime || 'image/jpeg');
-
-          const uploadResult = await new Upload({
-            client: s3,
-            params: {
-              Bucket: bucket,
-              Key: vKey,
-              Body: resized.data,
-              ContentType: mime,
-            },
-          }).done();
-
-          uploadedKeys.push(vKey);
-
-          generatedVersions.push({
-            name: ver.name,
-            key: vKey,
-            bucket,
-            url: uploadResult.Location || vUrl,
-            width: resized.info.width,
-            height: resized.info.height,
-            mime,
-          });
-
-          log.info(
-            `[process-image] uploaded ${ver.name}: ${resized.info.width}x${resized.info.height}`,
-          );
-        }
-      } catch (err) {
-        log.error(
-          `[process-image] version generation failed for "${field}", rolling back ${uploadedKeys.length} uploads`,
-          err,
-        );
-        await deleteS3Objects(s3, bucket, uploadedKeys, log);
-        throw err;
-      }
-
-      // --- Update database (rollback uploads on failure) ---
-
-      if (generatedVersions.length > 0) {
-        const updatedValue: ImageFieldValue = {
-          ...fieldValue,
-          versions: generatedVersions,
-        };
-
-        const updateWhere = idFields
-          .map((f: string, i: number) => `"${f}" = $${i + 2}`)
-          .join(' AND ');
-        const updateSql = `UPDATE "${schema}"."${table}" SET "${field}" = $1::jsonb WHERE ${updateWhere}`;
-        const updateValues = [JSON.stringify(updatedValue), ...idValues];
-
-        try {
-          await pool.query(updateSql, updateValues);
-          log.info(
-            `[process-image] updated "${field}" with ${generatedVersions.length} versions`,
-          );
-        } catch (err) {
-          log.error(`[process-image] DB update failed for "${field}", rolling back uploads`, err);
-          await deleteS3Objects(
-            s3,
-            bucket,
-            generatedVersions.map((v) => v.key),
-            log,
-          );
-          throw err;
-        }
-      }
-
-      results[field] = {
-        original: { width: originalWidth, height: originalHeight },
-        versions: generatedVersions,
-      };
-    }
-  } finally {
-    s3.destroy();
-  }
-
-  log.info('[process-image] complete');
-  return { success: true, results };
-}
-
 // ---------------------------------------------------------------------------
 // Main Handler
 // ---------------------------------------------------------------------------
 
-const handler: FunctionHandler = async (params, context) => {
-  if (isFileMode(params)) {
-    return handleFileMode(params, context);
-  }
-  return handleImageMode(params as ProcessImageFieldParams, context);
+const handler: FunctionHandler = async (params, context) => {
+  return handleFileMode(params, context);
 };
 
 export default handler;
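With the JSONB image mode gone, all resizing flows through the file-mode pipeline PATCH 05 set up: sharp reads the original buffer and emits bounded JPEG renditions. The core call, reduced to a standalone sketch (this uses the real sharp API, same options as the code above; the helper name is illustrative):

import sharp from 'sharp';

// Resize into a bounding box without upscaling; returns the rendition
// buffer plus its actual output dimensions.
async function makeVersion(original: Buffer, maxPx: number) {
  const { data, info } = await sharp(original)
    .resize(maxPx, maxPx, { fit: 'inside', withoutEnlargement: true })
    .jpeg()
    .toBuffer({ resolveWithObject: true });
  return { buffer: data, width: info.width, height: info.height, mime: 'image/jpeg' };
}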
From d9e39a3167dda771c7bdefdd05b2862313b02d28 Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Tue, 17 Mar 2026 11:29:15 +0800
Subject: [PATCH 09/13] add origin id for image versions

---
 functions/file-cleanup/handler.ts    |   7 +-
 functions/process-image/handler.ts   |  16 +++-
 tests/helpers/object-store-schema.ts | 115 +++++++++++++++++++++----
 3 files changed, 119 insertions(+), 19 deletions(-)

diff --git a/functions/file-cleanup/handler.ts b/functions/file-cleanup/handler.ts
index b4fa0aa..dd11b48 100644
--- a/functions/file-cleanup/handler.ts
+++ b/functions/file-cleanup/handler.ts
@@ -35,13 +35,16 @@ const CLEANUP_QUERIES: Record
 maxFileSize) {
@@ -247,11 +253,12 @@
       await txClient.query(
         `INSERT INTO files_store_public.files
          (database_id, bucket_key, key, status, etag, created_by,
-          source_table, source_column, source_id)
-         VALUES ($1, $2, $3, 'ready', $4, $5, $6, $7, $8)`,
+          source_table, source_column, source_id, origin_id, mime_type)
+         VALUES ($1, $2, $3, 'ready', $4, $5, $6, $7, $8, $9, $10)`,
         [
           file.database_id, file.bucket_key, ver.key, ver.etag,
           file.created_by, file.source_table, file.source_column, file.source_id,
+          file.id, ver.mime,
         ]
       );
     }
@@ -320,6 +327,11 @@
     } finally {
       sourceClient.release();
     }
+  } else if (versionRows.length > 0) {
+    log.info(
+      `[process-image] source_* not yet populated, skipping domain write-back. ` +
+      `Versions will be written when domain trigger fires. file_id=${file.id}`
+    );
+  }
 
   return { success: true, versions: versionRows.length };
 
diff --git a/tests/helpers/object-store-schema.ts b/tests/helpers/object-store-schema.ts
index 9a38318..1289e5d 100644
--- a/tests/helpers/object-store-schema.ts
+++ b/tests/helpers/object-store-schema.ts
@@ -49,12 +49,38 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       source_id uuid,
       processing_started_at timestamptz,
       created_by uuid,
+      origin_id uuid,
+      mime_type text,
       created_at timestamptz NOT NULL DEFAULT now(),
       updated_at timestamptz NOT NULL DEFAULT now(),
       CONSTRAINT files_store_files_pkey PRIMARY KEY (id, database_id)
     )
   `);
 
+  // Ensure new columns exist on pre-existing tables (CREATE TABLE IF NOT EXISTS
+  // does not add missing columns to an already-existing table)
+  await pg.query(`ALTER TABLE ${SCHEMA}.${TABLE} ADD COLUMN IF NOT EXISTS origin_id uuid`);
+  await pg.query(`ALTER TABLE ${SCHEMA}.${TABLE} ADD COLUMN IF NOT EXISTS mime_type text`);
+
+  // Self-referential FK (version -> origin)
+  await pg.query(`
+    DO $$ BEGIN
+      ALTER TABLE ${SCHEMA}.${TABLE}
+        ADD CONSTRAINT files_origin_fk
+        FOREIGN KEY (origin_id, database_id)
+        REFERENCES ${SCHEMA}.${TABLE} (id, database_id)
+        ON DELETE CASCADE;
+    EXCEPTION WHEN duplicate_object THEN NULL;
+    END $$
+  `);
+
+  // Index for version lookups
+  await pg.query(`
+    CREATE INDEX IF NOT EXISTS files_origin_id_idx
+      ON ${SCHEMA}.${TABLE} (origin_id, database_id)
+      WHERE origin_id IS NOT NULL
+  `);
+
   await pg.query(`
     CREATE OR REPLACE FUNCTION ${SCHEMA}.populate_file_back_reference()
     RETURNS trigger AS $fn$
@@ -65,8 +91,10 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       old_val jsonb;
       new_key text;
       old_key text;
-      base_key text;
       db_id integer;
+      origin_file_id uuid;
+      old_origin_file_id uuid;
+      versions_json json;
     BEGIN
       db_id := current_setting('app.database_id')::integer;
 
@@ -81,24 +109,56 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       END IF;
 
       IF old_key IS NOT NULL AND old_key <> '' THEN
-        base_key := regexp_replace(old_key, '_[^_]+$', '');
+        SELECT id INTO old_origin_file_id
+        FROM ${SCHEMA}.${TABLE}
+        WHERE key = old_key AND database_id = db_id;
 
-        UPDATE ${SCHEMA}.${TABLE}
-        SET status = 'deleting', status_reason = 'replaced by new file'
-        WHERE database_id = db_id
-          AND (key = old_key OR key LIKE base_key || '_%')
-          AND status <> 'deleting';
+        IF old_origin_file_id IS NOT NULL THEN
+          UPDATE ${SCHEMA}.${TABLE}
+          SET status = 'deleting', status_reason = 'replaced by new file'
+          WHERE id = old_origin_file_id AND database_id = db_id
+            AND status NOT IN ('deleting');
+
+          UPDATE ${SCHEMA}.${TABLE}
+          SET status = 'deleting', status_reason = 'replaced by new file'
+          WHERE origin_id = old_origin_file_id AND database_id = db_id
+            AND status NOT IN ('deleting');
+        END IF;
       END IF;
 
       IF new_key IS NOT NULL AND new_key <> '' THEN
-        base_key := regexp_replace(new_key, '_[^_]+$', '');
-
-        UPDATE ${SCHEMA}.${TABLE}
-        SET source_table = table_name,
-            source_column = col_name,
-            source_id = NEW.id
-        WHERE database_id = db_id
-          AND (key = new_key OR key LIKE base_key || '_%');
+        SELECT id INTO origin_file_id
+        FROM ${SCHEMA}.${TABLE}
+        WHERE key = new_key AND database_id = db_id;
+
+        IF origin_file_id IS NOT NULL THEN
+          UPDATE ${SCHEMA}.${TABLE}
+          SET source_table = table_name, source_column = col_name, source_id = NEW.id
+          WHERE id = origin_file_id AND database_id = db_id;
+
+          UPDATE ${SCHEMA}.${TABLE}
+          SET source_table = table_name, source_column = col_name, source_id = NEW.id
+          WHERE origin_id = origin_file_id AND database_id = db_id;
+
+          SELECT json_agg(json_build_object(
+            'key', f.key,
+            'mime', COALESCE(f.mime_type, 'image/jpeg'),
+            'width', 0,
+            'height', 0
+          ))
+          INTO versions_json
+          FROM ${SCHEMA}.${TABLE} f
+          WHERE f.origin_id = origin_file_id
+            AND f.database_id = db_id
+            AND f.status = 'ready';
+
+          IF versions_json IS NOT NULL THEN
+            EXECUTE format(
+              'UPDATE %s SET %I = jsonb_set(COALESCE(%I, ''{}''::jsonb), ''{versions}'', $1::jsonb) WHERE id = $2',
+              table_name, col_name, col_name
+            ) USING versions_json, NEW.id;
+          END IF;
+        END IF;
       END IF;
 
       RETURN NEW;
@@ -162,10 +222,35 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       FOR EACH ROW
       EXECUTE FUNCTION ${SCHEMA}.files_before_update_timestamp()
   `);
+
+  // Propagate deleting status from origin to version rows
+  await pg.query(`
+    CREATE OR REPLACE FUNCTION ${SCHEMA}.files_propagate_deleting_to_versions()
+    RETURNS trigger AS $fn$
+    BEGIN
+      UPDATE ${SCHEMA}.${TABLE}
+      SET status = 'deleting', status_reason = COALESCE(NEW.status_reason, 'origin marked deleting')
+      WHERE origin_id = NEW.id
+        AND database_id = NEW.database_id
+        AND status NOT IN ('deleting');
+      RETURN NEW;
+    END;
+    $fn$ LANGUAGE plpgsql
+  `);
+
+  await pg.query(`
+    DROP TRIGGER IF EXISTS files_after_update_propagate_deleting ON ${SCHEMA}.${TABLE};
+    CREATE TRIGGER files_after_update_propagate_deleting
+      AFTER UPDATE ON ${SCHEMA}.${TABLE}
+      FOR EACH ROW
+      WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting' AND NEW.origin_id IS NULL)
+      EXECUTE FUNCTION ${SCHEMA}.files_propagate_deleting_to_versions()
+  `);
 }
 
 export async function teardownFilesStoreSchema(pg: PgClient): Promise<void> {
   await pg.query(`DROP TABLE IF EXISTS ${SCHEMA}.${TABLE} CASCADE`);
+  await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.files_propagate_deleting_to_versions CASCADE`);
   await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.mark_files_deleting_on_source_delete CASCADE`);
   await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.populate_file_back_reference CASCADE`);
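For context on PATCH 09's data model: every resized rendition becomes its own files row pointing back at its origin via origin_id, which is what the new FK, partial index, and propagate-deleting trigger operate on. An illustrative insert helper under those assumptions (source_* columns omitted for brevity; the shape mirrors the INSERT in the hunk above but is not part of the patch):

import type { PoolClient } from 'pg';

async function insertVersionRow(
  client: PoolClient,
  origin: { id: string; database_id: string; bucket_key: string; created_by: string | null },
  ver: { key: string; etag: string; mime: string },
): Promise<void> {
  // origin_id ties the rendition to its origin row, so replacing or
  // deleting the origin cascades to every version.
  await client.query(
    `INSERT INTO files_store_public.files
       (database_id, bucket_key, key, status, etag, created_by, origin_id, mime_type)
     VALUES ($1, $2, $3, 'ready', $4, $5, $6, $7)`,
    [origin.database_id, origin.bucket_key, ver.key, ver.etag, origin.created_by, origin.id, ver.mime]
  );
}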
From 5cde028edf49b1f04e89db19b45292e52298e17c Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Thu, 19 Mar 2026 07:28:13 +0800
Subject: [PATCH 10/13] fixed db id type

---
 functions/delete-s3-object/handler.ts | 2 +-
 functions/process-image/handler.ts    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/functions/delete-s3-object/handler.ts b/functions/delete-s3-object/handler.ts
index e166822..46f35ff 100644
--- a/functions/delete-s3-object/handler.ts
+++ b/functions/delete-s3-object/handler.ts
@@ -4,7 +4,7 @@ import { getPgPool } from 'pg-cache';
 
 type DeleteParams = {
   file_id: string;
-  database_id: number;
+  database_id: string;
   key: string;
 };
 
diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
index 9dff800..a32111b 100644
--- a/functions/process-image/handler.ts
+++ b/functions/process-image/handler.ts
@@ -17,7 +17,7 @@ interface VersionConfig {
 
 /** Process a file from files_store_public.files */
 interface ProcessFileParams {
   file_id: string;
-  database_id: number;
+  database_id: string;
 }
 
 // ---------------------------------------------------------------------------

From 415ac6e6bc323e9b8abe3514a17b48385991659b Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Thu, 19 Mar 2026 18:25:16 +0800
Subject: [PATCH 11/13] adapt the jsonb image field

---
 .../__tests__/handler.e2e.test.ts             |  71 ++++++++++--
 functions/delete-s3-object/handler.ts         |  23 +++-
 .../__tests__/handler.file-mode.e2e.test.ts   | 105 +++++++-----------
 functions/process-image/handler.ts            |  61 ++++------
 tests/helpers/object-store-schema.ts          | 104 +++--------------
 5 files changed, 151 insertions(+), 213 deletions(-)

diff --git a/functions/delete-s3-object/__tests__/handler.e2e.test.ts b/functions/delete-s3-object/__tests__/handler.e2e.test.ts
index 28b7fb0..cbfb234 100644
--- a/functions/delete-s3-object/__tests__/handler.e2e.test.ts
+++ b/functions/delete-s3-object/__tests__/handler.e2e.test.ts
@@ -82,18 +82,21 @@ describe('delete-s3-object handler e2e', () => {
     await cleanFilesStoreRows(pg);
   });
 
-  function callHandler(file_id: string, database_id: number, key: string) {
+  const TEST_DB_ID = 'aaaaaaaa-0000-0000-0000-000000000099';
+
+  function callHandler(file_id: string, database_id: string, key: string, version_keys?: string[]) {
     const ctx = createMockContext({ env: ENV });
-    return handler({ file_id, database_id, key }, ctx as any);
+    return handler({ file_id, database_id, key, version_keys }, ctx as any);
   }
 
   async function insertFile(opts: {
     s3Key: string;
     body: Buffer;
     status?: string;
-    databaseId?: number;
-  }): Promise<{ id: string; database_id: number }> {
-    const databaseId = opts.databaseId ?? 1;
+    databaseId?: string;
+    versions?: object[];
+  }): Promise<{ id: string; database_id: string }> {
+    const databaseId = opts.databaseId ?? TEST_DB_ID;
     s3Keys.push(opts.s3Key);
 
     await s3.send(new PutObjectCommand({
@@ -105,10 +108,10 @@ describe('delete-s3-object handler e2e', () => {
 
     const res = await pg.query(
       `INSERT INTO ${SCHEMA}.files
-        (database_id, key, bucket_key, status)
-       VALUES ($1, $2, 'default', $3::${SCHEMA}.file_status)
+        (database_id, key, bucket_key, status, versions)
+       VALUES ($1, $2, 'default', $3::${SCHEMA}.file_status, $4::jsonb)
        RETURNING id, database_id`,
-      [databaseId, opts.s3Key, opts.status ?? 'deleting']
+      [databaseId, opts.s3Key, opts.status ?? 'deleting', opts.versions ? JSON.stringify(opts.versions) : null]
    );
    return res.rows[0];
  }
@@ -164,9 +167,9 @@ describe('delete-s3-object handler e2e', () => {
     const res = await pg.query(
       `INSERT INTO ${SCHEMA}.files
         (database_id, key, bucket_key, status)
-       VALUES (1, $1, 'default', 'deleting')
+       VALUES ($1, $2, 'default', 'deleting')
        RETURNING id, database_id`,
-      [key]
+      [TEST_DB_ID, key]
     );
     const { id, database_id } = res.rows[0];
 
@@ -198,7 +201,7 @@ describe('delete-s3-object handler e2e', () => {
 
     const result: any = await callHandler(
       '00000000-0000-0000-0000-000000000000',
-      1,
+      TEST_DB_ID,
       key
     );
 
@@ -210,10 +213,54 @@ describe('delete-s3-object handler e2e', () => {
   // -----------------------------------------------------------------------
   // Test 4: Both already deleted — fully idempotent
   // -----------------------------------------------------------------------
 
+  // -----------------------------------------------------------------------
+  // Test 4: Delete with version S3 objects
+  // -----------------------------------------------------------------------
+
+  it('deletes origin + version S3 objects and DB row', async () => {
+    const originKey = `e2e-del-ver-${Date.now()}-origin.bin`;
+    const thumbKey = `e2e-del-ver-${Date.now()}-thumb.bin`;
+    const mediumKey = `e2e-del-ver-${Date.now()}-medium.bin`;
+    const body = Buffer.from('test');
+
+    // Upload origin + versions to S3
+    for (const k of [originKey, thumbKey, mediumKey]) {
+      s3Keys.push(k);
+      await s3.send(new PutObjectCommand({ Bucket: BUCKET, Key: k, Body: body }));
+    }
+
+    const { id, database_id } = await insertFile({
+      s3Key: originKey,
+      body,
+      status: 'deleting',
+      versions: [
+        { key: thumbKey, mime: 'image/jpeg', width: 150, height: 150 },
+        { key: mediumKey, mime: 'image/jpeg', width: 1200, height: 675 },
+      ],
+    });
+
+    const result: any = await callHandler(id, database_id, originKey, [thumbKey, mediumKey]);
+
+    expect(result.success).toBe(true);
+    expect(await s3ObjectExists(originKey)).toBe(false);
+    expect(await s3ObjectExists(thumbKey)).toBe(false);
+    expect(await s3ObjectExists(mediumKey)).toBe(false);
+
+    const dbRes = await pg.query(
+      `SELECT * FROM ${SCHEMA}.files WHERE id = $1`,
+      [id]
+    );
+    expect(dbRes.rows.length).toBe(0);
+  });
+
+  // -----------------------------------------------------------------------
+  // Test 5: Both already deleted — fully idempotent
+  // -----------------------------------------------------------------------
+
   it('succeeds when both S3 and DB are already gone', async () => {
     const result: any = await callHandler(
       '00000000-0000-0000-0000-000000000000',
-      999,
+      TEST_DB_ID,
       `nonexistent-key-${Date.now()}`
     );
 
diff --git a/functions/delete-s3-object/handler.ts b/functions/delete-s3-object/handler.ts
index 46f35ff..48529c2 100644
--- a/functions/delete-s3-object/handler.ts
+++ b/functions/delete-s3-object/handler.ts
@@ -6,6 +6,7 @@ type DeleteParams = {
   file_id: string;
   database_id: string;
   key: string;
+  version_keys?: string[];
 };
 
 function createS3Client(env: Record<string, string>): S3Client {
@@ -48,13 +49,22 @@ const handler: FunctionHandler = async (
   const pool = createPgPool(env);
 
   try {
-    // Step 1: Delete from S3 (idempotent -- delete ignores missing keys)
-    await s3.send(new DeleteObjectCommand({
-      Bucket: env.BUCKET_NAME || 'test-bucket',
-      Key: params.key,
-    }));
+    const bucket = env.BUCKET_NAME || 'test-bucket';
 
-    // Step 2: Delete the DB row
+    // Step 1: Delete origin S3 object (idempotent)
+    await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: params.key }));
+
+    // Step 2: Delete version S3 objects (from job payload)
+    const versionKeys = params.version_keys || [];
+    for (const vk of versionKeys) {
+      try {
+        await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: vk }));
+      } catch (err) {
+        log.error(`[delete-s3-object] failed to delete version ${vk}`, err);
+      }
+    }
+
+    // Step 3: Delete the DB row
     const result = await pool.query(
       'DELETE FROM files_store_public.files WHERE id = $1 AND database_id = $2',
       [params.file_id, params.database_id]
     );
@@ -62,6 +72,7 @@ const handler: FunctionHandler = async (
 
     log.info('[delete-s3-object] complete', {
       key: params.key,
+      versionKeysDeleted: versionKeys.length,
       rowsDeleted: result.rowCount,
     });
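The handler above removes the origin and each version key with individual DeleteObjectCommand calls. For reference, the same @aws-sdk/client-s3 package offers a batched variant (up to 1000 keys per call); a sketch, not part of the patch:

import { DeleteObjectsCommand, S3Client } from '@aws-sdk/client-s3';

async function deleteOriginAndVersions(
  s3: S3Client,
  bucket: string,
  originKey: string,
  versionKeys: string[] = [],
): Promise<void> {
  // One round-trip for the origin plus all renditions.
  await s3.send(new DeleteObjectsCommand({
    Bucket: bucket,
    Delete: {
      Objects: [originKey, ...versionKeys].map((Key) => ({ Key })),
      Quiet: true,
    },
  }));
}

The per-key loop in the patch keeps per-version error logging; the batch trades that granularity for fewer round-trips, so either choice is defensible here.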
diff --git a/functions/process-image/__tests__/handler.file-mode.e2e.test.ts b/functions/process-image/__tests__/handler.file-mode.e2e.test.ts
index 40f60cf..1e37ad1 100644
--- a/functions/process-image/__tests__/handler.file-mode.e2e.test.ts
+++ b/functions/process-image/__tests__/handler.file-mode.e2e.test.ts
@@ -24,6 +24,7 @@
 const SOURCE_SCHEMA = 'public';
 const SOURCE_TABLE = 'test_process_file_uploads';
 const BUCKET = 'test-bucket';
 const USER_ID = 'aaaaaaaa-0000-0000-0000-000000000001';
+const TEST_DB_ID = 'aaaaaaaa-0000-0000-0000-000000000099';
 
 const ENV: Record<string, string> = {
   PGHOST: 'localhost',
@@ -139,9 +140,10 @@ describe('process-image handler file mode e2e', () => {
       `INSERT INTO ${OBJECT_STORE_SCHEMA}.files
        (id, database_id, bucket_key, key, status, etag, created_by,
         source_table, source_column, source_id)
-       VALUES ($1, 1, 'default', $2, 'pending', 'etag-origin', $3, $4, 'image', $5)`,
+       VALUES ($1, $2, 'default', $3, 'pending', 'etag-origin', $4, $5, 'image', $6)`,
       [
         opts.fileId,
+        TEST_DB_ID,
         opts.key,
         USER_ID,
         `${SOURCE_SCHEMA}.${SOURCE_TABLE}`,
@@ -154,23 +156,23 @@ describe('process-image handler file mode e2e', () => {
     await pg.query(
       `INSERT INTO ${OBJECT_STORE_SCHEMA}.files
        (id, database_id, bucket_key, key, status, etag, created_by)
-       VALUES ($1, 1, 'default', $2, 'pending', 'etag-origin', $3)`,
-      [opts.fileId, opts.key, USER_ID]
+       VALUES ($1, $2, 'default', $3, 'pending', 'etag-origin', $4)`,
+      [opts.fileId, TEST_DB_ID, opts.key, USER_ID]
     );
   }
 
   async function callHandler(fileId: string) {
     const ctx = createMockContext({ env: ENV });
-    return handler({ file_id: fileId, database_id: 1 }, ctx as any);
+    return handler({ file_id: fileId, database_id: TEST_DB_ID }, ctx as any);
   }
 
-  it('processes an attached image into ready thumbnail and medium versions', async () => {
+  it('processes an attached image into ready with versions JSONB', async () => {
     const fileId = randomUUID();
     const sourceId = randomUUID();
     const baseId = randomUUID();
-    const originKey = `1/default/${baseId}_origin`;
-    const thumbKey = `1/default/${baseId}_thumbnail`;
-    const mediumKey = `1/default/${baseId}_medium`;
+    const originKey = `${TEST_DB_ID}/default/${baseId}_origin`;
+    const thumbKey = `${TEST_DB_ID}/default/${baseId}_thumbnail`;
+    const mediumKey = `${TEST_DB_ID}/default/${baseId}_medium`;
 
     const imageBuffer = await generateTestImage(1600, 900);
     await putOriginImage(originKey, imageBuffer);
@@ -197,44 +199,33 @@ describe('process-image handler file mode e2e', () => {
     s3Keys.add(thumbKey);
     s3Keys.add(mediumKey);
 
+    // Only 1 row — origin with versions JSONB
     const files = await pg.query(
-      `SELECT key, status, source_table, source_column, source_id
+      `SELECT key, status, versions, source_table, source_column, source_id
          FROM ${OBJECT_STORE_SCHEMA}.files
-        WHERE key LIKE $1
-        ORDER BY key`,
-      [`1/default/${baseId}%`]
+        WHERE id = $1`,
+      [fileId]
     );
 
-    expect(files.rows).toEqual([
-      {
-        key: mediumKey,
-        status: 'ready',
-        source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`,
-        source_column: 'image',
-        source_id: sourceId,
-      },
-      {
-        key: originKey,
-        status: 'ready',
-        source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`,
-        source_column: 'image',
-        source_id: sourceId,
-      },
-      {
-        key: thumbKey,
-        status: 'ready',
-        source_table: `${SOURCE_SCHEMA}.${SOURCE_TABLE}`,
-        source_column: 'image',
-        source_id: sourceId,
-      },
-    ]);
+    expect(files.rows).toHaveLength(1);
+    const row = files.rows[0];
+    expect(row.status).toBe('ready');
+    expect(row.key).toBe(originKey);
+    expect(row.source_table).toBe(`${SOURCE_SCHEMA}.${SOURCE_TABLE}`);
+    expect(row.versions).toHaveLength(2);
+    expect(row.versions).toEqual(
+      expect.arrayContaining([
+        expect.objectContaining({ key: thumbKey, mime: 'image/jpeg', width: 150 }),
+        expect.objectContaining({ key: mediumKey, mime: 'image/jpeg', width: 1200 }),
+      ])
+    );
 
+    // Domain table also has versions
     const sourceRow = await pg.query(
       `SELECT image FROM ${SOURCE_SCHEMA}.${SOURCE_TABLE} WHERE id = $1`,
       [sourceId]
     );
     const versions = sourceRow.rows[0].image.versions;
-
     expect(versions).toHaveLength(2);
     expect(versions).toEqual(
       expect.arrayContaining([
         ...
       ])
     );
 
+    // Idempotency
     const secondRun: any = await callHandler(fileId);
     expect(secondRun).toEqual({ skipped: true, reason: 'not_pending_or_locked' });
   });
 
   it('processes an unattached image without writing domain metadata', async () => {
     const fileId = randomUUID();
     const baseId = randomUUID();
-    const originKey = `1/default/${baseId}_origin`;
-    const thumbKey = `1/default/${baseId}_thumbnail`;
-    const mediumKey = `1/default/${baseId}_medium`;
+    const originKey = `${TEST_DB_ID}/default/${baseId}_origin`;
+    const thumbKey = `${TEST_DB_ID}/default/${baseId}_thumbnail`;
+    const mediumKey = `${TEST_DB_ID}/default/${baseId}_medium`;
 
     const imageBuffer = await generateTestImage(1600, 900);
     await putOriginImage(originKey, imageBuffer);
@@ -266,36 +258,17 @@ describe('process-image handler file mode e2e', () => {
     s3Keys.add(thumbKey);
     s3Keys.add(mediumKey);
 
+    // Only 1 row with versions JSONB
     const files = await pg.query(
-      `SELECT key, status, source_table, source_column, source_id
+      `SELECT key, status, versions, source_table
         FROM ${OBJECT_STORE_SCHEMA}.files
-        WHERE key LIKE $1
-        ORDER BY key`,
-      [`1/default/${baseId}%`]
+        WHERE id = $1`,
+      [fileId]
     );
 
-    expect(files.rows).toEqual([
-      {
-        key: mediumKey,
-        status: 'ready',
-        source_table: null,
-        source_column: null,
-        source_id: null,
-      },
-      {
-        key: originKey,
-        status: 'ready',
-        source_table: null,
-        source_column: null,
-        source_id: null,
-      },
-      {
-        key: thumbKey,
-        status: 'ready',
-        source_table: null,
-        source_column: null,
-        source_id: null,
-      },
-    ]);
+    expect(files.rows).toHaveLength(1);
+    expect(files.rows[0].status).toBe('ready');
+    expect(files.rows[0].versions).toHaveLength(2);
+    expect(files.rows[0].source_table).toBeNull();
   });
 });
 
diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
index a32111b..e6f7422 100644
--- a/functions/process-image/handler.ts
+++ b/functions/process-image/handler.ts
@@ -242,66 +242,43 @@ async function handleFileMode(
     }
 
     // ---------------------------------------------------------------
-    // Step 4: Transactional batch commit
-    // All version row INSERTs + origin status update in single transaction.
+    // Step 4: Update origin row with versions JSONB + mark ready
     // ---------------------------------------------------------------
 
-    const txClient = await pool.connect();
-    try {
-      await txClient.query('BEGIN');
-
-      for (const ver of versionRows) {
-        await txClient.query(
-          `INSERT INTO files_store_public.files
-           (database_id, bucket_key, key, status, etag, created_by,
-            source_table, source_column, source_id, origin_id, mime_type)
-           VALUES ($1, $2, $3, 'ready', $4, $5, $6, $7, $8, $9, $10)`,
-          [
-            file.database_id, file.bucket_key, ver.key, ver.etag,
-            file.created_by, file.source_table, file.source_column, file.source_id,
-            file.id, ver.mime,
-          ]
-        );
-      }
+    const versionsJsonb = versionRows.map((v) => ({
+      key: v.key,
+      mime: v.mime,
+      width: v.width,
+      height: v.height,
+    }));
 
-      // Mark origin as ready
-      await txClient.query(
-        `UPDATE files_store_public.files SET status = 'ready' WHERE id = $1 AND database_id = $2`,
-        [file.id, file.database_id]
+    try {
+      await pool.query(
+        `UPDATE files_store_public.files
+         SET status = 'ready',
+             versions = $3::jsonb
+         WHERE id = $1 AND database_id = $2`,
+        [file.id, file.database_id, JSON.stringify(versionsJsonb.length > 0 ? versionsJsonb : null)]
       );
-
-      await txClient.query('COMMIT');
     } catch (txErr: any) {
-      await txClient.query('ROLLBACK');
-
-      // ---------------------------------------------------------------
       // Graceful deleting handling:
-      // If the file was marked 'deleting' during processing (source row
-      // deleted), the state machine rejects processing->ready. This is
-      // correct behavior -- the file is already marked for deletion.
-      // ---------------------------------------------------------------
+      // If the file was marked 'deleting' during processing, the state
+      // machine rejects processing->ready. This is correct behavior.
       if (txErr.message?.includes('Invalid status transition')) {
         log.info('[process-image] file transitioned to deleting during processing, exiting gracefully');
         await deleteS3Objects(s3, bucket, uploadedS3Keys, log);
         return { success: true, reason: 'file_marked_deleting_during_processing' };
       }
       throw txErr;
-    } finally {
-      txClient.release();
     }
 
     // ---------------------------------------------------------------
     // Step 5: Write version info to domain table (if back-reference populated)
     // ---------------------------------------------------------------
 
-    if (file.source_table && file.source_column && file.source_id && versionRows.length > 0) {
+    if (file.source_table && file.source_column && file.source_id && versionsJsonb.length > 0) {
       validateQualifiedName(file.source_table);
       validateIdentifier(file.source_column);
 
-      const versionsArray = versionRows.map((v) => ({
-        key: v.key,
-        mime: v.mime,
-        width: v.width,
-        height: v.height,
-      }));
+      const versionsArray = versionsJsonb;
 
       const sourceClient = await pool.connect();
       try {
@@ -327,7 +304,7 @@ async function handleFileMode(
       } finally {
         sourceClient.release();
       }
-    } else if (versionRows.length > 0) {
+    } else if (versionsJsonb.length > 0) {
       log.info(
         `[process-image] source_* not yet populated, skipping domain write-back. ` +
        `Versions will be written when domain trigger fires. file_id=${file.id}`
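The tests above pin down the shape PATCH 11 standardizes on: the origin files row (and the domain JSONB column) carry a versions array instead of sibling rows. As a TypeScript type with a value matching the test fixtures — names and the example key prefix are illustrative:

interface StoredVersion {
  key: string;    // S3 object key of the rendition
  mime: string;   // e.g. 'image/jpeg'
  width: number;
  height: number;
}

// Example of a domain-column value after processing a 1600x900 original.
const example: { key: string; versions: StoredVersion[] } = {
  key: 'TEST_DB_ID/default/abc_origin',
  versions: [
    { key: 'TEST_DB_ID/default/abc_thumbnail', mime: 'image/jpeg', width: 150, height: 150 },
    { key: 'TEST_DB_ID/default/abc_medium', mime: 'image/jpeg', width: 1200, height: 675 },
  ],
};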
diff --git a/tests/helpers/object-store-schema.ts b/tests/helpers/object-store-schema.ts
index 1289e5d..438f407 100644
--- a/tests/helpers/object-store-schema.ts
+++ b/tests/helpers/object-store-schema.ts
@@ -38,7 +38,7 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
   await pg.query(`
     CREATE TABLE IF NOT EXISTS ${SCHEMA}.${TABLE} (
       id uuid NOT NULL DEFAULT gen_random_uuid(),
-      database_id integer NOT NULL,
+      database_id uuid NOT NULL,
       bucket_key text NOT NULL DEFAULT 'default',
       key text NOT NULL,
       status ${SCHEMA}.file_status NOT NULL DEFAULT 'pending',
@@ -49,7 +49,7 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       source_id uuid,
       processing_started_at timestamptz,
       created_by uuid,
-      origin_id uuid,
+      versions jsonb,
       mime_type text,
       created_at timestamptz NOT NULL DEFAULT now(),
       updated_at timestamptz NOT NULL DEFAULT now(),
@@ -57,30 +57,10 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
     )
   `);
 
-  // Ensure new columns exist on pre-existing tables (CREATE TABLE IF NOT EXISTS
-  // does not add missing columns to an already-existing table)
-  await pg.query(`ALTER TABLE ${SCHEMA}.${TABLE} ADD COLUMN IF NOT EXISTS origin_id uuid`);
+  // Ensure new columns exist on pre-existing tables
+  await pg.query(`ALTER TABLE ${SCHEMA}.${TABLE} ADD COLUMN IF NOT EXISTS versions jsonb`);
   await pg.query(`ALTER TABLE ${SCHEMA}.${TABLE} ADD COLUMN IF NOT EXISTS mime_type text`);
 
-  // Self-referential FK (version -> origin)
-  await pg.query(`
-    DO $$ BEGIN
-      ALTER TABLE ${SCHEMA}.${TABLE}
-        ADD CONSTRAINT files_origin_fk
-        FOREIGN KEY (origin_id, database_id)
-        REFERENCES ${SCHEMA}.${TABLE} (id, database_id)
-        ON DELETE CASCADE;
-    EXCEPTION WHEN duplicate_object THEN NULL;
-    END $$
-  `);
-
-  // Index for version lookups
-  await pg.query(`
-    CREATE INDEX IF NOT EXISTS files_origin_id_idx
-      ON ${SCHEMA}.${TABLE} (origin_id, database_id)
-      WHERE origin_id IS NOT NULL
-  `);
-
   await pg.query(`
     CREATE OR REPLACE FUNCTION ${SCHEMA}.populate_file_back_reference()
     RETURNS trigger AS $fn$
@@ -91,12 +71,12 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       old_val jsonb;
       new_key text;
       old_key text;
-      db_id integer;
-      origin_file_id uuid;
-      old_origin_file_id uuid;
-      versions_json json;
+      db_id uuid;
+      file_id uuid;
+      old_file_id uuid;
+      versions_json jsonb;
     BEGIN
-      db_id := current_setting('app.database_id')::integer;
+      db_id := current_setting('app.database_id')::uuid;
 
       EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO new_val USING NEW;
       EXECUTE format('SELECT ($1).%I::jsonb', col_name) INTO old_val USING OLD;
@@ -109,48 +89,27 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       END IF;
 
       IF old_key IS NOT NULL AND old_key <> '' THEN
-        SELECT id INTO old_origin_file_id
+        SELECT id INTO old_file_id
         FROM ${SCHEMA}.${TABLE}
         WHERE key = old_key AND database_id = db_id;
 
-        IF old_origin_file_id IS NOT NULL THEN
+        IF old_file_id IS NOT NULL THEN
           UPDATE ${SCHEMA}.${TABLE}
           SET status = 'deleting', status_reason = 'replaced by new file'
-          WHERE id = old_origin_file_id AND database_id = db_id
-            AND status NOT IN ('deleting');
-
-          UPDATE ${SCHEMA}.${TABLE}
-          SET status = 'deleting', status_reason = 'replaced by new file'
-          WHERE origin_id = old_origin_file_id AND database_id = db_id
+          WHERE id = old_file_id AND database_id = db_id
             AND status NOT IN ('deleting');
         END IF;
       END IF;
 
       IF new_key IS NOT NULL AND new_key <> '' THEN
-        SELECT id INTO origin_file_id
+        SELECT id, versions INTO file_id, versions_json
         FROM ${SCHEMA}.${TABLE}
         WHERE key = new_key AND database_id = db_id;
 
-        IF origin_file_id IS NOT NULL THEN
+        IF file_id IS NOT NULL THEN
           UPDATE ${SCHEMA}.${TABLE}
-          SET source_table = table_name, source_column = col_name, source_id = NEW.id
-          WHERE id = origin_file_id AND database_id = db_id;
-
-          UPDATE ${SCHEMA}.${TABLE}
-          SET source_table = table_name, source_column = col_name, source_id = NEW.id
-          WHERE origin_id = origin_file_id AND database_id = db_id;
-
-          SELECT json_agg(json_build_object(
-            'key', f.key,
-            'mime', COALESCE(f.mime_type, 'image/jpeg'),
-            'width', 0,
-            'height', 0
-          ))
-          INTO versions_json
-          FROM ${SCHEMA}.${TABLE} f
-          WHERE f.origin_id = origin_file_id
-            AND f.database_id = db_id
-            AND f.status = 'ready';
+          SET source_table = table_name, source_column = col_name, source_id = NEW.id
+          WHERE id = file_id AND database_id = db_id;
 
           IF versions_json IS NOT NULL THEN
             EXECUTE format(
@@ -172,9 +131,9 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
     DECLARE
       col_name text := TG_ARGV[0];
       table_name text := TG_ARGV[1];
-      db_id integer;
+      db_id uuid;
     BEGIN
-      db_id := current_setting('app.database_id')::integer;
+      db_id := current_setting('app.database_id')::uuid;
 
       UPDATE ${SCHEMA}.${TABLE}
       SET status = 'deleting', status_reason = 'source row deleted'
@@ -223,38 +182,9 @@ export async function setupFilesStoreSchema(pg: PgClient): Promise<void> {
       FOR EACH ROW
       EXECUTE FUNCTION ${SCHEMA}.files_before_update_timestamp()
   `);
-
-  // Propagate deleting status from origin to version rows
-  await pg.query(`
-    CREATE OR REPLACE FUNCTION ${SCHEMA}.files_propagate_deleting_to_versions()
-    RETURNS trigger AS $fn$
-    BEGIN
-      UPDATE ${SCHEMA}.${TABLE}
-      SET status = 'deleting', status_reason = COALESCE(NEW.status_reason, 'origin marked deleting')
-      WHERE origin_id = NEW.id
-        AND database_id = NEW.database_id
-        AND status NOT IN ('deleting');
-      RETURN NEW;
-    END;
-    $fn$ LANGUAGE plpgsql
-  `);
-
-  await pg.query(`
-    DROP TRIGGER IF EXISTS files_after_update_propagate_deleting ON ${SCHEMA}.${TABLE};
-    CREATE TRIGGER files_after_update_propagate_deleting
-      AFTER UPDATE ON ${SCHEMA}.${TABLE}
-      FOR EACH ROW
-      WHEN (NEW.status = 'deleting' AND OLD.status <> 'deleting' AND NEW.origin_id IS NULL)
-      EXECUTE FUNCTION ${SCHEMA}.files_propagate_deleting_to_versions()
-  `);
 }
 
 export async function teardownFilesStoreSchema(pg: PgClient): Promise<void> {
-  await pg.query(`DROP TABLE IF EXISTS ${SCHEMA}.${TABLE} CASCADE`);
-  await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.files_propagate_deleting_to_versions CASCADE`);
-  await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.mark_files_deleting_on_source_delete CASCADE`);
-  await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.populate_file_back_reference CASCADE`);
-  await pg.query(`DROP FUNCTION IF EXISTS ${SCHEMA}.files_before_update_timestamp CASCADE`);
-  await pg.query(`DROP TYPE IF EXISTS ${SCHEMA}.file_status CASCADE`);
   await pg.query(`DROP SCHEMA IF EXISTS ${SCHEMA} CASCADE`);
 }
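The triggers above and the worker below agree on tenancy through current_setting('app.database_id'): the worker pins the value per transaction with set_config(..., true) before touching domain tables. A standalone sketch of that handshake (pg wiring assumed; the helper name is illustrative):

import { Pool, PoolClient } from 'pg';

async function withDatabaseId<T>(
  pool: Pool,
  databaseId: string,
  fn: (client: PoolClient) => Promise<T>,
): Promise<T> {
  const client = await pool.connect();
  try {
    await client.query('BEGIN');
    // is_local = true scopes the setting to this transaction only,
    // so pooled connections never leak another tenant's id.
    await client.query(`SELECT set_config('app.database_id', $1, true)`, [databaseId]);
    const result = await fn(client);
    await client.query('COMMIT');
    return result;
  } catch (err) {
    await client.query('ROLLBACK');
    throw err;
  } finally {
    client.release();
  }
}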
From 8194ec1679266f925f70a870a3ffb7c298d88a19 Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Thu, 19 Mar 2026 20:02:03 +0800
Subject: [PATCH 12/13] fixed backfill logic

---
 functions/process-image/handler.ts | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
index e6f7422..3aa2ab0 100644
--- a/functions/process-image/handler.ts
+++ b/functions/process-image/handler.ts
@@ -287,15 +287,17 @@ async function handleFileMode(
         `SELECT set_config('app.database_id', $1, true)`,
         [String(file.database_id)]
       );
+      // Use COALESCE to handle NULL domain column: if NULL, build a
+      // minimal object with key + versions; if not NULL, merge versions in.
       await sourceClient.query(
         `UPDATE ${file.source_table}
          SET ${file.source_column} = jsonb_set(
-           ${file.source_column}::jsonb,
+           COALESCE(${file.source_column}::jsonb, jsonb_build_object('key', $3::text)),
            '{versions}',
            $1::jsonb
          )
          WHERE id = $2`,
-        [JSON.stringify(versionsArray), file.source_id]
+        [JSON.stringify(versionsArray), file.source_id, file.key]
       );
       await sourceClient.query('COMMIT');
     } catch (domainUpdateErr) {

From c80f5e73ca2bc7d3a21d05100f3b7d5ea3467e19 Mon Sep 17 00:00:00 2001
From: zetazzz
Date: Mon, 23 Mar 2026 09:45:21 +0800
Subject: [PATCH 13/13] fixed field type

---
 functions/process-image/handler.ts | 46 +++++++++++++++++++++++-------
 1 file changed, 36 insertions(+), 10 deletions(-)

diff --git a/functions/process-image/handler.ts b/functions/process-image/handler.ts
index 3aa2ab0..2a4cee6 100644
--- a/functions/process-image/handler.ts
+++ b/functions/process-image/handler.ts
@@ -287,17 +287,19 @@ async function handleFileMode(
         `SELECT set_config('app.database_id', $1, true)`,
         [String(file.database_id)]
       );
-      // Use COALESCE to handle NULL domain column: if NULL, build a
-      // minimal object with key + versions; if not NULL, merge versions in.
+      // Write the full image object to the domain column using || merge.
+      // Since the dashboard no longer patches the img field (to avoid race
+      // conditions), process-image is the sole writer. Includes key, mime,
+      // and versions.
       await sourceClient.query(
         `UPDATE ${file.source_table}
-         SET ${file.source_column} = jsonb_set(
-           COALESCE(${file.source_column}::jsonb, jsonb_build_object('key', $3::text)),
-           '{versions}',
-           $1::jsonb
-         )
+         SET ${file.source_column} = COALESCE(${file.source_column}::jsonb, '{}'::jsonb)
+           || jsonb_build_object(
+                'key', $3::text,
+                'mime', $4::text,
+                'versions', $1::jsonb
+              )
          WHERE id = $2`,
-        [JSON.stringify(versionsArray), file.source_id, file.key]
+        [JSON.stringify(versionsArray), file.source_id, file.key, mimeType]
       );
       await sourceClient.query('COMMIT');
     } catch (domainUpdateErr) {
@@ -306,10 +308,34 @@ async function handleFileMode(
     } finally {
       sourceClient.release();
     }
+  } else if (file.source_table && file.source_column && file.source_id) {
+    // No versions generated (image too small), but still write key + mime
+    validateQualifiedName(file.source_table);
+    validateIdentifier(file.source_column);
+    const sourceClient = await pool.connect();
+    try {
+      await sourceClient.query('BEGIN');
+      await sourceClient.query(
+        `SELECT set_config('app.database_id', $1, true)`,
+        [String(file.database_id)]
+      );
+      await sourceClient.query(
+        `UPDATE ${file.source_table}
+         SET ${file.source_column} = COALESCE(${file.source_column}::jsonb, '{}'::jsonb)
+           || jsonb_build_object('key', $2::text, 'mime', $3::text)
+         WHERE id = $1`,
+        [file.source_id, file.key, mimeType]
+      );
+      await sourceClient.query('COMMIT');
+    } catch (err) {
+      await sourceClient.query('ROLLBACK');
+      log.error('[process-image] failed to write key+mime to domain table', err);
+    } finally {
+      sourceClient.release();
+    }
   } else if (versionsJsonb.length > 0) {
     log.info(
-      `[process-image] source_* not yet populated, skipping domain write-back. ` +
-      `Versions will be written when domain trigger fires. file_id=${file.id}`
+      `[process-image] source_* not yet populated, skipping domain write-back. file_id=${file.id}`
     );
 }
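PATCH 12 and PATCH 13 are two takes on the same write-back: jsonb_set only replaces one path and needs COALESCE scaffolding for a NULL column, while the || merge writes key, mime, and versions in a single idempotent step. A worked sketch of the final approach against a hypothetical table t(id, img) — table and column names here are placeholders, not from the patches:

import { Pool } from 'pg';

async function writeBack(
  pool: Pool,
  rowId: string,
  key: string,
  mime: string,
  versions: unknown[],
): Promise<void> {
  // COALESCE(img, '{}') || {...} both creates a missing object and
  // overwrites stale key/mime/versions on re-runs, so repeated
  // processing of the same file converges to the same value.
  await pool.query(
    `UPDATE t
        SET img = COALESCE(img::jsonb, '{}'::jsonb)
               || jsonb_build_object('key', $2::text, 'mime', $3::text, 'versions', $4::jsonb)
      WHERE id = $1`,
    [rowId, key, mime, JSON.stringify(versions)]
  );
}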