Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 31 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,36 @@ services:
ports:
- "3002:3000"

  # Local S3-compatible object store backing the e2e tests.
  minio:
    image: minio/minio:latest
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: minioadmin
      MINIO_ROOT_PASSWORD: minioadmin
    ports:
      - "9000:9000" # S3 API
      - "9001:9001" # Console UI
    volumes:
      - miniodata:/data
    healthcheck:
      # `mc` ships inside the minio image; `ready local` probes this server.
      test: ["CMD", "mc", "ready", "local"]
      interval: 5s
      timeout: 5s
      retries: 5

  # One-shot container: waits for minio to be healthy, then creates the
  # bucket the e2e tests expect (idempotent via --ignore-existing).
  minio-setup:
    image: minio/mc:latest
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: ["sh", "-c"]
    command:
      - |
        mc alias set local http://minio:9000 minioadmin minioadmin
        mc mb --ignore-existing local/test-bucket
        mc anonymous set download local/test-bucket
        echo "MinIO bucket ready"

mailpit:
image: axllent/mailpit:latest
ports:
Expand All @@ -95,3 +125,4 @@ services:

# Named volumes so Postgres and MinIO data survive container recreation.
volumes:
  pgdata:
  miniodata:
269 changes: 269 additions & 0 deletions functions/delete-s3-object/__tests__/handler.e2e.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,269 @@
/**
* E2E tests for delete-s3-object handler.
*
* Requires: Postgres on :5432, MinIO on :9000 (docker compose up -d)
* Run: npx jest --runInBand functions/delete-s3-object
*/

import { S3Client, PutObjectCommand, GetObjectCommand, HeadObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3';
import { Client as PgClient } from 'pg';

import handler from '../handler';
import { createMockContext } from '../../../tests/helpers/mock-context';
import {
  makePgClient,
  setupFilesStoreSchema,
  teardownFilesStoreSchema,
  cleanFilesStoreRows,
} from '../../../tests/helpers/object-store-schema';

// ---------------------------------------------------------------------------
// Infra helpers
// ---------------------------------------------------------------------------

// Postgres schema and MinIO bucket the handler operates on. The bucket is
// pre-created by the docker-compose `minio-setup` service.
const SCHEMA = 'files_store_public';
const BUCKET = 'test-bucket';

// Environment handed to the handler through the mock context; values mirror
// the docker-compose services (Postgres on :5432, MinIO on :9000).
const ENV: Record<string, string> = {
  PGHOST: 'localhost',
  PGPORT: '5432',
  PGUSER: 'postgres',
  PGPASSWORD: 'password',
  PGDATABASE: 'constructive',
  BUCKET_PROVIDER: 'minio',
  BUCKET_NAME: BUCKET,
  AWS_ACCESS_KEY: 'minioadmin',
  AWS_SECRET_KEY: 'minioadmin',
  AWS_REGION: 'us-east-1',
  MINIO_ENDPOINT: 'http://localhost:9000',
};

/** Builds an S3 client pointed at the local MinIO container. */
function makeS3(): S3Client {
  const credentials = { accessKeyId: 'minioadmin', secretAccessKey: 'minioadmin' };
  // forcePathStyle: MinIO has no per-bucket DNS, so path-style addressing
  // (http://host/bucket/key) is required.
  const config = {
    region: 'us-east-1',
    credentials,
    endpoint: 'http://localhost:9000',
    forcePathStyle: true,
  };
  return new S3Client(config);
}

// ---------------------------------------------------------------------------
// Suite
// ---------------------------------------------------------------------------

describe('delete-s3-object handler e2e', () => {
let pg: PgClient;
let s3: S3Client;
const s3Keys: string[] = [];

beforeAll(async () => {
pg = makePgClient();
await pg.connect();
s3 = makeS3();
await setupFilesStoreSchema(pg);
});

afterAll(async () => {
for (const key of s3Keys) {
try {
await s3.send(new DeleteObjectCommand({ Bucket: BUCKET, Key: key }));
} catch { /* ignore */ }
}
await teardownFilesStoreSchema(pg);
await pg.end();
s3.destroy();
try {
const pgCache = require('pg-cache');
if (pgCache.close) await pgCache.close();
} catch { /* ignore */ }
});

afterEach(async () => {
await cleanFilesStoreRows(pg);
});

const TEST_DB_ID = 'aaaaaaaa-0000-0000-0000-000000000099';

function callHandler(file_id: string, database_id: string, key: string, version_keys?: string[]) {
const ctx = createMockContext({ env: ENV });
return handler({ file_id, database_id, key, version_keys }, ctx as any);
}

async function insertFile(opts: {
s3Key: string;
body: Buffer;
status?: string;
databaseId?: string;
versions?: object[];
}): Promise<{ id: string; database_id: string }> {
const databaseId = opts.databaseId ?? TEST_DB_ID;
s3Keys.push(opts.s3Key);

await s3.send(new PutObjectCommand({
Bucket: BUCKET,
Key: opts.s3Key,
Body: opts.body,
ContentType: 'application/octet-stream',
}));

const res = await pg.query(
`INSERT INTO ${SCHEMA}.files
(database_id, key, bucket_key, status, versions)
VALUES ($1, $2, 'default', $3::${SCHEMA}.file_status, $4::jsonb)
RETURNING id, database_id`,
[databaseId, opts.s3Key, opts.status ?? 'deleting', opts.versions ? JSON.stringify(opts.versions) : null]
);
return res.rows[0];
}

async function s3ObjectExists(key: string): Promise<boolean> {
try {
await s3.send(new GetObjectCommand({ Bucket: BUCKET, Key: key }));
return true;
} catch (err: any) {
if (err.name === 'NoSuchKey' || err.$metadata?.httpStatusCode === 404) {
return false;
}
throw err;
}
}

// -----------------------------------------------------------------------
// Test 1: Happy path — deletes S3 object and DB row
// -----------------------------------------------------------------------

it('deletes S3 object and DB row successfully', async () => {
const key = `e2e-del-${Date.now()}-test.bin`;
const body = Buffer.from('test file content');

const { id, database_id } = await insertFile({
s3Key: key,
body,
status: 'deleting',
});

expect(await s3ObjectExists(key)).toBe(true);

const result: any = await callHandler(id, database_id, key);

expect(result.success).toBe(true);
expect(result.key).toBe(key);
expect(await s3ObjectExists(key)).toBe(false);

const dbRes = await pg.query(
`SELECT * FROM ${SCHEMA}.files WHERE id = $1 AND database_id = $2`,
[id, database_id]
);
expect(dbRes.rows.length).toBe(0);
});

// -----------------------------------------------------------------------
// Test 2: Idempotency — S3 object already deleted
// -----------------------------------------------------------------------

it('succeeds when S3 object already deleted (idempotent)', async () => {
const key = `e2e-del-gone-${Date.now()}-test.bin`;

const res = await pg.query(
`INSERT INTO ${SCHEMA}.files
(database_id, key, bucket_key, status)
VALUES ($1, $2, 'default', 'deleting')
RETURNING id, database_id`,
[TEST_DB_ID, key]
);
const { id, database_id } = res.rows[0];

const result: any = await callHandler(id, database_id, key);

expect(result.success).toBe(true);

const dbRes = await pg.query(
`SELECT * FROM ${SCHEMA}.files WHERE id = $1 AND database_id = $2`,
[id, database_id]
);
expect(dbRes.rows.length).toBe(0);
});

// -----------------------------------------------------------------------
// Test 3: Idempotency — DB row already deleted
// -----------------------------------------------------------------------

it('succeeds when DB row already deleted (idempotent)', async () => {
const key = `e2e-del-norow-${Date.now()}-test.bin`;

s3Keys.push(key);
await s3.send(new PutObjectCommand({
Bucket: BUCKET,
Key: key,
Body: Buffer.from('orphan'),
ContentType: 'application/octet-stream',
}));

const result: any = await callHandler(
'00000000-0000-0000-0000-000000000000',
TEST_DB_ID,
key
);

expect(result.success).toBe(true);
expect(await s3ObjectExists(key)).toBe(false);
});

// -----------------------------------------------------------------------
// Test 4: Both already deleted — fully idempotent
// -----------------------------------------------------------------------

// -----------------------------------------------------------------------
// Test 4: Delete with version S3 objects
// -----------------------------------------------------------------------

it('deletes origin + version S3 objects and DB row', async () => {
const originKey = `e2e-del-ver-${Date.now()}-origin.bin`;
const thumbKey = `e2e-del-ver-${Date.now()}-thumb.bin`;
const mediumKey = `e2e-del-ver-${Date.now()}-medium.bin`;
const body = Buffer.from('test');

// Upload origin + versions to S3
for (const k of [originKey, thumbKey, mediumKey]) {
s3Keys.push(k);
await s3.send(new PutObjectCommand({ Bucket: BUCKET, Key: k, Body: body }));
}

const { id, database_id } = await insertFile({
s3Key: originKey,
body,
status: 'deleting',
versions: [
{ key: thumbKey, mime: 'image/jpeg', width: 150, height: 150 },
{ key: mediumKey, mime: 'image/jpeg', width: 1200, height: 675 },
],
});

const result: any = await callHandler(id, database_id, originKey, [thumbKey, mediumKey]);

expect(result.success).toBe(true);
expect(await s3ObjectExists(originKey)).toBe(false);
expect(await s3ObjectExists(thumbKey)).toBe(false);
expect(await s3ObjectExists(mediumKey)).toBe(false);

const dbRes = await pg.query(
`SELECT * FROM ${SCHEMA}.files WHERE id = $1`,
[id]
);
expect(dbRes.rows.length).toBe(0);
});

// -----------------------------------------------------------------------
// Test 5: Both already deleted — fully idempotent
// -----------------------------------------------------------------------

it('succeeds when both S3 and DB are already gone', async () => {
const result: any = await callHandler(
'00000000-0000-0000-0000-000000000000',
TEST_DB_ID,
`nonexistent-key-${Date.now()}`
);

expect(result.success).toBe(true);
});
});
10 changes: 10 additions & 0 deletions functions/delete-s3-object/handler.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{
"name": "delete-s3-object",
"version": "1.0.0",
"type": "node-graphql",
"description": "Deletes S3 objects and removes corresponding files table rows",
"dependencies": {
"@aws-sdk/client-s3": "^3.700.0",
"pg-cache": "^3.1.0"
}
}
Loading