diff --git a/src/http/plugins/db.ts b/src/http/plugins/db.ts index 9b3894eee..916a3d8fd 100644 --- a/src/http/plugins/db.ts +++ b/src/http/plugins/db.ts @@ -25,8 +25,7 @@ declare module 'fastify' { } } -const { databaseEnableQueryCancellation, dbMigrationStrategy, isMultitenant, dbMigrationFreezeAt } = - getConfig() +const { dbMigrationStrategy, isMultitenant, dbMigrationFreezeAt } = getConfig() export const db = fastifyPlugin( async function db(fastify) { @@ -55,11 +54,6 @@ export const db = fastifyPlugin( method: request.method, operation: () => request.operation?.type, }) - - // Connect abort signal to DB connection for query cancellation - if (request.signals?.disconnect?.signal && databaseEnableQueryCancellation) { - request.db.setAbortSignal(request.signals.disconnect.signal) - } }) fastify.addHook('onSend', async (request, reply, payload) => { @@ -124,11 +118,6 @@ export const dbSuperUser = fastifyPlugin( maxConnections: opts.maxConnections, operation: () => request.operation?.type, }) - - // Connect abort signal to DB connection for query cancellation - if (request.signals?.disconnect?.signal && databaseEnableQueryCancellation) { - request.db.setAbortSignal(request.signals.disconnect.signal) - } }) fastify.addHook('onSend', async (request, reply, payload) => { diff --git a/src/http/routes/bucket/createBucket.ts b/src/http/routes/bucket/createBucket.ts index 3c889586b..70b384269 100644 --- a/src/http/routes/bucket/createBucket.ts +++ b/src/http/routes/bucket/createBucket.ts @@ -80,6 +80,7 @@ export default async function routes(fastify: FastifyInstance) { allowedMimeTypes: allowed_mime_types ? allowed_mime_types?.filter((mime) => mime) : allowed_mime_types, + signal: request.signals.disconnect.signal, }) return response.status(200).send({ diff --git a/src/http/routes/bucket/deleteBucket.ts b/src/http/routes/bucket/deleteBucket.ts index 5414ef632..f5c55499a 100644 --- a/src/http/routes/bucket/deleteBucket.ts +++ b/src/http/routes/bucket/deleteBucket.ts @@ -39,7 +39,7 @@ export default async function routes(fastify: FastifyInstance) { }, async (request, response) => { const { bucketId } = request.params - await request.storage.deleteBucket(bucketId) + await request.storage.deleteBucket({ bucketId, signal: request.signals.disconnect.signal }) return response.status(200).send(createResponse('Successfully deleted')) } diff --git a/src/http/routes/bucket/emptyBucket.ts b/src/http/routes/bucket/emptyBucket.ts index a997dfb74..ad5936385 100644 --- a/src/http/routes/bucket/emptyBucket.ts +++ b/src/http/routes/bucket/emptyBucket.ts @@ -42,7 +42,7 @@ export default async function routes(fastify: FastifyInstance) { async (request, response) => { const { bucketId } = request.params - await request.storage.emptyBucket(bucketId) + await request.storage.emptyBucket({ bucketId, signal: request.signals.disconnect.signal }) return response .status(200) diff --git a/src/http/routes/bucket/getAllBuckets.ts b/src/http/routes/bucket/getAllBuckets.ts index 75c9cac89..df9c047d9 100644 --- a/src/http/routes/bucket/getAllBuckets.ts +++ b/src/http/routes/bucket/getAllBuckets.ts @@ -70,11 +70,13 @@ export default async function routes(fastify: FastifyInstance) { isClientVersionBefore('supabase-py', clientInfo, '2.18.0') || isClientVersionBefore('storage3', userAgent, '0.12.1') - const results = await request.storage.listBuckets( - 'id, name, public, owner, created_at, updated_at, file_size_limit, allowed_mime_types' + + const results = await request.storage.listBuckets({ + columns: + 'id, name, public, owner, 
created_at, updated_at, file_size_limit, allowed_mime_types' + (omitBucketType ? '' : ', type'), - { limit, offset, sortColumn, sortOrder, search } - ) + options: { limit, offset, sortColumn, sortOrder, search }, + signal: request.signals.disconnect.signal, + }) return response.send(results) } diff --git a/src/http/routes/bucket/getBucket.ts b/src/http/routes/bucket/getBucket.ts index 94ba37e46..75ac40053 100644 --- a/src/http/routes/bucket/getBucket.ts +++ b/src/http/routes/bucket/getBucket.ts @@ -36,10 +36,12 @@ export default async function routes(fastify: FastifyInstance) { async (request, response) => { const { bucketId } = request.params - const results = await request.storage.findBucket( - bucketId, - 'id, name, owner, public, created_at, updated_at, file_size_limit, allowed_mime_types' - ) + const results = await request.storage.findBucket({ + bucketId: bucketId, + columns: + 'id, name, owner, public, created_at, updated_at, file_size_limit, allowed_mime_types', + signal: request.signals.disconnect.signal, + }) return response.send(results) } diff --git a/src/http/routes/bucket/updateBucket.ts b/src/http/routes/bucket/updateBucket.ts index 13d3db8b0..fabd49fa3 100644 --- a/src/http/routes/bucket/updateBucket.ts +++ b/src/http/routes/bucket/updateBucket.ts @@ -61,12 +61,16 @@ export default async function routes(fastify: FastifyInstance) { const { public: isPublic, file_size_limit, allowed_mime_types } = request.body - await request.storage.updateBucket(bucketId, { - public: isPublic, - fileSizeLimit: file_size_limit, - allowedMimeTypes: allowed_mime_types - ? allowed_mime_types?.filter((mime) => mime) - : allowed_mime_types, + await request.storage.updateBucket({ + bucketId, + data: { + public: isPublic, + fileSizeLimit: file_size_limit, + allowedMimeTypes: allowed_mime_types + ? 
allowed_mime_types?.filter((mime) => mime) + : allowed_mime_types, + }, + signal: request.signals.disconnect.signal, }) return response.status(200).send(createResponse('Successfully updated')) diff --git a/src/http/routes/iceberg/bucket.ts b/src/http/routes/iceberg/bucket.ts index 9d53d107f..ee6bf4cc3 100644 --- a/src/http/routes/iceberg/bucket.ts +++ b/src/http/routes/iceberg/bucket.ts @@ -58,7 +58,10 @@ export default async function routes(fastify: FastifyInstance) { }, async (request, response) => { const { bucketName } = request.params - await request.storage.deleteIcebergBucket(bucketName) + await request.storage.deleteIcebergBucket({ + name: bucketName, + signal: request.signals.disconnect.signal, + }) return response.status(200).send(createResponse('Successfully deleted')) } @@ -80,6 +83,7 @@ export default async function routes(fastify: FastifyInstance) { const { name } = request.body const bucket = await request.storage.createIcebergBucket({ name, + signal: request.signals.disconnect.signal, }) return response.status(200).send({ @@ -106,12 +110,16 @@ export default async function routes(fastify: FastifyInstance) { async (request, response) => { const query = request.query - const bucket = await request.storage.listAnalyticsBuckets('name,created_at,updated_at', { - limit: query.limit, - offset: query.offset, - sortColumn: query.sortColumn, - sortOrder: query.sortOrder, - search: query.search, + const bucket = await request.storage.listAnalyticsBuckets({ + columns: 'name,created_at,updated_at', + options: { + limit: query.limit, + offset: query.offset, + sortColumn: query.sortColumn, + sortOrder: query.sortOrder, + search: query.search, + }, + signal: request.signals.disconnect.signal, }) return response.status(200).send( diff --git a/src/http/routes/object/copyObject.ts b/src/http/routes/object/copyObject.ts index 19f3b0563..302eafc77 100644 --- a/src/http/routes/object/copyObject.ts +++ b/src/http/routes/object/copyObject.ts @@ -74,6 +74,7 @@ export default async function routes(fastify: FastifyInstance) { metadata: metadata, copyMetadata: request.body.copyMetadata ?? true, upsert: request.headers['x-upsert'] === 'true', + signal: request.signals.disconnect.signal, }) return response.status(result.httpStatusCode ?? 
200).send({ diff --git a/src/http/routes/object/deleteObject.ts b/src/http/routes/object/deleteObject.ts index 370a436fb..348e91bd0 100644 --- a/src/http/routes/object/deleteObject.ts +++ b/src/http/routes/object/deleteObject.ts @@ -43,7 +43,9 @@ export default async function routes(fastify: FastifyInstance) { const { bucketName } = request.params const objectName = request.params['*'] - await request.storage.from(bucketName).deleteObject(objectName) + await request.storage + .from(bucketName) + .deleteObject({ objectName, signal: request.signals.disconnect.signal }) return response.status(200).send(createResponse('Successfully deleted')) } diff --git a/src/http/routes/object/deleteObjects.ts b/src/http/routes/object/deleteObjects.ts index 438fbb00b..1b57b589c 100644 --- a/src/http/routes/object/deleteObjects.ts +++ b/src/http/routes/object/deleteObjects.ts @@ -58,7 +58,9 @@ export default async function routes(fastify: FastifyInstance) { const { bucketName } = request.params const prefixes = request.body['prefixes'] - const results = await request.storage.from(bucketName).deleteObjects(prefixes) + const results = await request.storage + .from(bucketName) + .deleteObjects({ prefixes, signal: request.signals.disconnect.signal }) return response.status(200).send(results) } diff --git a/src/http/routes/object/getObject.ts b/src/http/routes/object/getObject.ts index 2e4b01080..2633f88d6 100644 --- a/src/http/routes/object/getObject.ts +++ b/src/http/routes/object/getObject.ts @@ -50,8 +50,13 @@ async function requestHandler( bucketId: bucketName, objectName, }) - const bucket = await request.storage.asSuperUser().findBucket(bucketName, 'id,public', { - dontErrorOnEmpty: true, + const bucket = await request.storage.asSuperUser().findBucket({ + bucketId: bucketName, + columns: 'id,public', + filters: { + dontErrorOnEmpty: true, + }, + signal: request.signals.disconnect.signal, }) // The request is not authenticated @@ -71,13 +76,18 @@ async function requestHandler( if (bucket.public) { // request is authenticated but we still use the superUser as we don't need to check RLS - obj = await request.storage - .asSuperUser() - .from(bucketName) - .findObject(objectName, 'id, version, metadata') + obj = await request.storage.asSuperUser().from(bucketName).findObject({ + objectName, + columns: 'id, version, metadata', + signal: request.signals.disconnect.signal, + }) } else { // request is authenticated use RLS - obj = await request.storage.from(bucketName).findObject(objectName, 'id, version, metadata') + obj = await request.storage.from(bucketName).findObject({ + objectName, + columns: 'id, version, metadata', + signal: request.signals.disconnect.signal, + }) } return request.storage.renderer('asset').render(request, response, { diff --git a/src/http/routes/object/getObjectInfo.ts b/src/http/routes/object/getObjectInfo.ts index 14e47925a..a3f29dcf6 100644 --- a/src/http/routes/object/getObjectInfo.ts +++ b/src/http/routes/object/getObjectInfo.ts @@ -43,8 +43,13 @@ async function requestHandler( objectName, }) - const bucket = await request.storage.asSuperUser().findBucket(bucketName, 'id,public', { - dontErrorOnEmpty: true, + const bucket = await request.storage.asSuperUser().findBucket({ + bucketId: bucketName, + columns: 'id,public', + filters: { + dontErrorOnEmpty: true, + }, + signal: request.signals.disconnect.signal, }) // Not Authenticated flow @@ -62,20 +67,17 @@ async function requestHandler( let obj: Obj if (bucket.public || publicRoute) { - obj = await request.storage - .asSuperUser() - 
.from(bucketName) - .findObject( - objectName, - 'id,name,version,bucket_id,metadata,user_metadata,updated_at,created_at' - ) + obj = await request.storage.asSuperUser().from(bucketName).findObject({ + objectName, + columns: 'id,name,version,bucket_id,metadata,user_metadata,updated_at,created_at', + signal: request.signals.disconnect.signal, + }) } else { - obj = await request.storage - .from(bucketName) - .findObject( - objectName, - 'id,name,version,bucket_id,metadata,user_metadata,updated_at,created_at' - ) + obj = await request.storage.from(bucketName).findObject({ + objectName, + columns: 'id,name,version,bucket_id,metadata,user_metadata,updated_at,created_at', + signal: request.signals.disconnect.signal, + }) } return request.storage.renderer(method).render(request, response, { diff --git a/src/http/routes/object/getPublicObject.ts b/src/http/routes/object/getPublicObject.ts index cc1b69f6d..541834fc6 100644 --- a/src/http/routes/object/getPublicObject.ts +++ b/src/http/routes/object/getPublicObject.ts @@ -54,10 +54,19 @@ export default async function routes(fastify: FastifyInstance) { const bucketRef = request.storage.asSuperUser().from(bucketName) const [, obj] = await Promise.all([ - request.storage.asSuperUser().findBucket(bucketName, 'id,public', { - isPublic: true, + request.storage.asSuperUser().findBucket({ + bucketId: bucketName, + columns: 'id,public', + filters: { + isPublic: true, + }, + signal: request.signals.disconnect.signal, + }), + bucketRef.findObject({ + objectName, + columns: 'id,version,metadata', + signal: request.signals.disconnect.signal, }), - bucketRef.findObject(objectName, 'id,version,metadata'), ]) // send the object from s3 diff --git a/src/http/routes/object/getSignedObject.ts b/src/http/routes/object/getSignedObject.ts index 11ab950b5..db009daf1 100644 --- a/src/http/routes/object/getSignedObject.ts +++ b/src/http/routes/object/getSignedObject.ts @@ -83,7 +83,11 @@ export default async function routes(fastify: FastifyInstance) { const obj = await request.storage .asSuperUser() .from(bucketName) - .findObject(objParts.join('/'), 'id,version,metadata') + .findObject({ + objectName: objParts.join('/'), + columns: 'id,version,metadata', + signal: request.signals.disconnect.signal, + }) return request.storage.renderer('asset').render(request, response, { bucket: storageS3Bucket, diff --git a/src/http/routes/object/getSignedURL.ts b/src/http/routes/object/getSignedURL.ts index 7ba57a3c8..1e021013b 100644 --- a/src/http/routes/object/getSignedURL.ts +++ b/src/http/routes/object/getSignedURL.ts @@ -80,9 +80,13 @@ export default async function routes(fastify: FastifyInstance) { } : undefined - const signedURL = await request.storage - .from(bucketName) - .signObjectUrl(objectName, urlPath as string, expiresIn, transformationOptions) + const signedURL = await request.storage.from(bucketName).signObjectUrl({ + objectName, + url: urlPath as string, + expiresIn, + metadata: transformationOptions, + signal: request.signals.disconnect.signal, + }) return response.status(200).send({ signedURL }) } diff --git a/src/http/routes/object/getSignedURLs.ts b/src/http/routes/object/getSignedURLs.ts index d73af3207..375d01d20 100644 --- a/src/http/routes/object/getSignedURLs.ts +++ b/src/http/routes/object/getSignedURLs.ts @@ -78,7 +78,9 @@ export default async function routes(fastify: FastifyInstance) { const { bucketName } = request.params const { expiresIn, paths } = request.body - const signedURLs = await request.storage.from(bucketName).signObjectUrls(paths, expiresIn) + 
const signedURLs = await request.storage + .from(bucketName) + .signObjectUrls({ paths, expiresIn, signal: request.signals.disconnect.signal }) return response.status(200).send(signedURLs) } diff --git a/src/http/routes/object/getSignedUploadURL.ts b/src/http/routes/object/getSignedUploadURL.ts index 5a68afa30..af1a2eac5 100644 --- a/src/http/routes/object/getSignedUploadURL.ts +++ b/src/http/routes/object/getSignedUploadURL.ts @@ -69,11 +69,16 @@ export default async function routes(fastify: FastifyInstance) { const urlPath = `${bucketName}/${objectName}` - const signedUpload = await request.storage - .from(bucketName) - .signUploadObjectUrl(objectName, urlPath as string, uploadSignedUrlExpirationTime, owner, { + const signedUpload = await request.storage.from(bucketName).signUploadObjectUrl({ + objectName, + url: urlPath as string, + expiresIn: uploadSignedUrlExpirationTime, + owner, + options: { upsert: request.headers['x-upsert'] === 'true', - }) + }, + signal: request.signals.disconnect.signal, + }) return response.status(200).send({ url: signedUpload.url, token: signedUpload.token }) } diff --git a/src/http/routes/object/listObjects.ts b/src/http/routes/object/listObjects.ts index 18aa1013b..e506f43c2 100644 --- a/src/http/routes/object/listObjects.ts +++ b/src/http/routes/object/listObjects.ts @@ -70,14 +70,18 @@ export default async function routes(fastify: FastifyInstance) { const { bucketName } = request.params const { limit, offset, sortBy, search, prefix } = request.body - const results = await request.storage.from(bucketName).searchObjects(prefix, { - limit, - offset, - search, - sortBy: { - column: sortBy?.column, - order: sortBy?.order, + const results = await request.storage.from(bucketName).searchObjects({ + prefix, + options: { + limit, + offset, + search, + sortBy: { + column: sortBy?.column, + order: sortBy?.order, + }, }, + signal: request.signals.disconnect.signal, }) return response.status(200).send(results) diff --git a/src/http/routes/object/listObjectsV2.ts b/src/http/routes/object/listObjectsV2.ts index 2612e1224..80d219d05 100644 --- a/src/http/routes/object/listObjectsV2.ts +++ b/src/http/routes/object/listObjectsV2.ts @@ -81,6 +81,7 @@ export default async function routes(fastify: FastifyInstance) { maxKeys: limit, cursor, sortBy, + signal: request.signals.disconnect.signal, }) return response.status(200).send(results) diff --git a/src/http/routes/object/moveObject.ts b/src/http/routes/object/moveObject.ts index 0edcbf6d8..d03f8afc9 100644 --- a/src/http/routes/object/moveObject.ts +++ b/src/http/routes/object/moveObject.ts @@ -51,9 +51,13 @@ export default async function routes(fastify: FastifyInstance) { const destinationBucketId = destinationBucket || bucketId - const move = await request.storage - .from(bucketId) - .moveObject(sourceKey, destinationBucketId, destinationKey, request.owner) + const move = await request.storage.from(bucketId).moveObject({ + sourceObjectName: sourceKey, + destinationBucket: destinationBucketId, + destinationObjectName: destinationKey, + owner: request.owner, + signal: request.signals.disconnect.signal, + }) return response.status(200).send({ message: 'Successfully moved', diff --git a/src/http/routes/object/uploadSignedObject.ts b/src/http/routes/object/uploadSignedObject.ts index 5b01aa467..d1c1cd65c 100644 --- a/src/http/routes/object/uploadSignedObject.ts +++ b/src/http/routes/object/uploadSignedObject.ts @@ -85,7 +85,7 @@ export default async function routes(fastify: FastifyInstance) { const { owner, upsert } = await 
request.storage .from(bucketName) - .verifyObjectSignature(token, objectName) + .verifyObjectSignature({ token, objectName }) const { objectMetadata, path } = await request.storage .asSuperUser() diff --git a/src/http/routes/render/renderAuthenticatedImage.ts b/src/http/routes/render/renderAuthenticatedImage.ts index 3673d3f56..c16bd00d0 100644 --- a/src/http/routes/render/renderAuthenticatedImage.ts +++ b/src/http/routes/render/renderAuthenticatedImage.ts @@ -51,9 +51,11 @@ export default async function routes(fastify: FastifyInstance) { const { bucketName } = request.params const objectName = request.params['*'] - const obj = await request.storage - .from(bucketName) - .findObject(objectName, 'id,version,metadata') + const obj = await request.storage.from(bucketName).findObject({ + objectName, + columns: 'id,version,metadata', + signal: request.signals.disconnect.signal, + }) const s3Key = request.storage.location.getKeyLocation({ tenantId: request.tenantId, diff --git a/src/http/routes/render/renderPublicImage.ts b/src/http/routes/render/renderPublicImage.ts index 67de7fb2b..e780364ec 100644 --- a/src/http/routes/render/renderPublicImage.ts +++ b/src/http/routes/render/renderPublicImage.ts @@ -53,10 +53,17 @@ export default async function routes(fastify: FastifyInstance) { const bucketRef = request.storage.asSuperUser().from(bucketName) const [, obj] = await Promise.all([ - request.storage.asSuperUser().findBucket(bucketName, 'id,public', { - isPublic: true, + request.storage.asSuperUser().findBucket({ + bucketId: bucketName, + columns: 'id,public', + filters: { isPublic: true }, + signal: request.signals.disconnect.signal, + }), + bucketRef.findObject({ + objectName, + columns: 'id,version,metadata', + signal: request.signals.disconnect.signal, }), - bucketRef.findObject(objectName, 'id,version,metadata'), ]) const s3Key = `${request.tenantId}/${bucketName}/${objectName}` diff --git a/src/http/routes/render/renderSignedImage.ts b/src/http/routes/render/renderSignedImage.ts index a2bb67e8e..727ddb50c 100644 --- a/src/http/routes/render/renderSignedImage.ts +++ b/src/http/routes/render/renderSignedImage.ts @@ -83,7 +83,11 @@ export default async function routes(fastify: FastifyInstance) { const obj = await request.storage .asSuperUser() .from(bucketName) - .findObject(objParts.join('/'), 'id,version,metadata') + .findObject({ + objectName: objParts.join('/'), + columns: 'id,version,metadata', + signal: request.signals.disconnect.signal, + }) const renderer = request.storage.renderer('image') as ImageRenderer diff --git a/src/http/routes/s3/commands/abort-multipart-upload.ts b/src/http/routes/s3/commands/abort-multipart-upload.ts index c8a4afc33..9ad069ac9 100644 --- a/src/http/routes/s3/commands/abort-multipart-upload.ts +++ b/src/http/routes/s3/commands/abort-multipart-upload.ts @@ -37,11 +37,11 @@ export default function AbortMultiPartUpload(s3Router: S3Router) { throw ERRORS.InvalidParameter('internalIcebergBucketName') } - await ctx.storage.backend.abortMultipartUpload( - icebergBucketName, - req.Params['*'], - req.Querystring.uploadId - ) + await ctx.storage.backend.abortMultipartUpload({ + bucket: icebergBucketName, + key: req.Params['*'], + uploadId: req.Querystring.uploadId, + }) return {} } @@ -53,11 +53,14 @@ export default function AbortMultiPartUpload(s3Router: S3Router) { (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.abortMultipartUpload({ - Bucket: req.Params.Bucket, - Key: req.Params['*'], - UploadId: 
req.Querystring.uploadId, - }) + return s3Protocol.abortMultipartUpload( + { + Bucket: req.Params.Bucket, + Key: req.Params['*'], + UploadId: req.Querystring.uploadId, + }, + ctx.signals.response + ) } ) } diff --git a/src/http/routes/s3/commands/complete-multipart-upload.ts b/src/http/routes/s3/commands/complete-multipart-upload.ts index ee16dd43d..4b29eab95 100644 --- a/src/http/routes/s3/commands/complete-multipart-upload.ts +++ b/src/http/routes/s3/commands/complete-multipart-upload.ts @@ -67,13 +67,13 @@ export default function CompleteMultipartUpload(s3Router: S3Router) { throw ERRORS.InvalidParameter('internalIcebergBucketName') } - const resp = await ctx.req.storage.backend.completeMultipartUpload( - icebergBucketName, - req.Params['*'], - req.Querystring.uploadId, - '', - req.Body?.CompleteMultipartUpload?.Part || [] - ) + const resp = await ctx.req.storage.backend.completeMultipartUpload({ + bucket: icebergBucketName, + key: req.Params['*'], + uploadId: req.Querystring.uploadId, + version: '', + parts: req.Body?.CompleteMultipartUpload?.Part || [], + }) return { responseBody: { diff --git a/src/http/routes/s3/commands/copy-object.ts b/src/http/routes/s3/commands/copy-object.ts index 29d5992a6..c3c7e2271 100644 --- a/src/http/routes/s3/commands/copy-object.ts +++ b/src/http/routes/s3/commands/copy-object.ts @@ -38,24 +38,29 @@ export default function CopyObject(s3Router: S3Router) { (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.copyObject({ - Bucket: req.Params.Bucket, - Key: req.Params['*'], - CopySource: req.Headers['x-amz-copy-source'], - ContentType: req.Headers['content-type'], - CacheControl: req.Headers['cache-control'], - MetadataDirective: req.Headers['x-amz-metadata-directive'] as MetadataDirective | undefined, - Expires: req.Headers.expires ? new Date(req.Headers.expires) : undefined, - ContentEncoding: req.Headers['content-encoding'], - CopySourceIfMatch: req.Headers['x-amz-copy-source-if-match'], - CopySourceIfModifiedSince: req.Headers['x-amz-copy-source-if-modified-since'] - ? new Date(req.Headers['x-amz-copy-source-if-modified-since']) - : undefined, - CopySourceIfNoneMatch: req.Headers['x-amz-copy-source-if-none-match'], - CopySourceIfUnmodifiedSince: req.Headers['x-amz-copy-source-if-unmodified-since'] - ? new Date(req.Headers['x-amz-copy-source-if-unmodified-since']) - : undefined, - }) + return s3Protocol.copyObject( + { + Bucket: req.Params.Bucket, + Key: req.Params['*'], + CopySource: req.Headers['x-amz-copy-source'], + ContentType: req.Headers['content-type'], + CacheControl: req.Headers['cache-control'], + MetadataDirective: req.Headers['x-amz-metadata-directive'] as + | MetadataDirective + | undefined, + Expires: req.Headers.expires ? new Date(req.Headers.expires) : undefined, + ContentEncoding: req.Headers['content-encoding'], + CopySourceIfMatch: req.Headers['x-amz-copy-source-if-match'], + CopySourceIfModifiedSince: req.Headers['x-amz-copy-source-if-modified-since'] + ? new Date(req.Headers['x-amz-copy-source-if-modified-since']) + : undefined, + CopySourceIfNoneMatch: req.Headers['x-amz-copy-source-if-none-match'], + CopySourceIfUnmodifiedSince: req.Headers['x-amz-copy-source-if-unmodified-since'] + ? 
new Date(req.Headers['x-amz-copy-source-if-unmodified-since']) + : undefined, + }, + ctx.signals.response + ) } ) } diff --git a/src/http/routes/s3/commands/create-bucket.ts b/src/http/routes/s3/commands/create-bucket.ts index 0d825b782..a079fb03c 100644 --- a/src/http/routes/s3/commands/create-bucket.ts +++ b/src/http/routes/s3/commands/create-bucket.ts @@ -28,7 +28,8 @@ export default function CreateBucket(s3Router: S3Router) { return s3Protocol.createBucket( req.Params.Bucket, - req.Headers?.['x-amz-acl'] === 'public-read' + req.Headers?.['x-amz-acl'] === 'public-read', + ctx.signals.response ) } ) diff --git a/src/http/routes/s3/commands/create-multipart-upload.ts b/src/http/routes/s3/commands/create-multipart-upload.ts index ef676ee98..1a965d50f 100644 --- a/src/http/routes/s3/commands/create-multipart-upload.ts +++ b/src/http/routes/s3/commands/create-multipart-upload.ts @@ -49,13 +49,13 @@ export default function CreateMultipartUpload(s3Router: S3Router) { throw ERRORS.InvalidBucketName('Iceberg bucket name is required') } - const uploadId = await ctx.req.storage.backend.createMultiPartUpload( - icebergBucketName, - req.Params['*'], - undefined, - req.Headers?.['content-type'] || 'application/octet-stream', - req.Headers?.['cache-control'] || 'no-cache' - ) + const uploadId = await ctx.req.storage.backend.createMultiPartUpload({ + bucket: icebergBucketName, + key: req.Params['*'], + version: undefined, + contentType: req.Headers?.['content-type'] || 'application/octet-stream', + cacheControl: req.Headers?.['cache-control'] || 'no-cache', + }) return { responseBody: { @@ -77,15 +77,18 @@ export default function CreateMultipartUpload(s3Router: S3Router) { const metadata = s3Protocol.parseMetadataHeaders(req.Headers) - return s3Protocol.createMultiPartUpload({ - Bucket: req.Params.Bucket, - Key: req.Params['*'], - ContentType: req.Headers?.['content-type'], - CacheControl: req.Headers?.['cache-control'], - ContentDisposition: req.Headers?.['content-disposition'], - ContentEncoding: req.Headers?.['content-encoding'], - Metadata: metadata || {}, - }) + return s3Protocol.createMultiPartUpload( + { + Bucket: req.Params.Bucket, + Key: req.Params['*'], + ContentType: req.Headers?.['content-type'], + CacheControl: req.Headers?.['cache-control'], + ContentDisposition: req.Headers?.['content-disposition'], + ContentEncoding: req.Headers?.['content-encoding'], + Metadata: metadata || {}, + }, + ctx.signals.response + ) } ) } diff --git a/src/http/routes/s3/commands/delete-bucket.ts b/src/http/routes/s3/commands/delete-bucket.ts index 17315fe34..95dfc5f77 100644 --- a/src/http/routes/s3/commands/delete-bucket.ts +++ b/src/http/routes/s3/commands/delete-bucket.ts @@ -20,7 +20,7 @@ export default function DeleteBucket(s3Router: S3Router) { (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.deleteBucket(req.Params.Bucket) + return s3Protocol.deleteBucket(req.Params.Bucket, ctx.signals.response) } ) } diff --git a/src/http/routes/s3/commands/delete-object.ts b/src/http/routes/s3/commands/delete-object.ts index 7ef70830e..3682c0c20 100644 --- a/src/http/routes/s3/commands/delete-object.ts +++ b/src/http/routes/s3/commands/delete-object.ts @@ -66,12 +66,15 @@ export default function DeleteObject(s3Router: S3Router) { (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.deleteObjects({ - Bucket: req.Params.Bucket, - Delete: { - Objects: req.Body.Delete.Object, + return 
s3Protocol.deleteObjects( + { + Bucket: req.Params.Bucket, + Delete: { + Objects: req.Body.Delete.Object, + }, }, - }) + ctx.signals.response + ) } ) @@ -82,10 +85,13 @@ export default function DeleteObject(s3Router: S3Router) { (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.deleteObject({ - Bucket: req.Params.Bucket, - Key: req.Params['*'], - }) + return s3Protocol.deleteObject( + { + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }, + ctx.signals.response + ) } ) @@ -101,7 +107,11 @@ export default function DeleteObject(s3Router: S3Router) { throw new Error('Iceberg bucket name is required') } - await ctx.req.storage.backend.deleteObject(internalBucketName, req.Params['*'], undefined) + await ctx.req.storage.backend.remove({ + bucket: internalBucketName, + key: req.Params['*'], + version: undefined, + }) return {} } diff --git a/src/http/routes/s3/commands/get-bucket.ts b/src/http/routes/s3/commands/get-bucket.ts index 7703b6b2e..3a79be047 100644 --- a/src/http/routes/s3/commands/get-bucket.ts +++ b/src/http/routes/s3/commands/get-bucket.ts @@ -42,7 +42,7 @@ export default function GetBucket(s3Router: S3Router) { { schema: GetBucketLocationInput, operation: ROUTE_OPERATIONS.S3_GET_BUCKET_LOCATION }, async (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - await ctx.storage.findBucket(req.Params.Bucket) + await ctx.storage.findBucket({ bucketId: req.Params.Bucket, signal: ctx.signals.response }) return s3Protocol.getBucketLocation() } @@ -53,7 +53,7 @@ export default function GetBucket(s3Router: S3Router) { { schema: GetBucketVersioningInput, operation: ROUTE_OPERATIONS.S3_GET_BUCKET_VERSIONING }, async (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - await ctx.storage.findBucket(req.Params.Bucket) + await ctx.storage.findBucket({ bucketId: req.Params.Bucket, signal: ctx.signals.response }) return s3Protocol.getBucketVersioning() } diff --git a/src/http/routes/s3/commands/get-object.ts b/src/http/routes/s3/commands/get-object.ts index 514762614..6270fee0d 100644 --- a/src/http/routes/s3/commands/get-object.ts +++ b/src/http/routes/s3/commands/get-object.ts @@ -70,10 +70,13 @@ export default function GetObject(s3Router: S3Router) { (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.getObjectTagging({ - Bucket: req.Params.Bucket, - Key: req.Params['*'], - }) + return s3Protocol.getObjectTagging( + { + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }, + ctx.signals.response + ) } ) diff --git a/src/http/routes/s3/commands/head-bucket.ts b/src/http/routes/s3/commands/head-bucket.ts index acdb20e6a..ebad31fca 100644 --- a/src/http/routes/s3/commands/head-bucket.ts +++ b/src/http/routes/s3/commands/head-bucket.ts @@ -19,7 +19,7 @@ export default function HeadBucket(s3Router: S3Router) { async (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.headBucket(req.Params.Bucket) + return s3Protocol.headBucket(req.Params.Bucket, ctx.signals.response) } ) } diff --git a/src/http/routes/s3/commands/head-object.ts b/src/http/routes/s3/commands/head-object.ts index 870fd4961..c731d44a5 100644 --- a/src/http/routes/s3/commands/head-object.ts +++ b/src/http/routes/s3/commands/head-object.ts @@ -41,10 +41,13 @@ export default function HeadObject(s3Router: S3Router) { (req, ctx) => { const s3Protocol = new 
S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.dbHeadObject({ - Bucket: req.Params.Bucket, - Key: req.Params['*'], - }) + return s3Protocol.dbHeadObject( + { + Bucket: req.Params.Bucket, + Key: req.Params['*'], + }, + ctx.signals.response + ) } ) } diff --git a/src/http/routes/s3/commands/list-buckets.ts b/src/http/routes/s3/commands/list-buckets.ts index dd520bd80..ab7d53075 100644 --- a/src/http/routes/s3/commands/list-buckets.ts +++ b/src/http/routes/s3/commands/list-buckets.ts @@ -12,7 +12,7 @@ export default function ListBuckets(s3Router: S3Router) { { schema: ListObjectsInput, operation: ROUTE_OPERATIONS.S3_LIST_BUCKET }, (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.listBuckets() + return s3Protocol.listBuckets(ctx.signals.response) } ) } diff --git a/src/http/routes/s3/commands/list-multipart-uploads.ts b/src/http/routes/s3/commands/list-multipart-uploads.ts index 5d8c2d1ee..c66a00c3b 100644 --- a/src/http/routes/s3/commands/list-multipart-uploads.ts +++ b/src/http/routes/s3/commands/list-multipart-uploads.ts @@ -33,15 +33,18 @@ export default function ListMultipartUploads(s3Router: S3Router) { async (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.listMultipartUploads({ - Bucket: req.Params.Bucket, - Prefix: req.Querystring?.prefix || '', - KeyMarker: req.Querystring?.['key-marker'], - UploadIdMarker: req.Querystring?.['upload-id-marker'], - EncodingType: req.Querystring?.['encoding-type'], - MaxUploads: req.Querystring?.['max-uploads'], - Delimiter: req.Querystring?.delimiter, - }) + return s3Protocol.listMultipartUploads( + { + Bucket: req.Params.Bucket, + Prefix: req.Querystring?.prefix || '', + KeyMarker: req.Querystring?.['key-marker'], + UploadIdMarker: req.Querystring?.['upload-id-marker'], + EncodingType: req.Querystring?.['encoding-type'], + MaxUploads: req.Querystring?.['max-uploads'], + Delimiter: req.Querystring?.delimiter, + }, + ctx.signals.response + ) } ) } diff --git a/src/http/routes/s3/commands/list-objects.ts b/src/http/routes/s3/commands/list-objects.ts index b4c97e3c1..ccfe2623b 100644 --- a/src/http/routes/s3/commands/list-objects.ts +++ b/src/http/routes/s3/commands/list-objects.ts @@ -54,15 +54,18 @@ export default function ListObjects(s3Router: S3Router) { async (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.listObjectsV2({ - Bucket: req.Params.Bucket, - Prefix: req.Querystring?.prefix || '', - ContinuationToken: req.Querystring?.['continuation-token'], - StartAfter: req.Querystring?.['start-after'], - EncodingType: req.Querystring?.['encoding-type'], - MaxKeys: req.Querystring?.['max-keys'], - Delimiter: req.Querystring?.delimiter, - }) + return s3Protocol.listObjectsV2( + { + Bucket: req.Params.Bucket, + Prefix: req.Querystring?.prefix || '', + ContinuationToken: req.Querystring?.['continuation-token'], + StartAfter: req.Querystring?.['start-after'], + EncodingType: req.Querystring?.['encoding-type'], + MaxKeys: req.Querystring?.['max-keys'], + Delimiter: req.Querystring?.delimiter, + }, + ctx.signals.response + ) } ) @@ -72,14 +75,17 @@ export default function ListObjects(s3Router: S3Router) { async (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.listObjects({ - Bucket: req.Params.Bucket, - Prefix: req.Querystring?.prefix || '', - Marker: 
req.Querystring?.['marker'], - EncodingType: req.Querystring?.['encoding-type'], - MaxKeys: req.Querystring?.['max-keys'], - Delimiter: req.Querystring?.delimiter, - }) + return s3Protocol.listObjects( + { + Bucket: req.Params.Bucket, + Prefix: req.Querystring?.prefix || '', + Marker: req.Querystring?.['marker'], + EncodingType: req.Querystring?.['encoding-type'], + MaxKeys: req.Querystring?.['max-keys'], + Delimiter: req.Querystring?.delimiter, + }, + ctx.signals.response + ) } ) } diff --git a/src/http/routes/s3/commands/list-parts.ts b/src/http/routes/s3/commands/list-parts.ts index 62779a0d7..0902abcb0 100644 --- a/src/http/routes/s3/commands/list-parts.ts +++ b/src/http/routes/s3/commands/list-parts.ts @@ -1,7 +1,7 @@ import { S3ProtocolHandler } from '@storage/protocols/s3/s3-handler' import { S3Router } from '../router' import { ROUTE_OPERATIONS } from '../../operations' -import { S3Backend } from '@storage/backend' +import { S3Adapter } from '@storage/backend' import { ERRORS } from '@internal/errors' const ListPartsInput = { @@ -32,7 +32,7 @@ export default function ListParts(s3Router: S3Router) { async (req, ctx) => { const backend = ctx.req.storage.backend - if (!(backend instanceof S3Backend)) { + if (!(backend instanceof S3Adapter)) { throw ERRORS.NotSupported('only S3 driver is supported for this operation') } @@ -71,13 +71,16 @@ export default function ListParts(s3Router: S3Router) { async (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.listParts({ - Bucket: req.Params.Bucket, - Key: req.Params['*'], - UploadId: req.Querystring.uploadId, - MaxParts: req.Querystring['max-parts'], - PartNumberMarker: req.Querystring['part-number-marker'], - }) + return s3Protocol.listParts( + { + Bucket: req.Params.Bucket, + Key: req.Params['*'], + UploadId: req.Querystring.uploadId, + MaxParts: req.Querystring['max-parts'], + PartNumberMarker: req.Querystring['part-number-marker'], + }, + ctx.signals.response + ) } ) } diff --git a/src/http/routes/s3/commands/put-object.ts b/src/http/routes/s3/commands/put-object.ts index 88cc73baa..655ece5e2 100644 --- a/src/http/routes/s3/commands/put-object.ts +++ b/src/http/routes/s3/commands/put-object.ts @@ -86,15 +86,15 @@ export default function PutObject(s3Router: S3Router) { new ByteLimitTransformStream(MAX_PART_SIZE), // 5GB limit for iceberg objects ctx.req.streamingSignatureV4 || new PassThrough(), async (fileStream) => { - const u = await ctx.req.storage.backend.uploadObject( - icebergBucket, + const u = await ctx.req.storage.backend.write({ + bucket: icebergBucket, key, - undefined, - fileStream as Readable, - uploadRequest.mimeType, - uploadRequest.cacheControl, - ctx.signals.body - ) + version: undefined, + body: fileStream as Readable, + contentType: uploadRequest.mimeType, + cacheControl: uploadRequest.cacheControl, + signal: ctx.signals.body, + }) return { headers: { @@ -125,9 +125,11 @@ export default function PutObject(s3Router: S3Router) { key += '.emptyFolderPlaceholder' } - const bucket = await ctx.storage - .asSuperUser() - .findBucket(req.Params.Bucket, 'id,file_size_limit,allowed_mime_types') + const bucket = await ctx.storage.asSuperUser().findBucket({ + bucketId: req.Params.Bucket, + columns: 'id,file_size_limit,allowed_mime_types', + signal: ctx.signals.body, + }) const uploadRequest = await fileUploadFromRequest(ctx.req, { objectName: key, @@ -174,9 +176,11 @@ export default function PutObject(s3Router: S3Router) { throw ERRORS.InvalidParameter('Missing file') } 
- const bucket = await ctx.storage - .asSuperUser() - .findBucket(req.Params.Bucket, 'id,file_size_limit,allowed_mime_types') + const bucket = await ctx.storage.asSuperUser().findBucket({ + bucketId: req.Params.Bucket, + columns: 'id,file_size_limit,allowed_mime_types', + signal: ctx.signals.body, + }) const fieldsObject = fieldsToObject(file?.fields || {}) const metadata = s3Protocol.parseMetadataHeaders(fieldsObject) diff --git a/src/http/routes/s3/commands/upload-part-copy.ts b/src/http/routes/s3/commands/upload-part-copy.ts index 41db976c5..f31afe198 100644 --- a/src/http/routes/s3/commands/upload-part-copy.ts +++ b/src/http/routes/s3/commands/upload-part-copy.ts @@ -42,22 +42,25 @@ export default function UploadPartCopy(s3Router: S3Router) { (req, ctx) => { const s3Protocol = new S3ProtocolHandler(ctx.storage, ctx.tenantId, ctx.owner) - return s3Protocol.uploadPartCopy({ - Bucket: req.Params.Bucket, - Key: req.Params['*'], - CopySource: req.Headers['x-amz-copy-source'], - PartNumber: req.Querystring.partNumber, - UploadId: req.Querystring.uploadId, - CopySourceRange: req.Headers['x-amz-copy-source-range'], - CopySourceIfMatch: req.Headers['x-amz-copy-source-if-match'], - CopySourceIfModifiedSince: req.Headers['x-amz-copy-source-if-modified-since'] - ? new Date(req.Headers['x-amz-copy-source-if-modified-since']) - : undefined, - CopySourceIfNoneMatch: req.Headers['x-amz-copy-source-if-none-match'], - CopySourceIfUnmodifiedSince: req.Headers['x-amz-copy-source-if-unmodified-since'] - ? new Date(req.Headers['x-amz-copy-source-if-unmodified-since']) - : undefined, - }) + return s3Protocol.uploadPartCopy( + { + Bucket: req.Params.Bucket, + Key: req.Params['*'], + CopySource: req.Headers['x-amz-copy-source'], + PartNumber: req.Querystring.partNumber, + UploadId: req.Querystring.uploadId, + CopySourceRange: req.Headers['x-amz-copy-source-range'], + CopySourceIfMatch: req.Headers['x-amz-copy-source-if-match'], + CopySourceIfModifiedSince: req.Headers['x-amz-copy-source-if-modified-since'] + ? new Date(req.Headers['x-amz-copy-source-if-modified-since']) + : undefined, + CopySourceIfNoneMatch: req.Headers['x-amz-copy-source-if-none-match'], + CopySourceIfUnmodifiedSince: req.Headers['x-amz-copy-source-if-unmodified-since'] + ? 
new Date(req.Headers['x-amz-copy-source-if-unmodified-since']) + : undefined, + }, + ctx.signals.response + ) } ) } diff --git a/src/http/routes/s3/commands/upload-part.ts b/src/http/routes/s3/commands/upload-part.ts index 36462a75b..6d2980113 100644 --- a/src/http/routes/s3/commands/upload-part.ts +++ b/src/http/routes/s3/commands/upload-part.ts @@ -62,16 +62,17 @@ export default function UploadPart(s3Router: S3Router) { new ByteLimitTransformStream(MAX_PART_SIZE), // 5GB max part size ctx.req.streamingSignatureV4, async (body) => { - const part = await ctx.req.storage.backend.uploadPart( - icebergBucketName!, - req.Params['*'], - '', - req.Querystring.uploadId, - req.Querystring.partNumber, - body as Readable, - req.Headers?.['x-amz-decoded-content-length'] || req.Headers?.['content-length'], - ctx.signals.body - ) + const part = await ctx.req.storage.backend.uploadPart({ + bucket: icebergBucketName!, + key: req.Params['*'], + version: '', + uploadId: req.Querystring.uploadId, + partNumber: req.Querystring.partNumber, + body: body as Readable, + length: + req.Headers?.['x-amz-decoded-content-length'] || req.Headers?.['content-length'], + signal: ctx.signals.body, + }) return { headers: { @@ -91,16 +92,16 @@ export default function UploadPart(s3Router: S3Router) { passThrough.destroy(err) }) - const part = await ctx.req.storage.backend.uploadPart( - icebergBucketName!, - req.Params['*'], - '', - req.Querystring.uploadId, - req.Querystring.partNumber, - ctx.req.raw as Readable, - req.Headers?.['content-length'], - ctx.signals.body - ) + const part = await ctx.req.storage.backend.uploadPart({ + bucket: icebergBucketName!, + key: req.Params['*'], + version: '', + uploadId: req.Querystring.uploadId, + partNumber: req.Querystring.partNumber, + body: ctx.req.raw as Readable, + length: req.Headers?.['content-length'], + signal: ctx.signals.body, + }) return { headers: { diff --git a/src/http/routes/tus/index.ts b/src/http/routes/tus/index.ts index 097c5a034..1d1d7e370 100644 --- a/src/http/routes/tus/index.ts +++ b/src/http/routes/tus/index.ts @@ -56,6 +56,7 @@ type MultiPartRequest = http.IncomingMessage & { db: TenantConnection isUpsert: boolean resources?: string[] + signal?: AbortSignal } } @@ -156,9 +157,11 @@ function createTusServer( const resourceId = UploadId.fromString(uploadId) - const bucket = await req.upload.storage - .asSuperUser() - .findBucket(resourceId.bucket, 'id,file_size_limit') + const bucket = await req.upload.storage.asSuperUser().findBucket({ + bucketId: resourceId.bucket, + columns: 'id,file_size_limit', + signal: req.upload.signal, + }) const globalFileLimit = await getFileSizeLimit(req.upload.tenantId) @@ -263,6 +266,7 @@ const authenticatedRoutes = fastifyPlugin( tenantId: req.tenantId, db: req.db, isUpsert: req.headers['x-upsert'] === 'true', + signal: req.signals.disconnect.signal, } }) @@ -365,6 +369,7 @@ const publicRoutes = fastifyPlugin( tenantId: req.tenantId, db: req.db, isUpsert: req.headers['x-upsert'] === 'true', + signal: req.signals.disconnect.signal, } }) diff --git a/src/http/routes/tus/lifecycle.ts b/src/http/routes/tus/lifecycle.ts index 0e7f911a0..57845957b 100644 --- a/src/http/routes/tus/lifecycle.ts +++ b/src/http/routes/tus/lifecycle.ts @@ -38,6 +38,7 @@ export type MultiPartRequest = http.IncomingMessage & { tenantId: string isUpsert: boolean resources?: string[] + signal?: AbortSignal } } @@ -76,7 +77,7 @@ export async function onIncomingRequest(rawReq: Request, id: string) { const payload = await req.upload.storage .from(uploadID.bucket) - 
.verifyObjectSignature(signature, uploadID.objectName) + .verifyObjectSignature({ token: signature, objectName: uploadID.objectName }) req.upload.owner = payload.owner req.upload.isUpsert = payload.upsert @@ -215,9 +216,11 @@ export async function onCreate( const storage = req.upload.storage - const bucket = await storage - .asSuperUser() - .findBucket(uploadID.bucket, 'id, file_size_limit, allowed_mime_types') + const bucket = await storage.asSuperUser().findBucket({ + bucketId: uploadID.bucket, + columns: 'id, file_size_limit, allowed_mime_types', + signal: req.upload.signal, + }) const metadata = { ...(upload.metadata ? upload.metadata : {}), @@ -250,11 +253,11 @@ export async function onUploadFinish(rawReq: Request, upload: Upload) { bucketId: resourceId.bucket, objectName: resourceId.objectName, }) - const metadata = await req.upload.storage.backend.headObject( - storageS3Bucket, - s3Key, - resourceId.version - ) + const metadata = await req.upload.storage.backend.stats({ + bucket: storageS3Bucket, + key: s3Key, + version: resourceId.version, + }) const uploader = new Uploader( req.upload.storage.backend, diff --git a/src/internal/database/connection.ts b/src/internal/database/connection.ts index 9b165e565..efc784813 100644 --- a/src/internal/database/connection.ts +++ b/src/internal/database/connection.ts @@ -20,7 +20,6 @@ pg.types.setTypeParser(20, 'text', parseInt) export class TenantConnection { static poolManager = new PoolManager() public readonly role: string - private abortSignal?: AbortSignal constructor( public readonly pool: PoolStrategy, @@ -29,14 +28,6 @@ export class TenantConnection { this.role = options.user.payload.role || 'anon' } - setAbortSignal(signal: AbortSignal) { - this.abortSignal = signal - } - - getAbortSignal(): AbortSignal | undefined { - return this.abortSignal - } - static stop() { return TenantConnection.poolManager.destroyAll() } @@ -76,6 +67,7 @@ export class TenantConnection { }, { minTimeout: 50, + factor: 2, maxTimeout: 200, maxRetryTime: 3000, retries: 10, @@ -144,22 +136,17 @@ export class TenantConnection { } asSuperUser() { - const tenantConnection = new TenantConnection(this.pool, { + return new TenantConnection(this.pool, { ...this.options, user: this.options.superUser, }) - - if (this.abortSignal) { - tenantConnection.setAbortSignal(this.abortSignal) - } - - return tenantConnection } - async setScope(tnx: Knex) { + async setScope(tnx: Knex, opts?: { signal?: AbortSignal }) { const headers = JSON.stringify(this.options.headers || {}) - await tnx.raw( - ` + await tnx + .raw( + ` SELECT set_config('role', ?, true), set_config('request.jwt.claim.role', ?, true), @@ -172,17 +159,18 @@ export class TenantConnection { set_config('storage.operation', ?, true), set_config('storage.allow_delete_query', 'true', true); `, - [ - this.role, - this.role, - this.options.user.jwt || '', - this.options.user.payload.sub || '', - JSON.stringify(this.options.user.payload), - headers, - this.options.method || '', - this.options.path || '', - this.options.operation?.() || '', - ] - ) + [ + this.role, + this.role, + this.options.user.jwt || '', + this.options.user.payload.sub || '', + JSON.stringify(this.options.user.payload), + headers, + this.options.method || '', + this.options.path || '', + this.options.operation?.() || '', + ] + ) + .abortOnSignal(opts?.signal) } } diff --git a/src/internal/monitoring/logger.ts b/src/internal/monitoring/logger.ts index bfaa038e7..8ebbef766 100644 --- a/src/internal/monitoring/logger.ts +++ b/src/internal/monitoring/logger.ts 
@@ -12,6 +12,7 @@ const {
   logflareEnabled,
   logflareBatchSize,
   region,
+  version,
 } = getConfig()
 
 export const baseLogger = pino({
@@ -59,7 +60,7 @@ export const baseLogger = pino({
   timestamp: pino.stdTimeFunctions.isoTime,
 })
 
-export let logger = baseLogger.child({ region })
+export let logger = baseLogger.child({ region, appVersion: version })
 
 export function setLogger(newLogger: Logger) {
   logger = newLogger
diff --git a/src/internal/monitoring/otel-class-instrumentations.ts b/src/internal/monitoring/otel-class-instrumentations.ts
index 5659f8b5a..49836abf8 100644
--- a/src/internal/monitoring/otel-class-instrumentations.ts
+++ b/src/internal/monitoring/otel-class-instrumentations.ts
@@ -5,7 +5,7 @@ import { ObjectStorage } from '@storage/object'
 import { Uploader } from '@storage/uploader'
 import { Storage } from '@storage/storage'
 import { Event as QueueBaseEvent } from '@internal/queue'
-import { S3Backend } from '@storage/backend'
+import { S3Adapter } from '@storage/backend'
 import { StorageKnexDB } from '@storage/database'
 import { TenantConnection } from '@internal/database'
 import { S3Store } from '@tus/s3-store'
@@ -67,7 +67,7 @@ export const classInstrumentations = [
     },
   }),
   new ClassInstrumentation({
-    targetClass: S3Backend,
+    targetClass: S3Adapter,
     enabled: true,
     methodsToInstrument: [
       'getObject',
diff --git a/src/storage/backend/adapter.ts b/src/storage/backend/adapter.ts
index a897283be..11ad4c87f 100644
--- a/src/storage/backend/adapter.ts
+++ b/src/storage/backend/adapter.ts
@@ -44,6 +44,130 @@ export type UploadPart = {
   ChecksumSHA256?: string
 }
 
+export interface ListObjectsInput {
+  bucket: string
+  options?: {
+    prefix?: string
+    delimiter?: string
+    nextToken?: string
+    startAfter?: string
+    beforeDate?: Date
+  }
+  signal?: AbortSignal
+}
+
+export interface ReadObjectInput {
+  bucket: string
+  key: string
+  version: string | undefined
+  headers?: BrowserCacheHeaders
+  signal?: AbortSignal
+}
+
+export interface WriteObjectInput {
+  bucket: string
+  key: string
+  version: string | undefined
+  body: NodeJS.ReadableStream
+  contentType: string
+  cacheControl: string
+  signal?: AbortSignal
+}
+
+export interface RemoveObjectInput {
+  bucket: string
+  key: string
+  version: string | undefined
+  signal?: AbortSignal
+}
+
+export interface CopyObjectInput {
+  bucket: string
+  source: string
+  version: string | undefined
+  destination: string
+  destinationVersion: string | undefined
+  metadata?: { cacheControl?: string; mimetype?: string }
+  conditions?: {
+    ifMatch?: string
+    ifNoneMatch?: string
+    ifModifiedSince?: Date
+    ifUnmodifiedSince?: Date
+  }
+  signal?: AbortSignal
+}
+
+export interface RemoveManyObjectsInput {
+  bucket: string
+  prefixes: string[]
+  signal?: AbortSignal
+}
+
+export interface StatsObjectInput {
+  bucket: string
+  key: string
+  version: string | undefined
+  signal?: AbortSignal
+}
+
+export interface TempPrivateAccessUrlInput {
+  bucket: string
+  key: string
+  version: string | undefined
+  signal?: AbortSignal
+}
+
+export interface CreateMultiPartUploadInput {
+  bucket: string
+  key: string
+  version: string | undefined
+  contentType: string
+  cacheControl: string
+  metadata?: Record<string, string>
+  signal?: AbortSignal
+}
+
+export interface UploadPartInput {
+  bucket: string
+  key: string
+  version: string
+  uploadId: string
+  partNumber: number
+  body?: string | Uint8Array | Buffer | Readable
+  length?: number
+  signal?: AbortSignal
+}
+
+export interface CompleteMultipartUploadInput {
+  bucket: string
+  key: string
+  uploadId: string
+  version: string
+  parts: UploadPart[]
+  opts?: { removePrefix?: boolean }
+  signal?: AbortSignal
+}
+
+export interface AbortMultipartUploadInput {
+  bucket: string
+  key: string
+  uploadId: string
+  version?: string
+  signal?: AbortSignal
+}
+
+export interface UploadPartCopyInput {
+  bucket: string
+  key: string
+  version: string
+  uploadId: string
+  partNumber: number
+  sourceKey: string
+  sourceKeyVersion?: string
+  bytesRange?: { fromByte: number; toByte: number }
+  signal?: AbortSignal
+}
+
 /**
  * A generic storage Adapter to interact with files
  */
@@ -54,156 +178,71 @@ export abstract class StorageBackendAdapter {
   }
 
   async list(
-    bucket: string,
-    options?: {
-      prefix?: string
-      delimiter?: string
-      nextToken?: string
-      startAfter?: string
-      beforeDate?: Date
-    }
+    input: ListObjectsInput
   ): Promise<{ keys: { name: string; size: number }[]; nextToken?: string }> {
     throw new Error('list not implemented')
   }
 
   /**
    * Gets an object body and metadata
-   * @param bucketName
-   * @param key
-   * @param headers
    */
-  async getObject(
-    bucketName: string,
-    key: string,
-    version: string | undefined,
-    headers?: BrowserCacheHeaders,
-    signal?: AbortSignal
-  ): Promise<ObjectResponse> {
+  async read(input: ReadObjectInput): Promise<ObjectResponse> {
     throw new Error('getObject not implemented')
   }
 
   /**
    * Uploads and store an object
-   * @param bucketName
-   * @param key
-   * @param body
-   * @param contentType
-   * @param cacheControl
    */
-  async uploadObject(
-    bucketName: string,
-    key: string,
-    version: string | undefined,
-    body: NodeJS.ReadableStream,
-    contentType: string,
-    cacheControl: string,
-    signal?: AbortSignal
-  ): Promise<ObjectMetadata> {
+  async write(input: WriteObjectInput): Promise<ObjectMetadata> {
     throw new Error('uploadObject not implemented')
   }
 
   /**
    * Deletes an object
-   * @param bucket
-   * @param key
-   * @param version
    */
-  async deleteObject(bucket: string, key: string, version: string | undefined): Promise<void> {
+  async remove(input: RemoveObjectInput): Promise<void> {
     throw new Error('deleteObject not implemented')
   }
 
   /**
    * Copies an existing object to the given location
-   * @param bucket
-   * @param source
-   * @param version
-   * @param destination
-   * @param destinationVersion
-   * @param metadata
-   * @param conditions
    */
-  async copyObject(
-    bucket: string,
-    source: string,
-    version: string | undefined,
-    destination: string,
-    destinationVersion: string | undefined,
-    metadata?: { cacheControl?: string; mimetype?: string },
-    conditions?: {
-      ifMatch?: string
-      ifNoneMatch?: string
-      ifModifiedSince?: Date
-      ifUnmodifiedSince?: Date
-    }
+  async copy(
+    input: CopyObjectInput
   ): Promise<Pick<ObjectMetadata, 'httpStatusCode' | 'eTag' | 'lastModified'>> {
     throw new Error('copyObject not implemented')
   }
 
   /**
    * Deletes multiple objects
-   * @param bucket
-   * @param prefixes
    */
-  async deleteObjects(bucket: string, prefixes: string[]): Promise<void> {
+  async removeMany(input: RemoveManyObjectsInput): Promise<void> {
     throw new Error('deleteObjects not implemented')
   }
 
   /**
    * Returns metadata information of a specific object
-   * @param bucket
-   * @param key
-   * @param version
    */
-  async headObject(
-    bucket: string,
-    key: string,
-    version: string | undefined
-  ): Promise<ObjectMetadata> {
+  async stats(input: StatsObjectInput): Promise<ObjectMetadata> {
     throw new Error('headObject not implemented')
   }
 
   /**
    * Returns a private url that can only be accessed internally by the system
-   * @param bucket
-   * @param key
-   * @param version
    */
-  async privateAssetUrl(bucket: string, key: string, version: string | undefined): Promise<string> {
+  async tempPrivateAccessUrl(input: TempPrivateAccessUrlInput): Promise<string> {
    throw new Error('privateAssetUrl not implemented')
   }
 
-  async createMultiPartUpload(
-    bucketName: string,
-    key: string,
-    version: string | undefined,
-    contentType: string,
-    cacheControl: string,
-    metadata?: Record<string, string>
-  ): Promise<string> {
+  async createMultiPartUpload(input: CreateMultiPartUploadInput): Promise<string> {
     throw new Error('not implemented')
   }
 
-  async uploadPart(
-    bucketName: string,
-    key: string,
-    version: string,
-    uploadId: string,
-    partNumber: number,
-    body?: string | Uint8Array | Buffer | Readable,
-    length?: number,
-    signal?: AbortSignal
-  ): Promise<{ ETag?: string }> {
+  async uploadPart(input: UploadPartInput): Promise<{ ETag?: string }> {
     throw new Error('not implemented')
   }
 
-  async completeMultipartUpload(
-    bucketName: string,
-    key: string,
-    uploadId: string,
-    version: string,
-    parts: UploadPart[],
-    opts?: { removePrefix?: boolean }
-  ): Promise<
+  async completeMultipartUpload(input: CompleteMultipartUploadInput): Promise<
     Omit & {
       location?: string
      bucket?: string
@@ -213,24 +252,12 @@ export abstract class StorageBackendAdapter {
     throw new Error('not implemented')
   }
 
-  async abortMultipartUpload(
-    bucketName: string,
-    key: string,
-    uploadId: string,
-    version?: string
-  ): Promise<void> {
+  async abortMultipartUpload(input: AbortMultipartUploadInput): Promise<void> {
     throw new Error('not implemented')
   }
 
   async uploadPartCopy(
-    storageS3Bucket: string,
-    key: string,
-    version: string,
-    UploadId: string,
-    PartNumber: number,
-    sourceKey: string,
-    sourceKeyVersion?: string,
-    bytes?: { fromByte: number; toByte: number }
+    input: UploadPartCopyInput
   ): Promise<{ eTag?: string; lastModified?: Date }> {
     throw new Error('not implemented')
   }
diff --git a/src/storage/backend/file.ts b/src/storage/backend/file/file-adapter.ts
similarity index 80%
rename from src/storage/backend/file.ts
rename to src/storage/backend/file/file-adapter.ts
index 89d5110fa..95d8ca6fa 100644
--- a/src/storage/backend/file.ts
+++ b/src/storage/backend/file/file-adapter.ts
@@ -5,15 +5,27 @@ import fileChecksum from 'md5-file'
 import { promisify } from 'util'
 import stream from 'stream'
 import MultiStream from 'multistream'
-import { getConfig } from '../../config'
+import { getConfig } from '../../../config'
 import {
   StorageBackendAdapter,
   ObjectMetadata,
   ObjectResponse,
   withOptionalVersion,
-  BrowserCacheHeaders,
   UploadPart,
-} from './adapter'
+  ListObjectsInput,
+  ReadObjectInput,
+  WriteObjectInput,
+  RemoveObjectInput,
+  CopyObjectInput,
+  RemoveManyObjectsInput,
+  StatsObjectInput,
+  TempPrivateAccessUrlInput,
+  CreateMultiPartUploadInput,
+  UploadPartInput,
+  CompleteMultipartUploadInput,
+  AbortMultipartUploadInput,
+  UploadPartCopyInput,
+} from '../adapter'
 import { ERRORS, StorageBackendError } from '@internal/errors'
 import { randomUUID } from 'crypto'
 import fsExtra from 'fs-extra'
@@ -39,10 +51,10 @@ const METADATA_ATTR_KEYS = {
 }
 
 /**
- * FileBackend
+ * FileAdapter
  * Interacts with the file system with this FileBackend adapter
  */
-export class FileBackend implements StorageBackendAdapter {
+export class FileAdapter implements StorageBackendAdapter {
   client = null
   filePath: string
   etagAlgorithm: 'mtime' | 'md5'
@@ -59,32 +71,18 @@ export class FileBackend implements StorageBackendAdapter {
   }
 
   async list(
-    bucket: string,
-    options?: {
-      prefix?: string
-      delimiter?: string
-      nextToken?: string
-      startAfter?: string
-    }
+    input: ListObjectsInput
   ): Promise<{ keys: { name: string; size: number }[]; nextToken?: string }> {
     return Promise.resolve({ keys: [] })
   }
 
   /**
    * Gets an object body and metadata
-   * @param bucketName
-   * @param key
-   * @param version
-   * @param headers
    */
-  async getObject(
-    bucketName: string,
-    key: string,
-    version: string | undefined,
-    headers?: BrowserCacheHeaders
-  ): Promise<ObjectResponse> {
+  async read(input: ReadObjectInput): Promise<ObjectResponse> {
+    const { bucket, key, version, headers } = input
     // 'Range: bytes=#######-######
-    const file = this.resolveSecurePath(withOptionalVersion(`${bucketName}/${key}`, version))
+    const file = this.resolveSecurePath(withOptionalVersion(`${bucket}/${key}`, version))
     const data = await fs.stat(file)
     const eTag = await this.etag(file, data)
     const fileSize = data.size
@@ -169,23 +167,11 @@ export class FileBackend implements StorageBackendAdapter {
 
   /**
    * Uploads and store an object
-   * @param bucketName
-   * @param key
-   * @param version
-   * @param body
-   * @param contentType
-   * @param cacheControl
    */
-  async uploadObject(
-    bucketName: string,
-    key: string,
-    version: string | undefined,
-    body: NodeJS.ReadableStream,
-    contentType: string,
-    cacheControl: string
-  ): Promise<ObjectMetadata> {
+  async write(input: WriteObjectInput): Promise<ObjectMetadata> {
+    const { bucket, key, version, body, contentType, cacheControl } = input
     try {
-      const file = this.resolveSecurePath(withOptionalVersion(`${bucketName}/${key}`, version))
+      const file = this.resolveSecurePath(withOptionalVersion(`${bucket}/${key}`, version))
       await fs.ensureFile(file)
       const destFile = fs.createWriteStream(file)
       await pipeline(body, destFile)
@@ -195,7 +181,7 @@
         cacheControl: cacheControl || 'no-cache',
       })
 
-      const metadata = await this.headObject(bucketName, key, version)
+      const metadata = await this.stats({ bucket, key, version })
 
       return {
         ...metadata,
@@ -208,11 +194,9 @@
 
   /**
    * Deletes an object from the file system
-   * @param bucket
-   * @param key
-   * @param version
    */
-  async deleteObject(bucket: string, key: string, version: string | undefined): Promise<void> {
+  async remove(input: RemoveObjectInput): Promise<void> {
+    const { bucket, key, version } = input
     try {
       const file = this.resolveSecurePath(withOptionalVersion(`${bucket}/${key}`, version))
       await fs.remove(file)
@@ -231,21 +215,11 @@
 
   /**
    * Copies an existing object to the given location
-   * @param bucket
-   * @param source
-   * @param version
-   * @param destination
-   * @param destinationVersion
-   * @param metadata
    */
-  async copyObject(
-    bucket: string,
-    source: string,
-    version: string | undefined,
-    destination: string,
-    destinationVersion: string,
-    metadata: { cacheControl?: string; contentType?: string }
+  async copy(
+    input: CopyObjectInput
   ): Promise<Pick<ObjectMetadata, 'httpStatusCode' | 'eTag' | 'lastModified'>> {
+    const { bucket, source, version, destination, destinationVersion, metadata } = input
     const srcFile = this.resolveSecurePath(withOptionalVersion(`${bucket}/${source}`, version))
     const destFile = this.resolveSecurePath(
       withOptionalVersion(`${bucket}/${destination}`, destinationVersion)
     )
@@ -269,10 +243,9 @@
 
   /**
    * Deletes multiple objects
-   * @param bucket
-   * @param prefixes
    */
-  async deleteObjects(bucket: string, prefixes: string[]): Promise<void> {
+  async removeMany(input: RemoveManyObjectsInput): Promise<void> {
+    const { bucket, prefixes } = input
     const promises = prefixes.map((prefix) => {
       return fs.rm(this.resolveSecurePath(`${bucket}/${prefix}`))
    })
@@ -306,15 +279,9 @@
 
   /**
    * Returns metadata information of a specific object
-   * @param bucket
-   * @param key
-   * @param version
*/ - async headObject( - bucket: string, - key: string, - version: string | undefined - ): Promise { + async stats(input: StatsObjectInput): Promise { + const { bucket, key, version } = input const file = this.resolveSecurePath(withOptionalVersion(`${bucket}/${key}`, version)) const data = await fs.stat(file) @@ -334,19 +301,14 @@ export class FileBackend implements StorageBackendAdapter { } } - async createMultiPartUpload( - bucketName: string, - key: string, - version: string | undefined, - contentType: string, - cacheControl: string - ): Promise { + async createMultiPartUpload(input: CreateMultiPartUploadInput): Promise { + const { bucket, key, version, contentType, cacheControl } = input const uploadId = randomUUID() const multiPartFolder = path.join( this.filePath, 'multiparts', uploadId, - bucketName, + bucket, withOptionalVersion(key, version) ) @@ -357,19 +319,13 @@ export class FileBackend implements StorageBackendAdapter { return uploadId } - async uploadPart( - bucketName: string, - key: string, - version: string, - uploadId: string, - partNumber: number, - body: stream.Readable - ): Promise<{ ETag?: string }> { + async uploadPart(input: UploadPartInput): Promise<{ ETag?: string }> { + const { bucket, key, version, uploadId, partNumber, body } = input const multiPartFolder = path.join( this.filePath, 'multiparts', uploadId, - bucketName, + bucket, withOptionalVersion(key, version) ) @@ -377,7 +333,7 @@ export class FileBackend implements StorageBackendAdapter { const writeStream = fsExtra.createWriteStream(partPath) - await pipeline(body, writeStream) + await pipeline(body as stream.Readable, writeStream) const etag = await fileChecksum(partPath) @@ -387,24 +343,19 @@ export class FileBackend implements StorageBackendAdapter { return { ETag: etag } } - async completeMultipartUpload( - bucketName: string, - key: string, - uploadId: string, - version: string, - parts: UploadPart[] - ): Promise< + async completeMultipartUpload(input: CompleteMultipartUploadInput): Promise< Omit & { location?: string bucket?: string version: string } > { + const { bucket, key, uploadId, version, parts } = input const multiPartFolder = path.join( this.filePath, 'multiparts', uploadId, - bucketName, + bucket, withOptionalVersion(key, version) ) @@ -439,14 +390,14 @@ export class FileBackend implements StorageBackendAdapter { const metadata = JSON.parse(metadataContent) - const uploaded = await this.uploadObject( - bucketName, + const uploaded = await this.write({ + bucket, key, version, - multistream, - metadata.contentType, - metadata.cacheControl - ) + body: multistream, + contentType: metadata.contentType, + cacheControl: metadata.cacheControl, + }) fsExtra.remove(path.join(this.filePath, 'multiparts', uploadId)).catch(() => { // no-op @@ -455,17 +406,13 @@ export class FileBackend implements StorageBackendAdapter { return { version: version, ETag: uploaded.eTag, - bucket: bucketName, - location: `${bucketName}/${key}`, + bucket: bucket, + location: `${bucket}/${key}`, } } - async abortMultipartUpload( - bucketName: string, - key: string, - uploadId: string, - version?: string - ): Promise { + async abortMultipartUpload(input: AbortMultipartUploadInput): Promise { + const { uploadId } = input const multiPartFolder = path.join(this.filePath, 'multiparts', uploadId) await fsExtra.remove(multiPartFolder) @@ -479,32 +426,27 @@ export class FileBackend implements StorageBackendAdapter { } async uploadPartCopy( - storageS3Bucket: string, - key: string, - version: string, - UploadId: string, - PartNumber: 
number, - sourceKey: string, - sourceVersion?: string, - rangeBytes?: { fromByte: number; toByte: number } + input: UploadPartCopyInput ): Promise<{ eTag?: string; lastModified?: Date }> { + const { bucket, key, version, uploadId, partNumber, sourceKey, sourceKeyVersion, bytesRange } = + input const multiPartFolder = path.join( this.filePath, 'multiparts', - UploadId, - storageS3Bucket, + uploadId, + bucket, withOptionalVersion(key, version) ) - const partFilePath = path.join(multiPartFolder, `part-${PartNumber}`) + const partFilePath = path.join(multiPartFolder, `part-${partNumber}`) const sourceFilePath = this.resolveSecurePath( - `${storageS3Bucket}/${withOptionalVersion(sourceKey, sourceVersion)}` + `${bucket}/${withOptionalVersion(sourceKey, sourceKeyVersion)}` ) const platform = process.platform == 'darwin' ? 'darwin' : 'linux' - const readStreamOptions = rangeBytes - ? { start: rangeBytes.fromByte, end: rangeBytes.toByte } + const readStreamOptions = bytesRange + ? { start: bytesRange.fromByte, end: bytesRange.toByte } : {} const partStream = fs.createReadStream(sourceFilePath, readStreamOptions) @@ -524,11 +466,9 @@ export class FileBackend implements StorageBackendAdapter { /** * Returns a private url that can only be accessed internally by the system - * @param bucket - * @param key - * @param version */ - async privateAssetUrl(bucket: string, key: string, version: string | undefined): Promise { + async tempPrivateAccessUrl(input: TempPrivateAccessUrlInput): Promise { + const { bucket, key, version } = input return 'local:///' + this.resolveSecurePath(withOptionalVersion(`${bucket}/${key}`, version)) } @@ -613,9 +553,6 @@ export class FileBackend implements StorageBackendAdapter { } } catch { // Ignore errors during cleanup to not affect main operations - // Could be permission issues, concurrent access, directory not empty due to race conditions, etc. - // Optional: Log for debugging purposes (uncomment if needed) - // console.debug('Directory cleanup failed:', dirPath, e.message) } } diff --git a/src/storage/backend/file/index.ts b/src/storage/backend/file/index.ts new file mode 100644 index 000000000..ee3dbbd3f --- /dev/null +++ b/src/storage/backend/file/index.ts @@ -0,0 +1 @@ +export * from './file-adapter' diff --git a/src/storage/backend/index.ts b/src/storage/backend/index.ts index 46e5b5a40..131e33c30 100644 --- a/src/storage/backend/index.ts +++ b/src/storage/backend/index.ts @@ -1,6 +1,6 @@ import { StorageBackendAdapter } from './adapter' -import { FileBackend } from './file' -import { S3Backend, S3ClientOptions } from './s3/adapter' +import { FileAdapter } from './file' +import { S3Adapter, S3ClientOptions } from './s3' import { getConfig, StorageBackendType } from '../../config' export * from './s3' @@ -21,7 +21,7 @@ export function createStorageBackend( let storageBackend: StorageBackendAdapter if (type === 'file') { - storageBackend = new FileBackend() + storageBackend = new FileAdapter() } else { const defaultOptions: S3ClientOptions = { region: storageS3Region, @@ -30,7 +30,7 @@ export function createStorageBackend( requestTimeout: storageS3ClientTimeout, ...(config ? 
config : {}), } - storageBackend = new S3Backend(defaultOptions) + storageBackend = new S3Adapter(defaultOptions) } return storageBackend diff --git a/src/storage/backend/s3/index.ts b/src/storage/backend/s3/index.ts index 32544c6df..245923fb6 100644 --- a/src/storage/backend/s3/index.ts +++ b/src/storage/backend/s3/index.ts @@ -1 +1 @@ -export * from './adapter' +export * from './s3-adapter' diff --git a/src/storage/backend/s3/adapter.ts b/src/storage/backend/s3/s3-adapter.ts similarity index 75% rename from src/storage/backend/s3/adapter.ts rename to src/storage/backend/s3/s3-adapter.ts index 918edcab2..4e8661431 100644 --- a/src/storage/backend/s3/adapter.ts +++ b/src/storage/backend/s3/s3-adapter.ts @@ -19,11 +19,23 @@ import { Progress, Upload } from '@aws-sdk/lib-storage' import { NodeHttpHandler } from '@smithy/node-http-handler' import { StorageBackendAdapter, - BrowserCacheHeaders, ObjectMetadata, ObjectResponse, withOptionalVersion, UploadPart, + ListObjectsInput, + ReadObjectInput, + WriteObjectInput, + RemoveObjectInput, + CopyObjectInput, + RemoveManyObjectsInput, + StatsObjectInput, + TempPrivateAccessUrlInput, + CreateMultiPartUploadInput, + UploadPartInput, + CompleteMultipartUploadInput, + AbortMultipartUploadInput, + UploadPartCopyInput, } from './../adapter' import { getSignedUrl } from '@aws-sdk/s3-request-presigner' import { ERRORS, StorageBackendError } from '@internal/errors' @@ -31,7 +43,7 @@ import { getConfig } from '../../../config' import { Readable } from 'node:stream' import { createAgent, InstrumentedAgent } from '@internal/http' import { monitorStream } from '@internal/streams' -import { BackupObjectInfo, ObjectBackup } from '@storage/backend/s3/backup' +import { BackupObjectInfo, ObjectBackup } from '@storage/backend/s3/s3-backup' const { storageS3UploadQueueSize, tracingFeatures, storageS3MaxSockets, tracingEnabled } = getConfig() @@ -48,10 +60,10 @@ export interface S3ClientOptions { } /** - * S3Backend + * S3Adapter * Interacts with a s3-compatible file system with this S3Adapter */ -export class S3Backend implements StorageBackendAdapter { +export class S3Adapter implements StorageBackendAdapter { client: S3Client agent: InstrumentedAgent @@ -76,29 +88,19 @@ export class S3Backend implements StorageBackendAdapter { /** * Gets an object body and metadata - * @param bucketName - * @param key - * @param version - * @param headers - * @param signal */ - async getObject( - bucketName: string, - key: string, - version: string | undefined, - headers?: BrowserCacheHeaders, - signal?: AbortSignal - ): Promise { - const input: GetObjectCommandInput = { - Bucket: bucketName, + async read(input: ReadObjectInput): Promise { + const { bucket, key, version, headers, signal } = input + const commandInput: GetObjectCommandInput = { + Bucket: bucket, IfNoneMatch: headers?.ifNoneMatch, Key: withOptionalVersion(key, version), Range: headers?.range, } if (headers?.ifModifiedSince) { - input.IfModifiedSince = new Date(headers.ifModifiedSince) + commandInput.IfModifiedSince = new Date(headers.ifModifiedSince) } - const command = new GetObjectCommand(input) + const command = new GetObjectCommand(commandInput) const data = await this.client.send(command, { abortSignal: signal, }) @@ -121,36 +123,24 @@ export class S3Backend implements StorageBackendAdapter { /** * Uploads and store an object - * @param bucketName - * @param key - * @param version - * @param body - * @param contentType - * @param cacheControl - * @param signal */ - async uploadObject( - bucketName: string, - 
key: string, - version: string | undefined, - body: Readable, - contentType: string, - cacheControl: string, - signal?: AbortSignal - ): Promise { + async write(input: WriteObjectInput): Promise { + const { bucket, key, version, body, contentType, cacheControl, signal } = input + if (signal?.aborted) { throw ERRORS.Aborted('Upload was aborted') } - const dataStream = tracingFeatures?.upload ? monitorStream(body) : body + const readableBody = body as Readable + const dataStream = tracingFeatures?.upload ? monitorStream(readableBody) : readableBody const upload = new Upload({ client: this.client, queueSize: storageS3UploadQueueSize, params: { - Bucket: bucketName, + Bucket: bucket, Key: withOptionalVersion(key, version), - Body: dataStream, + Body: dataStream as Readable, ContentType: contentType, CacheControl: cacheControl, }, @@ -164,7 +154,7 @@ export class S3Backend implements StorageBackendAdapter { hasUploadedBytes = true } if (tracingFeatures?.upload) { - dataStream.emit('s3_progress', JSON.stringify(progress)) + ;(dataStream as any).emit('s3_progress', JSON.stringify(progress)) } } upload.on('httpUploadProgress', progressHandler) @@ -178,7 +168,7 @@ export class S3Backend implements StorageBackendAdapter { // Only call head for objects that are > 0 bytes // for some reason headObject can take a really long time to resolve on zero byte uploads, this was causing requests to timeout const metadata = hasUploadedBytes - ? await this.headObject(bucketName, key, version) + ? await this.stats({ bucket, key, version }) : { httpStatusCode: 200, eTag: data.ETag || '', @@ -211,11 +201,9 @@ export class S3Backend implements StorageBackendAdapter { /** * Deletes an object - * @param bucket - * @param key - * @param version */ - async deleteObject(bucket: string, key: string, version: string | undefined): Promise { + async remove(input: RemoveObjectInput): Promise { + const { bucket, key, version } = input const command = new DeleteObjectCommand({ Bucket: bucket, Key: withOptionalVersion(key, version), @@ -225,28 +213,11 @@ export class S3Backend implements StorageBackendAdapter { /** * Copies an existing object to the given location - * @param bucket - * @param source - * @param version - * @param destination - * @param destinationVersion - * @param metadata - * @param conditions */ - async copyObject( - bucket: string, - source: string, - version: string | undefined, - destination: string, - destinationVersion: string | undefined, - metadata?: { cacheControl?: string; mimetype?: string }, - conditions?: { - ifMatch?: string - ifNoneMatch?: string - ifModifiedSince?: Date - ifUnmodifiedSince?: Date - } + async copy( + input: CopyObjectInput ): Promise> { + const { bucket, source, version, destination, destinationVersion, metadata, conditions } = input try { const command = new CopyObjectCommand({ Bucket: bucket, @@ -271,15 +242,9 @@ export class S3Backend implements StorageBackendAdapter { } async list( - bucket: string, - options?: { - prefix?: string - delimiter?: string - nextToken?: string - startAfter?: string - beforeDate?: Date - } + input: ListObjectsInput ): Promise<{ keys: { name: string; size: number }[]; nextToken?: string }> { + const { bucket, options } = input try { const command = new ListObjectsV2Command({ Bucket: bucket, @@ -321,10 +286,9 @@ export class S3Backend implements StorageBackendAdapter { /** * Deletes multiple objects - * @param bucket - * @param prefixes */ - async deleteObjects(bucket: string, prefixes: string[]): Promise { + async removeMany(input: 
RemoveManyObjectsInput): Promise { + const { bucket, prefixes } = input try { const s3Prefixes = prefixes.map((ele) => { return { Key: ele } @@ -344,15 +308,9 @@ export class S3Backend implements StorageBackendAdapter { /** * Returns metadata information of a specific object - * @param bucket - * @param key - * @param version */ - async headObject( - bucket: string, - key: string, - version: string | undefined - ): Promise { + async stats(input: StatsObjectInput): Promise { + const { bucket, key, version } = input try { const command = new HeadObjectCommand({ Bucket: bucket, @@ -404,30 +362,22 @@ export class S3Backend implements StorageBackendAdapter { /** * Returns a private url that can only be accessed internally by the system - * @param bucket - * @param key - * @param version */ - async privateAssetUrl(bucket: string, key: string, version: string | undefined): Promise { - const input: GetObjectCommandInput = { + async tempPrivateAccessUrl(input: TempPrivateAccessUrlInput): Promise { + const { bucket, key, version } = input + const commandInput: GetObjectCommandInput = { Bucket: bucket, Key: withOptionalVersion(key, version), } - const command = new GetObjectCommand(input) + const command = new GetObjectCommand(commandInput) return getSignedUrl(this.client, command, { expiresIn: 600 }) } - async createMultiPartUpload( - bucketName: string, - key: string, - version: string | undefined, - contentType: string, - cacheControl: string, - metadata?: Record - ) { + async createMultiPartUpload(input: CreateMultiPartUploadInput) { + const { bucket, key, version, contentType, cacheControl, metadata } = input const createMultiPart = new CreateMultipartUploadCommand({ - Bucket: bucketName, + Bucket: bucket, Key: withOptionalVersion(key, version), CacheControl: cacheControl, ContentType: contentType, @@ -448,19 +398,11 @@ export class S3Backend implements StorageBackendAdapter { return resp.UploadId } - async uploadPart( - bucketName: string, - key: string, - version: string, - uploadId: string, - partNumber: number, - body?: string | Uint8Array | Buffer | Readable, - length?: number, - signal?: AbortSignal - ) { + async uploadPart(input: UploadPartInput) { + const { bucket, key, version, uploadId, partNumber, body, length, signal } = input try { const paralellUploadS3 = new UploadPartCommand({ - Bucket: bucketName, + Bucket: bucket, Key: version ? `${key}/${version}` : key, UploadId: uploadId, PartNumber: partNumber, @@ -485,19 +427,14 @@ export class S3Backend implements StorageBackendAdapter { } } - async completeMultipartUpload( - bucketName: string, - key: string, - uploadId: string, - version: string, - parts: UploadPart[], - opts?: { removePrefix?: boolean } - ) { + async completeMultipartUpload(input: CompleteMultipartUploadInput) { + const { bucket, key, uploadId, version, opts } = input + let { parts } = input const keyParts = key.split('/') if (parts.length === 0) { const listPartsInput = new ListPartsCommand({ - Bucket: bucketName, + Bucket: bucket, Key: version ? key + '/' + version : key, UploadId: uploadId, }) @@ -507,7 +444,7 @@ export class S3Backend implements StorageBackendAdapter { } const completeUpload = new CompleteMultipartUploadCommand({ - Bucket: bucketName, + Bucket: bucket, Key: version ? 
key + '/' + version : key, UploadId: uploadId, MultipartUpload: @@ -521,53 +458,47 @@ export class S3Backend implements StorageBackendAdapter { const response = await this.client.send(completeUpload) let location = key - let bucket = bucketName + let resultBucket = bucket if (opts?.removePrefix) { const locationParts = key.split('/') locationParts.shift() // tenant-id - bucket = keyParts.shift() || '' + resultBucket = keyParts.shift() || '' location = keyParts.join('/') } return { version, location: location, - bucket, + bucket: resultBucket, ...response, } } - async abortMultipartUpload(bucketName: string, key: string, uploadId: string): Promise { + async abortMultipartUpload(input: AbortMultipartUploadInput): Promise { + const { bucket, key, uploadId } = input const abortUpload = new AbortMultipartUploadCommand({ - Bucket: bucketName, + Bucket: bucket, Key: key, UploadId: uploadId, }) await this.client.send(abortUpload) } - async uploadPartCopy( - storageS3Bucket: string, - key: string, - version: string, - UploadId: string, - PartNumber: number, - sourceKey: string, - sourceKeyVersion?: string, - bytesRange?: { fromByte: number; toByte: number } - ) { - const uploadPartCopy = new UploadPartCopyCommand({ - Bucket: storageS3Bucket, + async uploadPartCopy(input: UploadPartCopyInput) { + const { bucket, key, version, uploadId, partNumber, sourceKey, sourceKeyVersion, bytesRange } = + input + const uploadPartCopyCmd = new UploadPartCopyCommand({ + Bucket: bucket, Key: withOptionalVersion(key, version), - UploadId, - PartNumber, - CopySource: `${storageS3Bucket}/${withOptionalVersion(sourceKey, sourceKeyVersion)}`, + UploadId: uploadId, + PartNumber: partNumber, + CopySource: `${bucket}/${withOptionalVersion(sourceKey, sourceKeyVersion)}`, CopySourceRange: bytesRange ? 
`bytes=${bytesRange.fromByte}-${bytesRange.toByte}` : undefined,
     })
 
-    const part = await this.client.send(uploadPartCopy)
+    const part = await this.client.send(uploadPartCopyCmd)
 
     return {
       eTag: part.CopyPartResult?.ETag,
diff --git a/src/storage/backend/s3/backup.ts b/src/storage/backend/s3/s3-backup.ts
similarity index 100%
rename from src/storage/backend/s3/backup.ts
rename to src/storage/backend/s3/s3-backup.ts
diff --git a/src/storage/cdn/cdn-cache-manager.ts b/src/storage/cdn/cdn-cache-manager.ts
index b6f819320..ce9fdf970 100644
--- a/src/storage/cdn/cdn-cache-manager.ts
+++ b/src/storage/cdn/cdn-cache-manager.ts
@@ -38,7 +38,7 @@ export class CdnCacheManager {
     }
 
     // Check if object exists
-    await this.storage.from(opts.bucket).asSuperUser().findObject(opts.objectName)
+    await this.storage.from(opts.bucket).asSuperUser().findObject({ objectName: opts.objectName })
 
     // Purge cache
     try {
diff --git a/src/storage/database/adapter.ts b/src/storage/database/adapter.ts
index 8cba19eb6..72238eb39 100644
--- a/src/storage/database/adapter.ts
+++ b/src/storage/database/adapter.ts
@@ -3,6 +3,10 @@ import { ObjectMetadata } from '../backend'
 import { TenantConnection } from '@internal/database'
 import { DBMigration } from '@internal/database/migrations'
 
+export interface Cancellable {
+  signal?: AbortSignal
+}
+
 export interface SearchObjectOption {
   search?: string
   sortBy?: {
@@ -33,6 +37,7 @@ export interface TransactionOptions {
   retry?: number
   readOnly?: boolean
   timeout?: number
+  signal?: AbortSignal
 }
 
 export interface DatabaseOptions<TNX> {
@@ -43,6 +48,7 @@ export interface DatabaseOptions<TNX> {
   tnx?: TNX
   parentTnx?: TNX
   parentConnection?: TenantConnection
+  signal?: AbortSignal
 }
 
 export interface ListBucketOptions {
@@ -53,6 +59,199 @@ export interface ListBucketOptions {
   search?: string
 }
 
+// --- Database method input interfaces ---
+
+export interface FindBucketByIdInput extends Cancellable {
+  bucketId: string
+  columns?: string
+  filters?: FindBucketFilters
+}
+
+export interface CountObjectsInBucketInput extends Cancellable {
+  bucketId: string
+  limit?: number
+}
+
+export interface DbDeleteBucketInput extends Cancellable {
+  bucketId: string | string[]
+}
+
+export interface ListObjectsInput extends Cancellable {
+  bucketId: string
+  columns?: string
+  limit?: number
+  before?: Date
+  nextToken?: string
+}
+
+export interface ListObjectsV2Input extends Cancellable {
+  bucketId: string
+  options?: {
+    prefix?: string
+    delimiter?: string
+    nextToken?: string
+    maxKeys?: number
+    startAfter?: string
+    sortBy?: {
+      order?: string
+      column?: string
+      after?: string
+    }
+  }
+}
+
+export interface ListMultipartUploadsInput extends Cancellable {
+  bucketId: string
+  options?: {
+    prefix?: string
+    deltimeter?: string
+    nextUploadToken?: string
+    nextUploadKeyToken?: string
+    maxKeys?: number
+  }
+}
+
+export interface DbListBucketsInput extends Cancellable {
+  columns?: string
+  options?: ListBucketOptions
+}
+
+export interface MustLockObjectInput extends Cancellable {
+  bucketId: string
+  objectName: string
+  version?: string
+}
+
+export interface WaitObjectLockInput extends Cancellable {
+  bucketId: string
+  objectName: string
+  version?: string
+  timeout?: number
+}
+
+export interface DbUpdateBucketInput extends Cancellable {
+  bucketId: string
+  fields: Pick<Bucket, 'public' | 'file_size_limit' | 'allowed_mime_types'>
+}
+
+export interface UpdateObjectInput extends Cancellable {
+  bucketId: string
+  name: string
+  data: Pick<Obj, 'owner' | 'metadata' | 'version' | 'name' | 'bucket_id' | 'user_metadata'>
+}
+
+export interface DeleteObjectInput extends Cancellable {
+  bucketId: string
+  objectName: string
+  version?: string
+}
+
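All of these inputs extend `Cancellable`, so a request-scoped `AbortSignal` rides along with each call instead of widening every method signature separately. Below is a minimal sketch of the intended end-to-end flow, assuming the `request.signals.disconnect` decorator used by the route changes in this PR and a hypothetical `request.db` decorator that implements the `Database` interface above.

```ts
import type { FastifyInstance } from 'fastify'

// Assumed decorators: request.signals.disconnect (added in this PR) and
// request.db (a Database implementation); both require module augmentation in real code.
export default async function routes(fastify: FastifyInstance) {
  fastify.delete<{ Params: { bucketId: string } }>(
    '/bucket/:bucketId',
    async (request, reply) => {
      const signal = request.signals.disconnect.signal

      // If the client disconnects mid-request, queries issued with this
      // input can be aborted instead of running to completion.
      await request.db.deleteBucket({ bucketId: request.params.bucketId, signal })

      return reply.status(200).send()
    }
  )
}
```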
+export interface DeleteObjectsInput extends Cancellable { + bucketId: string + objectNames: string[] + by?: keyof Obj +} + +export interface DeleteObjectVersionsInput extends Cancellable { + bucketId: string + objectNames: { name: string; version: string }[] +} + +export interface UpdateObjectMetadataInput extends Cancellable { + bucketId: string + objectName: string + metadata: ObjectMetadata +} + +export interface UpdateObjectOwnerInput extends Cancellable { + bucketId: string + objectName: string + owner?: string +} + +export interface FindObjectsInput extends Cancellable { + bucketId: string + objectNames: string[] + columns?: string +} + +export interface FindObjectVersionsInput extends Cancellable { + bucketId: string + objectNames: { name: string; version: string }[] + columns?: string +} + +export interface FindObjectInput extends Cancellable { + bucketId: string + objectName: string + columns?: string + filters?: FindObjectFilters +} + +export interface SearchObjectsInput extends Cancellable { + bucketId: string + prefix: string + options: SearchObjectOption +} + +export interface CreateMultipartUploadInput extends Cancellable { + uploadId: string + bucketId: string + objectName: string + version: string + signature: string + owner?: string + metadata?: Record +} + +export interface FindMultipartUploadInput extends Cancellable { + uploadId: string + columns?: string + options?: { forUpdate?: boolean } +} + +export interface UpdateMultipartUploadProgressInput extends Cancellable { + uploadId: string + progress: number + signature: string +} + +export interface DeleteMultipartUploadInput extends Cancellable { + uploadId: string +} + +export interface ListPartsInput extends Cancellable { + uploadId: string + options: { afterPart?: string; maxParts: number } +} + +export interface DeleteAnalyticsBucketInput extends Cancellable { + id: string + opts?: { soft: boolean } +} + +export interface ListAnalyticsBucketsInput extends Cancellable { + columns?: string + options?: ListBucketOptions +} + +export interface FindAnalyticsBucketByNameInput extends Cancellable { + name: string +} + +export type CreateBucketInput = Cancellable & + Pick + +export type UpsertObjectInput = Cancellable & + Pick + +export type CreateObjectInput = Cancellable & + Pick + +export type InsertUploadPartInput = Cancellable & S3PartUpload + +export type CreateAnalyticsBucketInput = Cancellable & Pick + export interface Database { tenantHost: string tenantId: string @@ -69,160 +268,79 @@ export interface Database { transactionOptions?: TransactionOptions ): Promise> - testPermission any>(fn: T): Promise>> + testPermission any>( + fn: T, + opts?: { signal?: AbortSignal } + ): Promise>> - createBucket( - data: Pick< - Bucket, - 'id' | 'name' | 'public' | 'owner' | 'file_size_limit' | 'allowed_mime_types' - > - ): Promise> + createBucket(input: CreateBucketInput): Promise> - createAnalyticsBucket(data: Pick): Promise + createAnalyticsBucket(input: CreateAnalyticsBucketInput): Promise findBucketById( - bucketId: string, - columns: string, - filters?: Filters + input: FindBucketByIdInput & { filters?: Filters } ): Promise - countObjectsInBucket(bucketId: string, limit?: number): Promise + countObjectsInBucket(input: CountObjectsInBucketInput): Promise - deleteBucket(bucketId: string | string[]): Promise + deleteBucket(input: DbDeleteBucketInput): Promise - listObjects( - bucketId: string, - columns: string, - limit: number, - before?: Date, - nextToken?: string - ): Promise - - listObjectsV2( - bucketId: string, - 
options?: { - prefix?: string - delimiter?: string - nextToken?: string - maxKeys?: number - startAfter?: string - sortBy?: { - order?: string - column?: string - after?: string - } - } - ): Promise - - listMultipartUploads( - bucketId: string, - options?: { - prefix?: string - deltimeter?: string - nextUploadToken?: string - nextUploadKeyToken?: string - maxKeys?: number - } - ): Promise + listObjects(input: ListObjectsInput): Promise + + listObjectsV2(input: ListObjectsV2Input): Promise - listBuckets(columns: string, options?: ListBucketOptions): Promise - mustLockObject(bucketId: string, objectName: string, version?: string): Promise + listMultipartUploads(input: ListMultipartUploadsInput): Promise - waitObjectLock( - bucketId: string, - objectName: string, - version?: string, - opts?: { timeout?: number } - ): Promise + listBuckets(input: DbListBucketsInput): Promise + mustLockObject(input: MustLockObjectInput): Promise - updateBucket( - bucketId: string, - fields: Pick - ): Promise + waitObjectLock(input: WaitObjectLockInput): Promise - upsertObject( - data: Pick - ): Promise + updateBucket(input: DbUpdateBucketInput): Promise - updateObject( - bucketId: string, - name: string, - data: Pick - ): Promise + upsertObject(input: UpsertObjectInput): Promise - createObject( - data: Pick - ): Promise + updateObject(input: UpdateObjectInput): Promise - deleteObject(bucketId: string, objectName: string, version?: string): Promise + createObject(input: CreateObjectInput): Promise - deleteObjects(bucketId: string, objectNames: string[], by: keyof Obj): Promise + deleteObject(input: DeleteObjectInput): Promise - deleteObjectVersions( - bucketId: string, - objectNames: { name: string; version: string }[] - ): Promise + deleteObjects(input: DeleteObjectsInput): Promise - updateObjectMetadata(bucketId: string, objectName: string, metadata: ObjectMetadata): Promise + deleteObjectVersions(input: DeleteObjectVersionsInput): Promise - updateObjectOwner(bucketId: string, objectName: string, owner?: string): Promise + updateObjectMetadata(input: UpdateObjectMetadataInput): Promise - findObjects(bucketId: string, objectNames: string[], columns: string): Promise + updateObjectOwner(input: UpdateObjectOwnerInput): Promise - findObjectVersions( - bucketId: string, - objectNames: { name: string; version: string }[], - columns: string - ): Promise + findObjects(input: FindObjectsInput): Promise + + findObjectVersions(input: FindObjectVersionsInput): Promise findObject( - bucketId: string, - objectName: string, - columns: string, - filters?: Filters + input: FindObjectInput & { filters?: Filters } ): Promise - searchObjects(bucketId: string, prefix: string, options: SearchObjectOption): Promise + searchObjects(input: SearchObjectsInput): Promise healthcheck(): Promise destroyConnection(): Promise - createMultipartUpload( - uploadId: string, - bucketId: string, - objectName: string, - version: string, - signature: string, - owner?: string, - metadata?: Record - ): Promise - - findMultipartUpload( - uploadId: string, - columns: string, - options?: { forUpdate?: boolean } - ): Promise - - updateMultipartUploadProgress( - uploadId: string, - progress: number, - signature: string - ): Promise - - deleteMultipartUpload(uploadId: string): Promise - - insertUploadPart(part: S3PartUpload): Promise - - listParts( - uploadId: string, - options: { afterPart?: string; maxParts: number } - ): Promise - - deleteAnalyticsBucket(id: string, opts?: { soft: boolean }): Promise - listAnalyticsBuckets( - columns: string, - options: 
ListBucketOptions | undefined
-  ): Promise
-  findAnalyticsBucketByName(name: string): Promise
+  createMultipartUpload(input: CreateMultipartUploadInput): Promise<S3MultipartUpload>
+
+  findMultipartUpload(input: FindMultipartUploadInput): Promise<S3MultipartUpload>
+
+  updateMultipartUploadProgress(input: UpdateMultipartUploadProgressInput): Promise<void>
+
+  deleteMultipartUpload(input: DeleteMultipartUploadInput): Promise<void>
+
+  insertUploadPart(input: InsertUploadPartInput): Promise<S3PartUpload>
+
+  listParts(input: ListPartsInput): Promise<S3PartUpload[]>
+
+  deleteAnalyticsBucket(input: DeleteAnalyticsBucketInput): Promise
+  listAnalyticsBuckets(input: ListAnalyticsBucketsInput): Promise
+  findAnalyticsBucketByName(input: FindAnalyticsBucketByNameInput): Promise
 }
diff --git a/src/storage/database/knex.ts b/src/storage/database/knex.ts
index 9d27318ba..842d0f987 100644
--- a/src/storage/database/knex.ts
+++ b/src/storage/database/knex.ts
@@ -7,16 +7,45 @@ import {
   StorageBackendError,
   StorageErrorOptions,
 } from '@internal/errors'
-import { ObjectMetadata } from '../backend'
 import { Knex } from 'knex'
 import {
   Database,
   DatabaseOptions,
-  FindBucketFilters,
   FindObjectFilters,
-  SearchObjectOption,
-  ListBucketOptions,
   TransactionOptions,
+  FindBucketByIdInput,
+  CountObjectsInBucketInput,
+  DbDeleteBucketInput,
+  ListObjectsInput,
+  ListObjectsV2Input,
+  ListMultipartUploadsInput,
+  DbListBucketsInput,
+  MustLockObjectInput,
+  WaitObjectLockInput,
+  DbUpdateBucketInput,
+  UpdateObjectInput,
+  DeleteObjectInput,
+  DeleteObjectsInput,
+  DeleteObjectVersionsInput,
+  UpdateObjectMetadataInput,
+  UpdateObjectOwnerInput,
+  FindObjectsInput,
+  FindObjectVersionsInput,
+  FindObjectInput,
+  SearchObjectsInput,
+  CreateMultipartUploadInput,
+  FindMultipartUploadInput,
+  UpdateMultipartUploadProgressInput,
+  DeleteMultipartUploadInput,
+  ListPartsInput,
+  DeleteAnalyticsBucketInput,
+  ListAnalyticsBucketsInput,
+  FindAnalyticsBucketByNameInput,
+  CreateBucketInput,
+  UpsertObjectInput,
+  CreateObjectInput,
+  InsertUploadPartInput,
+  CreateAnalyticsBucketInput,
 } from './adapter'
 import { DatabaseError } from 'pg'
 import { TenantConnection } from '@internal/database'
@@ -56,16 +85,16 @@ export class StorageKnexDB implements Database {
     opts?: TransactionOptions
   ) {
     const tnx = await this.connection.transactionProvider(this.options.tnx, opts)()
-
+    const localSignal = opts?.signal || this.options.signal
     try {
-      await this.connection.setScope(tnx)
+      await this.connection.setScope(tnx, { signal: localSignal })
 
       tnx.once('query-error', (error, q) => {
         throw DBError.fromDBError(error, q.sql)
       })
 
-      const opts = { ...this.options, tnx }
-      const storageWithTnx = new StorageKnexDB(this.connection, opts)
+      const newOpts = { ...this.options, tnx, signal: localSignal }
+      const storageWithTnx = new StorageKnexDB(this.connection, newOpts)
 
       const result: Awaited<ReturnType<T>> = await fn(storageWithTnx)
       await tnx.commit()
@@ -91,13 +120,13 @@ export class StorageKnexDB implements Database {
     })
   }
 
-  async testPermission<T extends (db: Database) => any>(fn: T) {
+  async testPermission<T extends (db: Database) => any>(fn: T, opts?: { signal?: AbortSignal }) {
     let result: any
     try {
       await this.withTransaction(async (db) => {
         result = await fn(db)
         throw true
-      })
+      }, opts)
     } catch (e) {
       if (e === true) {
         return result
@@ -106,117 +135,130 @@ export class StorageKnexDB implements Database {
     }
   }
 
-  deleteAnalyticsBucket(id: string, opts?: { soft: boolean }): Promise {
-    return this.runQuery('DeleteAnalyticsBucket', async (knex, signal) => {
-      if (opts?.soft) {
-        const softDeleted = await knex
+  deleteAnalyticsBucket(input: DeleteAnalyticsBucketInput): Promise {
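From here on, every `StorageKnexDB` method follows the same pattern: destructure `signal` from its input, pass `{ signal }` through to `runQuery`, and chain `.abortOnSignal(signal)` onto the builder. Neither helper is defined in this diff. The sketch below is an assumed, standalone approximation of the abort behaviour; note that racing the promise only stops waiting for the result, while the project's real helper presumably also cancels the query on the Postgres side.

```ts
// Assumed illustration, not the project's actual knex extension.
async function abortOnSignal<T>(query: PromiseLike<T>, signal?: AbortSignal): Promise<T> {
  if (!signal) return query
  if (signal.aborted) throw new Error('aborted before query started')

  let onAbort!: () => void
  const aborted = new Promise<never>((_, reject) => {
    onAbort = () => reject(new Error('query aborted: client disconnected'))
    signal.addEventListener('abort', onAbort, { once: true })
  })

  try {
    // Whichever settles first wins: the query result or the abort rejection.
    return await Promise.race([query, aborted])
  } finally {
    signal.removeEventListener('abort', onAbort)
  }
}
```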
const { id, opts, signal } = input + return this.runQuery( + 'DeleteAnalyticsBucket', + async (knex) => { + if (opts?.soft) { + const softDeleted = await knex + .from('buckets_analytics') + .where('id', id) + .whereNull('deleted_at') + .update({ deleted_at: new Date() }) + .returning('*') + .abortOnSignal(signal) + + if (softDeleted.length === 0) { + throw ERRORS.NoSuchBucket(id) + } + + return softDeleted[0] + } + + const deleted = await knex .from('buckets_analytics') .where('id', id) - .whereNull('deleted_at') - .update({ deleted_at: new Date() }) + .delete() .returning('*') .abortOnSignal(signal) - if (softDeleted.length === 0) { + if (deleted.length === 0) { throw ERRORS.NoSuchBucket(id) } - return softDeleted[0] - } - - const deleted = await knex - .from('buckets_analytics') - .where('id', id) - .delete() - .returning('*') - .abortOnSignal(signal) - - if (deleted.length === 0) { - throw ERRORS.NoSuchBucket(id) - } - - return deleted[0] - }) + return deleted[0] + }, + { signal } + ) } - listAnalyticsBuckets( - columns: string, - options: ListBucketOptions | undefined - ): Promise { - return this.runQuery('ListIcebergBuckets', async (knex, signal) => { - const query = knex - .from('buckets_analytics') - .select(columns.split(',').map((c) => c.trim())) - .whereNull('deleted_at') - - if (options?.search !== undefined && options.search.length > 0) { - query.where('name', 'like', `%${options.search}%`) - } + listAnalyticsBuckets(input: ListAnalyticsBucketsInput): Promise { + const { columns = 'id', options, signal } = input + return this.runQuery( + 'ListIcebergBuckets', + async (knex) => { + const query = knex + .from('buckets_analytics') + .select(columns.split(',').map((c) => c.trim())) + .whereNull('deleted_at') - if (options?.sortColumn !== undefined) { - query.orderBy(options.sortColumn, options.sortOrder || 'asc') - } else { - query.orderBy('name', 'asc') - } + if (options?.search !== undefined && options.search.length > 0) { + query.where('name', 'like', `%${options.search}%`) + } - if (options?.limit !== undefined) { - query.limit(options.limit) - } + if (options?.sortColumn !== undefined) { + query.orderBy(options.sortColumn, options.sortOrder || 'asc') + } else { + query.orderBy('name', 'asc') + } - if (options?.offset !== undefined) { - query.offset(options.offset) - } + if (options?.limit !== undefined) { + query.limit(options.limit) + } - return query.abortOnSignal(signal) - }) + if (options?.offset !== undefined) { + query.offset(options.offset) + } + + return query.abortOnSignal(signal) + }, + { signal } + ) } - findAnalyticsBucketByName(name: string) { - return this.runQuery('FindAnalyticsBucketByName', async (knex, signal) => { - const icebergBucket = await knex - .from('buckets_analytics') - .select('*') - .where('name', name) - .whereNull('deleted_at') - .first() - .abortOnSignal(signal) - - if (!icebergBucket) { - throw ERRORS.NoSuchBucket(name) - } + findAnalyticsBucketByName(input: FindAnalyticsBucketByNameInput) { + const { name, signal } = input + return this.runQuery( + 'FindAnalyticsBucketByName', + async (knex) => { + const icebergBucket = await knex + .from('buckets_analytics') + .select('*') + .where('name', name) + .whereNull('deleted_at') + .first() + .abortOnSignal(signal) - return icebergBucket - }) + if (!icebergBucket) { + throw ERRORS.NoSuchBucket(name) + } + + return icebergBucket + }, + { signal } + ) } - createAnalyticsBucket(data: Pick): Promise { + createAnalyticsBucket(input: CreateAnalyticsBucketInput): Promise { + const { signal, ...data } = 
input const bucketData: Pick = { name: data.name, } - return this.runQuery('CreateAnalyticsBucket', async (knex, signal) => { - const icebergBucket = await knex - .from('buckets_analytics') - .insert(bucketData) - .onConflict(knex.raw('(name) WHERE deleted_at IS NULL')) - .ignore() - .returning('*') - .abortOnSignal(signal) - - if (icebergBucket.length === 0) { - throw ERRORS.ResourceAlreadyExists() - } + return this.runQuery( + 'CreateAnalyticsBucket', + async (knex) => { + const icebergBucket = await knex + .from('buckets_analytics') + .insert(bucketData) + .onConflict(knex.raw('(name) WHERE deleted_at IS NULL')) + .ignore() + .returning('*') + .abortOnSignal(signal) - return icebergBucket[0] - }) + if (icebergBucket.length === 0) { + throw ERRORS.ResourceAlreadyExists() + } + + return icebergBucket[0] + }, + { signal } + ) } - async createBucket( - data: Pick< - Bucket, - 'id' | 'name' | 'public' | 'owner' | 'file_size_limit' | 'allowed_mime_types' | 'type' - > - ) { + async createBucket(input: CreateBucketInput) { + const { signal, ...data } = input const bucketData: Bucket = { id: data.id, name: data.name, @@ -232,9 +274,13 @@ export class StorageKnexDB implements Database { } try { - const rowCount = await this.runQuery('CreateBucket', async (knex, signal) => { - return knex.from('buckets').insert(bucketData).abortOnSignal(signal) - }) + const rowCount = await this.runQuery( + 'CreateBucket', + async (knex) => { + return knex.from('buckets').insert(bucketData).abortOnSignal(signal) + }, + { signal } + ) if (!rowCount || rowCount[0] === 0) { throw ERRORS.NoSuchBucket(data.id) @@ -249,32 +295,37 @@ export class StorageKnexDB implements Database { } } - async findBucketById(bucketId: string, columns = 'id', filters?: FindBucketFilters) { - const result = await this.runQuery('FindBucketById', async (knex, signal) => { - let columnNames = columns.split(',') + async findBucketById(input: FindBucketByIdInput) { + const { bucketId, columns = 'id', filters, signal } = input + const result = await this.runQuery( + 'FindBucketById', + async (knex) => { + let columnNames = columns.split(',') - if (!(await tenantHasMigrations(this.tenantId, 'iceberg-catalog-flag-on-buckets'))) { - columnNames = columnNames.filter((name) => { - return name.trim() !== 'type' - }) - } + if (!(await tenantHasMigrations(this.tenantId, 'iceberg-catalog-flag-on-buckets'))) { + columnNames = columnNames.filter((name) => { + return name.trim() !== 'type' + }) + } - const query = knex.from('buckets').select(columnNames).where('id', bucketId) + const query = knex.from('buckets').select(columnNames).where('id', bucketId) - if (typeof filters?.isPublic !== 'undefined') { - query.where('public', filters.isPublic) - } + if (typeof filters?.isPublic !== 'undefined') { + query.where('public', filters.isPublic) + } - if (filters?.forUpdate) { - query.forUpdate() - } + if (filters?.forUpdate) { + query.forUpdate() + } - if (filters?.forShare) { - query.forShare() - } + if (filters?.forShare) { + query.forShare() + } - return query.abortOnSignal(signal).first() as Promise - }) + return query.abortOnSignal(signal).first() as Promise + }, + { signal } + ) if (!result && !filters?.dontErrorOnEmpty) { throw ERRORS.NoSuchBucket(bucketId) @@ -283,294 +334,301 @@ export class StorageKnexDB implements Database { return result } - async countObjectsInBucket(bucketId: string, limit?: number): Promise { + async countObjectsInBucket(input: CountObjectsInBucketInput): Promise { + const { bucketId, limit, signal } = input // if we have a limit 
use select to only scan up to that limit if (limit !== undefined) { - const result = await this.runQuery('CountObjectsInBucketWithLimit', (knex, signal) => { - return knex - .from('objects') - .where('bucket_id', bucketId) - .limit(limit) - .select(knex.raw('1')) - .abortOnSignal(signal) - }) + const result = await this.runQuery( + 'CountObjectsInBucketWithLimit', + (knex) => { + return knex + .from('objects') + .where('bucket_id', bucketId) + .limit(limit) + .select(knex.raw('1')) + .abortOnSignal(signal) + }, + { signal } + ) return result.length } // do full count if there is no limit - const result = await this.runQuery('CountObjectsInBucket', (knex, signal) => { - return knex - .from('objects') - .where('bucket_id', bucketId) - .count() - .abortOnSignal(signal) - .first<{ count: number }>() - }) + const result = await this.runQuery( + 'CountObjectsInBucket', + (knex) => { + return knex + .from('objects') + .where('bucket_id', bucketId) + .count() + .abortOnSignal(signal) + .first<{ count: number }>() + }, + { signal } + ) return result?.count || 0 } - async deleteBucket(bucketId: string | string[]) { - return await this.runQuery('DeleteBucket', (knex, signal) => { - return knex('buckets') - .whereIn('id', Array.isArray(bucketId) ? bucketId : [bucketId]) - .delete() - .abortOnSignal(signal) - }) + async deleteBucket(input: DbDeleteBucketInput) { + const { bucketId, signal } = input + return await this.runQuery( + 'DeleteBucket', + (knex) => { + return knex('buckets') + .whereIn('id', Array.isArray(bucketId) ? bucketId : [bucketId]) + .delete() + .abortOnSignal(signal) + }, + { signal } + ) } - async listObjects( - bucketId: string, - columns = 'id', - limit = 10, - before?: Date, - nextToken?: string - ) { - const data = await this.runQuery('ListObjects', (knex, signal) => { - const query = knex - .from('objects') - .select(columns.split(',')) - .where('bucket_id', bucketId) - // @ts-expect-error knex typing is wrong, it doesn't accept a knex raw on orderBy, even though is totally legit - .orderBy(knex.raw('name COLLATE "C"')) - .limit(limit) - - if (before) { - query.andWhere('created_at', '<', before.toISOString()) - } + async listObjects(input: ListObjectsInput) { + const { bucketId, columns = 'id', limit = 10, before, nextToken, signal } = input + const data = await this.runQuery( + 'ListObjects', + (knex) => { + const query = knex + .from('objects') + .select(columns.split(',')) + .where('bucket_id', bucketId) + // @ts-expect-error knex typing is wrong, it doesn't accept a knex raw on orderBy, even though is totally legit + .orderBy(knex.raw('name COLLATE "C"')) + .limit(limit) - if (nextToken) { - query.andWhere(knex.raw('name COLLATE "C" > ?', [nextToken])) - } + if (before) { + query.andWhere('created_at', '<', before.toISOString()) + } - return query.abortOnSignal(signal) as Promise - }) + if (nextToken) { + query.andWhere(knex.raw('name COLLATE "C" > ?', [nextToken])) + } + + return query.abortOnSignal(signal) as Promise + }, + { signal } + ) return data } - async listObjectsV2( - bucketId: string, - options?: { - prefix?: string - delimiter?: string - nextToken?: string - maxKeys?: number - startAfter?: string - sortBy?: { - order?: string - column?: string - after?: string - } - } - ) { - return this.runQuery('ListObjectsV2', async (knex, signal) => { - if (!options?.delimiter) { - const query = knex - .table('objects') - .where('bucket_id', bucketId) - .select(['id', 'name', 'metadata', 'updated_at', 'created_at', 'last_accessed_at']) - .limit(options?.maxKeys || 100) - - 
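The `listObjects` implementation above pages with a keyset cursor rather than `OFFSET`: rows are ordered by `name COLLATE "C"` (byte-wise ordering, matching S3 key order) and the next page starts strictly after the last returned name. A hedged, self-contained illustration of that cursor logic follows; table and column names mirror the code above, the wiring around them is assumed.

```ts
import { knex as createKnex } from 'knex'

const db = createKnex({ client: 'pg', connection: process.env.DATABASE_URL })

async function listPage(bucketId: string, nextToken?: string, limit = 10) {
  const query = db
    .from('objects')
    .select('id', 'name')
    .where('bucket_id', bucketId)
    .limit(limit)

  // knex typings reject a raw orderBy; same @ts-ignore workaround as in the diff
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-ignore
  query.orderBy(db.raw('name COLLATE "C"'))

  if (nextToken) {
    // resume strictly after the last key of the previous page
    query.andWhere(db.raw('name COLLATE "C" > ?', [nextToken]))
  }

  const rows: { id: string; name: string }[] = await query
  return {
    rows,
    // a full page implies there may be more; its last name becomes the next cursor
    nextToken: rows.length === limit ? rows[rows.length - 1].name : undefined,
  }
}
```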
// only allow these values for sort columns, "name" is excluded intentionally as it is the default and used as tie breaker when sorting by other columns - const allowedSortColumns = new Set(['updated_at', 'created_at']) - const allowedSortOrders = new Set(['asc', 'desc']) - const sortColumn = - options?.sortBy?.column && allowedSortColumns.has(options.sortBy.column) - ? options.sortBy.column - : undefined - const sortOrder = - options?.sortBy?.order && allowedSortOrders.has(options.sortBy.order) - ? options.sortBy.order - : 'asc' - - if (sortColumn) { - query.orderBy(sortColumn, sortOrder) - } - // knex typing is wrong, it doesn't accept a knex.raw on orderBy, even though is totally legit - // eslint-disable-next-line @typescript-eslint/ban-ts-comment - // @ts-ignore - query.orderBy(knex.raw(`name COLLATE "C"`), sortOrder) + async listObjectsV2(input: ListObjectsV2Input) { + const { bucketId, options, signal } = input + return this.runQuery( + 'ListObjectsV2', + async (knex) => { + if (!options?.delimiter) { + const query = knex + .table('objects') + .where('bucket_id', bucketId) + .select(['id', 'name', 'metadata', 'updated_at', 'created_at', 'last_accessed_at']) + .limit(options?.maxKeys || 100) + + // only allow these values for sort columns, "name" is excluded intentionally as it is the default and used as tie breaker when sorting by other columns + const allowedSortColumns = new Set(['updated_at', 'created_at']) + const allowedSortOrders = new Set(['asc', 'desc']) + const sortColumn = + options?.sortBy?.column && allowedSortColumns.has(options.sortBy.column) + ? options.sortBy.column + : undefined + const sortOrder = + options?.sortBy?.order && allowedSortOrders.has(options.sortBy.order) + ? options.sortBy.order + : 'asc' + + if (sortColumn) { + query.orderBy(sortColumn, sortOrder) + } + // knex typing is wrong, it doesn't accept a knex.raw on orderBy, even though is totally legit + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + query.orderBy(knex.raw(`name COLLATE "C"`), sortOrder) - if (options?.prefix) { - query.where('name', 'like', `${options.prefix}%`) - } + if (options?.prefix) { + query.where('name', 'like', `${options.prefix}%`) + } - if (options?.startAfter && !options?.nextToken) { - query.andWhere(knex.raw(`name COLLATE "C" > ?`, [options.startAfter])) - } + if (options?.startAfter && !options?.nextToken) { + query.andWhere(knex.raw(`name COLLATE "C" > ?`, [options.startAfter])) + } - if (options?.nextToken) { - const pageOperator = sortOrder === 'asc' ? '>' : '<' - if (sortColumn && options.sortBy?.after) { - query.andWhere( - knex.raw( - `ROW(date_trunc('milliseconds', ${sortColumn}), name COLLATE "C") ${pageOperator} ROW(COALESCE(NULLIF(?, '')::timestamptz, 'epoch'::timestamptz), ?)`, - [options.sortBy.after, options.nextToken] + if (options?.nextToken) { + const pageOperator = sortOrder === 'asc' ? 
'>' : '<' + if (sortColumn && options.sortBy?.after) { + query.andWhere( + knex.raw( + `ROW(date_trunc('milliseconds', ${sortColumn}), name COLLATE "C") ${pageOperator} ROW(COALESCE(NULLIF(?, '')::timestamptz, 'epoch'::timestamptz), ?)`, + [options.sortBy.after, options.nextToken] + ) ) - ) - } else { - query.andWhere(knex.raw(`name COLLATE "C" ${pageOperator} ?`, [options.nextToken])) + } else { + query.andWhere(knex.raw(`name COLLATE "C" ${pageOperator} ?`, [options.nextToken])) + } } - } - return query.abortOnSignal(signal) - } + return query.abortOnSignal(signal) + } - let useNewSearchVersion2 = true + let useNewSearchVersion2 = true - if (isMultitenant) { - useNewSearchVersion2 = await tenantHasMigrations(this.tenantId, 'search-v2') - } + if (isMultitenant) { + useNewSearchVersion2 = await tenantHasMigrations(this.tenantId, 'search-v2') + } - if (useNewSearchVersion2 && options?.delimiter === '/') { - let paramPlaceholders = '?,?,?,?,?' - const sortParams: (string | null)[] = [] - // this migration adds 3 more parameters to search v2 support sorting - // 'search-v2-optimised' also implies sort support (it's a newer migration) - const hasSortSupport = - (await tenantHasMigrations(this.tenantId, 'add-search-v2-sort-support')) || - (await tenantHasMigrations(this.tenantId, 'search-v2-optimised')) - if (hasSortSupport) { - paramPlaceholders += ',?,?,?' - sortParams.push( - options?.sortBy?.order || 'asc', - options?.sortBy?.column || 'name', - options?.sortBy?.after || null - ) + if (useNewSearchVersion2 && options?.delimiter === '/') { + let paramPlaceholders = '?,?,?,?,?' + const sortParams: (string | null)[] = [] + // this migration adds 3 more parameters to search v2 support sorting + // 'search-v2-optimised' also implies sort support (it's a newer migration) + const hasSortSupport = + (await tenantHasMigrations(this.tenantId, 'add-search-v2-sort-support')) || + (await tenantHasMigrations(this.tenantId, 'search-v2-optimised')) + if (hasSortSupport) { + paramPlaceholders += ',?,?,?' + sortParams.push( + options?.sortBy?.order || 'asc', + options?.sortBy?.column || 'name', + options?.sortBy?.after || null + ) + } + const levels = !options?.prefix ? 1 : options.prefix.split('/').length + const searchParams = [ + options?.prefix || '', + bucketId, + options?.maxKeys || 1000, + levels, + options?.startAfter || '', + ...sortParams, + ] + const result = await knex + .raw(`select * from storage.search_v2(${paramPlaceholders})`, searchParams) + .abortOnSignal(signal) + return result.rows } - const levels = !options?.prefix ? 
1 : options.prefix.split('/').length - const searchParams = [ - options?.prefix || '', - bucketId, - options?.maxKeys || 1000, - levels, - options?.startAfter || '', - ...sortParams, - ] + const result = await knex - .raw(`select * from storage.search_v2(${paramPlaceholders})`, searchParams) + .raw('select * from storage.list_objects_with_delimiter(?,?,?,?,?,?)', [ + bucketId, + options?.prefix, + options?.delimiter, + options?.maxKeys, + options?.startAfter || '', + options?.nextToken || '', + ]) .abortOnSignal(signal) return result.rows - } - - const result = await knex - .raw('select * from storage.list_objects_with_delimiter(?,?,?,?,?,?)', [ - bucketId, - options?.prefix, - options?.delimiter, - options?.maxKeys, - options?.startAfter || '', - options?.nextToken || '', - ]) - .abortOnSignal(signal) - return result.rows - }) + }, + { signal } + ) } - async listBuckets(columns = 'id', options?: ListBucketOptions) { - const data = await this.runQuery('ListBuckets', async (knex, signal) => { - const columnNames = columns.split(',').map((c) => c.trim()) + async listBuckets(input: DbListBucketsInput) { + const { columns = 'id', options, signal } = input + const data = await this.runQuery( + 'ListBuckets', + async (knex) => { + const columnNames = columns.split(',').map((c) => c.trim()) - const selectColumns = columnNames.filter((name) => { - return name !== 'type' - }) + const selectColumns = columnNames.filter((name) => { + return name !== 'type' + }) - if (columnNames.includes('type')) { - selectColumns.push(knex.raw("'STANDARD' as type") as unknown as string) - } + if (columnNames.includes('type')) { + selectColumns.push(knex.raw("'STANDARD' as type") as unknown as string) + } - const query = knex.from('buckets').select(selectColumns) + const query = knex.from('buckets').select(selectColumns) - if (options?.search !== undefined && options.search.length > 0) { - query.where('name', 'ilike', `%${options.search}%`) - } + if (options?.search !== undefined && options.search.length > 0) { + query.where('name', 'ilike', `%${options.search}%`) + } - if (options?.sortColumn !== undefined) { - query.orderBy(options.sortColumn, options.sortOrder || 'asc') - } + if (options?.sortColumn !== undefined) { + query.orderBy(options.sortColumn, options.sortOrder || 'asc') + } - if (options?.limit !== undefined) { - query.limit(options.limit) - } + if (options?.limit !== undefined) { + query.limit(options.limit) + } - if (options?.offset !== undefined) { - query.offset(options.offset) - } + if (options?.offset !== undefined) { + query.offset(options.offset) + } - return query.abortOnSignal(signal) - }) + return query.abortOnSignal(signal) + }, + { signal } + ) return data as Bucket[] } - listMultipartUploads( - bucketId: string, - options?: { - prefix?: string - deltimeter?: string - nextUploadToken?: string - nextUploadKeyToken?: string - maxKeys?: number - } - ) { - return this.runQuery('ListMultipartsUploads', async (knex, signal) => { - if (!options?.deltimeter) { - const query = knex - .table('s3_multipart_uploads') - .select(['id', 'key', 'created_at']) - .where('bucket_id', bucketId) - .limit(options?.maxKeys || 100) - - // knex typing is wrong, it doesn't accept a knex.raw on orderBy, even though is totally legit - // eslint-disable-next-line @typescript-eslint/ban-ts-comment - // @ts-ignore - query.orderBy(knex.raw('key COLLATE "C", created_at')) + listMultipartUploads(input: ListMultipartUploadsInput) { + const { bucketId, options, signal } = input + return this.runQuery( + 'ListMultipartsUploads', 
+ async (knex) => { + if (!options?.deltimeter) { + const query = knex + .table('s3_multipart_uploads') + .select(['id', 'key', 'created_at']) + .where('bucket_id', bucketId) + .limit(options?.maxKeys || 100) + + // knex typing is wrong, it doesn't accept a knex.raw on orderBy, even though is totally legit + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + query.orderBy(knex.raw('key COLLATE "C", created_at')) + + if (options?.prefix) { + query.where('key', 'ilike', `${options.prefix}%`) + } - if (options?.prefix) { - query.where('key', 'ilike', `${options.prefix}%`) - } + if (options?.nextUploadKeyToken && !options.nextUploadToken) { + query.andWhere(knex.raw(`key COLLATE "C" > ?`, [options?.nextUploadKeyToken])) + } - if (options?.nextUploadKeyToken && !options.nextUploadToken) { - query.andWhere(knex.raw(`key COLLATE "C" > ?`, [options?.nextUploadKeyToken])) - } + if (options?.nextUploadToken) { + query.andWhere(knex.raw('id COLLATE "C" > ?', [options?.nextUploadToken])) + } - if (options?.nextUploadToken) { - query.andWhere(knex.raw('id COLLATE "C" > ?', [options?.nextUploadToken])) + return query.abortOnSignal(signal) } - return query.abortOnSignal(signal) - } - - const result = await knex - .raw('select * from storage.list_multipart_uploads_with_delimiter(?,?,?,?,?,?)', [ - bucketId, - options?.prefix, - options?.deltimeter, - options?.maxKeys, - options?.nextUploadKeyToken || '', - options.nextUploadToken || '', - ]) - .abortOnSignal(signal) - return result.rows - }) + const result = await knex + .raw('select * from storage.list_multipart_uploads_with_delimiter(?,?,?,?,?,?)', [ + bucketId, + options?.prefix, + options?.deltimeter, + options?.maxKeys, + options?.nextUploadKeyToken || '', + options.nextUploadToken || '', + ]) + .abortOnSignal(signal) + return result.rows + }, + { signal } + ) } - async updateBucket( - bucketId: string, - fields: Pick - ) { - const bucket = await this.runQuery('UpdateBucket', (knex, signal) => { - return knex - .from('buckets') - .where('id', bucketId) - .update({ - public: fields.public, - file_size_limit: fields.file_size_limit, - allowed_mime_types: fields.allowed_mime_types, - }) - .abortOnSignal(signal) - }) + async updateBucket(input: DbUpdateBucketInput) { + const { bucketId, fields, signal } = input + const bucket = await this.runQuery( + 'UpdateBucket', + (knex) => { + return knex + .from('buckets') + .where('id', bucketId) + .update({ + public: fields.public, + file_size_limit: fields.file_size_limit, + allowed_mime_types: fields.allowed_mime_types, + }) + .abortOnSignal(signal) + }, + { signal } + ) if (bucket === 0) { throw ERRORS.NoSuchBucket(bucketId) @@ -579,9 +637,8 @@ export class StorageKnexDB implements Database { return } - async upsertObject( - data: Pick - ) { + async upsertObject(input: UpsertObjectInput) { + const { signal, ...data } = input const objectData = this.normalizeColumns({ name: data.name, owner: isUuid(data.owner || '') ? data.owner : undefined, @@ -591,51 +648,56 @@ export class StorageKnexDB implements Database { user_metadata: data.user_metadata, version: data.version, }) - const [object] = await this.runQuery('UpsertObject', (knex, signal) => { - return knex - .from('objects') - .insert(objectData) - .onConflict(['name', 'bucket_id']) - .merge( - this.normalizeColumns({ - metadata: data.metadata, - user_metadata: data.user_metadata, - version: data.version, - owner: isUuid(data.owner || '') ? 
data.owner : undefined, - owner_id: data.owner, - }) - ) - .returning('*') - .abortOnSignal(signal) - }) + const [object] = await this.runQuery( + 'UpsertObject', + (knex) => { + return knex + .from('objects') + .insert(objectData) + .onConflict(['name', 'bucket_id']) + .merge( + this.normalizeColumns({ + metadata: data.metadata, + user_metadata: data.user_metadata, + version: data.version, + owner: isUuid(data.owner || '') ? data.owner : undefined, + owner_id: data.owner, + }) + ) + .returning('*') + .abortOnSignal(signal) + }, + { signal } + ) return object } - async updateObject( - bucketId: string, - name: string, - data: Pick - ) { - const [object] = await this.runQuery('UpdateObject', (knex, signal) => { - return knex - .from('objects') - .where('bucket_id', bucketId) - .where('name', name) - .update( - this.normalizeColumns({ - name: data.name, - bucket_id: data.bucket_id, - owner: isUuid(data.owner || '') ? data.owner : undefined, - owner_id: data.owner, - metadata: data.metadata, - user_metadata: data.user_metadata, - version: data.version, - }), - '*' - ) - .abortOnSignal(signal) - }) + async updateObject(input: UpdateObjectInput) { + const { bucketId, name, data, signal } = input + const [object] = await this.runQuery( + 'UpdateObject', + (knex) => { + return knex + .from('objects') + .where('bucket_id', bucketId) + .where('name', name) + .update( + this.normalizeColumns({ + name: data.name, + bucket_id: data.bucket_id, + owner: isUuid(data.owner || '') ? data.owner : undefined, + owner_id: data.owner, + metadata: data.metadata, + user_metadata: data.user_metadata, + version: data.version, + }), + '*' + ) + .abortOnSignal(signal) + }, + { signal } + ) if (!object) { throw ERRORS.NoSuchKey(name) @@ -644,9 +706,8 @@ export class StorageKnexDB implements Database { return object } - async createObject( - data: Pick - ) { + async createObject(input: CreateObjectInput) { + const { signal, ...data } = input try { const object = this.normalizeColumns({ name: data.name, @@ -657,9 +718,13 @@ export class StorageKnexDB implements Database { version: data.version, user_metadata: data.user_metadata, }) - await this.runQuery('CreateObject', (knex, signal) => { - return knex.from('objects').insert(object).abortOnSignal(signal) - }) + await this.runQuery( + 'CreateObject', + (knex) => { + return knex.from('objects').insert(object).abortOnSignal(signal) + }, + { signal } + ) return object } catch (e) { @@ -670,78 +735,103 @@ export class StorageKnexDB implements Database { } } - async deleteObject(bucketId: string, objectName: string, version?: string) { - const [data] = await this.runQuery('Delete Object', (knex, signal) => { - return knex - .from('objects') - .delete() - .where({ - name: objectName, - bucket_id: bucketId, - ...(version ? { version } : {}), - }) - .returning('*') - .abortOnSignal(signal) - }) + async deleteObject(input: DeleteObjectInput) { + const { bucketId, objectName, version, signal } = input + const [data] = await this.runQuery( + 'Delete Object', + (knex) => { + return knex + .from('objects') + .delete() + .where({ + name: objectName, + bucket_id: bucketId, + ...(version ? 
{ version } : {}), + }) + .returning('*') + .abortOnSignal(signal) + }, + { signal } + ) return data } - async deleteObjects(bucketId: string, objectNames: string[], by: keyof Obj = 'name') { - return this.runQuery('DeleteObjects', (knex, signal) => { - return knex - .from('objects') - .delete() - .where('bucket_id', bucketId) - .whereIn(by, objectNames) - .returning('*') - .abortOnSignal(signal) - }) + async deleteObjects(input: DeleteObjectsInput) { + const { bucketId, objectNames, by = 'name', signal } = input + return this.runQuery( + 'DeleteObjects', + (knex) => { + return knex + .from('objects') + .delete() + .where('bucket_id', bucketId) + .whereIn(by, objectNames) + .returning('*') + .abortOnSignal(signal) + }, + { signal } + ) } - async deleteObjectVersions(bucketId: string, objectNames: { name: string; version: string }[]) { - return this.runQuery('DeleteObjects', (knex, signal) => { - const placeholders = objectNames.map(() => '(?, ?)').join(', ') + async deleteObjectVersions(input: DeleteObjectVersionsInput) { + const { bucketId, objectNames, signal } = input + return this.runQuery( + 'DeleteObjects', + (knex) => { + const placeholders = objectNames.map(() => '(?, ?)').join(', ') - // Step 2: Flatten the array of tuples into a single array of values - const flatParams = objectNames.flatMap(({ name, version }) => [name, version]) + // Step 2: Flatten the array of tuples into a single array of values + const flatParams = objectNames.flatMap(({ name, version }) => [name, version]) - return knex - .from('objects') - .delete() - .where('bucket_id', bucketId) - .whereRaw(`(name, version) IN (${placeholders})`, flatParams) - .returning('*') - .abortOnSignal(signal) - }) + return knex + .from('objects') + .delete() + .where('bucket_id', bucketId) + .whereRaw(`(name, version) IN (${placeholders})`, flatParams) + .returning('*') + .abortOnSignal(signal) + }, + { signal } + ) } - async updateObjectMetadata(bucketId: string, objectName: string, metadata: ObjectMetadata) { - const [object] = await this.runQuery('UpdateObjectMetadata', (knex, signal) => { - return knex - .from('objects') - .update({ metadata }) - .where({ bucket_id: bucketId, name: objectName }) - .returning('*') - .abortOnSignal(signal) - }) + async updateObjectMetadata(input: UpdateObjectMetadataInput) { + const { bucketId, objectName, metadata, signal } = input + const [object] = await this.runQuery( + 'UpdateObjectMetadata', + (knex) => { + return knex + .from('objects') + .update({ metadata }) + .where({ bucket_id: bucketId, name: objectName }) + .returning('*') + .abortOnSignal(signal) + }, + { signal } + ) return object } - async updateObjectOwner(bucketId: string, objectName: string, owner?: string) { - const [object] = await this.runQuery('UpdateObjectOwner', (knex, signal) => { - return knex - .from('objects') - .update({ - last_accessed_at: new Date().toISOString(), - owner: isUuid(owner || '') ? owner : undefined, - owner_id: owner, - }) - .returning('*') - .where({ bucket_id: bucketId, name: objectName }) - .abortOnSignal(signal) - }) + async updateObjectOwner(input: UpdateObjectOwnerInput) { + const { bucketId, objectName, owner, signal } = input + const [object] = await this.runQuery( + 'UpdateObjectOwner', + (knex) => { + return knex + .from('objects') + .update({ + last_accessed_at: new Date().toISOString(), + owner: isUuid(owner || '') ? 
owner : undefined, + owner_id: owner, + }) + .returning('*') + .where({ bucket_id: bucketId, name: objectName }) + .abortOnSignal(signal) + }, + { signal } + ) if (!object) { throw ERRORS.NoSuchKey(objectName) @@ -750,39 +840,39 @@ export class StorageKnexDB implements Database { return object } - async findObject( - bucketId: string, - objectName: string, - columns = 'id', - filters?: FindObjectFilters - ) { - const object = await this.runQuery('FindObject', (knex, signal) => { - const query = knex - .from('objects') - .select(this.normalizeColumns(columns).split(',')) - .where({ - name: objectName, - bucket_id: bucketId, - }) + async findObject(input: FindObjectInput) { + const { bucketId, objectName, columns = 'id', filters, signal } = input + const object = await this.runQuery( + 'FindObject', + (knex) => { + const query = knex + .from('objects') + .select(this.normalizeColumns(columns).split(',')) + .where({ + name: objectName, + bucket_id: bucketId, + }) - if (filters?.forUpdate) { - query.forUpdate() - } + if (filters?.forUpdate) { + query.forUpdate() + } - if (filters?.forShare) { - query.forShare() - } + if (filters?.forShare) { + query.forShare() + } - if (filters?.forKeyShare) { - query.forKeyShare() - } + if (filters?.forKeyShare) { + query.forKeyShare() + } - if (filters?.noWait) { - query.noWait() - } + if (filters?.noWait) { + query.noWait() + } - return query.abortOnSignal(signal).first() as Promise - }) + return query.abortOnSignal(signal).first() as Promise + }, + { signal } + ) if (!object && !filters?.dontErrorOnEmpty) { throw ERRORS.NoSuchKey(objectName) @@ -795,150 +885,172 @@ export class StorageKnexDB implements Database { : Obj } - async findObjects(bucketId: string, objectNames: string[], columns = 'id') { - return this.runQuery('FindObjects', (knex, signal) => { - return knex - .from('objects') - .select(columns) - .where('bucket_id', bucketId) - .whereIn('name', objectNames) - .abortOnSignal(signal) - }) + async findObjects(input: FindObjectsInput) { + const { bucketId, objectNames, columns = 'id', signal } = input + return this.runQuery( + 'FindObjects', + (knex) => { + return knex + .from('objects') + .select(columns) + .where('bucket_id', bucketId) + .whereIn('name', objectNames) + .abortOnSignal(signal) + }, + { signal } + ) } - async findObjectVersions(bucketId: string, obj: { name: string; version: string }[]) { - return this.runQuery('FindObjectVersions', (knex, signal) => { - // Step 1: Generate placeholders for each tuple - const placeholders = obj.map(() => '(?, ?)').join(', ') + async findObjectVersions(input: FindObjectVersionsInput) { + const { bucketId, objectNames, signal } = input + return this.runQuery( + 'FindObjectVersions', + (knex) => { + // Step 1: Generate placeholders for each tuple + const placeholders = objectNames.map(() => '(?, ?)').join(', ') - // Step 2: Flatten the array of tuples into a single array of values - const flatParams = obj.flatMap(({ name, version }) => [name, version]) + // Step 2: Flatten the array of tuples into a single array of values + const flatParams = objectNames.flatMap(({ name, version }) => [name, version]) - return knex - .from('objects') - .select('objects.name', 'objects.version') - .where('bucket_id', bucketId) - .whereRaw(`(name, version) IN (${placeholders})`, flatParams) - .abortOnSignal(signal) - }) + return knex + .from('objects') + .select('objects.name', 'objects.version') + .where('bucket_id', bucketId) + .whereRaw(`(name, version) IN (${placeholders})`, flatParams) + .abortOnSignal(signal) + }, 
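The deleteObjectVersions and findObjectVersions hunks above share a tuple-IN pattern: one (?, ?) placeholder pair per object, with the bindings flattened in matching order. A standalone sketch of just that step:

// Build a "(name, version) IN ((?, ?), ...)" clause plus its flat bindings,
// as done inline in the two hunks above.
function tupleInClause(pairs: { name: string; version: string }[]) {
  const placeholders = pairs.map(() => '(?, ?)').join(', ')
  const bindings = pairs.flatMap(({ name, version }) => [name, version])
  return { clause: `(name, version) IN (${placeholders})`, bindings }
}

// tupleInClause([{ name: 'a', version: 'v1' }, { name: 'b', version: 'v2' }])
// -> clause:   '(name, version) IN ((?, ?), (?, ?))'
// -> bindings: ['a', 'v1', 'b', 'v2']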
+ { signal } + ) } - async mustLockObject(bucketId: string, objectName: string, version?: string) { - return this.runQuery('MustLockObject', async (knex, signal) => { - const hash = hashStringToInt(`${bucketId}/${objectName}${version ? `/${version}` : ''}`) - const result = await knex - .raw<{ rows: { pg_try_advisory_xact_lock: boolean }[] }>( - `SELECT pg_try_advisory_xact_lock(?);`, - [hash] - ) - .abortOnSignal(signal) - const lockAcquired = result.rows.shift()?.pg_try_advisory_xact_lock || false - - if (!lockAcquired) { - throw ERRORS.ResourceLocked() - } + async mustLockObject(input: MustLockObjectInput) { + const { bucketId, objectName, version, signal } = input + return this.runQuery( + 'MustLockObject', + async (knex) => { + const hash = hashStringToInt(`${bucketId}/${objectName}${version ? `/${version}` : ''}`) + const result = await knex + .raw<{ rows: { pg_try_advisory_xact_lock: boolean }[] }>( + `SELECT pg_try_advisory_xact_lock(?);`, + [hash] + ) + .abortOnSignal(signal) + const lockAcquired = result.rows.shift()?.pg_try_advisory_xact_lock || false - return true - }) + if (!lockAcquired) { + throw ERRORS.ResourceLocked() + } + + return true + }, + { signal } + ) } - async waitObjectLock( - bucketId: string, - objectName: string, - version?: string, - opts?: { timeout: number } - ) { - return this.runQuery('WaitObjectLock', async (knex, signal) => { - const hash = hashStringToInt(`${bucketId}/${objectName}${version ? `/${version}` : ''}`) - const query = knex.raw(`SELECT pg_advisory_xact_lock(?)`, [hash]).abortOnSignal(signal) - - if (opts?.timeout) { - let timeoutInterval: undefined | NodeJS.Timeout - - try { - await Promise.race([ - query, - new Promise( - (_, reject) => - (timeoutInterval = setTimeout(() => reject(ERRORS.LockTimeout()), opts.timeout)) - ), - ]) - } catch (e) { - throw e - } finally { - if (timeoutInterval) { - clearTimeout(timeoutInterval) + async waitObjectLock(input: WaitObjectLockInput) { + const { bucketId, objectName, version, timeout, signal } = input + return this.runQuery( + 'WaitObjectLock', + async (knex) => { + const hash = hashStringToInt(`${bucketId}/${objectName}${version ? `/${version}` : ''}`) + const query = knex.raw(`SELECT pg_advisory_xact_lock(?)`, [hash]).abortOnSignal(signal) + + if (timeout) { + let timeoutInterval: undefined | NodeJS.Timeout + + try { + await Promise.race([ + query, + new Promise( + (_, reject) => + (timeoutInterval = setTimeout(() => reject(ERRORS.LockTimeout()), timeout)) + ), + ]) + } catch (e) { + throw e + } finally { + if (timeoutInterval) { + clearTimeout(timeoutInterval) + } } + } else { + await query } - } else { - await query - } - return true - }) + return true + }, + { signal } + ) } - async searchObjects(bucketId: string, prefix: string, options: SearchObjectOption) { - return this.runQuery('SearchObjects', async (knex, signal) => { - const result = await knex - .raw<{ rows: Obj[] }>('select * from storage.search(?,?,?,?,?,?,?,?)', [ - prefix, - bucketId, - options.limit || 100, - prefix.split('/').length, - options.offset || 0, - options.search || '', - options.sortBy?.column ?? 'name', - options.sortBy?.order ?? 
'asc', - ]) - .abortOnSignal(signal) - - return result.rows - }) + async searchObjects(input: SearchObjectsInput) { + const { bucketId, prefix, options, signal } = input + return this.runQuery( + 'SearchObjects', + async (knex) => { + const result = await knex + .raw<{ rows: Obj[] }>('select * from storage.search(?,?,?,?,?,?,?,?)', [ + prefix, + bucketId, + options.limit || 100, + prefix.split('/').length, + options.offset || 0, + options.search || '', + options.sortBy?.column ?? 'name', + options.sortBy?.order ?? 'asc', + ]) + .abortOnSignal(signal) + + return result.rows + }, + { signal } + ) } - async createMultipartUpload( - uploadId: string, - bucketId: string, - objectName: string, - version: string, - signature: string, - owner?: string, - metadata?: Record - ) { - return this.runQuery('CreateMultipartUpload', async (knex, signal) => { - const multipart = await knex - .table('s3_multipart_uploads') - .insert( - this.normalizeColumns({ - id: uploadId, - bucket_id: bucketId, - key: objectName, - version, - upload_signature: signature, - owner_id: owner, - user_metadata: metadata, - }) - ) - .returning('*') - .abortOnSignal(signal) + async createMultipartUpload(input: CreateMultipartUploadInput) { + const { uploadId, bucketId, objectName, version, signature, owner, metadata, signal } = input + return this.runQuery( + 'CreateMultipartUpload', + async (knex) => { + const multipart = await knex + .table('s3_multipart_uploads') + .insert( + this.normalizeColumns({ + id: uploadId, + bucket_id: bucketId, + key: objectName, + version, + upload_signature: signature, + owner_id: owner, + user_metadata: metadata, + }) + ) + .returning('*') + .abortOnSignal(signal) - return multipart[0] as S3MultipartUpload - }) + return multipart[0] as S3MultipartUpload + }, + { signal } + ) } - async findMultipartUpload(uploadId: string, columns = 'id', options?: { forUpdate?: boolean }) { - const multiPart = await this.runQuery('FindMultipartUpload', async (knex, signal) => { - const query = knex - .from('s3_multipart_uploads') - .select(columns.split(',')) - .where('id', uploadId) + async findMultipartUpload(input: FindMultipartUploadInput) { + const { uploadId, columns = 'id', options, signal } = input + const multiPart = await this.runQuery( + 'FindMultipartUpload', + async (knex) => { + const query = knex + .from('s3_multipart_uploads') + .select(columns.split(',')) + .where('id', uploadId) - if (options?.forUpdate) { - return query.abortOnSignal(signal).forUpdate().first() - } - return query.abortOnSignal(signal).first() - }) + if (options?.forUpdate) { + return query.abortOnSignal(signal).forUpdate().first() + } + return query.abortOnSignal(signal).first() + }, + { signal } + ) if (!multiPart) { throw ERRORS.NoSuchUpload(uploadId) @@ -946,57 +1058,74 @@ export class StorageKnexDB implements Database { return multiPart } - async updateMultipartUploadProgress(uploadId: string, progress: number, signature: string) { - return this.runQuery('UpdateMultipartUploadProgress', async (knex, signal) => { - await knex - .from('s3_multipart_uploads') - .update({ in_progress_size: progress, upload_signature: signature }) - .where('id', uploadId) - .abortOnSignal(signal) - }) + async updateMultipartUploadProgress(input: UpdateMultipartUploadProgressInput) { + const { uploadId, progress, signature, signal } = input + return this.runQuery( + 'UpdateMultipartUploadProgress', + async (knex) => { + await knex + .from('s3_multipart_uploads') + .update({ in_progress_size: progress, upload_signature: signature }) + 
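The mustLockObject / waitObjectLock hunks above reduce an object path to a single bigint key and take a transaction-scoped Postgres advisory lock on it. A simplified standalone sketch; hashStringToInt is a hypothetical stand-in here, since the real helper is not part of this diff:

import { createHash } from 'node:crypto'
import type { Knex } from 'knex'

// Hypothetical hash: derive a signed 32-bit lock id from a sha256 digest.
// The real hashStringToInt may use a different scheme.
function hashStringToInt(key: string): number {
  return createHash('sha256').update(key).digest().readInt32BE(0)
}

// Non-blocking variant (mustLockObject): error out if the lock is taken.
async function tryLockObject(tnx: Knex.Transaction, key: string): Promise<void> {
  const res = await tnx.raw('SELECT pg_try_advisory_xact_lock(?) AS ok', [hashStringToInt(key)])
  if (!res.rows[0]?.ok) {
    throw new Error('resource locked') // the real code throws ERRORS.ResourceLocked()
  }
}

// Blocking variant with timeout (waitObjectLock): race the lock against a
// timer and always clear the timer, whichever side settles first.
async function waitLockObject(tnx: Knex.Transaction, key: string, timeoutMs?: number): Promise<void> {
  const lock = tnx.raw('SELECT pg_advisory_xact_lock(?)', [hashStringToInt(key)])
  if (!timeoutMs) {
    await lock
    return
  }
  let timer: NodeJS.Timeout | undefined
  try {
    await Promise.race([
      lock,
      new Promise<never>((_, reject) => {
        timer = setTimeout(() => reject(new Error('lock timeout')), timeoutMs)
      }),
    ])
  } finally {
    if (timer) clearTimeout(timer)
  }
}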
.where('id', uploadId) + .abortOnSignal(signal) + }, + { signal } + ) } - async deleteMultipartUpload(uploadId: string) { - return this.runQuery('DeleteMultipartUpload', async (knex, signal) => { - await knex.from('s3_multipart_uploads').delete().where('id', uploadId).abortOnSignal(signal) - }) + async deleteMultipartUpload(input: DeleteMultipartUploadInput) { + const { uploadId, signal } = input + return this.runQuery( + 'DeleteMultipartUpload', + async (knex) => { + await knex.from('s3_multipart_uploads').delete().where('id', uploadId).abortOnSignal(signal) + }, + { signal } + ) } - async insertUploadPart(part: S3PartUpload) { - return this.runQuery('InsertUploadPart', async (knex, signal) => { - const storedPart = await knex - .table('s3_multipart_uploads_parts') - .insert(part) - .returning('*') - .abortOnSignal(signal) + async insertUploadPart(input: InsertUploadPartInput) { + const { signal, ...part } = input + return this.runQuery( + 'InsertUploadPart', + async (knex) => { + const storedPart = await knex + .table('s3_multipart_uploads_parts') + .insert(part) + .returning('*') + .abortOnSignal(signal) - return storedPart[0] - }) + return storedPart[0] + }, + { signal } + ) } - async listParts( - uploadId: string, - options: { afterPart?: string; maxParts: number } - ): Promise { - return this.runQuery('ListParts', async (knex, signal) => { - const query = knex - .from('s3_multipart_uploads_parts') - .select('etag', 'part_number', 'size', 'upload_id', 'created_at') - .where('upload_id', uploadId) - .orderBy('part_number') - .limit(options.maxParts) - - if (options.afterPart) { - query.andWhere('part_number', '>', options.afterPart) - } + async listParts(input: ListPartsInput): Promise { + const { uploadId, options, signal } = input + return this.runQuery( + 'ListParts', + async (knex) => { + const query = knex + .from('s3_multipart_uploads_parts') + .select('etag', 'part_number', 'size', 'upload_id', 'created_at') + .where('upload_id', uploadId) + .orderBy('part_number') + .limit(options.maxParts) + + if (options.afterPart) { + query.andWhere('part_number', '>', options.afterPart) + } - return query.abortOnSignal(signal) - }) + return query.abortOnSignal(signal) + }, + { signal } + ) } healthcheck() { - return this.runQuery('Healthcheck', (knex, signal) => { - return knex.raw('SELECT id from storage.buckets limit 1').abortOnSignal(signal) + return this.runQuery('Healthcheck', (knex) => { + return knex.raw('SELECT id from storage.buckets limit 1') }) } @@ -1040,9 +1169,11 @@ export class StorageKnexDB implements Database { return columns } - protected async runQuery< - T extends (...args: [db: Knex.Transaction, signal?: AbortSignal]) => Promise - >(queryName: string, fn: T): Promise>> { + protected async runQuery Promise>( + queryName: string, + fn: T, + opts?: { signal?: AbortSignal } + ): Promise>> { const startTime = process.hrtime.bigint() const recordDuration = () => { const duration = Number(process.hrtime.bigint() - startTime) / 1e9 @@ -1052,7 +1183,7 @@ export class StorageKnexDB implements Database { }) } - const abortSignal = this.connection.getAbortSignal() + const abortSignal = opts?.signal ?? 
this.options.signal let tnx = this.options.tnx @@ -1071,10 +1202,10 @@ export class StorageKnexDB implements Database { try { if (needsNewTransaction || differentScopes) { - await this.connection.setScope(tnx) + await this.connection.setScope(tnx, { signal: abortSignal }) } - const result: Awaited> = await fn(tnx, abortSignal) + const result: Awaited> = await fn(tnx) if (needsNewTransaction) { await tnx.commit() diff --git a/src/storage/events/iceberg/delete-iceberg-resources.ts b/src/storage/events/iceberg/delete-iceberg-resources.ts index 25ef63e44..926f36702 100644 --- a/src/storage/events/iceberg/delete-iceberg-resources.ts +++ b/src/storage/events/iceberg/delete-iceberg-resources.ts @@ -161,7 +161,7 @@ export class DeleteIcebergResources extends BaseEvent { }) } catch (e) { if (e instanceof StorageBackendError && e.code === ErrorCode.NoSuchCatalog) { - await storage.db.deleteAnalyticsBucket(bucketId) + await storage.db.deleteAnalyticsBucket({ id: bucketId }) return } throw e @@ -71,7 +71,7 @@ export class BucketDeleted extends BaseEvent { ) if (isMultitenant) { - await storage.db.deleteAnalyticsBucket(bucketId, { soft: true }) + await storage.db.deleteAnalyticsBucket({ id: bucketId, opts: { soft: true } }) } }) } diff --git a/src/storage/events/objects/backup-object.ts b/src/storage/events/objects/backup-object.ts index e14f07f4b..522992064 100644 --- a/src/storage/events/objects/backup-object.ts +++ b/src/storage/events/objects/backup-object.ts @@ -1,7 +1,7 @@ import { BaseEvent } from '../base-event' import { JobWithMetadata, Queue, SendOptions, WorkOptions } from 'pg-boss' import { BasePayload } from '@internal/queue' -import { S3Backend } from '@storage/backend' +import { S3Adapter } from '@storage/backend' import { getConfig } from '../../../config' import { logger, logSchema } from '@internal/monitoring' @@ -44,7 +44,7 @@ export class BackupObjectEvent extends BaseEvent { const tenantId = job.data.tenant.ref const storage = await this.createStorage(job.data) - if (!(storage.backend instanceof S3Backend)) { + if (!(storage.backend instanceof S3Adapter)) { return } @@ -88,15 +88,15 @@ export class BackupObjectEvent extends BaseEvent { reqId: job.data.reqId, }) - await storage.backend.deleteObject( - storageS3Bucket, - storage.location.getKeyLocation({ + await storage.backend.remove({ + bucket: storageS3Bucket, + key: storage.location.getKeyLocation({ tenantId, bucketId: job.data.bucketId, objectName: job.data.name, }), - job.data.version - ) + version: job.data.version, + }) } } catch (e) { logger.error( diff --git a/src/storage/events/objects/object-admin-delete-all-before.ts b/src/storage/events/objects/object-admin-delete-all-before.ts index ef70e8edd..db5133ebc 100644 --- a/src/storage/events/objects/object-admin-delete-all-before.ts +++ b/src/storage/events/objects/object-admin-delete-all-before.ts @@ -61,7 +61,12 @@ export class ObjectAdminDeleteAllBefore extends BaseEvent 0) { @@ -71,11 +76,11 @@ export class ObjectAdminDeleteAllBefore extends BaseEvent { - const deleted = await trx.deleteObjects( + const deleted = await trx.deleteObjects({ bucketId, - objects.map(({ id }) => id!), - 'id' - ) + objectNames: objects.map(({ id }) => id!), + by: 'id', + }) if (deleted && deleted.length > 0) { const prefixes: string[] = [] @@ -86,7 +91,7 @@ export class ObjectAdminDeleteAllBefore extends BaseEvent { reqId: job.data.reqId, }) - await storage.backend.deleteObjects(storageS3Bucket, [ - withOptionalVersion(s3Key, version), - withOptionalVersion(s3Key, version) + '.info', - ]) + 
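The runQuery hunk above is the heart of this refactor: the abort signal is resolved once per call (explicit opts.signal first, then the scope-level default) instead of being handed to every callback. A simplified standalone sketch of that contract, with the transaction and metrics plumbing of the real method left out:

// Per-call signal wins over the scope default; the callback no longer
// receives the signal; duration is recorded however the query ends.
async function runQuery<T>(
  queryName: string,
  fn: () => Promise<T>,
  opts?: { signal?: AbortSignal },
  scopeSignal?: AbortSignal // stands in for this.options.signal
): Promise<T> {
  const abortSignal = opts?.signal ?? scopeSignal
  abortSignal?.throwIfAborted() // fail fast if the caller is already gone
  const startTime = process.hrtime.bigint()
  try {
    return await fn()
  } finally {
    const seconds = Number(process.hrtime.bigint() - startTime) / 1e9
    console.log(`${queryName} took ${seconds.toFixed(3)}s`)
  }
}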
await storage.backend.removeMany({ + bucket: storageS3Bucket, + prefixes: [ + withOptionalVersion(s3Key, version), + withOptionalVersion(s3Key, version) + '.info', + ], + }) } catch (e) { const s3Key = `${job.data.tenant.ref}/${job.data.bucketId}/${job.data.name}` diff --git a/src/storage/events/upgrades/sync-catalog-ids.ts b/src/storage/events/upgrades/sync-catalog-ids.ts index a5a8ed86b..3e012adc4 100644 --- a/src/storage/events/upgrades/sync-catalog-ids.ts +++ b/src/storage/events/upgrades/sync-catalog-ids.ts @@ -50,8 +50,11 @@ export class SyncCatalogIds extends UpgradeBaseEvent { }, }) - const tenantBuckets = await storage.listAnalyticsBuckets('id,name', { - limit: 1000, + const tenantBuckets = await storage.listAnalyticsBuckets({ + columns: 'id,name', + options: { + limit: 1000, + }, }) logSchema.info( diff --git a/src/storage/object.ts b/src/storage/object.ts index f81207364..61f67c75a 100644 --- a/src/storage/object.ts +++ b/src/storage/object.ts @@ -22,7 +22,7 @@ import { StorageObjectLocator } from '@storage/locator' const { requestUrlLengthLimit } = getConfig() -interface CopyObjectParams { +export interface CopyObjectParams { sourceKey: string destinationBucket: string destinationKey: string @@ -40,7 +40,101 @@ interface CopyObjectParams { ifModifiedSince?: Date ifUnmodifiedSince?: Date } + signal?: AbortSignal } + +export interface DeleteObjectInput { + objectName: string + signal?: AbortSignal +} + +export interface DeleteObjectsInput { + prefixes: string[] + signal?: AbortSignal +} + +export interface UpdateObjectMetadataInput { + objectName: string + metadata: ObjectMetadata + signal?: AbortSignal +} + +export interface UpdateObjectOwnerInput { + objectName: string + owner?: string + signal?: AbortSignal +} + +export interface FindObjectInput { + objectName: string + columns?: string + filters?: FindObjectFilters + signal?: AbortSignal +} + +export interface FindObjectsInput { + objectNames: string[] + columns?: string + signal?: AbortSignal +} + +export interface MoveObjectInput { + sourceObjectName: string + destinationBucket: string + destinationObjectName: string + owner?: string + signal?: AbortSignal +} + +export interface SearchObjectsInput { + prefix: string + options: SearchObjectOption + signal?: AbortSignal +} + +export interface ListObjectsV2Input { + prefix?: string + delimiter?: string + cursor?: string + startAfter?: string + maxKeys?: number + encodingType?: 'url' + sortBy?: { + column: 'name' | 'created_at' | 'updated_at' + order?: string + } + signal?: AbortSignal +} + +export interface SignObjectUrlInput { + objectName: string + url: string + expiresIn: number + metadata?: Record + signal?: AbortSignal +} + +export interface SignObjectUrlsInput { + paths: string[] + expiresIn: number + signal?: AbortSignal +} + +export interface SignUploadObjectUrlInput { + objectName: string + url: string + expiresIn: number + owner?: string + options?: { upsert?: boolean } + signal?: AbortSignal +} + +export interface VerifyObjectSignatureInput { + token: string + objectName: string + signal?: AbortSignal +} + export interface ListObjectsV2Result { folders: Obj[] objects: Obj[] @@ -82,9 +176,11 @@ export class ObjectStorage { signal?: AbortSignal } ) { - const bucket = await this.db - .asSuperUser() - .findBucketById(this.bucketId, 'id, file_size_limit, allowed_mime_types') + const bucket = await this.db.asSuperUser().findBucketById({ + bucketId: this.bucketId, + columns: 'id, file_size_limit, allowed_mime_types', + signal: file.signal, + }) const uploadRequest = await 
fileUploadFromRequest(request, { objectName: file.objectName, @@ -122,32 +218,40 @@ export class ObjectStorage { /** * Deletes an object from the remote storage * and the database - * @param objectName + * @param input */ - async deleteObject(objectName: string) { - const obj = await this.db.withTransaction(async (db) => { - const obj = await db.asSuperUser().findObject(this.bucketId, objectName, 'id,version', { - forUpdate: true, - }) + async deleteObject(input: DeleteObjectInput) { + const { objectName, signal } = input + const obj = await this.db.withTransaction( + async (db) => { + const obj = await db.asSuperUser().findObject({ + bucketId: this.bucketId, + objectName, + columns: 'id,version', + filters: { forUpdate: true }, + }) - const deleted = await db.deleteObject(this.bucketId, objectName) + const deleted = await db.deleteObject({ bucketId: this.bucketId, objectName }) - if (!deleted) { - throw ERRORS.NoSuchKey(objectName) - } + if (!deleted) { + throw ERRORS.NoSuchKey(objectName) + } - await this.backend.deleteObject( - this.location.getRootLocation(), - this.location.getKeyLocation({ - tenantId: this.db.tenantId, - bucketId: this.bucketId, - objectName, - }), - obj.version - ) + await this.backend.remove({ + bucket: this.location.getRootLocation(), + key: this.location.getKeyLocation({ + tenantId: this.db.tenantId, + bucketId: this.bucketId, + objectName, + }), + version: obj.version, + signal, + }) - return obj - }) + return obj + }, + { signal } + ) await ObjectRemoved.sendWebhook({ tenant: this.db.tenant(), @@ -162,9 +266,10 @@ export class ObjectStorage { /** * Deletes multiple objects from the remote storage * and the database - * @param prefixes + * @param input */ - async deleteObjects(prefixes: string[]) { + async deleteObjects(input: DeleteObjectsInput) { + const { prefixes, signal } = input let results: { name: string }[] = [] for (let i = 0; i < prefixes.length; ) { @@ -177,53 +282,64 @@ export class ObjectStorage { urlParamLength += encodeURIComponent(prefix).length + 9 // length of '%22%2C%22' } - await this.db.withTransaction(async (db) => { - const data = await db.deleteObjects(this.bucketId, prefixesSubset, 'name') - - if (data.length > 0) { - results = results.concat(data) + await this.db.withTransaction( + async (db) => { + const data = await db.deleteObjects({ + bucketId: this.bucketId, + objectNames: prefixesSubset, + by: 'name', + }) - // if successfully deleted, delete from s3 too - // todo: consider moving this to a queue - const prefixesToDelete = data.reduce((all, { name, version }) => { - all.push( - this.location.getKeyLocation({ - tenantId: db.tenantId, - bucketId: this.bucketId, - objectName: name, - version, - }) - ) + if (data.length > 0) { + results = results.concat(data) - if (version) { + // if successfully deleted, delete from s3 too + // todo: consider moving this to a queue + const prefixesToDelete = data.reduce((all, { name, version }) => { all.push( this.location.getKeyLocation({ tenantId: db.tenantId, bucketId: this.bucketId, objectName: name, version, - }) + '.info' + }) ) - } - return all - }, [] as string[]) - await this.backend.deleteObjects(this.location.getRootLocation(), prefixesToDelete) - - await Promise.allSettled( - data.map((object) => - ObjectRemoved.sendWebhook({ - tenant: db.tenant(), - name: object.name, - bucketId: this.bucketId, - reqId: this.db.reqId, - version: object.version, - metadata: object.metadata, - }) + if (version) { + all.push( + this.location.getKeyLocation({ + tenantId: db.tenantId, + bucketId: 
this.bucketId, + objectName: name, + version, + }) + '.info' + ) + } + return all + }, [] as string[]) + + await this.backend.removeMany({ + bucket: this.location.getRootLocation(), + prefixes: prefixesToDelete, + signal, + }) + + await Promise.allSettled( + data.map((object) => + ObjectRemoved.sendWebhook({ + tenant: db.tenant(), + name: object.name, + bucketId: this.bucketId, + reqId: this.db.reqId, + version: object.version, + metadata: object.metadata, + }) + ) ) - ) - } - }) + } + }, + { signal } + ) } return results @@ -231,13 +347,18 @@ export class ObjectStorage { /** * Updates object metadata in the database - * @param objectName - * @param metadata + * @param input */ - async updateObjectMetadata(objectName: string, metadata: ObjectMetadata) { + async updateObjectMetadata(input: UpdateObjectMetadataInput) { + const { objectName, metadata, signal } = input mustBeValidKey(objectName) - const result = await this.db.updateObjectMetadata(this.bucketId, objectName, metadata) + const result = await this.db.updateObjectMetadata({ + bucketId: this.bucketId, + objectName, + metadata, + signal, + }) await ObjectUpdatedMetadata.sendWebhook({ tenant: this.db.tenant(), @@ -253,32 +374,31 @@ export class ObjectStorage { /** * Updates the owner of an object in the database - * @param objectName - * @param owner + * @param input */ - updateObjectOwner(objectName: string, owner?: string) { - return this.db.updateObjectOwner(this.bucketId, objectName, owner) + updateObjectOwner(input: UpdateObjectOwnerInput) { + const { objectName, owner, signal } = input + return this.db.updateObjectOwner({ bucketId: this.bucketId, objectName, owner, signal }) } /** * Finds an object by name - * @param objectName - * @param columns - * @param filters + * @param input */ - async findObject(objectName: string, columns = 'id', filters?: FindObjectFilters) { + async findObject(input: FindObjectInput) { + const { objectName, columns = 'id', filters, signal } = input mustBeValidKey(objectName) - return this.db.findObject(this.bucketId, objectName, columns, filters) + return this.db.findObject({ bucketId: this.bucketId, objectName, columns, filters, signal }) } /** * Find multiple objects by name - * @param objectNames - * @param columns + * @param input */ - async findObjects(objectNames: string[], columns = 'id') { - return this.db.findObjects(this.bucketId, objectNames, columns) + async findObjects(input: FindObjectsInput) { + const { objectNames, columns = 'id', signal } = input + return this.db.findObjects({ bucketId: this.bucketId, objectNames, columns, signal }) } /** @@ -292,6 +412,7 @@ export class ObjectStorage { * @param upsert * @param fileMetadata * @param userMetadata + * @param signal */ async copyObject({ sourceKey, @@ -303,6 +424,7 @@ export class ObjectStorage { upsert, metadata: fileMetadata, userMetadata, + signal, }: CopyObjectParams) { mustBeValidKey(destinationKey) @@ -319,11 +441,12 @@ export class ObjectStorage { }) // We check if the user has permission to copy the object to the destination key - const originObject = await this.db.findObject( - this.bucketId, - sourceKey, - 'bucket_id,metadata,user_metadata,version' - ) + const originObject = await this.db.findObject({ + bucketId: this.bucketId, + objectName: sourceKey, + columns: 'bucket_id,metadata,user_metadata,version', + signal, + }) // eslint-disable-next-line @typescript-eslint/no-unused-vars const baseMetadata = originObject.metadata || {} @@ -342,63 +465,70 @@ export class ObjectStorage { }) try { - const copyResult = await 
this.backend.copyObject( - this.location.getRootLocation(), - s3SourceKey, - originObject.version, - s3DestinationKey, - newVersion, - destinationMetadata, - conditions - ) + const copyResult = await this.backend.copy({ + bucket: this.location.getRootLocation(), + source: s3SourceKey, + version: originObject.version, + destination: s3DestinationKey, + destinationVersion: newVersion, + metadata: destinationMetadata, + conditions, + signal, + }) - const metadata = await this.backend.headObject( - this.location.getRootLocation(), - s3DestinationKey, - newVersion - ) + const metadata = await this.backend.stats({ + bucket: this.location.getRootLocation(), + key: s3DestinationKey, + version: newVersion, + signal, + }) - const destinationObject = await this.db.asSuperUser().withTransaction(async (db) => { - await db.waitObjectLock(destinationBucket, destinationKey, undefined, { - timeout: 3000, - }) + const destinationObject = await this.db.asSuperUser().withTransaction( + async (db) => { + await db.waitObjectLock({ + bucketId: destinationBucket, + objectName: destinationKey, + timeout: 3000, + }) - const existingDestObject = await db.findObject( - destinationBucket, - destinationKey, - 'id,name,metadata,version,bucket_id', - { - dontErrorOnEmpty: true, - forUpdate: true, - } - ) - - const destinationObject = await db.upsertObject({ - ...originObject, - bucket_id: destinationBucket, - name: destinationKey, - owner, - metadata: { - ...destinationMetadata, - lastModified: copyResult.lastModified, - eTag: copyResult.eTag, - }, - user_metadata: copyMetadata ? originObject.user_metadata : userMetadata, - version: newVersion, - }) + const existingDestObject = await db.findObject({ + bucketId: destinationBucket, + objectName: destinationKey, + columns: 'id,name,metadata,version,bucket_id', + filters: { + dontErrorOnEmpty: true, + forUpdate: true, + }, + }) - if (existingDestObject) { - await ObjectAdminDelete.send({ - name: existingDestObject.name, - bucketId: existingDestObject.bucket_id, - tenant: this.db.tenant(), - version: existingDestObject.version, - reqId: this.db.reqId, + const destinationObject = await db.upsertObject({ + ...originObject, + bucket_id: destinationBucket, + name: destinationKey, + owner, + metadata: { + ...destinationMetadata, + lastModified: copyResult.lastModified, + eTag: copyResult.eTag, + }, + user_metadata: copyMetadata ? 
originObject.user_metadata : userMetadata, + version: newVersion, }) - } - return destinationObject - }) + if (existingDestObject) { + await ObjectAdminDelete.send({ + name: existingDestObject.name, + bucketId: existingDestObject.bucket_id, + tenant: this.db.tenant(), + version: existingDestObject.version, + reqId: this.db.reqId, + }) + } + + return destinationObject + }, + { signal } + ) await ObjectCreatedCopyEvent.sendWebhook({ tenant: this.db.tenant(), @@ -429,17 +559,10 @@ export class ObjectStorage { /** * Moves an existing remote object to a given location - * @param sourceObjectName - * @param destinationBucket - * @param destinationObjectName - * @param owner + * @param input */ - async moveObject( - sourceObjectName: string, - destinationBucket: string, - destinationObjectName: string, - owner?: string - ) { + async moveObject(input: MoveObjectInput) { + const { sourceObjectName, destinationBucket, destinationObjectName, owner, signal } = input mustBeValidKey(destinationObjectName) const newVersion = randomUUID() @@ -455,21 +578,33 @@ export class ObjectStorage { objectName: destinationObjectName, }) - await this.db.testPermission((db) => { - return Promise.all([ - db.findObject(this.bucketId, sourceObjectName, 'id'), - db.updateObject(this.bucketId, sourceObjectName, { - name: destinationObjectName, - version: newVersion, - bucket_id: destinationBucket, - owner, - }), - ]) - }) + await this.db.testPermission( + (db) => { + return Promise.all([ + db.findObject({ bucketId: this.bucketId, objectName: sourceObjectName, columns: 'id' }), + db.updateObject({ + bucketId: this.bucketId, + name: sourceObjectName, + data: { + name: destinationObjectName, + version: newVersion, + bucket_id: destinationBucket, + owner, + }, + }), + ]) + }, + { + signal, + } + ) - const sourceObj = await this.db - .asSuperUser() - .findObject(this.bucketId, sourceObjectName, 'id, version,user_metadata') + const sourceObj = await this.db.asSuperUser().findObject({ + bucketId: this.bucketId, + objectName: sourceObjectName, + columns: 'id, version,user_metadata', + signal, + }) if (s3SourceKey === s3DestinationKey) { return { @@ -478,88 +613,99 @@ export class ObjectStorage { } try { - await this.backend.copyObject( - this.location.getRootLocation(), - s3SourceKey, - sourceObj.version, - s3DestinationKey, - newVersion - ) + await this.backend.copy({ + bucket: this.location.getRootLocation(), + source: s3SourceKey, + version: sourceObj.version, + destination: s3DestinationKey, + destinationVersion: newVersion, + signal, + }) - const metadata = await this.backend.headObject( - this.location.getRootLocation(), - s3DestinationKey, - newVersion - ) + const metadata = await this.backend.stats({ + bucket: this.location.getRootLocation(), + key: s3DestinationKey, + version: newVersion, + signal, + }) - return this.db.asSuperUser().withTransaction(async (db) => { - await db.waitObjectLock(this.bucketId, destinationObjectName, undefined, { - timeout: 5000, - }) + return this.db.asSuperUser().withTransaction( + async (db) => { + await db.waitObjectLock({ + bucketId: this.bucketId, + objectName: destinationObjectName, + timeout: 5000, + }) - const sourceObject = await db.findObject( - this.bucketId, - sourceObjectName, - 'id,version,metadata,user_metadata', - { - forUpdate: true, - dontErrorOnEmpty: false, - } - ) - - await db.updateObject(this.bucketId, sourceObjectName, { - name: destinationObjectName, - bucket_id: destinationBucket, - version: newVersion, - owner: owner, - metadata, - user_metadata: 
sourceObj.user_metadata, - }) + const sourceObject = await db.findObject({ + bucketId: this.bucketId, + objectName: sourceObjectName, + columns: 'id,version,metadata,user_metadata', + filters: { + forUpdate: true, + dontErrorOnEmpty: false, + }, + }) - await ObjectAdminDelete.send({ - name: sourceObjectName, - bucketId: this.bucketId, - tenant: this.db.tenant(), - version: sourceObj.version, - reqId: this.db.reqId, - }) + await db.updateObject({ + bucketId: this.bucketId, + name: sourceObjectName, + data: { + name: destinationObjectName, + bucket_id: destinationBucket, + version: newVersion, + owner: owner, + metadata, + user_metadata: sourceObj.user_metadata, + }, + }) - await Promise.allSettled([ - ObjectRemovedMove.sendWebhook({ - tenant: this.db.tenant(), + await ObjectAdminDelete.send({ name: sourceObjectName, bucketId: this.bucketId, - reqId: this.db.reqId, - version: sourceObject.version, - metadata: sourceObject.metadata, - }), - ObjectCreatedMove.sendWebhook({ tenant: this.db.tenant(), - name: destinationObjectName, - version: newVersion, - bucketId: this.bucketId, - metadata: metadata, - oldObject: { + version: sourceObj.version, + reqId: this.db.reqId, + }) + + await Promise.allSettled([ + ObjectRemovedMove.sendWebhook({ + tenant: this.db.tenant(), name: sourceObjectName, bucketId: this.bucketId, reqId: this.db.reqId, version: sourceObject.version, + metadata: sourceObject.metadata, + }), + ObjectCreatedMove.sendWebhook({ + tenant: this.db.tenant(), + name: destinationObjectName, + version: newVersion, + bucketId: this.bucketId, + metadata: metadata, + oldObject: { + name: sourceObjectName, + bucketId: this.bucketId, + reqId: this.db.reqId, + version: sourceObject.version, + }, + reqId: this.db.reqId, + }), + ]) + + return { + destObject: { + id: sourceObject.id, + name: destinationObjectName, + bucket_id: destinationBucket, + version: newVersion, + owner: owner, + metadata, }, - reqId: this.db.reqId, - }), - ]) - - return { - destObject: { - id: sourceObject.id, - name: destinationObjectName, - bucket_id: destinationBucket, - version: newVersion, - owner: owner, - metadata, - }, - } - }) + } + }, + { signal } + ) } catch (e) { await ObjectAdminDelete.send({ name: destinationObjectName, @@ -577,43 +723,38 @@ export class ObjectStorage { * @param prefix * @param options */ - async searchObjects(prefix: string, options: SearchObjectOption) { + async searchObjects(input: SearchObjectsInput) { + let { prefix } = input + const { options } = input if (prefix.length > 0 && !prefix.endsWith('/')) { // assuming prefix is always a folder prefix = `${prefix}/` } - return this.db.searchObjects(this.bucketId, prefix, options) + return this.db.searchObjects({ bucketId: this.bucketId, prefix, options, signal: input.signal }) } - async listObjectsV2(options?: { - prefix?: string - delimiter?: string - cursor?: string - startAfter?: string - maxKeys?: number - encodingType?: 'url' - sortBy?: { - column: 'name' | 'created_at' | 'updated_at' - order?: string - } - }): Promise { - const limit = Math.min(options?.maxKeys || 1000, 1000) - const prefix = options?.prefix || '' - const delimiter = options?.delimiter - - const cursor = options?.cursor ? 
decodeContinuationToken(options.cursor) : undefined - let searchResult = await this.db.listObjectsV2(this.bucketId, { - prefix: options?.prefix, - delimiter: options?.delimiter, - maxKeys: limit + 1, - nextToken: cursor?.startAfter, - startAfter: cursor?.startAfter || options?.startAfter, - sortBy: { - order: cursor?.sortOrder || options?.sortBy?.order, - column: cursor?.sortColumn || options?.sortBy?.column, - after: cursor?.sortColumnAfter, + async listObjectsV2(input: ListObjectsV2Input = {}): Promise { + const limit = Math.min(input.maxKeys || 1000, 1000) + const prefix = input.prefix || '' + const delimiter = input.delimiter + + const cursor = input.cursor ? decodeContinuationToken(input.cursor) : undefined + let searchResult = await this.db.listObjectsV2({ + bucketId: this.bucketId, + options: { + prefix: input.prefix, + delimiter: input.delimiter, + maxKeys: limit + 1, + nextToken: cursor?.startAfter, + startAfter: cursor?.startAfter || input.startAfter, + sortBy: { + order: cursor?.sortOrder || input.sortBy?.order, + column: cursor?.sortColumn || input.sortBy?.column, + after: cursor?.sortColumnAfter, + }, }, + signal: input.signal, }) let prevPrefix = '' @@ -657,7 +798,7 @@ export class ObjectStorage { const name = obj.id === null && !obj.name.endsWith('/') ? obj.name + '/' : obj.name target.push({ ...obj, - name: options?.encodingType === 'url' ? encodeURIComponent(name) : name, + name: input.encodingType === 'url' ? encodeURIComponent(name) : name, }) }) @@ -665,7 +806,7 @@ export class ObjectStorage { let nextCursorKey: string | undefined if (isTruncated) { - const sortColumn = (cursor?.sortColumn || options?.sortBy?.column) as + const sortColumn = (cursor?.sortColumn || input.sortBy?.column) as | 'name' | 'created_at' | 'updated_at' @@ -673,7 +814,7 @@ export class ObjectStorage { nextContinuationToken = encodeContinuationToken({ startAfter: searchResult[searchResult.length - 1].name, - sortOrder: cursor?.sortOrder || options?.sortBy?.order, + sortOrder: cursor?.sortOrder || input.sortBy?.order, sortColumn, sortColumnAfter: sortColumn && sortColumn !== 'name' && searchResult[searchResult.length - 1][sortColumn] @@ -699,13 +840,10 @@ export class ObjectStorage { * @param expiresIn seconds * @param metadata */ - async signObjectUrl( - objectName: string, - url: string, - expiresIn: number, - metadata?: Record - ) { - await this.findObject(objectName) + async signObjectUrl(input: SignObjectUrlInput) { + const { objectName, url, expiresIn } = input + let { metadata } = input + await this.findObject({ objectName, signal: input.signal }) metadata = Object.keys(metadata || {}).reduce((all, key) => { if (!all[key]) { @@ -738,7 +876,8 @@ export class ObjectStorage { * @param paths * @param expiresIn */ - async signObjectUrls(paths: string[], expiresIn: number) { + async signObjectUrls(input: SignObjectUrlsInput) { + const { paths, expiresIn } = input let results: { name: string }[] = [] for (let i = 0; i < paths.length; ) { @@ -751,7 +890,11 @@ export class ObjectStorage { urlParamLength += encodeURIComponent(path).length + 9 // length of '%22%2C%22' } - const objects = await this.findObjects(pathsSubset, 'name') + const objects = await this.findObjects({ + objectNames: pathsSubset, + columns: 'name', + signal: input.signal, + }) results = results.concat(objects) } @@ -787,13 +930,8 @@ export class ObjectStorage { * @param owner * @param options */ - async signUploadObjectUrl( - objectName: string, - url: string, - expiresIn: number, - owner?: string, - options?: { upsert?: boolean } - 
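signObjectUrls above reuses the same batching loop as deleteObjects earlier in this file: take as many names as fit within requestUrlLengthLimit, where each entry costs its URI-encoded length plus 9 characters for the '%22%2C%22' separator. The step in isolation:

// One batch of the chunking loop used by deleteObjects and signObjectUrls.
function nextBatch(names: string[], startIndex: number, urlLengthLimit: number) {
  const batch: string[] = []
  let urlParamLength = 0
  let i = startIndex
  for (; i < names.length && urlParamLength < urlLengthLimit; i++) {
    const name = names[i]
    batch.push(name)
    urlParamLength += encodeURIComponent(name).length + 9 // length of '%22%2C%22'
  }
  return { batch, nextIndex: i }
}

// Callers loop until nextIndex reaches names.length, issuing one request
// (or one transaction) per batch.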
) { + async signUploadObjectUrl(input: SignUploadObjectUrlInput) { + const { objectName, url, expiresIn, owner, options } = input // check if user has INSERT permissions await this.uploader.canUpload({ bucketId: this.bucketId, @@ -817,7 +955,8 @@ export class ObjectStorage { * @param token * @param objectName */ - async verifyObjectSignature(token: string, objectName: string) { + async verifyObjectSignature(input: VerifyObjectSignatureInput) { + const { token, objectName } = input const { secret: jwtSecret, jwks } = await getJwtSecret(this.db.tenantId) let payload: SignedUploadToken diff --git a/src/storage/protocols/s3/s3-handler.ts b/src/storage/protocols/s3/s3-handler.ts index a6883a577..ceef8cfa1 100644 --- a/src/storage/protocols/s3/s3-handler.ts +++ b/src/storage/protocols/s3/s3-handler.ts @@ -77,8 +77,8 @@ export class S3ProtocolHandler { * * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html */ - async listBuckets() { - const buckets = await this.storage.listBuckets('name,created_at') + async listBuckets(signal?: AbortSignal) { + const buckets = await this.storage.listBuckets({ columns: 'name,created_at', signal }) return { responseBody: { @@ -104,7 +104,7 @@ export class S3ProtocolHandler { * @param Bucket * @param isPublic */ - async createBucket(Bucket: string, isPublic: boolean) { + async createBucket(Bucket: string, isPublic: boolean, signal?: AbortSignal) { mustBeValidBucketName(Bucket || '') await this.storage.createBucket({ @@ -112,6 +112,7 @@ export class S3ProtocolHandler { id: Bucket, public: isPublic, owner: this.owner, + signal, }) return { @@ -128,8 +129,8 @@ export class S3ProtocolHandler { * * @param name */ - async deleteBucket(name: string) { - await this.storage.deleteBucket(name) + async deleteBucket(name: string, signal?: AbortSignal) { + await this.storage.deleteBucket({ bucketId: name, signal }) return { statusCode: 204, @@ -143,8 +144,8 @@ export class S3ProtocolHandler { * * @param name */ - async headBucket(name: string) { - await this.storage.findBucket(name) + async headBucket(name: string, signal?: AbortSignal) { + await this.storage.findBucket({ bucketId: name, signal }) return { statusCode: 200, headers: { @@ -159,16 +160,19 @@ export class S3ProtocolHandler { * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html * @param command */ - async listObjects(command: ListObjectsCommandInput) { - const list = await this.listObjectsV2({ - Bucket: command.Bucket, - Delimiter: command.Delimiter, - EncodingType: command.EncodingType, - MaxKeys: command.MaxKeys, - Prefix: command.Prefix, - StartAfter: command.Marker, - cursorV1: true, - }) + async listObjects(command: ListObjectsCommandInput, signal?: AbortSignal) { + const list = await this.listObjectsV2( + { + Bucket: command.Bucket, + Delimiter: command.Delimiter, + EncodingType: command.EncodingType, + MaxKeys: command.MaxKeys, + Prefix: command.Prefix, + StartAfter: command.Marker, + cursorV1: true, + }, + signal + ) return { responseBody: { @@ -193,12 +197,15 @@ export class S3ProtocolHandler { * * @param command */ - async listObjectsV2(command: ListObjectsV2CommandInput & { cursorV1?: boolean }) { + async listObjectsV2( + command: ListObjectsV2CommandInput & { cursorV1?: boolean }, + signal?: AbortSignal + ) { if (!command.Bucket) { throw ERRORS.MissingParameter('Bucket') } - await this.storage.asSuperUser().findBucket(command.Bucket) + await this.storage.asSuperUser().findBucket({ bucketId: command.Bucket, signal }) const continuationToken = 
command.ContinuationToken const startAfter = command.StartAfter @@ -217,6 +224,7 @@ export class S3ProtocolHandler { cursor: continuationToken, startAfter: startAfter, encodingType: command.EncodingType, + signal, }) const commonPrefixes = results.folders.map((object) => { @@ -271,12 +279,12 @@ export class S3ProtocolHandler { * * @param command */ - async listMultipartUploads(command: ListMultipartUploadsCommandInput) { + async listMultipartUploads(command: ListMultipartUploadsCommandInput, signal?: AbortSignal) { if (!command.Bucket) { throw ERRORS.MissingParameter('Bucket') } - await this.storage.asSuperUser().findBucket(command.Bucket) + await this.storage.asSuperUser().findBucket({ bucketId: command.Bucket, signal }) const keyContinuationToken = command.KeyMarker const uploadContinuationToken = command.UploadIdMarker @@ -289,16 +297,20 @@ export class S3ProtocolHandler { const limit = maxKeys || 200 - const multipartUploads = await this.storage.db.listMultipartUploads(bucket, { - prefix, - deltimeter: delimiter, - maxKeys: limit + 1, - nextUploadKeyToken: keyContinuationToken - ? decodeContinuationToken(keyContinuationToken) - : undefined, - nextUploadToken: uploadContinuationToken - ? decodeContinuationToken(uploadContinuationToken) - : undefined, + const multipartUploads = await this.storage.db.listMultipartUploads({ + bucketId: bucket, + options: { + prefix, + deltimeter: delimiter, + maxKeys: limit + 1, + nextUploadKeyToken: keyContinuationToken + ? decodeContinuationToken(keyContinuationToken) + : undefined, + nextUploadToken: uploadContinuationToken + ? decodeContinuationToken(uploadContinuationToken) + : undefined, + }, + signal, }) let results: Partial[] = multipartUploads @@ -394,14 +406,16 @@ export class S3ProtocolHandler { * * @param command */ - async createMultiPartUpload(command: CreateMultipartUploadCommandInput) { + async createMultiPartUpload(command: CreateMultipartUploadCommandInput, signal?: AbortSignal) { const uploader = new Uploader(this.storage.backend, this.storage.db, this.storage.location) const { Bucket, Key } = command mustBeValidBucketName(Bucket) mustBeValidKey(Key) - const bucket = await this.storage.asSuperUser().findBucket(Bucket, 'id,allowed_mime_types') + const bucket = await this.storage + .asSuperUser() + .findBucket({ bucketId: Bucket, columns: 'id,allowed_mime_types', signal }) if (command.ContentType && bucket.allowed_mime_types && bucket.allowed_mime_types.length > 0) { validateMimeType(command.ContentType, bucket.allowed_mime_types || []) @@ -415,34 +429,33 @@ export class S3ProtocolHandler { owner: this.owner, }) - const uploadId = await this.storage.backend.createMultiPartUpload( - storageS3Bucket, - this.storage.location.getKeyLocation({ + const uploadId = await this.storage.backend.createMultiPartUpload({ + bucket: storageS3Bucket, + key: this.storage.location.getKeyLocation({ bucketId: command.Bucket as string, objectName: command.Key as string, tenantId: this.tenantId, }), version, - command.ContentType || '', - command.CacheControl || '' - ) + contentType: command.ContentType || '', + cacheControl: command.CacheControl || '', + }) if (!uploadId) { throw ERRORS.InvalidUploadId(uploadId) } const signature = this.uploadSignature({ in_progress_size: 0 }) - await this.storage.db - .asSuperUser() - .createMultipartUpload( - uploadId, - Bucket, - Key, - version, - signature, - this.owner, - command.Metadata - ) + await this.storage.db.asSuperUser().createMultipartUpload({ + uploadId, + bucketId: Bucket, + objectName: Key, + version, + 
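KeyMarker and UploadIdMarker above pass through decodeContinuationToken before reaching the database. That helper is outside this diff; a plausible minimal form treats the marker as an opaque base64-wrapped string. This is an assumption: the listObjectsV2 cursor elsewhere in this patch clearly carries more structure (startAfter, sortColumn, sortOrder), so the real codec may differ:

// Assumed minimal continuation-token codec (not shown in this diff).
const encodeContinuationToken = (raw: string): string =>
  Buffer.from(raw, 'utf8').toString('base64')

const decodeContinuationToken = (token: string): string =>
  Buffer.from(token, 'base64').toString('utf8')

// The client echoes NextKeyMarker back verbatim; decoding yields the raw key
// used by the key COLLATE "C" > ? pagination predicate.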
signature, + owner: this.owner, + metadata: command.Metadata, + signal, + }) return { responseBody: { @@ -479,14 +492,14 @@ export class S3ProtocolHandler { const multiPartUpload = await this.storage.db .asSuperUser() - .findMultipartUpload(UploadId, 'id,version,user_metadata') + .findMultipartUpload({ uploadId: UploadId, columns: 'id,version,user_metadata' }) const parts = command.MultipartUpload?.Parts || [] if (parts.length === 0) { - const allParts = await this.storage.db.asSuperUser().listParts(UploadId, { - maxParts: 10000, - }) + const allParts = await this.storage.db + .asSuperUser() + .listParts({ uploadId: UploadId, options: { maxParts: 10000 } }) parts.push( ...allParts.map((part) => ({ @@ -496,28 +509,28 @@ export class S3ProtocolHandler { ) } - const resp = await this.storage.backend.completeMultipartUpload( - storageS3Bucket, - this.storage.location.getKeyLocation({ + const resp = await this.storage.backend.completeMultipartUpload({ + bucket: storageS3Bucket, + key: this.storage.location.getKeyLocation({ bucketId: Bucket as string, objectName: Key as string, tenantId: this.tenantId, }), - UploadId as string, - multiPartUpload.version, + uploadId: UploadId as string, + version: multiPartUpload.version, parts, - { removePrefix: true } - ) + opts: { removePrefix: true }, + }) - const metadata = await this.storage.backend.headObject( - storageS3Bucket, - this.storage.location.getKeyLocation({ + const metadata = await this.storage.backend.stats({ + bucket: storageS3Bucket, + key: this.storage.location.getKeyLocation({ bucketId: Bucket as string, objectName: Key as string, tenantId: this.tenantId, }), - resp.version - ) + version: resp.version, + }) await uploader.completeUpload({ bucketId: Bucket as string, @@ -530,7 +543,7 @@ export class S3ProtocolHandler { userMetadata: multiPartUpload.user_metadata || undefined, }) - await this.storage.db.asSuperUser().deleteMultipartUpload(UploadId) + await this.storage.db.asSuperUser().deleteMultipartUpload({ uploadId: UploadId }) return { responseBody: { @@ -574,7 +587,9 @@ export class S3ProtocolHandler { throw ERRORS.MissingContentLength() } - const bucket = await this.storage.asSuperUser().findBucket(Bucket, 'file_size_limit') + const bucket = await this.storage + .asSuperUser() + .findBucket({ bucketId: Bucket, columns: 'file_size_limit', signal }) const maxFileSize = await getFileSizeLimit(this.storage.db.tenantId, bucket?.file_size_limit) const uploader = new Uploader(this.storage.backend, this.storage.db, this.storage.location) @@ -585,7 +600,7 @@ export class S3ProtocolHandler { isUpsert: true, }) - const multipart = await this.shouldAllowPartUpload(UploadId, ContentLength, maxFileSize) + const multipart = await this.shouldAllowPartUpload(UploadId, ContentLength, maxFileSize, signal) if (signal?.aborted) { throw ERRORS.AbortedTerminate('UploadPart aborted') @@ -612,20 +627,20 @@ export class S3ProtocolHandler { body, new ByteLimitTransformStream(ContentLength), async (stream) => { - return this.storage.backend.uploadPart( - storageS3Bucket, - this.storage.location.getKeyLocation({ + return this.storage.backend.uploadPart({ + bucket: storageS3Bucket, + key: this.storage.location.getKeyLocation({ bucketId: Bucket as string, objectName: Key as string, tenantId: this.tenantId, }), - multipart.version, - UploadId, - PartNumber || 0, - stream as Readable, - ContentLength, - signal - ) + version: multipart.version, + uploadId: UploadId, + partNumber: PartNumber || 0, + body: stream as Readable, + length: ContentLength, + signal, + }) } ) 
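Taken together, the backend calls in these hunks (remove, removeMany, copy, stats, read, uploadPart, and the multipart methods) all move from positional arguments to a single parameter object carrying an optional AbortSignal. A rough composite of that surface, inferred from the call sites only, so field lists may be incomplete:

import type { Readable } from 'node:stream'

// Inferred adapter surface; the real S3Adapter declaration may differ.
interface StorageAdapterSketch {
  remove(p: { bucket: string; key: string; version?: string; signal?: AbortSignal }): Promise<void>
  removeMany(p: { bucket: string; prefixes: string[]; signal?: AbortSignal }): Promise<void>
  copy(p: {
    bucket: string
    source: string
    version?: string
    destination: string
    destinationVersion: string
    metadata?: Record<string, unknown>
    conditions?: unknown
    signal?: AbortSignal
  }): Promise<{ lastModified?: Date; eTag?: string }>
  stats(p: { bucket: string; key: string; version?: string; signal?: AbortSignal }): Promise<unknown>
  read(p: {
    bucket: string
    key: string
    version?: string
    headers?: { ifModifiedSince?: string; ifNoneMatch?: string; range?: string }
    signal?: AbortSignal
  }): Promise<unknown>
  uploadPart(p: {
    bucket: string
    key: string
    version: string
    uploadId: string
    partNumber: number
    body: Readable
    length: number
    signal?: AbortSignal
  }): Promise<{ ETag?: string }>
}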
@@ -637,6 +652,7 @@ export class S3ProtocolHandler { key: Key as string, bucket_id: Bucket, owner_id: this.owner, + signal, }) return { @@ -647,14 +663,17 @@ export class S3ProtocolHandler { } } catch (e) { try { + // No signal here - cleanup must complete await this.storage.db.asSuperUser().withTransaction(async (db) => { - const multipart = await db.findMultipartUpload(UploadId, 'in_progress_size', { - forUpdate: true, + const multipart = await db.findMultipartUpload({ + uploadId: UploadId, + columns: 'in_progress_size', + options: { forUpdate: true }, }) const diff = multipart.in_progress_size - ContentLength const signature = this.uploadSignature({ in_progress_size: diff }) - await db.updateMultipartUploadProgress(UploadId, diff, signature) + await db.updateMultipartUploadProgress({ uploadId: UploadId, progress: diff, signature }) }) } catch (e) { logSchema.error(logger, 'Failed to update multipart upload progress', { @@ -718,7 +737,7 @@ export class S3ProtocolHandler { * * @param command */ - async abortMultipartUpload(command: AbortMultipartUploadCommandInput) { + async abortMultipartUpload(command: AbortMultipartUploadCommandInput, signal?: AbortSignal) { const { Bucket, Key, UploadId } = command if (!UploadId) { @@ -735,7 +754,7 @@ export class S3ProtocolHandler { const multipart = await this.storage.db .asSuperUser() - .findMultipartUpload(UploadId, 'id,version') + .findMultipartUpload({ uploadId: UploadId, columns: 'id,version', signal }) const uploader = new Uploader(this.storage.backend, this.storage.db, this.storage.location) await uploader.canUpload({ @@ -745,18 +764,18 @@ export class S3ProtocolHandler { isUpsert: true, }) - await this.storage.backend.abortMultipartUpload( - storageS3Bucket, - this.storage.location.getKeyLocation({ + await this.storage.backend.abortMultipartUpload({ + bucket: storageS3Bucket, + key: this.storage.location.getKeyLocation({ bucketId: Bucket, objectName: Key, tenantId: this.tenantId, }), - UploadId, - multipart.version - ) + uploadId: UploadId, + version: multipart.version, + }) - await this.storage.db.asSuperUser().deleteMultipartUpload(UploadId) + await this.storage.db.asSuperUser().deleteMultipartUpload({ uploadId: UploadId, signal }) return {} } @@ -772,7 +791,7 @@ export class S3ProtocolHandler { throw ERRORS.MissingParameter('Bucket') } - const r = await this.storage.backend.headObject(Bucket, Key, undefined) + const r = await this.storage.backend.stats({ bucket: Bucket, key: Key, version: undefined }) return { headers: { @@ -791,9 +810,9 @@ export class S3ProtocolHandler { * Reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html * * @param command - * @param opts + * @param signal */ - async dbHeadObject(command: HeadObjectCommandInput) { + async dbHeadObject(command: HeadObjectCommandInput, signal?: AbortSignal) { const { Bucket, Key } = command if (!Bucket) { @@ -804,9 +823,11 @@ export class S3ProtocolHandler { throw ERRORS.MissingParameter('Bucket') } - const object = await this.storage - .from(Bucket) - .findObject(Key, 'metadata,user_metadata,created_at,updated_at') + const object = await this.storage.from(Bucket).findObject({ + objectName: Key, + columns: 'metadata,user_metadata,created_at,updated_at', + signal, + }) if (!object) { throw ERRORS.NoSuchKey(Key) @@ -832,7 +853,7 @@ export class S3ProtocolHandler { } } - async getObjectTagging(command: GetObjectTaggingCommandInput) { + async getObjectTagging(command: GetObjectTaggingCommandInput, signal?: AbortSignal) { const { Bucket, Key } = command if 
(!Bucket) { @@ -843,7 +864,9 @@ export class S3ProtocolHandler { throw ERRORS.MissingParameter('Key') } - const object = await this.storage.from(Bucket).findObject(Key, 'id') + const object = await this.storage + .from(Bucket) + .findObject({ objectName: Key, columns: 'id', signal }) if (!object) { throw ERRORS.NoSuchKey(Key) @@ -878,26 +901,28 @@ export class S3ProtocolHandler { let userMetadata: Record | undefined | null if (!options?.skipDbCheck) { - const object = await this.storage.from(bucket).findObject(key, 'version,user_metadata') + const object = await this.storage + .from(bucket) + .findObject({ objectName: key, columns: 'version,user_metadata', signal: options?.signal }) version = object.version userMetadata = object.user_metadata } - const response = await this.storage.backend.getObject( - this.storage.location.getRootLocation(), - this.storage.location.getKeyLocation({ + const response = await this.storage.backend.read({ + bucket: this.storage.location.getRootLocation(), + key: this.storage.location.getKeyLocation({ bucketId: bucket, objectName: key, tenantId: this.tenantId, }), version, - { + headers: { ifModifiedSince: command.IfModifiedSince?.toISOString(), ifNoneMatch: command.IfNoneMatch, range: command.Range, }, - options?.signal - ) + signal: options?.signal, + }) let metadataHeaders: Record = {} @@ -961,7 +986,7 @@ export class S3ProtocolHandler { * * @param command */ - async deleteObject(command: DeleteObjectCommandInput) { + async deleteObject(command: DeleteObjectCommandInput, signal?: AbortSignal) { const { Bucket, Key } = command if (!Bucket) { @@ -972,7 +997,7 @@ export class S3ProtocolHandler { throw ERRORS.MissingParameter('Key') } - await this.storage.from(Bucket).deleteObject(Key) + await this.storage.from(Bucket).deleteObject({ objectName: Key, signal }) return {} } @@ -984,7 +1009,7 @@ export class S3ProtocolHandler { * * @param command */ - async deleteObjects(command: DeleteObjectsCommandInput) { + async deleteObjects(command: DeleteObjectsCommandInput, signal?: AbortSignal) { const { Bucket, Delete } = command if (!Bucket) { @@ -1005,7 +1030,7 @@ export class S3ProtocolHandler { const deletedResult = await this.storage .from(Bucket) - .deleteObjects(Delete.Objects.map((o) => o.Key || '')) + .deleteObjects({ prefixes: Delete.Objects.map((o) => o.Key || ''), signal }) const deleted = Delete.Objects.filter((o) => deletedResult.find((d) => d.name === o.Key)).map( (o) => ({ Key: o.Key }) @@ -1036,7 +1061,7 @@ export class S3ProtocolHandler { * * @param command */ - async copyObject(command: CopyObjectCommandInput) { + async copyObject(command: CopyObjectCommandInput, signal?: AbortSignal) { const { Bucket, Key, CopySource } = command if (!Bucket) { @@ -1091,6 +1116,7 @@ export class S3ProtocolHandler { }, userMetadata: command.Metadata, copyMetadata: command.MetadataDirective === 'COPY', + signal, }) return { @@ -1110,19 +1136,22 @@ export class S3ProtocolHandler { * * @param command */ - async listParts(command: ListPartsCommandInput) { + async listParts(command: ListPartsCommandInput, signal?: AbortSignal) { if (!command.UploadId) { throw ERRORS.MissingParameter('UploadId') } // check if multipart exists - await this.storage.db.asSuperUser().findMultipartUpload(command.UploadId, 'id') + await this.storage.db + .asSuperUser() + .findMultipartUpload({ uploadId: command.UploadId, columns: 'id', signal }) const maxParts = Math.min(command.MaxParts || 1000, 1000) - let result = await this.storage.db.listParts(command.UploadId, { - afterPart: 
command.PartNumberMarker, - maxParts: maxParts + 1, + let result = await this.storage.db.listParts({ + uploadId: command.UploadId, + options: { afterPart: command.PartNumberMarker, maxParts: maxParts + 1 }, + signal, }) const isTruncated = result.length > maxParts @@ -1159,7 +1188,7 @@ export class S3ProtocolHandler { * * @param command UploadPartCopyCommandInput */ - async uploadPartCopy(command: UploadPartCopyCommandInput) { + async uploadPartCopy(command: UploadPartCopyCommandInput, signal?: AbortSignal) { const { Bucket, Key, UploadId, PartNumber, CopySource, CopySourceRange } = command if (!UploadId) { @@ -1204,11 +1233,12 @@ export class S3ProtocolHandler { } // Check if copy source exists - const copySource = await this.storage.db.findObject( - sourceBucketName, - sourceKey, - 'id,name,version,metadata' - ) + const copySource = await this.storage.db.findObject({ + bucketId: sourceBucketName, + objectName: sourceKey, + columns: 'id,name,version,metadata', + signal, + }) let copySize = copySource.metadata?.size || 0 let rangeBytes: { fromByte: number; toByte: number } | undefined = undefined @@ -1240,37 +1270,45 @@ export class S3ProtocolHandler { isUpsert: true, }) - const [destinationBucket] = await this.storage.db.asSuperUser().withTransaction(async (db) => { - return Promise.all([ - db.findBucketById(Bucket, 'file_size_limit'), - db.findBucketById(sourceBucketName, 'id'), - ]) - }) + const [destinationBucket] = await this.storage.db.asSuperUser().withTransaction( + async (db) => { + return Promise.all([ + db.findBucketById({ bucketId: Bucket, columns: 'file_size_limit' }), + db.findBucketById({ bucketId: sourceBucketName, columns: 'id' }), + ]) + }, + { signal } + ) const maxFileSize = await getFileSizeLimit( this.storage.db.tenantId, destinationBucket?.file_size_limit ) - const multipart = await this.shouldAllowPartUpload(UploadId, Number(copySize), maxFileSize) + const multipart = await this.shouldAllowPartUpload( + UploadId, + Number(copySize), + maxFileSize, + signal + ) - const uploadPart = await this.storage.backend.uploadPartCopy( - storageS3Bucket, - this.storage.location.getKeyLocation({ + const uploadPart = await this.storage.backend.uploadPartCopy({ + bucket: storageS3Bucket, + key: this.storage.location.getKeyLocation({ bucketId: Bucket, objectName: Key, tenantId: this.tenantId, }), - multipart.version, - UploadId, - PartNumber, - this.storage.location.getKeyLocation({ + version: multipart.version, + uploadId: UploadId, + partNumber: PartNumber, + sourceKey: this.storage.location.getKeyLocation({ bucketId: sourceBucketName, objectName: copySource.name, tenantId: this.tenantId, }), - copySource.version, - rangeBytes - ) + sourceKeyVersion: copySource.version, + bytesRange: rangeBytes, + }) await this.storage.db.asSuperUser().insertUploadPart({ upload_id: UploadId, @@ -1280,6 +1318,7 @@ export class S3ProtocolHandler { key: Key as string, bucket_id: Bucket, owner_id: this.owner, + signal, }) return { @@ -1319,33 +1358,37 @@ export class S3ProtocolHandler { protected async shouldAllowPartUpload( uploadId: string, contentLength: number, - maxFileSize: number + maxFileSize: number, + signal?: AbortSignal ) { - return this.storage.db.asSuperUser().withTransaction(async (db) => { - const multipart = await db.findMultipartUpload( - uploadId, - 'in_progress_size,version,upload_signature', - { - forUpdate: true, - } - ) + return this.storage.db.asSuperUser().withTransaction( + async (db) => { + const multipart = await db.findMultipartUpload({ + uploadId, + columns: 
'in_progress_size,version,upload_signature', + options: { + forUpdate: true, + }, + }) - const { progress } = this.decryptUploadSignature(multipart.upload_signature) + const { progress } = this.decryptUploadSignature(multipart.upload_signature) - if (progress !== multipart.in_progress_size) { - throw ERRORS.InvalidUploadSignature() - } + if (progress !== multipart.in_progress_size) { + throw ERRORS.InvalidUploadSignature() + } - const currentProgress = multipart.in_progress_size + contentLength + const currentProgress = multipart.in_progress_size + contentLength - if (currentProgress > maxFileSize) { - throw ERRORS.EntityTooLarge() - } + if (currentProgress > maxFileSize) { + throw ERRORS.EntityTooLarge() + } - const signature = this.uploadSignature({ in_progress_size: currentProgress }) - await db.updateMultipartUploadProgress(uploadId, currentProgress, signature) - return multipart - }) + const signature = this.uploadSignature({ in_progress_size: currentProgress }) + await db.updateMultipartUploadProgress({ uploadId, progress: currentProgress, signature }) + return multipart + }, + { signal } + ) } } diff --git a/src/storage/protocols/tus/file-store.ts b/src/storage/protocols/tus/file-store.ts index bd84fb1ff..4d8bee90a 100644 --- a/src/storage/protocols/tus/file-store.ts +++ b/src/storage/protocols/tus/file-store.ts @@ -3,7 +3,7 @@ import { Upload } from '@tus/server' import fsExtra from 'fs-extra' import path from 'path' import { Configstore } from '@tus/file-store' -import { FileBackend } from '../../backend' +import { FileAdapter } from '../../backend' type FileStoreOptions = { directory: string @@ -12,11 +12,11 @@ type FileStoreOptions = { } export class FileStore extends TusFileStore { - protected fileAdapter: FileBackend + protected fileAdapter: FileAdapter constructor(protected readonly options: FileStoreOptions) { super(options) - this.fileAdapter = new FileBackend() + this.fileAdapter = new FileAdapter() } async create(file: Upload): Promise { diff --git a/src/storage/protocols/tus/postgres-locker.ts b/src/storage/protocols/tus/postgres-locker.ts index 71f34b0f5..2bc1bbf66 100644 --- a/src/storage/protocols/tus/postgres-locker.ts +++ b/src/storage/protocols/tus/postgres-locker.ts @@ -119,7 +119,11 @@ export class PgLock implements Lock { while (!signal.aborted) { try { - await db.mustLockObject(uploadId.bucket, uploadId.objectName, uploadId.version) + await db.mustLockObject({ + bucketId: uploadId.bucket, + objectName: uploadId.objectName, + version: uploadId.version, + }) return true } catch (e) { if (e instanceof StorageBackendError && e.code === ErrorCode.ResourceLocked) { diff --git a/src/storage/renderer/asset.ts b/src/storage/renderer/asset.ts index 0b3c8351a..0e9caf184 100644 --- a/src/storage/renderer/asset.ts +++ b/src/storage/renderer/asset.ts @@ -12,16 +12,16 @@ export class AssetRenderer extends Renderer { } getAsset(request: FastifyRequest, options: RenderOptions) { - return this.backend.getObject( - options.bucket, - options.key, - options.version, - { + return this.backend.read({ + bucket: options.bucket, + key: options.key, + version: options.version, + headers: { ifModifiedSince: request.headers['if-modified-since'], ifNoneMatch: request.headers['if-none-match'], range: request.headers.range, }, - options.signal - ) + signal: options.signal, + }) } } diff --git a/src/storage/renderer/image.ts b/src/storage/renderer/image.ts index 1e5c924a4..335cc1a46 100644 --- a/src/storage/renderer/image.ts +++ b/src/storage/renderer/image.ts @@ -205,8 +205,16 @@ export class 
ImageRenderer extends Renderer { */ async getAsset(request: FastifyRequest, options: RenderOptions) { const [privateURL, headObj] = await Promise.all([ - this.backend.privateAssetUrl(options.bucket, options.key, options.version), - this.backend.headObject(options.bucket, options.key, options.version), + this.backend.tempPrivateAccessUrl({ + bucket: options.bucket, + key: options.key, + version: options.version, + }), + this.backend.stats({ + bucket: options.bucket, + key: options.key, + version: options.version, + }), ]) const transformations = ImageRenderer.applyTransformation(this.transformOptions || {}) const transformLimits = ImageRenderer.applyTransformationLimits(this.limits || {}) diff --git a/src/storage/scanner/scanner.ts b/src/storage/scanner/scanner.ts index fefdaa03e..babaa350e 100644 --- a/src/storage/scanner/scanner.ts +++ b/src/storage/scanner/scanner.ts @@ -160,13 +160,13 @@ export class ObjectScanner { break } - const storageObjects = await this.storage.db.listObjects( - bucket, - 'id,name,version,metadata', - 1000, - options.before, - nextToken - ) + const storageObjects = await this.storage.db.listObjects({ + bucketId: bucket, + columns: 'id,name,version,metadata', + limit: 1000, + before: options.before, + nextToken, + }) const dbKeys = storageObjects.map(({ name, version, metadata }) => { if (version) { @@ -305,10 +305,13 @@ export class ObjectScanner { break } - const result = await this.storage.backend.list(storageS3Bucket, { - prefix: prefix + '/', - nextToken, - beforeDate: options.before, + const result = await this.storage.backend.list({ + bucket: storageS3Bucket, + options: { + prefix: prefix + '/', + nextToken, + beforeDate: options.before, + }, }) if (result.keys.length === 0) { @@ -384,11 +387,11 @@ export class ObjectScanner { continue } - const dbObjects = await this.storage.db.findObjectVersions( - options.bucket, - localObjs, - 'name,version' - ) + const dbObjects = await this.storage.db.findObjectVersions({ + bucketId: options.bucket, + objectNames: localObjs, + columns: 'name,version', + }) const s3OrphanedKeys = tmpS3Objects.filter( (key) => @@ -459,10 +462,10 @@ export class ObjectScanner { for await (const dbObjects of orphans) { if (dbObjects.length > 0) { promises.push( - this.storage.db.deleteObjectVersions( - options.bucket, - dbObjects.filter((o) => o.version) as { name: string; version: string }[] - ) + this.storage.db.deleteObjectVersions({ + bucketId: options.bucket, + objectNames: dbObjects.filter((o) => o.version) as { name: string; version: string }[], + }) ) yield dbObjects diff --git a/src/storage/storage.ts b/src/storage/storage.ts index 62ca7941a..83aa4bf06 100644 --- a/src/storage/storage.ts +++ b/src/storage/storage.ts @@ -21,6 +21,47 @@ import { logger, logSchema } from '@internal/monitoring' const { emptyBucketMax } = getConfig() +export interface FindBucketInput { + bucketId: string + columns?: string + filters?: FindBucketFilters + signal?: AbortSignal +} + +export interface ListBucketsInput { + columns?: string + options?: ListBucketOptions + signal?: AbortSignal +} + +export interface UpdateBucketInput { + bucketId: string + data: Omit< + Parameters[0]['fields'], + 'file_size_limit' | 'allowed_mime_types' + > & { + fileSizeLimit?: number | string | null + allowedMimeTypes?: null | string[] + } + signal?: AbortSignal +} + +export interface DeleteBucketInput { + bucketId: string + signal?: AbortSignal +} + +export interface DeleteIcebergBucketInput { + name: string + signal?: AbortSignal +} + +export interface EmptyBucketInput { 
+ bucketId: string + before?: Date + signal?: AbortSignal +} + /** * Storage * interacts with the storage backend of choice and the database @@ -72,25 +113,25 @@ export class Storage { /** * Find a bucket by id - * @param id - * @param columns - * @param filters + * @param input */ - findBucket(id: string, columns = 'id', filters?: FindBucketFilters) { - return this.db.findBucketById(id, columns, filters) + findBucket(input: FindBucketInput) { + const { bucketId, columns = 'id', filters, signal } = input + return this.db.findBucketById({ bucketId, columns, filters, signal }) } /** * List buckets - * @param columns - * @param options + * @param input */ - listBuckets(columns = 'id', options?: ListBucketOptions) { - return this.db.listBuckets(columns, options) + listBuckets(input: ListBucketsInput = {}) { + const { columns = 'id', options, signal } = input + return this.db.listBuckets({ columns, options, signal }) } - listAnalyticsBuckets(columns = 'name', options?: ListBucketOptions) { - return this.db.listAnalyticsBuckets(columns, options) + listAnalyticsBuckets(input: ListBucketsInput = {}) { + const { columns = 'name', options, signal } = input + return this.db.listAnalyticsBuckets({ columns, options, signal }) } /** @@ -149,57 +190,52 @@ export class Storage { } async createIcebergBucket(data: Parameters[0]) { - return this.db.withTransaction(async (db) => { - const result = await db.createAnalyticsBucket(data) - - await BucketCreatedEvent.invokeOrSend( - { - bucketId: result.id, - bucketName: result.name, - type: 'ANALYTICS', - tenant: { - ref: db.tenantId, - host: db.tenantHost, - }, - }, - { - sendWhenError: (error) => { - if (error instanceof StorageBackendError) { - return false - } - - logSchema.error(logger, 'Failed to invoke BucketCreatedEvent handler', { - project: db.tenantId, - type: 'event', - error: error, - }) - return true + const { signal } = data + return this.db.withTransaction( + async (db) => { + const result = await db.createAnalyticsBucket(data) + + await BucketCreatedEvent.invokeOrSend( + { + bucketId: result.id, + bucketName: result.name, + type: 'ANALYTICS', + tenant: { + ref: db.tenantId, + host: db.tenantHost, + }, }, - } - ) + { + sendWhenError: (error) => { + if (error instanceof StorageBackendError) { + return false + } + + logSchema.error(logger, 'Failed to invoke BucketCreatedEvent handler', { + project: db.tenantId, + type: 'event', + error: error, + }) + return true + }, + } + ) - return result - }) + return result + }, + { signal } + ) } /** * Updates a bucket - * @param id - * @param data + * @param input */ - async updateBucket( - id: string, - data: Omit< - Parameters[1], - 'file_size_limit' | 'allowed_mime_types' - > & { - fileSizeLimit?: number | string | null - allowedMimeTypes?: null | string[] - } - ) { - mustBeValidBucketName(id) + async updateBucket(input: UpdateBucketInput) { + const { bucketId, data, signal } = input + mustBeValidBucketName(bucketId) - const bucketData: Parameters[1] = data + const bucketData: Parameters[0]['fields'] = data if (typeof data.fileSizeLimit === 'number' || typeof data.fileSizeLimit === 'string') { bucketData.file_size_limit = await this.parseMaxSizeLimit(data.fileSizeLimit) @@ -214,36 +250,41 @@ export class Storage { } bucketData.allowed_mime_types = data.allowedMimeTypes - return this.db.updateBucket(id, bucketData) + return this.db.updateBucket({ bucketId, fields: bucketData, signal }) } /** * Delete a specific bucket if empty - * @param id + * @param input */ - async deleteBucket(id: string) { - return 
this.db.withTransaction(async (db) => { - await db.asSuperUser().findBucketById(id, 'id', { - forUpdate: true, - }) - - const countObjects = await db.asSuperUser().countObjectsInBucket(id, 1) - - if (countObjects && countObjects > 0) { - throw ERRORS.BucketNotEmpty(id) - } + async deleteBucket(input: DeleteBucketInput) { + const { bucketId, signal } = input + return this.db.withTransaction( + async (db) => { + await db + .asSuperUser() + .findBucketById({ bucketId, columns: 'id', filters: { forUpdate: true } }) + + const countObjects = await db.asSuperUser().countObjectsInBucket({ bucketId, limit: 1 }) + + if (countObjects && countObjects > 0) { + throw ERRORS.BucketNotEmpty(bucketId) + } - const deleted = await db.deleteBucket(id) + const deleted = await db.deleteBucket({ bucketId }) - if (!deleted) { - throw ERRORS.NoSuchBucket(id) - } + if (!deleted) { + throw ERRORS.NoSuchBucket(bucketId) + } - return deleted - }) + return deleted + }, + { signal } + ) } - async deleteIcebergBucket(name: string) { + async deleteIcebergBucket(input: DeleteIcebergBucketInput) { + const { name, signal } = input if ( !(await tenantHasMigrations(this.db.tenantId, 'iceberg-catalog-flag-on-buckets')) || !(await tenantHasFeature(this.db.tenantId, 'icebergCatalog')) @@ -254,7 +295,7 @@ export class Storage { ) } - const catalog = await this.db.findAnalyticsBucketByName(name) + const catalog = await this.db.findAnalyticsBucketByName({ name, signal }) await BucketDeleted.invoke({ bucketId: catalog.id, @@ -268,13 +309,17 @@ export class Storage { /** * Deletes all files in a bucket - * @param bucketId - * @param before limit to files before the specified time (defaults to now) + * @param input */ - async emptyBucket(bucketId: string, before: Date = new Date()) { - await this.findBucket(bucketId, 'name') + async emptyBucket(input: EmptyBucketInput) { + const { bucketId, before = new Date(), signal } = input + await this.findBucket({ bucketId, columns: 'name', signal }) - const count = await this.db.countObjectsInBucket(bucketId, emptyBucketMax + 1) + const count = await this.db.countObjectsInBucket({ + bucketId, + limit: emptyBucketMax + 1, + signal, + }) if (count > emptyBucketMax) { throw ERRORS.UnableToEmptyBucket( bucketId, @@ -282,16 +327,25 @@ export class Storage { ) } - const objects = await this.db.listObjects(bucketId, 'id, name', 1, before) + const objects = await this.db.listObjects({ + bucketId, + columns: 'id, name', + limit: 1, + before, + signal, + }) if (!objects || objects.length < 1) { // the bucket is already empty return } // ensure delete permissions - await this.db.testPermission((db) => { - return db.deleteObject(bucketId, objects[0].id!) - }) + await this.db.testPermission( + (db) => { + return db.deleteObject({ bucketId, objectName: objects[0].id! 
}) + }, + { signal } + ) // use queue to recursively delete all objects created before the specified time await ObjectAdminDeleteAllBefore.send({ diff --git a/src/storage/uploader.ts b/src/storage/uploader.ts index c30ff4ace..f25f0374a 100644 --- a/src/storage/uploader.ts +++ b/src/storage/uploader.ts @@ -105,15 +105,15 @@ export class Uploader { objectName: request.objectName, }) - const objectMetadata = await this.backend.uploadObject( - storageS3Bucket, - s3Key, + const objectMetadata = await this.backend.write({ + bucket: storageS3Bucket, + key: s3Key, version, - file.body, - file.mimeType, - file.cacheControl, - request.signal - ) + body: file.body, + contentType: file.mimeType, + cacheControl: file.cacheControl, + signal: request.signal, + }) if (request.file.xRobotsTag) { objectMetadata.xRobotsTag = request.file.xRobotsTag @@ -170,19 +170,25 @@ export class Uploader { }) { try { const db = this.db.asSuperUser() - // Since we have finished uploading the file, - // even if the request is aborted now, we want to complete the DB transaction - const abController = new AbortController() - db.connection.setAbortSignal(abController.signal) + // No signal passed to withTransaction intentionally: + // after uploading the file, we want to complete the DB transaction + // even if the request is aborted return await db.withTransaction(async (db) => { - await db.waitObjectLock(bucketId, objectName, undefined, { + await db.waitObjectLock({ + bucketId, + objectName, timeout: 5000, }) - const currentObj = await db.findObject(bucketId, objectName, 'id, version, metadata', { - forUpdate: true, - dontErrorOnEmpty: true, + const currentObj = await db.findObject({ + bucketId, + objectName, + columns: 'id, version, metadata', + filters: { + forUpdate: true, + dontErrorOnEmpty: true, + }, }) const isNew = !Boolean(currentObj) diff --git a/src/test/bucket.test.ts b/src/test/bucket.test.ts index 70985f1dc..179864e85 100644 --- a/src/test/bucket.test.ts +++ b/src/test/bucket.test.ts @@ -1,7 +1,7 @@ 'use strict' import dotenv from 'dotenv' import app from '../app' -import { S3Backend } from '../storage/backend' +import { S3Adapter } from '../storage/backend' import { FastifyInstance } from 'fastify' import { getPostgresConnection, getServiceKeyUser } from '@internal/database' import { StorageKnexDB } from '@storage/database' @@ -13,11 +13,11 @@ const anonKey = process.env.ANON_KEY || '' let appInstance: FastifyInstance beforeAll(() => { - jest.spyOn(S3Backend.prototype, 'deleteObjects').mockImplementation(() => { + jest.spyOn(S3Adapter.prototype, 'removeMany').mockImplementation(() => { return Promise.resolve() }) - jest.spyOn(S3Backend.prototype, 'getObject').mockImplementation(() => { + jest.spyOn(S3Adapter.prototype, 'read').mockImplementation(() => { return Promise.resolve({ metadata: { httpStatusCode: 200, @@ -365,7 +365,7 @@ describe('testing public bucket functionality', () => { expect(publicResponse.headers['etag']).toBe('abc') expect(publicResponse.headers['last-modified']).toBe('Thu, 12 Aug 2021 16:00:00 GMT') - const mockGetObject = jest.spyOn(S3Backend.prototype, 'getObject') + const mockGetObject = jest.spyOn(S3Adapter.prototype, 'read') mockGetObject.mockRejectedValue({ $metadata: { httpStatusCode: 304, @@ -380,7 +380,7 @@ describe('testing public bucket functionality', () => { }, }) expect(notModifiedResponse.statusCode).toBe(304) - expect(mockGetObject.mock.calls[1][3]).toMatchObject({ + expect(mockGetObject.mock.calls[1][0].headers).toMatchObject({ ifModifiedSince: 'Thu, 12 Aug 2021 16:00:00 
GMT', ifNoneMatch: 'abc', }) @@ -466,16 +466,18 @@ describe('testing count objects in bucket', () => { }) it('should return correct object count', async () => { - await expect(db.countObjectsInBucket('bucket2')).resolves.toBe(27) + await expect(db.countObjectsInBucket({ bucketId: 'bucket2' })).resolves.toBe(27) }) it('should return limited object count', async () => { - await expect(db.countObjectsInBucket('bucket2', 22)).resolves.toBe(22) + await expect(db.countObjectsInBucket({ bucketId: 'bucket2', limit: 22 })).resolves.toBe(22) }) it('should return full object count if limit is greater than total', async () => { - await expect(db.countObjectsInBucket('bucket2', 999)).resolves.toBe(27) + await expect(db.countObjectsInBucket({ bucketId: 'bucket2', limit: 999 })).resolves.toBe(27) }) it('should return 0 object count if there are no objects with provided bucket id', async () => { - await expect(db.countObjectsInBucket('this-is-not-a-bucket-at-all', 999)).resolves.toBe(0) + await expect( + db.countObjectsInBucket({ bucketId: 'this-is-not-a-bucket-at-all', limit: 999 }) + ).resolves.toBe(0) }) }) diff --git a/src/test/common.ts b/src/test/common.ts index 704891c03..96d1b7639 100644 --- a/src/test/common.ts +++ b/src/test/common.ts @@ -1,6 +1,6 @@ import { HeadBucketCommand, S3Client } from '@aws-sdk/client-s3' import app from '../admin-app' -import { S3Backend } from '../storage/backend' +import { S3Adapter } from '../storage/backend' import { Queue } from '@internal/queue' import { isS3Error } from '@internal/errors' import path from 'path' @@ -36,7 +36,7 @@ export function useMockObject() { process.env = { ...ENV } jest.clearAllMocks() - jest.spyOn(S3Backend.prototype, 'getObject').mockResolvedValue({ + jest.spyOn(S3Adapter.prototype, 'read').mockResolvedValue({ metadata: { httpStatusCode: 200, size: 3746, @@ -50,7 +50,7 @@ export function useMockObject() { body: Buffer.from(''), }) - jest.spyOn(S3Backend.prototype, 'uploadObject').mockResolvedValue({ + jest.spyOn(S3Adapter.prototype, 'write').mockResolvedValue({ httpStatusCode: 200, size: 3746, mimetype: 'image/png', @@ -60,17 +60,17 @@ export function useMockObject() { contentLength: 3746, }) - jest.spyOn(S3Backend.prototype, 'copyObject').mockResolvedValue({ + jest.spyOn(S3Adapter.prototype, 'copy').mockResolvedValue({ httpStatusCode: 200, lastModified: new Date('Thu, 12 Aug 2021 16:00:00 GMT'), eTag: 'abc', }) - jest.spyOn(S3Backend.prototype, 'deleteObject').mockResolvedValue() + jest.spyOn(S3Adapter.prototype, 'remove').mockResolvedValue() - jest.spyOn(S3Backend.prototype, 'deleteObjects').mockResolvedValue() + jest.spyOn(S3Adapter.prototype, 'removeMany').mockResolvedValue() - jest.spyOn(S3Backend.prototype, 'headObject').mockResolvedValue({ + jest.spyOn(S3Adapter.prototype, 'stats').mockResolvedValue({ httpStatusCode: 200, size: 3746, mimetype: 'image/png', @@ -81,7 +81,7 @@ export function useMockObject() { }) jest - .spyOn(S3Backend.prototype, 'privateAssetUrl') + .spyOn(S3Adapter.prototype, 'tempPrivateAccessUrl') .mockResolvedValue(`local:///${projectRoot}/data/sadcat.jpg`) }) diff --git a/src/test/file-backend.test.ts b/src/test/file-backend.test.ts index 88fff4286..47817c684 100644 --- a/src/test/file-backend.test.ts +++ b/src/test/file-backend.test.ts @@ -4,7 +4,7 @@ import path from 'path' import { Readable } from 'stream' import * as xattr from 'fs-xattr' import { withOptionalVersion } from '../storage/backend/adapter' -import { FileBackend } from '../storage/backend/file' +import { FileAdapter } from 
'@storage/backend/file' import { getConfig } from '../config' jest.mock('fs-xattr', () => ({ @@ -32,16 +32,23 @@ describe('FileBackend xattr metadata', () => { process.env.FILE_STORAGE_BACKEND_PATH = tmpDir getConfig({ reload: true }) - const backend = new FileBackend() - const uploadId = await backend.createMultiPartUpload( - 'bucket', - 'key', - 'v1', - 'text/plain', - 'no-cache' - ) + const backend = new FileAdapter() + const uploadId = await backend.createMultiPartUpload({ + bucket: 'bucket', + key: 'key', + version: 'v1', + contentType: 'text/plain', + cacheControl: 'no-cache', + }) - await backend.uploadPart('bucket', 'key', 'v1', uploadId as string, 1, Readable.from('hello')) + await backend.uploadPart({ + bucket: 'bucket', + key: 'key', + version: 'v1', + uploadId: uploadId as string, + partNumber: 1, + body: Readable.from('hello'), + }) expect(xattr.set).toHaveBeenCalledWith( expect.any(String), @@ -82,14 +89,14 @@ describe('FileBackend xattr metadata', () => { process.env.FILE_STORAGE_BACKEND_PATH = tmpDir getConfig({ reload: true }) - const backend = new FileBackend() - const uploadId = await backend.createMultiPartUpload( - 'bucket', - 'key', - 'v1', - 'text/plain', - 'no-cache' - ) + const backend = new FileAdapter() + const uploadId = await backend.createMultiPartUpload({ + bucket: 'bucket', + key: 'key', + version: 'v1', + contentType: 'text/plain', + cacheControl: 'no-cache', + }) const partDir = path.join( tmpDir, @@ -110,29 +117,31 @@ describe('FileBackend xattr metadata', () => { return Promise.resolve(undefined) }) - uploadSpy = jest - .spyOn(backend, 'uploadObject') - .mockImplementation(async (_bucket, _key, _version, body) => { - await new Promise((resolve, reject) => { - body.on('error', reject) - body.on('end', resolve) - body.resume() - }) - return { - httpStatusCode: 200, - size: 5, - cacheControl: 'no-cache', - mimetype: 'text/plain', - eTag: '"final"', - lastModified: new Date(), - contentLength: 5, - } + uploadSpy = jest.spyOn(backend, 'write').mockImplementation(async (input) => { + await new Promise((resolve, reject) => { + ;(input.body as NodeJS.ReadableStream).on('error', reject) + ;(input.body as NodeJS.ReadableStream).on('end', resolve) + ;(input.body as NodeJS.ReadableStream).resume() }) + return { + httpStatusCode: 200, + size: 5, + cacheControl: 'no-cache', + mimetype: 'text/plain', + eTag: '"final"', + lastModified: new Date(), + contentLength: 5, + } + }) await expect( - backend.completeMultipartUpload('bucket', 'key', uploadId as string, 'v1', [ - { PartNumber: 1, ETag: 'part-etag' }, - ]) + backend.completeMultipartUpload({ + bucket: 'bucket', + key: 'key', + uploadId: uploadId as string, + version: 'v1', + parts: [{ PartNumber: 1, ETag: 'part-etag' }], + }) ).resolves.toMatchObject({ ETag: '"final"', }) diff --git a/src/test/object.test.ts b/src/test/object.test.ts index 189d6f5e2..2db6e533d 100644 --- a/src/test/object.test.ts +++ b/src/test/object.test.ts @@ -15,7 +15,7 @@ import { withDeleteEnabled } from './utils/storage' const { jwtSecret, serviceKeyAsync, tenantId } = getConfig() const anonKey = process.env.ANON_KEY || '' -const S3Backend = backends.S3Backend +const S3Backend = backends.S3Adapter let appInstance: FastifyInstance let tnx: Knex.Transaction | undefined @@ -64,7 +64,7 @@ describe('testing GET object', () => { expect(response.headers['etag']).toBe('abc') expect(response.headers['x-robots-tag']).toBe('none') expect(response.headers['last-modified']).toBe('Thu, 12 Aug 2021 16:00:00 GMT') - 
expect(S3Backend.prototype.getObject).toBeCalled() + expect(S3Backend.prototype.read).toBeCalled() }) test('check if RLS policies are respected: authenticated user is able to read authenticated resource without /authenticated prefix', async () => { @@ -78,11 +78,11 @@ describe('testing GET object', () => { expect(response.statusCode).toBe(200) expect(response.headers['etag']).toBe('abc') expect(response.headers['last-modified']).toBe('Thu, 12 Aug 2021 16:00:00 GMT') - expect(S3Backend.prototype.getObject).toBeCalled() + expect(S3Backend.prototype.read).toBeCalled() }) test('forward 304 and If-Modified-Since/If-None-Match headers', async () => { - const mockGetObject = jest.spyOn(S3Backend.prototype, 'getObject') + const mockGetObject = jest.spyOn(S3Backend.prototype, 'read') mockGetObject.mockRejectedValue({ $metadata: { httpStatusCode: 304, @@ -98,7 +98,7 @@ describe('testing GET object', () => { }, }) expect(response.statusCode).toBe(304) - expect(mockGetObject.mock.calls[0][3]).toMatchObject({ + expect(mockGetObject.mock.calls[0][0].headers).toMatchObject({ ifModifiedSince: 'Thu, 12 Aug 2021 16:00:00 GMT', ifNoneMatch: 'abc', }) @@ -180,7 +180,7 @@ describe('testing GET object', () => { authorization: `Bearer ${process.env.AUTHENTICATED_KEY}`, }, }) - expect(S3Backend.prototype.getObject).toBeCalled() + expect(S3Backend.prototype.read).toBeCalled() expect(response.headers).toEqual( expect.objectContaining({ 'content-disposition': `attachment;`, @@ -196,7 +196,7 @@ describe('testing GET object', () => { authorization: `Bearer ${process.env.AUTHENTICATED_KEY}`, }, }) - expect(S3Backend.prototype.getObject).toBeCalled() + expect(S3Backend.prototype.read).toBeCalled() expect(response.headers).toEqual( expect.objectContaining({ 'content-disposition': `attachment; filename=testname.png; filename*=UTF-8''testname.png`, @@ -213,7 +213,7 @@ describe('testing GET object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.getObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.read).not.toHaveBeenCalled() }) test('check if RLS policies are respected: anon user is not able to read authenticated resource without /authenticated prefix', async () => { @@ -225,7 +225,7 @@ describe('testing GET object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.getObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.read).not.toHaveBeenCalled() }) test('user is not able to read a resource without Auth header', async () => { @@ -234,7 +234,7 @@ describe('testing GET object', () => { url: '/object/authenticated/bucket2/authenticated/casestudy.png', }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.getObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.read).not.toHaveBeenCalled() }) test('user is not able to read a resource without Auth header without the /authenticated prefix', async () => { @@ -243,7 +243,7 @@ describe('testing GET object', () => { url: '/object/bucket2/authenticated/casestudy.png', }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.getObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.read).not.toHaveBeenCalled() }) test('return 400 when reading a non existent object', async () => { @@ -255,7 +255,7 @@ describe('testing GET object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.getObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.read).not.toHaveBeenCalled() }) test('return 400 when reading a non existent bucket', async 
() => { @@ -267,7 +267,7 @@ describe('testing GET object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.getObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.read).not.toHaveBeenCalled() }) }) @@ -291,7 +291,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toBeCalled() + expect(S3Backend.prototype.write).toBeCalled() expect(await response.json()).toEqual( expect.objectContaining({ Id: expect.any(String), @@ -315,7 +315,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() expect(response.body).toBe( JSON.stringify({ statusCode: '403', @@ -336,7 +336,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 400 when uploading to a non existent bucket', async () => { @@ -353,7 +353,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 400 when uploading to duplicate object', async () => { @@ -370,7 +370,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 200 when uploading an object within bucket max size limit', async () => { @@ -388,7 +388,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() }) test('return 400 when uploading an object that exceed bucket level max size', async () => { @@ -411,7 +411,7 @@ describe('testing POST object via multipart upload', () => { message: 'The object exceeded the maximum allowed size', statusCode: '413', }) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() }) test('successfully uploading an object with a the allowed mime-type', async () => { @@ -430,7 +430,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() }) test('successfully uploading an object with custom metadata using form data', async () => { @@ -456,7 +456,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() const client = await getSuperuserPostgrestClient() @@ -495,7 +495,7 @@ describe('testing POST object via multipart upload', () => { payload: file, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() const client = 
await getSuperuserPostgrestClient() @@ -574,7 +574,7 @@ describe('testing POST object via multipart upload', () => { message: `mime type image/png is not supported`, statusCode: '415', }) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('can create an empty folder when mime-type is set', async () => { @@ -593,7 +593,7 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() }) test('cannot create an empty folder with more than 0kb', async () => { @@ -635,7 +635,7 @@ describe('testing POST object via multipart upload', () => { message: `mime type thisisnotarealmimetype is not supported`, statusCode: '415', }) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 200 when upserting duplicate object', async () => { @@ -697,12 +697,12 @@ describe('testing POST object via multipart upload', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('should not add row to database if upload fails', async () => { // Mock S3 upload failure. - jest.spyOn(S3Backend.prototype, 'uploadObject').mockRejectedValue( + jest.spyOn(S3Backend.prototype, 'write').mockRejectedValue( StorageBackendError.fromError({ name: 'S3ServiceException', message: 'Unknown error', @@ -775,7 +775,7 @@ describe('testing POST object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toBeCalled() + expect(S3Backend.prototype.write).toBeCalled() expect(await response.json()).toEqual( expect.objectContaining({ Id: expect.any(String), @@ -801,7 +801,7 @@ describe('testing POST object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() expect(response.body).toBe( JSON.stringify({ statusCode: '403', @@ -827,7 +827,7 @@ describe('testing POST object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 400 when uploading to a non existent bucket', async () => { @@ -847,7 +847,7 @@ describe('testing POST object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 400 when uploading to duplicate object', async () => { @@ -867,7 +867,7 @@ describe('testing POST object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 200 when upserting duplicate object', async () => { @@ -888,7 +888,7 @@ describe('testing POST object via binary upload', () => { payload: fs.createReadStream(path), }) 
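The same rename runs mechanically through every assertion in these test files (`S3Backend` → `S3Adapter`, `uploadObject` → `write`, `getObject` → `read`, `deleteObjects` → `removeMany`). A sketch of the corresponding spy setup, with the resolved metadata shape copied from the `useMockObject` helper earlier in this diff; treat the exact import path as an assumption:

```ts
import { S3Adapter } from '../storage/backend'

// Spy on the renamed adapter method; the resolved value mirrors the
// ObjectMetadata stub used in src/test/common.ts above.
jest.spyOn(S3Adapter.prototype, 'write').mockResolvedValue({
  httpStatusCode: 200,
  size: 3746,
  mimetype: 'image/png',
  eTag: 'abc',
  cacheControl: 'no-cache',
  lastModified: new Date('Thu, 12 Aug 2021 16:00:00 GMT'),
  contentLength: 3746,
})
```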
expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() }) test('return 400 when exceeding file size limit', async () => { @@ -938,12 +938,12 @@ describe('testing POST object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('should not add row to database if upload fails', async () => { // Mock S3 upload failure. - jest.spyOn(S3Backend.prototype, 'uploadObject').mockRejectedValue( + jest.spyOn(S3Backend.prototype, 'write').mockRejectedValue( StorageBackendError.fromError({ name: 'S3ServiceException', message: 'Unknown error', @@ -1014,7 +1014,7 @@ describe('testing PUT object', () => { payload: form, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toBeCalled() + expect(S3Backend.prototype.write).toBeCalled() expect(await response.json()).toEqual( expect.objectContaining({ Id: expect.any(String), @@ -1039,7 +1039,7 @@ describe('testing PUT object', () => { expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('user is not able to update a resource without Auth header', async () => { @@ -1053,7 +1053,7 @@ describe('testing PUT object', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 400 when update to a non existent bucket', async () => { @@ -1071,7 +1071,7 @@ describe('testing PUT object', () => { }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 400 when updating a non existent key', async () => { @@ -1088,7 +1088,7 @@ describe('testing PUT object', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) }) @@ -1114,7 +1114,7 @@ describe('testing PUT object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toBeCalled() + expect(S3Backend.prototype.write).toBeCalled() expect(await response.json()).toEqual( expect.objectContaining({ Id: expect.any(String), @@ -1140,7 +1140,7 @@ describe('testing PUT object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('check if RLS policies are respected: user is not able to upload a resource without Auth header', async () => { @@ -1159,7 +1159,7 @@ describe('testing PUT object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 400 when updating an object in a non existent bucket', async () => { @@ -1179,7 +1179,7 @@ describe('testing PUT object via binary upload', () => { payload: fs.createReadStream(path), }) 
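The signature change also reshapes how mock calls are asserted, a pattern that recurs in both bucket.test.ts and the signed-URL tests below: what used to be a positional fourth argument is now a property of the single first argument. A brief sketch (the spy variable is illustrative; the header values come from these tests):

```ts
const mockRead = jest.spyOn(S3Adapter.prototype, 'read')

// Old positional form, removed in this diff:
//   expect(mockRead.mock.calls[0][3]).toMatchObject({ ifNoneMatch: 'abc' })

// New form: the conditional-request headers live on the input object.
expect(mockRead.mock.calls[0][0].headers).toMatchObject({
  ifModifiedSince: 'Thu, 12 Aug 2021 16:00:00 GMT',
  ifNoneMatch: 'abc',
})
```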
expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('return 400 when updating an object in a non existent key', async () => { @@ -1199,7 +1199,7 @@ describe('testing PUT object via binary upload', () => { payload: fs.createReadStream(path), }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) }) @@ -1221,7 +1221,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.copyObject).toBeCalled() + expect(S3Backend.prototype.copy).toBeCalled() const jsonResponse = await response.json() expect(jsonResponse.Key).toBe(`bucket2/authenticated/casestudy11.png`) }) @@ -1241,7 +1241,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.copyObject).toBeCalled() + expect(S3Backend.prototype.copy).toBeCalled() const jsonResponse = await response.json() expect(jsonResponse.Key).toBe(`bucket3/authenticated/casestudy11.png`) @@ -1263,7 +1263,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.copyObject).toBeCalled() + expect(S3Backend.prototype.copy).toBeCalled() const jsonResponse = response.json() expect(jsonResponse.Key).toBe(`bucket2/authenticated/${copiedKey}`) @@ -1307,7 +1307,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.copyObject).toBeCalled() + expect(S3Backend.prototype.copy).toBeCalled() const parsedBody = JSON.parse(response.body) expect(parsedBody.Key).toBe(`bucket2/authenticated/${copiedKey}`) @@ -1356,7 +1356,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.copyObject).toBeCalled() + expect(S3Backend.prototype.copy).toBeCalled() const jsonResponse = response.json() expect(jsonResponse.Key).toBe(`bucket2/authenticated/${copiedKey}`) @@ -1403,7 +1403,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() }) test('user is not able to copy a resource without Auth header', async () => { @@ -1417,7 +1417,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() }) test('return 400 when copy from a non existent bucket', async () => { @@ -1434,7 +1434,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() }) test('return 400 when copying a non existent key', async () => { @@ -1451,7 +1451,7 @@ describe('testing copy object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() }) }) @@ -1468,7 +1468,7 @@ describe('testing delete object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.deleteObject).toBeCalled() + expect(S3Backend.prototype.remove).toBeCalled() }) test('check if RLS policies are respected: anon user is not able to 
delete authenticated resource', async () => { @@ -1480,7 +1480,7 @@ describe('testing delete object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) test('user is not able to delete a resource without Auth header', async () => { @@ -1489,7 +1489,7 @@ describe('testing delete object', () => { url: '/object/bucket2/authenticated/delete1.png', }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) test('return 400 when delete from a non existent bucket', async () => { @@ -1501,7 +1501,7 @@ describe('testing delete object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) test('return 400 when deleting a non existent key', async () => { @@ -1513,7 +1513,7 @@ describe('testing delete object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) }) @@ -1533,7 +1533,7 @@ describe('testing deleting multiple objects', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.deleteObjects).toBeCalled() + expect(S3Backend.prototype.removeMany).toBeCalled() const result = JSON.parse(response.body) expect(result).toHaveLength(10001) @@ -1553,7 +1553,7 @@ describe('testing deleting multiple objects', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.deleteObjects).not.toHaveBeenCalled() + expect(S3Backend.prototype.removeMany).not.toHaveBeenCalled() const results = JSON.parse(response.body) expect(results).toHaveLength(0) }) @@ -1567,7 +1567,7 @@ describe('testing deleting multiple objects', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.deleteObjects).not.toHaveBeenCalled() + expect(S3Backend.prototype.removeMany).not.toHaveBeenCalled() }) test('deleting from a non existent bucket', async () => { @@ -1582,7 +1582,7 @@ describe('testing deleting multiple objects', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.deleteObjects).not.toHaveBeenCalled() + expect(S3Backend.prototype.removeMany).not.toHaveBeenCalled() }) test('deleting a non existent key', async () => { @@ -1597,7 +1597,7 @@ describe('testing deleting multiple objects', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.deleteObjects).not.toHaveBeenCalled() + expect(S3Backend.prototype.removeMany).not.toHaveBeenCalled() const results = JSON.parse(response.body) expect(results).toHaveLength(0) }) @@ -1614,7 +1614,7 @@ describe('testing deleting multiple objects', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.deleteObjects).toBeCalled() + expect(S3Backend.prototype.removeMany).toBeCalled() const results = JSON.parse(response.body) expect(results).toHaveLength(1) expect(results[0].name).toBe('authenticated/delete-multiple7.png') @@ -1859,7 +1859,7 @@ describe('testing uploading with generated signed upload URL', () => { payload: form, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() // check that row has neccessary data const db = await 
getSuperuserPostgrestClient() @@ -1899,7 +1899,7 @@ describe('testing uploading with generated signed upload URL', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('upload object with a malformed JWT', async () => { @@ -1916,7 +1916,7 @@ describe('testing uploading with generated signed upload URL', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) test('upload object with an expired JWT', async () => { @@ -1939,7 +1939,7 @@ describe('testing uploading with generated signed upload URL', () => { payload: form, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.uploadObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.write).not.toHaveBeenCalled() }) it('will allow overwriting a file when the generating a signed upload url with x-upsert:true', async () => { @@ -1984,7 +1984,7 @@ describe('testing uploading with generated signed upload URL', () => { payload: createUpload(), }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.uploadObject).toHaveBeenCalled() + expect(S3Backend.prototype.write).toHaveBeenCalled() }) it('will allow not be able overwriting a file when the generating a signed upload url without x-upsert header', async () => { @@ -2144,7 +2144,7 @@ describe('testing retrieving signed URL', () => { }) test('forward 304 and If-Modified-Since/If-None-Match headers', async () => { - const mockGetObject = jest.spyOn(S3Backend.prototype, 'getObject') + const mockGetObject = jest.spyOn(S3Backend.prototype, 'read') mockGetObject.mockRejectedValue({ $metadata: { httpStatusCode: 304, @@ -2161,7 +2161,7 @@ describe('testing retrieving signed URL', () => { }, }) expect(response.statusCode).toBe(304) - expect(mockGetObject.mock.calls[0][3]).toMatchObject({ + expect(mockGetObject.mock.calls[0][0].headers).toMatchObject({ ifModifiedSince: 'Thu, 12 Aug 2021 16:00:00 GMT', ifNoneMatch: 'abc', }) @@ -2221,8 +2221,8 @@ describe('testing move object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.copyObject).toHaveBeenCalled() - expect(S3Backend.prototype.deleteObjects).toHaveBeenCalled() + expect(S3Backend.prototype.copy).toHaveBeenCalled() + expect(S3Backend.prototype.removeMany).toHaveBeenCalled() }) test('can move objects across buckets respecting RLS', async () => { @@ -2240,8 +2240,8 @@ describe('testing move object', () => { }, }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.copyObject).toHaveBeenCalled() - expect(S3Backend.prototype.deleteObjects).toHaveBeenCalled() + expect(S3Backend.prototype.copy).toHaveBeenCalled() + expect(S3Backend.prototype.removeMany).toHaveBeenCalled() }) test('cannot move objects across buckets because RLS checks', async () => { @@ -2259,8 +2259,8 @@ describe('testing move object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() - expect(S3Backend.prototype.deleteObjects).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() + expect(S3Backend.prototype.removeMany).not.toHaveBeenCalled() }) test('check if RLS policies are respected: anon user is not able to move an authenticated object', async () => { @@ -2277,8 +2277,8 @@ describe('testing move object', () => { }, }) 
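These signed-URL tests drive the not-modified path by making the adapter reject with an AWS-SDK-style error envelope rather than a typed error. A compact sketch of the arrangement, grounded in the mock used in this file:

```ts
// Reject the way the AWS SDK does on a conditional-request miss; the
// storage routes are expected to forward the 304 rather than wrap it
// in a 500.
jest.spyOn(S3Adapter.prototype, 'read').mockRejectedValue({
  $metadata: { httpStatusCode: 304 },
})
```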
expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) test('user is not able to move an object without auth header', async () => { @@ -2292,8 +2292,8 @@ describe('testing move object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) test('user is not able to move an object in a non existent bucket', async () => { @@ -2310,8 +2310,8 @@ describe('testing move object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) test('user is not able to move an non existent object', async () => { @@ -2328,8 +2328,8 @@ describe('testing move object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) test('user is not able to move to an existing key', async () => { @@ -2346,8 +2346,8 @@ describe('testing move object', () => { }, }) expect(response.statusCode).toBe(400) - expect(S3Backend.prototype.copyObject).not.toHaveBeenCalled() - expect(S3Backend.prototype.deleteObject).not.toHaveBeenCalled() + expect(S3Backend.prototype.copy).not.toHaveBeenCalled() + expect(S3Backend.prototype.remove).not.toHaveBeenCalled() }) }) diff --git a/src/test/query-abort-signal.test.ts b/src/test/query-abort-signal.test.ts index e1edf5233..6a9f194c1 100644 --- a/src/test/query-abort-signal.test.ts +++ b/src/test/query-abort-signal.test.ts @@ -178,39 +178,3 @@ describe('Statement Timeout', () => { } }) }) - -describe('TenantConnection Abort Signal', () => { - it('should store and retrieve abort signal', async () => { - const superUser = await getServiceKeyUser(tenantId) - const poolManager = new PoolManager() - const pool = poolManager.getPool({ - tenantId, - isExternalPool: true, - maxConnections: 2, - dbUrl: databasePoolURL || databaseURL, - user: superUser, - superUser, - }) - - const connection = new TenantConnection(pool, { - tenantId, - isExternalPool: true, - maxConnections: 2, - dbUrl: databasePoolURL || databaseURL, - user: superUser, - superUser, - }) - - // Initially no signal - expect(connection.getAbortSignal()).toBeUndefined() - - // Set signal - const controller = new AbortController() - connection.setAbortSignal(controller.signal) - - // Should retrieve the same signal - expect(connection.getAbortSignal()).toBe(controller.signal) - - await pool.destroy() - }) -}) diff --git a/src/test/render-routes.test.ts b/src/test/render-routes.test.ts index 358f9dcf0..3d630e19b 100644 --- a/src/test/render-routes.test.ts +++ b/src/test/render-routes.test.ts @@ -2,7 +2,7 @@ import dotenv from 'dotenv' import fs from 'fs/promises' import { getConfig, JwksConfig, mergeConfig } from '../config' import app from '../app' -import { S3Backend } from 
'../storage/backend' +import { S3Adapter } from '../storage/backend' import path from 'path' import { ImageRenderer } from '../storage/renderer' import axios from 'axios' @@ -51,7 +51,7 @@ describe('image rendering routes', () => { }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.privateAssetUrl).toBeCalledTimes(1) + expect(S3Adapter.prototype.tempPrivateAccessUrl).toBeCalledTimes(1) expect(axiosSpy).toBeCalledWith( `/public/height:100/width:100/resizing_type:fill/plain/local:///${projectRoot}/data/sadcat.jpg`, { responseType: 'stream', signal: expect.any(AbortSignal) } @@ -69,7 +69,7 @@ describe('image rendering routes', () => { }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.privateAssetUrl).toBeCalledTimes(1) + expect(S3Adapter.prototype.tempPrivateAccessUrl).toBeCalledTimes(1) expect(axiosSpy).toBeCalledWith( `/public/height:100/width:100/resizing_type:fill/plain/local:///${projectRoot}/data/sadcat.jpg`, { responseType: 'stream', signal: expect.any(AbortSignal) } @@ -112,7 +112,7 @@ describe('image rendering routes', () => { }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.privateAssetUrl).toBeCalledTimes(1) + expect(S3Adapter.prototype.tempPrivateAccessUrl).toBeCalledTimes(1) expect(axiosSpy).toBeCalledWith( `/public/height:100/width:100/resizing_type:fit/plain/local:///${projectRoot}/data/sadcat.jpg`, { responseType: 'stream', signal: expect.any(AbortSignal) } @@ -159,7 +159,7 @@ describe('image rendering routes', () => { }) expect(response.statusCode).toBe(200) - expect(S3Backend.prototype.privateAssetUrl).toBeCalledTimes(1) + expect(S3Adapter.prototype.tempPrivateAccessUrl).toBeCalledTimes(1) expect(axiosSpy).toBeCalledWith( `/public/height:100/width:100/resizing_type:fit/plain/local:///${projectRoot}/data/sadcat.jpg`, { responseType: 'stream', signal: expect.any(AbortSignal) } @@ -171,7 +171,7 @@ describe('image rendering routes', () => { const url = '/render/image/sign/bucket2/authenticated/casestudy.png?token=' + token const response = await appInstance.inject({ method: 'GET', url }) - expect(S3Backend.prototype.privateAssetUrl).not.toHaveBeenCalled() + expect(S3Adapter.prototype.tempPrivateAccessUrl).not.toHaveBeenCalled() expect(response.statusCode).toBe(400) const body = response.json<{ error: string }>() expect(body.error).toBe('InvalidJWT') @@ -189,7 +189,7 @@ describe('image rendering routes', () => { const url = '/render/image/sign/bucket2/authenticated/casestudy.png?token=' + token const response = await appInstance.inject({ method: 'GET', url }) - expect(S3Backend.prototype.privateAssetUrl).not.toHaveBeenCalled() + expect(S3Adapter.prototype.tempPrivateAccessUrl).not.toHaveBeenCalled() expect(response.statusCode).toBe(400) const body = response.json<{ error: string }>() expect(body.error).toBe('InvalidSignature') diff --git a/src/test/s3-adapter.test.ts b/src/test/s3-adapter.test.ts index 8e6763b92..ed00fd943 100644 --- a/src/test/s3-adapter.test.ts +++ b/src/test/s3-adapter.test.ts @@ -1,6 +1,6 @@ 'use strict' -import { S3Backend } from '../storage/backend/s3/adapter' +import { S3Adapter } from '@storage/backend/s3/s3-adapter' import { S3Client } from '@aws-sdk/client-s3' import { Readable } from 'stream' @@ -38,12 +38,16 @@ describe('S3Backend', () => { }, }) - const backend = new S3Backend({ + const backend = new S3Adapter({ region: 'us-east-1', endpoint: 'http://localhost:9000', }) - const result = await backend.getObject('test-bucket', 'test-key', undefined) + const result = await backend.read({ + 
bucket: 'test-bucket', + key: 'test-key', + version: undefined, + }) expect(result.metadata.mimetype).toBe('application/octet-stream') expect(result.metadata.cacheControl).toBe('max-age=3600') @@ -64,12 +68,16 @@ describe('S3Backend', () => { }, }) - const backend = new S3Backend({ + const backend = new S3Adapter({ region: 'us-east-1', endpoint: 'http://localhost:9000', }) - const result = await backend.getObject('test-bucket', 'test-key', undefined) + const result = await backend.read({ + bucket: 'test-bucket', + key: 'test-key', + version: undefined, + }) expect(result.metadata.mimetype).toBe('image/png') }) diff --git a/src/test/scanner.test.ts b/src/test/scanner.test.ts index ab4cbbe3c..c5cdf897c 100644 --- a/src/test/scanner.test.ts +++ b/src/test/scanner.test.ts @@ -38,19 +38,23 @@ describe('ObjectScanner', () => { const numToDelete = 5 const objectsToDelete = result.slice(0, numToDelete) - await storage.database.deleteObjects( - bucket.id, - objectsToDelete.map((o) => o.name), - 'name' - ) + await storage.database.deleteObjects({ + bucketId: bucket.id, + objectNames: objectsToDelete.map((o) => o.name), + by: 'name', + }) const s3ToDelete = result.slice(5, 5 + numToDelete) - await storage.adapter.deleteObjects( - storageS3Bucket, - s3ToDelete.map((o) => `${tenantId}/${bucket.id}/${o.name}/${o.version}`) - ) + await storage.adapter.removeMany({ + bucket: storageS3Bucket, + prefixes: s3ToDelete.map((o) => `${tenantId}/${bucket.id}/${o.name}/${o.version}`), + }) - const objectsAfterDel = await storage.database.listObjects(bucket.id, 'name', 10000) + const objectsAfterDel = await storage.database.listObjects({ + bucketId: bucket.id, + columns: 'name', + limit: 10000, + }) expect(objectsAfterDel).toHaveLength(maxUploads - numToDelete) const orphaned = storage.scanner.listOrphaned(bucket.id, { @@ -105,13 +109,17 @@ describe('ObjectScanner', () => { const numToDelete = 10 const objectsToDelete = result.slice(0, numToDelete) - await storage.database.deleteObjects( - bucket.id, - objectsToDelete.map((o) => o.name), - 'name' - ) + await storage.database.deleteObjects({ + bucketId: bucket.id, + objectNames: objectsToDelete.map((o) => o.name), + by: 'name', + }) - const objectsAfterDel = await storage.database.listObjects(bucket.id, 'name', 10000) + const objectsAfterDel = await storage.database.listObjects({ + bucketId: bucket.id, + columns: 'name', + limit: 10000, + }) expect(objectsAfterDel).toHaveLength(maxUploads - numToDelete) const orphaned = storage.scanner.deleteOrphans(bucket.id, options) @@ -134,9 +142,12 @@ describe('ObjectScanner', () => { let nextToken = '' while (true) { - const s3Objects = await storage.adapter.list(storageS3Bucket, { - prefix: `${tenantId}/${bucket.id}`, - nextToken: nextToken, + const s3Objects = await storage.adapter.list({ + bucket: storageS3Bucket, + options: { + prefix: `${tenantId}/${bucket.id}`, + nextToken: nextToken, + }, }) s3ObjectAll.push(...s3Objects.keys) if (!s3Objects.nextToken) { @@ -151,8 +162,11 @@ describe('ObjectScanner', () => { expect(s3ObjectAll.length).not.toContain(objectsToDelete.map((o) => `${bucket.id}/${o.name}`)) // Check files are backed-up - const backupFiles = await storage.adapter.list(storageS3Bucket, { - prefix: `__internal/${tenantId}/${bucket.id}`, + const backupFiles = await storage.adapter.list({ + bucket: storageS3Bucket, + options: { + prefix: `__internal/${tenantId}/${bucket.id}`, + }, }) expect(backupFiles.keys).toHaveLength(numToDelete) diff --git a/src/test/tus.test.ts b/src/test/tus.test.ts index 
69871be8d..90cc61808 100644 --- a/src/test/tus.test.ts +++ b/src/test/tus.test.ts @@ -118,7 +118,7 @@ describe('Tus multipart', () => { expect(result).toEqual(true) - const dbAsset = await storage.from(bucket.id).findObject(objectName, '*') + const dbAsset = await storage.from(bucket.id).findObject({ objectName, columns: '*' }) expect(dbAsset).toEqual({ bucket_id: bucket.id, created_at: expect.any(Date), @@ -258,7 +258,7 @@ describe('Tus multipart', () => { const signedUpload = await storage .from(bucketName) - .signUploadObjectUrl(objectName, `${bucketName}/${objectName}`, 3600) + .signUploadObjectUrl({ objectName, url: `${bucketName}/${objectName}`, expiresIn: 3600 }) const result = await new Promise((resolve, reject) => { const upload = new tus.Upload(oneChunkFile, { @@ -292,7 +292,7 @@ describe('Tus multipart', () => { expect(result).toEqual(true) - const dbAsset = await storage.from(bucket.id).findObject(objectName, '*') + const dbAsset = await storage.from(bucket.id).findObject({ objectName, columns: '*' }) expect(dbAsset).toEqual({ bucket_id: bucket.id, created_at: expect.any(Date), @@ -329,9 +329,12 @@ describe('Tus multipart', () => { const objectName = randomUUID() + '-cat.jpeg' - const signedUpload = await storage - .from(bucketName) - .signUploadObjectUrl(objectName, `${bucketName}/${objectName}`, 3600, 'some-owner-id') + const signedUpload = await storage.from(bucketName).signUploadObjectUrl({ + objectName, + url: `${bucketName}/${objectName}`, + expiresIn: 3600, + owner: 'some-owner-id', + }) const result = await new Promise((resolve, reject) => { const upload = new tus.Upload(oneChunkFile, { @@ -361,7 +364,7 @@ describe('Tus multipart', () => { expect(result).toEqual(true) - const dbAsset = await storage.from(bucket.id).findObject(objectName, '*') + const dbAsset = await storage.from(bucket.id).findObject({ objectName, columns: '*' }) expect(dbAsset).toEqual({ bucket_id: bucket.id, created_at: expect.any(Date), @@ -397,7 +400,7 @@ describe('Tus multipart', () => { const signedUpload = await storage .from(bucketName) - .signUploadObjectUrl(objectName, `${bucketName}/${objectName}`, 1) + .signUploadObjectUrl({ objectName, url: `${bucketName}/${objectName}`, expiresIn: 1 }) await wait(2000)