Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ type StorageConfigType = {
storageFileEtagAlgorithm: 'mtime' | 'md5'
storageS3InternalTracesEnabled?: boolean
storageS3MaxSockets: number
storageS3BatchDeleteEnabled: boolean
storageS3DisableChecksum: boolean
storageS3UploadQueueSize: number
storageS3Bucket: string
Expand Down Expand Up @@ -364,6 +365,8 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType {
getOptionalConfigFromEnv('STORAGE_S3_MAX_SOCKETS', 'GLOBAL_S3_MAX_SOCKETS') || '200',
10
),
storageS3BatchDeleteEnabled:
getOptionalConfigFromEnv('STORAGE_S3_BATCH_DELETE_ENABLED') !== 'false',
Comment on lines +368 to +369
Copy link

Copilot AI Mar 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This introduces a new env var/config flag (STORAGE_S3_BATCH_DELETE_ENABLED). The PR description says "No new env vars, config flags" and "No configuration required"; please either update the PR/docs to reflect the new flag (and its default), or remove the flag and rely solely on runtime NotImplemented detection.

Copilot uses AI. Check for mistakes.
storageS3DisableChecksum: getOptionalConfigFromEnv('STORAGE_S3_DISABLE_CHECKSUM') === 'true',
storageS3UploadQueueSize:
envNumber(getOptionalConfigFromEnv('STORAGE_S3_UPLOAD_QUEUE_SIZE')) ?? 2,
Expand Down
2 changes: 2 additions & 0 deletions src/http/routes/tus/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ const {
storageS3ForcePathStyle,
storageS3Region,
storageS3ClientTimeout,
storageS3BatchDeleteEnabled,
tusUrlExpiryMs,
tusPath,
tusPartSize,
Expand Down Expand Up @@ -117,6 +118,7 @@ function createTusServer(
maxRetries: 10,
retryDelayMs: 250,
renewalIntervalMs: 10 * 1000, // 10 seconds
batchDeleteEnabled: storageS3BatchDeleteEnabled,
s3Client: new S3Client({
requestHandler: new NodeHttpHandler({
...agent,
Expand Down
25 changes: 25 additions & 0 deletions src/storage/backend/s3/adapter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,12 @@ export class S3Backend implements StorageBackendAdapter {
* @param prefixes
*/
async deleteObjects(bucket: string, prefixes: string[]): Promise<void> {
const { storageS3BatchDeleteEnabled } = getConfig()

try {
if (!storageS3BatchDeleteEnabled) {
throw Object.assign(new Error('NotImplemented'), { name: 'NotImplemented' })
}
const s3Prefixes = prefixes.map((ele) => {
return { Key: ele }
})
Expand All @@ -343,6 +348,26 @@ export class S3Backend implements StorageBackendAdapter {
})
await this.client.send(command)
} catch (e) {
// Some S3-compatible backends (e.g. GCS) do not support DeleteObjects; fall back to individual deletes
const code = (e as { Code?: string; name?: string })?.Code ?? (e as { name?: string })?.name
if (code === 'NotImplemented') {
const results = await Promise.allSettled(
prefixes.map((key) =>
this.client.send(new DeleteObjectCommand({ Bucket: bucket, Key: key }))
)
)
for (const result of results) {
if (result.status === 'rejected') {
const errCode =
(result.reason as { Code?: string })?.Code ??
(result.reason as { name?: string })?.name
if (errCode !== 'NoSuchKey') {
throw StorageBackendError.fromError(result.reason)
}
}
}
return
}
throw StorageBackendError.fromError(e)
}
}
Expand Down
29 changes: 27 additions & 2 deletions src/storage/protocols/tus/s3-locker.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ export interface S3LockerOptions {
renewalIntervalMs?: number
maxRetries?: number
retryDelayMs?: number
batchDeleteEnabled?: boolean
logger?: Pick<Console, 'log' | 'warn' | 'error'>
}

Expand All @@ -37,6 +38,7 @@ export class S3Locker implements Locker {
private readonly renewalIntervalMs: number
private readonly maxRetries: number
private readonly retryDelayMs: number
private readonly batchDeleteEnabled: boolean
private readonly logger: Pick<Console, 'log' | 'warn' | 'error'>
private readonly notifier: LockNotifier

Expand All @@ -49,6 +51,7 @@ export class S3Locker implements Locker {
this.renewalIntervalMs = options.renewalIntervalMs || 10000 // 10 seconds
this.maxRetries = options.maxRetries || 10
this.retryDelayMs = options.retryDelayMs || 500
this.batchDeleteEnabled = options.batchDeleteEnabled !== false // default true
this.logger = options.logger || console

// Validate configuration
Expand Down Expand Up @@ -245,6 +248,9 @@ export class S3Locker implements Locker {
const batch = expiredLocks.slice(i, i + 1000)

try {
if (!this.batchDeleteEnabled) {
throw Object.assign(new Error('NotImplemented'), { name: 'NotImplemented' })
}
await this.s3Client.send(
new DeleteObjectsCommand({
Bucket: this.bucket,
Expand All @@ -255,8 +261,27 @@ export class S3Locker implements Locker {
})
)
this.logger.log(`Cleaned up ${batch.length} expired locks in batch`)
} catch (error) {
this.logger.warn(`Failed to delete batch of expired locks:`, error)
} catch (error: any) {
const code = error?.Code ?? error?.name
if (code === 'NotImplemented') {
const results = await Promise.allSettled(
batch.map((key) =>
this.s3Client.send(new DeleteObjectCommand({ Bucket: this.bucket, Key: key }))
)
)
for (const result of results) {
if (result.status === 'rejected') {
const errCode =
(result.reason as { Code?: string })?.Code ??
(result.reason as { name?: string })?.name
if (errCode !== 'NoSuchKey') {
this.logger.warn(`Failed to delete expired lock in fallback:`, result.reason)
}
}
}
} else {
this.logger.warn(`Failed to delete batch of expired locks:`, error)
}
}
}
}
Expand Down
112 changes: 109 additions & 3 deletions src/test/s3-adapter.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import { S3Client } from '@aws-sdk/client-s3'
import { Readable } from 'stream'
import * as config from '../config'
import { S3Backend } from '../storage/backend/s3/adapter'

jest.mock('@aws-sdk/client-s3', () => {
Expand All @@ -20,9 +21,9 @@ describe('S3Backend', () => {
beforeEach(() => {
jest.clearAllMocks()
mockSend = jest.fn()
;(S3Client as jest.Mock).mockImplementation(() => ({
send: mockSend,
}))
; (S3Client as jest.Mock).mockImplementation(() => ({
send: mockSend,
}))
Comment on lines +24 to +26
Copy link

Copilot AI Mar 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This beforeEach block has odd indentation/leading semicolon placement (mockSend = jest.fn() followed by an indented ; (S3Client …)). This does not look Prettier-formatted and may fail prettier --check; please reformat (either remove the leading semicolon or use the standard ;(…) pattern).

Suggested change
; (S3Client as jest.Mock).mockImplementation(() => ({
send: mockSend,
}))
;(S3Client as jest.Mock).mockImplementation(() => ({
send: mockSend,
}))

Copilot uses AI. Check for mistakes.
})

describe('getObject', () => {
Expand Down Expand Up @@ -74,4 +75,109 @@ describe('S3Backend', () => {
expect(result.metadata.mimetype).toBe('image/png')
})
})

// Covers S3Backend.deleteObjects: the batch DeleteObjectsCommand happy path,
// the per-object DeleteObjectCommand fallback for S3-compatible backends that
// reject batch deletes with NotImplemented (e.g. GCS interop), and the
// storageS3BatchDeleteEnabled config flag that skips the batch attempt entirely.
describe('deleteObjects', () => {
test('should use batch DeleteObjectsCommand when backend supports it', async () => {
// Backend accepts the batch call; exactly one send() is expected.
mockSend.mockResolvedValue({
Deleted: [{ Key: 'file1.txt' }, { Key: 'file2.txt' }],
$metadata: { httpStatusCode: 200 },
})

const backend = new S3Backend({ region: 'us-east-1', endpoint: 'http://localhost:9000' })
await backend.deleteObjects('test-bucket', ['file1.txt', 'file2.txt'])

expect(mockSend).toHaveBeenCalledTimes(1)
expect(mockSend.mock.calls[0][0].constructor.name).toBe('DeleteObjectsCommand')
})

test('should fall back to individual DeleteObjectCommands when backend returns NotImplemented', async () => {
// First send (the batch) rejects with NotImplemented; every later send
// (the per-key fallbacks) resolves. Error carries Code, matching the
// adapter's `Code ?? name` detection.
const err = Object.assign(new Error('NotImplemented'), { Code: 'NotImplemented' })
mockSend
.mockRejectedValueOnce(err)
.mockResolvedValue({ $metadata: { httpStatusCode: 204 } })

const backend = new S3Backend({ region: 'us-east-1', endpoint: 'http://localhost:9000' })
await backend.deleteObjects('test-bucket', ['file1.txt', 'file2.txt'])

// 1 failed batch call + 2 individual deletes.
expect(mockSend).toHaveBeenCalledTimes(3)
expect(mockSend.mock.calls[0][0].constructor.name).toBe('DeleteObjectsCommand')
expect(mockSend.mock.calls[1][0].constructor.name).toBe('DeleteObjectCommand')
expect(mockSend.mock.calls[2][0].constructor.name).toBe('DeleteObjectCommand')
})

test('should ignore NoSuchKey errors in the individual fallback', async () => {
// Fallback path: one key deletes fine, the other is already gone.
// NoSuchKey must be treated as success (delete is idempotent).
const notImplemented = Object.assign(new Error('NotImplemented'), { Code: 'NotImplemented' })
const noSuchKey = Object.assign(new Error('NoSuchKey'), { Code: 'NoSuchKey' })
mockSend
.mockRejectedValueOnce(notImplemented)
.mockResolvedValueOnce({ $metadata: { httpStatusCode: 204 } })
.mockRejectedValueOnce(noSuchKey)

const backend = new S3Backend({ region: 'us-east-1', endpoint: 'http://localhost:9000' })
await expect(
backend.deleteObjects('test-bucket', ['file1.txt', 'file2.txt'])
).resolves.toBeUndefined()
})

test('should throw when an individual fallback delete fails with a real error', async () => {
// Any non-NoSuchKey failure in the fallback must surface to the caller
// (wrapped as StorageBackendError by the adapter).
const notImplemented = Object.assign(new Error('NotImplemented'), { Code: 'NotImplemented' })
const accessDenied = Object.assign(new Error('AccessDenied'), { Code: 'AccessDenied' })
mockSend
.mockRejectedValueOnce(notImplemented)
.mockResolvedValueOnce({ $metadata: { httpStatusCode: 204 } })
.mockRejectedValueOnce(accessDenied)

const backend = new S3Backend({ region: 'us-east-1', endpoint: 'http://localhost:9000' })
await expect(
backend.deleteObjects('test-bucket', ['file1.txt', 'file2.txt'])
).rejects.toThrow()
})

test('should rethrow errors that are not NotImplemented', async () => {
// Batch failure with an unrelated code must NOT trigger the fallback:
// a single send() call, then the error propagates.
const err = Object.assign(new Error('AccessDenied'), { Code: 'AccessDenied' })
mockSend.mockRejectedValue(err)

const backend = new S3Backend({ region: 'us-east-1', endpoint: 'http://localhost:9000' })
await expect(backend.deleteObjects('test-bucket', ['file1.txt'])).rejects.toThrow()
expect(mockSend).toHaveBeenCalledTimes(1)
})

test('should skip DeleteObjectsCommand and use individual deletes when batchDeleteEnabled is false', async () => {
// Config flag off: the adapter synthesizes NotImplemented internally and
// goes straight to per-key deletes without a network round-trip for the batch.
// getConfig is spied so only storageS3BatchDeleteEnabled matters here.
const getConfigSpy = jest
.spyOn(config, 'getConfig')
.mockReturnValue({ storageS3BatchDeleteEnabled: false } as any)
mockSend.mockResolvedValue({ $metadata: { httpStatusCode: 204 } })

// try/finally so the spy is restored even if an expectation throws,
// keeping later tests isolated from the mocked config.
try {
const backend = new S3Backend({ region: 'us-east-1', endpoint: 'http://localhost:9000' })
await backend.deleteObjects('test-bucket', ['file1.txt', 'file2.txt'])

// No DeleteObjectsCommand call — straight to individual deletes
expect(mockSend).toHaveBeenCalledTimes(2)
expect(mockSend.mock.calls[0][0].constructor.name).toBe('DeleteObjectCommand')
expect(mockSend.mock.calls[1][0].constructor.name).toBe('DeleteObjectCommand')
} finally {
getConfigSpy.mockRestore()
}
})

test('should ignore NoSuchKey when batchDeleteEnabled is false', async () => {
// Same NoSuchKey-is-success semantics as the NotImplemented fallback,
// but reached via the config flag instead of a backend error.
const getConfigSpy = jest
.spyOn(config, 'getConfig')
.mockReturnValue({ storageS3BatchDeleteEnabled: false } as any)
const noSuchKey = Object.assign(new Error('NoSuchKey'), { Code: 'NoSuchKey' })
mockSend
.mockResolvedValueOnce({ $metadata: { httpStatusCode: 204 } })
.mockRejectedValueOnce(noSuchKey)

try {
const backend = new S3Backend({ region: 'us-east-1', endpoint: 'http://localhost:9000' })
await expect(
backend.deleteObjects('test-bucket', ['file1.txt', 'file2.txt'])
).resolves.toBeUndefined()
} finally {
getConfigSpy.mockRestore()
}
})
})
})
Loading