Skip to content
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 7 additions & 4 deletions src/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ type StorageConfigType = {
storageFileEtagAlgorithm: 'mtime' | 'md5'
storageS3InternalTracesEnabled?: boolean
storageS3MaxSockets: number
storageS3BatchDeleteEnabled: boolean
storageS3DisableChecksum: boolean
storageS3UploadQueueSize: number
storageS3Bucket: string
Expand Down Expand Up @@ -265,8 +266,8 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType {
tenantId: isMultitenant
? ''
: getOptionalConfigFromEnv('PROJECT_REF') ||
getOptionalConfigFromEnv('TENANT_ID') ||
'storage-single-tenant',
getOptionalConfigFromEnv('TENANT_ID') ||
'storage-single-tenant',

// Server
region: getOptionalConfigFromEnv('SERVER_REGION', 'REGION') || 'not-specified',
Expand Down Expand Up @@ -364,6 +365,8 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType {
getOptionalConfigFromEnv('STORAGE_S3_MAX_SOCKETS', 'GLOBAL_S3_MAX_SOCKETS') || '200',
10
),
storageS3BatchDeleteEnabled:
getOptionalConfigFromEnv('STORAGE_S3_BATCH_DELETE_ENABLED') !== 'false',
Comment on lines +368 to +369
Copy link

Copilot AI Mar 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This introduces a new env var/config flag (STORAGE_S3_BATCH_DELETE_ENABLED). The PR description says "No new env vars, config flags" and "No configuration required"; please either update the PR/docs to reflect the new flag (and its default), or remove the flag and rely solely on runtime NotImplemented detection.

Copilot uses AI. Check for mistakes.
storageS3DisableChecksum: getOptionalConfigFromEnv('STORAGE_S3_DISABLE_CHECKSUM') === 'true',
storageS3UploadQueueSize:
envNumber(getOptionalConfigFromEnv('STORAGE_S3_UPLOAD_QUEUE_SIZE')) ?? 2,
Expand Down Expand Up @@ -539,12 +542,12 @@ export function getConfig(options?: { reload?: boolean }): StorageConfigType {
size: {
min: parseInt(
getOptionalConfigFromEnv('IMAGE_TRANSFORMATION_LIMIT_MIN_SIZE', 'IMG_LIMITS_MIN_SIZE') ||
'1',
'1',
10
),
max: parseInt(
getOptionalConfigFromEnv('IMAGE_TRANSFORMATION_LIMIT_MAX_SIZE', 'IMG_LIMITS_MAX_SIZE') ||
'2000',
'2000',
10
),
},
Expand Down
34 changes: 18 additions & 16 deletions src/http/routes/tus/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ const {
storageS3ForcePathStyle,
storageS3Region,
storageS3ClientTimeout,
storageS3BatchDeleteEnabled,
tusUrlExpiryMs,
tusPath,
tusPartSize,
Expand Down Expand Up @@ -117,6 +118,7 @@ function createTusServer(
maxRetries: 10,
retryDelayMs: 250,
renewalIntervalMs: 10 * 1000, // 10 seconds
batchDeleteEnabled: storageS3BatchDeleteEnabled,
s3Client: new S3Client({
requestHandler: new NodeHttpHandler({
...agent,
Expand Down Expand Up @@ -256,14 +258,14 @@ const authenticatedRoutes = fastifyPlugin(
})

fastify.addHook('preHandler', async (req) => {
;(req.raw as MultiPartRequest).log = req.log
;(req.raw as MultiPartRequest).upload = {
storage: req.storage,
owner: req.owner,
tenantId: req.tenantId,
db: req.db,
isUpsert: req.headers['x-upsert'] === 'true',
}
; (req.raw as MultiPartRequest).log = req.log
; (req.raw as MultiPartRequest).upload = {
storage: req.storage,
owner: req.owner,
tenantId: req.tenantId,
db: req.db,
isUpsert: req.headers['x-upsert'] === 'true',
}
Copy link

Copilot AI Mar 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These two assignments are formatted in a way that will likely fail the repo’s prettier --check (extra indentation before the second statement and a space after the leading semicolon). Please run Prettier / adjust to the standard ;(expr) style used elsewhere so CI lint passes.

Copilot uses AI. Check for mistakes.
})

fastify.post(
Expand Down Expand Up @@ -358,14 +360,14 @@ const publicRoutes = fastifyPlugin(
)

fastify.addHook('preHandler', async (req) => {
;(req.raw as MultiPartRequest).log = req.log
;(req.raw as MultiPartRequest).upload = {
storage: req.storage,
owner: req.owner,
tenantId: req.tenantId,
db: req.db,
isUpsert: req.headers['x-upsert'] === 'true',
}
; (req.raw as MultiPartRequest).log = req.log
; (req.raw as MultiPartRequest).upload = {
storage: req.storage,
owner: req.owner,
tenantId: req.tenantId,
db: req.db,
isUpsert: req.headers['x-upsert'] === 'true',
}
Copy link

Copilot AI Mar 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same formatting issue here as above: extra indentation and ; ( instead of the repo’s usual ;(…) style. This is likely to fail prettier --check in CI; please reformat with Prettier.

Suggested change
; (req.raw as MultiPartRequest).log = req.log
; (req.raw as MultiPartRequest).upload = {
storage: req.storage,
owner: req.owner,
tenantId: req.tenantId,
db: req.db,
isUpsert: req.headers['x-upsert'] === 'true',
}
;(req.raw as MultiPartRequest).log = req.log
;(req.raw as MultiPartRequest).upload = {
storage: req.storage,
owner: req.owner,
tenantId: req.tenantId,
db: req.db,
isUpsert: req.headers['x-upsert'] === 'true',
}

Copilot uses AI. Check for mistakes.
})

fastify.options(
Expand Down
62 changes: 49 additions & 13 deletions src/storage/backend/s3/adapter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -185,14 +185,14 @@ export class S3Backend implements StorageBackendAdapter {
const metadata = hasUploadedBytes
? await this.headObject(bucketName, key, version)
: {
httpStatusCode: 200,
eTag: data.ETag || '',
mimetype: contentType,
lastModified: new Date(),
size: 0,
contentLength: 0,
contentRange: undefined,
}
httpStatusCode: 200,
eTag: data.ETag || '',
mimetype: contentType,
lastModified: new Date(),
size: 0,
contentLength: 0,
contentRange: undefined,
}

return {
httpStatusCode: data.$metadata.httpStatusCode || metadata.httpStatusCode,
Expand Down Expand Up @@ -330,6 +330,13 @@ export class S3Backend implements StorageBackendAdapter {
* @param prefixes
*/
async deleteObjects(bucket: string, prefixes: string[]): Promise<void> {
const { storageS3BatchDeleteEnabled } = getConfig()

if (!storageS3BatchDeleteEnabled) {
// Batch delete explicitly disabled (e.g. GCS S3-interop mode)
return this.deleteObjectsIndividually(bucket, prefixes)
}

try {
const s3Prefixes = prefixes.map((ele) => {
return { Key: ele }
Expand All @@ -343,10 +350,39 @@ export class S3Backend implements StorageBackendAdapter {
})
await this.client.send(command)
} catch (e) {
// Some S3-compatible backends (e.g. GCS) do not support DeleteObjects; fall back to individual deletes
const code = (e as { Code?: string; name?: string })?.Code ?? (e as { name?: string })?.name
if (code === 'NotImplemented') {
return this.deleteObjectsIndividually(bucket, prefixes)
}
throw StorageBackendError.fromError(e)
}
}

/**
 * Fallback deletion path: issues one DeleteObject request per key, all in
 * parallel. Used when batch delete is disabled via config or when the
 * backend answered NotImplemented to DeleteObjects (e.g. GCS S3 interop).
 * NoSuchKey rejections are ignored — the object is already gone; any other
 * failure is surfaced as a StorageBackendError.
 * @param bucket
 * @param prefixes
 */
private async deleteObjectsIndividually(bucket: string, prefixes: string[]): Promise<void> {
  const outcomes = await Promise.allSettled(
    prefixes.map((objectKey) =>
      this.client.send(new DeleteObjectCommand({ Bucket: bucket, Key: objectKey }))
    )
  )

  for (const outcome of outcomes) {
    if (outcome.status !== 'rejected') {
      continue
    }
    const reason = outcome.reason as { Code?: string; name?: string }
    const code = reason?.Code ?? reason?.name
    if (code === 'NoSuchKey') {
      // Object was already deleted — treat as success.
      continue
    }
    throw StorageBackendError.fromError(outcome.reason)
  }
}

/**
* Returns metadata information of a specific object
* @param bucket
Expand Down Expand Up @@ -438,9 +474,9 @@ export class S3Backend implements StorageBackendAdapter {
ContentType: contentType,
Metadata: metadata
? {
...metadata,
Version: version || '',
}
...metadata,
Version: version || '',
}
: undefined,
})

Expand Down Expand Up @@ -519,8 +555,8 @@ export class S3Backend implements StorageBackendAdapter {
parts.length === 0
? undefined
: {
Parts: parts,
},
Parts: parts,
},
})

const response = await this.client.send(completeUpload)
Expand Down
49 changes: 46 additions & 3 deletions src/storage/protocols/tus/s3-locker.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ export interface S3LockerOptions {
renewalIntervalMs?: number
maxRetries?: number
retryDelayMs?: number
/** When false, skips DeleteObjectsCommand in zombie-lock cleanup and uses individual deletes. Default: true */
batchDeleteEnabled?: boolean
logger?: Pick<Console, 'log' | 'warn' | 'error'>
}

Expand All @@ -37,6 +39,7 @@ export class S3Locker implements Locker {
private readonly renewalIntervalMs: number
private readonly maxRetries: number
private readonly retryDelayMs: number
private readonly batchDeleteEnabled: boolean
private readonly logger: Pick<Console, 'log' | 'warn' | 'error'>
private readonly notifier: LockNotifier

Expand All @@ -49,6 +52,7 @@ export class S3Locker implements Locker {
this.renewalIntervalMs = options.renewalIntervalMs || 10000 // 10 seconds
this.maxRetries = options.maxRetries || 10
this.retryDelayMs = options.retryDelayMs || 500
this.batchDeleteEnabled = options.batchDeleteEnabled !== false // default true
this.logger = options.logger || console

// Validate configuration
Expand Down Expand Up @@ -244,6 +248,13 @@ export class S3Locker implements Locker {
for (let i = 0; i < expiredLocks.length; i += 1000) {
const batch = expiredLocks.slice(i, i + 1000)

if (!this.batchDeleteEnabled) {
// Batch delete explicitly disabled — use individual deletes directly
await this.deleteLocksIndividually(batch)
this.logger.log(`Cleaned up ${batch.length} expired locks (individual, batch disabled)`)
continue
}

try {
await this.s3Client.send(
new DeleteObjectsCommand({
Expand All @@ -255,8 +266,18 @@ export class S3Locker implements Locker {
})
)
this.logger.log(`Cleaned up ${batch.length} expired locks in batch`)
} catch (error) {
this.logger.warn(`Failed to delete batch of expired locks:`, error)
} catch (error: any) {
// Some S3-compatible backends (e.g. GCS) do not support DeleteObjects;
// fall back to individual deletes so zombie-lock cleanup still works.
const code = error?.Code ?? error?.name
if (code === 'NotImplemented') {
await this.deleteLocksIndividually(batch)
this.logger.log(
`Cleaned up ${batch.length} expired locks in batch (individual fallback)`
)
} else {
this.logger.warn(`Failed to delete batch of expired locks:`, error)
}
}
}
}
Expand All @@ -282,6 +303,28 @@ export class S3Locker implements Locker {
}
}

/**
 * Fallback cleanup path: removes each expired lock with its own
 * DeleteObject request, issued concurrently. NoSuchKey rejections are
 * ignored (lock already gone); any other failure is logged as a warning,
 * since zombie-lock cleanup is best-effort and must not throw.
 */
private async deleteLocksIndividually(keys: string[]): Promise<void> {
  const outcomes = await Promise.allSettled(
    keys.map((lockKey) =>
      this.s3Client.send(new DeleteObjectCommand({ Bucket: this.bucket, Key: lockKey }))
    )
  )

  for (const outcome of outcomes) {
    if (outcome.status !== 'rejected') {
      continue
    }
    const reason = outcome.reason as { Code?: string; name?: string }
    const code = reason?.Code ?? reason?.name
    if (code !== 'NoSuchKey') {
      this.logger.warn(`Failed to delete expired lock in fallback:`, outcome.reason)
    }
  }
}

private async checkAndCleanupExpiredLock(lockKey: string, signal: AbortSignal): Promise<boolean> {
if (signal.aborted) {
return false
Expand Down Expand Up @@ -350,7 +393,7 @@ export class S3Lock implements Lock {
private readonly id: string,
private readonly locker: S3Locker,
private readonly notifier: LockNotifier
) {}
) { }
Copy link

Copilot AI Mar 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Constructor formatting here () { }) doesn’t match Prettier output and is likely to fail prettier --check. Please reformat (e.g., ) {}) or run Prettier on the file.

Suggested change
) { }
) {}

Copilot uses AI. Check for mistakes.

async lock(stopSignal: AbortSignal, cancelReq: RequestRelease): Promise<void> {
// Set up abort handler to clean up in case of abort
Expand Down
Loading
Loading