@tus/s3-store: fix part number increment (#689)
Co-authored-by: Murderlon <[email protected]>
fenos and Murderlon authored Dec 23, 2024
1 parent d0765da commit 32d847d
Showing 3 changed files with 60 additions and 9 deletions.
.changeset/plenty-actors-compare.md (5 additions, 0 deletions)
@@ -0,0 +1,5 @@
+---
+"@tus/s3-store": patch
+---
+
+Fix increment for part numbers
packages/s3-store/src/index.ts (1 addition, 1 deletion)
@@ -367,8 +367,8 @@ export class S3Store extends DataStore {
       .on('chunkFinished', ({path, size: partSize}) => {
         pendingChunkFilepath = null
 
-        const partNumber = currentPartNumber + 1
         const acquiredPermit = permit
+        const partNumber = currentPartNumber++
 
         offset += partSize
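The fix reads as a near-no-op, so it is worth spelling out: `currentPartNumber + 1` only reads the counter, so successive `chunkFinished` callbacks computed the same part number unless something else advanced the counter, whereas the post-increment `currentPartNumber++` returns the current value and then advances the counter in the same step. Since S3 treats a re-used part number as an overwrite of that part, a stuck counter corrupts a multipart upload rather than failing loudly. A minimal standalone sketch of the difference (illustrative only, not the actual S3Store internals):

// Illustrative sketch, not the real chunkFinished handler. It assumes
// nothing else advances the counter between chunk events, which is
// effectively what the bug amounted to.
let currentPartNumber = 1

const buggyNextPart = () => currentPartNumber + 1 // reads, never advances
const fixedNextPart = () => currentPartNumber++ // returns current value, then advances

console.log(buggyNextPart(), buggyNextPart(), buggyNextPart()) // 2 2 2 — same part number three times

currentPartNumber = 1
console.log(fixedNextPart(), fixedNextPart(), fixedNextPart()) // 1 2 3 — strictly sequential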
packages/s3-store/test/index.ts (54 additions, 8 deletions)
@@ -11,6 +11,15 @@ import {Upload} from '@tus/utils'
 const fixturesPath = path.resolve('../', '../', 'test', 'fixtures')
 const storePath = path.resolve('../', '../', 'test', 'output', 's3-store')
 
+const s3ClientConfig = {
+  bucket: process.env.AWS_BUCKET as string,
+  credentials: {
+    accessKeyId: process.env.AWS_ACCESS_KEY_ID as string,
+    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY as string,
+  },
+  region: process.env.AWS_REGION,
+}
+
 describe('S3DataStore', () => {
   before(function () {
     this.testFileSize = 960_244
@@ -21,14 +30,7 @@ describe('S3DataStore', () => {
   beforeEach(function () {
     this.datastore = new S3Store({
       partSize: 8 * 1024 * 1024, // Each uploaded part will have ~8MiB,
-      s3ClientConfig: {
-        bucket: process.env.AWS_BUCKET as string,
-        credentials: {
-          accessKeyId: process.env.AWS_ACCESS_KEY_ID as string,
-          secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY as string,
-        },
-        region: process.env.AWS_REGION,
-      },
+      s3ClientConfig,
     })
   })
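Note that the added `s3ClientConfig,` line is ES2015 object-property shorthand, equivalent to `s3ClientConfig: s3ClientConfig`; it refers to the module-level constant hoisted in the previous hunk, so every test in the file now shares one client configuration.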

@@ -196,6 +198,50 @@ describe('S3DataStore', () => {
     assert.equal(offset, incompleteSize)
   })
 
+  it('should use strictly sequential part numbers when uploading multiple chunks', async () => {
+    const store = new S3Store({
+      partSize: 5 * 1024 * 1024,
+      maxConcurrentPartUploads: 1,
+      s3ClientConfig,
+    })
+
+    // @ts-expect-error private method
+    const uploadPartSpy = sinon.spy(store, 'uploadPart')
+
+    const size = 15 * 1024 * 1024
+    const upload = new Upload({
+      id: shared.testId('increment-bug'),
+      size: size,
+      offset: 0,
+    })
+
+    await store.create(upload)
+
+    // Write all 15 MB in a single call (S3Store will internally chunk to ~3 parts):
+    const offset = await store.write(Readable.from(Buffer.alloc(size)), upload.id, 0)
+
+    assert.equal(offset, size)
+
+    const finalUpload = await store.getUpload(upload.id)
+    assert.equal(finalUpload.offset, size, 'getUpload offset should match total size')
+
+    const partNumbers = uploadPartSpy.getCalls().map((call) => call.args[2])
+
+    for (let i = 0; i < partNumbers.length; i++) {
+      if (i === 0) {
+        assert.equal(partNumbers[i], 1, 'First part number must be 1')
+      } else {
+        const prev = partNumbers[i - 1]
+        const curr = partNumbers[i]
+        assert.equal(
+          curr,
+          prev + 1,
+          `Part numbers should increment by 1. Found jump from ${prev} to ${curr}`
+        )
+      }
+    }
+  })
+
   shared.shouldHaveStoreMethods()
   shared.shouldCreateUploads()
   shared.shouldRemoveUploads() // Termination extension
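As an aside, the loop-based assertion in the new test can be collapsed into a single deep-equality check; a compact alternative sketch, assuming the same `partNumbers` array captured from `uploadPartSpy`:

// Strictly sequential means exactly [1, 2, ..., N]: no gaps, no repeats.
const expected = partNumbers.map((_, i) => i + 1)
assert.deepStrictEqual(partNumbers, expected, 'part numbers must be 1..N in order')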
