diff --git a/src/gridfs/download.ts b/src/gridfs/download.ts
index 3691065da7..32946f6737 100644
--- a/src/gridfs/download.ts
+++ b/src/gridfs/download.ts
@@ -19,9 +19,14 @@ import type { GridFSChunk } from './upload';
 export interface GridFSBucketReadStreamOptions {
   sort?: Sort;
   skip?: number;
-  /** 0-based offset in bytes to start streaming from */
+  /**
+   * 0-indexed non-negative byte offset from the beginning of the file
+   */
   start?: number;
-  /** 0-based offset in bytes to stop streaming before */
+  /**
+   * 0-indexed non-negative byte offset to the end of the file contents
+   * to be returned by the stream. `end` is non-inclusive
+   */
   end?: number;
 }
 
diff --git a/test/integration/gridfs/gridfs_stream.test.js b/test/integration/gridfs/gridfs_stream.test.js
index e9ad76c886..4ec41f0d38 100644
--- a/test/integration/gridfs/gridfs_stream.test.js
+++ b/test/integration/gridfs/gridfs_stream.test.js
@@ -1002,57 +1002,42 @@ describe('GridFS Stream', function () {
     });
   });
 
-  /**
-   * Provide start and end parameters for file download to skip ahead x bytes and limit the total amount of bytes read to n
-   *
-   * @example-class GridFSBucket
-   * @example-method openDownloadStream
-   */
-  it('NODE-829 start/end options for openDownloadStream where start-end is < size of chunk', {
+  it('should return only end - start bytes when the end is within a chunk', {
     metadata: { requires: { topology: ['single'] } },
-
     test(done) {
-      const configuration = this.configuration;
-      const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
-      client.connect(function (err, client) {
-        const db = client.db(configuration.db);
-        const bucket = new GridFSBucket(db, {
-          bucketName: 'gridfsdownload',
-          chunkSizeBytes: 20
-        });
+      // Provide start and end parameters for file download to skip
+      // ahead x bytes and limit the total amount of bytes read to n
+      const db = client.db();
 
-        const readStream = fs.createReadStream('./LICENSE.md');
-        const uploadStream = bucket.openUploadStream('teststart.dat');
+      const start = 1;
+      const end = 6;
 
-        uploadStream.once('finish', function () {
-          const downloadStream = bucket
-            .openDownloadStreamByName('teststart.dat', { start: 1 })
-            .end(6);
+      const bucket = new GridFSBucket(db, {
+        bucketName: 'gridfsdownload',
+        chunkSizeBytes: 20
+      });
 
-          downloadStream.on('error', function (error) {
-            expect(error).to.not.exist;
-          });
+      const readStream = fs.createReadStream('./LICENSE.md');
+      const uploadStream = bucket.openUploadStream('teststart.dat');
 
-          let gotData = 0;
-          let str = '';
-          downloadStream.on('data', function (data) {
-            ++gotData;
-            str += data.toString('utf8');
-          });
+      uploadStream.once('finish', function () {
+        const downloadStream = bucket.openDownloadStreamByName('teststart.dat', { start }).end(end);
 
-          downloadStream.on('end', function () {
-            // Depending on different versions of node, we may get
-            // different amounts of 'data' events. node 0.10 gives 2,
-            // node >= 0.12 gives 3. Either is correct, but we just
-            // care that we got between 1 and 3, and got the right result
-            expect(gotData >= 1 && gotData <= 3).to.equal(true);
-            expect(str).to.equal('pache');
-            client.close(done);
-          });
+        downloadStream.on('error', done);
+
+        let str = '';
+        downloadStream.on('data', function (data) {
+          str += data.toString('utf8');
         });
 
-        readStream.pipe(uploadStream);
+        downloadStream.on('end', function () {
+          expect(str).to.equal('pache');
+          expect(str).to.have.lengthOf(end - start);
+          client.close(done);
+        });
       });
+
+      readStream.pipe(uploadStream);
     }
   });
 
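A minimal usage sketch of the start/end semantics documented above, from a driver consumer's point of view. The connection string, the 'myFiles' bucket name, and the 'report.txt' file name are illustrative placeholders, not values from this change; the assumption is that the file has already been uploaded. Per the documented options, the stream should yield the half-open byte range [start, end), i.e. exactly end - start bytes:

import { MongoClient, GridFSBucket } from 'mongodb';

// Sketch: read bytes [start, end) of a previously uploaded GridFS file.
// 'myFiles' and 'report.txt' are hypothetical names used only for illustration.
async function readSlice(uri: string): Promise<string> {
  const client = await MongoClient.connect(uri);
  try {
    const bucket = new GridFSBucket(client.db(), { bucketName: 'myFiles' });
    const start = 1; // inclusive, 0-indexed byte offset
    const end = 6;   // exclusive: streaming stops before this byte

    const chunks: Buffer[] = [];
    for await (const chunk of bucket.openDownloadStreamByName('report.txt', { start, end })) {
      chunks.push(chunk);
    }
    // The concatenated result should be exactly end - start bytes long.
    return Buffer.concat(chunks).toString('utf8');
  } finally {
    await client.close();
  }
}

Passing end in the options object mirrors the GridFSBucketReadStreamOptions interface documented in download.ts; the test above reaches the same limit through the stream's .end() setter instead, so either form should bound the bytes returned.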