fs: partition readFile against pool exhaustion
Problem:

Node implements fs.readFile as:
- a call to stat, then
- a C++ -> libuv request to read the entire file using the stat size

Why is this bad?
The effect is to place on the libuv threadpool a potentially-large
read request, occupying the libuv thread until it completes.
While readFile certainly requires buffering the entire file contents,
it can partition the read into smaller buffers
(as is done on other read paths)
along the way to avoid threadpool exhaustion.

If the file is relatively large or stored on a slow medium, reading
the entire file in one shot seems particularly harmful,
and presents a possible DoS vector.

Solution:

Partition the read into multiple smaller requests.
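Roughly, the approach looks like the following. This is a self-contained sketch
against the public fs API, not the lib/fs.js internals; the name
readFilePartitioned and the 512 KB kChunk cap are illustrative only.

'use strict';
const fs = require('fs');

// Illustrative cap; lib/fs.js uses its own kReadFileBufferLength constant.
const kChunk = 512 * 1024;

// Read `path` into one buffer, but in kChunk-sized requests rather than a
// single stat-sized request, so no one request monopolizes a libuv thread.
function readFilePartitioned(path, cb) {
  fs.open(path, 'r', (err, fd) => {
    if (err) return cb(err);
    fs.fstat(fd, (err, st) => {
      if (err) return fs.close(fd, () => cb(err));
      const buffer = Buffer.allocUnsafe(st.size);
      let pos = 0;
      (function readChunk() {
        const length = Math.min(kChunk, st.size - pos);
        if (length === 0)  // done reading
          return fs.close(fd, (err) => cb(err, buffer.slice(0, pos)));
        fs.read(fd, buffer, pos, length, pos, (err, bytesRead) => {
          if (err) return fs.close(fd, () => cb(err));
          if (bytesRead === 0)  // unexpected EOF; return what we have
            return fs.close(fd, (err) => cb(err, buffer.slice(0, pos)));
          pos += bytesRead;
          readChunk();
        });
      })();
    });
  });
}

// Usage: readFilePartitioned('/tmp/big.bin', (err, buf) => { /* ... */ });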

Considerations:

1. Correctness

I don't think partitioning the read like this raises
any additional risk of read-write races on the FS.
If the application is concurrently readFile'ing and modifying the file,
it will already see funny behavior. Though libuv uses preadv where
available, this doesn't guarantee read atomicity in the presence of
concurrent writes.

2. Performance

Downside: Partitioning means that a single large readFile will
  be split into many "out and back" requests to libuv,
  introducing overhead.
Upside: In between each "out and back", other work pending on the
  threadpool can take a turn.

In short, although partitioning will slow down a large request,
it will lead to better throughput if the threadpool is handling
more than one type of request.
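To make the tradeoff concrete, here is the arithmetic (the 512 KB cap is
illustrative; the actual cap is the kReadFileBufferLength constant in
lib/fs.js):

// Read requests a partitioned readFile issues for a file of `size` bytes
// when each read is capped at `cap` bytes (the initial stat is one more).
function readRequests(size, cap) {
  return Math.max(1, Math.ceil(size / cap));
}

readRequests(16 * 1024 * 1024, 512 * 1024);  // 32 trips instead of 1
readRequests(4 * 1024, 512 * 1024);          // small files still take 1

Each boundary between those requests is a point where a pending zlib job can
get a turn on the pool, which is what the readfile-partitioned benchmark
added in this commit measures.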

Fixes: #17047

PR-URL: #17054
Reviewed-By: Benjamin Gruenbaum <benjamingr@gmail.com>
Reviewed-By: Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Reviewed-By: Gireesh Punathil <gpunathi@in.ibm.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Sakthipriyan Vairamani <thechargingvolcano@gmail.com>
Reviewed-By: Ruben Bridgewater <ruben@bridgewater.de>
davisjam authored and BridgeAR committed Feb 1, 2018
1 parent 36fd25f commit 67a4ce1
Showing 5 changed files with 159 additions and 9 deletions.
86 changes: 86 additions & 0 deletions benchmark/fs/readfile-partitioned.js
@@ -0,0 +1,86 @@
// Submit a mix of short and long jobs to the threadpool.
// Report total job throughput.
// If we partition the long job, overall job throughput goes up significantly.
// However, this comes at the cost of the long job throughput.
//
// Short jobs: small zip jobs.
// Long jobs: fs.readFile on a large file.

'use strict';

const path = require('path');
const common = require('../common.js');
const filename = path.resolve(__dirname,
                              `.removeme-benchmark-garbage-${process.pid}`);
const fs = require('fs');
const zlib = require('zlib');
const assert = require('assert');

const bench = common.createBenchmark(main, {
  dur: [5],
  len: [1024, 16 * 1024 * 1024],
  concurrent: [1, 10]
});

function main(conf) {
  const len = +conf.len;
  try { fs.unlinkSync(filename); } catch (e) {}
  var data = Buffer.alloc(len, 'x');
  fs.writeFileSync(filename, data);
  data = null;

  var zipData = Buffer.alloc(1024, 'a');

  var reads = 0;
  var zips = 0;
  var benchEnded = false;
  bench.start();
  setTimeout(function() {
    const totalOps = reads + zips;
    benchEnded = true;
    bench.end(totalOps);
    try { fs.unlinkSync(filename); } catch (e) {}
  }, +conf.dur * 1000);

  function read() {
    fs.readFile(filename, afterRead);
  }

  function afterRead(er, data) {
    if (er) {
      if (er.code === 'ENOENT') {
        // Only OK if unlinked by the timer from main.
        assert.ok(benchEnded);
        return;
      }
      throw er;
    }

    if (data.length !== len)
      throw new Error('wrong number of bytes returned');

    reads++;
    if (!benchEnded)
      read();
  }

  function zip() {
    zlib.deflate(zipData, afterZip);
  }

  function afterZip(er, data) {
    if (er)
      throw er;

    zips++;
    if (!benchEnded)
      zip();
  }

  // Start reads
  var cur = +conf.concurrent;
  while (cur--) read();

  // Start a competing zip
  zip();
}
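Assuming the usual benchmark/common.js conventions, this file can be run
directly (`node benchmark/fs/readfile-partitioned.js`) and accepts `key=value`
overrides such as `len=16777216 concurrent=10 dur=5` to pin a single
configuration.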
15 changes: 11 additions & 4 deletions benchmark/fs/readfile.js
@@ -8,6 +8,7 @@ const common = require('../common.js');
 const filename = path.resolve(process.env.NODE_TMPDIR || __dirname,
                               `.removeme-benchmark-garbage-${process.pid}`);
 const fs = require('fs');
+const assert = require('assert');
 
 const bench = common.createBenchmark(main, {
   dur: [5],
@@ -22,10 +23,10 @@ function main({ len, dur, concurrent }) {
   data = null;
 
   var reads = 0;
-  var bench_ended = false;
+  var benchEnded = false;
   bench.start();
   setTimeout(function() {
-    bench_ended = true;
+    benchEnded = true;
     bench.end(reads);
     try { fs.unlinkSync(filename); } catch (e) {}
     process.exit(0);
@@ -36,14 +37,20 @@ function main({ len, dur, concurrent }) {
   }
 
   function afterRead(er, data) {
-    if (er)
+    if (er) {
+      if (er.code === 'ENOENT') {
+        // Only OK if unlinked by the timer from main.
+        assert.ok(benchEnded);
+        return;
+      }
       throw er;
+    }
 
     if (data.length !== len)
       throw new Error('wrong number of bytes returned');
 
     reads++;
-    if (!bench_ended)
+    if (!benchEnded)
       read();
   }
 
7 changes: 3 additions & 4 deletions doc/api/fs.md
@@ -2250,10 +2250,9 @@ Any specified file descriptor has to support reading.
 *Note*: If a file descriptor is specified as the `path`, it will not be closed
 automatically.
 
-*Note*: `fs.readFile()` reads the entire file in a single threadpool request.
-To minimize threadpool task length variation, prefer the partitioned APIs
-`fs.read()` and `fs.createReadStream()` when reading files as part of
-fulfilling a client request.
+*Note*: `fs.readFile()` buffers the entire file.
+To minimize memory costs, when possible prefer streaming via
+`fs.createReadStream()`.
 
 ## fs.readFileSync(path[, options])
 <!-- YAML
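For context on the new wording, a minimal sketch of the two approaches the
note contrasts (the file path is a placeholder):

'use strict';
const fs = require('fs');

// fs.readFile: the whole file is buffered before the callback fires.
fs.readFile('/path/to/large.log', (err, buf) => {
  if (err) throw err;
  console.log(`buffered ${buf.length} bytes in one piece`);
});

// fs.createReadStream: data arrives in chunks, so memory use stays roughly
// flat no matter how large the file is.
let total = 0;
fs.createReadStream('/path/to/large.log')
  .on('data', (chunk) => { total += chunk.length; })
  .on('end', () => console.log(`streamed ${total} bytes`))
  .on('error', (err) => { throw err; });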
2 changes: 1 addition & 1 deletion lib/fs.js
@@ -508,7 +508,7 @@ ReadFileContext.prototype.read = function() {
   } else {
     buffer = this.buffer;
     offset = this.pos;
-    length = this.size - this.pos;
+    length = Math.min(kReadFileBufferLength, this.size - this.pos);
   }
 
   var req = new FSReqWrap();
58 changes: 58 additions & 0 deletions test/parallel/test-fs-readfile.js
@@ -0,0 +1,58 @@
'use strict';
const common = require('../common');

// This test ensures that fs.readFile correctly returns the
// contents of varying-sized files.

const assert = require('assert');
const fs = require('fs');
const path = require('path');

const prefix = `.removeme-fs-readfile-${process.pid}`;

common.refreshTmpDir();

const fileInfo = [
  { name: path.join(common.tmpDir, `${prefix}-1K.txt`),
    len: 1024,
  },
  { name: path.join(common.tmpDir, `${prefix}-64K.txt`),
    len: 64 * 1024,
  },
  { name: path.join(common.tmpDir, `${prefix}-64KLessOne.txt`),
    len: (64 * 1024) - 1,
  },
  { name: path.join(common.tmpDir, `${prefix}-1M.txt`),
    len: 1 * 1024 * 1024,
  },
  { name: path.join(common.tmpDir, `${prefix}-1MPlusOne.txt`),
    len: (1 * 1024 * 1024) + 1,
  },
];

// Populate each fileInfo (and file) with unique fill.
const sectorSize = 512;
for (const e of fileInfo) {
  e.contents = Buffer.allocUnsafe(e.len);

  // This accounts for anything unusual in Node's implementation of readFile.
  // Using e.g. 'aa...aa' would miss bugs like Node re-reading
  // the same section twice instead of two separate sections.
  for (let offset = 0; offset < e.len; offset += sectorSize) {
    const fillByte = 256 * Math.random();
    const nBytesToFill = Math.min(sectorSize, e.len - offset);
    e.contents.fill(fillByte, offset, offset + nBytesToFill);
  }

  fs.writeFileSync(e.name, e.contents);
}
// All files are now populated.

// Test readFile on each size.
for (const e of fileInfo) {
  fs.readFile(e.name, common.mustCall((err, buf) => {
    console.log(`Validating readFile on file ${e.name} of length ${e.len}`);
    assert.ifError(err, 'An error occurred');
    assert.deepStrictEqual(buf, e.contents, 'Incorrect file contents');
  }));
}
