Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

stream: pipeline should only destroy un-finished streams. #32968

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 18 additions & 38 deletions lib/internal/streams/pipeline.js
Original file line number Diff line number Diff line change
Expand Up @@ -25,43 +25,18 @@ let EE;
let PassThrough;
let createReadableStreamAsyncIterator;

function isIncoming(stream) {
return (
stream.socket &&
typeof stream.complete === 'boolean' &&
ArrayIsArray(stream.rawTrailers) &&
ArrayIsArray(stream.rawHeaders)
);
}

function isOutgoing(stream) {
return (
stream.socket &&
typeof stream.setHeader === 'function'
);
}

function destroyer(stream, reading, writing, final, callback) {
const _destroy = once((err) => {
if (!err && (isIncoming(stream) || isOutgoing(stream))) {
// http/1 request objects have a coupling to their response and should
// not be prematurely destroyed. Assume they will handle their own
// lifecycle.
return callback();
}
function destroyer(stream, reading, writing, callback) {
callback = once(callback);

if (!err && reading && !writing && stream.writable) {
return callback();
}

if (err || !final || !stream.readable) {
destroyImpl.destroyer(stream, err);
}
callback(err);
let finished = false;
stream.on('close', () => {
finished = true;
});

if (eos === undefined) eos = require('internal/streams/end-of-stream');
eos(stream, { readable: reading, writable: writing }, (err) => {
finished = !err;

const rState = stream._readableState;
if (
err &&
Expand All @@ -78,14 +53,19 @@ function destroyer(stream, reading, writing, final, callback) {
// eos will only fail with premature close on the reading side for
// duplex streams.
stream
.once('end', _destroy)
.once('error', _destroy);
.once('end', callback)
.once('error', callback);
} else {
_destroy(err);
callback(err);
}
});

return (err) => _destroy(err || new ERR_STREAM_DESTROYED('pipe'));
return (err) => {
if (finished) return;
finished = true;
destroyImpl.destroyer(stream, err);
callback(err || new ERR_STREAM_DESTROYED('pipe'));
};
}

function popCallback(streams) {
Expand Down Expand Up @@ -204,7 +184,7 @@ function pipeline(...streams) {

if (isStream(stream)) {
finishCount++;
destroys.push(destroyer(stream, reading, writing, !reading, finish));
destroys.push(destroyer(stream, reading, writing, finish));
}

if (i === 0) {
Expand Down Expand Up @@ -262,7 +242,7 @@ function pipeline(...streams) {
ret = pt;

finishCount++;
destroys.push(destroyer(ret, false, true, true, finish));
destroys.push(destroyer(ret, false, true, finish));
}
} else if (isStream(stream)) {
if (isReadable(ret)) {
Expand Down
95 changes: 94 additions & 1 deletion test/parallel/test-stream-pipeline.js
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ const {
const assert = require('assert');
const http = require('http');
const { promisify } = require('util');
const net = require('net');

{
let finished = false;
Expand Down Expand Up @@ -916,7 +917,7 @@ const { promisify } = require('util');
const src = new PassThrough({ autoDestroy: false });
const dst = new PassThrough({ autoDestroy: false });
pipeline(src, dst, common.mustCall(() => {
assert.strictEqual(src.destroyed, true);
assert.strictEqual(src.destroyed, false);
assert.strictEqual(dst.destroyed, false);
}));
src.end();
Expand Down Expand Up @@ -1118,3 +1119,95 @@ const { promisify } = require('util');
assert.strictEqual(closed, true);
}));
}

{
  // Regression test: pipeline() over a live socket must not force-destroy
  // the stream before its writable side has completed.
  const server = net.createServer(common.mustCall((socket) => {
    // Echo server: pipe the socket back into itself.
    pipeline(socket, socket, common.mustCall());
    // 'finish' must still fire; a premature destroy by pipeline() would
    // prevent the socket from ever emitting it.
    socket.on('finish', common.mustCall(() => {
      server.close();
    }));
  })).listen(0, common.mustCall(() => {
    const socket = net.connect(server.address().port);
    // Half-close from the client; the server echoes EOF back, letting the
    // server-side writable end finish naturally.
    socket.end();
  }));
}

{
  // A Duplex with autoDestroy disabled whose writable side completes
  // asynchronously (delayed _final). pipeline() must not destroy it just
  // because the readable side ended first.
  const d = new Duplex({
    autoDestroy: false,
    write: common.mustCall((data, enc, cb) => {
      // Echo written data back out the readable side.
      d.push(data);
      cb();
    }),
    read: common.mustCall(() => {
      // End the readable side immediately.
      d.push(null);
    }),
    final: common.mustCall((cb) => {
      // Delay finish; while pending, pipeline() must not have destroyed
      // the stream.
      setTimeout(() => {
        assert.strictEqual(d.destroyed, false);
        cb();
      }, 1000);
    }),
    // pipeline() must never call destroy on this stream.
    destroy: common.mustNotCall()
  });

  const sink = new Writable({
    write: common.mustCall((data, enc, cb) => {
      cb();
    })
  });

  pipeline(d, sink, common.mustCall());

  d.write('test');
  d.end();
}

{
  // NOTE(review): this block is near-identical to the echo-server test
  // earlier in this file — confirm the duplication is intentional.
  const server = net.createServer(common.mustCall((socket) => {
    // Echo server: pipe the socket back into itself; pipeline() must let
    // the socket finish instead of destroying it.
    pipeline(socket, socket, common.mustCall());
    socket.on('finish', common.mustCall(() => {
      server.close();
    }));
  })).listen(0, common.mustCall(() => {
    const socket = net.connect(server.address().port);
    socket.end();
  }));
}

{
  // NOTE(review): this block is near-identical to the Duplex
  // autoDestroy:false test earlier in this file — confirm the duplication
  // is intentional.
  const d = new Duplex({
    autoDestroy: false,
    write: common.mustCall((data, enc, cb) => {
      // Echo written data back out the readable side.
      d.push(data);
      cb();
    }),
    read: common.mustCall(() => {
      // End the readable side immediately.
      d.push(null);
    }),
    final: common.mustCall((cb) => {
      // Delay finish; while pending, the stream must not be destroyed.
      setTimeout(() => {
        assert.strictEqual(d.destroyed, false);
        cb();
      }, 1000);
    }),
    // `destroy()` won't be invoked by pipeline since
    // the writable side has not completed when
    // the pipeline has completed.
    destroy: common.mustNotCall()
  });

  const sink = new Writable({
    write: common.mustCall((data, enc, cb) => {
      cb();
    })
  });

  pipeline(d, sink, common.mustCall());

  d.write('test');
  d.end();
}