fix: properly handle large request body
Previously, only the first chunk (around 64 KB) of the request body was
received by the worker process.

Based on the work in #9

Related: #6
darrachequesne committed Oct 31, 2022
1 parent bb47fc4 commit a124d0b
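
For context on the fix: Node.js hands a large HTTP request body to a TCP socket's "data" listener in several chunks (typically capped around 64 KB each), so a master process that forwards only the first chunk truncates anything bigger. The following standalone sketch illustrates that behavior; it is not part of this repository, and the port and payload size are arbitrary:

const http = require("http");

const server = http.createServer((req, res) => {
  req.on("data", () => {}); // drain the body so the "end" event fires
  req.on("end", () => res.end("ok"));
});

// count the raw TCP chunks seen on each incoming connection
server.on("connection", (socket) => {
  let count = 0;
  socket.on("data", (chunk) => {
    console.log(`chunk #${++count}: ${chunk.length} bytes`);
  });
});

server.listen(3000);

// POSTing a ~1 MB body, e.g. `curl --data-binary @large-file http://localhost:3000`,
// typically logs a series of chunks of at most ~64 KB, which is exactly what the
// previous socket.once("data", ...) handler missed.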
Showing 3 changed files with 65 additions and 20 deletions.
index.js (61 additions, 17 deletions)
@@ -1,4 +1,7 @@
const cluster = require("cluster");
const { randomBytes } = require("crypto");

const randomId = () => randomBytes(8).toString("hex");

const setupMaster = (httpServer, opts) => {
if (!cluster.isMaster) {
@@ -57,22 +60,50 @@ const setupMaster = (httpServer, opts) => {
};

httpServer.on("connection", (socket) => {
socket.once("data", (buffer) => {
socket.pause();
let workerId, connectionId;

const sendCallback = (err) => {
if (err) {
socket.destroy();
}
};

socket.on("data", (buffer) => {
const data = buffer.toString();
const workerId = computeWorkerId(data);
if (workerId && connectionId) {
cluster.workers[workerId].send(
{ type: "sticky:http-chunk", data, connectionId },
sendCallback
);
return;
}
workerId = computeWorkerId(data);
const mayHaveMultipleChunks = !(
data.startsWith("GET") ||
data
.substring(0, data.indexOf("\r\n\r\n"))
.includes("pgrade: websocket")
);
socket.pause();
if (mayHaveMultipleChunks) {
connectionId = randomId();
}
cluster.workers[workerId].send(
{ type: "sticky:connection", data },
{ type: "sticky:connection", data, connectionId },
socket,
(err) => {
if (err) {
socket.destroy();
}
}
{
keepOpen: mayHaveMultipleChunks,
},
sendCallback
);
});
});

// this is needed to properly detect the end of the HTTP request body
httpServer.on("request", (req) => {
req.on("data", () => {});
});

cluster.on("message", (worker, { type, data }) => {
switch (type) {
case "sticky:connection":
@@ -96,7 +127,10 @@ const setupWorker = (io) => {
throw new Error("not worker");
}

process.on("message", ({ type, data }, socket) => {
// store connections that may receive multiple chunks
const sockets = new Map();

process.on("message", ({ type, data, connectionId }, socket) => {
switch (type) {
case "sticky:connection":
if (!socket) {
@@ -105,15 +139,25 @@
return;
}
io.httpServer.emit("connection", socket); // inject connection
// republish first chunk
if (socket._handle.onread.length === 1) {
socket._handle.onread(Buffer.from(data));
} else {
// for Node.js < 12
socket._handle.onread(1, Buffer.from(data));
}
socket.emit("data", Buffer.from(data)); // republish first chunk
socket.resume();

if (connectionId) {
sockets.set(connectionId, socket);

socket.on("close", () => {
sockets.delete(connectionId);
});
}

break;

case "sticky:http-chunk": {
const socket = sockets.get(connectionId);
if (socket) {
socket.emit("data", Buffer.from(data));
}
}
}
});

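For reference, the two functions patched above are wired together like this in a clustered deployment. This is a usage sketch based on the project README; the port, the number of workers and the loadBalancingMethod value are illustrative:

const cluster = require("cluster");
const http = require("http");
const { Server } = require("socket.io");
const { setupMaster, setupWorker } = require("@socket.io/sticky");

if (cluster.isMaster) {
  const httpServer = http.createServer();

  // setupMaster intercepts raw connections and routes each one to a worker
  setupMaster(httpServer, {
    loadBalancingMethod: "least-connection", // or "random" / "round-robin"
  });

  httpServer.listen(3000);

  for (let i = 0; i < 4; i++) {
    cluster.fork();
  }
} else {
  // the worker's HTTP server is never bound to a port: connections (and now
  // any additional body chunks) are injected by setupWorker
  const httpServer = http.createServer();
  const io = new Server(httpServer);
  setupWorker(io);

  io.on("connection", (socket) => {
    // application logic
  });
}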
test/fixtures/connection.js (3 additions, 1 deletion)
@@ -6,7 +6,9 @@ const { setupMaster, setupWorker } = require("../..");

if (cluster.isWorker) {
const httpServer = http.createServer();
const io = new Server(httpServer);
const io = new Server(httpServer, {
maxHttpBufferSize: 1.4 * 1e6, // to account for the base64 encoding of the binary payload
});
setupWorker(io);

io.on("connection", (socket) => {
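The 1.4 * 1e6 value above accounts for base64 overhead: when a binary payload is sent over HTTP long-polling, Engine.IO encodes it as base64 text, turning every 3 bytes into 4 characters. A quick sanity check of the arithmetic (a sketch; the fixture's exact payload size is not shown here, a 1 MB binary buffer is assumed):

// base64 output length for a binary payload of n bytes
const base64Length = (n) => Math.ceil(n / 3) * 4;

console.log(base64Length(1e6)); // 1333336, roughly 1.34 MB
// 1.4 * 1e6 leaves headroom for the encoded payload plus packet framing,
// whereas the Socket.IO default maxHttpBufferSize (1e6) would reject it.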
test/index.js (1 addition, 2 deletions)
@@ -24,8 +24,7 @@ describe("@socket.io/sticky", () => {
exec(fixture("connection.js"), { env: { TRANSPORT: "websocket" } }, done);
});

// FIXME it fails when sending a packet whose size is over 65 kb
it.skip("should work with HTTP long-polling only", (done) => {
it("should work with HTTP long-polling only", (done) => {
exec(fixture("connection.js"), { env: { TRANSPORT: "polling" } }, done);
});
});
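
The re-enabled test exercises the HTTP long-polling code path from end to end. For completeness, forcing that transport from a standard Socket.IO client looks like the snippet below; it is illustrative only (the URL, event name and payload size are placeholders) and is not taken from the fixture:

const { io } = require("socket.io-client");

// restrict the client to HTTP long-polling, so that large payloads travel as
// POST request bodies instead of WebSocket frames
const socket = io("http://localhost:3000", {
  transports: ["polling"],
});

socket.on("connect", () => {
  socket.emit("payload", Buffer.alloc(1e6)); // ~1 MB binary payload
});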
