Skip to content

Commit

Permalink
Improve request graph cache reading (#9721)
Browse files Browse the repository at this point in the history
* Make nodes per blob a method to allow overriding

* Add a test for the stale cache issue

* Update tests to cover more cases

* Write the number of nodes in each blob to the cache

* Undo erroneous variable move

* Be extra mathsy
  • Loading branch information
benjervis committed May 17, 2024
1 parent 4556b5c commit 25d010a
Show file tree
Hide file tree
Showing 2 changed files with 84 additions and 18 deletions.
56 changes: 38 additions & 18 deletions packages/core/core/src/RequestTracker.js
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,10 @@ const keyFromEnvContentKey = (contentKey: ContentKey): string =>
const keyFromOptionContentKey = (contentKey: ContentKey): string =>
contentKey.slice('option:'.length);

// This constant was chosen by locally profiling the time to serialise n nodes and tuning until reaching an average time of ~50 ms per blob.
// The goal is to free up the event loop periodically to allow interruption by the user.
const NODES_PER_BLOB = 2 ** 14;

export class RequestGraph extends ContentGraph<
RequestGraphNode,
RequestGraphEdgeType,
Expand All @@ -283,6 +287,7 @@ export class RequestGraph extends ContentGraph<
invalidateOnBuildNodeIds: Set<NodeId> = new Set();
cachedRequestChunks: Set<number> = new Set();
configKeyNodes: Map<ProjectPath, Set<NodeId>> = new Map();
nodesPerBlob: number = NODES_PER_BLOB;

// $FlowFixMe[prop-missing]
static deserialize(opts: RequestGraphOpts): RequestGraph {
Expand Down Expand Up @@ -1032,14 +1037,10 @@ export class RequestGraph extends ContentGraph<
}

removeCachedRequestChunkForNode(nodeId: number): void {
// Map the node id to the index of the blob (chunk) that contains it and
// drop that chunk from the cached set, so the chunk is re-serialised on
// the next writeToCache pass (which skips chunks still present in
// cachedRequestChunks). Uses the instance-level nodesPerBlob rather than
// the module constant so tests can override the chunk size.
this.cachedRequestChunks.delete(Math.floor(nodeId / this.nodesPerBlob));
}
}

// This constant is chosen by local profiling the time to serialise n nodes and tuning until an average time of ~50 ms per blob.
// The goal is to free up the event loop periodically to allow interruption by the user.
const NODES_PER_BLOB = 2 ** 14;

export default class RequestTracker {
graph: RequestGraph;
farm: WorkerFarm;
Expand Down Expand Up @@ -1427,17 +1428,30 @@ export default class RequestTracker {
}
}

for (let i = 0; i * NODES_PER_BLOB < cacheableNodes.length; i += 1) {
let nodeCountsPerBlob = [];

for (
let i = 0;
i * this.graph.nodesPerBlob < cacheableNodes.length;
i += 1
) {
let nodesStartIndex = i * this.graph.nodesPerBlob;
let nodesEndIndex = Math.min(
(i + 1) * this.graph.nodesPerBlob,
cacheableNodes.length,
);

nodeCountsPerBlob.push(nodesEndIndex - nodesStartIndex);

if (!this.graph.hasCachedRequestChunk(i)) {
// We assume the request graph nodes are immutable and won't change
let nodesToCache = cacheableNodes.slice(nodesStartIndex, nodesEndIndex);

queue
.add(() =>
serialiseAndSet(
getRequestGraphNodeKey(i, cacheKey),
cacheableNodes.slice(
i * NODES_PER_BLOB,
(i + 1) * NODES_PER_BLOB,
),
nodesToCache,
).then(() => {
// Succeeded in writing to disk, save that we have completed this chunk
this.graph.setCachedRequestChunk(i);
Expand All @@ -1455,6 +1469,7 @@ export default class RequestTracker {
// Set the request graph after the queue is flushed to avoid writing an invalid state
await serialiseAndSet(requestGraphKey, {
...serialisedGraph,
nodeCountsPerBlob,
nodes: undefined,
});

Expand Down Expand Up @@ -1523,19 +1538,24 @@ export async function readAndDeserializeRequestGraph(
return deserialize(buffer);
};

let i = 0;
let nodePromises = [];
while (await cache.hasLargeBlob(getRequestGraphNodeKey(i, cacheKey))) {
nodePromises.push(getAndDeserialize(getRequestGraphNodeKey(i, cacheKey)));
i += 1;
}

let serializedRequestGraph = await getAndDeserialize(requestGraphKey);

let nodePromises = serializedRequestGraph.nodeCountsPerBlob.map(
async (nodesCount, i) => {
let nodes = await getAndDeserialize(getRequestGraphNodeKey(i, cacheKey));
invariant.equal(
nodes.length,
nodesCount,
'RequestTracker node chunk: invalid node count',
);
return nodes;
},
);

return {
requestGraph: RequestGraph.deserialize({
...serializedRequestGraph,
nodes: (await Promise.all(nodePromises)).flatMap(nodeChunk => nodeChunk),
nodes: (await Promise.all(nodePromises)).flat(),
}),
// This is used inside parcel query for `.inspectCache`
bufferLength,
Expand Down
46 changes: 46 additions & 0 deletions packages/core/core/test/RequestTracker.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -308,6 +308,52 @@ describe('RequestTracker', () => {
assert.strictEqual(called, false);
});

it('should ignore stale node chunks from cache', async () => {
let tracker = new RequestTracker({farm, options});

// Keep blobs tiny so multiple cache chunks are produced without having
// to create thousands of nodes.
tracker.graph.nodesPerBlob = 2;

for (let i = 1; i <= 5; i += 1) {
tracker.graph.addNode({type: 0, id: `some-file-node-${i}`});
}

await tracker.writeToCache();

// A fresh tracker starts empty and must not pick up the old cache files.
tracker = new RequestTracker({farm, options});
assert.equal(tracker.graph.nodes.length, 0);

tracker.graph.addNode({type: 0, id: 'some-file-node-1'});
await tracker.writeToCache();

// Initialising from cache should read only the chunk written above, not
// the stale chunks from the first write.
tracker = await RequestTracker.init({farm, options});
assert.equal(tracker.graph.nodes.length, 1);
});

it('should init with multiple node chunks', async () => {
let tracker = new RequestTracker({farm, options});

// Keep blobs tiny so the graph spans several cache chunks without
// having to create thousands of nodes.
tracker.graph.nodesPerBlob = 2;

for (let i = 1; i <= 5; i += 1) {
tracker.graph.addNode({type: 0, id: `some-file-node-${i}`});
}

await tracker.writeToCache();

// All five nodes should round-trip through the chunked cache.
tracker = await RequestTracker.init({farm, options});
assert.equal(tracker.graph.nodes.length, 5);
});

it('should write new nodes to cache', async () => {
let tracker = new RequestTracker({farm, options});

Expand Down

0 comments on commit 25d010a

Please sign in to comment.