Skip to content

Commit

Permalink
Merge branch 'v2' into molejniczak/resolver-types
Browse files Browse the repository at this point in the history
  • Loading branch information
MonicaOlejniczak committed May 20, 2024
2 parents c107b96 + 25d010a commit ef4508f
Show file tree
Hide file tree
Showing 2 changed files with 242 additions and 54 deletions.
165 changes: 111 additions & 54 deletions packages/core/core/src/RequestTracker.js
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,10 @@ export const requestGraphEdgeTypes = {
dirname: 7,
};

class FSBailoutError extends Error {
name: string = 'FSBailoutError';
}

export type RequestGraphEdgeType = $Values<typeof requestGraphEdgeTypes>;

type RequestGraphOpts = {|
Expand Down Expand Up @@ -263,6 +267,10 @@ const keyFromEnvContentKey = (contentKey: ContentKey): string =>
const keyFromOptionContentKey = (contentKey: ContentKey): string =>
contentKey.slice('option:'.length);

// This constant was chosen by locally profiling the time to serialise n nodes
// and tuning until the average time per blob was ~50 ms.
// The goal is to free up the event loop periodically to allow interruption by the user.
const NODES_PER_BLOB = 2 ** 14;

export class RequestGraph extends ContentGraph<
RequestGraphNode,
RequestGraphEdgeType,
Expand All @@ -279,6 +287,7 @@ export class RequestGraph extends ContentGraph<
invalidateOnBuildNodeIds: Set<NodeId> = new Set();
cachedRequestChunks: Set<number> = new Set();
configKeyNodes: Map<ProjectPath, Set<NodeId>> = new Map();
nodesPerBlob: number = NODES_PER_BLOB;

// $FlowFixMe[prop-missing]
static deserialize(opts: RequestGraphOpts): RequestGraph {
Expand Down Expand Up @@ -328,6 +337,8 @@ export class RequestGraph extends ContentGraph<
this.optionNodeIds.add(nodeId);
}

this.removeCachedRequestChunkForNode(nodeId);

return nodeId;
}

Expand Down Expand Up @@ -855,7 +866,7 @@ export class RequestGraph extends ContentGraph<
predictedTime,
},
});
throw new Error(
throw new FSBailoutError(
'Responding to file system events exceeded threshold, start with empty cache.',
);
}
Expand Down Expand Up @@ -1026,14 +1037,10 @@ export class RequestGraph extends ContentGraph<
}

removeCachedRequestChunkForNode(nodeId: number): void {
this.cachedRequestChunks.delete(Math.floor(nodeId / NODES_PER_BLOB));
this.cachedRequestChunks.delete(Math.floor(nodeId / this.nodesPerBlob));
}
}

// This constant is chosen by local profiling the time to serialise n nodes and tuning until an average time of ~50 ms per blob.
// The goal is to free up the event loop periodically to allow interruption by the user.
const NODES_PER_BLOB = 2 ** 14;

export default class RequestTracker {
graph: RequestGraph;
farm: WorkerFarm;
Expand Down Expand Up @@ -1421,17 +1428,30 @@ export default class RequestTracker {
}
}

for (let i = 0; i * NODES_PER_BLOB < cacheableNodes.length; i += 1) {
let nodeCountsPerBlob = [];

for (
let i = 0;
i * this.graph.nodesPerBlob < cacheableNodes.length;
i += 1
) {
let nodesStartIndex = i * this.graph.nodesPerBlob;
let nodesEndIndex = Math.min(
(i + 1) * this.graph.nodesPerBlob,
cacheableNodes.length,
);

nodeCountsPerBlob.push(nodesEndIndex - nodesStartIndex);

if (!this.graph.hasCachedRequestChunk(i)) {
// We assume the request graph nodes are immutable and won't change
let nodesToCache = cacheableNodes.slice(nodesStartIndex, nodesEndIndex);

queue
.add(() =>
serialiseAndSet(
getRequestGraphNodeKey(i, cacheKey),
cacheableNodes.slice(
i * NODES_PER_BLOB,
(i + 1) * NODES_PER_BLOB,
),
nodesToCache,
).then(() => {
// Succeeded in writing to disk, save that we have completed this chunk
this.graph.setCachedRequestChunk(i);
Expand All @@ -1449,6 +1469,7 @@ export default class RequestTracker {
// Set the request graph after the queue is flushed to avoid writing an invalid state
await serialiseAndSet(requestGraphKey, {
...serialisedGraph,
nodeCountsPerBlob,
nodes: undefined,
});

Expand Down Expand Up @@ -1517,19 +1538,24 @@ export async function readAndDeserializeRequestGraph(
return deserialize(buffer);
};

let i = 0;
let nodePromises = [];
while (await cache.hasLargeBlob(getRequestGraphNodeKey(i, cacheKey))) {
nodePromises.push(getAndDeserialize(getRequestGraphNodeKey(i, cacheKey)));
i += 1;
}

let serializedRequestGraph = await getAndDeserialize(requestGraphKey);

let nodePromises = serializedRequestGraph.nodeCountsPerBlob.map(
async (nodesCount, i) => {
let nodes = await getAndDeserialize(getRequestGraphNodeKey(i, cacheKey));
invariant.equal(
nodes.length,
nodesCount,
'RequestTracker node chunk: invalid node count',
);
return nodes;
},
);

return {
requestGraph: RequestGraph.deserialize({
...serializedRequestGraph,
nodes: (await Promise.all(nodePromises)).flatMap(nodeChunk => nodeChunk),
nodes: (await Promise.all(nodePromises)).flat(),
}),
// This is used inside parcel query for `.inspectCache`
bufferLength,
Expand All @@ -1543,55 +1569,58 @@ async function loadRequestGraph(options): Async<RequestGraph> {

let cacheKey = getCacheKey(options);
let requestGraphKey = `requestGraph-${cacheKey}`;

let timeout;
const snapshotKey = `snapshot-${cacheKey}`;
const snapshotPath = path.join(options.cacheDir, snapshotKey + '.txt');
if (await options.cache.hasLargeBlob(requestGraphKey)) {
let {requestGraph} = await readAndDeserializeRequestGraph(
options.cache,
requestGraphKey,
cacheKey,
);
try {
let {requestGraph} = await readAndDeserializeRequestGraph(
options.cache,
requestGraphKey,
cacheKey,
);

let opts = getWatcherOptions(options);
let snapshotKey = `snapshot-${cacheKey}`;
let snapshotPath = path.join(options.cacheDir, snapshotKey + '.txt');
let opts = getWatcherOptions(options);

let timeout = setTimeout(() => {
logger.warn({
timeout = setTimeout(() => {
logger.warn({
origin: '@parcel/core',
message: `Retrieving file system events since last build...\nThis can take upto a minute after branch changes or npm/yarn installs.`,
});
}, 5000);
let startTime = Date.now();
let events = await options.inputFS.getEventsSince(
options.watchDir,
snapshotPath,
opts,
);
clearTimeout(timeout);

logger.verbose({
origin: '@parcel/core',
message: `Retrieving file system events since last build...\nThis can take upto a minute after branch changes or npm/yarn installs.`,
message: `File system event count: ${events.length}`,
meta: {
trackableEvent: 'watcher_events_count',
watcherEventCount: events.length,
duration: Date.now() - startTime,
},
});
}, 5000);
let startTime = Date.now();
let events = await options.inputFS.getEventsSince(
options.watchDir,
snapshotPath,
opts,
);
clearTimeout(timeout);

logger.verbose({
origin: '@parcel/core',
message: `File system event count: ${events.length}`,
meta: {
trackableEvent: 'watcher_events_count',
watcherEventCount: events.length,
duration: Date.now() - startTime,
},
});

requestGraph.invalidateUnpredictableNodes();
requestGraph.invalidateOnBuildNodes();
requestGraph.invalidateEnvNodes(options.env);
requestGraph.invalidateOptionNodes(options);
requestGraph.invalidateUnpredictableNodes();
requestGraph.invalidateOnBuildNodes();
requestGraph.invalidateEnvNodes(options.env);
requestGraph.invalidateOptionNodes(options);

try {
await requestGraph.respondToFSEvents(
options.unstableFileInvalidations || events,
options,
10000,
);
return requestGraph;
} catch (e) {
// Prevent logging fs events took too long warning
clearTimeout(timeout);
logErrorOnBailout(options, snapshotPath, e);
// This error means respondToFSEvents timed out handling the invalidation events
// In this case we'll return a fresh RequestGraph
return new RequestGraph();
Expand All @@ -1600,3 +1629,31 @@ async function loadRequestGraph(options): Async<RequestGraph> {

return new RequestGraph();
}
function logErrorOnBailout(
options: ParcelOptions,
snapshotPath: string,
e: Error,
): void {
if (e.message && e.message.includes('invalid clockspec')) {
const snapshotContents = options.inputFS.readFileSync(
snapshotPath,
'utf-8',
);
logger.warn({
origin: '@parcel/core',
message: `Error reading clockspec from snapshot, building with clean cache.`,
meta: {
snapshotContents: snapshotContents,
trackableEvent: 'invalid_clockspec_error',
},
});
} else if (!(e instanceof FSBailoutError)) {
logger.warn({
origin: '@parcel/core',
message: `Unexpected error loading cache from disk, building with clean cache.`,
meta: {
trackableEvent: 'cache_load_error',
},
});
}
}
Loading

0 comments on commit ef4508f

Please sign in to comment.