From 3d045a8764a5e0a77b6cd3585e359052375eeb59 Mon Sep 17 00:00:00 2001
From: Andrew Clark
Date: Thu, 3 Oct 2019 14:57:35 -0700
Subject: [PATCH] [Scheduler Profiler] Use microsecond precision

`performance.now` returns a timestamp in milliseconds as a float. The
browser has the option to adjust the precision of the float, but it's
usually more precise than a millisecond. However, this precision is
lost when the timestamp is logged by the Scheduler profiler, because we
store the numbers in an Int32Array. This change multiplies the
millisecond float value by 1000, giving us three more digits of
precision.
---
 packages/scheduler/src/SchedulerProfiling.js  | 35 ++++++++++---------
 .../src/__tests__/SchedulerProfiling-test.js  | 11 +++---
 2 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/packages/scheduler/src/SchedulerProfiling.js b/packages/scheduler/src/SchedulerProfiling.js
index a7c7afefef0..b2e7781101a 100644
--- a/packages/scheduler/src/SchedulerProfiling.js
+++ b/packages/scheduler/src/SchedulerProfiling.js
@@ -103,13 +103,16 @@ export function stopLoggingProfilingEvents(): ArrayBuffer | null {
 
 export function markTaskStart(
   task: {id: number, priorityLevel: PriorityLevel},
-  time: number,
+  ms: number,
 ) {
   if (enableProfiling) {
     profilingState[QUEUE_SIZE]++;
 
     if (eventLog !== null) {
-      logEvent([TaskStartEvent, time, task.id, task.priorityLevel]);
+      // performance.now returns a float, representing milliseconds. When the
+      // event is logged, it's coerced to an int. Convert to microseconds to
+      // maintain extra degrees of precision.
+      logEvent([TaskStartEvent, ms * 1000, task.id, task.priorityLevel]);
     }
   }
 }
@@ -119,7 +122,7 @@ export function markTaskCompleted(
     id: number,
     priorityLevel: PriorityLevel,
   },
-  time: number,
+  ms: number,
 ) {
   if (enableProfiling) {
     profilingState[PRIORITY] = NoPriority;
@@ -127,7 +130,7 @@ export function markTaskCompleted(
     profilingState[QUEUE_SIZE]--;
 
     if (eventLog !== null) {
-      logEvent([TaskCompleteEvent, time, task.id]);
+      logEvent([TaskCompleteEvent, ms * 1000, task.id]);
     }
   }
 }
@@ -137,13 +140,13 @@ export function markTaskCanceled(
     id: number,
     priorityLevel: PriorityLevel,
   },
-  time: number,
+  ms: number,
 ) {
   if (enableProfiling) {
     profilingState[QUEUE_SIZE]--;
 
     if (eventLog !== null) {
-      logEvent([TaskCancelEvent, time, task.id]);
+      logEvent([TaskCancelEvent, ms * 1000, task.id]);
     }
   }
 }
@@ -153,7 +156,7 @@ export function markTaskErrored(
     id: number,
     priorityLevel: PriorityLevel,
   },
-  time: number,
+  ms: number,
 ) {
   if (enableProfiling) {
     profilingState[PRIORITY] = NoPriority;
@@ -161,14 +164,14 @@ export function markTaskErrored(
     profilingState[QUEUE_SIZE]--;
 
     if (eventLog !== null) {
-      logEvent([TaskErrorEvent, time, task.id]);
+      logEvent([TaskErrorEvent, ms * 1000, task.id]);
     }
   }
 }
 
 export function markTaskRun(
   task: {id: number, priorityLevel: PriorityLevel},
-  time: number,
+  ms: number,
 ) {
   if (enableProfiling) {
     runIdCounter++;
@@ -178,37 +181,37 @@ export function markTaskRun(
     profilingState[CURRENT_RUN_ID] = runIdCounter;
 
     if (eventLog !== null) {
-      logEvent([TaskRunEvent, time, task.id, runIdCounter]);
+      logEvent([TaskRunEvent, ms * 1000, task.id, runIdCounter]);
     }
   }
 }
 
-export function markTaskYield(task: {id: number}, time: number) {
+export function markTaskYield(task: {id: number}, ms: number) {
   if (enableProfiling) {
     profilingState[PRIORITY] = NoPriority;
     profilingState[CURRENT_TASK_ID] = 0;
     profilingState[CURRENT_RUN_ID] = 0;
 
     if (eventLog !== null) {
-      logEvent([TaskYieldEvent, time, task.id, runIdCounter]);
+      logEvent([TaskYieldEvent, ms * 1000, task.id, runIdCounter]);
     }
   }
 }
 
-export function markSchedulerSuspended(time: number) {
+export function markSchedulerSuspended(ms: number) {
   if (enableProfiling) {
     mainThreadIdCounter++;
 
     if (eventLog !== null) {
-      logEvent([SchedulerSuspendEvent, time, mainThreadIdCounter]);
+      logEvent([SchedulerSuspendEvent, ms * 1000, mainThreadIdCounter]);
     }
   }
 }
 
-export function markSchedulerUnsuspended(time: number) {
+export function markSchedulerUnsuspended(ms: number) {
   if (enableProfiling) {
     if (eventLog !== null) {
-      logEvent([SchedulerResumeEvent, time, mainThreadIdCounter]);
+      logEvent([SchedulerResumeEvent, ms * 1000, mainThreadIdCounter]);
     }
   }
 }
diff --git a/packages/scheduler/src/__tests__/SchedulerProfiling-test.js b/packages/scheduler/src/__tests__/SchedulerProfiling-test.js
index a1b08488cda..cf9f9c5ea8c 100644
--- a/packages/scheduler/src/__tests__/SchedulerProfiling-test.js
+++ b/packages/scheduler/src/__tests__/SchedulerProfiling-test.js
@@ -213,7 +213,8 @@ describe('Scheduler', () => {
 
     // Now we can render the tasks as a flamegraph.
     const labelColumnWidth = 30;
-    const msPerChar = 50;
+    // Scheduler event times are in microseconds
+    const microsecondsPerChar = 50000;
 
     let result = '';
 
@@ -221,7 +222,7 @@ describe('Scheduler', () => {
     let mainThreadTimelineColumn = '';
     let isMainThreadBusy = true;
    for (const time of mainThreadRuns) {
-      const index = time / msPerChar;
+      const index = time / microsecondsPerChar;
       mainThreadTimelineColumn += (isMainThreadBusy ? '█' : '░').repeat(
         index - mainThreadTimelineColumn.length,
       );
@@ -244,18 +245,18 @@ describe('Scheduler', () => {
       labelColumn += ' '.repeat(labelColumnWidth - labelColumn.length - 1);
 
       // Add empty space up until the start mark
-      let timelineColumn = ' '.repeat(task.start / msPerChar);
+      let timelineColumn = ' '.repeat(task.start / microsecondsPerChar);
 
       let isRunning = false;
       for (const time of task.runs) {
-        const index = time / msPerChar;
+        const index = time / microsecondsPerChar;
         timelineColumn += (isRunning ? '█' : '░').repeat(
           index - timelineColumn.length,
         );
         isRunning = !isRunning;
       }
 
-      const endIndex = task.end / msPerChar;
+      const endIndex = task.end / microsecondsPerChar;
       timelineColumn += (isRunning ? '█' : '░').repeat(
         endIndex - timelineColumn.length,
       );
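
Illustration (not part of the patch): a minimal sketch of why multiplying the
millisecond timestamp by 1000 before it lands in the Int32Array keeps the
sub-millisecond detail. The array and sample value below are hypothetical
stand-ins for the profiler's shared event log, not code from the Scheduler
source.

    // performance.now() might report, say, 2.625 ms. Assigning a float to an
    // Int32Array slot truncates it to a 32-bit integer.
    const eventLog = new Int32Array(2);
    const ms = 2.625;

    eventLog[0] = ms;        // stored as 2: the 0.625 ms of detail is lost
    eventLog[1] = ms * 1000; // stored as 2625 microseconds: detail kept

    console.log(eventLog[0], eventLog[1]); // 2 2625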