diff --git a/packages/core/src/api.js b/packages/core/src/api.js index 9455b214b..dc40e415b 100644 --- a/packages/core/src/api.js +++ b/packages/core/src/api.js @@ -230,7 +230,7 @@ export function createPercyServer(percy, port) { if (cmd === 'reset') { // the reset command will reset testing mode and clear any logs percy.testing = {}; - logger.instance.messages.clear(); + logger.instance.reset(); } else if (cmd === 'version') { // the version command will update the api version header for testing percy.testing.version = body; @@ -262,7 +262,7 @@ export function createPercyServer(percy, port) { })) // returns an array of raw logs from the logger .route('get', '/test/logs', (req, res) => res.json(200, { - logs: Array.from(logger.instance.messages) + logs: logger.instance.query(() => true) })) // serves a very basic html page for testing snapshots .route('get', '/test/snapshot', (req, res) => { diff --git a/packages/core/src/discovery.js b/packages/core/src/discovery.js index a36f0df2d..88fe7d26a 100644 --- a/packages/core/src/discovery.js +++ b/packages/core/src/discovery.js @@ -217,9 +217,8 @@ function processSnapshotResources({ domSnapshot, resources, ...snapshot }) { resources = resources.flat(); // include associated snapshot logs matched by meta information - resources.push(createLogResource(logger.query(log => ( - log.meta.snapshot?.testCase === snapshot.meta.snapshot.testCase && log.meta.snapshot?.name === snapshot.meta.snapshot.name - )))); + resources.push(createLogResource(logger.snapshotLogs(snapshot.meta.snapshot))); + logger.evictSnapshot(snapshot.meta.snapshot); if (process.env.PERCY_GZIP) { for (let index = 0; index < resources.length; index++) { diff --git a/packages/core/test/api.test.js b/packages/core/test/api.test.js index e0a5731fd..a1db6bbe7 100644 --- a/packages/core/test/api.test.js +++ b/packages/core/test/api.test.js @@ -667,7 +667,7 @@ describe('API Server', () => { beforeEach(async () => { process.env.PERCY_TOKEN = 'TEST_TOKEN'; percy = 
await Percy.start({ testing: true }); - logger.instance.messages.clear(); + logger.instance.reset(); }); afterEach(() => { diff --git a/packages/core/test/helpers/index.js b/packages/core/test/helpers/index.js index 11a54e699..343f78456 100644 --- a/packages/core/test/helpers/index.js +++ b/packages/core/test/helpers/index.js @@ -23,6 +23,13 @@ export async function setupTest({ loggerTTY, apiDelay } = {}) { + // Why: downstream packages (cli-exec, cli-snapshot, percy core, etc.) use + // mockfs as their default fs sandbox, and the disk-backed logger flakes + // ENOENT against mockfs's volume mid-flush — which leaks the fallback + // warning into another test's captured stderr. Keep these tests in + // unbounded in-memory mode (master parity) — the disk path has its own + // dedicated coverage in logger.test.js. + process.env.PERCY_LOGS_IN_MEMORY = '1'; await api.mock({ delay: apiDelay }); await logger.mock({ isTTY: loggerTTY }); await resetPercyConfig(resetConfig); diff --git a/packages/core/test/percy.test.js b/packages/core/test/percy.test.js index 7c5e0ac55..b39270569 100644 --- a/packages/core/test/percy.test.js +++ b/packages/core/test/percy.test.js @@ -1180,7 +1180,7 @@ describe('Percy', () => { percy.log.info('cli_test'); percy.log.info('ci_test', {}, true); const logsObject = { - clilogs: Array.from(logger.instance.messages) + clilogs: logger.instance.query(() => true) }; const content = base64encode(Pako.gzip(JSON.stringify(logsObject))); diff --git a/packages/logger/src/index.js b/packages/logger/src/index.js index 919bf5cf6..d1e729df5 100644 --- a/packages/logger/src/index.js +++ b/packages/logger/src/index.js @@ -11,6 +11,9 @@ Object.defineProperties(logger, { constructor: { get: () => Logger }, instance: { get: () => new Logger() }, query: { value: (...args) => logger.instance.query(...args) }, + snapshotLogs: { value: (...args) => logger.instance.snapshotLogs(...args) }, + evictSnapshot: { value: (...args) => logger.instance.evictSnapshot(...args) }, 
+ reset: { value: (...args) => logger.instance.reset(...args) }, format: { value: (...args) => logger.instance.format(...args) }, loglevel: { value: (...args) => logger.instance.loglevel(...args) }, timeit: { get: () => new TimeIt(logger.instance.group('timer')) }, diff --git a/packages/logger/src/logger.js b/packages/logger/src/logger.js index a1c76fb23..545dedd18 100644 --- a/packages/logger/src/logger.js +++ b/packages/logger/src/logger.js @@ -1,32 +1,59 @@ +import fs from 'fs'; +import { tmpdir } from 'os'; +import { join, dirname } from 'path'; +import { randomBytes } from 'crypto'; import { colors } from './utils.js'; const LINE_PAD_REGEXP = /^(\n*)(.*?)(\n*)$/s; const URL_REGEXP = /https?:\/\/[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:;%_+.~#?&//=[\]]*)/i; const LOG_LEVELS = { debug: 0, info: 1, warn: 2, error: 3 }; -// A PercyLogger instance retains logs in-memory for quick lookups while also writing log -// messages to stdout and stderr depending on the log level and debug string. +const FLUSH_AT_ENTRIES = 500; +const FLUSH_TIMER_MS = 100; +const READ_CHUNK_BYTES = 64 * 1024; + +// Hooks latch + active-instance set kept on the `process` object via Symbol.for +// so that they are shared across module copies (the ESM loader-mock path used +// by tests creates fresh module instances; a module-scoped variable would let +// each one register its own listener and accumulate into MaxListenersWarning). +// Using a Set rather than a single pointer also handles transitional states +// where two PercyLogger instances are alive at once (e.g. test setups that +// don't reset between cases) — both files get cleaned at exit. +const EXIT_HOOKS_INSTALLED = Symbol.for('@percy/logger.exitHooksInstalled'); +const ACTIVE_INSTANCES = Symbol.for('@percy/logger.activeInstances'); + +// A PercyLogger writes logs to stdout/stderr and persists every entry to a +// JSONL file under os.tmpdir()/percy-logs/<pid>/, keeping resident memory +// bounded across long builds.
Falls back to an unbounded in-memory Set if +// disk is unavailable (or if the rollback env var PERCY_LOGS_IN_MEMORY is set). export class PercyLogger { - // default log level level = 'info'; - // namespace regular expressions used to determine which debug logs to write namespaces = { include: [/^.*?$/], exclude: [/^ci$/, /^sdk$/] }; - // in-memory store for logs and meta info - messages = new Set(); - - // track deprecations to limit noisy logging deprecations = new Set(); - // static vars can be overriden for testing + // disk-backed store state + diskMode = 'disk'; + diskPath = null; + diskSize = 0; + writeBuffer = []; + flushTimer = null; + // snapshotLogs cache: Map<snapshotKey, entry[]>. Bounded by # of un-evicted snapshot + // keys at any moment; evictSnapshot() drops them. Late entries that arrive + // after eviction repopulate the cache — that is intentional, so retry/ + // re-discovery flows that snapshotLogs(meta) again still see them. + cache = new Map(); + cacheCursor = 0; + fallback = null; + writeFailureWarned = false; + static stdout = process.stdout; static stderr = process.stderr; - // Handles setting env var values and returns a singleton constructor() { let { instance = this } = this.constructor; @@ -36,18 +63,26 @@ export class PercyLogger { instance.loglevel(process.env.PERCY_LOGLEVEL); } + // If the rollback / test env var is set, flip to memory mode immediately so + // log() never goes through the disk buffer at all. Drain any entries that + // were already queued in disk mode so they aren't stranded after the flip.
+ if (process.env.PERCY_LOGS_IN_MEMORY === '1' && + instance.diskMode === 'disk' && !instance.diskPath) { + instance.diskMode = 'memory'; + instance.fallback ??= new Set(); + /* istanbul ignore if: only triggered when env=1 is set after logs have already buffered */ + if (instance.writeBuffer.length) instance._drainBufferToMemory(); + } + this.constructor.instance = instance; return instance; } - // Change log level at any time or return the current log level loglevel(level) { if (level) this.level = level; return this.level; } - // Change namespaces by generating an array of namespace regular expressions from a - // comma separated debug string debug(namespaces) { if (this.namespaces.string === namespaces) return; this.namespaces.string = namespaces; @@ -73,7 +108,6 @@ export class PercyLogger { }); } - // Creates a new log group and returns level specific functions for logging group(name) { return Object.keys(LOG_LEVELS) .reduce((group, level) => Object.assign(group, { @@ -89,61 +123,112 @@ export class PercyLogger { }); } - // Query for a set of logs by filtering the in-memory store + // Returns matching entries. In memory mode, callers see the live fallback + // Set entries (mutations persist). In disk mode, each call streams a fresh + // pass over the JSONL — production callers (sendBuildLogs) only need the + // upload payload, which is the value redactSecrets returns, not a mutation + // side-effect. Tests rely on identity preservation and run in memory mode. query(filter) { - return Array.from(this.messages).filter(filter); + if (this.diskMode === 'memory') { + return Array.from(this.fallback).filter(filter); + } + + this._flushSync(); + if (this.diskMode === 'memory') { + return Array.from(this.fallback).filter(filter); + } + return this._scanDisk(filter); + } + + // Returns entries tagged with the given snapshot meta. 
In disk mode, reads + // only the disk delta since the last call to amortize the work in defer + // mode (snapshots accumulate; logs route through the cache lazily). + snapshotLogs(meta) { + let key = this._snapshotKey({ snapshot: meta }); + if (!key) return []; + + if (this.diskMode === 'memory') { + return this._filterFallback(key); + } + + this._flushSync(); + /* istanbul ignore if: defensive — _flushSync only flips mode via _fallbackToMemory, which our snapshotLogs tests don't exercise mid-call */ + if (this.diskMode === 'memory') { + return this._filterFallback(key); + } + this._refreshCache(); + return this.cache.get(key) || []; + } + + evictSnapshot(meta) { + let key = this._snapshotKey({ snapshot: meta }); + if (!key) return; + this.cache.delete(key); + } + + // Resets all logger state. Cleans up the disk file; next log will lazily reinit. + reset() { + // Why: discard buffered entries before _cleanup — between tests, the old + // diskPath may reference a file from a prior mockfs volume that no longer + // exists in the real fs. Letting _flushSync run would trip ENOENT and + // emit the disk-fallback warning into the next test's captured stderr. + this.writeBuffer = []; + /* istanbul ignore if: defensive — the only code path that schedules a + timer also drains via query() before reset() in tests */ + if (this.flushTimer) { + clearTimeout(this.flushTimer); + this.flushTimer = null; + } + this._cleanup(); + process[ACTIVE_INSTANCES]?.delete(this); + this.diskPath = null; + this.diskSize = 0; + this.cache.clear(); + this.cacheCursor = 0; + this.fallback = null; + this.diskMode = 'disk'; + this.writeFailureWarned = false; + this.deprecations = new Set(); } - // Formats messages before they are logged to stdio format(debug, level, message, elapsed) { let color = (n, m) => this.isTTY ? 
colors[n](m) : m; let begin, end, suffix = ''; let label = 'percy'; if (arguments.length === 1) { - // format(message) [debug, message] = [null, debug]; } else if (arguments.length === 2) { - // format(debug, message) [level, message] = [null, level]; } - // do not format leading or trailing newlines [, begin, message, end] = message.match(LINE_PAD_REGEXP); - // include debug information if (this.level === 'debug') { if (debug) label += `:${debug}`; - // include elapsed time since last log if (elapsed != null) { suffix = ' ' + color('grey', `(${elapsed}ms)`); } } - // add colors label = color('magenta', label); if (level === 'error') { - // red errors message = color('red', message); } else if (level === 'warn') { - // yellow warnings message = color('yellow', message); } else if (level === 'info' || level === 'debug') { - // blue info and debug URLs message = message.replace(URL_REGEXP, color('blue', '$&')); } return `${begin}[${label}] ${message}${suffix}${end}`; } - // True if stdout is a TTY interface get isTTY() { return !!this.constructor.stdout.isTTY; } - // Replaces the current line with a log message progress(debug, message, persist) { if (!this.shouldLog(debug, 'info')) return; let { stdout } = this.constructor; @@ -159,7 +244,6 @@ export class PercyLogger { this._progress = !!message && { message, persist }; } - // Returns true or false if the level and debug group can write messages to stdio shouldLog(debug, level) { return LOG_LEVELS[level] != null && LOG_LEVELS[level] >= LOG_LEVELS[this.level] && @@ -167,7 +251,6 @@ export class PercyLogger { this.namespaces.include.some(ns => ns.test(debug)); } - // Ensures that deprecation messages are not logged more than once deprecated(debug, message, meta) { if (this.deprecations.has(message)) return; this.deprecations.add(message); @@ -175,36 +258,29 @@ export class PercyLogger { this.log(debug, 'warn', `Warning: ${message}`, meta); } - // Generic log method accepts a debug group, log level, log message, and 
optional meta - // information to store with the message and other info log(debug, level, message, meta = {}) { - // message might be an error-like object let err = typeof message !== 'string' && (level === 'debug' || level === 'error'); err &&= message.message ? Error.prototype.toString.call(message) : message.toString(); - // save log entries let timestamp = Date.now(); message = err ? (message.stack || err) : message.toString(); let entry = { debug, level, message, meta, timestamp, error: !!err }; - this.messages.add(entry); - // maybe write the message to stdio + this._record(entry); + if (this.shouldLog(debug, level)) { - // unless the loglevel is debug, write shorter error messages if (err && this.level !== 'debug') message = err; this.write({ ...entry, message }); this.lastlog = timestamp; } } - // Writes a log entry to stdio based on the loglevel write({ debug, level, message, timestamp, error }) { let elapsed = timestamp - (this.lastlog || timestamp); let msg = this.format(debug, error ? 'error' : level, message, elapsed); let progress = this.isTTY && this._progress; let { stdout, stderr } = this.constructor; - // clear any logged progress if (progress) { stdout.cursorTo(0); stdout.clearLine(0); @@ -214,6 +290,249 @@ export class PercyLogger { if (!this._progress?.persist) delete this._progress; else if (progress) stdout.write(progress.message); } + + // ── internals ─────────────────────────────────────────────────────────────── + + _filterFallback(key) { + let result = []; + for (let entry of this.fallback) { + if (this._snapshotKey(entry?.meta) === key) result.push(entry); + } + return result; + } + + _record(entry) { + if (this.diskMode === 'memory') { + this.fallback.add(entry); + return; + } + + let line; + try { + line = JSON.stringify(entry) + '\n'; + } catch { + // Why: circular references in meta would otherwise kill this log call. + // Preserve meta.snapshot so the entry still routes via snapshotLogs. 
+ let safeMeta = { unserializable: true }; + if (entry.meta?.snapshot) safeMeta.snapshot = entry.meta.snapshot; + entry = { ...entry, meta: safeMeta }; + line = JSON.stringify(entry) + '\n'; + } + + let length = Buffer.byteLength(line, 'utf8'); + this.writeBuffer.push({ line, length }); + this._scheduleFlush(); + + if (this.writeBuffer.length >= FLUSH_AT_ENTRIES) this._flushSync(); + } + + _snapshotKey(meta) { + let s = meta?.snapshot; + if (!s || (!s.testCase && !s.name)) return null; + return `${s.testCase || ''}|${s.name || ''}`; + } + + _scheduleFlush() { + if (this.flushTimer) return; + this.flushTimer = setTimeout(() => { + this.flushTimer = null; + this._flushSync(); + }, FLUSH_TIMER_MS); + this.flushTimer.unref?.(); + } + + _flushSync() { + if (this.flushTimer) { + clearTimeout(this.flushTimer); + this.flushTimer = null; + } + if (!this.writeBuffer.length) return; + + this._ensureDiskInit(); + if (this.diskMode !== 'disk') { + this._drainBufferToMemory(); + return; + } + + let chunk = ''; + let written = 0; + for (let item of this.writeBuffer) { + chunk += item.line; + written += item.length; + } + + try { + fs.appendFileSync(this.diskPath, chunk); + } catch (err) { + this._fallbackToMemory(err); + return; + } + + this.diskSize += written; + this.writeBuffer = []; + } + + _drainBufferToMemory() { + /* istanbul ignore next: defensive — fallback is always set before drain runs */ + if (!this.fallback) this.fallback = new Set(); + for (let { line } of this.writeBuffer) { + /* istanbul ignore next: defensive — entries are JSON.stringify'd by us */ + try { this.fallback.add(JSON.parse(line.replace(/\n$/, ''))); } catch { /* skip */ } + } + this.writeBuffer = []; + } + + _ensureDiskInit() { + if (this.diskPath || this.diskMode !== 'disk') return; + + try { + // Per-pid subdir keeps concurrent percy processes (CI matrix, parallel + // test workers, npx invocations) from clobbering each other's files. 
+ let dir = join(tmpdir(), 'percy-logs', String(process.pid)); + fs.mkdirSync(dir, { recursive: true }); + this.diskPath = join( + dir, + `${Date.now()}-${randomBytes(8).toString('hex')}.jsonl` + ); + fs.writeFileSync(this.diskPath, ''); + this._installExitHooks(); + } catch { + this.diskMode = 'memory'; + /* istanbul ignore next: defensive — fallback may already be set */ + this.fallback ??= new Set(); + this.diskPath = null; + } + } + + // Reads the disk delta into the snapshotLogs cache, grouped by snapshotKey. + _refreshCache() { + if (this.cacheCursor >= this.diskSize) return; + + let length = this.diskSize - this.cacheCursor; + let fd = fs.openSync(this.diskPath, 'r'); + + try { + let buf = Buffer.alloc(length); + fs.readSync(fd, buf, 0, length, this.cacheCursor); + let lines = buf.toString('utf8').split('\n'); + // Trailing empty after the final newline. + lines.pop(); + for (let line of lines) { + let entry; + /* istanbul ignore next: defensive — entries are JSON.stringify'd by us */ + try { entry = JSON.parse(line); } catch { continue; } + let key = this._snapshotKey(entry?.meta); + if (!key) continue; + let arr = this.cache.get(key); + if (!arr) this.cache.set(key, arr = []); + arr.push(entry); + } + this.cacheCursor = this.diskSize; + } finally { + fs.closeSync(fd); + } + } + + // Streams the JSONL once and returns matching entries. Each call parses + // afresh — no parsed-entry cache, so RSS at upload time stays bounded by + // the size of the filtered result rather than the total log volume. 
+ _scanDisk(filter) { + if (!this.diskPath || this.diskSize === 0) return []; + + let result = []; + let fd = fs.openSync(this.diskPath, 'r'); + + try { + let buf = Buffer.alloc(READ_CHUNK_BYTES); + let offset = 0; + let partial = ''; + while (offset < this.diskSize) { + let toRead = Math.min(READ_CHUNK_BYTES, this.diskSize - offset); + let n = fs.readSync(fd, buf, 0, toRead, offset); + offset += n; + let lines = (partial + buf.slice(0, n).toString('utf8')).split('\n'); + partial = lines.pop(); + for (let line of lines) { + /* istanbul ignore next: defensive — entries are JSON.stringify'd by us */ + try { + let entry = JSON.parse(line); + if (filter(entry)) result.push(entry); + } catch { /* skip */ } + } + } + } finally { + fs.closeSync(fd); + } + return result; + } + + _fallbackToMemory(err) { + /* istanbul ignore else: latch — only fires once per build */ + if (!this.writeFailureWarned) { + this.writeFailureWarned = true; + PercyLogger.stderr.write( + `[percy] logger: disk write failed (${err?.code || err?.message || 'unknown'}), falling back to in-memory\n` + ); + } + + // Read whatever we already wrote to disk into the fallback Set so /logs + // upload still includes everything from before the failure. 
+ let existing = []; + if (this.diskPath && this.diskSize > 0) { + /* istanbul ignore next: defensive — _scanDisk handles its own errors */ + try { existing = this._scanDisk(() => true); } catch { /* tolerate */ } + } + + this.diskMode = 'memory'; + /* istanbul ignore next: defensive — fallback may already be set */ + this.fallback ??= new Set(); + for (let entry of existing) this.fallback.add(entry); + this._drainBufferToMemory(); + + /* istanbul ignore else: latch — diskPath always set on first fallback */ + if (this.diskPath) { + /* istanbul ignore next: defensive — best-effort cleanup */ + try { fs.unlinkSync(this.diskPath); } catch { /* tolerate */ } + this.diskPath = null; + } + this.diskSize = 0; + this.cache.clear(); + this.cacheCursor = 0; + } + + _installExitHooks() { + let active = process[ACTIVE_INSTANCES] ??= new Set(); + active.add(this); + /* istanbul ignore if: latch — only the first install per process */ + if (process[EXIT_HOOKS_INSTALLED]) return; + process[EXIT_HOOKS_INSTALLED] = true; + let cleanup = () => { + for (let logger of process[ACTIVE_INSTANCES]) logger._cleanup(); + }; + process.once('exit', cleanup); + process.once('beforeExit', cleanup); + // Why: SIGINT/SIGTERM are intentionally not handled. The CLI runtime + // already installs its own signal listeners; adding ours pushes past + // the default 10-listener limit and trips MaxListenersExceededWarning + // in downstream test suites. On Ctrl-C / runner kill our JSONL is left + // in os.tmpdir()/percy-logs/<pid>/ which the OS cleans, and the + // pid-scoped subdir prevents concurrent runs from colliding.
+ } + + _cleanup() { + /* istanbul ignore next: defensive — flush should not throw */ + try { this._flushSync(); } catch { /* tolerate */ } + if (this.diskPath) { + let dir = dirname(this.diskPath); + /* istanbul ignore next: defensive — best-effort */ + try { fs.unlinkSync(this.diskPath); } catch { /* tolerate */ } + // Best-effort rmdir of the per-pid subdir so long-lived runners don't + // accumulate empty directories. Fails harmlessly if peer instances of + // the same pid still hold files there. + /* istanbul ignore next: defensive — best-effort */ + try { fs.rmdirSync(dir); } catch { /* tolerate */ } + } + } } export default PercyLogger; diff --git a/packages/logger/test/helpers.js b/packages/logger/test/helpers.js index 869665533..7d752923e 100644 --- a/packages/logger/test/helpers.js +++ b/packages/logger/test/helpers.js @@ -78,8 +78,13 @@ const helpers = { }, reset(soft) { - if (soft) logger.loglevel('info'); - else delete logger.constructor.instance; + if (soft) { + logger.loglevel('info'); + } else { + // tear down the prior instance's disk artifacts before swapping it out + try { logger.constructor.instance?.reset(); } catch { /* tolerate */ } + delete logger.constructor.instance; + } helpers.stdout.length = 0; helpers.stderr.length = 0; @@ -92,7 +97,7 @@ const helpers = { }, dump() { - let msgs = Array.from(logger.instance.messages); + let msgs = logger.instance.query(() => true); if (!msgs.length) return; let log = m => process.env.__PERCY_BROWSERIFIED__ ? 
( diff --git a/packages/logger/test/logger.test.js b/packages/logger/test/logger.test.js index 92b1a7a8b..dfacc3a31 100644 --- a/packages/logger/test/logger.test.js +++ b/packages/logger/test/logger.test.js @@ -1,3 +1,6 @@ +import fs, { existsSync, readFileSync, readdirSync, rmSync } from 'fs'; +import { tmpdir } from 'os'; +import { join, sep, dirname } from 'path'; import helpers from './helpers.js'; import { colors } from '@percy/logger/utils'; import logger from '@percy/logger'; @@ -46,13 +49,13 @@ describe('logger', () => { meta }); - expect(inst.messages).toEqual(new Set([ + expect(inst.query(() => true)).toEqual([ entry('info', 'Info log', { foo: 'bar' }), entry('warn', 'Warn log', { bar: 'baz' }), entry('error', 'Error log', { to: 'be' }), entry('debug', 'Debug log', { not: 'to be' }), entry('warn', 'Warning: Deprecation log', { test: 'me' }) - ])); + ]); }); it('writes info logs to stdout', () => { @@ -88,7 +91,7 @@ describe('logger', () => { let error = new Error('test'); log.error(error); - expect(inst.messages).toContain({ + expect(inst.query(() => true)).toContain({ debug: 'test', level: 'error', message: error.stack, @@ -566,4 +569,447 @@ describe('logger', () => { }); }); }); + + describe('disk-backed storage', () => { + let percyLogsDir = join(tmpdir(), 'percy-logs', String(process.pid)); + + beforeEach(async () => { + delete process.env.PERCY_LOGS_IN_MEMORY; + try { rmSync(percyLogsDir, { recursive: true, force: true }); } catch { /* tolerate */ } + await helpers.mock(); + delete process.env.PERCY_LOGS_IN_MEMORY; + logger.instance.reset(); + }); + + afterEach(() => { + logger.instance.reset(); + try { rmSync(percyLogsDir, { recursive: true, force: true }); } catch { /* tolerate */ } + }); + + it('round-trips entries through disk', () => { + let group = logger('disk'); + for (let i = 0; i < 50; i++) group.info(`entry ${i}`, { i }); + + let entries = logger.query(() => true); + expect(entries.length).toEqual(50); + 
expect(entries[0]).toEqual(jasmine.objectContaining({ message: 'entry 0', meta: { i: 0 } })); + expect(entries[49]).toEqual(jasmine.objectContaining({ message: 'entry 49', meta: { i: 49 } })); + }); + + it('writes a JSONL file under percy-logs', () => { + logger('disk').info('hello'); + logger.query(() => true); // forces flush + + let files = readdirSync(percyLogsDir).filter(f => f.endsWith('.jsonl')); + expect(files.length).toEqual(1); + let content = readFileSync(join(percyLogsDir, files[0]), 'utf8'); + let line = content.trim().split('\n')[0]; + expect(JSON.parse(line)).toEqual(jasmine.objectContaining({ + debug: 'disk', level: 'info', message: 'hello', error: false + })); + }); + + it('serves snapshotLogs from in-memory cache, not disk', () => { + let group = logger('core:snapshot'); + group.info('A1', { snapshot: { testCase: 'tc', name: 'A' } }); + group.info('B1', { snapshot: { testCase: 'tc', name: 'B' } }); + group.info('A2', { snapshot: { testCase: 'tc', name: 'A' } }); + + let logsA = logger.snapshotLogs({ testCase: 'tc', name: 'A' }); + expect(logsA.length).toEqual(2); + expect(logsA.map(l => l.message)).toEqual(['A1', 'A2']); + + let logsB = logger.snapshotLogs({ testCase: 'tc', name: 'B' }); + expect(logsB.length).toEqual(1); + expect(logsB[0].message).toEqual('B1'); + }); + + it('lets late entries after evictSnapshot repopulate the cache (retry-friendly)', () => { + let group = logger('core:snapshot'); + group.info('A1', { snapshot: { testCase: 'tc', name: 'A' } }); + + // upload happens, snapshot is evicted + expect(logger.snapshotLogs({ testCase: 'tc', name: 'A' }).length).toEqual(1); + logger.evictSnapshot({ testCase: 'tc', name: 'A' }); + + // a late deferred handler — or a discovery retry that re-snapshots the + // same meta — logs against the same snapshot + group.info('A2 (late)', { snapshot: { testCase: 'tc', name: 'A' } }); + + // snapshotLogs must surface the late entry (master parity) + let logsA = logger.snapshotLogs({ testCase: 'tc', 
name: 'A' }); + expect(logsA.length).toEqual(1); + expect(logsA[0].message).toEqual('A2 (late)'); + // disk still keeps the entry for the global /logs payload + expect(logger.query(() => true).find(e => e.message === 'A2 (late)')).toBeDefined(); + }); + + it('evictSnapshot drops cache but disk still has the entries', () => { + let group = logger('core:snapshot'); + group.info('hi', { snapshot: { testCase: 'tc', name: 'A' } }); + + expect(logger.snapshotLogs({ testCase: 'tc', name: 'A' }).length).toEqual(1); + logger.evictSnapshot({ testCase: 'tc', name: 'A' }); + expect(logger.snapshotLogs({ testCase: 'tc', name: 'A' }).length).toEqual(0); + + // disk still has it for sendBuildLogs + let all = logger.query(() => true); + expect(all.find(e => e.message === 'hi')).toBeDefined(); + }); + + it('flushes the buffer when query is called', () => { + // 5 entries; below the 500 size cap and faster than the 100ms timer. + let group = logger('disk'); + for (let i = 0; i < 5; i++) group.info(`x${i}`); + + // query forces a flush, so entries become visible on disk + let result = logger.query(() => true); + expect(result.length).toEqual(5); + }); + + it('reset() clears state and removes the disk file', () => { + logger('disk').info('temporary'); + logger.query(() => true); + + let files = readdirSync(percyLogsDir).filter(f => f.endsWith('.jsonl')); + expect(files.length).toEqual(1); + let path = join(percyLogsDir, files[0]); + expect(existsSync(path)).toBeTrue(); + + logger.instance.reset(); + expect(existsSync(path)).toBeFalse(); + expect(logger.query(() => true).length).toEqual(0); + }); + + it('survives circular references in meta', () => { + let circular = {}; + circular.self = circular; + logger('disk').error('round and round', { circle: circular }); + + let entries = logger.query(() => true); + expect(entries.length).toEqual(1); + expect(entries[0].meta).toEqual({ unserializable: true }); + }); + + it('preserves snapshot key when meta has circular references', () => { + 
let circular = {}; + circular.self = circular; + logger('disk').error('boom', { snapshot: { name: 'A' }, circle: circular }); + + // The entry should still route to snapshotLogs even after meta sanitization. + let logs = logger.snapshotLogs({ name: 'A' }); + expect(logs.length).toEqual(1); + expect(logs[0].meta).toEqual({ unserializable: true, snapshot: { name: 'A' } }); + }); + + it('falls back to in-memory mode when PERCY_LOGS_IN_MEMORY=1', () => { + logger.instance.reset(); + process.env.PERCY_LOGS_IN_MEMORY = '1'; + // force a fresh instance so the env is re-read + delete logger.constructor.instance; + + logger('mem').info('no disk for me'); + + let files; + try { files = readdirSync(percyLogsDir).filter(f => f.endsWith('.jsonl')); } catch { files = []; } + expect(files.length).toEqual(0); + + let entries = logger.query(() => true); + expect(entries.length).toEqual(1); + expect(entries[0].message).toEqual('no disk for me'); + }); + + it('snapshotLogs filters from the in-memory Set in memory mode', () => { + logger.instance.reset(); + process.env.PERCY_LOGS_IN_MEMORY = '1'; + delete logger.constructor.instance; + + let group = logger('core:snapshot'); + group.info('A1', { snapshot: { testCase: 'tc', name: 'A' } }); + group.info('B1', { snapshot: { testCase: 'tc', name: 'B' } }); + group.info('untagged'); + + // First call: mode is still 'disk' at entry, _flushSync flips it to memory. + let logsA = logger.snapshotLogs({ testCase: 'tc', name: 'A' }); + expect(logsA.length).toEqual(1); + expect(logsA[0].message).toEqual('A1'); + + // Add another tagged entry — goes directly into the fallback Set now. + group.info('A2', { snapshot: { testCase: 'tc', name: 'A' } }); + + // Second call: mode is 'memory' at entry — covers the top-of-method memory branch. 
+ let logsA2 = logger.snapshotLogs({ testCase: 'tc', name: 'A' }); + expect(logsA2.length).toEqual(2); + + // also covers the top-of-method memory branch in query() + let all = logger.query(() => true); + expect(all.length).toEqual(4); + + // empty meta returns [] + expect(logger.snapshotLogs({}).length).toEqual(0); + }); + + it('falls back to memory when appendFileSync throws', () => { + let calls = 0; + let real = fs.appendFileSync; + let spy = spyOn(fs, 'appendFileSync').and.callFake((...args) => { + calls++; + if (calls === 1) throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }); + return real.apply(fs, args); + }); + + logger('disk').info('first entry'); + // forces a flush, which triggers the appendFileSync failure + let entries = logger.query(() => true); + + expect(spy).toHaveBeenCalled(); + expect(entries.length).toEqual(1); + expect(entries[0].message).toEqual('first entry'); + // after fallback, no disk file should exist + let files = []; + try { files = readdirSync(percyLogsDir); } catch { /* ok */ } + expect(files.filter(f => f.endsWith('.jsonl')).length).toEqual(0); + }); + + it('falls back to memory when mkdirSync throws', () => { + spyOn(fs, 'mkdirSync').and.throwError(Object.assign(new Error('EACCES'), { code: 'EACCES' })); + + logger('disk').info('cannot create dir'); + let entries = logger.query(() => true); + + expect(entries.length).toEqual(1); + expect(entries[0].message).toEqual('cannot create dir'); + }); + + it('drains pre-fallback buffer entries into memory', () => { + spyOn(fs, 'appendFileSync').and.callFake(() => { + throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }); + }); + + let group = logger('disk'); + group.info('one'); + group.info('two'); + group.info('three'); + + let entries = logger.query(() => true); + expect(entries.length).toEqual(3); + expect(entries.map(e => e.message)).toEqual(['one', 'two', 'three']); + }); + + it('reads existing disk content into memory when fallback fires mid-build', () => { + let 
original = fs.appendFileSync; + let failAfter = 1; + let calls = 0; + spyOn(fs, 'appendFileSync').and.callFake((...args) => { + calls++; + if (calls > failAfter) throw Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' }); + return original.apply(fs, args); + }); + + let group = logger('disk'); + group.info('on disk'); + logger.query(() => true); // flush #1 — succeeds, on disk + + group.info('after fallback'); + let entries = logger.query(() => true); // flush #2 — fails, fallback fires + + expect(entries.length).toEqual(2); + expect(entries.map(e => e.message).sort()).toEqual(['after fallback', 'on disk']); + }); + + it('evictSnapshot with empty meta is a no-op', () => { + logger.instance.evictSnapshot({}); + logger.instance.evictSnapshot(); + expect(logger.query(() => true).length).toEqual(0); + }); + + it('snapshotKey works with only name or only testCase set', () => { + let group = logger('partial'); + group.info('only-name', { snapshot: { name: 'A' } }); + group.info('only-testcase', { snapshot: { testCase: 'tc' } }); + group.info('both', { snapshot: { testCase: 'tc', name: 'A' } }); + + expect(logger.snapshotLogs({ name: 'A' }).length).toEqual(1); + expect(logger.snapshotLogs({ testCase: 'tc' }).length).toEqual(1); + expect(logger.snapshotLogs({ testCase: 'tc', name: 'A' }).length).toEqual(1); + }); + + it('query filter rejects non-matching entries', () => { + let g = logger('disk'); + g.info('keep-me'); + g.info('drop-me'); + let kept = logger.query(e => e.message === 'keep-me'); + expect(kept.length).toEqual(1); + expect(kept[0].message).toEqual('keep-me'); + }); + + it('logger.reset() (public wrapper) clears the logger', () => { + logger('disk').info('temporary'); + expect(logger.query(() => true).length).toEqual(1); + logger.reset(); + expect(logger.query(() => true).length).toEqual(0); + }); + + it('the 100ms timer flushes the buffer on its own', async () => { + logger('timer').info('lazy'); + // Wait past the FLUSH_TIMER_MS window so the timer 
callback fires + // without us forcing a flush via query(). + await new Promise(r => setTimeout(r, 150)); + // Now query() should find the entry already on disk; the buffer is + // empty, so no extra flush happens here. + expect(logger.query(() => true).length).toEqual(1); + }); + + it('auto-flushes when the buffer hits the entry cap', () => { + let group = logger('cap'); + // FLUSH_AT_ENTRIES = 500. Push more than that to trigger the size-cap flush. + for (let i = 0; i < 510; i++) group.info(`x${i}`); + let entries = logger.query(() => true); + expect(entries.length).toEqual(510); + }); + + it('skips untagged entries while building the snapshot cache from disk', () => { + let group = logger('mix'); + group.info('untagged-1'); + group.info('tagged-1', { snapshot: { name: 'A' } }); + group.info('untagged-2'); + // first snapshotLogs call triggers cache build from disk delta + let logsA = logger.snapshotLogs({ name: 'A' }); + expect(logsA.length).toEqual(1); + expect(logsA[0].message).toEqual('tagged-1'); + }); + + it('disk-fail warning falls back to err.message when no code is present', () => { + let warnSpy = jasmine.createSpy('write'); + let originalStderr = logger.constructor.stderr; + logger.constructor.stderr = { write: warnSpy }; + + spyOn(fs, 'appendFileSync').and.throwError(new Error('something broke')); + + logger('msg').info('one'); + logger.query(() => true); + + logger.constructor.stderr = originalStderr; + expect(warnSpy.calls.allArgs().some(a => /something broke/.test(a[0]))).toBeTrue(); + }); + + it('disk-fail warning shows "unknown" when err has neither code nor message', () => { + let warnSpy = jasmine.createSpy('write'); + let originalStderr = logger.constructor.stderr; + logger.constructor.stderr = { write: warnSpy }; + + // bare object — no .code, no .message — exercises the 'unknown' branch + // eslint-disable-next-line no-throw-literal + spyOn(fs, 'appendFileSync').and.callFake(() => { throw {}; }); + + logger('unk').info('one'); + 
logger.query(() => true); + + logger.constructor.stderr = originalStderr; + expect(warnSpy.calls.allArgs().some(a => /\(unknown\)/.test(a[0]))).toBeTrue(); + }); + + it('fires the disk-write-failed stderr warning exactly once', () => { + let warnSpy = jasmine.createSpy('write'); + let originalStderr = logger.constructor.stderr; + logger.constructor.stderr = { write: warnSpy }; + + spyOn(fs, 'appendFileSync').and.throwError(Object.assign(new Error('ENOSPC'), { code: 'ENOSPC' })); + + logger('warn1').info('one'); + logger.query(() => true); + logger('warn2').info('two'); + logger.query(() => true); + + logger.constructor.stderr = originalStderr; + expect(warnSpy.calls.allArgs().filter(a => /disk write failed/.test(a[0])).length).toEqual(1); + }); + + it('cleans up the disk file when the process exit hook fires', () => { + logger('exit').info('hello'); + logger.query(() => true); // ensure file exists + let files = readdirSync(percyLogsDir).filter(f => f.endsWith('.jsonl')); + expect(files.length).toEqual(1); + let path = join(percyLogsDir, files[0]); + + // Manually invoke the registered exit listener — simulating process.emit('exit') + // would also fire other test-suite listeners which we don't want. 
+ let listeners = process.listeners('exit'); + for (let listener of listeners.slice(-1)) listener(); + + expect(existsSync(path)).toBeFalse(); + }); + + it('writes the JSONL into the per-pid subdir', () => { + logger('pid').info('one'); + logger.query(() => true); + + let pidDir = join(tmpdir(), 'percy-logs', String(process.pid)); + let files = readdirSync(pidDir).filter(f => f.endsWith('.jsonl')); + expect(files.length).toEqual(1); + // diskPath itself sits under the pid subdir + expect(logger.instance.diskPath.includes(`${sep}${process.pid}${sep}`)).toBeTrue(); + }); + + it('process[Symbol.for(@percy/logger.exitHooksInstalled)] is the dedupe latch', () => { + logger('latch').info('one'); + logger.query(() => true); + expect(process[Symbol.for('@percy/logger.exitHooksInstalled')]).toBeTrue(); + // Active-instance Set holds the live logger, so it survives module reloads + expect(process[Symbol.for('@percy/logger.activeInstances')].has(logger.instance)).toBeTrue(); + }); + + it('exit-hook cleanup iterates every active logger instance', () => { + // The current singleton + a sibling instance held off-thread (we can't + // construct two live PercyLogger via the singleton getter, so we add + // a stub directly to the active set to mirror the multi-instance case). 
+ logger('a').info('one'); + logger.query(() => true); + let firstPath = logger.instance.diskPath; + + let stub = { + diskPath: join(percyLogsDir, '99999-stub.jsonl'), + _flushSync() {}, + _cleanup: logger.constructor.prototype._cleanup + }; + // create the stub's file so we can verify cleanup unlinks it + fs.writeFileSync(stub.diskPath, ''); + process[Symbol.for('@percy/logger.activeInstances')].add(stub); + + let listeners = process.listeners('exit'); + for (let listener of listeners.slice(-1)) listener(); + + expect(existsSync(firstPath)).toBeFalse(); + expect(existsSync(stub.diskPath)).toBeFalse(); + // tidy up our manual injection + process[Symbol.for('@percy/logger.activeInstances')].delete(stub); + }); + + it('rmdir best-effort tolerates a non-empty pid subdir', () => { + logger('rm').info('hi'); + logger.query(() => true); + let diskPath = logger.instance.diskPath; + let pidDir = dirname(diskPath); + // peer file in the same pid subdir + let peer = join(pidDir, 'peer.txt'); + fs.writeFileSync(peer, 'peer'); + + logger.instance.reset(); + + // peer survived because rmdir is best-effort and the dir is non-empty + expect(existsSync(peer)).toBeTrue(); + // our jsonl is gone + expect(existsSync(diskPath)).toBeFalse(); + fs.unlinkSync(peer); + }); + }); + + describe('PERCY_LOGLEVEL', () => { + it('honors the env var on construction', () => { + delete logger.constructor.instance; + process.env.PERCY_LOGLEVEL = 'error'; + expect(logger.loglevel()).toEqual('error'); + delete process.env.PERCY_LOGLEVEL; + }); + }); });