diff --git a/.github/instructions/extractors_cds_tools_ts.instructions.md b/.github/instructions/extractors_cds_tools_ts.instructions.md
new file mode 100644
index 00000000..e27d6cf2
--- /dev/null
+++ b/.github/instructions/extractors_cds_tools_ts.instructions.md
@@ -0,0 +1,65 @@
+---
+applyTo: 'extractors/cds/tools/**/*.ts'
+description: 'Instructions for CodeQL CDS extractor TypeScript source and test files.'
+---
+
+# Copilot Instructions for `extractors/cds/tools/**/*.ts` files
+
+## PURPOSE
+
+This file contains instructions for working with TypeScript source files in the `extractors/cds/tools/` directory of the `codeql-sap-js` repository: the main `cds-extractor.ts` entry point, the modular source files in `src/**/*.ts`, and the test files in `test/**/*.test.ts`.
+
+## REQUIREMENTS
+
+### COMMON REQUIREMENTS
+
+- ALWAYS use modern TypeScript syntax and features compatible with the configured target (ES2020).
+- ALWAYS follow best practices for implementing secure and efficient CodeQL extractor functionality.
+- ALWAYS order imports, definitions, static lists, and similar constructs alphabetically.
+- ALWAYS follow a test-driven development (TDD) approach by writing comprehensive tests for new features and bug fixes.
+- ALWAYS fix lint errors by running `npm run lint:fix` from the `extractors/cds/tools/` directory before committing changes.
+- ALWAYS keep the CDS extractor's compilation behavior consistent with the `extractors/cds/tools/test/cds-compilation-for-actions.test.sh` script to prevent CI/CD workflow failures.
+- **ALWAYS run `npm run build:all` from the `extractors/cds/tools/` directory and ensure it passes completely before committing any changes. This is MANDATORY and includes lint checks, test coverage, and bundle validation.**
+
+### CDS EXTRACTOR SOURCE REQUIREMENTS
+
+The following requirements are specific to the main entry point `cds-extractor.ts` and to source files matching `extractors/cds/tools/src/**/*.ts`.
+
+- ALWAYS keep the main entry point `cds-extractor.ts` focused on orchestration, delegating specific tasks to well-defined modules in `src/`.
+- ALWAYS handle extraction failures gracefully using tool-level diagnostics so that they do not disrupt the overall CodeQL extraction process. Instead of exiting with a non-zero code, the CDS extractor should generate a diagnostic error (or warning) that points to the path, relative to the source root, of the problematic source file (e.g. a `.cds` file).
+
+### CDS EXTRACTOR TESTING REQUIREMENTS
+
+The following requirements are specific to test files matching `extractors/cds/tools/test/**/*.test.ts`.
+
+- ALWAYS write unit tests for new functions and classes in corresponding `test/src/**/*.test.ts` files.
+- ALWAYS use the Jest testing framework with the configured `ts-jest` preset.
+- ALWAYS follow the AAA pattern (Arrange, Act, Assert) for test structure.
+- ALWAYS mock external dependencies (filesystem, child processes, network calls) using Jest mocks or `mock-fs`.
+- ALWAYS test both success and error scenarios with appropriate edge cases.
+- ALWAYS maintain test coverage above the established threshold.
+- **ALWAYS run `npm test` or `npm run test:coverage` from the `extractors/cds/tools/` directory and ensure all tests pass before committing changes.**
+
+## PREFERENCES
+
+- PREFER modular design, with each major piece of functionality implemented in its own dedicated file or module under `src/`.
+- PREFER the existing architectural patterns:
+  - `src/cds/compiler/` for CDS compiler-specific logic
+  - `src/cds/parser/` for CDS parser-specific logic
+  - `src/logging/` for unified logging and performance tracking
+  - `src/packageManager/` for dependency management and caching
+  - `src/codeql.ts` for CodeQL JavaScript extractor integration
+  - `src/environment.ts` for environment setup and validation
+- PREFER comprehensive error handling with diagnostic reporting through the `src/diagnostics.ts` module.
+- PREFER performance-conscious implementations that minimize filesystem operations and dependency installations.
+- PREFER project-aware processing that understands CDS file relationships and dependencies.
+
+## CONSTRAINTS
+
+- NEVER leave trailing whitespace on any line.
+- NEVER directly modify compiled files in the `dist/` directory; make all changes in the corresponding `src/` files and build them using the build process.
+- NEVER commit changes without verifying that `npm run build:all` passes completely when run from the `extractors/cds/tools/` directory.
+- NEVER modify compilation behavior without updating the corresponding test script `extractors/cds/tools/test/cds-compilation-for-actions.test.sh`.
+- NEVER process CDS files in isolation; maintain project-aware context for accurate extraction.
+- NEVER bypass the unified logging system; use the `src/logging/` utilities for all output and diagnostics.
+- NEVER commit extra documentation files that merely explain what was changed or fixed; use git commit messages instead of adding `.md` files you have not explicitly been asked to create.
diff --git a/extractors/cds/tools/cds-extractor.ts b/extractors/cds/tools/cds-extractor.ts
index b7585eb6..d18a45a0 100644
--- a/extractors/cds/tools/cds-extractor.ts
+++ b/extractors/cds/tools/cds-extractor.ts
@@ -183,7 +183,12 @@ try {
     if (!extractorResult.success && extractorResult.error) {
       cdsExtractorLog('error', `Error running JavaScript extractor: ${extractorResult.error}`);
       if (codeqlExePath) {
-        addJavaScriptExtractorDiagnostic(sourceRoot, extractorResult.error, codeqlExePath);
+        addJavaScriptExtractorDiagnostic(
+          sourceRoot,
+          extractorResult.error,
+          codeqlExePath,
+          sourceRoot,
+        );
       }
       logExtractorStop(false, 'JavaScript extractor failed');
     } else {
@@ -223,7 +228,12 @@ try {
   if (!extractorResult.success && extractorResult.error) {
     cdsExtractorLog('error', `Error running JavaScript extractor: ${extractorResult.error}`);
     if (codeqlExePath) {
-      addJavaScriptExtractorDiagnostic(sourceRoot, extractorResult.error, codeqlExePath);
+      addJavaScriptExtractorDiagnostic(
+        sourceRoot,
+        extractorResult.error,
+        codeqlExePath,
+        sourceRoot,
+      );
     }
     logExtractorStop(false, 'JavaScript extractor failed');
   } else {
@@ -316,6 +326,7 @@ try {
       cdsFilePathsToProcess[0], // Use first file as representative
       `Compilation orchestration failed: ${String(error)}`,
       codeqlExePath,
+      sourceRoot,
     );
   }
 }
@@ -350,7 +361,12 @@ if (!extractorResult.success && extractorResult.error) {
     // Use the first CDS file as a representative file for the diagnostic
     const firstProject = Array.from(dependencyGraph.projects.values())[0];
     const representativeFile = firstProject.cdsFiles[0] || sourceRoot;
-    addJavaScriptExtractorDiagnostic(representativeFile, extractorResult.error, codeqlExePath);
+    addJavaScriptExtractorDiagnostic(
+      representativeFile,
+      extractorResult.error,
+      codeqlExePath,
+      sourceRoot,
+    );
   }
 
   logExtractorStop(false, 'JavaScript extractor failed');
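
Every call site above gains a trailing `sourceRoot` argument so that emitted diagnostics reference paths relative to the source root, as the new instructions file requires. A minimal TypeScript sketch of that normalization, transliterated from the compiled `convertToRelativePath` helper visible in the bundle diff below (the real implementation lives in `src/diagnostics.ts`, which is not part of this hunk):

```ts
import { relative, resolve } from 'path';

// Sketch of the source-root-relative conversion used by the diagnostics
// module. Empty inputs and paths outside the source root fall back to '.',
// matching the bundled implementation below (POSIX-style paths assumed).
function convertToRelativePath(filePath: string, sourceRoot: string): string {
  if (!filePath || !sourceRoot) return '.';
  const resolvedSourceRoot = resolve(sourceRoot);
  const resolvedFilePath = filePath.startsWith('/')
    ? resolve(filePath)
    : resolve(resolvedSourceRoot, filePath);
  if (resolvedFilePath === resolvedSourceRoot) return '.';
  const relativePath = relative(resolvedSourceRoot, resolvedFilePath);
  return relativePath.startsWith('..') ? '.' : relativePath;
}

// convertToRelativePath('/repo/srv/service.cds', '/repo') === 'srv/service.cds'
// convertToRelativePath('/elsewhere/file.cds', '/repo') === '.'
```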
diff --git a/extractors/cds/tools/dist/cds-extractor.bundle.js b/extractors/cds/tools/dist/cds-extractor.bundle.js
index 330be653..7e84cdb1 100644
--- a/extractors/cds/tools/dist/cds-extractor.bundle.js
+++ b/extractors/cds/tools/dist/cds-extractor.bundle.js
@@ -5790,10 +5790,10 @@ var Ignore = class {
   ignored(p) {
     const fullpath = p.fullpath();
     const fullpaths = `${fullpath}/`;
-    const relative4 = p.relative() || ".";
-    const relatives = `${relative4}/`;
+    const relative5 = p.relative() || ".";
+    const relatives = `${relative5}/`;
     for (const m of this.relative) {
-      if (m.match(relative4) || m.match(relatives))
+      if (m.match(relative5) || m.match(relatives))
         return true;
     }
     for (const m of this.absolute) {
@@ -5804,9 +5804,9 @@ var Ignore = class {
   }
   childrenIgnored(p) {
     const fullpath = p.fullpath() + "/";
-    const relative4 = (p.relative() || ".") + "/";
+    const relative5 = (p.relative() || ".") + "/";
     for (const m of this.relativeChildren) {
-      if (m.match(relative4))
+      if (m.match(relative5))
         return true;
     }
     for (const m of this.absoluteChildren) {
@@ -7449,7 +7449,27 @@ function validateTaskOutputs(task, sourceRoot2) {
 // src/diagnostics.ts
 var import_child_process4 = require("child_process");
 var import_path6 = require("path");
-function addDiagnostic(filePath, message, codeqlExePath2, sourceId, sourceName, severity, logPrefix) {
+function convertToRelativePath(filePath, sourceRoot2) {
+  if (!filePath || typeof filePath !== "string" || !sourceRoot2 || typeof sourceRoot2 !== "string") {
+    return ".";
+  }
+  try {
+    const resolvedSourceRoot = (0, import_path6.resolve)(sourceRoot2);
+    const resolvedFilePath = filePath.startsWith("/") ? (0, import_path6.resolve)(filePath) : (0, import_path6.resolve)(resolvedSourceRoot, filePath);
+    if (resolvedFilePath === resolvedSourceRoot) {
+      return ".";
+    }
+    const relativePath = (0, import_path6.relative)(resolvedSourceRoot, resolvedFilePath);
+    if (relativePath.startsWith("..")) {
+      return ".";
+    }
+    return relativePath;
+  } catch {
+    return ".";
+  }
+}
+function addDiagnostic(filePath, message, codeqlExePath2, sourceId, sourceName, severity, logPrefix, sourceRoot2) {
+  const finalFilePath = sourceRoot2 ? convertToRelativePath(filePath, sourceRoot2) : (0, import_path6.resolve)(filePath);
   try {
     (0, import_child_process4.execFileSync)(codeqlExePath2, [
       "database",
       "add-diagnostic",
@@ -7460,7 +7480,7 @@ function addDiagnostic(filePath, message, codeqlExePath2, sourceId, sourceName,
       `--source-name=${sourceName}`,
       `--severity=${severity}`,
       `--markdown-message=${message}`,
-      `--file-path=${(0, import_path6.resolve)(filePath)}`,
+      `--file-path=${finalFilePath}`,
       "--",
       `${process.env.CODEQL_EXTRACTOR_CDS_WIP_DATABASE ?? ""}`
     ]);
@@ -7474,7 +7494,7 @@ function addDiagnostic(filePath, message, codeqlExePath2, sourceId, sourceName,
     return false;
   }
 }
-function addCompilationDiagnostic(cdsFilePath, errorMessage, codeqlExePath2) {
+function addCompilationDiagnostic(cdsFilePath, errorMessage, codeqlExePath2, sourceRoot2) {
   return addDiagnostic(
     cdsFilePath,
     errorMessage,
@@ -7482,7 +7502,8 @@ function addCompilationDiagnostic(cdsFilePath, errorMessage, codeqlExePath2) {
     "cds/compilation-failure",
     "Failure to compile one or more SAP CAP CDS files",
     "error" /* Error */,
-    "source file"
+    "source file",
+    sourceRoot2
   );
 }
 function addDependencyGraphDiagnostic(sourceRoot2, errorMessage, codeqlExePath2) {
@@ -7493,7 +7514,8 @@ function addDependencyGraphDiagnostic(sourceRoot2, errorMessage, codeqlExePath2)
     "cds/dependency-graph-failure",
     "CDS project dependency graph build failure",
     "error" /* Error */,
-    "source root"
+    "source root",
+    sourceRoot2
   );
 }
 function addDependencyInstallationDiagnostic(sourceRoot2, errorMessage, codeqlExePath2) {
@@ -7504,7 +7526,8 @@ function addDependencyInstallationDiagnostic(sourceRoot2, errorMessage, codeqlEx
     "cds/dependency-installation-failure",
     "CDS dependency installation failure",
     "error" /* Error */,
-    "source root"
+    "source root",
+    sourceRoot2
   );
 }
 function addEnvironmentSetupDiagnostic(sourceRoot2, errorMessage, codeqlExePath2) {
@@ -7516,10 +7539,11 @@ function addEnvironmentSetupDiagnostic(sourceRoot2, errorMessage, codeqlExePath2
     "cds/environment-setup-failure",
     "CDS extractor environment setup failure",
     "error" /* Error */,
-    "source root"
+    "source root",
+    sourceRoot2
   );
 }
-function addJavaScriptExtractorDiagnostic(filePath, errorMessage, codeqlExePath2) {
+function addJavaScriptExtractorDiagnostic(filePath, errorMessage, codeqlExePath2, sourceRoot2) {
   return addDiagnostic(
     filePath,
     errorMessage,
@@ -7527,7 +7551,8 @@ function addJavaScriptExtractorDiagnostic(filePath, errorMessage, codeqlExePath2
     "cds/js-extractor-failure",
     "Failure in JavaScript extractor for SAP CAP CDS files",
     "error" /* Error */,
-    "extraction file"
+    "extraction file",
+    sourceRoot2
   );
 }
 function addNoCdsProjectsDiagnostic(sourceRoot2, message, codeqlExePath2) {
@@ -7538,7 +7563,8 @@ function addNoCdsProjectsDiagnostic(sourceRoot2, message, codeqlExePath2) {
     "cds/no-cds-projects",
     "No CDS projects detected in source",
     "warning" /* Warning */,
-    "source root"
+    "source root",
+    sourceRoot2
   );
 }
 
@@ -8069,7 +8095,7 @@ function projectInstallDependencies(project, sourceRoot2) {
 }
 
 // src/cds/compiler/retry.ts
-function addCompilationDiagnosticsForFailedTasks(dependencyGraph2, codeqlExePath2) {
+function addCompilationDiagnosticsForFailedTasks(dependencyGraph2, codeqlExePath2, sourceRoot2) {
   for (const project of dependencyGraph2.projects.values()) {
     for (const task of project.compilationTasks) {
       if (task.status === "failed") {
@@ -8079,7 +8105,8 @@ function addCompilationDiagnosticsForFailedTasks(dependencyGraph2, codeqlExePath
         addCompilationDiagnostic(
           sourceFile,
           task.errorSummary ?? "Compilation failed",
-          codeqlExePath2
+          codeqlExePath2,
+          sourceRoot2
         );
       }
     }
@@ -8191,7 +8218,11 @@ function orchestrateRetryAttempts(dependencyGraph2, codeqlExePath2) {
     result.retryCompilationDurationMs = retryCompilationEndTime - retryCompilationStartTime;
     updateCdsDependencyGraphStatus(dependencyGraph2, dependencyGraph2.sourceRootDir);
     updateDependencyGraphWithRetryResults(dependencyGraph2, result);
-    addCompilationDiagnosticsForFailedTasks(dependencyGraph2, codeqlExePath2);
+    addCompilationDiagnosticsForFailedTasks(
+      dependencyGraph2,
+      codeqlExePath2,
+      dependencyGraph2.sourceRootDir
+    );
     result.success = result.totalSuccessfulRetries > 0 || result.totalTasksRequiringRetry === 0;
   } catch (error) {
     const errorMessage = `Retry orchestration failed: ${String(error)}`;
@@ -9071,7 +9102,7 @@ function runJavaScriptExtractor(sourceRoot2, autobuildScriptPath2, codeqlExePath
   if (result.error) {
     const errorMessage = `Error running JavaScript extractor: ${result.error.message}`;
     if (codeqlExePath2) {
-      addJavaScriptExtractorDiagnostic(sourceRoot2, errorMessage, codeqlExePath2);
+      addJavaScriptExtractorDiagnostic(sourceRoot2, errorMessage, codeqlExePath2, sourceRoot2);
     }
     return {
       success: false,
@@ -9081,7 +9112,7 @@ function runJavaScriptExtractor(sourceRoot2, autobuildScriptPath2, codeqlExePath
   if (result.status !== 0) {
     const errorMessage = `JavaScript extractor failed with exit code ${String(result.status)}`;
     if (codeqlExePath2) {
-      addJavaScriptExtractorDiagnostic(sourceRoot2, errorMessage, codeqlExePath2);
+      addJavaScriptExtractorDiagnostic(sourceRoot2, errorMessage, codeqlExePath2, sourceRoot2);
     }
     return {
       success: false,
@@ -9445,7 +9476,12 @@ try {
     if (!extractorResult2.success && extractorResult2.error) {
       cdsExtractorLog("error", `Error running JavaScript extractor: ${extractorResult2.error}`);
       if (codeqlExePath) {
-        addJavaScriptExtractorDiagnostic(sourceRoot, extractorResult2.error, codeqlExePath);
+        addJavaScriptExtractorDiagnostic(
+          sourceRoot,
+          extractorResult2.error,
+          codeqlExePath,
+          sourceRoot
+        );
       }
       logExtractorStop(false, "JavaScript extractor failed");
     } else {
@@ -9476,7 +9512,12 @@ try {
   if (!extractorResult2.success && extractorResult2.error) {
     cdsExtractorLog("error", `Error running JavaScript extractor: ${extractorResult2.error}`);
     if (codeqlExePath) {
-      addJavaScriptExtractorDiagnostic(sourceRoot, extractorResult2.error, codeqlExePath);
+      addJavaScriptExtractorDiagnostic(
+        sourceRoot,
+        extractorResult2.error,
+        codeqlExePath,
+        sourceRoot
+      );
     }
     logExtractorStop(false, "JavaScript extractor failed");
   } else {
@@ -9539,7 +9580,8 @@ try {
       cdsFilePathsToProcess[0],
       // Use first file as representative
       `Compilation orchestration failed: ${String(error)}`,
-      codeqlExePath
+      codeqlExePath,
+      sourceRoot
     );
   }
 }
@@ -9557,7 +9599,12 @@ if (!extractorResult.success && extractorResult.error) {
   if (codeqlExePath && dependencyGraph.projects.size > 0) {
     const firstProject = Array.from(dependencyGraph.projects.values())[0];
     const representativeFile = firstProject.cdsFiles[0] || sourceRoot;
-    addJavaScriptExtractorDiagnostic(representativeFile, extractorResult.error, codeqlExePath);
+    addJavaScriptExtractorDiagnostic(
+      representativeFile,
+      extractorResult.error,
+      codeqlExePath,
+      sourceRoot
+    );
   }
   logExtractorStop(false, "JavaScript extractor failed");
 } else {
diff --git a/extractors/cds/tools/dist/cds-extractor.bundle.js.map b/extractors/cds/tools/dist/cds-extractor.bundle.js.map
index 1c05e1da..a04c15c7 100644
--- a/extractors/cds/tools/dist/cds-extractor.bundle.js.map
+++ b/extractors/cds/tools/dist/cds-extractor.bundle.js.map
[Source map diff omitted: the regenerated `mappings`/`sourcesContent` payload is a machine-generated blob embedding the full extractor source plus vendored glob/minimatch code; it changed only because the bundle above was rebuilt and is not human-reviewable.]
`)?`\n          : `)${this.type}`\n      final = start + body + close\n    }\n    return [\n      final,\n      unescape(body),\n      (this.#hasMagic = !!this.#hasMagic),\n      this.#uflag,\n    ]\n  }\n\n  #partsToRegExp(dot: boolean) {\n    return this.#parts\n      .map(p => {\n        // extglob ASTs should only contain parent ASTs\n        /* c8 ignore start */\n        if (typeof p === 'string') {\n          throw new Error('string type in extglob ast??')\n        }\n        /* c8 ignore stop */\n        // can ignore hasMagic, because extglobs are already always magic\n        const [re, _, _hasMagic, uflag] = p.toRegExpSource(dot)\n        this.#uflag = this.#uflag || uflag\n        return re\n      })\n      .filter(p => !(this.isStart() && this.isEnd()) || !!p)\n      .join('|')\n  }\n\n  static #parseGlob(\n    glob: string,\n    hasMagic: boolean | undefined,\n    noEmpty: boolean = false\n  ): [re: string, body: string, hasMagic: boolean, uflag: boolean] {\n    let escaping = false\n    let re = ''\n    let uflag = false\n    for (let i = 0; i < glob.length; i++) {\n      const c = glob.charAt(i)\n      if (escaping) {\n        escaping = false\n        re += (reSpecials.has(c) ? '\\\\' : '') + c\n        continue\n      }\n      if (c === '\\\\') {\n        if (i === glob.length - 1) {\n          re += '\\\\\\\\'\n        } else {\n          escaping = true\n        }\n        continue\n      }\n      if (c === '[') {\n        const [src, needUflag, consumed, magic] = parseClass(glob, i)\n        if (consumed) {\n          re += src\n          uflag = uflag || needUflag\n          i += consumed - 1\n          hasMagic = hasMagic || magic\n          continue\n        }\n      }\n      if (c === '*') {\n        if (noEmpty && glob === '*') re += starNoEmpty\n        else re += star\n        hasMagic = true\n        continue\n      }\n      if (c === '?') {\n        re += qmark\n        hasMagic = true\n        continue\n      }\n      re += regExpEscape(c)\n    }\n    return [re, unescape(glob), !!hasMagic, uflag]\n  }\n}\n", "import { MinimatchOptions } from './index.js'\n/**\n * Escape all magic characters in a glob pattern.\n *\n * If the {@link windowsPathsNoEscape | GlobOptions.windowsPathsNoEscape}\n * option is used, then characters are escaped by wrapping in `[]`, because\n * a magic character wrapped in a character class can only be satisfied by\n * that exact character. In this mode, `\\` is _not_ escaped, because it is\n * not interpreted as a magic character, but instead as a path separator.\n */\nexport const escape = (\n  s: string,\n  {\n    windowsPathsNoEscape = false,\n  }: Pick<MinimatchOptions, 'windowsPathsNoEscape'> = {}\n) => {\n  // don't need to escape +@! because we escape the parens\n  // that make those magic, and escaping ! as [!] isn't valid,\n  // because [!]] is a valid glob class meaning not ']'.\n  return windowsPathsNoEscape\n    ? 
s.replace(/[?*()[\\]]/g, '[$&]')\n : s.replace(/[?*()[\\]\\\\]/g, '\\\\$&')\n}\n", "import { expand } from '@isaacs/brace-expansion'\nimport { assertValidPattern } from './assert-valid-pattern.js'\nimport { AST, ExtglobType } from './ast.js'\nimport { escape } from './escape.js'\nimport { unescape } from './unescape.js'\n\ntype Platform =\n | 'aix'\n | 'android'\n | 'darwin'\n | 'freebsd'\n | 'haiku'\n | 'linux'\n | 'openbsd'\n | 'sunos'\n | 'win32'\n | 'cygwin'\n | 'netbsd'\n\nexport interface MinimatchOptions {\n nobrace?: boolean\n nocomment?: boolean\n nonegate?: boolean\n debug?: boolean\n noglobstar?: boolean\n noext?: boolean\n nonull?: boolean\n windowsPathsNoEscape?: boolean\n allowWindowsEscape?: boolean\n partial?: boolean\n dot?: boolean\n nocase?: boolean\n nocaseMagicOnly?: boolean\n magicalBraces?: boolean\n matchBase?: boolean\n flipNegate?: boolean\n preserveMultipleSlashes?: boolean\n optimizationLevel?: number\n platform?: Platform\n windowsNoMagicRoot?: boolean\n}\n\nexport const minimatch = (\n p: string,\n pattern: string,\n options: MinimatchOptions = {}\n) => {\n assertValidPattern(pattern)\n\n // shortcut: comments match nothing.\n if (!options.nocomment && pattern.charAt(0) === '#') {\n return false\n }\n\n return new Minimatch(pattern, options).match(p)\n}\n\n// Optimized checking for the most common glob patterns.\nconst starDotExtRE = /^\\*+([^+@!?\\*\\[\\(]*)$/\nconst starDotExtTest = (ext: string) => (f: string) =>\n !f.startsWith('.') && f.endsWith(ext)\nconst starDotExtTestDot = (ext: string) => (f: string) => f.endsWith(ext)\nconst starDotExtTestNocase = (ext: string) => {\n ext = ext.toLowerCase()\n return (f: string) => !f.startsWith('.') && f.toLowerCase().endsWith(ext)\n}\nconst starDotExtTestNocaseDot = (ext: string) => {\n ext = ext.toLowerCase()\n return (f: string) => f.toLowerCase().endsWith(ext)\n}\nconst starDotStarRE = /^\\*+\\.\\*+$/\nconst starDotStarTest = (f: string) => !f.startsWith('.') && f.includes('.')\nconst starDotStarTestDot = (f: string) =>\n f !== '.' && f !== '..' && f.includes('.')\nconst dotStarRE = /^\\.\\*+$/\nconst dotStarTest = (f: string) => f !== '.' && f !== '..' && f.startsWith('.')\nconst starRE = /^\\*+$/\nconst starTest = (f: string) => f.length !== 0 && !f.startsWith('.')\nconst starTestDot = (f: string) => f.length !== 0 && f !== '.' && f !== '..'\nconst qmarksRE = /^\\?+([^+@!?\\*\\[\\(]*)?$/\nconst qmarksTestNocase = ([$0, ext = '']: RegExpMatchArray) => {\n const noext = qmarksTestNoExt([$0])\n if (!ext) return noext\n ext = ext.toLowerCase()\n return (f: string) => noext(f) && f.toLowerCase().endsWith(ext)\n}\nconst qmarksTestNocaseDot = ([$0, ext = '']: RegExpMatchArray) => {\n const noext = qmarksTestNoExtDot([$0])\n if (!ext) return noext\n ext = ext.toLowerCase()\n return (f: string) => noext(f) && f.toLowerCase().endsWith(ext)\n}\nconst qmarksTestDot = ([$0, ext = '']: RegExpMatchArray) => {\n const noext = qmarksTestNoExtDot([$0])\n return !ext ? noext : (f: string) => noext(f) && f.endsWith(ext)\n}\nconst qmarksTest = ([$0, ext = '']: RegExpMatchArray) => {\n const noext = qmarksTestNoExt([$0])\n return !ext ? noext : (f: string) => noext(f) && f.endsWith(ext)\n}\nconst qmarksTestNoExt = ([$0]: RegExpMatchArray) => {\n const len = $0.length\n return (f: string) => f.length === len && !f.startsWith('.')\n}\nconst qmarksTestNoExtDot = ([$0]: RegExpMatchArray) => {\n const len = $0.length\n return (f: string) => f.length === len && f !== '.' 
&& f !== '..'\n}\n\n/* c8 ignore start */\nconst defaultPlatform: Platform = (\n  typeof process === 'object' && process\n    ? (typeof process.env === 'object' &&\n        process.env &&\n        process.env.__MINIMATCH_TESTING_PLATFORM__) ||\n      process.platform\n    : 'posix'\n) as Platform\ntype Sep = '\\\\' | '/'\nconst path: { [k: string]: { sep: Sep } } = {\n  win32: { sep: '\\\\' },\n  posix: { sep: '/' },\n}\n/* c8 ignore stop */\n\nexport const sep = defaultPlatform === 'win32' ? path.win32.sep : path.posix.sep\nminimatch.sep = sep\n\nexport const GLOBSTAR = Symbol('globstar **')\nminimatch.GLOBSTAR = GLOBSTAR\n\n// any single thing other than /\n// don't need to escape / when using new RegExp()\nconst qmark = '[^/]'\n\n// * => any number of characters\nconst star = qmark + '*?'\n\n// ** when dots are allowed. Anything goes, except .. and .\n// not (^ or / followed by one or two dots followed by $ or /),\n// followed by anything, any number of times.\nconst twoStarDot = '(?:(?!(?:\\\\/|^)(?:\\\\.{1,2})($|\\\\/)).)*?'\n\n// not a ^ or / followed by a dot,\n// followed by anything, any number of times.\nconst twoStarNoDot = '(?:(?!(?:\\\\/|^)\\\\.).)*?'\n\nexport const filter =\n  (pattern: string, options: MinimatchOptions = {}) =>\n  (p: string) =>\n    minimatch(p, pattern, options)\nminimatch.filter = filter\n\nconst ext = (a: MinimatchOptions, b: MinimatchOptions = {}) =>\n  Object.assign({}, a, b)\n\nexport const defaults = (def: MinimatchOptions): typeof minimatch => {\n  if (!def || typeof def !== 'object' || !Object.keys(def).length) {\n    return minimatch\n  }\n\n  const orig = minimatch\n\n  const m = (p: string, pattern: string, options: MinimatchOptions = {}) =>\n    orig(p, pattern, ext(def, options))\n\n  return Object.assign(m, {\n    Minimatch: class Minimatch extends orig.Minimatch {\n      constructor(pattern: string, options: MinimatchOptions = {}) {\n        super(pattern, ext(def, options))\n      }\n      static defaults(options: MinimatchOptions) {\n        return orig.defaults(ext(def, options)).Minimatch\n      }\n    },\n\n    AST: class AST extends orig.AST {\n      /* c8 ignore start */\n      constructor(\n        type: ExtglobType | null,\n        parent?: AST,\n        options: MinimatchOptions = {}\n      ) {\n        super(type, parent, ext(def, options))\n      }\n      /* c8 ignore stop */\n\n      static fromGlob(pattern: string, options: MinimatchOptions = {}) {\n        return orig.AST.fromGlob(pattern, ext(def, options))\n      }\n    },\n\n    unescape: (\n      s: string,\n      options: Pick<MinimatchOptions, 'windowsPathsNoEscape'> = {}\n    ) => orig.unescape(s, ext(def, options)),\n\n    escape: (\n      s: string,\n      options: Pick<MinimatchOptions, 'windowsPathsNoEscape'> = {}\n    ) => orig.escape(s, ext(def, options)),\n\n    filter: (pattern: string, options: MinimatchOptions = {}) =>\n      orig.filter(pattern, ext(def, options)),\n\n    defaults: (options: MinimatchOptions) => orig.defaults(ext(def, options)),\n\n    makeRe: (pattern: string, options: MinimatchOptions = {}) =>\n      orig.makeRe(pattern, ext(def, options)),\n\n    braceExpand: (pattern: string, options: MinimatchOptions = {}) =>\n      orig.braceExpand(pattern, ext(def, options)),\n\n    match: (list: string[], pattern: string, options: MinimatchOptions = {}) =>\n      orig.match(list, pattern, ext(def, options)),\n\n    sep: orig.sep,\n    GLOBSTAR: GLOBSTAR as typeof GLOBSTAR,\n  })\n}\nminimatch.defaults = defaults\n\n// Brace expansion:\n// a{b,c}d -> abd acd\n// a{b,}c -> abc ac\n// a{0..3}d -> a0d a1d a2d a3d\n// a{b,c{d,e}f}g -> abg acdfg acefg\n// a{b,c}d{e,f}g -> abdeg abdfg acdeg acdfg\n//\n// Invalid sets are not expanded.\n// a{2..}b -> a{2..}b\n// a{b}c -> a{b}c\nexport const braceExpand = (\n  pattern: string,\n  options: MinimatchOptions = {}\n) => {\n  
assertValidPattern(pattern)\n\n // Thanks to Yeting Li for\n // improving this regexp to avoid a ReDOS vulnerability.\n if (options.nobrace || !/\\{(?:(?!\\{).)*\\}/.test(pattern)) {\n // shortcut. no need to expand.\n return [pattern]\n }\n\n return expand(pattern)\n}\nminimatch.braceExpand = braceExpand\n\n// parse a component of the expanded set.\n// At this point, no pattern may contain \"/\" in it\n// so we're going to return a 2d array, where each entry is the full\n// pattern, split on '/', and then turned into a regular expression.\n// A regexp is made at the end which joins each array with an\n// escaped /, and another full one which joins each regexp with |.\n//\n// Following the lead of Bash 4.1, note that \"**\" only has special meaning\n// when it is the *only* thing in a path portion. Otherwise, any series\n// of * is equivalent to a single *. Globstar behavior is enabled by\n// default, and can be disabled by setting options.noglobstar.\n\nexport const makeRe = (pattern: string, options: MinimatchOptions = {}) =>\n new Minimatch(pattern, options).makeRe()\nminimatch.makeRe = makeRe\n\nexport const match = (\n list: string[],\n pattern: string,\n options: MinimatchOptions = {}\n) => {\n const mm = new Minimatch(pattern, options)\n list = list.filter(f => mm.match(f))\n if (mm.options.nonull && !list.length) {\n list.push(pattern)\n }\n return list\n}\nminimatch.match = match\n\n// replace stuff like \\* with *\nconst globMagic = /[?*]|[+@!]\\(.*?\\)|\\[|\\]/\nconst regExpEscape = (s: string) =>\n s.replace(/[-[\\]{}()*+?.,\\\\^$|#\\s]/g, '\\\\$&')\n\nexport type MMRegExp = RegExp & {\n _src?: string\n _glob?: string\n}\n\nexport type ParseReturnFiltered = string | MMRegExp | typeof GLOBSTAR\nexport type ParseReturn = ParseReturnFiltered | false\n\nexport class Minimatch {\n options: MinimatchOptions\n set: ParseReturnFiltered[][]\n pattern: string\n\n windowsPathsNoEscape: boolean\n nonegate: boolean\n negate: boolean\n comment: boolean\n empty: boolean\n preserveMultipleSlashes: boolean\n partial: boolean\n globSet: string[]\n globParts: string[][]\n nocase: boolean\n\n isWindows: boolean\n platform: Platform\n windowsNoMagicRoot: boolean\n\n regexp: false | null | MMRegExp\n constructor(pattern: string, options: MinimatchOptions = {}) {\n assertValidPattern(pattern)\n\n options = options || {}\n this.options = options\n this.pattern = pattern\n this.platform = options.platform || defaultPlatform\n this.isWindows = this.platform === 'win32'\n this.windowsPathsNoEscape =\n !!options.windowsPathsNoEscape || options.allowWindowsEscape === false\n if (this.windowsPathsNoEscape) {\n this.pattern = this.pattern.replace(/\\\\/g, '/')\n }\n this.preserveMultipleSlashes = !!options.preserveMultipleSlashes\n this.regexp = null\n this.negate = false\n this.nonegate = !!options.nonegate\n this.comment = false\n this.empty = false\n this.partial = !!options.partial\n this.nocase = !!this.options.nocase\n this.windowsNoMagicRoot =\n options.windowsNoMagicRoot !== undefined\n ? 
options.windowsNoMagicRoot\n : !!(this.isWindows && this.nocase)\n\n this.globSet = []\n this.globParts = []\n this.set = []\n\n // make the set of regexps etc.\n this.make()\n }\n\n hasMagic(): boolean {\n if (this.options.magicalBraces && this.set.length > 1) {\n return true\n }\n for (const pattern of this.set) {\n for (const part of pattern) {\n if (typeof part !== 'string') return true\n }\n }\n return false\n }\n\n debug(..._: any[]) {}\n\n make() {\n const pattern = this.pattern\n const options = this.options\n\n // empty patterns and comments match nothing.\n if (!options.nocomment && pattern.charAt(0) === '#') {\n this.comment = true\n return\n }\n\n if (!pattern) {\n this.empty = true\n return\n }\n\n // step 1: figure out negation, etc.\n this.parseNegate()\n\n // step 2: expand braces\n this.globSet = [...new Set(this.braceExpand())]\n\n if (options.debug) {\n this.debug = (...args: any[]) => console.error(...args)\n }\n\n this.debug(this.pattern, this.globSet)\n\n // step 3: now we have a set, so turn each one into a series of\n // path-portion matching patterns.\n // These will be regexps, except in the case of \"**\", which is\n // set to the GLOBSTAR object for globstar behavior,\n // and will not contain any / characters\n //\n // First, we preprocess to make the glob pattern sets a bit simpler\n // and deduped. There are some perf-killing patterns that can cause\n // problems with a glob walk, but we can simplify them down a bit.\n const rawGlobParts = this.globSet.map(s => this.slashSplit(s))\n this.globParts = this.preprocess(rawGlobParts)\n this.debug(this.pattern, this.globParts)\n\n // glob --> regexps\n let set = this.globParts.map((s, _, __) => {\n if (this.isWindows && this.windowsNoMagicRoot) {\n // check if it's a drive or unc path.\n const isUNC =\n s[0] === '' &&\n s[1] === '' &&\n (s[2] === '?' || !globMagic.test(s[2])) &&\n !globMagic.test(s[3])\n const isDrive = /^[a-z]:/i.test(s[0])\n if (isUNC) {\n return [...s.slice(0, 4), ...s.slice(4).map(ss => this.parse(ss))]\n } else if (isDrive) {\n return [s[0], ...s.slice(1).map(ss => this.parse(ss))]\n }\n }\n return s.map(ss => this.parse(ss))\n })\n\n this.debug(this.pattern, set)\n\n // filter out everything that didn't compile properly.\n this.set = set.filter(\n s => s.indexOf(false) === -1\n ) as ParseReturnFiltered[][]\n\n // do not treat the ? in UNC paths as magic\n if (this.isWindows) {\n for (let i = 0; i < this.set.length; i++) {\n const p = this.set[i]\n if (\n p[0] === '' &&\n p[1] === '' &&\n this.globParts[i][2] === '?' &&\n typeof p[3] === 'string' &&\n /^[a-z]:$/i.test(p[3])\n ) {\n p[2] = '?'\n }\n }\n }\n\n this.debug(this.pattern, this.set)\n }\n\n // various transforms to equivalent pattern sets that are\n // faster to process in a filesystem walk. 
The goal is to\n // eliminate what we can, and push all ** patterns as far\n // to the right as possible, even if it increases the number\n // of patterns that we have to process.\n preprocess(globParts: string[][]) {\n // if we're not in globstar mode, then turn all ** into *\n if (this.options.noglobstar) {\n for (let i = 0; i < globParts.length; i++) {\n for (let j = 0; j < globParts[i].length; j++) {\n if (globParts[i][j] === '**') {\n globParts[i][j] = '*'\n }\n }\n }\n }\n\n const { optimizationLevel = 1 } = this.options\n\n if (optimizationLevel >= 2) {\n // aggressive optimization for the purpose of fs walking\n globParts = this.firstPhasePreProcess(globParts)\n globParts = this.secondPhasePreProcess(globParts)\n } else if (optimizationLevel >= 1) {\n // just basic optimizations to remove some .. parts\n globParts = this.levelOneOptimize(globParts)\n } else {\n // just collapse multiple ** portions into one\n globParts = this.adjascentGlobstarOptimize(globParts)\n }\n\n return globParts\n }\n\n // just get rid of adjascent ** portions\n adjascentGlobstarOptimize(globParts: string[][]) {\n return globParts.map(parts => {\n let gs: number = -1\n while (-1 !== (gs = parts.indexOf('**', gs + 1))) {\n let i = gs\n while (parts[i + 1] === '**') {\n i++\n }\n if (i !== gs) {\n parts.splice(gs, i - gs)\n }\n }\n return parts\n })\n }\n\n // get rid of adjascent ** and resolve .. portions\n levelOneOptimize(globParts: string[][]) {\n return globParts.map(parts => {\n parts = parts.reduce((set: string[], part) => {\n const prev = set[set.length - 1]\n if (part === '**' && prev === '**') {\n return set\n }\n if (part === '..') {\n if (prev && prev !== '..' && prev !== '.' && prev !== '**') {\n set.pop()\n return set\n }\n }\n set.push(part)\n return set\n }, [])\n return parts.length === 0 ? [''] : parts\n })\n }\n\n levelTwoFileOptimize(parts: string | string[]) {\n if (!Array.isArray(parts)) {\n parts = this.slashSplit(parts)\n }\n let didSomething: boolean = false\n do {\n didSomething = false\n //
 <pre>/<e>/<rest> -> <pre>/<rest>\n      if (!this.preserveMultipleSlashes) {\n        for (let i = 1; i < parts.length - 1; i++) {\n          const p = parts[i]\n          // don't squeeze out UNC patterns\n          if (i === 1 && p === '' && parts[0] === '') continue\n          if (p === '.' || p === '') {\n            didSomething = true\n            parts.splice(i, 1)\n            i--\n          }\n        }\n        if (\n          parts[0] === '.' &&\n          parts.length === 2 &&\n          (parts[1] === '.' || parts[1] === '')\n        ) {\n          didSomething = true\n          parts.pop()\n        }\n      }\n\n      // <pre>/<p>/../<rest> -> <pre>/<rest>\n      let dd: number = 0\n      while (-1 !== (dd = parts.indexOf('..', dd + 1))) {\n        const p = parts[dd - 1]\n        if (p && p !== '.' && p !== '..' && p !== '**') {\n          didSomething = true\n          parts.splice(dd - 1, 2)\n          dd -= 2\n        }\n      }\n    } while (didSomething)\n    return parts.length === 0 ? [''] : parts\n  }\n\n  // First phase: single-pattern processing\n  // <pre> is 1 or more portions\n  // <rest> is 1 or more portions\n  // <p> is any portion other than ., .., '', or **\n  // <e> is . or ''\n  //\n  // **/.. is *brutal* for filesystem walking performance, because\n  // it effectively resets the recursive walk each time it occurs,\n  // and ** cannot be reduced out by a .. pattern part like a regexp\n  // or most strings (other than .., ., and '') can be.\n  //\n  // <pre>/**/../<p>/<p>/<rest> -> {<pre>/../<p>/<p>/<rest>,<pre>/**/<p>/<p>/<rest>}\n  // <pre>/<e>/<rest> -> <pre>/<rest>\n  // <pre>/<p>/../<rest> -> <pre>/<rest>\n  // **/**/<rest> -> **/<rest>\n  //\n  // **/*/<rest> -> */**/<rest> <== not valid because ** doesn't follow\n  // this WOULD be allowed if ** did follow symlinks, or * didn't\n  firstPhasePreProcess(globParts: string[][]) {\n    let didSomething = false\n    do {\n      didSomething = false\n      // <pre>/**/../<p>/<p>/<rest> -> {<pre>/../<p>/<p>/<rest>,<pre>/**/<p>/<p>/<rest>}\n      for (let parts of globParts) {\n        let gs: number = -1\n        while (-1 !== (gs = parts.indexOf('**', gs + 1))) {\n          let gss: number = gs\n          while (parts[gss + 1] === '**') {\n            // <pre>/**/**/<rest> -> <pre>/**/<rest>\n            gss++\n          }\n          // eg, if gs is 2 and gss is 4, that means we have 3 **\n          // parts, and can remove 2 of them.\n          if (gss > gs) {\n            parts.splice(gs + 1, gss - gs)\n          }\n\n          let next = parts[gs + 1]\n          const p = parts[gs + 2]\n          const p2 = parts[gs + 3]\n          if (next !== '..') continue\n          if (\n            !p ||\n            p === '.' ||\n            p === '..' ||\n            !p2 ||\n            p2 === '.' ||\n            p2 === '..'\n          ) {\n            continue\n          }\n          didSomething = true\n          // edit parts in place, and push the new one\n          parts.splice(gs, 1)\n          const other = parts.slice(0)\n          other[gs] = '**'\n          globParts.push(other)\n          gs--\n        }\n\n        // <pre>/<e>/<rest> -> <pre>/<rest>\n        if (!this.preserveMultipleSlashes) {\n          for (let i = 1; i < parts.length - 1; i++) {\n            const p = parts[i]\n            // don't squeeze out UNC patterns\n            if (i === 1 && p === '' && parts[0] === '') continue\n            if (p === '.' || p === '') {\n              didSomething = true\n              parts.splice(i, 1)\n              i--\n            }\n          }\n          if (\n            parts[0] === '.' &&\n            parts.length === 2 &&\n            (parts[1] === '.' || parts[1] === '')\n          ) {\n            didSomething = true\n            parts.pop()\n          }\n        }\n\n        // <pre>/<p>/../<rest> -> <pre>/<rest>\n        let dd: number = 0\n        while (-1 !== (dd = parts.indexOf('..', dd + 1))) {\n          const p = parts[dd - 1]\n          if (p && p !== '.' && p !== '..' && p !== '**') {\n            didSomething = true\n            const needDot = dd === 1 && parts[dd + 1] === '**'\n            const splin = needDot ? ['.'] : []\n            parts.splice(dd - 1, 2, ...splin)\n            if (parts.length === 0) parts.push('')\n            dd -= 2\n          }\n        }\n      }\n    } while (didSomething)\n\n    return globParts\n  }\n\n  // second phase: multi-pattern dedupes\n  // {<pre>/*/<rest>,<pre>/<p>/<rest>} -> <pre>/*/<rest>\n  // {<pre>/<rest>,<pre>/<rest>} -> <pre>/<rest>\n  // {<pre>/**/<rest>,<pre>/<rest>} -> <pre>/**/<rest>\n  //\n  // {<pre>/**/<rest>,<pre>/**/<p>/<rest>} -> <pre>
/**/\n  // ^-- not valid because ** doens't follow symlinks\n  secondPhasePreProcess(globParts: string[][]): string[][] {\n    for (let i = 0; i < globParts.length - 1; i++) {\n      for (let j = i + 1; j < globParts.length; j++) {\n        const matched = this.partsMatch(\n          globParts[i],\n          globParts[j],\n          !this.preserveMultipleSlashes\n        )\n        if (matched) {\n          globParts[i] = []\n          globParts[j] = matched\n          break\n        }\n      }\n    }\n    return globParts.filter(gs => gs.length)\n  }\n\n  partsMatch(\n    a: string[],\n    b: string[],\n    emptyGSMatch: boolean = false\n  ): false | string[] {\n    let ai = 0\n    let bi = 0\n    let result: string[] = []\n    let which: string = ''\n    while (ai < a.length && bi < b.length) {\n      if (a[ai] === b[bi]) {\n        result.push(which === 'b' ? b[bi] : a[ai])\n        ai++\n        bi++\n      } else if (emptyGSMatch && a[ai] === '**' && b[bi] === a[ai + 1]) {\n        result.push(a[ai])\n        ai++\n      } else if (emptyGSMatch && b[bi] === '**' && a[ai] === b[bi + 1]) {\n        result.push(b[bi])\n        bi++\n      } else if (\n        a[ai] === '*' &&\n        b[bi] &&\n        (this.options.dot || !b[bi].startsWith('.')) &&\n        b[bi] !== '**'\n      ) {\n        if (which === 'b') return false\n        which = 'a'\n        result.push(a[ai])\n        ai++\n        bi++\n      } else if (\n        b[bi] === '*' &&\n        a[ai] &&\n        (this.options.dot || !a[ai].startsWith('.')) &&\n        a[ai] !== '**'\n      ) {\n        if (which === 'a') return false\n        which = 'b'\n        result.push(b[bi])\n        ai++\n        bi++\n      } else {\n        return false\n      }\n    }\n    // if we fall out of the loop, it means they two are identical\n    // as long as their lengths match\n    return a.length === b.length && result\n  }\n\n  parseNegate() {\n    if (this.nonegate) return\n\n    const pattern = this.pattern\n    let negate = false\n    let negateOffset = 0\n\n    for (let i = 0; i < pattern.length && pattern.charAt(i) === '!'; i++) {\n      negate = !negate\n      negateOffset++\n    }\n\n    if (negateOffset) this.pattern = pattern.slice(negateOffset)\n    this.negate = negate\n  }\n\n  // set partial to true to test if, for example,\n  // \"/a/b\" matches the start of \"/*/b/*/d\"\n  // Partial means, if you run out of file before you run\n  // out of pattern, then that's fine, as long as all\n  // the parts match.\n  matchOne(file: string[], pattern: ParseReturn[], partial: boolean = false) {\n    const options = this.options\n\n    // UNC paths like //?/X:/... can match X:/... and vice versa\n    // Drive letters in absolute drive or unc paths are always compared\n    // case-insensitively.\n    if (this.isWindows) {\n      const fileDrive = typeof file[0] === 'string' && /^[a-z]:$/i.test(file[0])\n      const fileUNC =\n        !fileDrive &&\n        file[0] === '' &&\n        file[1] === '' &&\n        file[2] === '?' &&\n        /^[a-z]:$/i.test(file[3])\n\n      const patternDrive =\n        typeof pattern[0] === 'string' && /^[a-z]:$/i.test(pattern[0])\n      const patternUNC =\n        !patternDrive &&\n        pattern[0] === '' &&\n        pattern[1] === '' &&\n        pattern[2] === '?' &&\n        typeof pattern[3] === 'string' &&\n        /^[a-z]:$/i.test(pattern[3])\n\n      const fdi = fileUNC ? 3 : fileDrive ? 0 : undefined\n      const pdi = patternUNC ? 3 : patternDrive ? 
0 : undefined\n      if (typeof fdi === 'number' && typeof pdi === 'number') {\n        const [fd, pd]: [string, string] = [file[fdi], pattern[pdi] as string]\n        if (fd.toLowerCase() === pd.toLowerCase()) {\n          pattern[pdi] = fd\n          if (pdi > fdi) {\n            pattern = pattern.slice(pdi)\n          } else if (fdi > pdi) {\n            file = file.slice(fdi)\n          }\n        }\n      }\n    }\n\n    // resolve and reduce . and .. portions in the file as well.\n    // dont' need to do the second phase, because it's only one string[]\n    const { optimizationLevel = 1 } = this.options\n    if (optimizationLevel >= 2) {\n      file = this.levelTwoFileOptimize(file)\n    }\n\n    this.debug('matchOne', this, { file, pattern })\n    this.debug('matchOne', file.length, pattern.length)\n\n    for (\n      var fi = 0, pi = 0, fl = file.length, pl = pattern.length;\n      fi < fl && pi < pl;\n      fi++, pi++\n    ) {\n      this.debug('matchOne loop')\n      var p = pattern[pi]\n      var f = file[fi]\n\n      this.debug(pattern, p, f)\n\n      // should be impossible.\n      // some invalid regexp stuff in the set.\n      /* c8 ignore start */\n      if (p === false) {\n        return false\n      }\n      /* c8 ignore stop */\n\n      if (p === GLOBSTAR) {\n        this.debug('GLOBSTAR', [pattern, p, f])\n\n        // \"**\"\n        // a/**/b/**/c would match the following:\n        // a/b/x/y/z/c\n        // a/x/y/z/b/c\n        // a/b/x/b/x/c\n        // a/b/c\n        // To do this, take the rest of the pattern after\n        // the **, and see if it would match the file remainder.\n        // If so, return success.\n        // If not, the ** \"swallows\" a segment, and try again.\n        // This is recursively awful.\n        //\n        // a/**/b/**/c matching a/b/x/y/z/c\n        // - a matches a\n        // - doublestar\n        //   - matchOne(b/x/y/z/c, b/**/c)\n        //     - b matches b\n        //     - doublestar\n        //       - matchOne(x/y/z/c, c) -> no\n        //       - matchOne(y/z/c, c) -> no\n        //       - matchOne(z/c, c) -> no\n        //       - matchOne(c, c) yes, hit\n        var fr = fi\n        var pr = pi + 1\n        if (pr === pl) {\n          this.debug('** at the end')\n          // a ** at the end will just swallow the rest.\n          // We have found a match.\n          // however, it will not swallow /.x, unless\n          // options.dot is set.\n          // . and .. are *never* matched by **, for explosively\n          // exponential reasons.\n          for (; fi < fl; fi++) {\n            if (\n              file[fi] === '.' ||\n              file[fi] === '..' ||\n              (!options.dot && file[fi].charAt(0) === '.')\n            )\n              return false\n          }\n          return true\n        }\n\n        // ok, let's see if we can swallow whatever we can.\n        while (fr < fl) {\n          var swallowee = file[fr]\n\n          this.debug('\\nglobstar while', file, fr, pattern, pr, swallowee)\n\n          // XXX remove this slice.  Just pass the start index.\n          if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) {\n            this.debug('globstar found match!', fr, fl, swallowee)\n            // found a match.\n            return true\n          } else {\n            // can't swallow \".\" or \"..\" ever.\n            // can only swallow \".foo\" when explicitly asked.\n            if (\n              swallowee === '.' ||\n              swallowee === '..' 
||\n              (!options.dot && swallowee.charAt(0) === '.')\n            ) {\n              this.debug('dot detected!', file, fr, pattern, pr)\n              break\n            }\n\n            // ** swallows a segment, and continue.\n            this.debug('globstar swallow a segment, and continue')\n            fr++\n          }\n        }\n\n        // no match was found.\n        // However, in partial mode, we can't say this is necessarily over.\n        /* c8 ignore start */\n        if (partial) {\n          // ran out of file\n          this.debug('\\n>>> no match, partial?', file, fr, pattern, pr)\n          if (fr === fl) {\n            return true\n          }\n        }\n        /* c8 ignore stop */\n        return false\n      }\n\n      // something other than **\n      // non-magic patterns just have to match exactly\n      // patterns with magic have been turned into regexps.\n      let hit: boolean\n      if (typeof p === 'string') {\n        hit = f === p\n        this.debug('string match', p, f, hit)\n      } else {\n        hit = p.test(f)\n        this.debug('pattern match', p, f, hit)\n      }\n\n      if (!hit) return false\n    }\n\n    // Note: ending in / means that we'll get a final \"\"\n    // at the end of the pattern.  This can only match a\n    // corresponding \"\" at the end of the file.\n    // If the file ends in /, then it can only match a\n    // a pattern that ends in /, unless the pattern just\n    // doesn't have any more for it. But, a/b/ should *not*\n    // match \"a/b/*\", even though \"\" matches against the\n    // [^/]*? pattern, except in partial mode, where it might\n    // simply not be reached yet.\n    // However, a/b/ should still satisfy a/*\n\n    // now either we fell off the end of the pattern, or we're done.\n    if (fi === fl && pi === pl) {\n      // ran out of pattern and filename at the same time.\n      // an exact hit!\n      return true\n    } else if (fi === fl) {\n      // ran out of file, but still had pattern left.\n      // this is ok if we're doing the match as part of\n      // a glob fs traversal.\n      return partial\n    } else if (pi === pl) {\n      // ran out of pattern, still have file left.\n      // this is only acceptable if we're on the very last\n      // empty segment of a file with a trailing slash.\n      // a/* should match a/b/\n      return fi === fl - 1 && file[fi] === ''\n\n      /* c8 ignore start */\n    } else {\n      // should be unreachable.\n      throw new Error('wtf?')\n    }\n    /* c8 ignore stop */\n  }\n\n  braceExpand() {\n    return braceExpand(this.pattern, this.options)\n  }\n\n  parse(pattern: string): ParseReturn {\n    assertValidPattern(pattern)\n\n    const options = this.options\n\n    // shortcuts\n    if (pattern === '**') return GLOBSTAR\n    if (pattern === '') return ''\n\n    // far and away, the most common glob pattern parts are\n    // *, *.*, and *.  Add a fast check method for those.\n    let m: RegExpMatchArray | null\n    let fastTest: null | ((f: string) => boolean) = null\n    if ((m = pattern.match(starRE))) {\n      fastTest = options.dot ? starTestDot : starTest\n    } else if ((m = pattern.match(starDotExtRE))) {\n      fastTest = (\n        options.nocase\n          ? options.dot\n            ? starDotExtTestNocaseDot\n            : starDotExtTestNocase\n          : options.dot\n          ? 
starDotExtTestDot\n          : starDotExtTest\n      )(m[1])\n    } else if ((m = pattern.match(qmarksRE))) {\n      fastTest = (\n        options.nocase\n          ? options.dot\n            ? qmarksTestNocaseDot\n            : qmarksTestNocase\n          : options.dot\n          ? qmarksTestDot\n          : qmarksTest\n      )(m)\n    } else if ((m = pattern.match(starDotStarRE))) {\n      fastTest = options.dot ? starDotStarTestDot : starDotStarTest\n    } else if ((m = pattern.match(dotStarRE))) {\n      fastTest = dotStarTest\n    }\n\n    const re = AST.fromGlob(pattern, this.options).toMMPattern()\n    if (fastTest && typeof re === 'object') {\n      // Avoids overriding in frozen environments\n      Reflect.defineProperty(re, 'test', { value: fastTest })\n    }\n    return re\n  }\n\n  makeRe() {\n    if (this.regexp || this.regexp === false) return this.regexp\n\n    // at this point, this.set is a 2d array of partial\n    // pattern strings, or \"**\".\n    //\n    // It's better to use .match().  This function shouldn't\n    // be used, really, but it's pretty convenient sometimes,\n    // when you just want to work with a regex.\n    const set = this.set\n\n    if (!set.length) {\n      this.regexp = false\n      return this.regexp\n    }\n    const options = this.options\n\n    const twoStar = options.noglobstar\n      ? star\n      : options.dot\n      ? twoStarDot\n      : twoStarNoDot\n    const flags = new Set(options.nocase ? ['i'] : [])\n\n    // regexpify non-globstar patterns\n    // if ** is only item, then we just do one twoStar\n    // if ** is first, and there are more, prepend (\\/|twoStar\\/)? to next\n    // if ** is last, append (\\/twoStar|) to previous\n    // if ** is in the middle, append (\\/|\\/twoStar\\/) to previous\n    // then filter out GLOBSTAR symbols\n    let re = set\n      .map(pattern => {\n        const pp: (string | typeof GLOBSTAR)[] = pattern.map(p => {\n          if (p instanceof RegExp) {\n            for (const f of p.flags.split('')) flags.add(f)\n          }\n          return typeof p === 'string'\n            ? regExpEscape(p)\n            : p === GLOBSTAR\n            ? GLOBSTAR\n            : p._src\n        }) as (string | typeof GLOBSTAR)[]\n        pp.forEach((p, i) => {\n          const next = pp[i + 1]\n          const prev = pp[i - 1]\n          if (p !== GLOBSTAR || prev === GLOBSTAR) {\n            return\n          }\n          if (prev === undefined) {\n            if (next !== undefined && next !== GLOBSTAR) {\n              pp[i + 1] = '(?:\\\\/|' + twoStar + '\\\\/)?' + next\n            } else {\n              pp[i] = twoStar\n            }\n          } else if (next === undefined) {\n            pp[i - 1] = prev + '(?:\\\\/|' + twoStar + ')?'\n          } else if (next !== GLOBSTAR) {\n            pp[i - 1] = prev + '(?:\\\\/|\\\\/' + twoStar + '\\\\/)' + next\n            pp[i + 1] = GLOBSTAR\n          }\n        })\n        return pp.filter(p => p !== GLOBSTAR).join('/')\n      })\n      .join('|')\n\n    // need to wrap in parens if we had more than one thing with |,\n    // otherwise only the first will be anchored to ^ and the last to $\n    const [open, close] = set.length > 1 ? ['(?:', ')'] : ['', '']\n    // must match entire pattern\n    // ending in a * or ** will make it less strict.\n    re = '^' + open + re + close + '$'\n\n    // can match anything, as long as it's not this.\n    if (this.negate) re = '^(?!' 
+ re + ').+$'\n\n    try {\n      this.regexp = new RegExp(re, [...flags].join(''))\n      /* c8 ignore start */\n    } catch (ex) {\n      // should be impossible\n      this.regexp = false\n    }\n    /* c8 ignore stop */\n    return this.regexp\n  }\n\n  slashSplit(p: string) {\n    // if p starts with // on windows, we preserve that\n    // so that UNC paths aren't broken.  Otherwise, any number of\n    // / characters are coalesced into one, unless\n    // preserveMultipleSlashes is set to true.\n    if (this.preserveMultipleSlashes) {\n      return p.split('/')\n    } else if (this.isWindows && /^\\/\\/[^\\/]+/.test(p)) {\n      // add an extra '' for the one we lose\n      return ['', ...p.split(/\\/+/)]\n    } else {\n      return p.split(/\\/+/)\n    }\n  }\n\n  match(f: string, partial = this.partial) {\n    this.debug('match', f, this.pattern)\n    // short-circuit in the case of busted things.\n    // comments, etc.\n    if (this.comment) {\n      return false\n    }\n    if (this.empty) {\n      return f === ''\n    }\n\n    if (f === '/' && partial) {\n      return true\n    }\n\n    const options = this.options\n\n    // windows: need to use /, not \\\n    if (this.isWindows) {\n      f = f.split('\\\\').join('/')\n    }\n\n    // treat the test path as a set of pathparts.\n    const ff = this.slashSplit(f)\n    this.debug(this.pattern, 'split', ff)\n\n    // just ONE of the pattern sets in this.set needs to match\n    // in order for it to be valid.  If negating, then just one\n    // match means that we have failed.\n    // Either way, return on the first hit.\n\n    const set = this.set\n    this.debug(this.pattern, 'set', set)\n\n    // Find the basename of the path by looking for the last non-empty segment\n    let filename: string = ff[ff.length - 1]\n    if (!filename) {\n      for (let i = ff.length - 2; !filename && i >= 0; i--) {\n        filename = ff[i]\n      }\n    }\n\n    for (let i = 0; i < set.length; i++) {\n      const pattern = set[i]\n      let file = ff\n      if (options.matchBase && pattern.length === 1) {\n        file = [filename]\n      }\n      const hit = this.matchOne(file, pattern, partial)\n      if (hit) {\n        if (options.flipNegate) {\n          return true\n        }\n        return !this.negate\n      }\n    }\n\n    // didn't get any hits.  
this is success if it's a negative\n    // pattern, failure otherwise.\n    if (options.flipNegate) {\n      return false\n    }\n    return this.negate\n  }\n\n  static defaults(def: MinimatchOptions) {\n    return minimatch.defaults(def).Minimatch\n  }\n}\n/* c8 ignore start */\nexport { AST } from './ast.js'\nexport { escape } from './escape.js'\nexport { unescape } from './unescape.js'\n/* c8 ignore stop */\nminimatch.AST = AST\nminimatch.Minimatch = Minimatch\nminimatch.escape = escape\nminimatch.unescape = unescape\n", "import { Minimatch, MinimatchOptions } from 'minimatch'\nimport { Minipass } from 'minipass'\nimport { fileURLToPath } from 'node:url'\nimport {\n  FSOption,\n  Path,\n  PathScurry,\n  PathScurryDarwin,\n  PathScurryPosix,\n  PathScurryWin32,\n} from 'path-scurry'\nimport { IgnoreLike } from './ignore.js'\nimport { Pattern } from './pattern.js'\nimport { GlobStream, GlobWalker } from './walker.js'\n\nexport type MatchSet = Minimatch['set']\nexport type GlobParts = Exclude<MatchSet, undefined>\n\n// if no process global, just call it linux.\n// so we default to case-sensitive, / separators\nconst defaultPlatform: NodeJS.Platform =\n  (\n    typeof process === 'object' &&\n    process &&\n    typeof process.platform === 'string'\n  ) ?\n    process.platform\n  : 'linux'\n\n/**\n * A `GlobOptions` object may be provided to any of the exported methods, and\n * must be provided to the `Glob` constructor.\n *\n * All options are optional, boolean, and false by default, unless otherwise\n * noted.\n *\n * All resolved options are added to the Glob object as properties.\n *\n * If you are running many `glob` operations, you can pass a Glob object as the\n * `options` argument to a subsequent operation to share the previously loaded\n * cache.\n */\nexport interface GlobOptions {\n  /**\n   * Set to `true` to always receive absolute paths for\n   * matched files. Set to `false` to always return relative paths.\n   *\n   * When this option is not set, absolute paths are returned for patterns\n   * that are absolute, and otherwise paths are returned that are relative\n   * to the `cwd` setting.\n   *\n   * This does _not_ make an extra system call to get\n   * the realpath, it only does string path resolution.\n   *\n   * Conflicts with {@link withFileTypes}\n   */\n  absolute?: boolean\n\n  /**\n   * Set to false to enable {@link windowsPathsNoEscape}\n   *\n   * @deprecated\n   */\n  allowWindowsEscape?: boolean\n\n  /**\n   * The current working directory in which to search. Defaults to\n   * `process.cwd()`.\n   *\n   * May be either a string path or a `file://` URL object or string.\n   */\n  cwd?: string | URL\n\n  /**\n   * Include `.dot` files in normal matches and `globstar`\n   * matches. Note that an explicit dot in a portion of the pattern\n   * will always match dot files.\n   */\n  dot?: boolean\n\n  /**\n   * Prepend all relative path strings with `./` (or `.\\` on Windows).\n   *\n   * Without this option, returned relative paths are \"bare\", so instead of\n   * returning `'./foo/bar'`, they are returned as `'foo/bar'`.\n   *\n   * Relative patterns starting with `'../'` are not prepended with `./`, even\n   * if this option is set.\n   */\n  dotRelative?: boolean\n\n  /**\n   * Follow symlinked directories when expanding `**`\n   * patterns. 
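(For instance, as an illustrative sketch with a hypothetical layout: with\n   * `{ follow: true }`, a `**` walk that reaches a directory symlink keeps\n   * descending through it, and through any further symlinks below it, rather\n   * than stopping after a single hop.)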
This can result in a lot of duplicate references in\n   * the presence of cyclic links, and make performance quite bad.\n   *\n   * By default, a `**` in a pattern will follow 1 symbolic link if\n   * it is not the first item in the pattern, or none if it is the\n   * first item in the pattern, following the same behavior as Bash.\n   */\n  follow?: boolean\n\n  /**\n   * string or string[], or an object with `ignored` and `childrenIgnored`\n   * methods.\n   *\n   * If a string or string[] is provided, then this is treated as a glob\n   * pattern or array of glob patterns to exclude from matches. To ignore all\n   * children within a directory, as well as the entry itself, append `'/**'`\n   * to the ignore pattern.\n   *\n   * **Note** `ignore` patterns are _always_ in `dot:true` mode, regardless of\n   * any other settings.\n   *\n   * If an object is provided that has `ignored(path)` and/or\n   * `childrenIgnored(path)` methods, then these methods will be called to\n   * determine whether any Path is a match or if its children should be\n   * traversed, respectively.\n   */\n  ignore?: string | string[] | IgnoreLike\n\n  /**\n   * Treat brace expansion like `{a,b}` as a \"magic\" pattern. Has no\n   * effect if {@link nobrace} is set.\n   *\n   * Only has effect on the {@link hasMagic} function.\n   */\n  magicalBraces?: boolean\n\n  /**\n   * Add a `/` character to directory matches. Note that this requires\n   * additional stat calls in some cases.\n   */\n  mark?: boolean\n\n  /**\n   * Perform a basename-only match if the pattern does not contain any slash\n   * characters. That is, `*.js` would be treated as equivalent to\n   * `**\\/*.js`, matching all js files in all directories.\n   */\n  matchBase?: boolean\n\n  /**\n   * Limit the directory traversal to a given depth below the cwd.\n   * Note that this does NOT prevent traversal to sibling folders,\n   * root patterns, and so on. It only limits the maximum folder depth\n   * that the walk will descend, relative to the cwd.\n   */\n  maxDepth?: number\n\n  /**\n   * Do not expand `{a,b}` and `{1..3}` brace sets.\n   */\n  nobrace?: boolean\n\n  /**\n   * Perform a case-insensitive match. This defaults to `true` on macOS and\n   * Windows systems, and `false` on all others.\n   *\n   * **Note** `nocase` should only be explicitly set when it is\n   * known that the filesystem's case sensitivity differs from the\n   * platform default. If set `true` on case-sensitive file\n   * systems, or `false` on case-insensitive file systems, then the\n   * walk may return more or less results than expected.\n   */\n  nocase?: boolean\n\n  /**\n   * Do not match directories, only files. (Note: to match\n   * _only_ directories, put a `/` at the end of the pattern.)\n   */\n  nodir?: boolean\n\n  /**\n   * Do not match \"extglob\" patterns such as `+(a|b)`.\n   */\n  noext?: boolean\n\n  /**\n   * Do not match `**` against multiple filenames. (Ie, treat it as a normal\n   * `*` instead.)\n   *\n   * Conflicts with {@link matchBase}\n   */\n  noglobstar?: boolean\n\n  /**\n   * Defaults to value of `process.platform` if available, or `'linux'` if\n   * not. Setting `platform:'win32'` on non-Windows systems may cause strange\n   * behavior.\n   */\n  platform?: NodeJS.Platform\n\n  /**\n   * Set to true to call `fs.realpath` on all of the\n   * results. In the case of an entry that cannot be resolved, the\n   * entry is omitted. 
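(A sketch with a hypothetical broken link: if `./dangling` points at a\n   * missing target, `glob('*', { realpath: true })` simply omits it from the\n   * results rather than raising an error.)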
This incurs a slight performance penalty, of\n   * course, because of the added system calls.\n   */\n  realpath?: boolean\n\n  /**\n   *\n   * A string path resolved against the `cwd` option, which\n   * is used as the starting point for absolute patterns that start\n   * with `/`, (but not drive letters or UNC paths on Windows).\n   *\n   * Note that this _doesn't_ necessarily limit the walk to the\n   * `root` directory, and doesn't affect the cwd starting point for\n   * non-absolute patterns. A pattern containing `..` will still be\n   * able to traverse out of the root directory, if it is not an\n   * actual root directory on the filesystem, and any non-absolute\n   * patterns will be matched in the `cwd`. For example, the\n   * pattern `/../*` with `{root:'/some/path'}` will return all\n   * files in `/some`, not all files in `/some/path`. The pattern\n   * `*` with `{root:'/some/path'}` will return all the entries in\n   * the cwd, not the entries in `/some/path`.\n   *\n   * To start absolute and non-absolute patterns in the same\n   * path, you can use `{root:''}`. However, be aware that on\n   * Windows systems, a pattern like `x:/*` or `//host/share/*` will\n   * _always_ start in the `x:/` or `//host/share` directory,\n   * regardless of the `root` setting.\n   */\n  root?: string\n\n  /**\n   * A [PathScurry](http://npm.im/path-scurry) object used\n   * to traverse the file system. If the `nocase` option is set\n   * explicitly, then any provided `scurry` object must match this\n   * setting.\n   */\n  scurry?: PathScurry\n\n  /**\n   * Call `lstat()` on all entries, whether required or not to determine\n   * if it's a valid match. When used with {@link withFileTypes}, this means\n   * that matches will include data such as modified time, permissions, and\n   * so on.  Note that this will incur a performance cost due to the added\n   * system calls.\n   */\n  stat?: boolean\n\n  /**\n   * An AbortSignal which will cancel the Glob walk when\n   * triggered.\n   */\n  signal?: AbortSignal\n\n  /**\n   * Use `\\\\` as a path separator _only_, and\n   *  _never_ as an escape character. If set, all `\\\\` characters are\n   *  replaced with `/` in the pattern.\n   *\n   *  Note that this makes it **impossible** to match against paths\n   *  containing literal glob pattern characters, but allows matching\n   *  with patterns constructed using `path.join()` and\n   *  `path.resolve()` on Windows platforms, mimicking the (buggy!)\n   *  behavior of Glob v7 and before on Windows. Please use with\n   *  caution, and be mindful of [the caveat below about Windows\n   *  paths](#windows). (For legacy reasons, this is also set if\n   *  `allowWindowsEscape` is set to the exact value `false`.)\n   */\n  windowsPathsNoEscape?: boolean\n\n  /**\n   * Return [PathScurry](http://npm.im/path-scurry)\n   * `Path` objects instead of strings. These are similar to a\n   * NodeJS `Dirent` object, but with additional methods and\n   * properties.\n   *\n   * Conflicts with {@link absolute}\n   */\n  withFileTypes?: boolean\n\n  /**\n   * An fs implementation to override some or all of the defaults.  See\n   * http://npm.im/path-scurry for details about what can be overridden.\n   */\n  fs?: FSOption\n\n  /**\n   * Just passed along to Minimatch.  Note that this makes all pattern\n   * matching operations slower and *extremely* noisy.\n   */\n  debug?: boolean\n\n  /**\n   * Return `/` delimited paths, even on Windows.\n   *\n   * On posix systems, this has no effect.  
But, on Windows, it means that\n   * paths will be `/` delimited, and absolute paths will be their full\n   * resolved UNC forms, eg instead of `'C:\\\foo\\\bar'`, it would return\n   * `'//?/C:/foo/bar'`\n   */\n  posix?: boolean\n\n  /**\n   * Do not match any children of any matches. For example, the pattern\n   * `**\/foo` would match `a/foo`, but not `a/foo/b/foo` in this mode.\n   *\n   * This is especially useful for cases like \"find all `node_modules`\n   * folders, but not the ones in `node_modules`\".\n   *\n   * In order to support this, the `Ignore` implementation must support an\n   * `add(pattern: string)` method. If using the default `Ignore` class, then\n   * this is fine, but if this is set to `false`, and a custom `Ignore` is\n   * provided that does not have an `add()` method, then it will throw an\n   * error.\n   *\n   * **Caveat** It *only* ignores matches that would be a descendant of a\n   * previous match, and only if that descendant is matched *after* the\n   * ancestor is encountered. Since the file system walk happens in\n   * indeterminate order, it's possible that a match will already be added\n   * before its ancestor, if multiple or braced patterns are used.\n   *\n   * For example:\n   *\n   * ```ts\n   * const results = await glob([\n   *   // likely to match first, since it's just a stat\n   *   'a/b/c/d/e/f',\n   *\n   *   // this pattern is more complicated! It must do various readdir()\n   *   // calls and test the results against a regular expression, and that\n   *   // is certainly going to take a little bit longer.\n   *   //\n   *   // So, later on, it encounters a match at 'a/b/c/d/e', but it's too\n   *   // late to ignore a/b/c/d/e/f, because it's already been emitted.\n   *   'a/[bdf]/?/[a-z]/*',\n   * ], { includeChildMatches: false })\n   * ```\n   *\n   * It's best to only set this to `false` if you can be reasonably sure that\n   * no components of the pattern will potentially match one another's file\n   * system descendants, or if the occasional included child entry will not\n   * cause problems.\n   *\n   * @default true\n   */\n  includeChildMatches?: boolean\n}\n\nexport type GlobOptionsWithFileTypesTrue = GlobOptions & {\n  withFileTypes: true\n  // string options not relevant if returning Path objects.\n  absolute?: undefined\n  mark?: undefined\n  posix?: undefined\n}\n\nexport type GlobOptionsWithFileTypesFalse = GlobOptions & {\n  withFileTypes?: false\n}\n\nexport type GlobOptionsWithFileTypesUnset = GlobOptions & {\n  withFileTypes?: undefined\n}\n\nexport type Result<Opts> =\n  Opts extends GlobOptionsWithFileTypesTrue ? Path\n  : Opts extends GlobOptionsWithFileTypesFalse ? string\n  : Opts extends GlobOptionsWithFileTypesUnset ? string\n  : string | Path\nexport type Results<Opts> = Result<Opts>[]\n\nexport type FileTypes<Opts> =\n  Opts extends GlobOptionsWithFileTypesTrue ? true\n  : Opts extends GlobOptionsWithFileTypesFalse ? false\n  : Opts extends GlobOptionsWithFileTypesUnset ? 
false\n  : boolean\n\n/**\n * An object that can perform glob pattern traversals.\n */\nexport class Glob implements GlobOptions {\n  absolute?: boolean\n  cwd: string\n  root?: string\n  dot: boolean\n  dotRelative: boolean\n  follow: boolean\n  ignore?: string | string[] | IgnoreLike\n  magicalBraces: boolean\n  mark?: boolean\n  matchBase: boolean\n  maxDepth: number\n  nobrace: boolean\n  nocase: boolean\n  nodir: boolean\n  noext: boolean\n  noglobstar: boolean\n  pattern: string[]\n  platform: NodeJS.Platform\n  realpath: boolean\n  scurry: PathScurry\n  stat: boolean\n  signal?: AbortSignal\n  windowsPathsNoEscape: boolean\n  withFileTypes: FileTypes\n  includeChildMatches: boolean\n\n  /**\n   * The options provided to the constructor.\n   */\n  opts: Opts\n\n  /**\n   * An array of parsed immutable {@link Pattern} objects.\n   */\n  patterns: Pattern[]\n\n  /**\n   * All options are stored as properties on the `Glob` object.\n   *\n   * See {@link GlobOptions} for full options descriptions.\n   *\n   * Note that a previous `Glob` object can be passed as the\n   * `GlobOptions` to another `Glob` instantiation to re-use settings\n   * and caches with a new pattern.\n   *\n   * Traversal functions can be called multiple times to run the walk\n   * again.\n   */\n  constructor(pattern: string | string[], opts: Opts) {\n    /* c8 ignore start */\n    if (!opts) throw new TypeError('glob options required')\n    /* c8 ignore stop */\n    this.withFileTypes = !!opts.withFileTypes as FileTypes\n    this.signal = opts.signal\n    this.follow = !!opts.follow\n    this.dot = !!opts.dot\n    this.dotRelative = !!opts.dotRelative\n    this.nodir = !!opts.nodir\n    this.mark = !!opts.mark\n    if (!opts.cwd) {\n      this.cwd = ''\n    } else if (opts.cwd instanceof URL || opts.cwd.startsWith('file://')) {\n      opts.cwd = fileURLToPath(opts.cwd)\n    }\n    this.cwd = opts.cwd || ''\n    this.root = opts.root\n    this.magicalBraces = !!opts.magicalBraces\n    this.nobrace = !!opts.nobrace\n    this.noext = !!opts.noext\n    this.realpath = !!opts.realpath\n    this.absolute = opts.absolute\n    this.includeChildMatches = opts.includeChildMatches !== false\n\n    this.noglobstar = !!opts.noglobstar\n    this.matchBase = !!opts.matchBase\n    this.maxDepth =\n      typeof opts.maxDepth === 'number' ? opts.maxDepth : Infinity\n    this.stat = !!opts.stat\n    this.ignore = opts.ignore\n\n    if (this.withFileTypes && this.absolute !== undefined) {\n      throw new Error('cannot set absolute and withFileTypes:true')\n    }\n\n    if (typeof pattern === 'string') {\n      pattern = [pattern]\n    }\n\n    this.windowsPathsNoEscape =\n      !!opts.windowsPathsNoEscape ||\n      (opts as { allowWindowsEscape?: boolean }).allowWindowsEscape ===\n        false\n\n    if (this.windowsPathsNoEscape) {\n      pattern = pattern.map(p => p.replace(/\\\\/g, '/'))\n    }\n\n    if (this.matchBase) {\n      if (opts.noglobstar) {\n        throw new TypeError('base matching requires globstar')\n      }\n      pattern = pattern.map(p => (p.includes('/') ? 
p : `./**/${p}`))\n    }\n\n    this.pattern = pattern\n\n    this.platform = opts.platform || defaultPlatform\n    this.opts = { ...opts, platform: this.platform }\n    if (opts.scurry) {\n      this.scurry = opts.scurry\n      if (\n        opts.nocase !== undefined &&\n        opts.nocase !== opts.scurry.nocase\n      ) {\n        throw new Error('nocase option contradicts provided scurry option')\n      }\n    } else {\n      const Scurry =\n        opts.platform === 'win32' ? PathScurryWin32\n        : opts.platform === 'darwin' ? PathScurryDarwin\n        : opts.platform ? PathScurryPosix\n        : PathScurry\n      this.scurry = new Scurry(this.cwd, {\n        nocase: opts.nocase,\n        fs: opts.fs,\n      })\n    }\n    this.nocase = this.scurry.nocase\n\n    // If you do nocase:true on a case-sensitive file system, then\n    // we need to use regexps instead of strings for non-magic\n    // path portions, because statting `aBc` won't return results\n    // for the file `AbC` for example.\n    const nocaseMagicOnly =\n      this.platform === 'darwin' || this.platform === 'win32'\n\n    const mmo: MinimatchOptions = {\n      // default nocase based on platform\n      ...opts,\n      dot: this.dot,\n      matchBase: this.matchBase,\n      nobrace: this.nobrace,\n      nocase: this.nocase,\n      nocaseMagicOnly,\n      nocomment: true,\n      noext: this.noext,\n      nonegate: true,\n      optimizationLevel: 2,\n      platform: this.platform,\n      windowsPathsNoEscape: this.windowsPathsNoEscape,\n      debug: !!this.opts.debug,\n    }\n\n    const mms = this.pattern.map(p => new Minimatch(p, mmo))\n    const [matchSet, globParts] = mms.reduce(\n      (set: [MatchSet, GlobParts], m) => {\n        set[0].push(...m.set)\n        set[1].push(...m.globParts)\n        return set\n      },\n      [[], []],\n    )\n    this.patterns = matchSet.map((set, i) => {\n      const g = globParts[i]\n      /* c8 ignore start */\n      if (!g) throw new Error('invalid pattern object')\n      /* c8 ignore stop */\n      return new Pattern(set, g, 0, this.platform)\n    })\n  }\n\n  /**\n   * Returns a Promise that resolves to the results array.\n   */\n  async walk(): Promise>\n  async walk(): Promise<(string | Path)[]> {\n    // Walkers always return array of Path objects, so we just have to\n    // coerce them into the right shape.  
It will have already called\n    // realpath() if the option was set to do so, so we know that's cached.\n    // start out knowing the cwd, at least\n    return [\n      ...(await new GlobWalker(this.patterns, this.scurry.cwd, {\n        ...this.opts,\n        maxDepth:\n          this.maxDepth !== Infinity ?\n            this.maxDepth + this.scurry.cwd.depth()\n          : Infinity,\n        platform: this.platform,\n        nocase: this.nocase,\n        includeChildMatches: this.includeChildMatches,\n      }).walk()),\n    ]\n  }\n\n  /**\n   * synchronous {@link Glob.walk}\n   */\n  walkSync(): Results\n  walkSync(): (string | Path)[] {\n    return [\n      ...new GlobWalker(this.patterns, this.scurry.cwd, {\n        ...this.opts,\n        maxDepth:\n          this.maxDepth !== Infinity ?\n            this.maxDepth + this.scurry.cwd.depth()\n          : Infinity,\n        platform: this.platform,\n        nocase: this.nocase,\n        includeChildMatches: this.includeChildMatches,\n      }).walkSync(),\n    ]\n  }\n\n  /**\n   * Stream results asynchronously.\n   */\n  stream(): Minipass, Result>\n  stream(): Minipass {\n    return new GlobStream(this.patterns, this.scurry.cwd, {\n      ...this.opts,\n      maxDepth:\n        this.maxDepth !== Infinity ?\n          this.maxDepth + this.scurry.cwd.depth()\n        : Infinity,\n      platform: this.platform,\n      nocase: this.nocase,\n      includeChildMatches: this.includeChildMatches,\n    }).stream()\n  }\n\n  /**\n   * Stream results synchronously.\n   */\n  streamSync(): Minipass, Result>\n  streamSync(): Minipass {\n    return new GlobStream(this.patterns, this.scurry.cwd, {\n      ...this.opts,\n      maxDepth:\n        this.maxDepth !== Infinity ?\n          this.maxDepth + this.scurry.cwd.depth()\n        : Infinity,\n      platform: this.platform,\n      nocase: this.nocase,\n      includeChildMatches: this.includeChildMatches,\n    }).streamSync()\n  }\n\n  /**\n   * Default sync iteration function. Returns a Generator that\n   * iterates over the results.\n   */\n  iterateSync(): Generator, void, void> {\n    return this.streamSync()[Symbol.iterator]()\n  }\n  [Symbol.iterator]() {\n    return this.iterateSync()\n  }\n\n  /**\n   * Default async iteration function. Returns an AsyncGenerator that\n   * iterates over the results.\n   */\n  iterate(): AsyncGenerator, void, void> {\n    return this.stream()[Symbol.asyncIterator]()\n  }\n  [Symbol.asyncIterator]() {\n    return this.iterate()\n  }\n}\n", "/**\n * @module LRUCache\n */\n\n// module-private names and types\ntype Perf = { now: () => number }\nconst perf: Perf =\n  typeof performance === 'object' &&\n  performance &&\n  typeof performance.now === 'function'\n    ? performance\n    : Date\n\nconst warned = new Set()\n\n// either a function or a class\ntype ForC = ((...a: any[]) => any) | { new (...a: any[]): any }\n\n/* c8 ignore start */\nconst PROCESS = (\n  typeof process === 'object' && !!process ? process : {}\n) as { [k: string]: any }\n/* c8 ignore start */\n\nconst emitWarning = (\n  msg: string,\n  type: string,\n  code: string,\n  fn: ForC\n) => {\n  typeof PROCESS.emitWarning === 'function'\n    ? 
PROCESS.emitWarning(msg, type, code, fn)\n    : console.error(`[${code}] ${type}: ${msg}`)\n}\n\nlet AC = globalThis.AbortController\nlet AS = globalThis.AbortSignal\n\n/* c8 ignore start */\nif (typeof AC === 'undefined') {\n  //@ts-ignore\n  AS = class AbortSignal {\n    onabort?: (...a: any[]) => any\n    _onabort: ((...a: any[]) => any)[] = []\n    reason?: any\n    aborted: boolean = false\n    addEventListener(_: string, fn: (...a: any[]) => any) {\n      this._onabort.push(fn)\n    }\n  }\n  //@ts-ignore\n  AC = class AbortController {\n    constructor() {\n      warnACPolyfill()\n    }\n    signal = new AS()\n    abort(reason: any) {\n      if (this.signal.aborted) return\n      //@ts-ignore\n      this.signal.reason = reason\n      //@ts-ignore\n      this.signal.aborted = true\n      //@ts-ignore\n      for (const fn of this.signal._onabort) {\n        fn(reason)\n      }\n      this.signal.onabort?.(reason)\n    }\n  }\n  let printACPolyfillWarning =\n    PROCESS.env?.LRU_CACHE_IGNORE_AC_WARNING !== '1'\n  const warnACPolyfill = () => {\n    if (!printACPolyfillWarning) return\n    printACPolyfillWarning = false\n    emitWarning(\n      'AbortController is not defined. If using lru-cache in ' +\n        'node 14, load an AbortController polyfill from the ' +\n        '`node-abort-controller` package. A minimal polyfill is ' +\n        'provided for use by LRUCache.fetch(), but it should not be ' +\n        'relied upon in other contexts (eg, passing it to other APIs that ' +\n        'use AbortController/AbortSignal might have undesirable effects). ' +\n        'You may disable this with LRU_CACHE_IGNORE_AC_WARNING=1 in the env.',\n      'NO_ABORT_CONTROLLER',\n      'ENOTSUP',\n      warnACPolyfill\n    )\n  }\n}\n/* c8 ignore stop */\n\nconst shouldWarn = (code: string) => !warned.has(code)\n\nconst TYPE = Symbol('type')\nexport type PosInt = number & { [TYPE]: 'Positive Integer' }\nexport type Index = number & { [TYPE]: 'LRUCache Index' }\n\nconst isPosInt = (n: any): n is PosInt =>\n  n && n === Math.floor(n) && n > 0 && isFinite(n)\n\nexport type UintArray = Uint8Array | Uint16Array | Uint32Array\nexport type NumberArray = UintArray | number[]\n\n/* c8 ignore start */\n// This is a little bit ridiculous, tbh.\n// The maximum array length is 2^32-1 or thereabouts on most JS impls.\n// And well before that point, you're caching the entire world, I mean,\n// that's ~32GB of just integers for the next/prev links, plus whatever\n// else to hold that many keys and values.  Just filling the memory with\n// zeroes at init time is brutal when you get that big.\n// But why not be complete?\n// Maybe in the future, these limits will have expanded.\nconst getUintArray = (max: number) =>\n  !isPosInt(max)\n    ? null\n    : max <= Math.pow(2, 8)\n    ? Uint8Array\n    : max <= Math.pow(2, 16)\n    ? Uint16Array\n    : max <= Math.pow(2, 32)\n    ? Uint32Array\n    : max <= Number.MAX_SAFE_INTEGER\n    ? 
ZeroArray\n    : null\n/* c8 ignore stop */\n\nclass ZeroArray extends Array {\n  constructor(size: number) {\n    super(size)\n    this.fill(0)\n  }\n}\nexport type { ZeroArray }\nexport type { Stack }\n\nexport type StackLike = Stack | Index[]\nclass Stack {\n  heap: NumberArray\n  length: number\n  // private constructor\n  static #constructing: boolean = false\n  static create(max: number): StackLike {\n    const HeapCls = getUintArray(max)\n    if (!HeapCls) return []\n    Stack.#constructing = true\n    const s = new Stack(max, HeapCls)\n    Stack.#constructing = false\n    return s\n  }\n  constructor(\n    max: number,\n    HeapCls: { new (n: number): NumberArray }\n  ) {\n    /* c8 ignore start */\n    if (!Stack.#constructing) {\n      throw new TypeError('instantiate Stack using Stack.create(n)')\n    }\n    /* c8 ignore stop */\n    this.heap = new HeapCls(max)\n    this.length = 0\n  }\n  push(n: Index) {\n    this.heap[this.length++] = n\n  }\n  pop(): Index {\n    return this.heap[--this.length] as Index\n  }\n}\n\n/**\n * Promise representing an in-progress {@link LRUCache#fetch} call\n */\nexport type BackgroundFetch = Promise & {\n  __returned: BackgroundFetch | undefined\n  __abortController: AbortController\n  __staleWhileFetching: V | undefined\n}\n\nexport type DisposeTask = [\n  value: V,\n  key: K,\n  reason: LRUCache.DisposeReason\n]\n\nexport namespace LRUCache {\n  /**\n   * An integer greater than 0, reflecting the calculated size of items\n   */\n  export type Size = number\n\n  /**\n   * Integer greater than 0, representing some number of milliseconds, or the\n   * time at which a TTL started counting from.\n   */\n  export type Milliseconds = number\n\n  /**\n   * An integer greater than 0, reflecting a number of items\n   */\n  export type Count = number\n\n  /**\n   * The reason why an item was removed from the cache, passed\n   * to the {@link Disposer} methods.\n   *\n   * - `evict`: The item was evicted because it is the least recently used,\n   *   and the cache is full.\n   * - `set`: A new value was set, overwriting the old value being disposed.\n   * - `delete`: The item was explicitly deleted, either by calling\n   *   {@link LRUCache#delete}, {@link LRUCache#clear}, or\n   *   {@link LRUCache#set} with an undefined value.\n   * - `expire`: The item was removed due to exceeding its TTL.\n   * - `fetch`: A {@link OptionsBase#fetchMethod} operation returned\n   *   `undefined` or was aborted, causing the item to be deleted.\n   */\n  export type DisposeReason =\n    | 'evict'\n    | 'set'\n    | 'delete'\n    | 'expire'\n    | 'fetch'\n  /**\n   * A method called upon item removal, passed as the\n   * {@link OptionsBase.dispose} and/or\n   * {@link OptionsBase.disposeAfter} options.\n   */\n  export type Disposer = (\n    value: V,\n    key: K,\n    reason: DisposeReason\n  ) => void\n\n  /**\n   * The reason why an item was added to the cache, passed\n   * to the {@link Inserter} methods.\n   *\n   * - `add`: the item was not found in the cache, and was added\n   * - `update`: the item was in the cache, with the same value provided\n   * - `replace`: the item was in the cache, and replaced\n   */\n  export type InsertReason = 'add' | 'update' | 'replace'\n\n  /**\n   * A method called upon item insertion, passed as the\n   * {@link OptionsBase.insert}\n   */\n  export type Inserter = (\n    value: V,\n    key: K,\n    reason: InsertReason\n  ) => void\n\n  /**\n   * A function that returns the effective calculated size\n   * of an entry in the 
cache.\n   */\n  export type SizeCalculator = (value: V, key: K) => Size\n\n  /**\n   * Options provided to the\n   * {@link OptionsBase.fetchMethod} function.\n   */\n  export interface FetcherOptions {\n    signal: AbortSignal\n    options: FetcherFetchOptions\n    /**\n     * Object provided in the {@link FetchOptions.context} option to\n     * {@link LRUCache#fetch}\n     */\n    context: FC\n  }\n\n  /**\n   * Occasionally, it may be useful to track the internal behavior of the\n   * cache, particularly for logging, debugging, or for behavior within the\n   * `fetchMethod`. To do this, you can pass a `status` object to the\n   * {@link LRUCache#fetch}, {@link LRUCache#get}, {@link LRUCache#set},\n   * {@link LRUCache#memo}, and {@link LRUCache#has} methods.\n   *\n   * The `status` option should be a plain JavaScript object. The following\n   * fields will be set on it appropriately, depending on the situation.\n   */\n  export interface Status {\n    /**\n     * The status of a set() operation.\n     *\n     * - add: the item was not found in the cache, and was added\n     * - update: the item was in the cache, with the same value provided\n     * - replace: the item was in the cache, and replaced\n     * - miss: the item was not added to the cache for some reason\n     */\n    set?: 'add' | 'update' | 'replace' | 'miss'\n\n    /**\n     * the ttl stored for the item, or undefined if ttls are not used.\n     */\n    ttl?: Milliseconds\n\n    /**\n     * the start time for the item, or undefined if ttls are not used.\n     */\n    start?: Milliseconds\n\n    /**\n     * The timestamp used for TTL calculation\n     */\n    now?: Milliseconds\n\n    /**\n     * the remaining ttl for the item, or undefined if ttls are not used.\n     */\n    remainingTTL?: Milliseconds\n\n    /**\n     * The calculated size for the item, if sizes are used.\n     */\n    entrySize?: Size\n\n    /**\n     * The total calculated size of the cache, if sizes are used.\n     */\n    totalCalculatedSize?: Size\n\n    /**\n     * A flag indicating that the item was not stored, due to exceeding the\n     * {@link OptionsBase.maxEntrySize}\n     */\n    maxEntrySizeExceeded?: true\n\n    /**\n     * The old value, specified in the case of `set:'update'` or\n     * `set:'replace'`\n     */\n    oldValue?: V\n\n    /**\n     * The results of a {@link LRUCache#has} operation\n     *\n     * - hit: the item was found in the cache\n     * - stale: the item was found in the cache, but is stale\n     * - miss: the item was not found in the cache\n     */\n    has?: 'hit' | 'stale' | 'miss'\n\n    /**\n     * The status of a {@link LRUCache#fetch} operation.\n     * Note that this can change as the underlying fetch() moves through\n     * various states.\n     *\n     * - inflight: there is another fetch() for this key which is in process\n     * - get: there is no {@link OptionsBase.fetchMethod}, so\n     *   {@link LRUCache#get} was called.\n     * - miss: the item is not in cache, and will be fetched.\n     * - hit: the item is in the cache, and was resolved immediately.\n     * - stale: the item is in the cache, but stale.\n     * - refresh: the item is in the cache, and not stale, but\n     *   {@link FetchOptions.forceRefresh} was specified.\n     */\n    fetch?: 'get' | 'inflight' | 'miss' | 'hit' | 'stale' | 'refresh'\n\n    /**\n     * The {@link OptionsBase.fetchMethod} was called\n     */\n    fetchDispatched?: true\n\n    /**\n     * The cached value was updated after a successful call to\n     * {@link 
OptionsBase.fetchMethod}\n     */\n    fetchUpdated?: true\n\n    /**\n     * The reason for a fetch() rejection.  Either the error raised by the\n     * {@link OptionsBase.fetchMethod}, or the reason for an\n     * AbortSignal.\n     */\n    fetchError?: Error\n\n    /**\n     * The fetch received an abort signal\n     */\n    fetchAborted?: true\n\n    /**\n     * The abort signal received was ignored, and the fetch was allowed to\n     * continue.\n     */\n    fetchAbortIgnored?: true\n\n    /**\n     * The fetchMethod promise resolved successfully\n     */\n    fetchResolved?: true\n\n    /**\n     * The fetchMethod promise was rejected\n     */\n    fetchRejected?: true\n\n    /**\n     * The status of a {@link LRUCache#get} operation.\n     *\n     * - fetching: The item is currently being fetched.  If a previous value\n     *   is present and allowed, that will be returned.\n     * - stale: The item is in the cache, and is stale.\n     * - hit: the item is in the cache\n     * - miss: the item is not in the cache\n     */\n    get?: 'stale' | 'hit' | 'miss'\n\n    /**\n     * A fetch or get operation returned a stale value.\n     */\n    returnedStale?: true\n  }\n\n  /**\n   * options which override the options set in the LRUCache constructor\n   * when calling {@link LRUCache#fetch}.\n   *\n   * This is the union of {@link GetOptions} and {@link SetOptions}, plus\n   * {@link OptionsBase.noDeleteOnFetchRejection},\n   * {@link OptionsBase.allowStaleOnFetchRejection},\n   * {@link FetchOptions.forceRefresh}, and\n   * {@link FetcherOptions.context}\n   *\n   * Any of these may be modified in the {@link OptionsBase.fetchMethod}\n   * function, but the {@link GetOptions} fields will of course have no\n   * effect, as the {@link LRUCache#get} call already happened by the time\n   * the fetchMethod is called.\n   */\n  export interface FetcherFetchOptions\n    extends Pick<\n      OptionsBase,\n      | 'allowStale'\n      | 'updateAgeOnGet'\n      | 'noDeleteOnStaleGet'\n      | 'sizeCalculation'\n      | 'ttl'\n      | 'noDisposeOnSet'\n      | 'noUpdateTTL'\n      | 'noDeleteOnFetchRejection'\n      | 'allowStaleOnFetchRejection'\n      | 'ignoreFetchAbort'\n      | 'allowStaleOnFetchAbort'\n    > {\n    status?: Status\n    size?: Size\n  }\n\n  /**\n   * Options that may be passed to the {@link LRUCache#fetch} method.\n   */\n  export interface FetchOptions\n    extends FetcherFetchOptions {\n    /**\n     * Set to true to force a re-load of the existing data, even if it\n     * is not yet stale.\n     */\n    forceRefresh?: boolean\n    /**\n     * Context provided to the {@link OptionsBase.fetchMethod} as\n     * the {@link FetcherOptions.context} param.\n     *\n     * If the FC type is specified as unknown (the default),\n     * undefined or void, then this is optional.  
Otherwise, it will\n     * be required.\n     */\n    context?: FC\n    signal?: AbortSignal\n    status?: Status\n  }\n  /**\n   * Options provided to {@link LRUCache#fetch} when the FC type is something\n   * other than `unknown`, `undefined`, or `void`\n   */\n  export interface FetchOptionsWithContext\n    extends FetchOptions {\n    context: FC\n  }\n  /**\n   * Options provided to {@link LRUCache#fetch} when the FC type is\n   * `undefined` or `void`\n   */\n  export interface FetchOptionsNoContext\n    extends FetchOptions {\n    context?: undefined\n  }\n\n  export interface MemoOptions\n    extends Pick<\n      OptionsBase,\n      | 'allowStale'\n      | 'updateAgeOnGet'\n      | 'noDeleteOnStaleGet'\n      | 'sizeCalculation'\n      | 'ttl'\n      | 'noDisposeOnSet'\n      | 'noUpdateTTL'\n      | 'noDeleteOnFetchRejection'\n      | 'allowStaleOnFetchRejection'\n      | 'ignoreFetchAbort'\n      | 'allowStaleOnFetchAbort'\n    > {\n    /**\n     * Set to true to force a re-load of the existing data, even if it\n     * is not yet stale.\n     */\n    forceRefresh?: boolean\n    /**\n     * Context provided to the {@link OptionsBase.memoMethod} as\n     * the {@link MemoizerOptions.context} param.\n     *\n     * If the FC type is specified as unknown (the default),\n     * undefined or void, then this is optional.  Otherwise, it will\n     * be required.\n     */\n    context?: FC\n    status?: Status\n  }\n  /**\n   * Options provided to {@link LRUCache#memo} when the FC type is something\n   * other than `unknown`, `undefined`, or `void`\n   */\n  export interface MemoOptionsWithContext\n    extends MemoOptions {\n    context: FC\n  }\n  /**\n   * Options provided to {@link LRUCache#memo} when the FC type is\n   * `undefined` or `void`\n   */\n  export interface MemoOptionsNoContext\n    extends MemoOptions {\n    context?: undefined\n  }\n\n  /**\n   * Options provided to the\n   * {@link OptionsBase.memoMethod} function.\n   */\n  export interface MemoizerOptions {\n    options: MemoizerMemoOptions\n    /**\n     * Object provided in the {@link MemoOptions.context} option to\n     * {@link LRUCache#memo}\n     */\n    context: FC\n  }\n\n  /**\n   * options which override the options set in the LRUCache constructor\n   * when calling {@link LRUCache#memo}.\n   *\n   * This is the union of {@link GetOptions} and {@link SetOptions}, plus\n   * {@link MemoOptions.forceRefresh}, and\n   * {@link MemoOptions.context}\n   *\n   * Any of these may be modified in the {@link OptionsBase.memoMethod}\n   * function, but the {@link GetOptions} fields will of course have no\n   * effect, as the {@link LRUCache#get} call already happened by the time\n   * the memoMethod is called.\n   */\n  export interface MemoizerMemoOptions\n    extends Pick<\n      OptionsBase,\n      | 'allowStale'\n      | 'updateAgeOnGet'\n      | 'noDeleteOnStaleGet'\n      | 'sizeCalculation'\n      | 'ttl'\n      | 'noDisposeOnSet'\n      | 'noUpdateTTL'\n    > {\n    status?: Status\n    size?: Size\n    start?: Milliseconds\n  }\n\n  /**\n   * Options that may be passed to the {@link LRUCache#has} method.\n   */\n  export interface HasOptions\n    extends Pick, 'updateAgeOnHas'> {\n    status?: Status\n  }\n\n  /**\n   * Options that may be passed to the {@link LRUCache#get} method.\n   */\n  export interface GetOptions\n    extends Pick<\n      OptionsBase,\n      'allowStale' | 'updateAgeOnGet' | 'noDeleteOnStaleGet'\n    > {\n    status?: Status\n  }\n\n  /**\n   * Options that may be passed to the {@link 
LRUCache#peek} method.\n   */\n  export interface PeekOptions\n    extends Pick, 'allowStale'> {}\n\n  /**\n   * Options that may be passed to the {@link LRUCache#set} method.\n   */\n  export interface SetOptions\n    extends Pick<\n      OptionsBase,\n      'sizeCalculation' | 'ttl' | 'noDisposeOnSet' | 'noUpdateTTL'\n    > {\n    /**\n     * If size tracking is enabled, then setting an explicit size\n     * in the {@link LRUCache#set} call will prevent calling the\n     * {@link OptionsBase.sizeCalculation} function.\n     */\n    size?: Size\n    /**\n     * If TTL tracking is enabled, then setting an explicit start\n     * time in the {@link LRUCache#set} call will override the\n     * default time from `performance.now()` or `Date.now()`.\n     *\n     * Note that it must be a valid value for whichever time-tracking\n     * method is in use.\n     */\n    start?: Milliseconds\n    status?: Status\n  }\n\n  /**\n   * The type signature for the {@link OptionsBase.fetchMethod} option.\n   */\n  export type Fetcher = (\n    key: K,\n    staleValue: V | undefined,\n    options: FetcherOptions\n  ) => Promise | V | undefined | void\n\n  /**\n   * the type signature for the {@link OptionsBase.memoMethod} option.\n   */\n  export type Memoizer = (\n    key: K,\n    staleValue: V | undefined,\n    options: MemoizerOptions\n  ) => V\n\n  /**\n   * Options which may be passed to the {@link LRUCache} constructor.\n   *\n   * Most of these may be overridden in the various options that use\n   * them.\n   *\n   * Despite all being technically optional, the constructor requires that\n   * a cache is at minimum limited by one or more of {@link OptionsBase.max},\n   * {@link OptionsBase.ttl}, or {@link OptionsBase.maxSize}.\n   *\n   * If {@link OptionsBase.ttl} is used alone, then it is strongly advised\n   * (and in fact required by the type definitions here) that the cache\n   * also set {@link OptionsBase.ttlAutopurge}, to prevent potentially\n   * unbounded storage.\n   *\n   * All options are also available on the {@link LRUCache} instance, making\n   * it safe to pass an LRUCache instance as the options argumemnt to\n   * make another empty cache of the same type.\n   *\n   * Some options are marked as read-only, because changing them after\n   * instantiation is not safe. Changing any of the other options will of\n   * course only have an effect on subsequent method calls.\n   */\n  export interface OptionsBase {\n    /**\n     * The maximum number of items to store in the cache before evicting\n     * old entries. This is read-only on the {@link LRUCache} instance,\n     * and may not be overridden.\n     *\n     * If set, then storage space will be pre-allocated at construction\n     * time, and the cache will perform significantly faster.\n     *\n     * Note that significantly fewer items may be stored, if\n     * {@link OptionsBase.maxSize} and/or {@link OptionsBase.ttl} are also\n     * set.\n     *\n     * **It is strongly recommended to set a `max` to prevent unbounded growth\n     * of the cache.**\n     */\n    max?: Count\n\n    /**\n     * Max time in milliseconds for items to live in cache before they are\n     * considered stale.  
Note that stale items are NOT preemptively removed by\n     * default, and MAY live in the cache, contributing to its LRU max, long\n     * after they have expired, unless {@link OptionsBase.ttlAutopurge} is\n     * set.\n     *\n     * If set to `0` (the default value), then that means \"do not track\n     * TTL\", not \"expire immediately\".\n     *\n     * Also, as this cache is optimized for LRU/MRU operations, some of\n     * the staleness/TTL checks will reduce performance, as they will incur\n     * overhead by deleting items.\n     *\n     * This is not primarily a TTL cache, and does not make strong TTL\n     * guarantees. There is no pre-emptive pruning of expired items, but you\n     * _may_ set a TTL on the cache, and it will treat expired items as missing\n     * when they are fetched, and delete them.\n     *\n     * Optional, but must be a non-negative integer in ms if specified.\n     *\n     * This may be overridden by passing an options object to `cache.set()`.\n     *\n     * At least one of `max`, `maxSize`, or `TTL` is required. This must be a\n     * positive integer if set.\n     *\n     * Even if ttl tracking is enabled, **it is strongly recommended to set a\n     * `max` to prevent unbounded growth of the cache.**\n     *\n     * If ttl tracking is enabled, and `max` and `maxSize` are not set,\n     * and `ttlAutopurge` is not set, then a warning will be emitted\n     * cautioning about the potential for unbounded memory consumption.\n     * (The TypeScript definitions will also discourage this.)\n     */\n    ttl?: Milliseconds\n\n    /**\n     * Minimum amount of time in ms in which to check for staleness.\n     * Defaults to 1, which means that the current time is checked\n     * at most once per millisecond.\n     *\n     * Set to 0 to check the current time every time staleness is tested.\n     * (This reduces performance, and is theoretically unnecessary.)\n     *\n     * Setting this to a higher value will improve performance somewhat\n     * while using ttl tracking, albeit at the expense of keeping stale\n     * items around a bit longer than their TTLs would indicate.\n     *\n     * @default 1\n     */\n    ttlResolution?: Milliseconds\n\n    /**\n     * Preemptively remove stale items from the cache.\n     *\n     * Note that this may *significantly* degrade performance, especially if\n     * the cache is storing a large number of items. It is almost always best\n     * to just leave the stale items in the cache, and let them fall out as new\n     * items are added.\n     *\n     * Note that this means that {@link OptionsBase.allowStale} is a bit\n     * pointless, as stale items will be deleted almost as soon as they\n     * expire.\n     *\n     * Use with caution!\n     */\n    ttlAutopurge?: boolean\n\n    /**\n     * When using time-expiring entries with `ttl`, setting this to `true` will\n     * make each item's age reset to 0 whenever it is retrieved from cache with\n     * {@link LRUCache#get}, causing it to not expire. (It can still fall out\n     * of cache based on recency of use, of course.)\n     *\n     * Has no effect if {@link OptionsBase.ttl} is not set.\n     *\n     * This may be overridden by passing an options object to `cache.get()`.\n     */\n    updateAgeOnGet?: boolean\n\n    /**\n     * When using time-expiring entries with `ttl`, setting this to `true` will\n     * make each item's age reset to 0 whenever its presence in the cache is\n     * checked with {@link LRUCache#has}, causing it to not expire. 
(It can\n     * still fall out of cache based on recency of use, of course.)\n     *\n     * Has no effect if {@link OptionsBase.ttl} is not set.\n     */\n    updateAgeOnHas?: boolean\n\n    /**\n     * Allow {@link LRUCache#get} and {@link LRUCache#fetch} calls to return\n     * stale data, if available.\n     *\n     * By default, if you set `ttl`, stale items will only be deleted from the\n     * cache when you `get(key)`. That is, it's not preemptively pruning items,\n     * unless {@link OptionsBase.ttlAutopurge} is set.\n     *\n     * If you set `allowStale:true`, it'll return the stale value *as well as*\n     * deleting it. If you don't set this, then it'll return `undefined` when\n     * you try to get a stale entry.\n     *\n     * Note that when a stale entry is fetched, _even if it is returned due to\n     * `allowStale` being set_, it is removed from the cache immediately. You\n     * can suppress this behavior by setting\n     * {@link OptionsBase.noDeleteOnStaleGet}, either in the constructor, or in\n     * the options provided to {@link LRUCache#get}.\n     *\n     * This may be overridden by passing an options object to `cache.get()`.\n     * The `cache.has()` method will always return `false` for stale items.\n     *\n     * Only relevant if a ttl is set.\n     */\n    allowStale?: boolean\n\n    /**\n     * Function that is called on items when they are dropped from the\n     * cache, as `dispose(value, key, reason)`.\n     *\n     * This can be handy if you want to close file descriptors or do\n     * other cleanup tasks when items are no longer stored in the cache.\n     *\n     * **NOTE**: It is called _before_ the item has been fully removed\n     * from the cache, so if you want to put it right back in, you need\n     * to wait until the next tick. 
If you try to add it back in during\n     * the `dispose()` function call, it will break things in subtle and\n     * weird ways.\n     *\n     * Unlike several other options, this may _not_ be overridden by\n     * passing an option to `set()`, for performance reasons.\n     *\n     * The `reason` will be one of the following strings, corresponding\n     * to the reason for the item's deletion:\n     *\n     * - `evict` Item was evicted to make space for a new addition\n     * - `set` Item was overwritten by a new value\n     * - `expire` Item expired its TTL\n     * - `fetch` Item was deleted due to a failed or aborted fetch, or a\n     *   fetchMethod returning `undefined.\n     * - `delete` Item was removed by explicit `cache.delete(key)`,\n     *   `cache.clear()`, or `cache.set(key, undefined)`.\n     */\n    dispose?: Disposer\n\n    /**\n     * Function that is called when new items are inserted into the cache,\n     * as `onInsert(value, key, reason)`.\n     *\n     * This can be useful if you need to perform actions when an item is\n     * added, such as logging or tracking insertions.\n     *\n     * Unlike some other options, this may _not_ be overridden by passing\n     * an option to `set()`, for performance and consistency reasons.\n     */\n    onInsert?: Inserter\n\n    /**\n     * The same as {@link OptionsBase.dispose}, but called *after* the entry\n     * is completely removed and the cache is once again in a clean state.\n     *\n     * It is safe to add an item right back into the cache at this point.\n     * However, note that it is *very* easy to inadvertently create infinite\n     * recursion this way.\n     */\n    disposeAfter?: Disposer\n\n    /**\n     * Set to true to suppress calling the\n     * {@link OptionsBase.dispose} function if the entry key is\n     * still accessible within the cache.\n     *\n     * This may be overridden by passing an options object to\n     * {@link LRUCache#set}.\n     *\n     * Only relevant if `dispose` or `disposeAfter` are set.\n     */\n    noDisposeOnSet?: boolean\n\n    /**\n     * Boolean flag to tell the cache to not update the TTL when setting a new\n     * value for an existing key (ie, when updating a value rather than\n     * inserting a new value).  Note that the TTL value is _always_ set (if\n     * provided) when adding a new entry into the cache.\n     *\n     * Has no effect if a {@link OptionsBase.ttl} is not set.\n     *\n     * May be passed as an option to {@link LRUCache#set}.\n     */\n    noUpdateTTL?: boolean\n\n    /**\n     * Set to a positive integer to track the sizes of items added to the\n     * cache, and automatically evict items in order to stay below this size.\n     * Note that this may result in fewer than `max` items being stored.\n     *\n     * Attempting to add an item to the cache whose calculated size is greater\n     * that this amount will be a no-op. The item will not be cached, and no\n     * other items will be evicted.\n     *\n     * Optional, must be a positive integer if provided.\n     *\n     * Sets `maxEntrySize` to the same value, unless a different value is\n     * provided for `maxEntrySize`.\n     *\n     * At least one of `max`, `maxSize`, or `TTL` is required. 
This must be a\n     * positive integer if set.\n     *\n     * Even if size tracking is enabled, **it is strongly recommended to set a\n     * `max` to prevent unbounded growth of the cache.**\n     *\n     * Note also that size tracking can negatively impact performance,\n     * though for most cases, only minimally.\n     */\n    maxSize?: Size\n\n    /**\n     * The maximum allowed size for any single item in the cache.\n     *\n     * If a larger item is passed to {@link LRUCache#set} or returned by a\n     * {@link OptionsBase.fetchMethod} or {@link OptionsBase.memoMethod}, then\n     * it will not be stored in the cache.\n     *\n     * Attempting to add an item whose calculated size is greater than\n     * this amount will not cache the item or evict any old items, but\n     * WILL delete an existing value if one is already present.\n     *\n     * Optional, must be a positive integer if provided. Defaults to\n     * the value of `maxSize` if provided.\n     */\n    maxEntrySize?: Size\n\n    /**\n     * A function that returns a number indicating the item's size.\n     *\n     * Requires {@link OptionsBase.maxSize} to be set.\n     *\n     * If not provided, and {@link OptionsBase.maxSize} or\n     * {@link OptionsBase.maxEntrySize} are set, then all\n     * {@link LRUCache#set} calls **must** provide an explicit\n     * {@link SetOptions.size} or sizeCalculation param.\n     */\n    sizeCalculation?: SizeCalculator\n\n    /**\n     * Method that provides the implementation for {@link LRUCache#fetch}\n     *\n     * ```ts\n     * fetchMethod(key, staleValue, { signal, options, context })\n     * ```\n     *\n     * If `fetchMethod` is not provided, then `cache.fetch(key)` is equivalent\n     * to `Promise.resolve(cache.get(key))`.\n     *\n     * If at any time, `signal.aborted` is set to `true`, or if the\n     * `signal.onabort` method is called, or if it emits an `'abort'` event\n     * which you can listen to with `addEventListener`, then that means that\n     * the fetch should be abandoned. This may be passed along to async\n     * functions aware of AbortController/AbortSignal behavior.\n     *\n     * The `fetchMethod` should **only** return `undefined` or a Promise\n     * resolving to `undefined` if the AbortController signaled an `abort`\n     * event. In all other cases, it should return or resolve to a value\n     * suitable for adding to the cache.\n     *\n     * The `options` object is a union of the options that may be provided to\n     * `set()` and `get()`. 
If they are modified, then that will result in\n     * modifying the settings to `cache.set()` when the value is resolved, and\n     * in the case of\n     * {@link OptionsBase.noDeleteOnFetchRejection} and\n     * {@link OptionsBase.allowStaleOnFetchRejection}, the handling of\n     * `fetchMethod` failures.\n     *\n     * For example, a DNS cache may update the TTL based on the value returned\n     * from a remote DNS server by changing `options.ttl` in the `fetchMethod`.\n     */\n    fetchMethod?: Fetcher\n\n    /**\n     * Method that provides the implementation for {@link LRUCache#memo}\n     */\n    memoMethod?: Memoizer\n\n    /**\n     * Set to true to suppress the deletion of stale data when a\n     * {@link OptionsBase.fetchMethod} returns a rejected promise.\n     */\n    noDeleteOnFetchRejection?: boolean\n\n    /**\n     * Do not delete stale items when they are retrieved with\n     * {@link LRUCache#get}.\n     *\n     * Note that the `get` return value will still be `undefined`\n     * unless {@link OptionsBase.allowStale} is true.\n     *\n     * When using time-expiring entries with `ttl`, by default stale\n     * items will be removed from the cache when the key is accessed\n     * with `cache.get()`.\n     *\n     * Setting this option will cause stale items to remain in the cache, until\n     * they are explicitly deleted with `cache.delete(key)`, or retrieved with\n     * `noDeleteOnStaleGet` set to `false`.\n     *\n     * This may be overridden by passing an options object to `cache.get()`.\n     *\n     * Only relevant if a ttl is used.\n     */\n    noDeleteOnStaleGet?: boolean\n\n    /**\n     * Set to true to allow returning stale data when a\n     * {@link OptionsBase.fetchMethod} throws an error or returns a rejected\n     * promise.\n     *\n     * This differs from using {@link OptionsBase.allowStale} in that stale\n     * data will ONLY be returned in the case that the {@link LRUCache#fetch}\n     * fails, not any other times.\n     *\n     * If a `fetchMethod` fails, and there is no stale value available, the\n     * `fetch()` will resolve to `undefined`. 
Ie, all `fetchMethod` errors are\n     * suppressed.\n     *\n     * Implies `noDeleteOnFetchRejection`.\n     *\n     * This may be set in calls to `fetch()`, or defaulted on the constructor,\n     * or overridden by modifying the options object in the `fetchMethod`.\n     */\n    allowStaleOnFetchRejection?: boolean\n\n    /**\n     * Set to true to return a stale value from the cache when the\n     * `AbortSignal` passed to the {@link OptionsBase.fetchMethod} dispatches\n     * an `'abort'` event, whether user-triggered, or due to internal cache\n     * behavior.\n     *\n     * Unless {@link OptionsBase.ignoreFetchAbort} is also set, the underlying\n     * {@link OptionsBase.fetchMethod} will still be considered canceled, and\n     * any value it returns will be ignored and not cached.\n     *\n     * Caveat: since fetches are aborted when a new value is explicitly\n     * set in the cache, this can lead to fetch returning a stale value,\n     * since that was the fallback value _at the moment the `fetch()` was\n     * initiated_, even though the new updated value is now present in\n     * the cache.\n     *\n     * For example:\n     *\n     * ```ts\n     * const cache = new LRUCache({\n     *   ttl: 100,\n     *   fetchMethod: async (url, oldValue, { signal }) =>  {\n     *     const res = await fetch(url, { signal })\n     *     return await res.json()\n     *   }\n     * })\n     * cache.set('https://example.com/', { some: 'data' })\n     * // 100ms go by...\n     * const result = cache.fetch('https://example.com/')\n     * cache.set('https://example.com/', { other: 'thing' })\n     * console.log(await result) // { some: 'data' }\n     * console.log(cache.get('https://example.com/')) // { other: 'thing' }\n     * ```\n     */\n    allowStaleOnFetchAbort?: boolean\n\n    /**\n     * Set to true to ignore the `abort` event emitted by the `AbortSignal`\n     * object passed to {@link OptionsBase.fetchMethod}, and still cache the\n     * resulting resolution value, as long as it is not `undefined`.\n     *\n     * When used on its own, this means aborted {@link LRUCache#fetch} calls\n     * are not immediately resolved or rejected when they are aborted, and\n     * instead take the full time to await.\n     *\n     * When used with {@link OptionsBase.allowStaleOnFetchAbort}, aborted\n     * {@link LRUCache#fetch} calls will resolve immediately to their stale\n     * cached value or `undefined`, and will continue to process and eventually\n     * update the cache when they resolve, as long as the resulting value is\n     * not `undefined`, thus supporting a \"return stale on timeout while\n     * refreshing\" mechanism by passing `AbortSignal.timeout(n)` as the signal.\n     *\n     * For example:\n     *\n     * ```ts\n     * const c = new LRUCache({\n     *   ttl: 100,\n     *   ignoreFetchAbort: true,\n     *   allowStaleOnFetchAbort: true,\n     *   fetchMethod: async (key, oldValue, { signal }) => {\n     *     // note: do NOT pass the signal to fetch()!\n     *     // let's say this fetch can take a long time.\n     *     const res = await fetch(`https://slow-backend-server/${key}`)\n     *     return await res.json()\n     *   },\n     * })\n     *\n     * // this will return the stale value after 100ms, while still\n     * // updating in the background for next time.\n     * const val = await c.fetch('key', { signal: AbortSignal.timeout(100) })\n     * ```\n     *\n     * **Note**: regardless of this setting, an `abort` event _is still\n     * emitted on the `AbortSignal` 
object_, so may result in invalid results\n     * when passed to other underlying APIs that use AbortSignals.\n     *\n     * This may be overridden in the {@link OptionsBase.fetchMethod} or the\n     * call to {@link LRUCache#fetch}.\n     */\n    ignoreFetchAbort?: boolean\n  }\n\n  export interface OptionsMaxLimit\n    extends OptionsBase {\n    max: Count\n  }\n  export interface OptionsTTLLimit\n    extends OptionsBase {\n    ttl: Milliseconds\n    ttlAutopurge: boolean\n  }\n  export interface OptionsSizeLimit\n    extends OptionsBase {\n    maxSize: Size\n  }\n\n  /**\n   * The valid safe options for the {@link LRUCache} constructor\n   */\n  export type Options =\n    | OptionsMaxLimit\n    | OptionsSizeLimit\n    | OptionsTTLLimit\n\n  /**\n   * Entry objects used by {@link LRUCache#load} and {@link LRUCache#dump},\n   * and returned by {@link LRUCache#info}.\n   */\n  export interface Entry {\n    value: V\n    ttl?: Milliseconds\n    size?: Size\n    start?: Milliseconds\n  }\n}\n\n/**\n * Default export, the thing you're using this module to get.\n *\n * The `K` and `V` types define the key and value types, respectively. The\n * optional `FC` type defines the type of the `context` object passed to\n * `cache.fetch()` and `cache.memo()`.\n *\n * Keys and values **must not** be `null` or `undefined`.\n *\n * All properties from the options object (with the exception of `max`,\n * `maxSize`, `fetchMethod`, `memoMethod`, `dispose` and `disposeAfter`) are\n * added as normal public members. (The listed options are read-only getters.)\n *\n * Changing any of these will alter the defaults for subsequent method calls.\n */\nexport class LRUCache {\n  // options that cannot be changed without disaster\n  readonly #max: LRUCache.Count\n  readonly #maxSize: LRUCache.Size\n  readonly #dispose?: LRUCache.Disposer\n  readonly #onInsert?: LRUCache.Inserter\n  readonly #disposeAfter?: LRUCache.Disposer\n  readonly #fetchMethod?: LRUCache.Fetcher\n  readonly #memoMethod?: LRUCache.Memoizer\n\n  /**\n   * {@link LRUCache.OptionsBase.ttl}\n   */\n  ttl: LRUCache.Milliseconds\n\n  /**\n   * {@link LRUCache.OptionsBase.ttlResolution}\n   */\n  ttlResolution: LRUCache.Milliseconds\n  /**\n   * {@link LRUCache.OptionsBase.ttlAutopurge}\n   */\n  ttlAutopurge: boolean\n  /**\n   * {@link LRUCache.OptionsBase.updateAgeOnGet}\n   */\n  updateAgeOnGet: boolean\n  /**\n   * {@link LRUCache.OptionsBase.updateAgeOnHas}\n   */\n  updateAgeOnHas: boolean\n  /**\n   * {@link LRUCache.OptionsBase.allowStale}\n   */\n  allowStale: boolean\n\n  /**\n   * {@link LRUCache.OptionsBase.noDisposeOnSet}\n   */\n  noDisposeOnSet: boolean\n  /**\n   * {@link LRUCache.OptionsBase.noUpdateTTL}\n   */\n  noUpdateTTL: boolean\n  /**\n   * {@link LRUCache.OptionsBase.maxEntrySize}\n   */\n  maxEntrySize: LRUCache.Size\n  /**\n   * {@link LRUCache.OptionsBase.sizeCalculation}\n   */\n  sizeCalculation?: LRUCache.SizeCalculator\n  /**\n   * {@link LRUCache.OptionsBase.noDeleteOnFetchRejection}\n   */\n  noDeleteOnFetchRejection: boolean\n  /**\n   * {@link LRUCache.OptionsBase.noDeleteOnStaleGet}\n   */\n  noDeleteOnStaleGet: boolean\n  /**\n   * {@link LRUCache.OptionsBase.allowStaleOnFetchAbort}\n   */\n  allowStaleOnFetchAbort: boolean\n  /**\n   * {@link LRUCache.OptionsBase.allowStaleOnFetchRejection}\n   */\n  allowStaleOnFetchRejection: boolean\n  /**\n   * {@link LRUCache.OptionsBase.ignoreFetchAbort}\n   */\n  ignoreFetchAbort: boolean\n\n  // computed properties\n  #size: LRUCache.Count\n  #calculatedSize: 
LRUCache.Size\n  #keyMap: Map\n  #keyList: (K | undefined)[]\n  #valList: (V | BackgroundFetch | undefined)[]\n  #next: NumberArray\n  #prev: NumberArray\n  #head: Index\n  #tail: Index\n  #free: StackLike\n  #disposed?: DisposeTask[]\n  #sizes?: ZeroArray\n  #starts?: ZeroArray\n  #ttls?: ZeroArray\n\n  #hasDispose: boolean\n  #hasFetchMethod: boolean\n  #hasDisposeAfter: boolean\n  #hasOnInsert: boolean\n\n  /**\n   * Do not call this method unless you need to inspect the\n   * inner workings of the cache.  If anything returned by this\n   * object is modified in any way, strange breakage may occur.\n   *\n   * These fields are private for a reason!\n   *\n   * @internal\n   */\n  static unsafeExposeInternals<\n    K extends {},\n    V extends {},\n    FC extends unknown = unknown\n  >(c: LRUCache) {\n    return {\n      // properties\n      starts: c.#starts,\n      ttls: c.#ttls,\n      sizes: c.#sizes,\n      keyMap: c.#keyMap as Map,\n      keyList: c.#keyList,\n      valList: c.#valList,\n      next: c.#next,\n      prev: c.#prev,\n      get head() {\n        return c.#head\n      },\n      get tail() {\n        return c.#tail\n      },\n      free: c.#free,\n      // methods\n      isBackgroundFetch: (p: any) => c.#isBackgroundFetch(p),\n      backgroundFetch: (\n        k: K,\n        index: number | undefined,\n        options: LRUCache.FetchOptions,\n        context: any\n      ): BackgroundFetch =>\n        c.#backgroundFetch(\n          k,\n          index as Index | undefined,\n          options,\n          context\n        ),\n      moveToTail: (index: number): void =>\n        c.#moveToTail(index as Index),\n      indexes: (options?: { allowStale: boolean }) =>\n        c.#indexes(options),\n      rindexes: (options?: { allowStale: boolean }) =>\n        c.#rindexes(options),\n      isStale: (index: number | undefined) =>\n        c.#isStale(index as Index),\n    }\n  }\n\n  // Protected read-only members\n\n  /**\n   * {@link LRUCache.OptionsBase.max} (read-only)\n   */\n  get max(): LRUCache.Count {\n    return this.#max\n  }\n  /**\n   * {@link LRUCache.OptionsBase.maxSize} (read-only)\n   */\n  get maxSize(): LRUCache.Count {\n    return this.#maxSize\n  }\n  /**\n   * The total computed size of items in the cache (read-only)\n   */\n  get calculatedSize(): LRUCache.Size {\n    return this.#calculatedSize\n  }\n  /**\n   * The number of items stored in the cache (read-only)\n   */\n  get size(): LRUCache.Count {\n    return this.#size\n  }\n  /**\n   * {@link LRUCache.OptionsBase.fetchMethod} (read-only)\n   */\n  get fetchMethod(): LRUCache.Fetcher | undefined {\n    return this.#fetchMethod\n  }\n  get memoMethod(): LRUCache.Memoizer | undefined {\n    return this.#memoMethod\n  }\n  /**\n   * {@link LRUCache.OptionsBase.dispose} (read-only)\n   */\n  get dispose() {\n    return this.#dispose\n  }\n  /**\n   * {@link LRUCache.OptionsBase.onInsert} (read-only)\n   */\n  get onInsert() {\n    return this.#onInsert\n  }\n  /**\n   * {@link LRUCache.OptionsBase.disposeAfter} (read-only)\n   */\n  get disposeAfter() {\n    return this.#disposeAfter\n  }\n\n  constructor(\n    options: LRUCache.Options | LRUCache\n  ) {\n    const {\n      max = 0,\n      ttl,\n      ttlResolution = 1,\n      ttlAutopurge,\n      updateAgeOnGet,\n      updateAgeOnHas,\n      allowStale,\n      dispose,\n      onInsert,\n      disposeAfter,\n      noDisposeOnSet,\n      noUpdateTTL,\n      maxSize = 0,\n      maxEntrySize = 0,\n      sizeCalculation,\n      fetchMethod,\n      memoMethod,\n  
    noDeleteOnFetchRejection,\n      noDeleteOnStaleGet,\n      allowStaleOnFetchRejection,\n      allowStaleOnFetchAbort,\n      ignoreFetchAbort,\n    } = options\n\n    if (max !== 0 && !isPosInt(max)) {\n      throw new TypeError('max option must be a nonnegative integer')\n    }\n\n    const UintArray = max ? getUintArray(max) : Array\n    if (!UintArray) {\n      throw new Error('invalid max value: ' + max)\n    }\n\n    this.#max = max\n    this.#maxSize = maxSize\n    this.maxEntrySize = maxEntrySize || this.#maxSize\n    this.sizeCalculation = sizeCalculation\n    if (this.sizeCalculation) {\n      if (!this.#maxSize && !this.maxEntrySize) {\n        throw new TypeError(\n          'cannot set sizeCalculation without setting maxSize or maxEntrySize'\n        )\n      }\n      if (typeof this.sizeCalculation !== 'function') {\n        throw new TypeError('sizeCalculation set to non-function')\n      }\n    }\n\n    if (\n      memoMethod !== undefined &&\n      typeof memoMethod !== 'function'\n    ) {\n      throw new TypeError('memoMethod must be a function if defined')\n    }\n    this.#memoMethod = memoMethod\n\n    if (\n      fetchMethod !== undefined &&\n      typeof fetchMethod !== 'function'\n    ) {\n      throw new TypeError(\n        'fetchMethod must be a function if specified'\n      )\n    }\n    this.#fetchMethod = fetchMethod\n    this.#hasFetchMethod = !!fetchMethod\n\n    this.#keyMap = new Map()\n    this.#keyList = new Array(max).fill(undefined)\n    this.#valList = new Array(max).fill(undefined)\n    this.#next = new UintArray(max)\n    this.#prev = new UintArray(max)\n    this.#head = 0 as Index\n    this.#tail = 0 as Index\n    this.#free = Stack.create(max)\n    this.#size = 0\n    this.#calculatedSize = 0\n\n    if (typeof dispose === 'function') {\n      this.#dispose = dispose\n    }\n    if (typeof onInsert === 'function') {\n      this.#onInsert = onInsert\n    }\n    if (typeof disposeAfter === 'function') {\n      this.#disposeAfter = disposeAfter\n      this.#disposed = []\n    } else {\n      this.#disposeAfter = undefined\n      this.#disposed = undefined\n    }\n    this.#hasDispose = !!this.#dispose\n    this.#hasOnInsert = !!this.#onInsert\n    this.#hasDisposeAfter = !!this.#disposeAfter\n\n    this.noDisposeOnSet = !!noDisposeOnSet\n    this.noUpdateTTL = !!noUpdateTTL\n    this.noDeleteOnFetchRejection = !!noDeleteOnFetchRejection\n    this.allowStaleOnFetchRejection = !!allowStaleOnFetchRejection\n    this.allowStaleOnFetchAbort = !!allowStaleOnFetchAbort\n    this.ignoreFetchAbort = !!ignoreFetchAbort\n\n    // NB: maxEntrySize is set to maxSize if it's set\n    if (this.maxEntrySize !== 0) {\n      if (this.#maxSize !== 0) {\n        if (!isPosInt(this.#maxSize)) {\n          throw new TypeError(\n            'maxSize must be a positive integer if specified'\n          )\n        }\n      }\n      if (!isPosInt(this.maxEntrySize)) {\n        throw new TypeError(\n          'maxEntrySize must be a positive integer if specified'\n        )\n      }\n      this.#initializeSizeTracking()\n    }\n\n    this.allowStale = !!allowStale\n    this.noDeleteOnStaleGet = !!noDeleteOnStaleGet\n    this.updateAgeOnGet = !!updateAgeOnGet\n    this.updateAgeOnHas = !!updateAgeOnHas\n    this.ttlResolution =\n      isPosInt(ttlResolution) || ttlResolution === 0\n        ? 
ttlResolution\n        : 1\n    this.ttlAutopurge = !!ttlAutopurge\n    this.ttl = ttl || 0\n    if (this.ttl) {\n      if (!isPosInt(this.ttl)) {\n        throw new TypeError(\n          'ttl must be a positive integer if specified'\n        )\n      }\n      this.#initializeTTLTracking()\n    }\n\n    // do not allow completely unbounded caches\n    if (this.#max === 0 && this.ttl === 0 && this.#maxSize === 0) {\n      throw new TypeError(\n        'At least one of max, maxSize, or ttl is required'\n      )\n    }\n    if (!this.ttlAutopurge && !this.#max && !this.#maxSize) {\n      const code = 'LRU_CACHE_UNBOUNDED'\n      if (shouldWarn(code)) {\n        warned.add(code)\n        const msg =\n          'TTL caching without ttlAutopurge, max, or maxSize can ' +\n          'result in unbounded memory consumption.'\n        emitWarning(msg, 'UnboundedCacheWarning', code, LRUCache)\n      }\n    }\n  }\n\n  /**\n   * Return the number of ms left in the item's TTL. If item is not in cache,\n   * returns `0`. Returns `Infinity` if item is in cache without a defined TTL.\n   */\n  getRemainingTTL(key: K) {\n    return this.#keyMap.has(key) ? Infinity : 0\n  }\n\n  #initializeTTLTracking() {\n    const ttls = new ZeroArray(this.#max)\n    const starts = new ZeroArray(this.#max)\n    this.#ttls = ttls\n    this.#starts = starts\n\n    this.#setItemTTL = (index, ttl, start = perf.now()) => {\n      starts[index] = ttl !== 0 ? start : 0\n      ttls[index] = ttl\n      if (ttl !== 0 && this.ttlAutopurge) {\n        const t = setTimeout(() => {\n          if (this.#isStale(index)) {\n            this.#delete(this.#keyList[index] as K, 'expire')\n          }\n        }, ttl + 1)\n        // unref() not supported on all platforms\n        /* c8 ignore start */\n        if (t.unref) {\n          t.unref()\n        }\n        /* c8 ignore stop */\n      }\n    }\n\n    this.#updateItemAge = index => {\n      starts[index] = ttls[index] !== 0 ? 
perf.now() : 0\n    }\n\n    this.#statusTTL = (status, index) => {\n      if (ttls[index]) {\n        const ttl = ttls[index]\n        const start = starts[index]\n        /* c8 ignore next */\n        if (!ttl || !start) return\n        status.ttl = ttl\n        status.start = start\n        status.now = cachedNow || getNow()\n        const age = status.now - start\n        status.remainingTTL = ttl - age\n      }\n    }\n\n    // debounce calls to perf.now() to 1s so we're not hitting\n    // that costly call repeatedly.\n    let cachedNow = 0\n    const getNow = () => {\n      const n = perf.now()\n      if (this.ttlResolution > 0) {\n        cachedNow = n\n        const t = setTimeout(\n          () => (cachedNow = 0),\n          this.ttlResolution\n        )\n        // not available on all platforms\n        /* c8 ignore start */\n        if (t.unref) {\n          t.unref()\n        }\n        /* c8 ignore stop */\n      }\n      return n\n    }\n\n    this.getRemainingTTL = key => {\n      const index = this.#keyMap.get(key)\n      if (index === undefined) {\n        return 0\n      }\n      const ttl = ttls[index]\n      const start = starts[index]\n      if (!ttl || !start) {\n        return Infinity\n      }\n      const age = (cachedNow || getNow()) - start\n      return ttl - age\n    }\n\n    this.#isStale = index => {\n      const s = starts[index]\n      const t = ttls[index]\n      return !!t && !!s && (cachedNow || getNow()) - s > t\n    }\n  }\n\n  // conditionally set private methods related to TTL\n  #updateItemAge: (index: Index) => void = () => {}\n  #statusTTL: (status: LRUCache.Status, index: Index) => void =\n    () => {}\n  #setItemTTL: (\n    index: Index,\n    ttl: LRUCache.Milliseconds,\n    start?: LRUCache.Milliseconds\n    // ignore because we never call this if we're not already in TTL mode\n    /* c8 ignore start */\n  ) => void = () => {}\n  /* c8 ignore stop */\n\n  #isStale: (index: Index) => boolean = () => false\n\n  #initializeSizeTracking() {\n    const sizes = new ZeroArray(this.#max)\n    this.#calculatedSize = 0\n    this.#sizes = sizes\n    this.#removeItemSize = index => {\n      this.#calculatedSize -= sizes[index] as number\n      sizes[index] = 0\n    }\n    this.#requireSize = (k, v, size, sizeCalculation) => {\n      // provisionally accept background fetches.\n      // actual value size will be checked when they return.\n      if (this.#isBackgroundFetch(v)) {\n        return 0\n      }\n      if (!isPosInt(size)) {\n        if (sizeCalculation) {\n          if (typeof sizeCalculation !== 'function') {\n            throw new TypeError('sizeCalculation must be a function')\n          }\n          size = sizeCalculation(v, k)\n          if (!isPosInt(size)) {\n            throw new TypeError(\n              'sizeCalculation return invalid (expect positive integer)'\n            )\n          }\n        } else {\n          throw new TypeError(\n            'invalid size value (must be positive integer). 
' +\n              'When maxSize or maxEntrySize is used, sizeCalculation ' +\n              'or size must be set.'\n          )\n        }\n      }\n      return size\n    }\n    this.#addItemSize = (\n      index: Index,\n      size: LRUCache.Size,\n      status?: LRUCache.Status\n    ) => {\n      sizes[index] = size\n      if (this.#maxSize) {\n        const maxSize = this.#maxSize - (sizes[index] as number)\n        while (this.#calculatedSize > maxSize) {\n          this.#evict(true)\n        }\n      }\n      this.#calculatedSize += sizes[index] as number\n      if (status) {\n        status.entrySize = size\n        status.totalCalculatedSize = this.#calculatedSize\n      }\n    }\n  }\n\n  #removeItemSize: (index: Index) => void = _i => {}\n  #addItemSize: (\n    index: Index,\n    size: LRUCache.Size,\n    status?: LRUCache.Status\n  ) => void = (_i, _s, _st) => {}\n  #requireSize: (\n    k: K,\n    v: V | BackgroundFetch,\n    size?: LRUCache.Size,\n    sizeCalculation?: LRUCache.SizeCalculator\n  ) => LRUCache.Size = (\n    _k: K,\n    _v: V | BackgroundFetch,\n    size?: LRUCache.Size,\n    sizeCalculation?: LRUCache.SizeCalculator\n  ) => {\n    if (size || sizeCalculation) {\n      throw new TypeError(\n        'cannot set size without setting maxSize or maxEntrySize on cache'\n      )\n    }\n    return 0\n  };\n\n  *#indexes({ allowStale = this.allowStale } = {}) {\n    if (this.#size) {\n      for (let i = this.#tail; true; ) {\n        if (!this.#isValidIndex(i)) {\n          break\n        }\n        if (allowStale || !this.#isStale(i)) {\n          yield i\n        }\n        if (i === this.#head) {\n          break\n        } else {\n          i = this.#prev[i] as Index\n        }\n      }\n    }\n  }\n\n  *#rindexes({ allowStale = this.allowStale } = {}) {\n    if (this.#size) {\n      for (let i = this.#head; true; ) {\n        if (!this.#isValidIndex(i)) {\n          break\n        }\n        if (allowStale || !this.#isStale(i)) {\n          yield i\n        }\n        if (i === this.#tail) {\n          break\n        } else {\n          i = this.#next[i] as Index\n        }\n      }\n    }\n  }\n\n  #isValidIndex(index: Index) {\n    return (\n      index !== undefined &&\n      this.#keyMap.get(this.#keyList[index] as K) === index\n    )\n  }\n\n  /**\n   * Return a generator yielding `[key, value]` pairs,\n   * in order from most recently used to least recently used.\n   */\n  *entries() {\n    for (const i of this.#indexes()) {\n      if (\n        this.#valList[i] !== undefined &&\n        this.#keyList[i] !== undefined &&\n        !this.#isBackgroundFetch(this.#valList[i])\n      ) {\n        yield [this.#keyList[i], this.#valList[i]] as [K, V]\n      }\n    }\n  }\n\n  /**\n   * Inverse order version of {@link LRUCache.entries}\n   *\n   * Return a generator yielding `[key, value]` pairs,\n   * in order from least recently used to most recently used.\n   */\n  *rentries() {\n    for (const i of this.#rindexes()) {\n      if (\n        this.#valList[i] !== undefined &&\n        this.#keyList[i] !== undefined &&\n        !this.#isBackgroundFetch(this.#valList[i])\n      ) {\n        yield [this.#keyList[i], this.#valList[i]]\n      }\n    }\n  }\n\n  /**\n   * Return a generator yielding the keys in the cache,\n   * in order from most recently used to least recently used.\n   */\n  *keys() {\n    for (const i of this.#indexes()) {\n      const k = this.#keyList[i]\n      if (\n        k !== undefined &&\n        !this.#isBackgroundFetch(this.#valList[i])\n     
 ) {\n        yield k\n      }\n    }\n  }\n\n  /**\n   * Inverse order version of {@link LRUCache.keys}\n   *\n   * Return a generator yielding the keys in the cache,\n   * in order from least recently used to most recently used.\n   */\n  *rkeys() {\n    for (const i of this.#rindexes()) {\n      const k = this.#keyList[i]\n      if (\n        k !== undefined &&\n        !this.#isBackgroundFetch(this.#valList[i])\n      ) {\n        yield k\n      }\n    }\n  }\n\n  /**\n   * Return a generator yielding the values in the cache,\n   * in order from most recently used to least recently used.\n   */\n  *values() {\n    for (const i of this.#indexes()) {\n      const v = this.#valList[i]\n      if (\n        v !== undefined &&\n        !this.#isBackgroundFetch(this.#valList[i])\n      ) {\n        yield this.#valList[i] as V\n      }\n    }\n  }\n\n  /**\n   * Inverse order version of {@link LRUCache.values}\n   *\n   * Return a generator yielding the values in the cache,\n   * in order from least recently used to most recently used.\n   */\n  *rvalues() {\n    for (const i of this.#rindexes()) {\n      const v = this.#valList[i]\n      if (\n        v !== undefined &&\n        !this.#isBackgroundFetch(this.#valList[i])\n      ) {\n        yield this.#valList[i]\n      }\n    }\n  }\n\n  /**\n   * Iterating over the cache itself yields the same results as\n   * {@link LRUCache.entries}\n   */\n  [Symbol.iterator]() {\n    return this.entries()\n  }\n\n  /**\n   * A String value that is used in the creation of the default string\n   * description of an object. Called by the built-in method\n   * `Object.prototype.toString`.\n   */\n  [Symbol.toStringTag] = 'LRUCache'\n\n  /**\n   * Find a value for which the supplied fn method returns a truthy value,\n   * similar to `Array.find()`. fn is called as `fn(value, key, cache)`.\n   */\n  find(\n    fn: (v: V, k: K, self: LRUCache) => boolean,\n    getOptions: LRUCache.GetOptions = {}\n  ) {\n    for (const i of this.#indexes()) {\n      const v = this.#valList[i]\n      const value = this.#isBackgroundFetch(v)\n        ? v.__staleWhileFetching\n        : v\n      if (value === undefined) continue\n      if (fn(value, this.#keyList[i] as K, this)) {\n        return this.get(this.#keyList[i] as K, getOptions)\n      }\n    }\n  }\n\n  /**\n   * Call the supplied function on each item in the cache, in order from most\n   * recently used to least recently used.\n   *\n   * `fn` is called as `fn(value, key, cache)`.\n   *\n   * If `thisp` is provided, function will be called in the `this`-context of\n   * the provided object, or the cache if no `thisp` object is provided.\n   *\n   * Does not update age or recency of use, or iterate over stale values.\n   */\n  forEach(\n    fn: (v: V, k: K, self: LRUCache) => any,\n    thisp: any = this\n  ) {\n    for (const i of this.#indexes()) {\n      const v = this.#valList[i]\n      const value = this.#isBackgroundFetch(v)\n        ? v.__staleWhileFetching\n        : v\n      if (value === undefined) continue\n      fn.call(thisp, value, this.#keyList[i] as K, this)\n    }\n  }\n\n  /**\n   * The same as {@link LRUCache.forEach} but items are iterated over in\n   * reverse order.  (ie, less recently used items are iterated over first.)\n   */\n  rforEach(\n    fn: (v: V, k: K, self: LRUCache) => any,\n    thisp: any = this\n  ) {\n    for (const i of this.#rindexes()) {\n      const v = this.#valList[i]\n      const value = this.#isBackgroundFetch(v)\n        ? 
v.__staleWhileFetching\n        : v\n      if (value === undefined) continue\n      fn.call(thisp, value, this.#keyList[i] as K, this)\n    }\n  }\n\n  /**\n   * Delete any stale entries. Returns true if anything was removed,\n   * false otherwise.\n   */\n  purgeStale() {\n    let deleted = false\n    for (const i of this.#rindexes({ allowStale: true })) {\n      if (this.#isStale(i)) {\n        this.#delete(this.#keyList[i] as K, 'expire')\n        deleted = true\n      }\n    }\n    return deleted\n  }\n\n  /**\n   * Get the extended info about a given entry, to get its value, size, and\n   * TTL info simultaneously. Returns `undefined` if the key is not present.\n   *\n   * Unlike {@link LRUCache#dump}, which is designed to be portable and survive\n   * serialization, the `start` value is always the current timestamp, and the\n   * `ttl` is a calculated remaining time to live (negative if expired).\n   *\n   * Always returns stale values, if their info is found in the cache, so be\n   * sure to check for expirations (ie, a negative {@link LRUCache.Entry#ttl})\n   * if relevant.\n   */\n  info(key: K): LRUCache.Entry | undefined {\n    const i = this.#keyMap.get(key)\n    if (i === undefined) return undefined\n    const v = this.#valList[i]\n    const value: V | undefined = this.#isBackgroundFetch(v)\n      ? v.__staleWhileFetching\n      : v\n    if (value === undefined) return undefined\n    const entry: LRUCache.Entry = { value }\n    if (this.#ttls && this.#starts) {\n      const ttl = this.#ttls[i]\n      const start = this.#starts[i]\n      if (ttl && start) {\n        const remain = ttl - (perf.now() - start)\n        entry.ttl = remain\n        entry.start = Date.now()\n      }\n    }\n    if (this.#sizes) {\n      entry.size = this.#sizes[i]\n    }\n    return entry\n  }\n\n  /**\n   * Return an array of [key, {@link LRUCache.Entry}] tuples which can be\n   * passed to {@link LRUCache#load}.\n   *\n   * The `start` fields are calculated relative to a portable `Date.now()`\n   * timestamp, even if `performance.now()` is available.\n   *\n   * Stale entries are always included in the `dump`, even if\n   * {@link LRUCache.OptionsBase.allowStale} is false.\n   *\n   * Note: this returns an actual array, not a generator, so it can be more\n   * easily passed around.\n   */\n  dump() {\n    const arr: [K, LRUCache.Entry][] = []\n    for (const i of this.#indexes({ allowStale: true })) {\n      const key = this.#keyList[i]\n      const v = this.#valList[i]\n      const value: V | undefined = this.#isBackgroundFetch(v)\n        ? 
v.__staleWhileFetching\n        : v\n      if (value === undefined || key === undefined) continue\n      const entry: LRUCache.Entry = { value }\n      if (this.#ttls && this.#starts) {\n        entry.ttl = this.#ttls[i]\n        // always dump the start relative to a portable timestamp\n        // it's ok for this to be a bit slow, it's a rare operation.\n        const age = perf.now() - (this.#starts[i] as number)\n        entry.start = Math.floor(Date.now() - age)\n      }\n      if (this.#sizes) {\n        entry.size = this.#sizes[i]\n      }\n      arr.unshift([key, entry])\n    }\n    return arr\n  }\n\n  /**\n   * Reset the cache and load in the items in entries in the order listed.\n   *\n   * The shape of the resulting cache may be different if the same options are\n   * not used in both caches.\n   *\n   * The `start` fields are assumed to be calculated relative to a portable\n   * `Date.now()` timestamp, even if `performance.now()` is available.\n   */\n  load(arr: [K, LRUCache.Entry][]) {\n    this.clear()\n    for (const [key, entry] of arr) {\n      if (entry.start) {\n        // entry.start is a portable timestamp, but we may be using\n        // node's performance.now(), so calculate the offset, so that\n        // we get the intended remaining TTL, no matter how long it's\n        // been on ice.\n        //\n        // it's ok for this to be a bit slow, it's a rare operation.\n        const age = Date.now() - entry.start\n        entry.start = perf.now() - age\n      }\n      this.set(key, entry.value, entry)\n    }\n  }\n\n  /**\n   * Add a value to the cache.\n   *\n   * Note: if `undefined` is specified as a value, this is an alias for\n   * {@link LRUCache#delete}\n   *\n   * Fields on the {@link LRUCache.SetOptions} options param will override\n   * their corresponding values in the constructor options for the scope\n   * of this single `set()` operation.\n   *\n   * If `start` is provided, then that will set the effective start\n   * time for the TTL calculation. Note that this must be a previous\n   * value of `performance.now()` if supported, or a previous value of\n   * `Date.now()` if not.\n   *\n   * Options object may also include `size`, which will prevent\n   * calling the `sizeCalculation` function and just use the specified\n   * number if it is a positive integer, and `noDisposeOnSet` which\n   * will prevent calling a `dispose` function in the case of\n   * overwrites.\n   *\n   * If the `size` (or return value of `sizeCalculation`) for a given\n   * entry is greater than `maxEntrySize`, then the item will not be\n   * added to the cache.\n   *\n   * Will update the recency of the entry.\n   *\n   * If the value is `undefined`, then this is an alias for\n   * `cache.delete(key)`. 
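The `dump()`/`load()` round-trip and the `set(k, undefined)` alias described above are easiest to see in a small sketch; the keys and option values here are arbitrary:

```ts
import { LRUCache } from 'lru-cache'

// At least one of max, maxSize, or ttl is required by the constructor.
const cache = new LRUCache<string, number>({ max: 10, ttl: 60_000 })

cache.set('a', 1)
cache.set('a', undefined) // alias for cache.delete('a'); undefined is never stored
console.log(cache.has('a')) // false

// dump()/load() round-trip: entry start times are rebased against
// Date.now(), so remaining TTLs survive serialization across processes.
cache.set('b', 2)
const snapshot = cache.dump()
const copy = new LRUCache<string, number>({ max: 10, ttl: 60_000 })
copy.load(snapshot)
console.log(copy.get('b')) // 2
```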
`undefined` is never stored in the cache.\n   */\n  set(\n    k: K,\n    v: V | BackgroundFetch | undefined,\n    setOptions: LRUCache.SetOptions = {}\n  ) {\n    if (v === undefined) {\n      this.delete(k)\n      return this\n    }\n    const {\n      ttl = this.ttl,\n      start,\n      noDisposeOnSet = this.noDisposeOnSet,\n      sizeCalculation = this.sizeCalculation,\n      status,\n    } = setOptions\n    let { noUpdateTTL = this.noUpdateTTL } = setOptions\n\n    const size = this.#requireSize(\n      k,\n      v,\n      setOptions.size || 0,\n      sizeCalculation\n    )\n    // if the item doesn't fit, don't do anything\n    // NB: maxEntrySize set to maxSize by default\n    if (this.maxEntrySize && size > this.maxEntrySize) {\n      if (status) {\n        status.set = 'miss'\n        status.maxEntrySizeExceeded = true\n      }\n      // have to delete, in case something is there already.\n      this.#delete(k, 'set')\n      return this\n    }\n    let index = this.#size === 0 ? undefined : this.#keyMap.get(k)\n    if (index === undefined) {\n      // addition\n      index = (\n        this.#size === 0\n          ? this.#tail\n          : this.#free.length !== 0\n          ? this.#free.pop()\n          : this.#size === this.#max\n          ? this.#evict(false)\n          : this.#size\n      ) as Index\n      this.#keyList[index] = k\n      this.#valList[index] = v\n      this.#keyMap.set(k, index)\n      this.#next[this.#tail] = index\n      this.#prev[index] = this.#tail\n      this.#tail = index\n      this.#size++\n      this.#addItemSize(index, size, status)\n      if (status) status.set = 'add'\n      noUpdateTTL = false\n      if (this.#hasOnInsert) {\n        this.#onInsert?.(v as V, k, 'add')\n      }\n    } else {\n      // update\n      this.#moveToTail(index)\n      const oldVal = this.#valList[index] as V | BackgroundFetch\n      if (v !== oldVal) {\n        if (this.#hasFetchMethod && this.#isBackgroundFetch(oldVal)) {\n          oldVal.__abortController.abort(new Error('replaced'))\n          const { __staleWhileFetching: s } = oldVal\n          if (s !== undefined && !noDisposeOnSet) {\n            if (this.#hasDispose) {\n              this.#dispose?.(s as V, k, 'set')\n            }\n            if (this.#hasDisposeAfter) {\n              this.#disposed?.push([s as V, k, 'set'])\n            }\n          }\n        } else if (!noDisposeOnSet) {\n          if (this.#hasDispose) {\n            this.#dispose?.(oldVal as V, k, 'set')\n          }\n          if (this.#hasDisposeAfter) {\n            this.#disposed?.push([oldVal as V, k, 'set'])\n          }\n        }\n        this.#removeItemSize(index)\n        this.#addItemSize(index, size, status)\n        this.#valList[index] = v\n        if (status) {\n          status.set = 'replace'\n          const oldValue =\n            oldVal && this.#isBackgroundFetch(oldVal)\n              ? oldVal.__staleWhileFetching\n              : oldVal\n          if (oldValue !== undefined) status.oldValue = oldValue\n        }\n      } else if (status) {\n        status.set = 'update'\n      }\n\n      if (this.#hasOnInsert) {\n        this.#onInsert?.(v as V, k, v === oldVal ? 
'update' : 'replace');\n      }\n    }\n    if (ttl !== 0 && !this.#ttls) {\n      this.#initializeTTLTracking()\n    }\n    if (this.#ttls) {\n      if (!noUpdateTTL) {\n        this.#setItemTTL(index, ttl, start)\n      }\n      if (status) this.#statusTTL(status, index)\n    }\n    if (!noDisposeOnSet && this.#hasDisposeAfter && this.#disposed) {\n      const dt = this.#disposed\n      let task: DisposeTask | undefined\n      while ((task = dt?.shift())) {\n        this.#disposeAfter?.(...task)\n      }\n    }\n    return this\n  }\n\n  /**\n   * Evict the least recently used item, returning its value or\n   * `undefined` if cache is empty.\n   */\n  pop(): V | undefined {\n    try {\n      while (this.#size) {\n        const val = this.#valList[this.#head]\n        this.#evict(true)\n        if (this.#isBackgroundFetch(val)) {\n          if (val.__staleWhileFetching) {\n            return val.__staleWhileFetching\n          }\n        } else if (val !== undefined) {\n          return val\n        }\n      }\n    } finally {\n      if (this.#hasDisposeAfter && this.#disposed) {\n        const dt = this.#disposed\n        let task: DisposeTask | undefined\n        while ((task = dt?.shift())) {\n          this.#disposeAfter?.(...task)\n        }\n      }\n    }\n  }\n\n  #evict(free: boolean) {\n    const head = this.#head\n    const k = this.#keyList[head] as K\n    const v = this.#valList[head] as V\n    if (this.#hasFetchMethod && this.#isBackgroundFetch(v)) {\n      v.__abortController.abort(new Error('evicted'))\n    } else if (this.#hasDispose || this.#hasDisposeAfter) {\n      if (this.#hasDispose) {\n        this.#dispose?.(v, k, 'evict')\n      }\n      if (this.#hasDisposeAfter) {\n        this.#disposed?.push([v, k, 'evict'])\n      }\n    }\n    this.#removeItemSize(head)\n    // if we aren't about to use the index, then null these out\n    if (free) {\n      this.#keyList[head] = undefined\n      this.#valList[head] = undefined\n      this.#free.push(head)\n    }\n    if (this.#size === 1) {\n      this.#head = this.#tail = 0 as Index\n      this.#free.length = 0\n    } else {\n      this.#head = this.#next[head] as Index\n    }\n    this.#keyMap.delete(k)\n    this.#size--\n    return head\n  }\n\n  /**\n   * Check if a key is in the cache, without updating the recency of use.\n   * Will return false if the item is stale, even though it is technically\n   * in the cache.\n   *\n   * Check if a key is in the cache, without updating the recency of\n   * use. Age is updated if {@link LRUCache.OptionsBase.updateAgeOnHas} is set\n   * to `true` in either the options or the constructor.\n   *\n   * Will return `false` if the item is stale, even though it is technically in\n   * the cache. 
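A quick illustration of the `has()` semantics described here, using the public `status` option; the key and TTL values are arbitrary:

```ts
import { LRUCache } from 'lru-cache'

const cache = new LRUCache<string, string>({ max: 100, ttl: 1_000 })
cache.set('k', 'v')

// has() never bumps recency; age is only refreshed when updateAgeOnHas
// is set. The status object tells a stale entry apart from a true miss.
const status: LRUCache.Status<string> = {}
cache.has('k', { status })
console.log(status.has) // 'hit', 'stale', or 'miss'
```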
The difference can be determined (if it matters) by using a\n   * `status` argument, and inspecting the `has` field.\n   *\n   * Will not update item age unless\n   * {@link LRUCache.OptionsBase.updateAgeOnHas} is set.\n   */\n  has(k: K, hasOptions: LRUCache.HasOptions = {}) {\n    const { updateAgeOnHas = this.updateAgeOnHas, status } =\n      hasOptions\n    const index = this.#keyMap.get(k)\n    if (index !== undefined) {\n      const v = this.#valList[index]\n      if (\n        this.#isBackgroundFetch(v) &&\n        v.__staleWhileFetching === undefined\n      ) {\n        return false\n      }\n      if (!this.#isStale(index)) {\n        if (updateAgeOnHas) {\n          this.#updateItemAge(index)\n        }\n        if (status) {\n          status.has = 'hit'\n          this.#statusTTL(status, index)\n        }\n        return true\n      } else if (status) {\n        status.has = 'stale'\n        this.#statusTTL(status, index)\n      }\n    } else if (status) {\n      status.has = 'miss'\n    }\n    return false\n  }\n\n  /**\n   * Like {@link LRUCache#get} but doesn't update recency or delete stale\n   * items.\n   *\n   * Returns `undefined` if the item is stale, unless\n   * {@link LRUCache.OptionsBase.allowStale} is set.\n   */\n  peek(k: K, peekOptions: LRUCache.PeekOptions = {}) {\n    const { allowStale = this.allowStale } = peekOptions\n    const index = this.#keyMap.get(k)\n    if (\n      index === undefined ||\n      (!allowStale && this.#isStale(index))\n    ) {\n      return\n    }\n    const v = this.#valList[index]\n    // either stale and allowed, or forcing a refresh of non-stale value\n    return this.#isBackgroundFetch(v) ? v.__staleWhileFetching : v\n  }\n\n  #backgroundFetch(\n    k: K,\n    index: Index | undefined,\n    options: LRUCache.FetchOptions,\n    context: any\n  ): BackgroundFetch {\n    const v = index === undefined ? 
undefined : this.#valList[index]\n    if (this.#isBackgroundFetch(v)) {\n      return v\n    }\n\n    const ac = new AC()\n    const { signal } = options\n    // when/if our AC signals, then stop listening to theirs.\n    signal?.addEventListener('abort', () => ac.abort(signal.reason), {\n      signal: ac.signal,\n    })\n\n    const fetchOpts = {\n      signal: ac.signal,\n      options,\n      context,\n    }\n\n    const cb = (\n      v: V | undefined,\n      updateCache = false\n    ): V | undefined => {\n      const { aborted } = ac.signal\n      const ignoreAbort = options.ignoreFetchAbort && v !== undefined\n      if (options.status) {\n        if (aborted && !updateCache) {\n          options.status.fetchAborted = true\n          options.status.fetchError = ac.signal.reason\n          if (ignoreAbort) options.status.fetchAbortIgnored = true\n        } else {\n          options.status.fetchResolved = true\n        }\n      }\n      if (aborted && !ignoreAbort && !updateCache) {\n        return fetchFail(ac.signal.reason)\n      }\n      // either we didn't abort, and are still here, or we did, and ignored\n      const bf = p as BackgroundFetch\n      if (this.#valList[index as Index] === p) {\n        if (v === undefined) {\n          if (bf.__staleWhileFetching) {\n            this.#valList[index as Index] = bf.__staleWhileFetching\n          } else {\n            this.#delete(k, 'fetch')\n          }\n        } else {\n          if (options.status) options.status.fetchUpdated = true\n          this.set(k, v, fetchOpts.options)\n        }\n      }\n      return v\n    }\n\n    const eb = (er: any) => {\n      if (options.status) {\n        options.status.fetchRejected = true\n        options.status.fetchError = er\n      }\n      return fetchFail(er)\n    }\n\n    const fetchFail = (er: any): V | undefined => {\n      const { aborted } = ac.signal\n      const allowStaleAborted =\n        aborted && options.allowStaleOnFetchAbort\n      const allowStale =\n        allowStaleAborted || options.allowStaleOnFetchRejection\n      const noDelete = allowStale || options.noDeleteOnFetchRejection\n      const bf = p as BackgroundFetch\n      if (this.#valList[index as Index] === p) {\n        // if we allow stale on fetch rejections, then we need to ensure that\n        // the stale value is not removed from the cache when the fetch fails.\n        const del = !noDelete || bf.__staleWhileFetching === undefined\n        if (del) {\n          this.#delete(k, 'fetch')\n        } else if (!allowStaleAborted) {\n          // still replace the *promise* with the stale value,\n          // since we are done with the promise at this point.\n          // leave it untouched if we're still waiting for an\n          // aborted background fetch that hasn't yet returned.\n          this.#valList[index as Index] = bf.__staleWhileFetching\n        }\n      }\n      if (allowStale) {\n        if (options.status && bf.__staleWhileFetching !== undefined) {\n          options.status.returnedStale = true\n        }\n        return bf.__staleWhileFetching\n      } else if (bf.__returned === bf) {\n        throw er\n      }\n    }\n\n    const pcall = (\n      res: (v: V | undefined) => void,\n      rej: (e: any) => void\n    ) => {\n      const fmp = this.#fetchMethod?.(k, v, fetchOpts)\n      if (fmp && fmp instanceof Promise) {\n        fmp.then(v => res(v === undefined ? 
undefined : v), rej)\n      }\n      // ignored, we go until we finish, regardless.\n      // defer check until we are actually aborting,\n      // so fetchMethod can override.\n      ac.signal.addEventListener('abort', () => {\n        if (\n          !options.ignoreFetchAbort ||\n          options.allowStaleOnFetchAbort\n        ) {\n          res(undefined)\n          // when it eventually resolves, update the cache.\n          if (options.allowStaleOnFetchAbort) {\n            res = v => cb(v, true)\n          }\n        }\n      })\n    }\n\n    if (options.status) options.status.fetchDispatched = true\n    const p = new Promise(pcall).then(cb, eb)\n    const bf: BackgroundFetch = Object.assign(p, {\n      __abortController: ac,\n      __staleWhileFetching: v,\n      __returned: undefined,\n    })\n\n    if (index === undefined) {\n      // internal, don't expose status.\n      this.set(k, bf, { ...fetchOpts.options, status: undefined })\n      index = this.#keyMap.get(k)\n    } else {\n      this.#valList[index] = bf\n    }\n    return bf\n  }\n\n  #isBackgroundFetch(p: any): p is BackgroundFetch {\n    if (!this.#hasFetchMethod) return false\n    const b = p as BackgroundFetch\n    return (\n      !!b &&\n      b instanceof Promise &&\n      b.hasOwnProperty('__staleWhileFetching') &&\n      b.__abortController instanceof AC\n    )\n  }\n\n  /**\n   * Make an asynchronous cached fetch using the\n   * {@link LRUCache.OptionsBase.fetchMethod} function.\n   *\n   * If the value is in the cache and not stale, then the returned\n   * Promise resolves to the value.\n   *\n   * If not in the cache, or beyond its TTL staleness, then\n   * `fetchMethod(key, staleValue, { options, signal, context })` is\n   * called, and the value returned will be added to the cache once\n   * resolved.\n   *\n   * If called with `allowStale`, and an asynchronous fetch is\n   * currently in progress to reload a stale value, then the former\n   * stale value will be returned.\n   *\n   * If called with `forceRefresh`, then the cached item will be\n   * re-fetched, even if it is not stale. However, if `allowStale` is also\n   * set, then the old value will still be returned. This is useful\n   * in cases where you want to force a reload of a cached value. 
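A hedged sketch of the `fetchMethod`, `allowStale`, and `forceRefresh` interplay described above; the endpoint URL is hypothetical and stands in for any async lookup:

```ts
import { LRUCache } from 'lru-cache'

const users = new LRUCache<string, string>({
  max: 500,
  ttl: 5 * 60_000,
  allowStale: true,
  fetchMethod: async (key, _staleValue, { signal }) => {
    const res = await fetch(`https://example.invalid/users/${key}`, { signal })
    return res.text()
  },
})

async function demo() {
  await users.fetch('alice') // miss: dispatches fetchMethod
  await users.fetch('alice') // hit: served from cache
  // forceRefresh re-fetches a non-stale entry; with allowStale set, the
  // old value is returned immediately while the refresh runs behind it.
  await users.fetch('alice', { forceRefresh: true })
}
```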
If\n   * a background fetch is already in progress, then `forceRefresh`\n   * has no effect.\n   *\n   * If multiple fetches for the same key are issued, then they will all be\n   * coalesced into a single call to fetchMethod.\n   *\n   * Note that this means that handling options such as\n   * {@link LRUCache.OptionsBase.allowStaleOnFetchAbort},\n   * {@link LRUCache.FetchOptions.signal},\n   * and {@link LRUCache.OptionsBase.allowStaleOnFetchRejection} will be\n   * determined by the FIRST fetch() call for a given key.\n   *\n   * This is a known (fixable) shortcoming which will be addressed when\n   * someone complains about it, as the fix would involve added complexity and\n   * may not be worth the costs for this edge case.\n   *\n   * If {@link LRUCache.OptionsBase.fetchMethod} is not specified, then this is\n   * effectively an alias for `Promise.resolve(cache.get(key))`.\n   *\n   * When the fetch method resolves to a value, if the fetch has not\n   * been aborted due to deletion, eviction, or being overwritten,\n   * then it is added to the cache using the options provided.\n   *\n   * If the key is evicted or deleted before the `fetchMethod`\n   * resolves, then the AbortSignal passed to the `fetchMethod` will\n   * receive an `abort` event, and the promise returned by `fetch()`\n   * will reject with the reason for the abort.\n   *\n   * If a `signal` is passed to the `fetch()` call, then aborting the\n   * signal will abort the fetch and cause the `fetch()` promise to\n   * reject with the reason provided.\n   *\n   * **Setting `context`**\n   *\n   * If an `FC` type is set to a type other than `unknown`, `void`, or\n   * `undefined` in the {@link LRUCache} constructor, then all\n   * calls to `cache.fetch()` _must_ provide a `context` option. If\n   * set to `undefined` or `void`, then calls to fetch _must not_\n   * provide a `context` option.\n   *\n   * The `context` param allows you to provide arbitrary data that\n   * might be relevant in the course of fetching the data. It is only\n   * relevant for the course of a single `fetch()` operation, and\n   * discarded afterwards.\n   *\n   * **Note: `fetch()` calls are inflight-unique**\n   *\n   * If you call `fetch()` multiple times with the same key value,\n   * then every call after the first will resolve on the same\n   * promise<sup>1</sup>,\n   * _even if they have different settings that would otherwise change\n   * the behavior of the fetch_, such as `noDeleteOnFetchRejection`\n   * or `ignoreFetchAbort`.\n   *\n   * In most cases, this is not a problem (in fact, only fetching\n   * something once is what you probably want, if you're caching in\n   * the first place). If you are changing the fetch() options\n   * dramatically between runs, there's a good chance that you might\n   * be trying to fit divergent semantics into a single object, and\n   * would be better off with multiple cache instances.\n   *\n   * **1**: Ie, they're not the \"same Promise\", but they resolve at\n   * the same time, because they're both waiting on the same\n   * underlying fetchMethod response.\n   */\n\n  fetch(\n    k: K,\n    fetchOptions: unknown extends FC\n      ? LRUCache.FetchOptions\n      : FC extends undefined | void\n      ? LRUCache.FetchOptionsNoContext\n      : LRUCache.FetchOptionsWithContext\n  ): Promise\n\n  // this overload not allowed if context is required\n  fetch(\n    k: unknown extends FC\n      ? K\n      : FC extends undefined | void\n      ? K\n      : never,\n    fetchOptions?: unknown extends FC\n      ? 
LRUCache.FetchOptions\n      : FC extends undefined | void\n      ? LRUCache.FetchOptionsNoContext\n      : never\n  ): Promise\n\n  async fetch(\n    k: K,\n    fetchOptions: LRUCache.FetchOptions = {}\n  ): Promise {\n    const {\n      // get options\n      allowStale = this.allowStale,\n      updateAgeOnGet = this.updateAgeOnGet,\n      noDeleteOnStaleGet = this.noDeleteOnStaleGet,\n      // set options\n      ttl = this.ttl,\n      noDisposeOnSet = this.noDisposeOnSet,\n      size = 0,\n      sizeCalculation = this.sizeCalculation,\n      noUpdateTTL = this.noUpdateTTL,\n      // fetch exclusive options\n      noDeleteOnFetchRejection = this.noDeleteOnFetchRejection,\n      allowStaleOnFetchRejection = this.allowStaleOnFetchRejection,\n      ignoreFetchAbort = this.ignoreFetchAbort,\n      allowStaleOnFetchAbort = this.allowStaleOnFetchAbort,\n      context,\n      forceRefresh = false,\n      status,\n      signal,\n    } = fetchOptions\n\n    if (!this.#hasFetchMethod) {\n      if (status) status.fetch = 'get'\n      return this.get(k, {\n        allowStale,\n        updateAgeOnGet,\n        noDeleteOnStaleGet,\n        status,\n      })\n    }\n\n    const options = {\n      allowStale,\n      updateAgeOnGet,\n      noDeleteOnStaleGet,\n      ttl,\n      noDisposeOnSet,\n      size,\n      sizeCalculation,\n      noUpdateTTL,\n      noDeleteOnFetchRejection,\n      allowStaleOnFetchRejection,\n      allowStaleOnFetchAbort,\n      ignoreFetchAbort,\n      status,\n      signal,\n    }\n\n    let index = this.#keyMap.get(k)\n    if (index === undefined) {\n      if (status) status.fetch = 'miss'\n      const p = this.#backgroundFetch(k, index, options, context)\n      return (p.__returned = p)\n    } else {\n      // in cache, maybe already fetching\n      const v = this.#valList[index]\n      if (this.#isBackgroundFetch(v)) {\n        const stale =\n          allowStale && v.__staleWhileFetching !== undefined\n        if (status) {\n          status.fetch = 'inflight'\n          if (stale) status.returnedStale = true\n        }\n        return stale ? v.__staleWhileFetching : (v.__returned = v)\n      }\n\n      // if we force a refresh, that means do NOT serve the cached value,\n      // unless we are already in the process of refreshing the cache.\n      const isStale = this.#isStale(index)\n      if (!forceRefresh && !isStale) {\n        if (status) status.fetch = 'hit'\n        this.#moveToTail(index)\n        if (updateAgeOnGet) {\n          this.#updateItemAge(index)\n        }\n        if (status) this.#statusTTL(status, index)\n        return v\n      }\n\n      // ok, it is stale or a forced refresh, and not already fetching.\n      // refresh the cache.\n      const p = this.#backgroundFetch(k, index, options, context)\n      const hasStale = p.__staleWhileFetching !== undefined\n      const staleVal = hasStale && allowStale\n      if (status) {\n        status.fetch = isStale ? 'stale' : 'refresh'\n        if (staleVal && isStale) status.returnedStale = true\n      }\n      return staleVal ? p.__staleWhileFetching : (p.__returned = p)\n    }\n  }\n\n  /**\n   * In some cases, `cache.fetch()` may resolve to `undefined`, either because\n   * a {@link LRUCache.OptionsBase#fetchMethod} was not provided (turning\n   * `cache.fetch(k)` into just an async wrapper around `cache.get(k)`) or\n   * because `ignoreFetchAbort` was specified (either to the constructor or\n   * in the {@link LRUCache.FetchOptions}). 
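To make the `forceFetch()` contract concrete, a small sketch; the `fetchMethod` here is a stand-in that only knows one key:

```ts
import { LRUCache } from 'lru-cache'

const cache = new LRUCache<string, string>({
  max: 10,
  fetchMethod: async key => (key === 'known' ? 'value' : undefined),
})

async function demo() {
  // Resolves with a defined value, so no undefined check is needed.
  const v = await cache.forceFetch('known')
  try {
    await cache.forceFetch('missing')
  } catch (er) {
    // Rejects where fetch() would have resolved to undefined.
  }
}
```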
Also, the\n   * {@link LRUCache.OptionsBase.fetchMethod} may return `undefined` or `void`, making\n   * the test even more complicated.\n   *\n   * Because inferring the cases where `undefined` might be returned is so\n   * cumbersome, but testing for `undefined` can also be annoying, this method\n   * can be used, which will reject if `this.fetch()` resolves to undefined.\n   */\n  forceFetch(\n    k: K,\n    fetchOptions: unknown extends FC\n      ? LRUCache.FetchOptions\n      : FC extends undefined | void\n      ? LRUCache.FetchOptionsNoContext\n      : LRUCache.FetchOptionsWithContext\n  ): Promise\n  // this overload not allowed if context is required\n  forceFetch(\n    k: unknown extends FC\n      ? K\n      : FC extends undefined | void\n      ? K\n      : never,\n    fetchOptions?: unknown extends FC\n      ? LRUCache.FetchOptions\n      : FC extends undefined | void\n      ? LRUCache.FetchOptionsNoContext\n      : never\n  ): Promise\n  async forceFetch(\n    k: K,\n    fetchOptions: LRUCache.FetchOptions = {}\n  ): Promise {\n    const v = await this.fetch(\n      k,\n      fetchOptions as unknown extends FC\n        ? LRUCache.FetchOptions\n        : FC extends undefined | void\n        ? LRUCache.FetchOptionsNoContext\n        : LRUCache.FetchOptionsWithContext\n    )\n    if (v === undefined) throw new Error('fetch() returned undefined')\n    return v\n  }\n\n  /**\n   * If the key is found in the cache, then this is equivalent to\n   * {@link LRUCache#get}. If not in the cache, then calculate the value using\n   * the {@link LRUCache.OptionsBase.memoMethod}, and add it to the cache.\n   *\n   * If an `FC` type is set to a type other than `unknown`, `void`, or\n   * `undefined` in the LRUCache constructor, then all calls to `cache.memo()`\n   * _must_ provide a `context` option. If set to `undefined` or `void`, then\n   * calls to memo _must not_ provide a `context` option.\n   *\n   * The `context` param allows you to provide arbitrary data that might be\n   * relevant in the course of fetching the data. It is only relevant for the\n   * course of a single `memo()` operation, and discarded afterwards.\n   */\n  memo(\n    k: K,\n    memoOptions: unknown extends FC\n      ? LRUCache.MemoOptions\n      : FC extends undefined | void\n      ? LRUCache.MemoOptionsNoContext\n      : LRUCache.MemoOptionsWithContext\n  ): V\n  // this overload not allowed if context is required\n  memo(\n    k: unknown extends FC\n      ? K\n      : FC extends undefined | void\n      ? K\n      : never,\n    memoOptions?: unknown extends FC\n      ? LRUCache.MemoOptions\n      : FC extends undefined | void\n      ? LRUCache.MemoOptionsNoContext\n      : never\n  ): V\n  memo(k: K, memoOptions: LRUCache.MemoOptions = {}) {\n    const memoMethod = this.#memoMethod\n    if (!memoMethod) {\n      throw new Error('no memoMethod provided to constructor')\n    }\n    const { context, forceRefresh, ...options } = memoOptions\n    const v = this.get(k, options)\n    if (!forceRefresh && v !== undefined) return v\n    const vv = memoMethod(k, v, {\n      options,\n      context,\n    } as LRUCache.MemoizerOptions)\n    this.set(k, vv, options)\n    return vv\n  }\n\n  /**\n   * Return a value from the cache. 
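A brief sketch of the `memo()` flow implemented above; `expensiveLength` is a hypothetical stand-in for any synchronous computation:

```ts
import { LRUCache } from 'lru-cache'

const expensiveLength = (s: string) => s.length

// memo() is a synchronous get-or-compute backed by the memoMethod option.
const cache = new LRUCache<string, number>({
  max: 100,
  memoMethod: (key, _staleValue, _opts) => expensiveLength(key),
})

cache.memo('hello') // computes 5 via memoMethod and caches it
cache.memo('hello') // equivalent to get(); no recomputation
cache.memo('hello', { forceRefresh: true }) // recomputes even on a hit
```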
Will update the recency of the cache\n   * entry found.\n   *\n   * If the key is not found, get() will return `undefined`.\n   */\n  get(k: K, getOptions: LRUCache.GetOptions = {}) {\n    const {\n      allowStale = this.allowStale,\n      updateAgeOnGet = this.updateAgeOnGet,\n      noDeleteOnStaleGet = this.noDeleteOnStaleGet,\n      status,\n    } = getOptions\n    const index = this.#keyMap.get(k)\n    if (index !== undefined) {\n      const value = this.#valList[index]\n      const fetching = this.#isBackgroundFetch(value)\n      if (status) this.#statusTTL(status, index)\n      if (this.#isStale(index)) {\n        if (status) status.get = 'stale'\n        // delete only if not an in-flight background fetch\n        if (!fetching) {\n          if (!noDeleteOnStaleGet) {\n            this.#delete(k, 'expire')\n          }\n          if (status && allowStale) status.returnedStale = true\n          return allowStale ? value : undefined\n        } else {\n          if (\n            status &&\n            allowStale &&\n            value.__staleWhileFetching !== undefined\n          ) {\n            status.returnedStale = true\n          }\n          return allowStale ? value.__staleWhileFetching : undefined\n        }\n      } else {\n        if (status) status.get = 'hit'\n        // if we're currently fetching it, we don't actually have it yet\n        // it's not stale, which means this isn't a staleWhileRefetching.\n        // If it's not stale, and fetching, AND has a __staleWhileFetching\n        // value, then that means the user fetched with {forceRefresh:true},\n        // so it's safe to return that value.\n        if (fetching) {\n          return value.__staleWhileFetching\n        }\n        this.#moveToTail(index)\n        if (updateAgeOnGet) {\n          this.#updateItemAge(index)\n        }\n        return value\n      }\n    } else if (status) {\n      status.get = 'miss'\n    }\n  }\n\n  #connect(p: Index, n: Index) {\n    this.#prev[n] = p\n    this.#next[p] = n\n  }\n\n  #moveToTail(index: Index): void {\n    // if tail already, nothing to do\n    // if head, move head to next[index]\n    // else\n    //   move next[prev[index]] to next[index] (head has no prev)\n    //   move prev[next[index]] to prev[index]\n    // prev[index] = tail\n    // next[tail] = index\n    // tail = index\n    if (index !== this.#tail) {\n      if (index === this.#head) {\n        this.#head = this.#next[index] as Index\n      } else {\n        this.#connect(\n          this.#prev[index] as Index,\n          this.#next[index] as Index\n        )\n      }\n      this.#connect(this.#tail, index)\n      this.#tail = index\n    }\n  }\n\n  /**\n   * Deletes a key out of the cache.\n   *\n   * Returns true if the key was deleted, false otherwise.\n   */\n  delete(k: K) {\n    return this.#delete(k, 'delete')\n  }\n\n  #delete(k: K, reason: LRUCache.DisposeReason) {\n    let deleted = false\n    if (this.#size !== 0) {\n      const index = this.#keyMap.get(k)\n      if (index !== undefined) {\n        deleted = true\n        if (this.#size === 1) {\n          this.#clear(reason)\n        } else {\n          this.#removeItemSize(index)\n          const v = this.#valList[index]\n          if (this.#isBackgroundFetch(v)) {\n            v.__abortController.abort(new Error('deleted'))\n          } else if (this.#hasDispose || this.#hasDisposeAfter) {\n            if (this.#hasDispose) {\n              this.#dispose?.(v as V, k, reason)\n            }\n            if (this.#hasDisposeAfter) {\n        
      this.#disposed?.push([v as V, k, reason])\n            }\n          }\n          this.#keyMap.delete(k)\n          this.#keyList[index] = undefined\n          this.#valList[index] = undefined\n          if (index === this.#tail) {\n            this.#tail = this.#prev[index] as Index\n          } else if (index === this.#head) {\n            this.#head = this.#next[index] as Index\n          } else {\n            const pi = this.#prev[index] as number\n            this.#next[pi] = this.#next[index] as number\n            const ni = this.#next[index] as number\n            this.#prev[ni] = this.#prev[index] as number\n          }\n          this.#size--\n          this.#free.push(index)\n        }\n      }\n    }\n    if (this.#hasDisposeAfter && this.#disposed?.length) {\n      const dt = this.#disposed\n      let task: DisposeTask | undefined\n      while ((task = dt?.shift())) {\n        this.#disposeAfter?.(...task)\n      }\n    }\n    return deleted\n  }\n\n  /**\n   * Clear the cache entirely, throwing away all values.\n   */\n  clear() {\n    return this.#clear('delete')\n  }\n  #clear(reason: LRUCache.DisposeReason) {\n    for (const index of this.#rindexes({ allowStale: true })) {\n      const v = this.#valList[index]\n      if (this.#isBackgroundFetch(v)) {\n        v.__abortController.abort(new Error('deleted'))\n      } else {\n        const k = this.#keyList[index]\n        if (this.#hasDispose) {\n          this.#dispose?.(v as V, k as K, reason)\n        }\n        if (this.#hasDisposeAfter) {\n          this.#disposed?.push([v as V, k as K, reason])\n        }\n      }\n    }\n\n    this.#keyMap.clear()\n    this.#valList.fill(undefined)\n    this.#keyList.fill(undefined)\n    if (this.#ttls && this.#starts) {\n      this.#ttls.fill(0)\n      this.#starts.fill(0)\n    }\n    if (this.#sizes) {\n      this.#sizes.fill(0)\n    }\n    this.#head = 0 as Index\n    this.#tail = 0 as Index\n    this.#free.length = 0\n    this.#calculatedSize = 0\n    this.#size = 0\n    if (this.#hasDisposeAfter && this.#disposed) {\n      const dt = this.#disposed\n      let task: DisposeTask | undefined\n      while ((task = dt?.shift())) {\n        this.#disposeAfter?.(...task)\n      }\n    }\n  }\n}\n", "import { LRUCache } from 'lru-cache'\nimport { posix, win32 } from 'node:path'\n\nimport { fileURLToPath } from 'node:url'\n\nimport {\n  lstatSync,\n  readdir as readdirCB,\n  readdirSync,\n  readlinkSync,\n  realpathSync as rps,\n} from 'fs'\nimport * as actualFS from 'node:fs'\n\nconst realpathSync = rps.native\n// TODO: test perf of fs/promises realpath vs realpathCB,\n// since the promises one uses realpath.native\n\nimport { lstat, readdir, readlink, realpath } from 'node:fs/promises'\n\nimport { Minipass } from 'minipass'\nimport type { Dirent, Stats } from 'node:fs'\n\n/**\n * An object that will be used to override the default `fs`\n * methods.  
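A sketch of how such a partial override might be supplied, assuming the containing PathScurry constructor accepts an `fs` option of this shape; only the listed method is replaced, and everything else falls through to Node's built-ins:

```ts
import { PathScurry } from 'path-scurry'
import * as fs from 'node:fs'

// Wrap readdirSync for tracing; the other fs methods keep their defaults.
const pw = new PathScurry(process.cwd(), {
  fs: {
    readdirSync: (path, options) => {
      console.error('readdirSync:', path)
      return fs.readdirSync(path, options)
    },
  },
})
```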
Any methods that are not overridden will use Node's\n * built-in implementations.\n *\n * - lstatSync\n * - readdir (callback `withFileTypes` Dirent variant, used for\n *   readdirCB and most walks)\n * - readdirSync\n * - readlinkSync\n * - realpathSync\n * - promises: Object containing the following async methods:\n *   - lstat\n *   - readdir (Dirent variant only)\n *   - readlink\n *   - realpath\n */\nexport interface FSOption {\n  lstatSync?: (path: string) => Stats\n  readdir?: (\n    path: string,\n    options: { withFileTypes: true },\n    cb: (er: NodeJS.ErrnoException | null, entries?: Dirent[]) => any,\n  ) => void\n  readdirSync?: (\n    path: string,\n    options: { withFileTypes: true },\n  ) => Dirent[]\n  readlinkSync?: (path: string) => string\n  realpathSync?: (path: string) => string\n  promises?: {\n    lstat?: (path: string) => Promise\n    readdir?: (\n      path: string,\n      options: { withFileTypes: true },\n    ) => Promise\n    readlink?: (path: string) => Promise\n    realpath?: (path: string) => Promise\n    [k: string]: any\n  }\n  [k: string]: any\n}\n\ninterface FSValue {\n  lstatSync: (path: string) => Stats\n  readdir: (\n    path: string,\n    options: { withFileTypes: true },\n    cb: (er: NodeJS.ErrnoException | null, entries?: Dirent[]) => any,\n  ) => void\n  readdirSync: (path: string, options: { withFileTypes: true }) => Dirent[]\n  readlinkSync: (path: string) => string\n  realpathSync: (path: string) => string\n  promises: {\n    lstat: (path: string) => Promise\n    readdir: (\n      path: string,\n      options: { withFileTypes: true },\n    ) => Promise\n    readlink: (path: string) => Promise\n    realpath: (path: string) => Promise\n    [k: string]: any\n  }\n  [k: string]: any\n}\n\nconst defaultFS: FSValue = {\n  lstatSync,\n  readdir: readdirCB,\n  readdirSync,\n  readlinkSync,\n  realpathSync,\n  promises: {\n    lstat,\n    readdir,\n    readlink,\n    realpath,\n  },\n}\n\n// if they just gave us require('fs') then use our default\nconst fsFromOption = (fsOption?: FSOption): FSValue =>\n  !fsOption || fsOption === defaultFS || fsOption === actualFS ?\n    defaultFS\n  : {\n      ...defaultFS,\n      ...fsOption,\n      promises: {\n        ...defaultFS.promises,\n        ...(fsOption.promises || {}),\n      },\n    }\n\n// turn something like //?/c:/ into c:\\\nconst uncDriveRegexp = /^\\\\\\\\\\?\\\\([a-z]:)\\\\?$/i\nconst uncToDrive = (rootPath: string): string =>\n  rootPath.replace(/\\//g, '\\\\').replace(uncDriveRegexp, '$1\\\\')\n\n// windows paths are separated by either / or \\\nconst eitherSep = /[\\\\\\/]/\n\nconst UNKNOWN = 0 // may not even exist, for all we know\nconst IFIFO = 0b0001\nconst IFCHR = 0b0010\nconst IFDIR = 0b0100\nconst IFBLK = 0b0110\nconst IFREG = 0b1000\nconst IFLNK = 0b1010\nconst IFSOCK = 0b1100\nconst IFMT = 0b1111\n\nexport type Type =\n  | 'Unknown'\n  | 'FIFO'\n  | 'CharacterDevice'\n  | 'Directory'\n  | 'BlockDevice'\n  | 'File'\n  | 'SymbolicLink'\n  | 'Socket'\n\n// mask to unset low 4 bits\nconst IFMT_UNKNOWN = ~IFMT\n\n// set after successfully calling readdir() and getting entries.\nconst READDIR_CALLED = 0b0000_0001_0000\n// set after a successful lstat()\nconst LSTAT_CALLED = 0b0000_0010_0000\n// set if an entry (or one of its parents) is definitely not a dir\nconst ENOTDIR = 0b0000_0100_0000\n// set if an entry (or one of its parents) does not exist\n// (can also be set on lstat errors like EACCES or ENAMETOOLONG)\nconst ENOENT = 0b0000_1000_0000\n// cannot have child entries -- also 
verify &IFMT is either IFDIR or IFLNK\n// set if we fail to readlink\nconst ENOREADLINK = 0b0001_0000_0000\n// set if we know realpath() will fail\nconst ENOREALPATH = 0b0010_0000_0000\n\nconst ENOCHILD = ENOTDIR | ENOENT | ENOREALPATH\nconst TYPEMASK = 0b0011_1111_1111\n\nconst entToType = (s: Dirent | Stats) =>\n  s.isFile() ? IFREG\n  : s.isDirectory() ? IFDIR\n  : s.isSymbolicLink() ? IFLNK\n  : s.isCharacterDevice() ? IFCHR\n  : s.isBlockDevice() ? IFBLK\n  : s.isSocket() ? IFSOCK\n  : s.isFIFO() ? IFIFO\n  : UNKNOWN\n\n// normalize unicode path names\nconst normalizeCache = new Map()\nconst normalize = (s: string) => {\n  const c = normalizeCache.get(s)\n  if (c) return c\n  const n = s.normalize('NFKD')\n  normalizeCache.set(s, n)\n  return n\n}\n\nconst normalizeNocaseCache = new Map()\nconst normalizeNocase = (s: string) => {\n  const c = normalizeNocaseCache.get(s)\n  if (c) return c\n  const n = normalize(s.toLowerCase())\n  normalizeNocaseCache.set(s, n)\n  return n\n}\n\n/**\n * Options that may be provided to the Path constructor\n */\nexport interface PathOpts {\n  fullpath?: string\n  relative?: string\n  relativePosix?: string\n  parent?: PathBase\n  /**\n   * See {@link FSOption}\n   */\n  fs?: FSOption\n}\n\n/**\n * An LRUCache for storing resolved path strings or Path objects.\n * @internal\n */\nexport class ResolveCache extends LRUCache {\n  constructor() {\n    super({ max: 256 })\n  }\n}\n\n// In order to prevent blowing out the js heap by allocating hundreds of\n// thousands of Path entries when walking extremely large trees, the \"children\"\n// in this tree are represented by storing an array of Path entries in an\n// LRUCache, indexed by the parent.  At any time, Path.children() may return an\n// empty array, indicating that it doesn't know about any of its children, and\n// thus has to rebuild that cache.  This is fine, it just means that we don't\n// benefit as much from having the cached entries, but huge directory walks\n// don't blow out the stack, and smaller ones are still as fast as possible.\n//\n// It does impose some complexity when building up the readdir data, because we\n// need to pass a reference to the children array that we started with.\n\n/**\n * an LRUCache for storing child entries.\n * @internal\n */\nexport class ChildrenCache extends LRUCache {\n  constructor(maxSize: number = 16 * 1024) {\n    super({\n      maxSize,\n      // parent + children\n      sizeCalculation: a => a.length + 1,\n    })\n  }\n}\n\n/**\n * Array of Path objects, plus a marker indicating the first provisional entry\n *\n * @internal\n */\nexport type Children = PathBase[] & { provisional: number }\n\nconst setAsCwd = Symbol('PathScurry setAsCwd')\n\n/**\n * Path objects are sort of like a super-powered\n * {@link https://nodejs.org/docs/latest/api/fs.html#class-fsdirent fs.Dirent}\n *\n * Each one represents a single filesystem entry on disk, which may or may not\n * exist. It includes methods for reading various types of information via\n * lstat, readlink, and readdir, and caches all information to the greatest\n * degree possible.\n *\n * Note that fs operations that would normally throw will instead return an\n * \"empty\" value. This is in order to prevent excessive overhead from error\n * stack traces.\n */\nexport abstract class PathBase implements Dirent {\n  /**\n   * the basename of this path\n   *\n   * **Important**: *always* test the path name against any test string\n   * using the {@link isNamed} method, and not by directly comparing this\n   * string. 
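To see why `isNamed()` matters, a sketch with the same name in two unicode normal forms; the file name is arbitrary:

```ts
import { PathScurry } from 'path-scurry'

const pw = new PathScurry(process.cwd())

// 'café' in NFC and NFD are different JS strings but the same path name.
const nfc = 'caf\u00e9.txt'
const nfd = 'cafe\u0301.txt'
const entry = pw.cwd.resolve(nfc)

console.log(entry.name === nfd) // false: raw string comparison
console.log(entry.isNamed(nfd)) // true: compares normalized forms
```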
Otherwise, unicode path strings that the system sees as identical\n   * will not be properly treated as the same path, leading to incorrect\n   * behavior and possible security issues.\n   */\n  name: string\n  /**\n   * the Path entry corresponding to the path root.\n   *\n   * @internal\n   */\n  root: PathBase\n  /**\n   * All roots found within the current PathScurry family\n   *\n   * @internal\n   */\n  roots: { [k: string]: PathBase }\n  /**\n   * a reference to the parent path, or undefined in the case of root entries\n   *\n   * @internal\n   */\n  parent?: PathBase\n  /**\n   * boolean indicating whether paths are compared case-insensitively\n   * @internal\n   */\n  nocase: boolean\n\n  /**\n   * boolean indicating that this path is the current working directory\n   * of the PathScurry collection that contains it.\n   */\n  isCWD: boolean = false\n\n  /**\n   * the string or regexp used to split paths. On posix, it is `'/'`, and on\n   * windows it is a RegExp matching either `'/'` or `'\\\\'`\n   */\n  abstract splitSep: string | RegExp\n  /**\n   * The path separator string to use when joining paths\n   */\n  abstract sep: string\n\n  // potential default fs override\n  #fs: FSValue\n\n  // Stats fields\n  #dev?: number\n  get dev() {\n    return this.#dev\n  }\n  #mode?: number\n  get mode() {\n    return this.#mode\n  }\n  #nlink?: number\n  get nlink() {\n    return this.#nlink\n  }\n  #uid?: number\n  get uid() {\n    return this.#uid\n  }\n  #gid?: number\n  get gid() {\n    return this.#gid\n  }\n  #rdev?: number\n  get rdev() {\n    return this.#rdev\n  }\n  #blksize?: number\n  get blksize() {\n    return this.#blksize\n  }\n  #ino?: number\n  get ino() {\n    return this.#ino\n  }\n  #size?: number\n  get size() {\n    return this.#size\n  }\n  #blocks?: number\n  get blocks() {\n    return this.#blocks\n  }\n  #atimeMs?: number\n  get atimeMs() {\n    return this.#atimeMs\n  }\n  #mtimeMs?: number\n  get mtimeMs() {\n    return this.#mtimeMs\n  }\n  #ctimeMs?: number\n  get ctimeMs() {\n    return this.#ctimeMs\n  }\n  #birthtimeMs?: number\n  get birthtimeMs() {\n    return this.#birthtimeMs\n  }\n  #atime?: Date\n  get atime() {\n    return this.#atime\n  }\n  #mtime?: Date\n  get mtime() {\n    return this.#mtime\n  }\n  #ctime?: Date\n  get ctime() {\n    return this.#ctime\n  }\n  #birthtime?: Date\n  get birthtime() {\n    return this.#birthtime\n  }\n\n  #matchName: string\n  #depth?: number\n  #fullpath?: string\n  #fullpathPosix?: string\n  #relative?: string\n  #relativePosix?: string\n  #type: number\n  #children: ChildrenCache\n  #linkTarget?: PathBase\n  #realpath?: PathBase\n\n  /**\n   * This property is for compatibility with the Dirent class as of\n   * Node v20, where Dirent['parentPath'] refers to the path of the\n   * directory that was passed to readdir. For root entries, it's the path\n   * to the entry itself.\n   */\n  get parentPath(): string {\n    return (this.parent || this).fullpath()\n  }\n\n  /**\n   * Deprecated alias for Dirent['parentPath']. Somewhat counterintuitively,\n   * this property refers to the *parent* path, not the path object itself.\n   *\n   * @deprecated\n   */\n  get path(): string {\n    return this.parentPath\n  }\n\n  /**\n   * Do not create new Path objects directly.  
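A sketch of the intended access pattern, going through a PathScurry rather than a constructor; the path strings here are arbitrary:

```ts
import { PathScurry } from 'path-scurry'

async function demo() {
  // Path objects are always obtained from a PathScurry, never constructed.
  const pw = new PathScurry(process.cwd())
  const file = pw.cwd.resolve('src/index.ts') // a (possibly provisional) Path
  const known = await pw.lstat(file) // undefined if the entry does not exist
  if (known?.isFile()) {
    console.log(known.fullpath())
  }
}
```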
They should always be accessed\n   * via the PathScurry class or other methods on the Path class.\n   *\n   * @internal\n   */\n  constructor(\n    name: string,\n    type: number = UNKNOWN,\n    root: PathBase | undefined,\n    roots: { [k: string]: PathBase },\n    nocase: boolean,\n    children: ChildrenCache,\n    opts: PathOpts,\n  ) {\n    this.name = name\n    this.#matchName = nocase ? normalizeNocase(name) : normalize(name)\n    this.#type = type & TYPEMASK\n    this.nocase = nocase\n    this.roots = roots\n    this.root = root || this\n    this.#children = children\n    this.#fullpath = opts.fullpath\n    this.#relative = opts.relative\n    this.#relativePosix = opts.relativePosix\n    this.parent = opts.parent\n    if (this.parent) {\n      this.#fs = this.parent.#fs\n    } else {\n      this.#fs = fsFromOption(opts.fs)\n    }\n  }\n\n  /**\n   * Returns the depth of the Path object from its root.\n   *\n   * For example, a path at `/foo/bar` would have a depth of 2.\n   */\n  depth(): number {\n    if (this.#depth !== undefined) return this.#depth\n    if (!this.parent) return (this.#depth = 0)\n    return (this.#depth = this.parent.depth() + 1)\n  }\n\n  /**\n   * @internal\n   */\n  abstract getRootString(path: string): string\n  /**\n   * @internal\n   */\n  abstract getRoot(rootPath: string): PathBase\n  /**\n   * @internal\n   */\n  abstract newChild(name: string, type?: number, opts?: PathOpts): PathBase\n\n  /**\n   * @internal\n   */\n  childrenCache() {\n    return this.#children\n  }\n\n  /**\n   * Get the Path object referenced by the string path, resolved from this Path\n   */\n  resolve(path?: string): PathBase {\n    if (!path) {\n      return this\n    }\n    const rootPath = this.getRootString(path)\n    const dir = path.substring(rootPath.length)\n    const dirParts = dir.split(this.splitSep)\n    const result: PathBase =\n      rootPath ?\n        this.getRoot(rootPath).#resolveParts(dirParts)\n      : this.#resolveParts(dirParts)\n    return result\n  }\n\n  #resolveParts(dirParts: string[]) {\n    let p: PathBase = this\n    for (const part of dirParts) {\n      p = p.child(part)\n    }\n    return p\n  }\n\n  /**\n   * Returns the cached children Path objects, if still available.  If they\n   * have fallen out of the cache, then returns an empty array, and resets the\n   * READDIR_CALLED bit, so that future calls to readdir() will require an fs\n   * lookup.\n   *\n   * @internal\n   */\n  children(): Children {\n    const cached = this.#children.get(this)\n    if (cached) {\n      return cached\n    }\n    const children: Children = Object.assign([], { provisional: 0 })\n    this.#children.set(this, children)\n    this.#type &= ~READDIR_CALLED\n    return children\n  }\n\n  /**\n   * Resolves a path portion and returns or creates the child Path.\n   *\n   * Returns `this` if pathPart is `''` or `'.'`, or `parent` if pathPart is\n   * `'..'`.\n   *\n   * This should not be called directly.  If `pathPart` contains any path\n   * separators, it will lead to unsafe undefined behavior.\n   *\n   * Use `Path.resolve()` instead.\n   *\n   * @internal\n   */\n  child(pathPart: string, opts?: PathOpts): PathBase {\n    if (pathPart === '' || pathPart === '.') {\n      return this\n    }\n    if (pathPart === '..') {\n      return this.parent || this\n    }\n\n    // find the child\n    const children = this.children()\n    const name =\n      this.nocase ? 
  /**
   * The relative path from the cwd. If it does not share an ancestor with
   * the cwd, then this ends up being equivalent to the fullpath()
   */
  relative(): string {
    if (this.isCWD) return ''
    if (this.#relative !== undefined) {
      return this.#relative
    }
    const name = this.name
    const p = this.parent
    if (!p) {
      return (this.#relative = this.name)
    }
    const pv = p.relative()
    return pv + (!pv || !p.parent ? '' : this.sep) + name
  }

  /**
   * The relative path from the cwd, using / as the path separator.
   * If it does not share an ancestor with
   * the cwd, then this ends up being equivalent to the fullpathPosix()
   * On posix systems, this is identical to relative().
   */
  relativePosix(): string {
    if (this.sep === '/') return this.relative()
    if (this.isCWD) return ''
    if (this.#relativePosix !== undefined) return this.#relativePosix
    const name = this.name
    const p = this.parent
    if (!p) {
      return (this.#relativePosix = this.fullpathPosix())
    }
    const pv = p.relativePosix()
    return pv + (!pv || !p.parent ? '' : '/') + name
  }

  /**
   * The fully resolved path string for this Path entry
   */
  fullpath(): string {
    if (this.#fullpath !== undefined) {
      return this.#fullpath
    }
    const name = this.name
    const p = this.parent
    if (!p) {
      return (this.#fullpath = this.name)
    }
    const pv = p.fullpath()
    const fp = pv + (!p.parent ? '' : this.sep) + name
    return (this.#fullpath = fp)
  }

  /**
   * On platforms other than windows, this is identical to fullpath.
   *
   * On windows, this is overridden to return the forward-slash form of the
   * full UNC path.
   */
  fullpathPosix(): string {
    if (this.#fullpathPosix !== undefined) return this.#fullpathPosix
    if (this.sep === '/') return (this.#fullpathPosix = this.fullpath())
    if (!this.parent) {
      const p = this.fullpath().replace(/\\/g, '/')
      if (/^[a-z]:\//i.test(p)) {
        return (this.#fullpathPosix = `//?/${p}`)
      } else {
        return (this.#fullpathPosix = p)
      }
    }
    const p = this.parent
    const pfpp = p.fullpathPosix()
    const fpp = pfpp + (!pfpp || !p.parent ? '' : '/') + this.name
    return (this.#fullpathPosix = fpp)
  }

  /**
   * Is the Path of an unknown type?
   *
   * Note that we might know *something* about it if there has been a previous
   * filesystem operation, for example that it does not exist, or is not a
   * link, or whether it has child entries.
   */
  isUnknown(): boolean {
    return (this.#type & IFMT) === UNKNOWN
  }

  isType(type: Type): boolean {
    return this[`is${type}`]()
  }

  getType(): Type {
    return (
      this.isUnknown() ? 'Unknown'
      : this.isDirectory() ? 'Directory'
      : this.isFile() ? 'File'
      : this.isSymbolicLink() ? 'SymbolicLink'
      : this.isFIFO() ? 'FIFO'
      : this.isCharacterDevice() ? 'CharacterDevice'
      : this.isBlockDevice() ? 'BlockDevice'
      : /* c8 ignore start */ this.isSocket() ? 'Socket'
      : 'Unknown'
    )
    /* c8 ignore stop */
  }

  /**
   * Is the Path a regular file?
   */
  isFile(): boolean {
    return (this.#type & IFMT) === IFREG
  }

  /**
   * Is the Path a directory?
   */
  isDirectory(): boolean {
    return (this.#type & IFMT) === IFDIR
  }

  /**
   * Is the path a character device?
   */
  isCharacterDevice(): boolean {
    return (this.#type & IFMT) === IFCHR
  }

  /**
   * Is the path a block device?
   */
  isBlockDevice(): boolean {
    return (this.#type & IFMT) === IFBLK
  }

  /**
   * Is the path a FIFO pipe?
   */
  isFIFO(): boolean {
    return (this.#type & IFMT) === IFIFO
  }

  /**
   * Is the path a socket?
   */
  isSocket(): boolean {
    return (this.#type & IFMT) === IFSOCK
  }

  /**
   * Is the path a symbolic link?
   */
  isSymbolicLink(): boolean {
    return (this.#type & IFLNK) === IFLNK
  }

  /**
   * Return the entry if it has been subject of a successful lstat, or
   * undefined otherwise.
   *
   * Does not read the filesystem, so an undefined result *could* simply
   * mean that we haven't called lstat on it.
   */
  lstatCached(): PathBase | undefined {
    return this.#type & LSTAT_CALLED ? this : undefined
  }

  /**
   * Return the cached link target if the entry has been the subject of a
   * successful readlink, or undefined otherwise.
   *
   * Does not read the filesystem, so an undefined result *could* just mean we
   * don't have any cached data. Only use it if you are very sure that a
   * readlink() has been called at some point.
   */
  readlinkCached(): PathBase | undefined {
    return this.#linkTarget
  }

  /**
   * Returns the cached realpath target if the entry has been the subject
   * of a successful realpath, or undefined otherwise.
   *
   * Does not read the filesystem, so an undefined result *could* just mean we
   * don't have any cached data. Only use it if you are very sure that a
   * realpath() has been called at some point.
   */
  realpathCached(): PathBase | undefined {
    return this.#realpath
  }

  /**
   * Returns the cached child Path entries array if the entry has been the
   * subject of a successful readdir(), or [] otherwise.
   *
   * Does not read the filesystem, so an empty array *could* just mean we
   * don't have any cached data. Only use it if you are very sure that a
   * readdir() has been called recently enough to still be valid.
   */
  readdirCached(): PathBase[] {
    const children = this.children()
    return children.slice(0, children.provisional)
  }
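  /*
   * Editor's note (illustrative, not part of the original path-scurry
   * source): the *Cached() accessors above never touch the filesystem, so a
   * falsy result is ambiguous. A sketch of the intended pattern, assuming a
   * hypothetical symlink Path named `link`:
   *
   *   const target = link.readlinkCached() ?? (await link.readlink())
   *   // cached value if a readlink() already succeeded, else one fs call
   */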
  /**
   * Return true if it's worth trying to readlink.  Ie, we don't (yet) have
   * any indication that readlink will definitely fail.
   *
   * Returns false if the path is known to not be a symlink, if a previous
   * readlink failed, or if the entry does not exist.
   */
  canReadlink(): boolean {
    if (this.#linkTarget) return true
    if (!this.parent) return false
    // cases where it cannot possibly succeed
    const ifmt = this.#type & IFMT
    return !(
      (ifmt !== UNKNOWN && ifmt !== IFLNK) ||
      this.#type & ENOREADLINK ||
      this.#type & ENOENT
    )
  }

  /**
   * Return true if readdir has previously been successfully called on this
   * path, indicating that cachedReaddir() is likely valid.
   */
  calledReaddir(): boolean {
    return !!(this.#type & READDIR_CALLED)
  }

  /**
   * Returns true if the path is known to not exist. That is, a previous lstat
   * or readdir failed to verify its existence when that would have been
   * expected, or a parent entry was marked either enoent or enotdir.
   */
  isENOENT(): boolean {
    return !!(this.#type & ENOENT)
  }

  /**
   * Return true if the path is a match for the given path name.  This handles
   * case sensitivity and unicode normalization.
   *
   * Note: even on case-sensitive systems, it is **not** safe to test the
   * equality of the `.name` property to determine whether a given pathname
   * matches, due to unicode normalization mismatches.
   *
   * Always use this method instead of testing the `path.name` property
   * directly.
   */
  isNamed(n: string): boolean {
    return !this.nocase ?
        this.#matchName === normalize(n)
      : this.#matchName === normalizeNocase(n)
  }

  /**
   * Return the Path object corresponding to the target of a symbolic link.
   *
   * If the Path is not a symbolic link, or if the readlink call fails for any
   * reason, `undefined` is returned.
   *
   * Result is cached, and thus may be outdated if the filesystem is mutated.
   */
  async readlink(): Promise<PathBase | undefined> {
    const target = this.#linkTarget
    if (target) {
      return target
    }
    if (!this.canReadlink()) {
      return undefined
    }
    /* c8 ignore start */
    // already covered by the canReadlink test, here for ts grumples
    if (!this.parent) {
      return undefined
    }
    /* c8 ignore stop */
    try {
      const read = await this.#fs.promises.readlink(this.fullpath())
      const linkTarget = (await this.parent.realpath())?.resolve(read)
      if (linkTarget) {
        return (this.#linkTarget = linkTarget)
      }
    } catch (er) {
      this.#readlinkFail((er as NodeJS.ErrnoException).code)
      return undefined
    }
  }

  /**
   * Synchronous {@link PathBase.readlink}
   */
  readlinkSync(): PathBase | undefined {
    const target = this.#linkTarget
    if (target) {
      return target
    }
    if (!this.canReadlink()) {
      return undefined
    }
    /* c8 ignore start */
    // already covered by the canReadlink test, here for ts grumples
    if (!this.parent) {
      return undefined
    }
    /* c8 ignore stop */
    try {
      const read = this.#fs.readlinkSync(this.fullpath())
      const linkTarget = this.parent.realpathSync()?.resolve(read)
      if (linkTarget) {
        return (this.#linkTarget = linkTarget)
      }
    } catch (er) {
      this.#readlinkFail((er as NodeJS.ErrnoException).code)
      return undefined
    }
  }

  #readdirSuccess(children: Children) {
    // succeeded, mark readdir called bit
    this.#type |= READDIR_CALLED
    // mark all remaining provisional children as ENOENT
    for (let p = children.provisional; p < children.length; p++) {
      const c = children[p]
      if (c) c.#markENOENT()
    }
  }

  #markENOENT() {
    // mark as UNKNOWN and ENOENT
    if (this.#type & ENOENT) return
    this.#type = (this.#type | ENOENT) & IFMT_UNKNOWN
    this.#markChildrenENOENT()
  }

  #markChildrenENOENT() {
    // all children are provisional and do not exist
    const children = this.children()
    children.provisional = 0
    for (const p of children) {
      p.#markENOENT()
    }
  }

  #markENOREALPATH() {
    this.#type |= ENOREALPATH
    this.#markENOTDIR()
  }

  // save the information when we know the entry is not a dir
  #markENOTDIR() {
    // entry is not a directory, so any children can't exist.
    // this *should* be impossible, since any children created
    // after it's been marked ENOTDIR should be marked ENOENT,
    // so it won't even get to this point.
    /* c8 ignore start */
    if (this.#type & ENOTDIR) return
    /* c8 ignore stop */
    let t = this.#type
    // this could happen if we stat a dir, then delete it,
    // then try to read it or one of its children.
    if ((t & IFMT) === IFDIR) t &= IFMT_UNKNOWN
    this.#type = t | ENOTDIR
    this.#markChildrenENOENT()
  }

  #readdirFail(code: string = '') {
    // markENOTDIR and markENOENT also set provisional=0
    if (code === 'ENOTDIR' || code === 'EPERM') {
      this.#markENOTDIR()
    } else if (code === 'ENOENT') {
      this.#markENOENT()
    } else {
      this.children().provisional = 0
    }
  }

  #lstatFail(code: string = '') {
    // Windows just raises ENOENT in this case, disable for win CI
    /* c8 ignore start */
    if (code === 'ENOTDIR') {
      // already know it has a parent by this point
      const p = this.parent as PathBase
      p.#markENOTDIR()
    } else if (code === 'ENOENT') {
      /* c8 ignore stop */
      this.#markENOENT()
    }
  }

  #readlinkFail(code: string = '') {
    let ter = this.#type
    ter |= ENOREADLINK
    if (code === 'ENOENT') ter |= ENOENT
    // windows gets a weird error when you try to readlink a file
    if (code === 'EINVAL' || code === 'UNKNOWN') {
      // exists, but not a symlink, we don't know WHAT it is, so remove
      // all IFMT bits.
      ter &= IFMT_UNKNOWN
    }
    this.#type = ter
    // windows just gets ENOENT in this case.  We do cover the case,
    // just disabled because it's impossible on Windows CI
    /* c8 ignore start */
    if (code === 'ENOTDIR' && this.parent) {
      this.parent.#markENOTDIR()
    }
    /* c8 ignore stop */
  }

  #readdirAddChild(e: Dirent, c: Children) {
    return (
      this.#readdirMaybePromoteChild(e, c) ||
      this.#readdirAddNewChild(e, c)
    )
  }

  #readdirAddNewChild(e: Dirent, c: Children): PathBase {
    // alloc new entry at head, so it's never provisional
    const type = entToType(e)
    const child = this.newChild(e.name, type, { parent: this })
    const ifmt = child.#type & IFMT
    if (ifmt !== IFDIR && ifmt !== IFLNK && ifmt !== UNKNOWN) {
      child.#type |= ENOTDIR
    }
    c.unshift(child)
    c.provisional++
    return child
  }

  #readdirMaybePromoteChild(e: Dirent, c: Children): PathBase | undefined {
    for (let p = c.provisional; p < c.length; p++) {
      const pchild = c[p]
      const name =
        this.nocase ? normalizeNocase(e.name) : normalize(e.name)
      if (name !== pchild!.#matchName) {
        continue
      }

      return this.#readdirPromoteChild(e, pchild!, p, c)
    }
  }
  #readdirPromoteChild(
    e: Dirent,
    p: PathBase,
    index: number,
    c: Children,
  ): PathBase {
    const v = p.name
    // retain any other flags, but set ifmt from dirent
    p.#type = (p.#type & IFMT_UNKNOWN) | entToType(e)
    // case sensitivity fixing when we learn the true name.
    if (v !== e.name) p.name = e.name

    // just advance provisional index (potentially off the list),
    // otherwise we have to splice/pop it out and re-insert at head
    if (index !== c.provisional) {
      if (index === c.length - 1) c.pop()
      else c.splice(index, 1)
      c.unshift(p)
    }
    c.provisional++
    return p
  }

  /**
   * Call lstat() on this Path, and update all known information that can be
   * determined.
   *
   * Note that unlike `fs.lstat()`, the returned value does not contain some
   * information, such as `mode`, `dev`, `nlink`, and `ino`.  If that
   * information is required, you will need to call `fs.lstat` yourself.
   *
   * If the Path refers to a nonexistent file, or if the lstat call fails for
   * any reason, `undefined` is returned.  Otherwise the updated Path object is
   * returned.
   *
   * Results are cached, and thus may be out of date if the filesystem is
   * mutated.
   */
  async lstat(): Promise<PathBase | undefined> {
    if ((this.#type & ENOENT) === 0) {
      try {
        this.#applyStat(await this.#fs.promises.lstat(this.fullpath()))
        return this
      } catch (er) {
        this.#lstatFail((er as NodeJS.ErrnoException).code)
      }
    }
  }

  /**
   * synchronous {@link PathBase.lstat}
   */
  lstatSync(): PathBase | undefined {
    if ((this.#type & ENOENT) === 0) {
      try {
        this.#applyStat(this.#fs.lstatSync(this.fullpath()))
        return this
      } catch (er) {
        this.#lstatFail((er as NodeJS.ErrnoException).code)
      }
    }
  }

  #applyStat(st: Stats) {
    const {
      atime,
      atimeMs,
      birthtime,
      birthtimeMs,
      blksize,
      blocks,
      ctime,
      ctimeMs,
      dev,
      gid,
      ino,
      mode,
      mtime,
      mtimeMs,
      nlink,
      rdev,
      size,
      uid,
    } = st
    this.#atime = atime
    this.#atimeMs = atimeMs
    this.#birthtime = birthtime
    this.#birthtimeMs = birthtimeMs
    this.#blksize = blksize
    this.#blocks = blocks
    this.#ctime = ctime
    this.#ctimeMs = ctimeMs
    this.#dev = dev
    this.#gid = gid
    this.#ino = ino
    this.#mode = mode
    this.#mtime = mtime
    this.#mtimeMs = mtimeMs
    this.#nlink = nlink
    this.#rdev = rdev
    this.#size = size
    this.#uid = uid
    const ifmt = entToType(st)
    // retain any other flags, but set the ifmt
    this.#type = (this.#type & IFMT_UNKNOWN) | ifmt | LSTAT_CALLED
    if (ifmt !== UNKNOWN && ifmt !== IFDIR && ifmt !== IFLNK) {
      this.#type |= ENOTDIR
    }
  }

  #onReaddirCB: ((
    er: NodeJS.ErrnoException | null,
    entries: Path[],
  ) => any)[] = []
  #readdirCBInFlight: boolean = false
  #callOnReaddirCB(children: Path[]) {
    this.#readdirCBInFlight = false
    const cbs = this.#onReaddirCB.slice()
    this.#onReaddirCB.length = 0
    cbs.forEach(cb => cb(null, children))
  }

  /**
   * Standard node-style callback interface to get list of directory entries.
   *
   * If the Path cannot or does not contain any children, then an empty array
   * is returned.
   *
   * Results are cached, and thus may be out of date if the filesystem is
   * mutated.
   *
   * @param cb The callback called with (er, entries).  Note that the `er`
   * param is somewhat extraneous, as all readdir() errors are handled and
   * simply result in an empty set of entries being returned.
   * @param allowZalgo Boolean indicating that immediately known results should
   * *not* be deferred with `queueMicrotask`. Defaults to `false`. Release
   * zalgo at your peril, the dark pony lord is devious and unforgiving.
   */
  readdirCB(
    cb: (er: NodeJS.ErrnoException | null, entries: PathBase[]) => any,
    allowZalgo: boolean = false,
  ): void {
    if (!this.canReaddir()) {
      if (allowZalgo) cb(null, [])
      else queueMicrotask(() => cb(null, []))
      return
    }

    const children = this.children()
    if (this.calledReaddir()) {
      const c = children.slice(0, children.provisional)
      if (allowZalgo) cb(null, c)
      else queueMicrotask(() => cb(null, c))
      return
    }

    // don't have to worry about zalgo at this point.
    this.#onReaddirCB.push(cb)
    if (this.#readdirCBInFlight) {
      return
    }
    this.#readdirCBInFlight = true

    // else read the directory, fill up children
    // de-provisionalize any provisional children.
    const fullpath = this.fullpath()
    this.#fs.readdir(fullpath, { withFileTypes: true }, (er, entries) => {
      if (er) {
        this.#readdirFail((er as NodeJS.ErrnoException).code)
        children.provisional = 0
      } else {
        // if we didn't get an error, we always get entries.
        //@ts-ignore
        for (const e of entries) {
          this.#readdirAddChild(e, children)
        }
        this.#readdirSuccess(children)
      }
      this.#callOnReaddirCB(children.slice(0, children.provisional))
      return
    })
  }

  #asyncReaddirInFlight?: Promise<void>

  /**
   * Return an array of known child entries.
   *
   * If the Path cannot or does not contain any children, then an empty array
   * is returned.
   *
   * Results are cached, and thus may be out of date if the filesystem is
   * mutated.
   */
  async readdir(): Promise<PathBase[]> {
    if (!this.canReaddir()) {
      return []
    }

    const children = this.children()
    if (this.calledReaddir()) {
      return children.slice(0, children.provisional)
    }

    // else read the directory, fill up children
    // de-provisionalize any provisional children.
    const fullpath = this.fullpath()
    if (this.#asyncReaddirInFlight) {
      await this.#asyncReaddirInFlight
    } else {
      /* c8 ignore start */
      let resolve: () => void = () => {}
      /* c8 ignore stop */
      this.#asyncReaddirInFlight = new Promise<void>(
        res => (resolve = res),
      )
      try {
        for (const e of await this.#fs.promises.readdir(fullpath, {
          withFileTypes: true,
        })) {
          this.#readdirAddChild(e, children)
        }
        this.#readdirSuccess(children)
      } catch (er) {
        this.#readdirFail((er as NodeJS.ErrnoException).code)
        children.provisional = 0
      }
      this.#asyncReaddirInFlight = undefined
      resolve()
    }
    return children.slice(0, children.provisional)
  }
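  /*
   * Editor's note (illustrative, not part of the original path-scurry
   * source): readdir() results are cached, so repeated walks over the same
   * tree only pay for the fs calls once. Sketch, assuming a hypothetical
   * directory Path named `dir`:
   *
   *   const first = await dir.readdir()  // hits the filesystem
   *   const again = await dir.readdir()  // served from the children cache
   *   first.length === again.length      // true, unless the cache evicted
   */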
  /**
   * synchronous {@link PathBase.readdir}
   */
  readdirSync(): PathBase[] {
    if (!this.canReaddir()) {
      return []
    }

    const children = this.children()
    if (this.calledReaddir()) {
      return children.slice(0, children.provisional)
    }

    // else read the directory, fill up children
    // de-provisionalize any provisional children.
    const fullpath = this.fullpath()
    try {
      for (const e of this.#fs.readdirSync(fullpath, {
        withFileTypes: true,
      })) {
        this.#readdirAddChild(e, children)
      }
      this.#readdirSuccess(children)
    } catch (er) {
      this.#readdirFail((er as NodeJS.ErrnoException).code)
      children.provisional = 0
    }
    return children.slice(0, children.provisional)
  }

  canReaddir() {
    if (this.#type & ENOCHILD) return false
    const ifmt = IFMT & this.#type
    // we always set ENOTDIR when setting IFMT, so should be impossible
    /* c8 ignore start */
    if (!(ifmt === UNKNOWN || ifmt === IFDIR || ifmt === IFLNK)) {
      return false
    }
    /* c8 ignore stop */
    return true
  }

  shouldWalk(
    dirs: Set<PathBase | undefined>,
    walkFilter?: (e: PathBase) => boolean,
  ): boolean {
    return (
      (this.#type & IFDIR) === IFDIR &&
      !(this.#type & ENOCHILD) &&
      !dirs.has(this) &&
      (!walkFilter || walkFilter(this))
    )
  }

  /**
   * Return the Path object corresponding to path as resolved
   * by realpath(3).
   *
   * If the realpath call fails for any reason, `undefined` is returned.
   *
   * Result is cached, and thus may be outdated if the filesystem is mutated.
   * On success, returns a Path object.
   */
  async realpath(): Promise<PathBase | undefined> {
    if (this.#realpath) return this.#realpath
    if ((ENOREALPATH | ENOREADLINK | ENOENT) & this.#type) return undefined
    try {
      const rp = await this.#fs.promises.realpath(this.fullpath())
      return (this.#realpath = this.resolve(rp))
    } catch (_) {
      this.#markENOREALPATH()
    }
  }

  /**
   * Synchronous {@link realpath}
   */
  realpathSync(): PathBase | undefined {
    if (this.#realpath) return this.#realpath
    if ((ENOREALPATH | ENOREADLINK | ENOENT) & this.#type) return undefined
    try {
      const rp = this.#fs.realpathSync(this.fullpath())
      return (this.#realpath = this.resolve(rp))
    } catch (_) {
      this.#markENOREALPATH()
    }
  }

  /**
   * Internal method to mark this Path object as the scurry cwd,
   * called by {@link PathScurry#chdir}
   *
   * @internal
   */
  [setAsCwd](oldCwd: PathBase): void {
    if (oldCwd === this) return
    oldCwd.isCWD = false
    this.isCWD = true

    const changed = new Set<PathBase>([])
    let rp = []
    let p: PathBase = this
    while (p && p.parent) {
      changed.add(p)
      p.#relative = rp.join(this.sep)
      p.#relativePosix = rp.join('/')
      p = p.parent
      rp.push('..')
    }
    // now un-memoize parents of old cwd
    p = oldCwd
    while (p && p.parent && !changed.has(p)) {
      p.#relative = undefined
      p.#relativePosix = undefined
      p = p.parent
    }
  }
}
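/*
 * Editor's note (illustrative, not part of the original path-scurry source):
 * realpath() and readlink() results are memoized on the Path object, so they
 * can go stale if the filesystem is mutated afterwards. Sketch, assuming a
 * hypothetical Path named `p`:
 *
 *   const real = await p.realpath()  // one realpath(3) call
 *   const same = await p.realpath()  // cached, no fs access
 *   // after fs mutations, build a fresh PathScurry to re-resolve
 */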
/**
 * Path class used on win32 systems
 *
 * Uses `'\\'` as the path separator for returned paths, either `'\\'` or `'/'`
 * as the path separator for parsing paths.
 */
export class PathWin32 extends PathBase {
  /**
   * Separator for generating path strings.
   */
  sep: '\\' = '\\'
  /**
   * Separator for parsing path strings.
   */
  splitSep: RegExp = eitherSep

  /**
   * Do not create new Path objects directly.  They should always be accessed
   * via the PathScurry class or other methods on the Path class.
   *
   * @internal
   */
  constructor(
    name: string,
    type: number = UNKNOWN,
    root: PathBase | undefined,
    roots: { [k: string]: PathBase },
    nocase: boolean,
    children: ChildrenCache,
    opts: PathOpts,
  ) {
    super(name, type, root, roots, nocase, children, opts)
  }

  /**
   * @internal
   */
  newChild(name: string, type: number = UNKNOWN, opts: PathOpts = {}) {
    return new PathWin32(
      name,
      type,
      this.root,
      this.roots,
      this.nocase,
      this.childrenCache(),
      opts,
    )
  }

  /**
   * @internal
   */
  getRootString(path: string): string {
    return win32.parse(path).root
  }

  /**
   * @internal
   */
  getRoot(rootPath: string): PathBase {
    rootPath = uncToDrive(rootPath.toUpperCase())
    if (rootPath === this.root.name) {
      return this.root
    }
    // ok, not that one, check if it matches another we know about
    for (const [compare, root] of Object.entries(this.roots)) {
      if (this.sameRoot(rootPath, compare)) {
        return (this.roots[rootPath] = root)
      }
    }
    // otherwise, have to create a new one.
    return (this.roots[rootPath] = new PathScurryWin32(
      rootPath,
      this,
    ).root)
  }

  /**
   * @internal
   */
  sameRoot(rootPath: string, compare: string = this.root.name): boolean {
    // windows can (rarely) have case-sensitive filesystem, but
    // UNC and drive letters are always case-insensitive, and canonically
    // represented uppercase.
    rootPath = rootPath
      .toUpperCase()
      .replace(/\//g, '\\')
      .replace(uncDriveRegexp, '$1\\')
    return rootPath === compare
  }
}

/**
 * Path class used on all posix systems.
 *
 * Uses `'/'` as the path separator.
 */
export class PathPosix extends PathBase {
  /**
   * separator for parsing path strings
   */
  splitSep: '/' = '/'
  /**
   * separator for generating path strings
   */
  sep: '/' = '/'

  /**
   * Do not create new Path objects directly.  They should always be accessed
   * via the PathScurry class or other methods on the Path class.
   *
   * @internal
   */
  constructor(
    name: string,
    type: number = UNKNOWN,
    root: PathBase | undefined,
    roots: { [k: string]: PathBase },
    nocase: boolean,
    children: ChildrenCache,
    opts: PathOpts,
  ) {
    super(name, type, root, roots, nocase, children, opts)
  }

  /**
   * @internal
   */
  getRootString(path: string): string {
    return path.startsWith('/') ? '/' : ''
  }

  /**
   * @internal
   */
  getRoot(_rootPath: string): PathBase {
    return this.root
  }

  /**
   * @internal
   */
  newChild(name: string, type: number = UNKNOWN, opts: PathOpts = {}) {
    return new PathPosix(
      name,
      type,
      this.root,
      this.roots,
      this.nocase,
      this.childrenCache(),
      opts,
    )
  }
}
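/*
 * Editor's note (illustrative, not part of the original path-scurry source):
 * the platform subclasses only differ in separators and root handling; case
 * sensitivity comes from the `nocase` flag threaded through the constructor.
 * Sketch:
 *
 *   // normally a PathScurry creates these; shown here only for clarity
 *   new PathScurryPosix('/tmp', { nocase: true }) // posix, case-insensitive
 */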
/**
 * Options that may be provided to the PathScurry constructor
 */
export interface PathScurryOpts {
  /**
   * perform case-insensitive path matching.  Default based on platform
   * subclass.
   */
  nocase?: boolean
  /**
   * Number of Path entries to keep in the cache of Path child references.
   *
   * Setting this higher than 65536 will dramatically increase the data
   * consumption and construction time overhead of each PathScurry.
   *
   * Setting this value to 256 or lower will significantly reduce the data
   * consumption and construction time overhead, but may also reduce resolve()
   * and readdir() performance on large filesystems.
   *
   * Default `16384`.
   */
  childrenCacheSize?: number
  /**
   * An object that overrides the built-in functions from the fs and
   * fs/promises modules.
   *
   * See {@link FSOption}
   */
  fs?: FSOption
}

/**
 * The base class for all PathScurry classes, providing the interface for path
 * resolution and filesystem operations.
 *
 * Typically, you should *not* instantiate this class directly, but rather one
 * of the platform-specific classes, or the exported {@link PathScurry} which
 * defaults to the current platform.
 */
export abstract class PathScurryBase {
  /**
   * The root Path entry for the current working directory of this Scurry
   */
  root: PathBase
  /**
   * The string path for the root of this Scurry's current working directory
   */
  rootPath: string
  /**
   * A collection of all roots encountered, referenced by rootPath
   */
  roots: { [k: string]: PathBase }
  /**
   * The Path entry corresponding to this PathScurry's current working directory.
   */
  cwd: PathBase
  #resolveCache: ResolveCache
  #resolvePosixCache: ResolveCache
  #children: ChildrenCache
  /**
   * Perform path comparisons case-insensitively.
   *
   * Defaults true on Darwin and Windows systems, false elsewhere.
   */
  nocase: boolean

  /**
   * The path separator used for parsing paths
   *
   * `'/'` on Posix systems, either `'/'` or `'\\'` on Windows
   */
  abstract sep: string | RegExp

  #fs: FSValue

  /**
   * This class should not be instantiated directly.
   *
   * Use PathScurryWin32, PathScurryDarwin, PathScurryPosix, or PathScurry
   *
   * @internal
   */
  constructor(
    cwd: URL | string = process.cwd(),
    pathImpl: typeof win32 | typeof posix,
    sep: string | RegExp,
    {
      nocase,
      childrenCacheSize = 16 * 1024,
      fs = defaultFS,
    }: PathScurryOpts = {},
  ) {
    this.#fs = fsFromOption(fs)
    if (cwd instanceof URL || cwd.startsWith('file://')) {
      cwd = fileURLToPath(cwd)
    }
    // resolve and split root, and then add to the store.
    // this is the only time we call path.resolve()
    const cwdPath = pathImpl.resolve(cwd)
    this.roots = Object.create(null)
    this.rootPath = this.parseRootPath(cwdPath)
    this.#resolveCache = new ResolveCache()
    this.#resolvePosixCache = new ResolveCache()
    this.#children = new ChildrenCache(childrenCacheSize)

    const split = cwdPath.substring(this.rootPath.length).split(sep)
    // resolve('/') leaves '', splits to [''], we don't want that.
    if (split.length === 1 && !split[0]) {
      split.pop()
    }
    /* c8 ignore start */
    if (nocase === undefined) {
      throw new TypeError(
        'must provide nocase setting to PathScurryBase ctor',
      )
    }
    /* c8 ignore stop */
    this.nocase = nocase
    this.root = this.newRoot(this.#fs)
    this.roots[this.rootPath] = this.root
    let prev: PathBase = this.root
    let len = split.length - 1
    const joinSep = pathImpl.sep
    let abs = this.rootPath
    let sawFirst = false
    for (const part of split) {
      const l = len--
      prev = prev.child(part, {
        relative: new Array(l).fill('..').join(joinSep),
        relativePosix: new Array(l).fill('..').join('/'),
        fullpath: (abs += (sawFirst ? joinSep : '') + part),
      })
      sawFirst = true
    }
    this.cwd = prev
  }

  /**
   * Get the depth of a provided path, string, or the cwd
   */
  depth(path: Path | string = this.cwd): number {
    if (typeof path === 'string') {
      path = this.cwd.resolve(path)
    }
    return path.depth()
  }

  /**
   * Parse the root portion of a path string
   *
   * @internal
   */
  abstract parseRootPath(dir: string): string
  /**
   * create a new Path to use as root during construction.
   *
   * @internal
   */
  abstract newRoot(fs: FSValue): PathBase
  /**
   * Determine whether a given path string is absolute
   */
  abstract isAbsolute(p: string): boolean

  /**
   * Return the cache of child entries.  Exposed so subclasses can create
   * child Path objects in a platform-specific way.
   *
   * @internal
   */
  childrenCache() {
    return this.#children
  }

  /**
   * Resolve one or more path strings to a resolved string
   *
   * Same interface as require('path').resolve.
   *
   * Much faster than path.resolve() when called multiple times for the same
   * path, because the resolved Path objects are cached.  Much slower
   * otherwise.
   */
  resolve(...paths: string[]): string {
    // first figure out the minimum number of paths we have to test
    // we always start at cwd, but any absolutes will bump the start
    let r = ''
    for (let i = paths.length - 1; i >= 0; i--) {
      const p = paths[i]
      if (!p || p === '.') continue
      r = r ? `${p}/${r}` : p
      if (this.isAbsolute(p)) {
        break
      }
    }
    const cached = this.#resolveCache.get(r)
    if (cached !== undefined) {
      return cached
    }
    const result = this.cwd.resolve(r).fullpath()
    this.#resolveCache.set(r, result)
    return result
  }
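  /*
   * Editor's note (illustrative, not part of the original path-scurry
   * source): resolve() scans its arguments right-to-left and stops at the
   * first absolute path, then memoizes the joined string. Sketch, assuming
   * a hypothetical PathScurry `scurry` whose cwd is `/home/me`:
   *
   *   scurry.resolve('a', '/x', 'b', '..', 'c') // '/x/c'; 'a' is ignored
   *   scurry.resolve('a', '/x', 'b', '..', 'c') // cache hit, no re-walk
   */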
  /**
   * Resolve one or more path strings to a resolved string, returning
   * the posix path.  Identical to .resolve() on posix systems, but on
   * windows will return a forward-slash separated UNC path.
   *
   * Same interface as require('path').resolve.
   *
   * Much faster than path.resolve() when called multiple times for the same
   * path, because the resolved Path objects are cached.  Much slower
   * otherwise.
   */
  resolvePosix(...paths: string[]): string {
    // first figure out the minimum number of paths we have to test
    // we always start at cwd, but any absolutes will bump the start
    let r = ''
    for (let i = paths.length - 1; i >= 0; i--) {
      const p = paths[i]
      if (!p || p === '.') continue
      r = r ? `${p}/${r}` : p
      if (this.isAbsolute(p)) {
        break
      }
    }
    const cached = this.#resolvePosixCache.get(r)
    if (cached !== undefined) {
      return cached
    }
    const result = this.cwd.resolve(r).fullpathPosix()
    this.#resolvePosixCache.set(r, result)
    return result
  }

  /**
   * find the relative path from the cwd to the supplied path string or entry
   */
  relative(entry: PathBase | string = this.cwd): string {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    }
    return entry.relative()
  }

  /**
   * find the relative path from the cwd to the supplied path string or
   * entry, using / as the path delimiter, even on Windows.
   */
  relativePosix(entry: PathBase | string = this.cwd): string {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    }
    return entry.relativePosix()
  }

  /**
   * Return the basename for the provided string or Path object
   */
  basename(entry: PathBase | string = this.cwd): string {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    }
    return entry.name
  }

  /**
   * Return the dirname for the provided string or Path object
   */
  dirname(entry: PathBase | string = this.cwd): string {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    }
    return (entry.parent || entry).fullpath()
  }

  /**
   * Return an array of known child entries.
   *
   * First argument may be either a string, or a Path object.
   *
   * If the Path cannot or does not contain any children, then an empty array
   * is returned.
   *
   * Results are cached, and thus may be out of date if the filesystem is
   * mutated.
   *
   * Unlike `fs.readdir()`, the `withFileTypes` option defaults to `true`. Set
   * `{ withFileTypes: false }` to return strings.
   */

  readdir(): Promise<PathBase[]>
  readdir(opts: { withFileTypes: true }): Promise<PathBase[]>
  readdir(opts: { withFileTypes: false }): Promise<string[]>
  readdir(opts: { withFileTypes: boolean }): Promise<PathBase[] | string[]>
  readdir(entry: PathBase | string): Promise<PathBase[]>
  readdir(
    entry: PathBase | string,
    opts: { withFileTypes: true },
  ): Promise<PathBase[]>
  readdir(
    entry: PathBase | string,
    opts: { withFileTypes: false },
  ): Promise<string[]>
  readdir(
    entry: PathBase | string,
    opts: { withFileTypes: boolean },
  ): Promise<PathBase[] | string[]>
  async readdir(
    entry: PathBase | string | { withFileTypes: boolean } = this.cwd,
    opts: { withFileTypes: boolean } = {
      withFileTypes: true,
    },
  ): Promise<PathBase[] | string[]> {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      opts = entry
      entry = this.cwd
    }
    const { withFileTypes } = opts
    if (!entry.canReaddir()) {
      return []
    } else {
      const p = await entry.readdir()
      return withFileTypes ? p : p.map(e => e.name)
    }
  }

  /**
   * synchronous {@link PathScurryBase.readdir}
   */
  readdirSync(): PathBase[]
  readdirSync(opts: { withFileTypes: true }): PathBase[]
  readdirSync(opts: { withFileTypes: false }): string[]
  readdirSync(opts: { withFileTypes: boolean }): PathBase[] | string[]
  readdirSync(entry: PathBase | string): PathBase[]
  readdirSync(
    entry: PathBase | string,
    opts: { withFileTypes: true },
  ): PathBase[]
  readdirSync(
    entry: PathBase | string,
    opts: { withFileTypes: false },
  ): string[]
  readdirSync(
    entry: PathBase | string,
    opts: { withFileTypes: boolean },
  ): PathBase[] | string[]
  readdirSync(
    entry: PathBase | string | { withFileTypes: boolean } = this.cwd,
    opts: { withFileTypes: boolean } = {
      withFileTypes: true,
    },
  ): PathBase[] | string[] {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      opts = entry
      entry = this.cwd
    }
    const { withFileTypes = true } = opts
    if (!entry.canReaddir()) {
      return []
    } else if (withFileTypes) {
      return entry.readdirSync()
    } else {
      return entry.readdirSync().map(e => e.name)
    }
  }

  /**
   * Call lstat() on the string or Path object, and update all known
   * information that can be determined.
   *
   * Note that unlike `fs.lstat()`, the returned value does not contain some
   * information, such as `mode`, `dev`, `nlink`, and `ino`.  If that
   * information is required, you will need to call `fs.lstat` yourself.
   *
   * If the Path refers to a nonexistent file, or if the lstat call fails for
   * any reason, `undefined` is returned.  Otherwise the updated Path object is
   * returned.
   *
   * Results are cached, and thus may be out of date if the filesystem is
   * mutated.
   */
  async lstat(
    entry: string | PathBase = this.cwd,
  ): Promise<PathBase | undefined> {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    }
    return entry.lstat()
  }

  /**
   * synchronous {@link PathScurryBase.lstat}
   */
  lstatSync(entry: string | PathBase = this.cwd): PathBase | undefined {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    }
    return entry.lstatSync()
  }

  /**
   * Return the Path object or string path corresponding to the target of a
   * symbolic link.
   *
   * If the path is not a symbolic link, or if the readlink call fails for any
   * reason, `undefined` is returned.
   *
   * Result is cached, and thus may be outdated if the filesystem is mutated.
   *
   * `{withFileTypes}` option defaults to `false`.
   *
   * On success, returns a Path object if `withFileTypes` option is true,
   * otherwise a string.
   */
  readlink(): Promise<string | undefined>
  readlink(opt: { withFileTypes: false }): Promise<string | undefined>
  readlink(opt: { withFileTypes: true }): Promise<PathBase | undefined>
  readlink(opt: {
    withFileTypes: boolean
  }): Promise<PathBase | string | undefined>
  readlink(
    entry: string | PathBase,
    opt?: { withFileTypes: false },
  ): Promise<string | undefined>
  readlink(
    entry: string | PathBase,
    opt: { withFileTypes: true },
  ): Promise<PathBase | undefined>
  readlink(
    entry: string | PathBase,
    opt: { withFileTypes: boolean },
  ): Promise<string | PathBase | undefined>
  async readlink(
    entry: string | PathBase | { withFileTypes: boolean } = this.cwd,
    { withFileTypes }: { withFileTypes: boolean } = {
      withFileTypes: false,
    },
  ): Promise<string | PathBase | undefined> {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      withFileTypes = entry.withFileTypes
      entry = this.cwd
    }
    const e = await entry.readlink()
    return withFileTypes ? e : e?.fullpath()
  }

  /**
   * synchronous {@link PathScurryBase.readlink}
   */
  readlinkSync(): string | undefined
  readlinkSync(opt: { withFileTypes: false }): string | undefined
  readlinkSync(opt: { withFileTypes: true }): PathBase | undefined
  readlinkSync(opt: {
    withFileTypes: boolean
  }): PathBase | string | undefined
  readlinkSync(
    entry: string | PathBase,
    opt?: { withFileTypes: false },
  ): string | undefined
  readlinkSync(
    entry: string | PathBase,
    opt: { withFileTypes: true },
  ): PathBase | undefined
  readlinkSync(
    entry: string | PathBase,
    opt: { withFileTypes: boolean },
  ): string | PathBase | undefined
  readlinkSync(
    entry: string | PathBase | { withFileTypes: boolean } = this.cwd,
    { withFileTypes }: { withFileTypes: boolean } = {
      withFileTypes: false,
    },
  ): string | PathBase | undefined {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      withFileTypes = entry.withFileTypes
      entry = this.cwd
    }
    const e = entry.readlinkSync()
    return withFileTypes ? e : e?.fullpath()
  }
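  /*
   * Editor's note (illustrative, not part of the original path-scurry
   * source): the overload ladders above encode one convention:
   * `withFileTypes: true` yields Path objects, `false` (the default here)
   * yields strings. Sketch, assuming a hypothetical PathScurry `scurry`:
   *
   *   const s = await scurry.readlink('ln')                          // string | undefined
   *   const p = await scurry.readlink('ln', { withFileTypes: true }) // Path | undefined
   */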
  /**
   * Return the Path object or string path corresponding to path as resolved
   * by realpath(3).
   *
   * If the realpath call fails for any reason, `undefined` is returned.
   *
   * Result is cached, and thus may be outdated if the filesystem is mutated.
   *
   * `{withFileTypes}` option defaults to `false`.
   *
   * On success, returns a Path object if `withFileTypes` option is true,
   * otherwise a string.
   */
  realpath(): Promise<string | undefined>
  realpath(opt: { withFileTypes: false }): Promise<string | undefined>
  realpath(opt: { withFileTypes: true }): Promise<PathBase | undefined>
  realpath(opt: {
    withFileTypes: boolean
  }): Promise<PathBase | string | undefined>
  realpath(
    entry: string | PathBase,
    opt?: { withFileTypes: false },
  ): Promise<string | undefined>
  realpath(
    entry: string | PathBase,
    opt: { withFileTypes: true },
  ): Promise<PathBase | undefined>
  realpath(
    entry: string | PathBase,
    opt: { withFileTypes: boolean },
  ): Promise<string | PathBase | undefined>
  async realpath(
    entry: string | PathBase | { withFileTypes: boolean } = this.cwd,
    { withFileTypes }: { withFileTypes: boolean } = {
      withFileTypes: false,
    },
  ): Promise<string | PathBase | undefined> {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      withFileTypes = entry.withFileTypes
      entry = this.cwd
    }
    const e = await entry.realpath()
    return withFileTypes ? e : e?.fullpath()
  }

  realpathSync(): string | undefined
  realpathSync(opt: { withFileTypes: false }): string | undefined
  realpathSync(opt: { withFileTypes: true }): PathBase | undefined
  realpathSync(opt: {
    withFileTypes: boolean
  }): PathBase | string | undefined
  realpathSync(
    entry: string | PathBase,
    opt?: { withFileTypes: false },
  ): string | undefined
  realpathSync(
    entry: string | PathBase,
    opt: { withFileTypes: true },
  ): PathBase | undefined
  realpathSync(
    entry: string | PathBase,
    opt: { withFileTypes: boolean },
  ): string | PathBase | undefined
  realpathSync(
    entry: string | PathBase | { withFileTypes: boolean } = this.cwd,
    { withFileTypes }: { withFileTypes: boolean } = {
      withFileTypes: false,
    },
  ): string | PathBase | undefined {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      withFileTypes = entry.withFileTypes
      entry = this.cwd
    }
    const e = entry.realpathSync()
    return withFileTypes ? e : e?.fullpath()
  }

  /**
   * Asynchronously walk the directory tree, returning an array of
   * all path strings or Path objects found.
   *
   * Note that this will be extremely memory-hungry on large filesystems.
   * In such cases, it may be better to use the stream or async iterator
   * walk implementation.
   */
  walk(): Promise<PathBase[]>
  walk(
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): Promise<PathBase[]>
  walk(opts: WalkOptionsWithFileTypesFalse): Promise<string[]>
  walk(opts: WalkOptions): Promise<string[] | PathBase[]>
  walk(entry: string | PathBase): Promise<PathBase[]>
  walk(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): Promise<PathBase[]>
  walk(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesFalse,
  ): Promise<string[]>
  walk(
    entry: string | PathBase,
    opts: WalkOptions,
  ): Promise<PathBase[] | string[]>
  async walk(
    entry: string | PathBase | WalkOptions = this.cwd,
    opts: WalkOptions = {},
  ): Promise<PathBase[] | string[]> {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      opts = entry
      entry = this.cwd
    }
    const {
      withFileTypes = true,
      follow = false,
      filter,
      walkFilter,
    } = opts
    const results: (string | PathBase)[] = []
    if (!filter || filter(entry)) {
      results.push(withFileTypes ? entry : entry.fullpath())
    }
    const dirs = new Set<PathBase>()
    const walk = (
      dir: PathBase,
      cb: (er?: NodeJS.ErrnoException) => void,
    ) => {
      dirs.add(dir)
      dir.readdirCB((er, entries) => {
        /* c8 ignore start */
        if (er) {
          return cb(er)
        }
        /* c8 ignore stop */
        let len = entries.length
        if (!len) return cb()
        const next = () => {
          if (--len === 0) {
            cb()
          }
        }
        for (const e of entries) {
          if (!filter || filter(e)) {
            results.push(withFileTypes ? e : e.fullpath())
          }
          if (follow && e.isSymbolicLink()) {
            e.realpath()
              .then(r => (r?.isUnknown() ? r.lstat() : r))
              .then(r =>
                r?.shouldWalk(dirs, walkFilter) ? walk(r, next) : next(),
              )
          } else {
            if (e.shouldWalk(dirs, walkFilter)) {
              walk(e, next)
            } else {
              next()
            }
          }
        }
      }, true) // zalgooooooo
    }

    const start = entry
    return new Promise<PathBase[] | string[]>((res, rej) => {
      walk(start, er => {
        /* c8 ignore start */
        if (er) return rej(er)
        /* c8 ignore stop */
        res(results as PathBase[] | string[])
      })
    })
  }

  /**
   * Synchronously walk the directory tree, returning an array of
   * all path strings or Path objects found.
   *
   * Note that this will be extremely memory-hungry on large filesystems.
   * In such cases, it may be better to use the stream or async iterator
   * walk implementation.
   */
  walkSync(): PathBase[]
  walkSync(
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): PathBase[]
  walkSync(opts: WalkOptionsWithFileTypesFalse): string[]
  walkSync(opts: WalkOptions): string[] | PathBase[]
  walkSync(entry: string | PathBase): PathBase[]
  walkSync(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesUnset | WalkOptionsWithFileTypesTrue,
  ): PathBase[]
  walkSync(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesFalse,
  ): string[]
  walkSync(
    entry: string | PathBase,
    opts: WalkOptions,
  ): PathBase[] | string[]
  walkSync(
    entry: string | PathBase | WalkOptions = this.cwd,
    opts: WalkOptions = {},
  ): PathBase[] | string[] {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      opts = entry
      entry = this.cwd
    }
    const {
      withFileTypes = true,
      follow = false,
      filter,
      walkFilter,
    } = opts
    const results: (string | PathBase)[] = []
    if (!filter || filter(entry)) {
      results.push(withFileTypes ? entry : entry.fullpath())
    }
    const dirs = new Set<PathBase>([entry])
    for (const dir of dirs) {
      const entries = dir.readdirSync()
      for (const e of entries) {
        if (!filter || filter(e)) {
          results.push(withFileTypes ? e : e.fullpath())
        }
        let r: PathBase | undefined = e
        if (e.isSymbolicLink()) {
          if (!(follow && (r = e.realpathSync()))) continue
          if (r.isUnknown()) r.lstatSync()
        }
        if (r.shouldWalk(dirs, walkFilter)) {
          dirs.add(r)
        }
      }
    }
    return results as string[] | PathBase[]
  }
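  /*
   * Editor's note (illustrative, not part of the original path-scurry
   * source): `filter` controls what lands in the result set, while
   * `walkFilter` controls which directories are descended into. Sketch,
   * assuming a hypothetical PathScurry `scurry`:
   *
   *   await scurry.walk({
   *     filter: e => e.isFile(),                    // results: files only
   *     walkFilter: e => e.name !== 'node_modules', // skip that subtree
   *   })
   */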
  /**
   * Support for `for await`
   *
   * Alias for {@link PathScurryBase.iterate}
   *
   * Note: As of Node 19, this is very slow, compared to other methods of
   * walking.  Consider using {@link PathScurryBase.stream} if memory overhead
   * and backpressure are concerns, or {@link PathScurryBase.walk} if not.
   */
  [Symbol.asyncIterator]() {
    return this.iterate()
  }

  /**
   * Async generator form of {@link PathScurryBase.walk}
   *
   * Note: As of Node 19, this is very slow, compared to other methods of
   * walking, especially if most/all of the directory tree has been previously
   * walked.  Consider using {@link PathScurryBase.stream} if memory overhead
   * and backpressure are concerns, or {@link PathScurryBase.walk} if not.
   */
  iterate(): AsyncGenerator<PathBase, void, void>
  iterate(
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): AsyncGenerator<PathBase, void, void>
  iterate(
    opts: WalkOptionsWithFileTypesFalse,
  ): AsyncGenerator<string, void, void>
  iterate(opts: WalkOptions): AsyncGenerator<string | PathBase, void, void>
  iterate(entry: string | PathBase): AsyncGenerator<PathBase, void, void>
  iterate(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): AsyncGenerator<PathBase, void, void>
  iterate(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesFalse,
  ): AsyncGenerator<string, void, void>
  iterate(
    entry: string | PathBase,
    opts: WalkOptions,
  ): AsyncGenerator<PathBase | string, void, void>
  iterate(
    entry: string | PathBase | WalkOptions = this.cwd,
    options: WalkOptions = {},
  ): AsyncGenerator<PathBase | string, void, void> {
    // iterating async over the stream is significantly more performant,
    // especially in the warm-cache scenario, because it buffers up directory
    // entries in the background instead of waiting for a yield for each one.
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      options = entry
      entry = this.cwd
    }
    return this.stream(entry, options)[Symbol.asyncIterator]()
  }

  /**
   * Iterating over a PathScurry performs a synchronous walk.
   *
   * Alias for {@link PathScurryBase.iterateSync}
   */
  [Symbol.iterator]() {
    return this.iterateSync()
  }

  iterateSync(): Generator<PathBase, void, void>
  iterateSync(
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): Generator<PathBase, void, void>
  iterateSync(
    opts: WalkOptionsWithFileTypesFalse,
  ): Generator<string, void, void>
  iterateSync(opts: WalkOptions): Generator<string | PathBase, void, void>
  iterateSync(entry: string | PathBase): Generator<PathBase, void, void>
  iterateSync(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): Generator<PathBase, void, void>
  iterateSync(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesFalse,
  ): Generator<string, void, void>
  iterateSync(
    entry: string | PathBase,
    opts: WalkOptions,
  ): Generator<PathBase | string, void, void>
  *iterateSync(
    entry: string | PathBase | WalkOptions = this.cwd,
    opts: WalkOptions = {},
  ): Generator<PathBase | string, void, void> {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      opts = entry
      entry = this.cwd
    }
    const {
      withFileTypes = true,
      follow = false,
      filter,
      walkFilter,
    } = opts
    if (!filter || filter(entry)) {
      yield withFileTypes ? entry : entry.fullpath()
    }
    const dirs = new Set<PathBase>([entry])
    for (const dir of dirs) {
      const entries = dir.readdirSync()
      for (const e of entries) {
        if (!filter || filter(e)) {
          yield withFileTypes ? e : e.fullpath()
        }
        let r: PathBase | undefined = e
        if (e.isSymbolicLink()) {
          if (!(follow && (r = e.realpathSync()))) continue
          if (r.isUnknown()) r.lstatSync()
        }
        if (r.shouldWalk(dirs, walkFilter)) {
          dirs.add(r)
        }
      }
    }
  }

  /**
   * Stream form of {@link PathScurryBase.walk}
   *
   * Returns a Minipass stream that emits {@link PathBase} objects by default,
   * or strings if `{ withFileTypes: false }` is set in the options.
   */
  stream(): Minipass<PathBase>
  stream(
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): Minipass<PathBase>
  stream(opts: WalkOptionsWithFileTypesFalse): Minipass<string>
  stream(opts: WalkOptions): Minipass<string> | Minipass<PathBase>
  stream(entry: string | PathBase): Minipass<PathBase>
  stream(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesUnset | WalkOptionsWithFileTypesTrue,
  ): Minipass<PathBase>
  stream(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesFalse,
  ): Minipass<string>
  stream(
    entry: string | PathBase,
    opts: WalkOptions,
  ): Minipass<string> | Minipass<PathBase>
  stream(
    entry: string | PathBase | WalkOptions = this.cwd,
    opts: WalkOptions = {},
  ): Minipass<string> | Minipass<PathBase> {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      opts = entry
      entry = this.cwd
    }
    const {
      withFileTypes = true,
      follow = false,
      filter,
      walkFilter,
    } = opts
    const results = new Minipass<PathBase | string>({ objectMode: true })
    if (!filter || filter(entry)) {
      results.write(withFileTypes ? entry : entry.fullpath())
    }
    const dirs = new Set<PathBase>()
    const queue: PathBase[] = [entry]
    let processing = 0
    const process = () => {
      let paused = false
      while (!paused) {
        const dir = queue.shift()
        if (!dir) {
          if (processing === 0) results.end()
          return
        }

        processing++
        dirs.add(dir)

        const onReaddir = (
          er: null | NodeJS.ErrnoException,
          entries: PathBase[],
          didRealpaths: boolean = false,
        ) => {
          /* c8 ignore start */
          if (er) return results.emit('error', er)
          /* c8 ignore stop */
          if (follow && !didRealpaths) {
            const promises: Promise<PathBase | undefined>[] = []
            for (const e of entries) {
              if (e.isSymbolicLink()) {
                promises.push(
                  e
                    .realpath()
                    .then((r: PathBase | undefined) =>
                      r?.isUnknown() ? r.lstat() : r,
                    ),
                )
              }
            }
            if (promises.length) {
              Promise.all(promises).then(() =>
                onReaddir(null, entries, true),
              )
              return
            }
          }

          for (const e of entries) {
            if (e && (!filter || filter(e))) {
              if (!results.write(withFileTypes ? e : e.fullpath())) {
                paused = true
              }
            }
          }

          processing--
          for (const e of entries) {
            const r = e.realpathCached() || e
            if (r.shouldWalk(dirs, walkFilter)) {
              queue.push(r)
            }
          }
          if (paused && !results.flowing) {
            results.once('drain', process)
          } else if (!sync) {
            process()
          }
        }

        // zalgo containment
        let sync = true
        dir.readdirCB(onReaddir, true)
        sync = false
      }
    }
    process()
    return results as Minipass<string> | Minipass<PathBase>
  }

  /**
   * Synchronous form of {@link PathScurryBase.stream}
   *
   * Returns a Minipass stream that emits {@link PathBase} objects by default,
   * or strings if `{ withFileTypes: false }` is set in the options.
   *
   * Will complete the walk in a single tick if the stream is consumed fully.
   * Otherwise, will pause as needed for stream backpressure.
   */
  streamSync(): Minipass<PathBase>
  streamSync(
    opts: WalkOptionsWithFileTypesTrue | WalkOptionsWithFileTypesUnset,
  ): Minipass<PathBase>
  streamSync(opts: WalkOptionsWithFileTypesFalse): Minipass<string>
  streamSync(opts: WalkOptions): Minipass<string> | Minipass<PathBase>
  streamSync(entry: string | PathBase): Minipass<PathBase>
  streamSync(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesUnset | WalkOptionsWithFileTypesTrue,
  ): Minipass<PathBase>
  streamSync(
    entry: string | PathBase,
    opts: WalkOptionsWithFileTypesFalse,
  ): Minipass<string>
  streamSync(
    entry: string | PathBase,
    opts: WalkOptions,
  ): Minipass<string> | Minipass<PathBase>
  streamSync(
    entry: string | PathBase | WalkOptions = this.cwd,
    opts: WalkOptions = {},
  ): Minipass<string> | Minipass<PathBase> {
    if (typeof entry === 'string') {
      entry = this.cwd.resolve(entry)
    } else if (!(entry instanceof PathBase)) {
      opts = entry
      entry = this.cwd
    }
    const {
      withFileTypes = true,
      follow = false,
      filter,
      walkFilter,
    } = opts
    const results = new Minipass<PathBase | string>({ objectMode: true })
    const dirs = new Set<PathBase>()
    if (!filter || filter(entry)) {
      results.write(withFileTypes ? entry : entry.fullpath())
    }
    const queue: PathBase[] = [entry]
    let processing = 0
    const process = () => {
      let paused = false
      while (!paused) {
        const dir = queue.shift()
        if (!dir) {
          if (processing === 0) results.end()
          return
        }
        processing++
        dirs.add(dir)

        const entries = dir.readdirSync()
        for (const e of entries) {
          if (!filter || filter(e)) {
            if (!results.write(withFileTypes ? e : e.fullpath())) {
              paused = true
            }
          }
        }
        processing--
        for (const e of entries) {
          let r: PathBase | undefined = e
          if (e.isSymbolicLink()) {
            if (!(follow && (r = e.realpathSync()))) continue
            if (r.isUnknown()) r.lstatSync()
          }
          if (r.shouldWalk(dirs, walkFilter)) {
            queue.push(r)
          }
        }
      }
      if (paused && !results.flowing) results.once('drain', process)
    }
    process()
    return results as Minipass<string> | Minipass<PathBase>
  }
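  /*
   * Editor's note (illustrative, not part of the original path-scurry
   * source): the stream forms respect backpressure, so large trees need not
   * be fully buffered the way walk() buffers them. Sketch, assuming a
   * hypothetical PathScurry `scurry`:
   *
   *   for await (const entry of scurry.stream()) {
   *     // entries arrive as Path objects; pausing the loop pauses the walk
   *   }
   */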
this.cwd.resolve(path) : path\n    this.cwd[setAsCwd](oldCwd)\n  }\n}\n\n/**\n * Options provided to all walk methods.\n */\nexport interface WalkOptions {\n  /**\n   * Return results as {@link PathBase} objects rather than strings.\n   * When set to false, results are fully resolved paths, as returned by\n   * {@link PathBase.fullpath}.\n   * @default true\n   */\n  withFileTypes?: boolean\n\n  /**\n   *  Attempt to read directory entries from symbolic links. Otherwise, only\n   *  actual directories are traversed. Regardless of this setting, a given\n   *  target path will only ever be walked once, meaning that a symbolic link\n   *  to a previously traversed directory will never be followed.\n   *\n   *  Setting this imposes a slight performance penalty, because `readlink`\n   *  must be called on all symbolic links encountered, in order to avoid\n   *  infinite cycles.\n   * @default false\n   */\n  follow?: boolean\n\n  /**\n   * Only return entries where the provided function returns true.\n   *\n   * This will not prevent directories from being traversed, even if they do\n   * not pass the filter, though it will prevent directories themselves from\n   * being included in the result set.  See {@link walkFilter}\n   *\n   * Asynchronous functions are not supported here.\n   *\n   * By default, if no filter is provided, all entries and traversed\n   * directories are included.\n   */\n  filter?: (entry: PathBase) => boolean\n\n  /**\n   * Only traverse directories (and in the case of {@link follow} being set to\n   * true, symbolic links to directories) if the provided function returns\n   * true.\n   *\n   * This will not prevent directories from being included in the result set,\n   * even if they do not pass the supplied filter function.  See {@link filter}\n   * to do that.\n   *\n   * Asynchronous functions are not supported here.\n   */\n  walkFilter?: (entry: PathBase) => boolean\n}\n\nexport type WalkOptionsWithFileTypesUnset = WalkOptions & {\n  withFileTypes?: undefined\n}\nexport type WalkOptionsWithFileTypesTrue = WalkOptions & {\n  withFileTypes: true\n}\nexport type WalkOptionsWithFileTypesFalse = WalkOptions & {\n  withFileTypes: false\n}\n\n/**\n * Windows implementation of {@link PathScurryBase}\n *\n * Defaults to case insensitve, uses `'\\\\'` to generate path strings.  
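A minimal usage sketch of the walk options defined above, against the public `path-scurry` API; the root path and both predicates are illustrative. Note that `walkFilter` prunes the traversal itself, while `filter` only trims the result set.

```typescript
import { PathScurry } from 'path-scurry'

// List TypeScript files, pruning node_modules from traversal entirely
// (walkFilter) while only reporting .ts entries (filter).
// withFileTypes: false yields fully resolved path strings.
async function listTsFiles(root: string): Promise<string[]> {
  const scurry = new PathScurry(root)
  return scurry.walk({
    withFileTypes: false,
    follow: false, // do not traverse through symlinked directories
    walkFilter: entry => entry.name !== 'node_modules',
    filter: entry => entry.name.endsWith('.ts'),
  })
}
```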
Uses\n * {@link PathWin32} for Path objects.\n */\nexport class PathScurryWin32 extends PathScurryBase {\n  /**\n   * separator for generating path strings\n   */\n  sep: '\\\\' = '\\\\'\n\n  constructor(\n    cwd: URL | string = process.cwd(),\n    opts: PathScurryOpts = {},\n  ) {\n    const { nocase = true } = opts\n    super(cwd, win32, '\\\\', { ...opts, nocase })\n    this.nocase = nocase\n    for (let p: PathBase | undefined = this.cwd; p; p = p.parent) {\n      p.nocase = this.nocase\n    }\n  }\n\n  /**\n   * @internal\n   */\n  parseRootPath(dir: string): string {\n    // if the path starts with a single separator, it's not a UNC, and we'll\n    // just get separator as the root, and driveFromUNC will return \\\n    // In that case, mount \\ on the root from the cwd.\n    return win32.parse(dir).root.toUpperCase()\n  }\n\n  /**\n   * @internal\n   */\n  newRoot(fs: FSValue) {\n    return new PathWin32(\n      this.rootPath,\n      IFDIR,\n      undefined,\n      this.roots,\n      this.nocase,\n      this.childrenCache(),\n      { fs },\n    )\n  }\n\n  /**\n   * Return true if the provided path string is an absolute path\n   */\n  isAbsolute(p: string): boolean {\n    return (\n      p.startsWith('/') || p.startsWith('\\\\') || /^[a-z]:(\\/|\\\\)/i.test(p)\n    )\n  }\n}\n\n/**\n * {@link PathScurryBase} implementation for all posix systems other than Darwin.\n *\n * Defaults to case-sensitive matching, uses `'/'` to generate path strings.\n *\n * Uses {@link PathPosix} for Path objects.\n */\nexport class PathScurryPosix extends PathScurryBase {\n  /**\n   * separator for generating path strings\n   */\n  sep: '/' = '/'\n  constructor(\n    cwd: URL | string = process.cwd(),\n    opts: PathScurryOpts = {},\n  ) {\n    const { nocase = false } = opts\n    super(cwd, posix, '/', { ...opts, nocase })\n    this.nocase = nocase\n  }\n\n  /**\n   * @internal\n   */\n  parseRootPath(_dir: string): string {\n    return '/'\n  }\n\n  /**\n   * @internal\n   */\n  newRoot(fs: FSValue) {\n    return new PathPosix(\n      this.rootPath,\n      IFDIR,\n      undefined,\n      this.roots,\n      this.nocase,\n      this.childrenCache(),\n      { fs },\n    )\n  }\n\n  /**\n   * Return true if the provided path string is an absolute path\n   */\n  isAbsolute(p: string): boolean {\n    return p.startsWith('/')\n  }\n}\n\n/**\n * {@link PathScurryBase} implementation for Darwin (macOS) systems.\n *\n * Defaults to case-insensitive matching, uses `'/'` for generating path\n * strings.\n *\n * Uses {@link PathPosix} for Path objects.\n */\nexport class PathScurryDarwin extends PathScurryPosix {\n  constructor(\n    cwd: URL | string = process.cwd(),\n    opts: PathScurryOpts = {},\n  ) {\n    const { nocase = true } = opts\n    super(cwd, { ...opts, nocase })\n  }\n}\n\n/**\n * Default {@link PathBase} implementation for the current platform.\n *\n * {@link PathWin32} on Windows systems, {@link PathPosix} on all others.\n */\nexport const Path = process.platform === 'win32' ? PathWin32 : PathPosix\nexport type Path = PathBase | InstanceType\n\n/**\n * Default {@link PathScurryBase} implementation for the current platform.\n *\n * {@link PathScurryWin32} on Windows systems, {@link PathScurryDarwin} on\n * Darwin (macOS) systems, {@link PathScurryPosix} on all others.\n */\nexport const PathScurry:\n  | typeof PathScurryWin32\n  | typeof PathScurryDarwin\n  | typeof PathScurryPosix =\n  process.platform === 'win32' ? PathScurryWin32\n  : process.platform === 'darwin' ? 
PathScurryDarwin\n  : PathScurryPosix\nexport type PathScurry = PathScurryBase | InstanceType\n", "const proc =\n  typeof process === 'object' && process\n    ? process\n    : {\n        stdout: null,\n        stderr: null,\n      }\nimport { EventEmitter } from 'node:events'\nimport Stream from 'node:stream'\nimport { StringDecoder } from 'node:string_decoder'\n\n/**\n * Same as StringDecoder, but exposing the `lastNeed` flag on the type\n */\ntype SD = StringDecoder & { lastNeed: boolean }\n\nexport type { SD, Pipe, PipeProxyErrors }\n\n/**\n * Return true if the argument is a Minipass stream, Node stream, or something\n * else that Minipass can interact with.\n */\nexport const isStream = (\n  s: any\n): s is Minipass.Readable | Minipass.Writable =>\n  !!s &&\n  typeof s === 'object' &&\n  (s instanceof Minipass ||\n    s instanceof Stream ||\n    isReadable(s) ||\n    isWritable(s))\n\n/**\n * Return true if the argument is a valid {@link Minipass.Readable}\n */\nexport const isReadable = (s: any): s is Minipass.Readable =>\n  !!s &&\n  typeof s === 'object' &&\n  s instanceof EventEmitter &&\n  typeof (s as Minipass.Readable).pipe === 'function' &&\n  // node core Writable streams have a pipe() method, but it throws\n  (s as Minipass.Readable).pipe !== Stream.Writable.prototype.pipe\n\n/**\n * Return true if the argument is a valid {@link Minipass.Writable}\n */\nexport const isWritable = (s: any): s is Minipass.Readable =>\n  !!s &&\n  typeof s === 'object' &&\n  s instanceof EventEmitter &&\n  typeof (s as Minipass.Writable).write === 'function' &&\n  typeof (s as Minipass.Writable).end === 'function'\n\nconst EOF = Symbol('EOF')\nconst MAYBE_EMIT_END = Symbol('maybeEmitEnd')\nconst EMITTED_END = Symbol('emittedEnd')\nconst EMITTING_END = Symbol('emittingEnd')\nconst EMITTED_ERROR = Symbol('emittedError')\nconst CLOSED = Symbol('closed')\nconst READ = Symbol('read')\nconst FLUSH = Symbol('flush')\nconst FLUSHCHUNK = Symbol('flushChunk')\nconst ENCODING = Symbol('encoding')\nconst DECODER = Symbol('decoder')\nconst FLOWING = Symbol('flowing')\nconst PAUSED = Symbol('paused')\nconst RESUME = Symbol('resume')\nconst BUFFER = Symbol('buffer')\nconst PIPES = Symbol('pipes')\nconst BUFFERLENGTH = Symbol('bufferLength')\nconst BUFFERPUSH = Symbol('bufferPush')\nconst BUFFERSHIFT = Symbol('bufferShift')\nconst OBJECTMODE = Symbol('objectMode')\n// internal event when stream is destroyed\nconst DESTROYED = Symbol('destroyed')\n// internal event when stream has an error\nconst ERROR = Symbol('error')\nconst EMITDATA = Symbol('emitData')\nconst EMITEND = Symbol('emitEnd')\nconst EMITEND2 = Symbol('emitEnd2')\nconst ASYNC = Symbol('async')\nconst ABORT = Symbol('abort')\nconst ABORTED = Symbol('aborted')\nconst SIGNAL = Symbol('signal')\nconst DATALISTENERS = Symbol('dataListeners')\nconst DISCARDED = Symbol('discarded')\n\nconst defer = (fn: (...a: any[]) => any) => Promise.resolve().then(fn)\nconst nodefer = (fn: (...a: any[]) => any) => fn()\n\n// events that mean 'the stream is over'\n// these are treated specially, and re-emitted\n// if they are listened for after emitting.\ntype EndishEvent = 'end' | 'finish' | 'prefinish'\nconst isEndish = (ev: any): ev is EndishEvent =>\n  ev === 'end' || ev === 'finish' || ev === 'prefinish'\n\nconst isArrayBufferLike = (b: any): b is ArrayBufferLike =>\n  b instanceof ArrayBuffer ||\n  (!!b &&\n    typeof b === 'object' &&\n    b.constructor &&\n    b.constructor.name === 'ArrayBuffer' &&\n    b.byteLength >= 0)\n\nconst isArrayBufferView = (b: any): 
b is ArrayBufferView =>\n  !Buffer.isBuffer(b) && ArrayBuffer.isView(b)\n\n/**\n * Options that may be passed to stream.pipe()\n */\nexport interface PipeOptions {\n  /**\n   * end the destination stream when the source stream ends\n   */\n  end?: boolean\n  /**\n   * proxy errors from the source stream to the destination stream\n   */\n  proxyErrors?: boolean\n}\n\n/**\n * Internal class representing a pipe to a destination stream.\n *\n * @internal\n */\nclass Pipe {\n  src: Minipass\n  dest: Minipass\n  opts: PipeOptions\n  ondrain: () => any\n  constructor(\n    src: Minipass,\n    dest: Minipass.Writable,\n    opts: PipeOptions\n  ) {\n    this.src = src\n    this.dest = dest as Minipass\n    this.opts = opts\n    this.ondrain = () => src[RESUME]()\n    this.dest.on('drain', this.ondrain)\n  }\n  unpipe() {\n    this.dest.removeListener('drain', this.ondrain)\n  }\n  // only here for the prototype\n  /* c8 ignore start */\n  proxyErrors(_er: any) {}\n  /* c8 ignore stop */\n  end() {\n    this.unpipe()\n    if (this.opts.end) this.dest.end()\n  }\n}\n\n/**\n * Internal class representing a pipe to a destination stream where\n * errors are proxied.\n *\n * @internal\n */\nclass PipeProxyErrors extends Pipe {\n  unpipe() {\n    this.src.removeListener('error', this.proxyErrors)\n    super.unpipe()\n  }\n  constructor(\n    src: Minipass,\n    dest: Minipass.Writable,\n    opts: PipeOptions\n  ) {\n    super(src, dest, opts)\n    this.proxyErrors = er => dest.emit('error', er)\n    src.on('error', this.proxyErrors)\n  }\n}\n\nexport namespace Minipass {\n  /**\n   * Encoding used to create a stream that outputs strings rather than\n   * Buffer objects.\n   */\n  export type Encoding = BufferEncoding | 'buffer' | null\n\n  /**\n   * Any stream that Minipass can pipe into\n   */\n  export type Writable =\n    | Minipass\n    | NodeJS.WriteStream\n    | (NodeJS.WriteStream & { fd: number })\n    | (EventEmitter & {\n        end(): any\n        write(chunk: any, ...args: any[]): any\n      })\n\n  /**\n   * Any stream that can be read from\n   */\n  export type Readable =\n    | Minipass\n    | NodeJS.ReadStream\n    | (NodeJS.ReadStream & { fd: number })\n    | (EventEmitter & {\n        pause(): any\n        resume(): any\n        pipe(...destArgs: any[]): any\n      })\n\n  /**\n   * Utility type that can be iterated sync or async\n   */\n  export type DualIterable = Iterable & AsyncIterable\n\n  type EventArguments = Record\n\n  /**\n   * The listing of events that a Minipass class can emit.\n   * Extend this when extending the Minipass class, and pass as\n   * the third template argument.  
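A sketch of how this third template argument is meant to be used; the `TrackerEvents` and `Tracker` names are hypothetical, not part of minipass.

```typescript
import { Minipass } from 'minipass'

// Hypothetical subclass wiring a custom 'progress' event into the typings.
interface TrackerEvents extends Minipass.Events<Buffer> {
  progress: [bytesSoFar: number]
}

class Tracker extends Minipass<Buffer, Minipass.ContiguousData, TrackerEvents> {
  report(bytes: number) {
    this.emit('progress', bytes) // argument list checked against TrackerEvents
  }
}

const t = new Tracker()
t.on('progress', bytes => console.error(`read ${bytes} bytes so far`))
t.report(1024)
```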
The key is the name of the event,\n   * and the value is the argument list.\n   *\n   * Any undeclared events will still be allowed, but the handler will get\n   * arguments as `unknown[]`.\n   */\n  export interface Events\n    extends EventArguments {\n    readable: []\n    data: [chunk: RType]\n    error: [er: unknown]\n    abort: [reason: unknown]\n    drain: []\n    resume: []\n    end: []\n    finish: []\n    prefinish: []\n    close: []\n    [DESTROYED]: [er?: unknown]\n    [ERROR]: [er: unknown]\n  }\n\n  /**\n   * String or buffer-like data that can be joined and sliced\n   */\n  export type ContiguousData =\n    | Buffer\n    | ArrayBufferLike\n    | ArrayBufferView\n    | string\n  export type BufferOrString = Buffer | string\n\n  /**\n   * Options passed to the Minipass constructor.\n   */\n  export type SharedOptions = {\n    /**\n     * Defer all data emission and other events until the end of the\n     * current tick, similar to Node core streams\n     */\n    async?: boolean\n    /**\n     * A signal which will abort the stream\n     */\n    signal?: AbortSignal\n    /**\n     * Output string encoding. Set to `null` or `'buffer'` (or omit) to\n     * emit Buffer objects rather than strings.\n     *\n     * Conflicts with `objectMode`\n     */\n    encoding?: BufferEncoding | null | 'buffer'\n    /**\n     * Output data exactly as it was written, supporting non-buffer/string\n     * data (such as arbitrary objects, falsey values, etc.)\n     *\n     * Conflicts with `encoding`\n     */\n    objectMode?: boolean\n  }\n\n  /**\n   * Options for a string encoded output\n   */\n  export type EncodingOptions = SharedOptions & {\n    encoding: BufferEncoding\n    objectMode?: false\n  }\n\n  /**\n   * Options for contiguous data buffer output\n   */\n  export type BufferOptions = SharedOptions & {\n    encoding?: null | 'buffer'\n    objectMode?: false\n  }\n\n  /**\n   * Options for objectMode arbitrary output\n   */\n  export type ObjectModeOptions = SharedOptions & {\n    objectMode: true\n    encoding?: null\n  }\n\n  /**\n   * Utility type to determine allowed options based on read type\n   */\n  export type Options =\n    | ObjectModeOptions\n    | (T extends string\n        ? EncodingOptions\n        : T extends Buffer\n        ? BufferOptions\n        : SharedOptions)\n}\n\nconst isObjectModeOptions = (\n  o: Minipass.SharedOptions\n): o is Minipass.ObjectModeOptions => !!o.objectMode\n\nconst isEncodingOptions = (\n  o: Minipass.SharedOptions\n): o is Minipass.EncodingOptions =>\n  !o.objectMode && !!o.encoding && o.encoding !== 'buffer'\n\n/**\n * Main export, the Minipass class\n *\n * `RType` is the type of data emitted, defaults to Buffer\n *\n * `WType` is the type of data to be written, if RType is buffer or string,\n * then any {@link Minipass.ContiguousData} is allowed.\n *\n * `Events` is the set of event handler signatures that this object\n * will emit, see {@link Minipass.Events}\n */\nexport class Minipass<\n    RType extends unknown = Buffer,\n    WType extends unknown = RType extends Minipass.BufferOrString\n      ? 
Minipass.ContiguousData\n      : RType,\n    Events extends Minipass.Events = Minipass.Events\n  >\n  extends EventEmitter\n  implements Minipass.DualIterable\n{\n  [FLOWING]: boolean = false;\n  [PAUSED]: boolean = false;\n  [PIPES]: Pipe[] = [];\n  [BUFFER]: RType[] = [];\n  [OBJECTMODE]: boolean;\n  [ENCODING]: BufferEncoding | null;\n  [ASYNC]: boolean;\n  [DECODER]: SD | null;\n  [EOF]: boolean = false;\n  [EMITTED_END]: boolean = false;\n  [EMITTING_END]: boolean = false;\n  [CLOSED]: boolean = false;\n  [EMITTED_ERROR]: unknown = null;\n  [BUFFERLENGTH]: number = 0;\n  [DESTROYED]: boolean = false;\n  [SIGNAL]?: AbortSignal;\n  [ABORTED]: boolean = false;\n  [DATALISTENERS]: number = 0;\n  [DISCARDED]: boolean = false\n\n  /**\n   * true if the stream can be written\n   */\n  writable: boolean = true\n  /**\n   * true if the stream can be read\n   */\n  readable: boolean = true\n\n  /**\n   * If `RType` is Buffer, then options do not need to be provided.\n   * Otherwise, an options object must be provided to specify either\n   * {@link Minipass.SharedOptions.objectMode} or\n   * {@link Minipass.SharedOptions.encoding}, as appropriate.\n   */\n  constructor(\n    ...args:\n      | [Minipass.ObjectModeOptions]\n      | (RType extends Buffer\n          ? [] | [Minipass.Options]\n          : [Minipass.Options])\n  ) {\n    const options: Minipass.Options = (args[0] ||\n      {}) as Minipass.Options\n    super()\n    if (options.objectMode && typeof options.encoding === 'string') {\n      throw new TypeError(\n        'Encoding and objectMode may not be used together'\n      )\n    }\n    if (isObjectModeOptions(options)) {\n      this[OBJECTMODE] = true\n      this[ENCODING] = null\n    } else if (isEncodingOptions(options)) {\n      this[ENCODING] = options.encoding\n      this[OBJECTMODE] = false\n    } else {\n      this[OBJECTMODE] = false\n      this[ENCODING] = null\n    }\n    this[ASYNC] = !!options.async\n    this[DECODER] = this[ENCODING]\n      ? 
(new StringDecoder(this[ENCODING]) as SD)\n      : null\n\n    //@ts-ignore - private option for debugging and testing\n    if (options && options.debugExposeBuffer === true) {\n      Object.defineProperty(this, 'buffer', { get: () => this[BUFFER] })\n    }\n    //@ts-ignore - private option for debugging and testing\n    if (options && options.debugExposePipes === true) {\n      Object.defineProperty(this, 'pipes', { get: () => this[PIPES] })\n    }\n\n    const { signal } = options\n    if (signal) {\n      this[SIGNAL] = signal\n      if (signal.aborted) {\n        this[ABORT]()\n      } else {\n        signal.addEventListener('abort', () => this[ABORT]())\n      }\n    }\n  }\n\n  /**\n   * The amount of data stored in the buffer waiting to be read.\n   *\n   * For Buffer strings, this will be the total byte length.\n   * For string encoding streams, this will be the string character length,\n   * according to JavaScript's `string.length` logic.\n   * For objectMode streams, this is a count of the items waiting to be\n   * emitted.\n   */\n  get bufferLength() {\n    return this[BUFFERLENGTH]\n  }\n\n  /**\n   * The `BufferEncoding` currently in use, or `null`\n   */\n  get encoding() {\n    return this[ENCODING]\n  }\n\n  /**\n   * @deprecated - This is a read only property\n   */\n  set encoding(_enc) {\n    throw new Error('Encoding must be set at instantiation time')\n  }\n\n  /**\n   * @deprecated - Encoding may only be set at instantiation time\n   */\n  setEncoding(_enc: Minipass.Encoding) {\n    throw new Error('Encoding must be set at instantiation time')\n  }\n\n  /**\n   * True if this is an objectMode stream\n   */\n  get objectMode() {\n    return this[OBJECTMODE]\n  }\n\n  /**\n   * @deprecated - This is a read-only property\n   */\n  set objectMode(_om) {\n    throw new Error('objectMode must be set at instantiation time')\n  }\n\n  /**\n   * true if this is an async stream\n   */\n  get ['async'](): boolean {\n    return this[ASYNC]\n  }\n  /**\n   * Set to true to make this stream async.\n   *\n   * Once set, it cannot be unset, as this would potentially cause incorrect\n   * behavior.  Ie, a sync stream can be made async, but an async stream\n   * cannot be safely made sync.\n   */\n  set ['async'](a: boolean) {\n    this[ASYNC] = this[ASYNC] || !!a\n  }\n\n  // drop everything and get out of the flow completely\n  [ABORT]() {\n    this[ABORTED] = true\n    this.emit('abort', this[SIGNAL]?.reason)\n    this.destroy(this[SIGNAL]?.reason)\n  }\n\n  /**\n   * True if the stream has been aborted.\n   */\n  get aborted() {\n    return this[ABORTED]\n  }\n  /**\n   * No-op setter. Stream aborted status is set via the AbortSignal provided\n   * in the constructor options.\n   */\n  set aborted(_) {}\n\n  /**\n   * Write data into the stream\n   *\n   * If the chunk written is a string, and encoding is not specified, then\n   * `utf8` will be assumed. If the stream encoding matches the encoding of\n   * a written string, and the state of the string decoder allows it, then\n   * the string will be passed through to either the output or the internal\n   * buffer without any processing. Otherwise, it will be turned into a\n   * Buffer object for processing into the desired encoding.\n   *\n   * If provided, `cb` function is called immediately before return for\n   * sync streams, or on next tick for async streams, because for this\n   * base class, a chunk is considered \"processed\" once it is accepted\n   * and either emitted or buffered. 
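A quick construction sketch of the option shapes validated above; all names and values are illustrative.

```typescript
import { Minipass } from 'minipass'

// The three mutually exclusive shapes: Buffer output (the default), string
// output via `encoding`, and arbitrary values via `objectMode`. Combining
// objectMode with a string encoding throws a TypeError at construction.
const bytes = new Minipass()
bytes.end(Buffer.from('raw'))
const text = new Minipass<string>({ encoding: 'utf8' })
const objects = new Minipass<{ id: number }>({ objectMode: true })
objects.end({ id: 1 })

// An AbortSignal supplied in the options destroys the stream on abort.
const ac = new AbortController()
const abortable = new Minipass<Buffer>({ signal: ac.signal })
abortable.on('abort', reason => console.error('stream aborted:', reason))
ac.abort(new Error('shutting down'))

// write()/end() feed data through; concat() resolves to the joined output.
text.end('hello, world')
text.concat().then(s => console.log(s.toUpperCase()))
```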
That is, the callback does not indicate\n   * that the chunk has been eventually emitted, though of course child\n   * classes can override this function to do whatever processing is required\n   * and call `super.write(...)` only once processing is completed.\n   */\n  write(chunk: WType, cb?: () => void): boolean\n  write(\n    chunk: WType,\n    encoding?: Minipass.Encoding,\n    cb?: () => void\n  ): boolean\n  write(\n    chunk: WType,\n    encoding?: Minipass.Encoding | (() => void),\n    cb?: () => void\n  ): boolean {\n    if (this[ABORTED]) return false\n    if (this[EOF]) throw new Error('write after end')\n\n    if (this[DESTROYED]) {\n      this.emit(\n        'error',\n        Object.assign(\n          new Error('Cannot call write after a stream was destroyed'),\n          { code: 'ERR_STREAM_DESTROYED' }\n        )\n      )\n      return true\n    }\n\n    if (typeof encoding === 'function') {\n      cb = encoding\n      encoding = 'utf8'\n    }\n\n    if (!encoding) encoding = 'utf8'\n\n    const fn = this[ASYNC] ? defer : nodefer\n\n    // convert array buffers and typed array views into buffers\n    // at some point in the future, we may want to do the opposite!\n    // leave strings and buffers as-is\n    // anything is only allowed if in object mode, so throw\n    if (!this[OBJECTMODE] && !Buffer.isBuffer(chunk)) {\n      if (isArrayBufferView(chunk)) {\n        //@ts-ignore - sinful unsafe type changing\n        chunk = Buffer.from(\n          chunk.buffer,\n          chunk.byteOffset,\n          chunk.byteLength\n        )\n      } else if (isArrayBufferLike(chunk)) {\n        //@ts-ignore - sinful unsafe type changing\n        chunk = Buffer.from(chunk)\n      } else if (typeof chunk !== 'string') {\n        throw new Error(\n          'Non-contiguous data written to non-objectMode stream'\n        )\n      }\n    }\n\n    // handle object mode up front, since it's simpler\n    // this yields better performance, fewer checks later.\n    if (this[OBJECTMODE]) {\n      // maybe impossible?\n      /* c8 ignore start */\n      if (this[FLOWING] && this[BUFFERLENGTH] !== 0) this[FLUSH](true)\n      /* c8 ignore stop */\n\n      if (this[FLOWING]) this.emit('data', chunk as unknown as RType)\n      else this[BUFFERPUSH](chunk as unknown as RType)\n\n      if (this[BUFFERLENGTH] !== 0) this.emit('readable')\n\n      if (cb) fn(cb)\n\n      return this[FLOWING]\n    }\n\n    // at this point the chunk is a buffer or string\n    // don't buffer it up or send it to the decoder\n    if (!(chunk as Minipass.BufferOrString).length) {\n      if (this[BUFFERLENGTH] !== 0) this.emit('readable')\n      if (cb) fn(cb)\n      return this[FLOWING]\n    }\n\n    // fast-path writing strings of same encoding to a stream with\n    // an empty buffer, skipping the buffer/decoder dance\n    if (\n      typeof chunk === 'string' &&\n      // unless it is a string already ready for us to use\n      !(encoding === this[ENCODING] && !this[DECODER]?.lastNeed)\n    ) {\n      //@ts-ignore - sinful unsafe type change\n      chunk = Buffer.from(chunk, encoding)\n    }\n\n    if (Buffer.isBuffer(chunk) && this[ENCODING]) {\n      //@ts-ignore - sinful unsafe type change\n      chunk = this[DECODER].write(chunk)\n    }\n\n    // Note: flushing CAN potentially switch us into not-flowing mode\n    if (this[FLOWING] && this[BUFFERLENGTH] !== 0) this[FLUSH](true)\n\n    if (this[FLOWING]) this.emit('data', chunk as unknown as RType)\n    else this[BUFFERPUSH](chunk as unknown as RType)\n\n    if 
(this[BUFFERLENGTH] !== 0) this.emit('readable')\n\n    if (cb) fn(cb)\n\n    return this[FLOWING]\n  }\n\n  /**\n   * Low-level explicit read method.\n   *\n   * In objectMode, the argument is ignored, and one item is returned if\n   * available.\n   *\n   * `n` is the number of bytes (or in the case of encoding streams,\n   * characters) to consume. If `n` is not provided, then the entire buffer\n   * is returned, or `null` is returned if no data is available.\n   *\n   * If `n` is greater that the amount of data in the internal buffer,\n   * then `null` is returned.\n   */\n  read(n?: number | null): RType | null {\n    if (this[DESTROYED]) return null\n    this[DISCARDED] = false\n\n    if (\n      this[BUFFERLENGTH] === 0 ||\n      n === 0 ||\n      (n && n > this[BUFFERLENGTH])\n    ) {\n      this[MAYBE_EMIT_END]()\n      return null\n    }\n\n    if (this[OBJECTMODE]) n = null\n\n    if (this[BUFFER].length > 1 && !this[OBJECTMODE]) {\n      // not object mode, so if we have an encoding, then RType is string\n      // otherwise, must be Buffer\n      this[BUFFER] = [\n        (this[ENCODING]\n          ? this[BUFFER].join('')\n          : Buffer.concat(\n              this[BUFFER] as Buffer[],\n              this[BUFFERLENGTH]\n            )) as RType,\n      ]\n    }\n\n    const ret = this[READ](n || null, this[BUFFER][0] as RType)\n    this[MAYBE_EMIT_END]()\n    return ret\n  }\n\n  [READ](n: number | null, chunk: RType) {\n    if (this[OBJECTMODE]) this[BUFFERSHIFT]()\n    else {\n      const c = chunk as Minipass.BufferOrString\n      if (n === c.length || n === null) this[BUFFERSHIFT]()\n      else if (typeof c === 'string') {\n        this[BUFFER][0] = c.slice(n) as RType\n        chunk = c.slice(0, n) as RType\n        this[BUFFERLENGTH] -= n\n      } else {\n        this[BUFFER][0] = c.subarray(n) as RType\n        chunk = c.subarray(0, n) as RType\n        this[BUFFERLENGTH] -= n\n      }\n    }\n\n    this.emit('data', chunk)\n\n    if (!this[BUFFER].length && !this[EOF]) this.emit('drain')\n\n    return chunk\n  }\n\n  /**\n   * End the stream, optionally providing a final write.\n   *\n   * See {@link Minipass#write} for argument descriptions\n   */\n  end(cb?: () => void): this\n  end(chunk: WType, cb?: () => void): this\n  end(chunk: WType, encoding?: Minipass.Encoding, cb?: () => void): this\n  end(\n    chunk?: WType | (() => void),\n    encoding?: Minipass.Encoding | (() => void),\n    cb?: () => void\n  ): this {\n    if (typeof chunk === 'function') {\n      cb = chunk as () => void\n      chunk = undefined\n    }\n    if (typeof encoding === 'function') {\n      cb = encoding\n      encoding = 'utf8'\n    }\n    if (chunk !== undefined) this.write(chunk, encoding)\n    if (cb) this.once('end', cb)\n    this[EOF] = true\n    this.writable = false\n\n    // if we haven't written anything, then go ahead and emit,\n    // even if we're not reading.\n    // we'll re-emit if a new 'end' listener is added anyway.\n    // This makes MP more suitable to write-only use cases.\n    if (this[FLOWING] || !this[PAUSED]) this[MAYBE_EMIT_END]()\n    return this\n  }\n\n  // don't let the internal resume be overwritten\n  [RESUME]() {\n    if (this[DESTROYED]) return\n\n    if (!this[DATALISTENERS] && !this[PIPES].length) {\n      this[DISCARDED] = true\n    }\n    this[PAUSED] = false\n    this[FLOWING] = true\n    this.emit('resume')\n    if (this[BUFFER].length) this[FLUSH]()\n    else if (this[EOF]) this[MAYBE_EMIT_END]()\n    else this.emit('drain')\n  }\n\n  /**\n   * 
Resume the stream if it is currently in a paused state\n   *\n   * If called when there are no pipe destinations or `data` event listeners,\n   * this will place the stream in a \"discarded\" state, where all data will\n   * be thrown away. The discarded state is removed if a pipe destination or\n   * data handler is added, if pause() is called, or if any synchronous or\n   * asynchronous iteration is started.\n   */\n  resume() {\n    return this[RESUME]()\n  }\n\n  /**\n   * Pause the stream\n   */\n  pause() {\n    this[FLOWING] = false\n    this[PAUSED] = true\n    this[DISCARDED] = false\n  }\n\n  /**\n   * true if the stream has been forcibly destroyed\n   */\n  get destroyed() {\n    return this[DESTROYED]\n  }\n\n  /**\n   * true if the stream is currently in a flowing state, meaning that\n   * any writes will be immediately emitted.\n   */\n  get flowing() {\n    return this[FLOWING]\n  }\n\n  /**\n   * true if the stream is currently in a paused state\n   */\n  get paused() {\n    return this[PAUSED]\n  }\n\n  [BUFFERPUSH](chunk: RType) {\n    if (this[OBJECTMODE]) this[BUFFERLENGTH] += 1\n    else this[BUFFERLENGTH] += (chunk as Minipass.BufferOrString).length\n    this[BUFFER].push(chunk)\n  }\n\n  [BUFFERSHIFT](): RType {\n    if (this[OBJECTMODE]) this[BUFFERLENGTH] -= 1\n    else\n      this[BUFFERLENGTH] -= (\n        this[BUFFER][0] as Minipass.BufferOrString\n      ).length\n    return this[BUFFER].shift() as RType\n  }\n\n  [FLUSH](noDrain: boolean = false) {\n    do {} while (\n      this[FLUSHCHUNK](this[BUFFERSHIFT]()) &&\n      this[BUFFER].length\n    )\n\n    if (!noDrain && !this[BUFFER].length && !this[EOF]) this.emit('drain')\n  }\n\n  [FLUSHCHUNK](chunk: RType) {\n    this.emit('data', chunk)\n    return this[FLOWING]\n  }\n\n  /**\n   * Pipe all data emitted by this stream into the destination provided.\n   *\n   * Triggers the flow of data.\n   */\n  pipe(dest: W, opts?: PipeOptions): W {\n    if (this[DESTROYED]) return dest\n    this[DISCARDED] = false\n\n    const ended = this[EMITTED_END]\n    opts = opts || {}\n    if (dest === proc.stdout || dest === proc.stderr) opts.end = false\n    else opts.end = opts.end !== false\n    opts.proxyErrors = !!opts.proxyErrors\n\n    // piping an ended stream ends immediately\n    if (ended) {\n      if (opts.end) dest.end()\n    } else {\n      // \"as\" here just ignores the WType, which pipes don't care about,\n      // since they're only consuming from us, and writing to the dest\n      this[PIPES].push(\n        !opts.proxyErrors\n          ? 
new Pipe(this as Minipass, dest, opts)\n          : new PipeProxyErrors(this as Minipass, dest, opts)\n      )\n      if (this[ASYNC]) defer(() => this[RESUME]())\n      else this[RESUME]()\n    }\n\n    return dest\n  }\n\n  /**\n   * Fully unhook a piped destination stream.\n   *\n   * If the destination stream was the only consumer of this stream (ie,\n   * there are no other piped destinations or `'data'` event listeners)\n   * then the flow of data will stop until there is another consumer or\n   * {@link Minipass#resume} is explicitly called.\n   */\n  unpipe(dest: W) {\n    const p = this[PIPES].find(p => p.dest === dest)\n    if (p) {\n      if (this[PIPES].length === 1) {\n        if (this[FLOWING] && this[DATALISTENERS] === 0) {\n          this[FLOWING] = false\n        }\n        this[PIPES] = []\n      } else this[PIPES].splice(this[PIPES].indexOf(p), 1)\n      p.unpipe()\n    }\n  }\n\n  /**\n   * Alias for {@link Minipass#on}\n   */\n  addListener(\n    ev: Event,\n    handler: (...args: Events[Event]) => any\n  ): this {\n    return this.on(ev, handler)\n  }\n\n  /**\n   * Mostly identical to `EventEmitter.on`, with the following\n   * behavior differences to prevent data loss and unnecessary hangs:\n   *\n   * - Adding a 'data' event handler will trigger the flow of data\n   *\n   * - Adding a 'readable' event handler when there is data waiting to be read\n   *   will cause 'readable' to be emitted immediately.\n   *\n   * - Adding an 'endish' event handler ('end', 'finish', etc.) which has\n   *   already passed will cause the event to be emitted immediately and all\n   *   handlers removed.\n   *\n   * - Adding an 'error' event handler after an error has been emitted will\n   *   cause the event to be re-emitted immediately with the error previously\n   *   raised.\n   */\n  on(\n    ev: Event,\n    handler: (...args: Events[Event]) => any\n  ): this {\n    const ret = super.on(\n      ev as string | symbol,\n      handler as (...a: any[]) => any\n    )\n    if (ev === 'data') {\n      this[DISCARDED] = false\n      this[DATALISTENERS]++\n      if (!this[PIPES].length && !this[FLOWING]) {\n        this[RESUME]()\n      }\n    } else if (ev === 'readable' && this[BUFFERLENGTH] !== 0) {\n      super.emit('readable')\n    } else if (isEndish(ev) && this[EMITTED_END]) {\n      super.emit(ev)\n      this.removeAllListeners(ev)\n    } else if (ev === 'error' && this[EMITTED_ERROR]) {\n      const h = handler as (...a: Events['error']) => any\n      if (this[ASYNC]) defer(() => h.call(this, this[EMITTED_ERROR]))\n      else h.call(this, this[EMITTED_ERROR])\n    }\n    return ret\n  }\n\n  /**\n   * Alias for {@link Minipass#off}\n   */\n  removeListener(\n    ev: Event,\n    handler: (...args: Events[Event]) => any\n  ) {\n    return this.off(ev, handler)\n  }\n\n  /**\n   * Mostly identical to `EventEmitter.off`\n   *\n   * If a 'data' event handler is removed, and it was the last consumer\n   * (ie, there are no pipe destinations or other 'data' event listeners),\n   * then the flow of data will stop until there is another consumer or\n   * {@link Minipass#resume} is explicitly called.\n   */\n  off(\n    ev: Event,\n    handler: (...args: Events[Event]) => any\n  ) {\n    const ret = super.off(\n      ev as string | symbol,\n      handler as (...a: any[]) => any\n    )\n    // if we previously had listeners, and now we don't, and we don't\n    // have any pipes, then stop the flow, unless it's been explicitly\n    // put in a discarded flowing state via stream.resume().\n    
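// Illustrative sketch, not part of the upstream source: the recount below
// makes listener removal symmetric with the on('data') handling above, e.g.
//
//   const src = new Minipass({ objectMode: true })
//   const log = (c: unknown) => console.log(c)
//   src.on('data', log)  // DATALISTENERS becomes 1, stream starts flowing
//   src.off('data', log) // recount to 0, flow stops, unless resume() had
//                        // already put the stream in the discarded state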
if (ev === 'data') {\n      this[DATALISTENERS] = this.listeners('data').length\n      if (\n        this[DATALISTENERS] === 0 &&\n        !this[DISCARDED] &&\n        !this[PIPES].length\n      ) {\n        this[FLOWING] = false\n      }\n    }\n    return ret\n  }\n\n  /**\n   * Mostly identical to `EventEmitter.removeAllListeners`\n   *\n   * If all 'data' event handlers are removed, and they were the last consumer\n   * (ie, there are no pipe destinations), then the flow of data will stop\n   * until there is another consumer or {@link Minipass#resume} is explicitly\n   * called.\n   */\n  removeAllListeners(ev?: Event) {\n    const ret = super.removeAllListeners(ev as string | symbol | undefined)\n    if (ev === 'data' || ev === undefined) {\n      this[DATALISTENERS] = 0\n      if (!this[DISCARDED] && !this[PIPES].length) {\n        this[FLOWING] = false\n      }\n    }\n    return ret\n  }\n\n  /**\n   * true if the 'end' event has been emitted\n   */\n  get emittedEnd() {\n    return this[EMITTED_END]\n  }\n\n  [MAYBE_EMIT_END]() {\n    if (\n      !this[EMITTING_END] &&\n      !this[EMITTED_END] &&\n      !this[DESTROYED] &&\n      this[BUFFER].length === 0 &&\n      this[EOF]\n    ) {\n      this[EMITTING_END] = true\n      this.emit('end')\n      this.emit('prefinish')\n      this.emit('finish')\n      if (this[CLOSED]) this.emit('close')\n      this[EMITTING_END] = false\n    }\n  }\n\n  /**\n   * Mostly identical to `EventEmitter.emit`, with the following\n   * behavior differences to prevent data loss and unnecessary hangs:\n   *\n   * If the stream has been destroyed, and the event is something other\n   * than 'close' or 'error', then `false` is returned and no handlers\n   * are called.\n   *\n   * If the event is 'end', and has already been emitted, then the event\n   * is ignored. If the stream is in a paused or non-flowing state, then\n   * the event will be deferred until data flow resumes. If the stream is\n   * async, then handlers will be called on the next tick rather than\n   * immediately.\n   *\n   * If the event is 'close', and 'end' has not yet been emitted, then\n   * the event will be deferred until after 'end' is emitted.\n   *\n   * If the event is 'error', and an AbortSignal was provided for the stream,\n   * and there are no listeners, then the event is ignored, matching the\n   * behavior of node core streams in the presense of an AbortSignal.\n   *\n   * If the event is 'finish' or 'prefinish', then all listeners will be\n   * removed after emitting the event, to prevent double-firing.\n   */\n  emit(\n    ev: Event,\n    ...args: Events[Event]\n  ): boolean {\n    const data = args[0]\n    // error and close are only events allowed after calling destroy()\n    if (\n      ev !== 'error' &&\n      ev !== 'close' &&\n      ev !== DESTROYED &&\n      this[DESTROYED]\n    ) {\n      return false\n    } else if (ev === 'data') {\n      return !this[OBJECTMODE] && !data\n        ? false\n        : this[ASYNC]\n        ? 
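/*
 * Illustrative comment, not upstream code: for an async stream the handler
 * call is deferred to the next microtask, but emit() still needs a
 * synchronous boolean return, so the comma expression below schedules the
 * deferred EMITDATA call and immediately evaluates to `true`.
 */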
(defer(() => this[EMITDATA](data as RType)), true)\n        : this[EMITDATA](data as RType)\n    } else if (ev === 'end') {\n      return this[EMITEND]()\n    } else if (ev === 'close') {\n      this[CLOSED] = true\n      // don't emit close before 'end' and 'finish'\n      if (!this[EMITTED_END] && !this[DESTROYED]) return false\n      const ret = super.emit('close')\n      this.removeAllListeners('close')\n      return ret\n    } else if (ev === 'error') {\n      this[EMITTED_ERROR] = data\n      super.emit(ERROR, data)\n      const ret =\n        !this[SIGNAL] || this.listeners('error').length\n          ? super.emit('error', data)\n          : false\n      this[MAYBE_EMIT_END]()\n      return ret\n    } else if (ev === 'resume') {\n      const ret = super.emit('resume')\n      this[MAYBE_EMIT_END]()\n      return ret\n    } else if (ev === 'finish' || ev === 'prefinish') {\n      const ret = super.emit(ev)\n      this.removeAllListeners(ev)\n      return ret\n    }\n\n    // Some other unknown event\n    const ret = super.emit(ev as string, ...args)\n    this[MAYBE_EMIT_END]()\n    return ret\n  }\n\n  [EMITDATA](data: RType) {\n    for (const p of this[PIPES]) {\n      if (p.dest.write(data as RType) === false) this.pause()\n    }\n    const ret = this[DISCARDED] ? false : super.emit('data', data)\n    this[MAYBE_EMIT_END]()\n    return ret\n  }\n\n  [EMITEND]() {\n    if (this[EMITTED_END]) return false\n\n    this[EMITTED_END] = true\n    this.readable = false\n    return this[ASYNC]\n      ? (defer(() => this[EMITEND2]()), true)\n      : this[EMITEND2]()\n  }\n\n  [EMITEND2]() {\n    if (this[DECODER]) {\n      const data = this[DECODER].end()\n      if (data) {\n        for (const p of this[PIPES]) {\n          p.dest.write(data as RType)\n        }\n        if (!this[DISCARDED]) super.emit('data', data)\n      }\n    }\n\n    for (const p of this[PIPES]) {\n      p.end()\n    }\n    const ret = super.emit('end')\n    this.removeAllListeners('end')\n    return ret\n  }\n\n  /**\n   * Return a Promise that resolves to an array of all emitted data once\n   * the stream ends.\n   */\n  async collect(): Promise {\n    const buf: RType[] & { dataLength: number } = Object.assign([], {\n      dataLength: 0,\n    })\n    if (!this[OBJECTMODE]) buf.dataLength = 0\n    // set the promise first, in case an error is raised\n    // by triggering the flow here.\n    const p = this.promise()\n    this.on('data', c => {\n      buf.push(c)\n      if (!this[OBJECTMODE])\n        buf.dataLength += (c as Minipass.BufferOrString).length\n    })\n    await p\n    return buf\n  }\n\n  /**\n   * Return a Promise that resolves to the concatenation of all emitted data\n   * once the stream ends.\n   *\n   * Not allowed on objectMode streams.\n   */\n  async concat(): Promise {\n    if (this[OBJECTMODE]) {\n      throw new Error('cannot concat in objectMode')\n    }\n    const buf = await this.collect()\n    return (\n      this[ENCODING]\n        ? 
buf.join('')\n        : Buffer.concat(buf as Buffer[], buf.dataLength)\n    ) as RType\n  }\n\n  /**\n   * Return a void Promise that resolves once the stream ends.\n   */\n  async promise(): Promise {\n    return new Promise((resolve, reject) => {\n      this.on(DESTROYED, () => reject(new Error('stream destroyed')))\n      this.on('error', er => reject(er))\n      this.on('end', () => resolve())\n    })\n  }\n\n  /**\n   * Asynchronous `for await of` iteration.\n   *\n   * This will continue emitting all chunks until the stream terminates.\n   */\n  [Symbol.asyncIterator](): AsyncGenerator {\n    // set this up front, in case the consumer doesn't call next()\n    // right away.\n    this[DISCARDED] = false\n    let stopped = false\n    const stop = async (): Promise> => {\n      this.pause()\n      stopped = true\n      return { value: undefined, done: true }\n    }\n    const next = (): Promise> => {\n      if (stopped) return stop()\n      const res = this.read()\n      if (res !== null) return Promise.resolve({ done: false, value: res })\n\n      if (this[EOF]) return stop()\n\n      let resolve!: (res: IteratorResult) => void\n      let reject!: (er: unknown) => void\n      const onerr = (er: unknown) => {\n        this.off('data', ondata)\n        this.off('end', onend)\n        this.off(DESTROYED, ondestroy)\n        stop()\n        reject(er)\n      }\n      const ondata = (value: RType) => {\n        this.off('error', onerr)\n        this.off('end', onend)\n        this.off(DESTROYED, ondestroy)\n        this.pause()\n        resolve({ value, done: !!this[EOF] })\n      }\n      const onend = () => {\n        this.off('error', onerr)\n        this.off('data', ondata)\n        this.off(DESTROYED, ondestroy)\n        stop()\n        resolve({ done: true, value: undefined })\n      }\n      const ondestroy = () => onerr(new Error('stream destroyed'))\n      return new Promise>((res, rej) => {\n        reject = rej\n        resolve = res\n        this.once(DESTROYED, ondestroy)\n        this.once('error', onerr)\n        this.once('end', onend)\n        this.once('data', ondata)\n      })\n    }\n\n    return {\n      next,\n      throw: stop,\n      return: stop,\n      [Symbol.asyncIterator]() {\n        return this\n      },\n    }\n  }\n\n  /**\n   * Synchronous `for of` iteration.\n   *\n   * The iteration will terminate when the internal buffer runs out, even\n   * if the stream has not yet terminated.\n   */\n  [Symbol.iterator](): Generator {\n    // set this up front, in case the consumer doesn't call next()\n    // right away.\n    this[DISCARDED] = false\n    let stopped = false\n    const stop = (): IteratorReturnResult => {\n      this.pause()\n      this.off(ERROR, stop)\n      this.off(DESTROYED, stop)\n      this.off('end', stop)\n      stopped = true\n      return { done: true, value: undefined }\n    }\n\n    const next = (): IteratorResult => {\n      if (stopped) return stop()\n      const value = this.read()\n      return value === null ? 
stop() : { done: false, value }\n    }\n\n    this.once('end', stop)\n    this.once(ERROR, stop)\n    this.once(DESTROYED, stop)\n\n    return {\n      next,\n      throw: stop,\n      return: stop,\n      [Symbol.iterator]() {\n        return this\n      },\n    }\n  }\n\n  /**\n   * Destroy a stream, preventing it from being used for any further purpose.\n   *\n   * If the stream has a `close()` method, then it will be called on\n   * destruction.\n   *\n   * After destruction, any attempt to write data, read data, or emit most\n   * events will be ignored.\n   *\n   * If an error argument is provided, then it will be emitted in an\n   * 'error' event.\n   */\n  destroy(er?: unknown) {\n    if (this[DESTROYED]) {\n      if (er) this.emit('error', er)\n      else this.emit(DESTROYED)\n      return this\n    }\n\n    this[DESTROYED] = true\n    this[DISCARDED] = true\n\n    // throw away all buffered data, it's never coming out\n    this[BUFFER].length = 0\n    this[BUFFERLENGTH] = 0\n\n    const wc = this as Minipass & {\n      close?: () => void\n    }\n    if (typeof wc.close === 'function' && !this[CLOSED]) wc.close()\n\n    if (er) this.emit('error', er)\n    // if no error to emit, still reject pending promises\n    else this.emit(DESTROYED)\n\n    return this\n  }\n\n  /**\n   * Alias for {@link isStream}\n   *\n   * Former export location, maintained for backwards compatibility.\n   *\n   * @deprecated\n   */\n  static get isStream() {\n    return isStream\n  }\n}\n", "// this is just a very light wrapper around 2 arrays with an offset index\n\nimport { GLOBSTAR } from 'minimatch'\nexport type MMPattern = string | RegExp | typeof GLOBSTAR\n\n// an array of length >= 1\nexport type PatternList = [p: MMPattern, ...rest: MMPattern[]]\nexport type UNCPatternList = [\n  p0: '',\n  p1: '',\n  p2: string,\n  p3: string,\n  ...rest: MMPattern[],\n]\nexport type DrivePatternList = [p0: string, ...rest: MMPattern[]]\nexport type AbsolutePatternList = [p0: '', ...rest: MMPattern[]]\nexport type GlobList = [p: string, ...rest: string[]]\n\nconst isPatternList = (pl: MMPattern[]): pl is PatternList =>\n  pl.length >= 1\nconst isGlobList = (gl: string[]): gl is GlobList => gl.length >= 1\n\n/**\n * An immutable-ish view on an array of glob parts and their parsed\n * results\n */\nexport class Pattern {\n  readonly #patternList: PatternList\n  readonly #globList: GlobList\n  readonly #index: number\n  readonly length: number\n  readonly #platform: NodeJS.Platform\n  #rest?: Pattern | null\n  #globString?: string\n  #isDrive?: boolean\n  #isUNC?: boolean\n  #isAbsolute?: boolean\n  #followGlobstar: boolean = true\n\n  constructor(\n    patternList: MMPattern[],\n    globList: string[],\n    index: number,\n    platform: NodeJS.Platform,\n  ) {\n    if (!isPatternList(patternList)) {\n      throw new TypeError('empty pattern list')\n    }\n    if (!isGlobList(globList)) {\n      throw new TypeError('empty glob list')\n    }\n    if (globList.length !== patternList.length) {\n      throw new TypeError('mismatched pattern list and glob list lengths')\n    }\n    this.length = patternList.length\n    if (index < 0 || index >= this.length) {\n      throw new TypeError('index out of range')\n    }\n    this.#patternList = patternList\n    this.#globList = globList\n    this.#index = index\n    this.#platform = platform\n\n    // normalize root entries of absolute patterns on initial creation.\n    if (this.#index === 0) {\n      // c: => ['c:/']\n      // C:/ => ['C:/']\n      // C:/x => ['C:/', 'x']\n  
    // //host/share => ['//host/share/']\n      // //host/share/ => ['//host/share/']\n      // //host/share/x => ['//host/share/', 'x']\n      // /etc => ['/', 'etc']\n      // / => ['/']\n      if (this.isUNC()) {\n        // '' / '' / 'host' / 'share'\n        const [p0, p1, p2, p3, ...prest] = this.#patternList\n        const [g0, g1, g2, g3, ...grest] = this.#globList\n        if (prest[0] === '') {\n          // ends in /\n          prest.shift()\n          grest.shift()\n        }\n        const p = [p0, p1, p2, p3, ''].join('/')\n        const g = [g0, g1, g2, g3, ''].join('/')\n        this.#patternList = [p, ...prest]\n        this.#globList = [g, ...grest]\n        this.length = this.#patternList.length\n      } else if (this.isDrive() || this.isAbsolute()) {\n        const [p1, ...prest] = this.#patternList\n        const [g1, ...grest] = this.#globList\n        if (prest[0] === '') {\n          // ends in /\n          prest.shift()\n          grest.shift()\n        }\n        const p = (p1 as string) + '/'\n        const g = g1 + '/'\n        this.#patternList = [p, ...prest]\n        this.#globList = [g, ...grest]\n        this.length = this.#patternList.length\n      }\n    }\n  }\n\n  /**\n   * The first entry in the parsed list of patterns\n   */\n  pattern(): MMPattern {\n    return this.#patternList[this.#index] as MMPattern\n  }\n\n  /**\n   * true of if pattern() returns a string\n   */\n  isString(): boolean {\n    return typeof this.#patternList[this.#index] === 'string'\n  }\n  /**\n   * true of if pattern() returns GLOBSTAR\n   */\n  isGlobstar(): boolean {\n    return this.#patternList[this.#index] === GLOBSTAR\n  }\n  /**\n   * true if pattern() returns a regexp\n   */\n  isRegExp(): boolean {\n    return this.#patternList[this.#index] instanceof RegExp\n  }\n\n  /**\n   * The /-joined set of glob parts that make up this pattern\n   */\n  globString(): string {\n    return (this.#globString =\n      this.#globString ||\n      (this.#index === 0 ?\n        this.isAbsolute() ?\n          this.#globList[0] + this.#globList.slice(1).join('/')\n        : this.#globList.join('/')\n      : this.#globList.slice(this.#index).join('/')))\n  }\n\n  /**\n   * true if there are more pattern parts after this one\n   */\n  hasMore(): boolean {\n    return this.length > this.#index + 1\n  }\n\n  /**\n   * The rest of the pattern after this part, or null if this is the end\n   */\n  rest(): Pattern | null {\n    if (this.#rest !== undefined) return this.#rest\n    if (!this.hasMore()) return (this.#rest = null)\n    this.#rest = new Pattern(\n      this.#patternList,\n      this.#globList,\n      this.#index + 1,\n      this.#platform,\n    )\n    this.#rest.#isAbsolute = this.#isAbsolute\n    this.#rest.#isUNC = this.#isUNC\n    this.#rest.#isDrive = this.#isDrive\n    return this.#rest\n  }\n\n  /**\n   * true if the pattern represents a //unc/path/ on windows\n   */\n  isUNC(): boolean {\n    const pl = this.#patternList\n    return this.#isUNC !== undefined ?\n        this.#isUNC\n      : (this.#isUNC =\n          this.#platform === 'win32' &&\n          this.#index === 0 &&\n          pl[0] === '' &&\n          pl[1] === '' &&\n          typeof pl[2] === 'string' &&\n          !!pl[2] &&\n          typeof pl[3] === 'string' &&\n          !!pl[3])\n  }\n\n  // pattern like C:/...\n  // split = ['C:', ...]\n  // XXX: would be nice to handle patterns like `c:*` to test the cwd\n  // in c: for *, but I don't know of a way to even figure out what that\n  // cwd is without 
actually chdir'ing into it?\n  /**\n   * True if the pattern starts with a drive letter on Windows\n   */\n  isDrive(): boolean {\n    const pl = this.#patternList\n    return this.#isDrive !== undefined ?\n        this.#isDrive\n      : (this.#isDrive =\n          this.#platform === 'win32' &&\n          this.#index === 0 &&\n          this.length > 1 &&\n          typeof pl[0] === 'string' &&\n          /^[a-z]:$/i.test(pl[0]))\n  }\n\n  // pattern = '/' or '/...' or '/x/...'\n  // split = ['', ''] or ['', ...] or ['', 'x', ...]\n  // Drive and UNC both considered absolute on windows\n  /**\n   * True if the pattern is rooted on an absolute path\n   */\n  isAbsolute(): boolean {\n    const pl = this.#patternList\n    return this.#isAbsolute !== undefined ?\n        this.#isAbsolute\n      : (this.#isAbsolute =\n          (pl[0] === '' && pl.length > 1) ||\n          this.isDrive() ||\n          this.isUNC())\n  }\n\n  /**\n   * consume the root of the pattern, and return it\n   */\n  root(): string {\n    const p = this.#patternList[0]\n    return (\n        typeof p === 'string' && this.isAbsolute() && this.#index === 0\n      ) ?\n        p\n      : ''\n  }\n\n  /**\n   * Check to see if the current globstar pattern is allowed to follow\n   * a symbolic link.\n   */\n  checkFollowGlobstar(): boolean {\n    return !(\n      this.#index === 0 ||\n      !this.isGlobstar() ||\n      !this.#followGlobstar\n    )\n  }\n\n  /**\n   * Mark that the current globstar pattern is following a symbolic link\n   */\n  markFollowGlobstar(): boolean {\n    if (this.#index === 0 || !this.isGlobstar() || !this.#followGlobstar)\n      return false\n    this.#followGlobstar = false\n    return true\n  }\n}\n", "// give it a pattern, and it'll be able to tell you if\n// a given path should be ignored.\n// Ignoring a path ignores its children if the pattern ends in /**\n// Ignores are always parsed in dot:true mode\n\nimport { Minimatch, MinimatchOptions } from 'minimatch'\nimport { Path } from 'path-scurry'\nimport { Pattern } from './pattern.js'\nimport { GlobWalkerOpts } from './walker.js'\n\nexport interface IgnoreLike {\n  ignored?: (p: Path) => boolean\n  childrenIgnored?: (p: Path) => boolean\n  add?: (ignore: string) => void\n}\n\nconst defaultPlatform: NodeJS.Platform =\n  (\n    typeof process === 'object' &&\n    process &&\n    typeof process.platform === 'string'\n  ) ?\n    process.platform\n  : 'linux'\n\n/**\n * Class used to process ignored patterns\n */\nexport class Ignore implements IgnoreLike {\n  relative: Minimatch[]\n  relativeChildren: Minimatch[]\n  absolute: Minimatch[]\n  absoluteChildren: Minimatch[]\n  platform: NodeJS.Platform\n  mmopts: MinimatchOptions\n\n  constructor(\n    ignored: string[],\n    {\n      nobrace,\n      nocase,\n      noext,\n      noglobstar,\n      platform = defaultPlatform,\n    }: GlobWalkerOpts,\n  ) {\n    this.relative = []\n    this.absolute = []\n    this.relativeChildren = []\n    this.absoluteChildren = []\n    this.platform = platform\n    this.mmopts = {\n      dot: true,\n      nobrace,\n      nocase,\n      noext,\n      noglobstar,\n      optimizationLevel: 2,\n      platform,\n      nocomment: true,\n      nonegate: true,\n    }\n    for (const ign of ignored) this.add(ign)\n  }\n\n  add(ign: string) {\n    // this is a little weird, but it gives us a clean set of optimized\n    // minimatch matchers, without getting tripped up if one of them\n    // ends in /** inside a brace section, and it's only inefficient at\n    // the start of the 
walk, not along it.\n    // It'd be nice if the Pattern class just had a .test() method, but\n    // handling globstars is a bit of a pita, and that code already lives\n    // in minimatch anyway.\n    // Another way would be if maybe Minimatch could take its set/globParts\n    // as an option, and then we could at least just use Pattern to test\n    // for absolute-ness.\n    // Yet another way, Minimatch could take an array of glob strings, and\n    // a cwd option, and do the right thing.\n    const mm = new Minimatch(ign, this.mmopts)\n    for (let i = 0; i < mm.set.length; i++) {\n      const parsed = mm.set[i]\n      const globParts = mm.globParts[i]\n      /* c8 ignore start */\n      if (!parsed || !globParts) {\n        throw new Error('invalid pattern object')\n      }\n      // strip off leading ./ portions\n      // https://github.com/isaacs/node-glob/issues/570\n      while (parsed[0] === '.' && globParts[0] === '.') {\n        parsed.shift()\n        globParts.shift()\n      }\n      /* c8 ignore stop */\n      const p = new Pattern(parsed, globParts, 0, this.platform)\n      const m = new Minimatch(p.globString(), this.mmopts)\n      const children = globParts[globParts.length - 1] === '**'\n      const absolute = p.isAbsolute()\n      if (absolute) this.absolute.push(m)\n      else this.relative.push(m)\n      if (children) {\n        if (absolute) this.absoluteChildren.push(m)\n        else this.relativeChildren.push(m)\n      }\n    }\n  }\n\n  ignored(p: Path): boolean {\n    const fullpath = p.fullpath()\n    const fullpaths = `${fullpath}/`\n    const relative = p.relative() || '.'\n    const relatives = `${relative}/`\n    for (const m of this.relative) {\n      if (m.match(relative) || m.match(relatives)) return true\n    }\n    for (const m of this.absolute) {\n      if (m.match(fullpath) || m.match(fullpaths)) return true\n    }\n    return false\n  }\n\n  childrenIgnored(p: Path): boolean {\n    const fullpath = p.fullpath() + '/'\n    const relative = (p.relative() || '.') + '/'\n    for (const m of this.relativeChildren) {\n      if (m.match(relative)) return true\n    }\n    for (const m of this.absoluteChildren) {\n      if (m.match(fullpath)) return true\n    }\n    return false\n  }\n}\n", "// synchronous utility for filtering entries and calculating subwalks\n\nimport { GLOBSTAR, MMRegExp } from 'minimatch'\nimport { Path } from 'path-scurry'\nimport { MMPattern, Pattern } from './pattern.js'\nimport { GlobWalkerOpts } from './walker.js'\n\n/**\n * A cache of which patterns have been processed for a given Path\n */\nexport class HasWalkedCache {\n  store: Map>\n  constructor(store: Map> = new Map()) {\n    this.store = store\n  }\n  copy() {\n    return new HasWalkedCache(new Map(this.store))\n  }\n  hasWalked(target: Path, pattern: Pattern) {\n    return this.store.get(target.fullpath())?.has(pattern.globString())\n  }\n  storeWalked(target: Path, pattern: Pattern) {\n    const fullpath = target.fullpath()\n    const cached = this.store.get(fullpath)\n    if (cached) cached.add(pattern.globString())\n    else this.store.set(fullpath, new Set([pattern.globString()]))\n  }\n}\n\n/**\n * A record of which paths have been matched in a given walk step,\n * and whether they only are considered a match if they are a directory,\n * and whether their absolute or relative path should be returned.\n */\nexport class MatchRecord {\n  store: Map = new Map()\n  add(target: Path, absolute: boolean, ifDir: boolean) {\n    const n = (absolute ? 2 : 0) | (ifDir ? 
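/*
 * Illustrative note, not upstream glob code: `n` bit-packs two flags,
 * bit 2 meaning "report the absolute path" and bit 1 meaning "only a
 * match if it is a directory". The `n & current` merge below keeps a
 * restriction only when every recorded match agrees, so a later match
 * without the directory-only requirement clears an ifDir bit recorded
 * earlier by a pattern that ended in a trailing slash.
 */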
1 : 0)\n    const current = this.store.get(target)\n    this.store.set(target, current === undefined ? n : n & current)\n  }\n  // match, absolute, ifdir\n  entries(): [Path, boolean, boolean][] {\n    return [...this.store.entries()].map(([path, n]) => [\n      path,\n      !!(n & 2),\n      !!(n & 1),\n    ])\n  }\n}\n\n/**\n * A collection of patterns that must be processed in a subsequent step\n * for a given path.\n */\nexport class SubWalks {\n  store: Map = new Map()\n  add(target: Path, pattern: Pattern) {\n    if (!target.canReaddir()) {\n      return\n    }\n    const subs = this.store.get(target)\n    if (subs) {\n      if (!subs.find(p => p.globString() === pattern.globString())) {\n        subs.push(pattern)\n      }\n    } else this.store.set(target, [pattern])\n  }\n  get(target: Path): Pattern[] {\n    const subs = this.store.get(target)\n    /* c8 ignore start */\n    if (!subs) {\n      throw new Error('attempting to walk unknown path')\n    }\n    /* c8 ignore stop */\n    return subs\n  }\n  entries(): [Path, Pattern[]][] {\n    return this.keys().map(k => [k, this.store.get(k) as Pattern[]])\n  }\n  keys(): Path[] {\n    return [...this.store.keys()].filter(t => t.canReaddir())\n  }\n}\n\n/**\n * The class that processes patterns for a given path.\n *\n * Handles child entry filtering, and determining whether a path's\n * directory contents must be read.\n */\nexport class Processor {\n  hasWalkedCache: HasWalkedCache\n  matches = new MatchRecord()\n  subwalks = new SubWalks()\n  patterns?: Pattern[]\n  follow: boolean\n  dot: boolean\n  opts: GlobWalkerOpts\n\n  constructor(opts: GlobWalkerOpts, hasWalkedCache?: HasWalkedCache) {\n    this.opts = opts\n    this.follow = !!opts.follow\n    this.dot = !!opts.dot\n    this.hasWalkedCache =\n      hasWalkedCache ? 
hasWalkedCache.copy() : new HasWalkedCache()\n  }\n\n  processPatterns(target: Path, patterns: Pattern[]) {\n    this.patterns = patterns\n    const processingSet: [Path, Pattern][] = patterns.map(p => [target, p])\n\n    // map of paths to the magic-starting subwalks they need to walk\n    // first item in patterns is the filter\n\n    for (let [t, pattern] of processingSet) {\n      this.hasWalkedCache.storeWalked(t, pattern)\n\n      const root = pattern.root()\n      const absolute = pattern.isAbsolute() && this.opts.absolute !== false\n\n      // start absolute patterns at root\n      if (root) {\n        t = t.resolve(\n          root === '/' && this.opts.root !== undefined ?\n            this.opts.root\n          : root,\n        )\n        const rest = pattern.rest()\n        if (!rest) {\n          this.matches.add(t, true, false)\n          continue\n        } else {\n          pattern = rest\n        }\n      }\n\n      if (t.isENOENT()) continue\n\n      let p: MMPattern\n      let rest: Pattern | null\n      let changed = false\n      while (\n        typeof (p = pattern.pattern()) === 'string' &&\n        (rest = pattern.rest())\n      ) {\n        const c = t.resolve(p)\n        t = c\n        pattern = rest\n        changed = true\n      }\n      p = pattern.pattern()\n      rest = pattern.rest()\n      if (changed) {\n        if (this.hasWalkedCache.hasWalked(t, pattern)) continue\n        this.hasWalkedCache.storeWalked(t, pattern)\n      }\n\n      // now we have either a final string for a known entry,\n      // more strings for an unknown entry,\n      // or a pattern starting with magic, mounted on t.\n      if (typeof p === 'string') {\n        // must not be final entry, otherwise we would have\n        // concatenated it earlier.\n        const ifDir = p === '..' || p === '' || p === '.'\n        this.matches.add(t.resolve(p), absolute, ifDir)\n        continue\n      } else if (p === GLOBSTAR) {\n        // if no rest, match and subwalk pattern\n        // if rest, process rest and subwalk pattern\n        // if it's a symlink, but we didn't get here by way of a\n        // globstar match (meaning it's the first time THIS globstar\n        // has traversed a symlink), then we follow it. Otherwise, stop.\n        if (\n          !t.isSymbolicLink() ||\n          this.follow ||\n          pattern.checkFollowGlobstar()\n        ) {\n          this.subwalks.add(t, pattern)\n        }\n        const rp = rest?.pattern()\n        const rrest = rest?.rest()\n        if (!rest || ((rp === '' || rp === '.') && !rrest)) {\n          // only HAS to be a dir if it ends in **/ or **/.\n          // but ending in ** will match files as well.\n          this.matches.add(t, absolute, rp === '' || rp === '.')\n        } else {\n          if (rp === '..') {\n            // this would mean you're matching **/.. 
at the fs root,\n            // and no thanks, I'm not gonna test that specific case.\n            /* c8 ignore start */\n            const tp = t.parent || t\n            /* c8 ignore stop */\n            if (!rrest) this.matches.add(tp, absolute, true)\n            else if (!this.hasWalkedCache.hasWalked(tp, rrest)) {\n              this.subwalks.add(tp, rrest)\n            }\n          }\n        }\n      } else if (p instanceof RegExp) {\n        this.subwalks.add(t, pattern)\n      }\n    }\n\n    return this\n  }\n\n  subwalkTargets(): Path[] {\n    return this.subwalks.keys()\n  }\n\n  child() {\n    return new Processor(this.opts, this.hasWalkedCache)\n  }\n\n  // return a new Processor containing the subwalks for each\n  // child entry, and a set of matches, and\n  // a hasWalkedCache that's a copy of this one\n  // then we're going to call\n  filterEntries(parent: Path, entries: Path[]): Processor {\n    const patterns = this.subwalks.get(parent)\n    // put matches and entry walks into the results processor\n    const results = this.child()\n    for (const e of entries) {\n      for (const pattern of patterns) {\n        const absolute = pattern.isAbsolute()\n        const p = pattern.pattern()\n        const rest = pattern.rest()\n        if (p === GLOBSTAR) {\n          results.testGlobstar(e, pattern, rest, absolute)\n        } else if (p instanceof RegExp) {\n          results.testRegExp(e, p, rest, absolute)\n        } else {\n          results.testString(e, p, rest, absolute)\n        }\n      }\n    }\n    return results\n  }\n\n  testGlobstar(\n    e: Path,\n    pattern: Pattern,\n    rest: Pattern | null,\n    absolute: boolean,\n  ) {\n    if (this.dot || !e.name.startsWith('.')) {\n      if (!pattern.hasMore()) {\n        this.matches.add(e, absolute, false)\n      }\n      if (e.canReaddir()) {\n        // if we're in follow mode or it's not a symlink, just keep\n        // testing the same pattern. If there's more after the globstar,\n        // then this symlink consumes the globstar. If not, then we can\n        // follow at most ONE symlink along the way, so we mark it, which\n        // also checks to ensure that it wasn't already marked.\n        if (this.follow || !e.isSymbolicLink()) {\n          this.subwalks.add(e, pattern)\n        } else if (e.isSymbolicLink()) {\n          if (rest && pattern.checkFollowGlobstar()) {\n            this.subwalks.add(e, rest)\n          } else if (pattern.markFollowGlobstar()) {\n            this.subwalks.add(e, pattern)\n          }\n        }\n      }\n    }\n    // if the NEXT thing matches this entry, then also add\n    // the rest.\n    if (rest) {\n      const rp = rest.pattern()\n      if (\n        typeof rp === 'string' &&\n        // dots and empty were handled already\n        rp !== '..' 
&&\n        rp !== '' &&\n        rp !== '.'\n      ) {\n        this.testString(e, rp, rest.rest(), absolute)\n      } else if (rp === '..') {\n        /* c8 ignore start */\n        const ep = e.parent || e\n        /* c8 ignore stop */\n        this.subwalks.add(ep, rest)\n      } else if (rp instanceof RegExp) {\n        this.testRegExp(e, rp, rest.rest(), absolute)\n      }\n    }\n  }\n\n  testRegExp(\n    e: Path,\n    p: MMRegExp,\n    rest: Pattern | null,\n    absolute: boolean,\n  ) {\n    if (!p.test(e.name)) return\n    if (!rest) {\n      this.matches.add(e, absolute, false)\n    } else {\n      this.subwalks.add(e, rest)\n    }\n  }\n\n  testString(e: Path, p: string, rest: Pattern | null, absolute: boolean) {\n    // should never happen?\n    if (!e.isNamed(p)) return\n    if (!rest) {\n      this.matches.add(e, absolute, false)\n    } else {\n      this.subwalks.add(e, rest)\n    }\n  }\n}\n", "/**\n * Single-use utility classes to provide functionality to the {@link Glob}\n * methods.\n *\n * @module\n */\nimport { Minipass } from 'minipass'\nimport { Path } from 'path-scurry'\nimport { Ignore, IgnoreLike } from './ignore.js'\n\n// XXX can we somehow make it so that it NEVER processes a given path more than\n// once, enough that the match set tracking is no longer needed?  that'd speed\n// things up a lot.  Or maybe bring back nounique, and skip it in that case?\n\n// a single minimatch set entry with 1 or more parts\nimport { Pattern } from './pattern.js'\nimport { Processor } from './processor.js'\n\nexport interface GlobWalkerOpts {\n  absolute?: boolean\n  allowWindowsEscape?: boolean\n  cwd?: string | URL\n  dot?: boolean\n  dotRelative?: boolean\n  follow?: boolean\n  ignore?: string | string[] | IgnoreLike\n  mark?: boolean\n  matchBase?: boolean\n  // Note: maxDepth here means \"maximum actual Path.depth()\",\n  // not \"maximum depth beyond cwd\"\n  maxDepth?: number\n  nobrace?: boolean\n  nocase?: boolean\n  nodir?: boolean\n  noext?: boolean\n  noglobstar?: boolean\n  platform?: NodeJS.Platform\n  posix?: boolean\n  realpath?: boolean\n  root?: string\n  stat?: boolean\n  signal?: AbortSignal\n  windowsPathsNoEscape?: boolean\n  withFileTypes?: boolean\n  includeChildMatches?: boolean\n}\n\nexport type GWOFileTypesTrue = GlobWalkerOpts & {\n  withFileTypes: true\n}\nexport type GWOFileTypesFalse = GlobWalkerOpts & {\n  withFileTypes: false\n}\nexport type GWOFileTypesUnset = GlobWalkerOpts & {\n  withFileTypes?: undefined\n}\n\nexport type Result<O extends GlobWalkerOpts> =\n  O extends GWOFileTypesTrue ? Path\n  : O extends GWOFileTypesFalse ? string\n  : O extends GWOFileTypesUnset ? string\n  : Path | string\n\nexport type Matches<O extends GlobWalkerOpts> =\n  O extends GWOFileTypesTrue ? Set<Path>\n  : O extends GWOFileTypesFalse ? Set<string>\n  : O extends GWOFileTypesUnset ? Set<string>\n  : Set<Path | string>\n\nexport type MatchStream<O extends GlobWalkerOpts> = Minipass<\n  Result<O>,\n  Result<O>\n>\n\nconst makeIgnore = (\n  ignore: string | string[] | IgnoreLike,\n  opts: GlobWalkerOpts,\n): IgnoreLike =>\n  typeof ignore === 'string' ? new Ignore([ignore], opts)\n  : Array.isArray(ignore) ?
new Ignore(ignore, opts)\n  : ignore\n\n/**\n * basic walking utilities that all the glob walker types use\n */\nexport abstract class GlobUtil<O extends GlobWalkerOpts = GlobWalkerOpts> {\n  path: Path\n  patterns: Pattern[]\n  opts: O\n  seen: Set<Path> = new Set()\n  paused: boolean = false\n  aborted: boolean = false\n  #onResume: (() => any)[] = []\n  #ignore?: IgnoreLike\n  #sep: '\\\\' | '/'\n  signal?: AbortSignal\n  maxDepth: number\n  includeChildMatches: boolean\n\n  constructor(patterns: Pattern[], path: Path, opts: O)\n  constructor(patterns: Pattern[], path: Path, opts: O) {\n    this.patterns = patterns\n    this.path = path\n    this.opts = opts\n    this.#sep = !opts.posix && opts.platform === 'win32' ? '\\\\' : '/'\n    this.includeChildMatches = opts.includeChildMatches !== false\n    if (opts.ignore || !this.includeChildMatches) {\n      this.#ignore = makeIgnore(opts.ignore ?? [], opts)\n      if (\n        !this.includeChildMatches &&\n        typeof this.#ignore.add !== 'function'\n      ) {\n        const m = 'cannot ignore child matches, ignore lacks add() method.'\n        throw new Error(m)\n      }\n    }\n    // ignore, always set with maxDepth, but it's optional on the\n    // GlobOptions type\n    /* c8 ignore start */\n    this.maxDepth = opts.maxDepth || Infinity\n    /* c8 ignore stop */\n    if (opts.signal) {\n      this.signal = opts.signal\n      this.signal.addEventListener('abort', () => {\n        this.#onResume.length = 0\n      })\n    }\n  }\n\n  #ignored(path: Path): boolean {\n    return this.seen.has(path) || !!this.#ignore?.ignored?.(path)\n  }\n  #childrenIgnored(path: Path): boolean {\n    return !!this.#ignore?.childrenIgnored?.(path)\n  }\n\n  // backpressure mechanism\n  pause() {\n    this.paused = true\n  }\n  resume() {\n    /* c8 ignore start */\n    if (this.signal?.aborted) return\n    /* c8 ignore stop */\n    this.paused = false\n    let fn: (() => any) | undefined = undefined\n    while (!this.paused && (fn = this.#onResume.shift())) {\n      fn()\n    }\n  }\n  onResume(fn: () => any) {\n    if (this.signal?.aborted) return\n    /* c8 ignore start */\n    if (!this.paused) {\n      fn()\n    } else {\n      /* c8 ignore stop */\n      this.#onResume.push(fn)\n    }\n  }\n\n  // do the requisite realpath/stat checking, and return the path\n  // to add or undefined to filter it out.\n  async matchCheck(e: Path, ifDir: boolean): Promise<Path | undefined> {\n    if (ifDir && this.opts.nodir) return undefined\n    let rpc: Path | undefined\n    if (this.opts.realpath) {\n      rpc = e.realpathCached() || (await e.realpath())\n      if (!rpc) return undefined\n      e = rpc\n    }\n    const needStat = e.isUnknown() || this.opts.stat\n    const s = needStat ?
await e.lstat() : e\n    if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) {\n      const target = await s.realpath()\n      /* c8 ignore start */\n      if (target && (target.isUnknown() || this.opts.stat)) {\n        await target.lstat()\n      }\n      /* c8 ignore stop */\n    }\n    return this.matchCheckTest(s, ifDir)\n  }\n\n  matchCheckTest(e: Path | undefined, ifDir: boolean): Path | undefined {\n    return (\n        e &&\n          (this.maxDepth === Infinity || e.depth() <= this.maxDepth) &&\n          (!ifDir || e.canReaddir()) &&\n          (!this.opts.nodir || !e.isDirectory()) &&\n          (!this.opts.nodir ||\n            !this.opts.follow ||\n            !e.isSymbolicLink() ||\n            !e.realpathCached()?.isDirectory()) &&\n          !this.#ignored(e)\n      ) ?\n        e\n      : undefined\n  }\n\n  matchCheckSync(e: Path, ifDir: boolean): Path | undefined {\n    if (ifDir && this.opts.nodir) return undefined\n    let rpc: Path | undefined\n    if (this.opts.realpath) {\n      rpc = e.realpathCached() || e.realpathSync()\n      if (!rpc) return undefined\n      e = rpc\n    }\n    const needStat = e.isUnknown() || this.opts.stat\n    const s = needStat ? e.lstatSync() : e\n    if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) {\n      const target = s.realpathSync()\n      if (target && (target?.isUnknown() || this.opts.stat)) {\n        target.lstatSync()\n      }\n    }\n    return this.matchCheckTest(s, ifDir)\n  }\n\n  abstract matchEmit(p: Result<O>): void\n  abstract matchEmit(p: string | Path): void\n\n  matchFinish(e: Path, absolute: boolean) {\n    if (this.#ignored(e)) return\n    // we know we have an ignore if this is false, but TS doesn't\n    if (!this.includeChildMatches && this.#ignore?.add) {\n      const ign = `${e.relativePosix()}/**`\n      this.#ignore.add(ign)\n    }\n    const abs =\n      this.opts.absolute === undefined ? absolute : this.opts.absolute\n    this.seen.add(e)\n    const mark = this.opts.mark && e.isDirectory() ? this.#sep : ''\n    // ok, we have what we need!\n    if (this.opts.withFileTypes) {\n      this.matchEmit(e)\n    } else if (abs) {\n      const abs = this.opts.posix ? e.fullpathPosix() : e.fullpath()\n      this.matchEmit(abs + mark)\n    } else {\n      const rel = this.opts.posix ? e.relativePosix() : e.relative()\n      const pre =\n        this.opts.dotRelative && !rel.startsWith('..' + this.#sep) ?\n          '.' + this.#sep\n        : ''\n      this.matchEmit(!rel ? '.' + mark : pre + rel + mark)\n    }\n  }\n\n  async match(e: Path, absolute: boolean, ifDir: boolean): Promise<void> {\n    const p = await this.matchCheck(e, ifDir)\n    if (p) this.matchFinish(p, absolute)\n  }\n\n  matchSync(e: Path, absolute: boolean, ifDir: boolean): void {\n    const p = this.matchCheckSync(e, ifDir)\n    if (p) this.matchFinish(p, absolute)\n  }\n\n  walkCB(target: Path, patterns: Pattern[], cb: () => any) {\n    /* c8 ignore start */\n    if (this.signal?.aborted) cb()\n    /* c8 ignore stop */\n    this.walkCB2(target, patterns, new Processor(this.opts), cb)\n  }\n\n  walkCB2(\n    target: Path,\n    patterns: Pattern[],\n    processor: Processor,\n    cb: () => any,\n  ) {\n    if (this.#childrenIgnored(target)) return cb()\n    if (this.signal?.aborted) cb()\n    if (this.paused) {\n      this.onResume(() => this.walkCB2(target, patterns, processor, cb))\n      return\n    }\n    processor.processPatterns(target, patterns)\n\n    // done processing.  
all of the above is sync, can be abstracted out.\n    // subwalks is a map of paths to the entry filters they need\n    // matches is a map of paths to [absolute, ifDir] tuples.\n    let tasks = 1\n    const next = () => {\n      if (--tasks === 0) cb()\n    }\n\n    for (const [m, absolute, ifDir] of processor.matches.entries()) {\n      if (this.#ignored(m)) continue\n      tasks++\n      this.match(m, absolute, ifDir).then(() => next())\n    }\n\n    for (const t of processor.subwalkTargets()) {\n      if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n        continue\n      }\n      tasks++\n      const childrenCached = t.readdirCached()\n      if (t.calledReaddir())\n        this.walkCB3(t, childrenCached, processor, next)\n      else {\n        t.readdirCB(\n          (_, entries) => this.walkCB3(t, entries, processor, next),\n          true,\n        )\n      }\n    }\n\n    next()\n  }\n\n  walkCB3(\n    target: Path,\n    entries: Path[],\n    processor: Processor,\n    cb: () => any,\n  ) {\n    processor = processor.filterEntries(target, entries)\n\n    let tasks = 1\n    const next = () => {\n      if (--tasks === 0) cb()\n    }\n\n    for (const [m, absolute, ifDir] of processor.matches.entries()) {\n      if (this.#ignored(m)) continue\n      tasks++\n      this.match(m, absolute, ifDir).then(() => next())\n    }\n    for (const [target, patterns] of processor.subwalks.entries()) {\n      tasks++\n      this.walkCB2(target, patterns, processor.child(), next)\n    }\n\n    next()\n  }\n\n  walkCBSync(target: Path, patterns: Pattern[], cb: () => any) {\n    /* c8 ignore start */\n    if (this.signal?.aborted) cb()\n    /* c8 ignore stop */\n    this.walkCB2Sync(target, patterns, new Processor(this.opts), cb)\n  }\n\n  walkCB2Sync(\n    target: Path,\n    patterns: Pattern[],\n    processor: Processor,\n    cb: () => any,\n  ) {\n    if (this.#childrenIgnored(target)) return cb()\n    if (this.signal?.aborted) cb()\n    if (this.paused) {\n      this.onResume(() =>\n        this.walkCB2Sync(target, patterns, processor, cb),\n      )\n      return\n    }\n    processor.processPatterns(target, patterns)\n\n    // done processing.  
all of the above is sync, can be abstracted out.\n    // subwalks is a map of paths to the entry filters they need\n    // matches is a map of paths to [absolute, ifDir] tuples.\n    let tasks = 1\n    const next = () => {\n      if (--tasks === 0) cb()\n    }\n\n    for (const [m, absolute, ifDir] of processor.matches.entries()) {\n      if (this.#ignored(m)) continue\n      this.matchSync(m, absolute, ifDir)\n    }\n\n    for (const t of processor.subwalkTargets()) {\n      if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n        continue\n      }\n      tasks++\n      const children = t.readdirSync()\n      this.walkCB3Sync(t, children, processor, next)\n    }\n\n    next()\n  }\n\n  walkCB3Sync(\n    target: Path,\n    entries: Path[],\n    processor: Processor,\n    cb: () => any,\n  ) {\n    processor = processor.filterEntries(target, entries)\n\n    let tasks = 1\n    const next = () => {\n      if (--tasks === 0) cb()\n    }\n\n    for (const [m, absolute, ifDir] of processor.matches.entries()) {\n      if (this.#ignored(m)) continue\n      this.matchSync(m, absolute, ifDir)\n    }\n    for (const [target, patterns] of processor.subwalks.entries()) {\n      tasks++\n      this.walkCB2Sync(target, patterns, processor.child(), next)\n    }\n\n    next()\n  }\n}\n\nexport class GlobWalker<\n  O extends GlobWalkerOpts = GlobWalkerOpts,\n> extends GlobUtil<O> {\n  matches = new Set<Result<O>>()\n\n  constructor(patterns: Pattern[], path: Path, opts: O) {\n    super(patterns, path, opts)\n  }\n\n  matchEmit(e: Result<O>): void {\n    this.matches.add(e)\n  }\n\n  async walk(): Promise<Set<Result<O>>> {\n    if (this.signal?.aborted) throw this.signal.reason\n    if (this.path.isUnknown()) {\n      await this.path.lstat()\n    }\n    await new Promise<void>((res, rej) => {\n      this.walkCB(this.path, this.patterns, () => {\n        if (this.signal?.aborted) {\n          rej(this.signal.reason)\n        } else {\n          res(this.matches)\n        }\n      })\n    })\n    return this.matches\n  }\n\n  walkSync(): Set<Result<O>> {\n    if (this.signal?.aborted) throw this.signal.reason\n    if (this.path.isUnknown()) {\n      this.path.lstatSync()\n    }\n    // nothing for the callback to do, because this never pauses\n    this.walkCBSync(this.path, this.patterns, () => {\n      if (this.signal?.aborted) throw this.signal.reason\n    })\n    return this.matches\n  }\n}\n\nexport class GlobStream<\n  O extends GlobWalkerOpts = GlobWalkerOpts,\n> extends GlobUtil<O> {\n  results: Minipass<Result<O>, Result<O>>\n\n  constructor(patterns: Pattern[], path: Path, opts: O) {\n    super(patterns, path, opts)\n    this.results = new Minipass<Result<O>, Result<O>>({\n      signal: this.signal,\n      objectMode: true,\n    })\n    this.results.on('drain', () => this.resume())\n    this.results.on('resume', () => this.resume())\n  }\n\n  matchEmit(e: Result<O>): void {\n    this.results.write(e)\n    if (!this.results.flowing) this.pause()\n  }\n\n  stream(): MatchStream<O> {\n    const target = this.path\n    if (target.isUnknown()) {\n      target.lstat().then(() => {\n        this.walkCB(target, this.patterns, () => this.results.end())\n      })\n    } else {\n      this.walkCB(target, this.patterns, () => this.results.end())\n    }\n    return this.results\n  }\n\n  streamSync(): MatchStream<O> {\n    if (this.path.isUnknown()) {\n      this.path.lstatSync()\n    }\n    this.walkCBSync(this.path, this.patterns, () => this.results.end())\n    return this.results\n  }\n}\n", "import { Minimatch } from 'minimatch'\nimport { GlobOptions } from
'./glob.js'\n\n/**\n * Return true if the patterns provided contain any magic glob characters,\n * given the options provided.\n *\n * Brace expansion is not considered \"magic\" unless the \`magicalBraces\` option\n * is set, as brace expansion just turns one string into an array of strings.\n * So a pattern like \`'x{a,b}y'\` would return \`false\`, because \`'xay'\` and\n * \`'xby'\` both do not contain any magic glob characters, and it's treated the\n * same as if you had called it on \`['xay', 'xby']\`. When \`magicalBraces:true\`\n * is in the options, brace expansion _is_ treated as a pattern having magic.\n */\nexport const hasMagic = (\n  pattern: string | string[],\n  options: GlobOptions = {},\n): boolean => {\n  if (!Array.isArray(pattern)) {\n    pattern = [pattern]\n  }\n  for (const p of pattern) {\n    if (new Minimatch(p, options).hasMagic()) return true\n  }\n  return false\n}\n", "import { escape, unescape } from 'minimatch'\nimport { Minipass } from 'minipass'\nimport { Path } from 'path-scurry'\nimport type {\n  GlobOptions,\n  GlobOptionsWithFileTypesFalse,\n  GlobOptionsWithFileTypesTrue,\n  GlobOptionsWithFileTypesUnset,\n} from './glob.js'\nimport { Glob } from './glob.js'\nimport { hasMagic } from './has-magic.js'\n\nexport { escape, unescape } from 'minimatch'\nexport type {\n  FSOption,\n  Path,\n  WalkOptions,\n  WalkOptionsWithFileTypesTrue,\n  WalkOptionsWithFileTypesUnset,\n} from 'path-scurry'\nexport { Glob } from './glob.js'\nexport type {\n  GlobOptions,\n  GlobOptionsWithFileTypesFalse,\n  GlobOptionsWithFileTypesTrue,\n  GlobOptionsWithFileTypesUnset,\n} from './glob.js'\nexport { hasMagic } from './has-magic.js'\nexport { Ignore } from './ignore.js'\nexport type { IgnoreLike } from './ignore.js'\nexport type { MatchStream } from './walker.js'\n\n/**\n * Synchronous form of {@link globStream}. 
Will read all the matches as fast as\n * you consume them, even all in a single tick if you consume them immediately,\n * but will still respond to backpressure if they're not consumed immediately.\n */\nexport function globStreamSync(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesTrue,\n): Minipass<Path, Path>\nexport function globStreamSync(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesFalse,\n): Minipass<string, string>\nexport function globStreamSync(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesUnset,\n): Minipass<string, string>\nexport function globStreamSync(\n  pattern: string | string[],\n  options: GlobOptions,\n): Minipass<Path, Path> | Minipass<string, string>\nexport function globStreamSync(\n  pattern: string | string[],\n  options: GlobOptions = {},\n) {\n  return new Glob(pattern, options).streamSync()\n}\n\n/**\n * Return a stream that emits all the strings or \`Path\` objects and\n * then emits \`end\` when completed.\n */\nexport function globStream(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesFalse,\n): Minipass<string, string>\nexport function globStream(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesTrue,\n): Minipass<Path, Path>\nexport function globStream(\n  pattern: string | string[],\n  options?: GlobOptionsWithFileTypesUnset | undefined,\n): Minipass<string, string>\nexport function globStream(\n  pattern: string | string[],\n  options: GlobOptions,\n): Minipass<Path, Path> | Minipass<string, string>\nexport function globStream(\n  pattern: string | string[],\n  options: GlobOptions = {},\n) {\n  return new Glob(pattern, options).stream()\n}\n\n/**\n * Synchronous form of {@link glob}\n */\nexport function globSync(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesFalse,\n): string[]\nexport function globSync(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesTrue,\n): Path[]\nexport function globSync(\n  pattern: string | string[],\n  options?: GlobOptionsWithFileTypesUnset | undefined,\n): string[]\nexport function globSync(\n  pattern: string | string[],\n  options: GlobOptions,\n): Path[] | string[]\nexport function globSync(\n  pattern: string | string[],\n  options: GlobOptions = {},\n) {\n  return new Glob(pattern, options).walkSync()\n}\n\n/**\n * Perform an asynchronous glob search for the pattern(s) specified. Returns\n * [Path](https://isaacs.github.io/path-scurry/classes/PathBase) objects if the\n * {@link withFileTypes} option is set to \`true\`. 
See {@link GlobOptions} for\n * full option descriptions.\n */\nasync function glob_(\n  pattern: string | string[],\n  options?: GlobOptionsWithFileTypesUnset | undefined,\n): Promise<string[]>\nasync function glob_(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesTrue,\n): Promise<Path[]>\nasync function glob_(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesFalse,\n): Promise<string[]>\nasync function glob_(\n  pattern: string | string[],\n  options: GlobOptions,\n): Promise<Path[] | string[]>\nasync function glob_(\n  pattern: string | string[],\n  options: GlobOptions = {},\n) {\n  return new Glob(pattern, options).walk()\n}\n\n/**\n * Return a sync iterator for walking glob pattern matches.\n */\nexport function globIterateSync(\n  pattern: string | string[],\n  options?: GlobOptionsWithFileTypesUnset | undefined,\n): Generator<string, void, void>\nexport function globIterateSync(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesTrue,\n): Generator<Path, void, void>\nexport function globIterateSync(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesFalse,\n): Generator<string, void, void>\nexport function globIterateSync(\n  pattern: string | string[],\n  options: GlobOptions,\n): Generator<Path, void, void> | Generator<string, void, void>\nexport function globIterateSync(\n  pattern: string | string[],\n  options: GlobOptions = {},\n) {\n  return new Glob(pattern, options).iterateSync()\n}\n\n/**\n * Return an async iterator for walking glob pattern matches.\n */\nexport function globIterate(\n  pattern: string | string[],\n  options?: GlobOptionsWithFileTypesUnset | undefined,\n): AsyncGenerator<string, void, void>\nexport function globIterate(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesTrue,\n): AsyncGenerator<Path, void, void>\nexport function globIterate(\n  pattern: string | string[],\n  options: GlobOptionsWithFileTypesFalse,\n): AsyncGenerator<string, void, void>\nexport function globIterate(\n  pattern: string | string[],\n  options: GlobOptions,\n): AsyncGenerator<Path, void, void> | AsyncGenerator<string, void, void>\nexport function globIterate(\n  pattern: string | string[],\n  options: GlobOptions = {},\n) {\n  return new Glob(pattern, options).iterate()\n}\n\n// aliases: glob.sync.stream() glob.stream.sync() glob.sync() etc\nexport const streamSync = globStreamSync\nexport const stream = Object.assign(globStream, { sync: globStreamSync })\nexport const iterateSync = globIterateSync\nexport const iterate = Object.assign(globIterate, {\n  sync: globIterateSync,\n})\nexport const sync = Object.assign(globSync, {\n  stream: globStreamSync,\n  iterate: globIterateSync,\n})\n\nexport const glob = Object.assign(glob_, {\n  glob: glob_,\n  globSync,\n  sync,\n  globStream,\n  stream,\n  globStreamSync,\n  streamSync,\n  globIterate,\n  iterate,\n  globIterateSync,\n  iterateSync,\n  Glob,\n  hasMagic,\n  escape,\n  unescape,\n})\nglob.glob = glob\n", "import { execFileSync } from 'child_process';\nimport { existsSync, readdirSync } from 'fs';\nimport { join, resolve } from 'path';\n\nimport type { ValidatedCdsCommand } from './types';\nimport { fileExists } from '../../filesystem';\nimport { cdsExtractorLog } from '../../logging';\nimport type { CdsDependencyGraph } from '../parser/types';\n\n/** Default timeout for command execution in milliseconds. 
**/\nexport const DEFAULT_COMMAND_TIMEOUT_MS = 10000;\n\n/**\n * Cache for CDS command test results to avoid running the same CLI commands repeatedly.\n */\ninterface CdsCommandCache {\n  /** Map of command strings to their test results */\n  commandResults: Map<string, { works: boolean; version?: string; error?: string }>;\n  /** Available cache directories discovered during testing */\n  availableCacheDirs: string[];\n  /** Global command test results */\n  globalCommand?: string;\n  /** Whether cache has been initialized */\n  initialized: boolean;\n}\n\n// Global cache instance to share results across all calls\nconst cdsCommandCache: CdsCommandCache = {\n  commandResults: new Map(),\n  availableCacheDirs: [],\n  initialized: false,\n};\n\n/**\n * Information about CDS version dependencies from a project's package.json\n */\ninterface CdsVersionInfo {\n  /** Semver range for @sap/cds */\n  cdsVersion?: string;\n  /** Semver range for @sap/cds-dk */\n  cdsDkVersion?: string;\n  /** Computed compatible @sap/cds-dk version */\n  preferredDkVersion?: string;\n}\n\n/**\n * Factory functions to create {@link ValidatedCdsCommand} instances.\n */\nconst createCdsCommands = {\n  // Global CDS command\n  cds: (): ValidatedCdsCommand => ({\n    executable: 'cds',\n    args: [],\n    originalCommand: 'cds',\n  }),\n  // NPX with @sap/cds package\n  npxCds: (): ValidatedCdsCommand => ({\n    executable: 'npx',\n    args: ['--yes', '--package', '@sap/cds', 'cds'],\n    originalCommand: 'npx --yes --package @sap/cds cds',\n  }),\n  // NPX with @sap/cds-dk package\n  npxCdsDk: (): ValidatedCdsCommand => ({\n    executable: 'npx',\n    args: ['--yes', '--package', '@sap/cds-dk', 'cds'],\n    originalCommand: 'npx --yes --package @sap/cds-dk cds',\n  }),\n  // NPX with @sap/cds-dk package (alternative flag)\n  npxCdsDkAlt: (): ValidatedCdsCommand => ({\n    executable: 'npx',\n    args: ['--yes', '@sap/cds-dk', 'cds'],\n    originalCommand: 'npx --yes @sap/cds-dk cds',\n  }),\n  // NPX with versioned @sap/cds-dk package\n  npxCdsDkWithVersion: (version: string): ValidatedCdsCommand => ({\n    executable: 'npx',\n    args: ['--yes', '--package', \`@sap/cds-dk@${version}\`, 'cds'],\n    originalCommand: \`npx --yes --package @sap/cds-dk@${version} cds\`,\n  }),\n  // NPX with versioned @sap/cds package\n  npxCdsWithVersion: (version: string): ValidatedCdsCommand => ({\n    executable: 'npx',\n    args: ['--yes', '--package', \`@sap/cds@${version}\`, 'cds'],\n    originalCommand: \`npx --yes --package @sap/cds@${version} cds\`,\n  }),\n};\n\n/**\n * Converts a command string to a ValidatedCdsCommand object\n * @param commandString The command string to convert\n * @returns A ValidatedCdsCommand object\n */\nfunction parseCommandString(commandString: string): ValidatedCdsCommand {\n  const parts = commandString.trim().split(/\\s+/);\n  if (parts.length === 0) {\n    throw new Error('Empty command string');\n  }\n\n  const executable = parts[0];\n  const args = parts.slice(1);\n\n  return {\n    executable,\n    args,\n    originalCommand: commandString,\n  };\n}\n\n/**\n * Determines version-aware CDS commands for both primary and retry scenarios\n * @param cacheDir Optional cache directory\n * @param sourceRoot Source root directory\n * @param projectPath Project path for version resolution\n * @param dependencyGraph Dependency graph for version information\n * @returns Object containing both primary and retry commands\n */\nexport function determineVersionAwareCdsCommands(\n  cacheDir: string | undefined,\n  sourceRoot: string,\n  projectPath?: string,\n  dependencyGraph?: 
CdsDependencyGraph,\n): { primaryCommand: ValidatedCdsCommand; retryCommand: ValidatedCdsCommand } {\n  try {\n    // Get the best command string using existing logic\n    const commandString = getBestCdsCommand(cacheDir, sourceRoot, projectPath, dependencyGraph);\n\n    // Convert to ValidatedCdsCommand for primary use\n    const primaryCommand = parseCommandString(commandString);\n\n    // For retry command, always try to use a version-aware npx command if project context is available\n    let retryCommand: ValidatedCdsCommand;\n\n    if (projectPath && dependencyGraph) {\n      try {\n        const versionInfo = resolveCdsVersions(projectPath, dependencyGraph);\n        if (versionInfo?.preferredDkVersion) {\n          // Use version-specific command for retry\n          retryCommand = createCdsCommands.npxCdsDkWithVersion(versionInfo.preferredDkVersion);\n        } else if (versionInfo?.cdsDkVersion) {\n          // Use explicit cds-dk version\n          retryCommand = createCdsCommands.npxCdsDkWithVersion(versionInfo.cdsDkVersion);\n        } else {\n          // Fall back to generic npx cds-dk\n          retryCommand = createCdsCommands.npxCdsDk();\n        }\n      } catch (error) {\n        // If version resolution fails, fall back to generic npx\n        cdsExtractorLog(\n          'warn',\n          `Failed to resolve version info for ${projectPath}: ${String(error)}`,\n        );\n        retryCommand = createCdsCommands.npxCdsDk();\n      }\n    } else {\n      // No project context - use generic npx as fallback\n      retryCommand = createCdsCommands.npxCdsDk();\n    }\n\n    return { primaryCommand, retryCommand };\n  } catch (error) {\n    // If anything fails, fall back to simple commands\n    cdsExtractorLog('error', `Failed to determine version-aware commands: ${String(error)}`);\n    const fallbackCommand = parseCommandString('cds');\n    return {\n      primaryCommand: fallbackCommand,\n      retryCommand: createCdsCommands.npxCdsDk(),\n    };\n  }\n} /**\n * Creates a validated CDS command for an absolute path to a CDS executable.\n * @param absolutePath The absolute path to the CDS executable\n * @returns A {@link ValidatedCdsCommand} if the path exists and is valid, null otherwise\n */\nfunction createCdsCommandForPath(absolutePath: string): ValidatedCdsCommand | null {\n  try {\n    const resolvedPath = resolve(absolutePath);\n    if (resolvedPath && fileExists(resolvedPath)) {\n      return {\n        executable: resolvedPath,\n        args: [],\n        originalCommand: absolutePath,\n      };\n    }\n  } catch {\n    // Ignore path resolution errors\n  }\n  return null;\n}\n\n/**\n * Resolve CDS version information from a project's package.json via dependency graph\n * @param projectPath The path to the project\n * @param dependencyGraph The CDS dependency graph containing project information\n * @returns CDS version information or undefined if not available\n */\nfunction resolveCdsVersions(\n  projectPath: string,\n  dependencyGraph: CdsDependencyGraph,\n): CdsVersionInfo | undefined {\n  const project = dependencyGraph.projects.get(projectPath);\n  if (!project?.packageJson) {\n    return undefined;\n  }\n\n  const { dependencies = {}, devDependencies = {} } = project.packageJson;\n  const allDependencies = { ...dependencies, ...devDependencies };\n\n  const cdsVersion = allDependencies['@sap/cds'];\n  const cdsDkVersion = allDependencies['@sap/cds-dk'];\n\n  if (!cdsVersion && !cdsDkVersion) {\n    return undefined;\n  }\n\n  let preferredDkVersion: string | 
undefined;\n  if (cdsDkVersion) {\n    // Use explicit @sap/cds-dk version if available, but enforce minimum\n    preferredDkVersion = enforceMinimumCdsDkVersion(cdsDkVersion);\n  } else if (cdsVersion) {\n    // Derive compatible @sap/cds-dk version from @sap/cds version\n    preferredDkVersion = deriveCompatibleCdsDkVersion(cdsVersion);\n  }\n\n  return {\n    cdsVersion,\n    cdsDkVersion,\n    preferredDkVersion,\n  };\n}\n\n/**\n * Enforce minimum @sap/cds-dk version requirement\n * @param version The version string to check\n * @returns The version string with minimum version enforcement applied\n */\nfunction enforceMinimumCdsDkVersion(version: string): string {\n  const minimumVersion = 8;\n  const majorVersionMatch = version.match(/\\^?(\\d+)/);\n\n  if (majorVersionMatch) {\n    const majorVersion = parseInt(majorVersionMatch[1], 10);\n    if (majorVersion < minimumVersion) {\n      // Use the minimum version if derived version is too low\n      return `^${minimumVersion}`;\n    }\n  }\n\n  // Return original version if it meets minimum requirement or can't be parsed\n  return version;\n}\n\n/**\n * Derive a compatible @sap/cds-dk version from an @sap/cds version\n * @param cdsVersion The @sap/cds version semver range\n * @returns A compatible @sap/cds-dk version range with minimum version enforcement\n */\nfunction deriveCompatibleCdsDkVersion(cdsVersion: string): string {\n  // For simplicity, we'll use the same major version range\n  // This can be enhanced with more sophisticated logic as needed\n  const majorVersionMatch = cdsVersion.match(/\\^?(\\d+)/);\n  let derivedVersion: string;\n\n  if (majorVersionMatch) {\n    const majorVersion = majorVersionMatch[1];\n    derivedVersion = `^${majorVersion}`;\n  } else {\n    // Fallback to the original version if we can't parse it\n    derivedVersion = cdsVersion;\n  }\n\n  // Apply minimum version enforcement\n  return enforceMinimumCdsDkVersion(derivedVersion);\n}\n\n/**\n * Create a version-aware CDS command based on project information\n * @param projectPath The path to the project\n * @param dependencyGraph The CDS dependency graph containing project information\n * @returns A ValidatedCdsCommand if version information is available, null otherwise\n */\nfunction createVersionAwareCdsCommand(\n  projectPath: string,\n  dependencyGraph: CdsDependencyGraph,\n): ValidatedCdsCommand | null {\n  const versionInfo = resolveCdsVersions(projectPath, dependencyGraph);\n\n  if (!versionInfo?.preferredDkVersion) {\n    return null;\n  }\n\n  return createCdsCommands.npxCdsDkWithVersion(versionInfo.preferredDkVersion);\n}\n\n/**\n * Determine the `cds` command to use based on the environment and cache directory.\n *\n * This function uses a caching strategy to minimize repeated CLI command testing:\n * - Initializes a global cache on first call\n * - Tests global commands once and caches results\n * - Discovers all available cache directories upfront\n * - Reuses test results across multiple calls\n * - Supports project-specific version-aware command generation\n */\nexport function determineCdsCommand(\n  cacheDir: string | undefined,\n  sourceRoot: string,\n  projectPath?: string,\n  dependencyGraph?: CdsDependencyGraph,\n): string {\n  try {\n    // Always use the efficient path - debug information is collected separately\n    return getBestCdsCommand(cacheDir, sourceRoot, projectPath, dependencyGraph);\n  } catch (error) {\n    const errorMessage = `Failed to determine CDS command: ${String(error)}`;\n    cdsExtractorLog('error', 
errorMessage);\n    throw new Error(errorMessage);\n  }\n}\n\n/**\n * Discover all available cache directories in the source tree\n * @param sourceRoot The source root directory\n * @returns Array of cache directory paths\n */\nfunction discoverAvailableCacheDirs(sourceRoot: string): string[] {\n  if (cdsCommandCache.availableCacheDirs.length > 0) {\n    return cdsCommandCache.availableCacheDirs;\n  }\n\n  const cacheRootDir = join(sourceRoot, '.cds-extractor-cache');\n  const availableDirs: string[] = [];\n\n  try {\n    if (existsSync(cacheRootDir)) {\n      const entries = readdirSync(cacheRootDir, { withFileTypes: true });\n      for (const entry of entries) {\n        if (entry.isDirectory() && entry.name.startsWith('cds-')) {\n          const cacheDir = join(cacheRootDir, entry.name);\n          const cdsBin = join(cacheDir, 'node_modules', '.bin', 'cds');\n          if (fileExists(cdsBin)) {\n            availableDirs.push(cacheDir);\n          }\n        }\n      }\n    }\n  } catch (error) {\n    cdsExtractorLog('debug', `Failed to discover cache directories: ${String(error)}`);\n  }\n\n  cdsCommandCache.availableCacheDirs = availableDirs;\n  return availableDirs;\n}\n\n/**\n * Get the best CDS command for a specific cache directory\n * @param cacheDir Optional specific cache directory\n * @param sourceRoot The source root directory\n * @param projectPath Optional project path for version-aware commands\n * @param dependencyGraph Optional dependency graph for version information\n * @returns The best CDS command to use\n */\nfunction getBestCdsCommand(\n  cacheDir: string | undefined,\n  sourceRoot: string,\n  projectPath?: string,\n  dependencyGraph?: CdsDependencyGraph,\n): string {\n  // Initialize cache if needed\n  initializeCdsCommandCache(sourceRoot);\n\n  // If a specific cache directory is provided and valid, prefer it\n  if (cacheDir) {\n    const localCdsBin = join(cacheDir, 'node_modules', '.bin', 'cds');\n    const command = createCdsCommandForPath(localCdsBin);\n    if (command) {\n      const result = testCdsCommand(command, sourceRoot, true);\n      if (result.works) {\n        return localCdsBin;\n      }\n    }\n  }\n\n  // Try any available cache directories\n  for (const availableCacheDir of cdsCommandCache.availableCacheDirs) {\n    const localCdsBin = join(availableCacheDir, 'node_modules', '.bin', 'cds');\n    const command = createCdsCommandForPath(localCdsBin);\n    if (command) {\n      const result = testCdsCommand(command, sourceRoot, true);\n      if (result.works) {\n        return localCdsBin;\n      }\n    }\n  }\n\n  // Try project-specific version-aware commands if information is available\n  if (projectPath && dependencyGraph) {\n    const versionAwareCommand = createVersionAwareCdsCommand(projectPath, dependencyGraph);\n    if (versionAwareCommand) {\n      const result = testCdsCommand(versionAwareCommand, sourceRoot, true);\n      if (result.works) {\n        return versionAwareCommand.originalCommand;\n      }\n    }\n  }\n\n  // Fall back to global command\n  if (cdsCommandCache.globalCommand) {\n    return cdsCommandCache.globalCommand;\n  }\n\n  // Final fallback: test remaining npx options\n  const fallbackCommands = [createCdsCommands.npxCds(), createCdsCommands.npxCdsDk()];\n\n  for (const command of fallbackCommands) {\n    const result = testCdsCommand(command, sourceRoot, true);\n    if (result.works) {\n      return command.originalCommand;\n    }\n  }\n\n  // Return the default fallback even if it doesn't work, as tests expect this 
behavior\n  return createCdsCommands.npxCdsDk().originalCommand;\n}\n\n/**\n * Initialize the CDS command cache by testing global commands\n * @param sourceRoot The source root directory\n */\nfunction initializeCdsCommandCache(sourceRoot: string): void {\n  if (cdsCommandCache.initialized) {\n    return;\n  }\n\n  cdsExtractorLog('info', 'Initializing CDS command cache...');\n\n  // Test global commands first (most commonly used)\n  const globalCommands = [createCdsCommands.cds(), createCdsCommands.npxCdsDk()];\n\n  for (const command of globalCommands) {\n    const result = testCdsCommand(command, sourceRoot, true); // Silent testing\n    if (result.works) {\n      cdsCommandCache.globalCommand = command.originalCommand;\n      cdsExtractorLog(\n        'info',\n        `Found working global CDS command: ${command.originalCommand} (v${result.version ?? 'unknown'})`,\n      );\n      break;\n    }\n  }\n\n  // Discover available cache directories\n  const cacheDirs = discoverAvailableCacheDirs(sourceRoot);\n  if (cacheDirs.length > 0) {\n    cdsExtractorLog(\n      'info',\n      `Discovered ${cacheDirs.length} CDS cache director${cacheDirs.length === 1 ? 'y' : 'ies'}`,\n    );\n  }\n\n  cdsCommandCache.initialized = true;\n}\n\n/**\n * Reset the command cache - primarily for testing\n */\nexport function resetCdsCommandCache(): void {\n  cdsCommandCache.commandResults.clear();\n  cdsCommandCache.availableCacheDirs = [];\n  cdsCommandCache.globalCommand = undefined;\n  cdsCommandCache.initialized = false;\n}\n\n/**\n * Check if a CDS command is available and working.\n * @param validatedCommand The {@link ValidatedCdsCommand} instance for the command to test\n * @param sourceRoot The source root directory to use as cwd when testing the command\n * @param silent Whether to suppress logging of test failures\n * @returns Object with test result and version information\n */\nfunction testCdsCommand(\n  validatedCommand: ValidatedCdsCommand,\n  sourceRoot: string,\n  silent: boolean = false,\n): { works: boolean; version?: string; error?: string } {\n  const cacheKey = validatedCommand.originalCommand;\n\n  // Check cache first\n  const cachedResult = cdsCommandCache.commandResults.get(cacheKey);\n  if (cachedResult) {\n    return cachedResult;\n  }\n\n  try {\n    // Run the validated `cds` command with `--version` to test if it works.\n    const cleanEnv = {\n      ...process.env,\n      // Remove any CodeQL-specific environment variables that might interfere.\n      CODEQL_EXTRACTOR_CDS_WIP_DATABASE: undefined,\n      CODEQL_RUNNER: undefined,\n    };\n\n    const result = execFileSync(\n      validatedCommand.executable,\n      [...validatedCommand.args, '--version'],\n      {\n        encoding: 'utf8',\n        stdio: 'pipe',\n        timeout: DEFAULT_COMMAND_TIMEOUT_MS, // timeout after 10 seconds\n        cwd: sourceRoot,\n        env: cleanEnv,\n      },\n    ).toString();\n\n    // Extract version from output (typically in format \"@sap/cds-dk: 6.1.3\" or just \"6.1.3\")\n    const versionMatch = result.match(/(\\d+\\.\\d+\\.\\d+)/);\n    const version = versionMatch ? 
versionMatch[1] : undefined;\n\n    const testResult = { works: true, version };\n    cdsCommandCache.commandResults.set(cacheKey, testResult);\n    return testResult;\n  } catch (error) {\n    const errorMessage = String(error);\n    if (!silent) {\n      cdsExtractorLog('debug', \`CDS command test failed for '${cacheKey}': ${errorMessage}\`);\n    }\n\n    const testResult = { works: false, error: errorMessage };\n    cdsCommandCache.commandResults.set(cacheKey, testResult);\n    return testResult;\n  }\n}\n", "import { existsSync, readdirSync, renameSync, statSync } from 'fs';\nimport { format, join, parse } from 'path';\n\nimport { cdsExtractorLog } from './logging';\n\n/**\n * Check if a directory exists\n * @param dirPath Path to the directory to check\n * @returns True if the directory exists, false otherwise\n */\nexport function dirExists(dirPath: string): boolean {\n  return existsSync(dirPath) && statSync(dirPath).isDirectory();\n}\n\n/**\n * Check if a file exists and can be read\n * @param filePath Path to the file to check\n * @returns True if the file exists and can be read, false otherwise\n */\nexport function fileExists(filePath: string): boolean {\n  return existsSync(filePath) && statSync(filePath).isFile();\n}\n\n/**\n * Recursively renames all .json files to .cds.json in the given directory and\n * its subdirectories, except for those that already have .cds.json extension.\n *\n * @param {string} dirPath - The directory path to start recursion from\n */\nexport function recursivelyRenameJsonFiles(dirPath: string): void {\n  // Make sure the directory exists\n  if (!dirExists(dirPath)) {\n    cdsExtractorLog('info', \`Directory not found: ${dirPath}\`);\n    return;\n  }\n  cdsExtractorLog('info', \`Processing JSON files in directory: ${dirPath}\`);\n\n  // Get all entries in the directory\n  const entries = readdirSync(dirPath, { withFileTypes: true });\n\n  for (const entry of entries) {\n    const fullPath = join(dirPath, entry.name);\n\n    if (entry.isDirectory()) {\n      // Recursively process subdirectories\n      recursivelyRenameJsonFiles(fullPath);\n    } else if (\n      entry.isFile() &&\n      entry.name.endsWith('.json') &&\n      !entry.name.endsWith('.cds.json')\n    ) {\n      // Rename .json files to .cds.json\n      const newPath = format({ ...parse(fullPath), base: '', ext: '.cds.json' });\n      renameSync(fullPath, newPath);\n      cdsExtractorLog('info', \`Renamed CDS output file from ${fullPath} to ${newPath}\`);\n    }\n  }\n}\n", "import type { LogLevel } from './types';\n\n/**\n * Source root directory for logging context.\n */\nlet sourceRootDirectory: string | undefined;\n\n/**\n * Unique session ID for this CDS extractor run to help distinguish\n * between multiple concurrent or sequential runs in logs.\n * Uses the extractor start timestamp for uniqueness.\n */\nconst sessionId = Date.now().toString();\n\n/**\n * Start time of the CDS extractor session for performance tracking.\n */\nconst extractorStartTime = Date.now();\n\n/**\n * Performance tracking state for timing critical operations.\n */\nconst performanceTracking = new Map<string, number>();\n\n/**\n * Unified logging function for the CDS extractor. 
Provides consistent\n * log formatting with level prefixes, elapsed time, and session IDs.\n *\n * @param level - The log level ('debug', 'info', 'warn', 'error')\n * @param message - The primary message or data to log\n * @param optionalParams - Additional parameters to log (same as console.log)\n */\nexport function cdsExtractorLog(\n  level: LogLevel,\n  message: unknown,\n  ...optionalParams: unknown[]\n): void {\n  if (!sourceRootDirectory) {\n    throw new Error('Source root directory is not set. Call setSourceRootDirectory() first.');\n  }\n\n  const currentTime = Date.now();\n  const elapsedMs = currentTime - extractorStartTime;\n  const levelPrefix = `[CDS-${sessionId} ${elapsedMs}] ${level.toUpperCase()}: `;\n\n  // Select the appropriate console function based on log level\n  switch (level) {\n    case 'debug':\n    case 'info':\n      if (typeof message === 'string') {\n        console.log(levelPrefix + message, ...optionalParams);\n      } else {\n        console.log(levelPrefix, message, ...optionalParams);\n      }\n      break;\n    case 'warn':\n      if (typeof message === 'string') {\n        console.warn(levelPrefix + message, ...optionalParams);\n      } else {\n        console.warn(levelPrefix, message, ...optionalParams);\n      }\n      break;\n    case 'error':\n      if (typeof message === 'string') {\n        console.error(levelPrefix + message, ...optionalParams);\n      } else {\n        console.error(levelPrefix, message, ...optionalParams);\n      }\n      break;\n    default:\n      // This should never happen due to TypeScript typing\n      throw new Error(`Invalid log level: ${String(level)}`);\n  }\n}\n/**\n * Calculates elapsed time from start and formats it with appropriate units.\n *\n * @param startTime - The start timestamp in milliseconds\n * @param endTime - The end timestamp in milliseconds (defaults to current time)\n * @returns Formatted duration string\n */\nfunction formatDuration(startTime: number, endTime: number = Date.now()): string {\n  const durationMs = endTime - startTime;\n\n  if (durationMs < 1000) {\n    return `${durationMs}ms`;\n  } else if (durationMs < 60000) {\n    return `${(durationMs / 1000).toFixed(2)}s`;\n  } else {\n    const minutes = Math.floor(durationMs / 60000);\n    const seconds = ((durationMs % 60000) / 1000).toFixed(2);\n    return `${minutes}m ${seconds}s`;\n  }\n}\n\n/**\n * Logs the start of the CDS extractor session with session information.\n *\n * @param sourceRoot - The source root directory being processed\n */\nexport function logExtractorStart(sourceRoot: string): void {\n  cdsExtractorLog('info', `=== CDS EXTRACTOR START [${sessionId}] ===`);\n  cdsExtractorLog('info', `Source Root: ${sourceRoot}`);\n}\n\n/**\n * Logs the end of the CDS extractor session with final performance summary.\n *\n * @param success - Whether the extraction completed successfully\n * @param additionalSummary - Optional additional summary information\n */\nexport function logExtractorStop(success: boolean = true, additionalSummary?: string): void {\n  const endTime = Date.now();\n  const totalDuration = formatDuration(extractorStartTime, endTime);\n  const status = success ? 
'SUCCESS' : 'FAILURE';\n\n  if (additionalSummary) {\n    cdsExtractorLog('info', additionalSummary);\n  }\n\n  cdsExtractorLog('info', `=== CDS EXTRACTOR END [${sessionId}] - ${status} ===`);\n  cdsExtractorLog('info', `Total Duration: ${totalDuration}`);\n}\n\n/**\n * Logs a performance milestone with timing information.\n *\n * @param milestone - Description of the milestone reached\n * @param additionalInfo - Optional additional information to include\n */\nexport function logPerformanceMilestone(milestone: string, additionalInfo?: string): void {\n  const currentTime = Date.now();\n  const overallDuration = formatDuration(extractorStartTime, currentTime);\n  const info = additionalInfo ? ` - ${additionalInfo}` : '';\n  cdsExtractorLog('info', `MILESTONE: ${milestone} (after ${overallDuration})${info}`);\n}\n\n/**\n * Starts tracking performance for a named operation.\n *\n * @param operationName - Name of the operation to track\n */\nexport function logPerformanceTrackingStart(operationName: string): void {\n  performanceTracking.set(operationName, Date.now());\n  cdsExtractorLog('debug', `Started: ${operationName}`);\n}\n\n/**\n * Ends tracking performance for a named operation and logs the duration.\n *\n * @param operationName - Name of the operation to stop tracking\n */\nexport function logPerformanceTrackingStop(operationName: string): void {\n  const startTime = performanceTracking.get(operationName);\n  if (startTime) {\n    const duration = formatDuration(startTime);\n    performanceTracking.delete(operationName);\n    cdsExtractorLog('info', `Completed: ${operationName} (took ${duration})`);\n  } else {\n    cdsExtractorLog('warn', `No start time found for operation: ${operationName}`);\n  }\n}\n\n/**\n * Sets the source root directory for logging context.\n * This should typically be called once at the start of the CDS extractor.\n *\n * @param sourceRoot - The absolute path to the source root directory\n */\nexport function setSourceRootDirectory(sourceRoot: string): void {\n  sourceRootDirectory = sourceRoot;\n}\n", "import type { CdsDependencyGraph } from '../cds/parser';\n\n/**\n * Generate a comprehensive status report for the dependency graph\n * Supports both normal execution and debug modes\n */\nexport function generateStatusReport(dependencyGraph: CdsDependencyGraph): string {\n  const summary = dependencyGraph.statusSummary;\n  const lines: string[] = [];\n\n  lines.push('='.repeat(80));\n  lines.push(`CDS EXTRACTOR STATUS REPORT`);\n  lines.push('='.repeat(80));\n  lines.push('');\n\n  // OVERALL SUMMARY\n  lines.push('OVERALL SUMMARY:');\n  lines.push(`  Status: ${summary.overallSuccess ? 
'SUCCESS' : 'FAILED'}`);\n  lines.push(`  Current Phase: ${dependencyGraph.currentPhase.toUpperCase()}`);\n  lines.push(`  Projects: ${summary.totalProjects}`);\n  lines.push(`  CDS Files: ${summary.totalCdsFiles}`);\n  lines.push(`  JSON Files Generated: ${summary.jsonFilesGenerated}`);\n  lines.push('');\n\n  // COMPILATION SUMMARY\n  lines.push('COMPILATION SUMMARY:');\n  lines.push(`  Total Tasks: ${summary.totalCompilationTasks}`);\n  lines.push(`  Successful: ${summary.successfulCompilations}`);\n  lines.push(`  Retried: ${dependencyGraph.retryStatus.totalRetryAttempts}`);\n  lines.push(`  Failed: ${summary.failedCompilations}`);\n  lines.push(`  Skipped: ${summary.skippedCompilations}`);\n  lines.push('');\n\n  // RETRY SUMMARY (if retry attempts were made)\n  if (dependencyGraph.retryStatus.totalRetryAttempts > 0) {\n    lines.push('RETRY SUMMARY:');\n    lines.push(`  Tasks Requiring Retry: ${dependencyGraph.retryStatus.totalTasksRequiringRetry}`);\n    lines.push(\n      `  Tasks Successfully Retried: ${dependencyGraph.retryStatus.totalTasksSuccessfullyRetried}`,\n    );\n    lines.push(`  Total Retry Attempts: ${dependencyGraph.retryStatus.totalRetryAttempts}`);\n    lines.push(\n      `  Projects Requiring Full Dependencies: ${dependencyGraph.retryStatus.projectsRequiringFullDependencies.size}`,\n    );\n    lines.push(\n      `  Projects with Full Dependencies: ${dependencyGraph.retryStatus.projectsWithFullDependencies.size}`,\n    );\n    lines.push('');\n  }\n\n  // PERFORMANCE metrics\n  lines.push('PERFORMANCE:');\n  lines.push(`  Total Duration: ${summary.performance.totalDurationMs}ms`);\n  lines.push(`  Parsing: ${summary.performance.parsingDurationMs}ms`);\n  lines.push(`  Compilation: ${summary.performance.compilationDurationMs}ms`);\n  lines.push(`  Extraction: ${summary.performance.extractionDurationMs}ms`);\n\n  // Add percentage breakdown if total duration > 0\n  if (summary.performance.totalDurationMs > 0) {\n    const parsingPct = Math.round(\n      (summary.performance.parsingDurationMs / summary.performance.totalDurationMs) * 100,\n    );\n    const compilationPct = Math.round(\n      (summary.performance.compilationDurationMs / summary.performance.totalDurationMs) * 100,\n    );\n    const extractionPct = Math.round(\n      (summary.performance.extractionDurationMs / summary.performance.totalDurationMs) * 100,\n    );\n\n    lines.push('  Breakdown:');\n    lines.push(`    Parsing: ${parsingPct}%`);\n    lines.push(`    Compilation: ${compilationPct}%`);\n    lines.push(`    Extraction: ${extractionPct}%`);\n  }\n  lines.push('');\n\n  // Errors and warnings\n  if (summary.criticalErrors.length > 0) {\n    lines.push('CRITICAL ERRORS:');\n    for (const error of summary.criticalErrors) {\n      lines.push(`  - ${error}`);\n    }\n    lines.push('');\n  }\n\n  if (summary.warnings.length > 0) {\n    lines.push('WARNINGS:');\n    for (const warning of summary.warnings) {\n      lines.push(`  - ${warning}`);\n    }\n    lines.push('');\n  }\n\n  lines.push('='.repeat(80));\n\n  return lines.join('\\n');\n}\n", "import { spawnSync, SpawnSyncOptions } from 'child_process';\nimport { resolve, join, delimiter, relative, dirname, basename } from 'path';\n\nimport { CdsCompilationResult } from './types';\nimport { getCdsVersion } from './version';\nimport { modelCdsJsonFile } from '../../constants';\nimport { fileExists, dirExists, recursivelyRenameJsonFiles } from '../../filesystem';\nimport { cdsExtractorLog } from '../../logging';\nimport { BasicCdsProject } from 
'../parser/types';\n\n/**\n * Parses a command string for use with spawnSync, handling multi-word commands like 'npx cds'.\n * @param commandString The command string to parse (e.g., 'npx cds' or 'cds')\n * @returns Object with executable and args arrays for spawnSync\n */\nfunction parseCommandForSpawn(commandString: string): { executable: string; baseArgs: string[] } {\n  const parts = commandString.trim().split(/\\s+/);\n  const executable = parts[0];\n  const baseArgs = parts.slice(1);\n  return { executable, baseArgs };\n}\n\n/**\n * Determines compilation targets for a CDS project according to the project-only compilation approach.\n * @param project The CDS project\n * @param sourceRoot The source root directory\n * @returns Array of compilation targets (directories or files relative to project base)\n */\nfunction determineCompilationTargets(project: BasicCdsProject, sourceRoot: string): string[] {\n  const projectAbsolutePath = join(sourceRoot, project.projectDir);\n\n  // Check for index.cds in the project root first, which takes precedence over CAP directories.\n  const rootCdsFiles = project.cdsFiles\n    .filter(file => dirname(join(sourceRoot, file)) === projectAbsolutePath)\n    .map(file => basename(file));\n\n  if (rootCdsFiles.includes('index.cds')) {\n    // Use only index.cds when it exists in the project root\n    return ['index.cds'];\n  }\n\n  // Check for standard CAP directories\n  const capDirectories = ['db', 'srv', 'app'];\n  const existingCapDirs = capDirectories.filter(dir => dirExists(join(projectAbsolutePath, dir)));\n\n  if (existingCapDirs.length > 0) {\n    // Use standard CAP directories\n    return existingCapDirs;\n  }\n\n  if (rootCdsFiles.length > 0) {\n    // Use other root-level files\n    return rootCdsFiles;\n  }\n\n  // Use all CDS files with their relative paths\n  return project.cdsFiles.map(file => relative(projectAbsolutePath, join(sourceRoot, file)));\n}\n\n/**\n * Compiles a CDS project to JSON using project-level compilation only.\n * This function uses project-level compilation exclusively, eliminating all\n * individual file compilation logic and standardizing output to a single\n * {@link modelCdsJsonFile} file per project.\n *\n * @param cdsFilePath The path to the CDS file to compile, relative to the `sourceRoot`.\n * @param sourceRoot The source root directory scanned by the CDS extractor.\n * CRITICAL: All spawned processes will use the project base directory as their `cwd` to\n * ensure that paths in generated JSON are relative to the project base directory.\n *\n * @param cdsCommand The actual shell command to use for `cds compile`.\n * @param cacheDir Full path to the cache directory where dependencies are stored.\n * @param projectMap Map of project directories to {@link BasicCdsProject} instances.\n * @param projectDir The project directory to which `cdsFilePath` belongs.\n *\n * @returns The {@link CdsCompilationResult} of the compilation attempt.\n */\nexport function compileCdsToJson(\n  cdsFilePath: string,\n  sourceRoot: string,\n  cdsCommand: string,\n  cacheDir: string | undefined,\n  projectMap: Map<string, BasicCdsProject>,\n  projectDir: string,\n): CdsCompilationResult {\n  try {\n    const resolvedCdsFilePath = resolve(cdsFilePath);\n    if (!fileExists(resolvedCdsFilePath)) {\n      throw new Error(`Expected CDS file '${resolvedCdsFilePath}' does not exist.`);\n    }\n\n    // Get and log the CDS version\n    const cdsVersion = getCdsVersion(cdsCommand, cacheDir);\n    const versionInfo = cdsVersion ? 
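// --- Editorial worked example (not in the original source): target selection for a
// hypothetical project under determineCompilationTargets' precedence rules
// (root index.cds, then db/srv/app, then other root files, then all files):
//
//   bookshop/            <- project.projectDir
//     db/schema.cds
//     srv/service.cds
//
// There is no root-level index.cds, so the standard CAP directories win:
//   determineCompilationTargets(project, sourceRoot) === ['db', 'srv']
// Adding bookshop/index.cds would change the result to ['index.cds'].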
`CDS v${cdsVersion}` : 'an unknown CDS version';\n\n    // Calculate project base directory for consistent working directory\n    const projectBaseDir = join(sourceRoot, projectDir);\n\n    // Create spawn options with project base directory as cwd.\n    const spawnOptions = createSpawnOptions(projectBaseDir, cdsCommand, cacheDir);\n\n    // Throw an error if projectDir cannot be found in the projectMap.\n    if (!projectMap || !projectDir || !projectMap.has(projectDir)) {\n      throw new Error(\n        `Project directory '${projectDir}' not found in projectMap. Ensure the project is properly initialized.`,\n      );\n    }\n\n    const project = projectMap.get(projectDir);\n\n    // Always use project-level compilation\n    return compileProject(sourceRoot, projectDir, cdsCommand, spawnOptions, versionInfo, project!);\n  } catch (error) {\n    return { success: false, message: String(error) };\n  }\n}\n\n/**\n * Handles project-level compilation for CAP projects.\n * CRITICAL: Uses the project base directory as cwd and calculates paths relative to project base directory.\n *\n * @param sourceRoot The source root directory\n * @param projectDir The project directory (relative to sourceRoot)\n * @param cdsCommand The CDS command to use\n * @param spawnOptions Pre-configured spawn options with project base directory as cwd\n * @param versionInfo Version information for logging\n * @param project The CDS project instance\n * @returns Compilation result\n */\nfunction compileProject(\n  sourceRoot: string,\n  projectDir: string,\n  cdsCommand: string,\n  spawnOptions: SpawnSyncOptions,\n  versionInfo: string,\n  project: BasicCdsProject,\n): CdsCompilationResult {\n  cdsExtractorLog('info', `Compiling CDS project '${projectDir}' using ${versionInfo}...`);\n\n  // Determine compilation targets using the new centralized logic\n  const compilationTargets = determineCompilationTargets(project, sourceRoot);\n\n  if (compilationTargets.length === 0) {\n    throw new Error(\n      `Project directory '${projectDir}' does not contain any CDS files and cannot be compiled`,\n    );\n  }\n\n  const projectJsonOutPath = join(sourceRoot, projectDir, modelCdsJsonFile);\n\n  const compileArgs = [\n    'compile',\n    ...compilationTargets,\n    '--to',\n    'json',\n    '--dest',\n    modelCdsJsonFile,\n    '--locations',\n    '--log-level',\n    'warn',\n  ];\n\n  cdsExtractorLog('info', `Compiling CDS project targets: ${compilationTargets.join(', ')}`);\n  cdsExtractorLog(\n    'info',\n    `Running compilation task for CDS project '${projectDir}': command='${cdsCommand}' args='${JSON.stringify(compileArgs)}'`,\n  );\n\n  // Parse command for proper spawnSync execution\n  const { executable, baseArgs } = parseCommandForSpawn(cdsCommand);\n  const allArgs = [...baseArgs, ...compileArgs];\n\n  const result = spawnSync(executable, allArgs, spawnOptions);\n\n  if (result.error) {\n    cdsExtractorLog('error', `SpawnSync error: ${result.error.message}`);\n    throw new Error(`Error executing CDS compiler: ${result.error.message}`);\n  }\n\n  // Log stderr for debugging even on success (CDS often writes warnings to stderr).\n  if (result.stderr && result.stderr.length > 0) {\n    cdsExtractorLog('warn', `CDS stderr output: ${result.stderr.toString()}`);\n  }\n\n  if (result.status !== 0) {\n    cdsExtractorLog('error', `CDS command failed with status ${result.status}`);\n    cdsExtractorLog(\n      'error',\n      `Command: ${cdsCommand} ${compileArgs.map(arg => (arg.includes(' ') ? 
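// --- Editorial worked example (not in the original source): for the hypothetical
// values cdsCommand = 'npx cds' and compilationTargets = ['db', 'srv'], the code
// above effectively runs, from the project base directory:
//
//   npx cds compile db srv --to json --dest model.cds.json --locations --log-level warn
//
// parseCommandForSpawn splits 'npx cds' into executable 'npx' plus base arg 'cds',
// and paths with spaces survive because no shell re-parsing occurs (shell: false).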
`\"${arg}\"` : arg)).join(' ')}`,\n    );\n    cdsExtractorLog('error', `Stdout: ${result.stdout?.toString() || 'No stdout'}`);\n    cdsExtractorLog('error', `Stderr: ${result.stderr?.toString() || 'No stderr'}`);\n    throw new Error(\n      `Could not compile the CAP project ${projectDir}.\\nReported error(s):\\n\\`\\`\\`\\n${\n        result.stderr?.toString() || 'Unknown error'\n      }\\n\\`\\`\\``,\n    );\n  }\n\n  if (!fileExists(projectJsonOutPath) && !dirExists(projectJsonOutPath)) {\n    throw new Error(\n      `CAP project '${projectDir}' was not compiled to JSON. This is likely because the project structure is invalid.`,\n    );\n  }\n\n  // Handle directory output if the CDS compiler generated a directory.\n  if (dirExists(projectJsonOutPath)) {\n    cdsExtractorLog(\n      'info',\n      `CDS compiler generated JSON to output directory: ${projectJsonOutPath}`,\n    );\n    // Recursively rename generated .json files to have a .cds.json extension\n    recursivelyRenameJsonFiles(projectJsonOutPath);\n  } else {\n    cdsExtractorLog('info', `CDS compiler generated JSON to file: ${projectJsonOutPath}`);\n  }\n\n  return {\n    success: true,\n    outputPath: projectJsonOutPath,\n    compiledAsProject: true,\n    message: 'Project was compiled using project-aware compilation',\n  };\n}\n\n/**\n * Creates spawn options for CDS compilation processes.\n * CRITICAL: Always sets cwd to project base directory to ensure generated JSON paths are relative to project base directory.\n *\n * @param projectBaseDir The project base directory (where package.json is located) - used as cwd for all spawned processes\n * @param cdsCommand The CDS command to determine if we need Node.js environment setup\n * @param cacheDir Optional cache directory for dependencies\n * @returns Spawn options configured for CDS compilation\n */\nfunction createSpawnOptions(\n  projectBaseDir: string,\n  cdsCommand: string,\n  cacheDir?: string,\n): SpawnSyncOptions {\n  const spawnOptions: SpawnSyncOptions = {\n    cwd: projectBaseDir, // CRITICAL: Always use project base directory as cwd to ensure correct path generation\n    shell: false, // Use shell=false to ensure proper argument handling for paths with spaces\n    stdio: 'pipe',\n    env: { ...process.env },\n  };\n\n  // Check if we're using a direct binary path (contains node_modules/.bin/) or npx-style command\n  const isDirectBinary = cdsCommand.includes('node_modules/.bin/');\n\n  // Only set up Node.js environment for npx-style commands, not for direct binary execution\n  if (cacheDir && !isDirectBinary) {\n    const nodePath = join(cacheDir, 'node_modules');\n\n    // Set up environment to use the cached dependencies\n    spawnOptions.env = {\n      ...process.env,\n      NODE_PATH: `${nodePath}${delimiter}${process.env.NODE_PATH ?? 
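// --- Editorial worked example (not in the original source): the cache-aware
// environment assembled above, for a hypothetical POSIX cacheDir of
// '/src/.cds-extractor-cache/cds-abc123':
//
//   NODE_PATH = '/src/.cds-extractor-cache/cds-abc123/node_modules:<inherited NODE_PATH>'
//   PATH      = '/src/.cds-extractor-cache/cds-abc123/node_modules/.bin:<inherited PATH>'
//
// Using `delimiter` from 'path' keeps the joins portable (':' on POSIX, ';' on Windows).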
''}`,\n      PATH: `${join(nodePath, '.bin')}${delimiter}${process.env.PATH}`,\n      // Add NPM configuration to ensure dependencies are resolved from the cache directory\n      npm_config_prefix: cacheDir,\n      // Ensure we don't pick up global CDS installations that might conflict\n      npm_config_global: 'false',\n      // Clear any existing CDS environment variables that might interfere\n      CDS_HOME: cacheDir,\n    };\n  } else if (isDirectBinary) {\n    // For direct binary execution, use minimal environment to avoid conflicts\n    // Remove Node.js-specific environment variables that might interfere\n    const cleanEnv = { ...process.env };\n    delete cleanEnv.NODE_PATH;\n    delete cleanEnv.npm_config_prefix;\n    delete cleanEnv.npm_config_global;\n    delete cleanEnv.CDS_HOME;\n\n    spawnOptions.env = cleanEnv;\n  }\n\n  return spawnOptions;\n}\n", "import { spawnSync, SpawnSyncOptions } from 'child_process';\nimport { join, delimiter } from 'path';\n\n/**\n * Get the CDS compiler version from a specific command or cache directory.\n * @param cdsCommand The CDS command to use.\n * @param cacheDir Optional path to a directory containing installed dependencies.\n * @returns The CDS compiler version string, or undefined if it couldn't be determined.\n */\nexport function getCdsVersion(cdsCommand: string, cacheDir?: string): string | undefined {\n  try {\n    // Set up environment vars if using a cache directory\n    const spawnOptions: SpawnSyncOptions = {\n      shell: true,\n      stdio: 'pipe',\n      env: { ...process.env },\n    };\n\n    // If a cache directory is provided, set NODE_PATH to use that cache\n    if (cacheDir) {\n      const nodePath = join(cacheDir, 'node_modules');\n\n      // Set up environment to use the cached dependencies\n      spawnOptions.env = {\n        ...process.env,\n        NODE_PATH: `${nodePath}${delimiter}${process.env.NODE_PATH ?? ''}`,\n        PATH: `${join(nodePath, '.bin')}${delimiter}${process.env.PATH}`,\n        npm_config_prefix: cacheDir,\n      };\n    }\n\n    // Execute the CDS command with the --version flag\n    const result = spawnSync(cdsCommand, ['--version'], spawnOptions);\n    if (result.status === 0 && result.stdout) {\n      const versionOutput = result.stdout.toString().trim();\n      // Extract version number, which is typically in formats like \"@sap/cds: 6.1.3\" or similar\n      const match = versionOutput.match(/@sap\\/cds[^0-9]*([0-9]+\\.[0-9]+\\.[0-9]+)/);\n      if (match?.[1]) {\n        return match[1]; // Return just the version number\n      }\n      return versionOutput; // Return full output if we couldn't parse it\n    }\n    return undefined;\n  } catch {\n    return undefined;\n  }\n}\n", "/** Common constants used throughout the CDS extractor. */\n\n/**\n * Common, expected name of the JSON file created by CDS compilation\n * tasks performed by, or on behalf of, the CDS extractor.\n */\nexport const modelCdsJsonFile = 'model.cds.json';\n", "/** Validation utilities for CDS compilation output files. 
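// --- Editorial sketch (not in the original source): what the version-extraction
// regex above recovers from typical `cds --version` output. The sample string is
// hypothetical.
const sampleVersionOutput = '@sap/cds: 6.1.3\n@sap/cds-compiler: 4.1.2';
const versionMatch = sampleVersionOutput.match(/@sap\/cds[^0-9]*([0-9]+\.[0-9]+\.[0-9]+)/);
// versionMatch?.[1] === '6.1.3'; unparseable output falls back to the full string.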
*/\n\nimport { readFileSync } from 'fs';\nimport { isAbsolute, join } from 'path';\n\nimport type {\n  CompilationTask,\n  ResultDependencyStatusUpdate,\n  ResultOutputFileValidation,\n  ResultTaskValidation,\n} from './types';\nimport { fileExists } from '../../filesystem';\nimport { cdsExtractorLog } from '../../logging';\nimport type { CdsDependencyGraph } from '../parser/types';\n\n/**\n * Identifies tasks requiring retry based on output validation\n * @param dependencyGraph The dependency graph containing tasks to validate\n * @returns Map of project directory to failed tasks that need retry\n */\nexport function identifyTasksRequiringRetry(\n  dependencyGraph: CdsDependencyGraph,\n): Map<string, CompilationTask[]> {\n  const tasksRequiringRetry = new Map<string, CompilationTask[]>();\n\n  for (const [projectDir, project] of dependencyGraph.projects.entries()) {\n    const failedTasks: CompilationTask[] = [];\n\n    for (const task of project.compilationTasks) {\n      // Skip tasks that have already been retried.\n      if (task.retryInfo?.hasBeenRetried) {\n        continue;\n      }\n\n      // Always validate output files exist, regardless of task status.\n      const validationResult = validateTaskOutputs(task, dependencyGraph.sourceRootDir);\n\n      if (!validationResult.isValid) {\n        failedTasks.push(task);\n        cdsExtractorLog(\n          'info',\n          `Task ${task.id} requires retry: ${validationResult.validFileCount}/${validationResult.expectedFileCount} output files valid (status: ${task.status})`,\n        );\n\n        // Update task status to reflect actual file state.\n        if (task.status === 'success') {\n          cdsExtractorLog(\n            'warn',\n            `Task ${task.id} was marked as successful but output files are missing or invalid - updating status to failed`,\n          );\n          task.status = 'failed';\n        }\n      }\n    }\n\n    if (failedTasks.length > 0) {\n      tasksRequiringRetry.set(projectDir, failedTasks);\n    }\n  }\n\n  if (tasksRequiringRetry.size > 0) {\n    const totalFailedTasks = Array.from(tasksRequiringRetry.values()).reduce(\n      (sum, tasks) => sum + tasks.length,\n      0,\n    );\n    cdsExtractorLog(\n      'info',\n      `Identified ${totalFailedTasks} task(s) requiring retry across ${tasksRequiringRetry.size} project(s)`,\n    );\n  }\n\n  return tasksRequiringRetry;\n}\n\n/**\n * Updates the dependency graph with current task status based on filesystem validation.\n * This is the single source of truth for compilation task status across all phases.\n */\nexport function updateCdsDependencyGraphStatus(\n  dependencyGraph: CdsDependencyGraph,\n  sourceRootDir: string,\n): ResultDependencyStatusUpdate {\n  let successfulTasks = 0;\n  let failedTasks = 0;\n  let tasksSuccessfullyRetried = 0;\n\n  // Validate all tasks using filesystem checks\n  for (const project of dependencyGraph.projects.values()) {\n    for (const task of project.compilationTasks) {\n      const validationResult = validateTaskOutputs(task, sourceRootDir);\n      const isValid = validationResult.isValid;\n\n      if (isValid) {\n        task.status = 'success';\n        successfulTasks++;\n\n        // If task has retry info and is now successful, count as successfully retried\n        if (task.retryInfo?.hasBeenRetried) {\n          tasksSuccessfullyRetried++;\n        }\n      } else {\n        task.status = 'failed';\n        failedTasks++;\n      }\n    }\n  }\n\n  // Update dependency graph counters\n  dependencyGraph.statusSummary.successfulCompilations = successfulTasks;\n  
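// --- Editorial sketch (not in the original source): because the validator above is
// the single source of truth for task status, callers re-derive the counters from
// the filesystem instead of trusting earlier in-memory state. Variable names are
// hypothetical.
const statusUpdate = updateCdsDependencyGraphStatus(dependencyGraph, dependencyGraph.sourceRootDir);
cdsExtractorLog(
  'info',
  `Validated ${statusUpdate.tasksValidated} task(s): ${statusUpdate.successfulTasks} succeeded, ${statusUpdate.failedTasks} failed`,
);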
dependencyGraph.statusSummary.failedCompilations = failedTasks;\n\n  // Update retry status tracking\n  dependencyGraph.retryStatus.totalTasksSuccessfullyRetried = tasksSuccessfullyRetried;\n  dependencyGraph.retryStatus.totalTasksRequiringRetry = failedTasks;\n\n  return {\n    tasksValidated: successfulTasks + failedTasks,\n    successfulTasks,\n    failedTasks,\n    tasksSuccessfullyRetried,\n  };\n}\n/**\n * Validates a single expected output file.\n * @param filePath Path to the output file to validate\n * @returns Validation result with details\n */\nexport function validateOutputFile(filePath: string): ResultOutputFileValidation {\n  const result: ResultOutputFileValidation = {\n    isValid: false,\n    filePath,\n    exists: false,\n  };\n\n  // Check if file exists\n  if (!fileExists(filePath)) {\n    result.error = 'File does not exist';\n    return result;\n  }\n\n  result.exists = true;\n\n  // For .cds.json files, validate JSON content\n  if (filePath.endsWith('.cds.json') || filePath.endsWith('.json')) {\n    try {\n      const content = readFileSync(filePath, 'utf8');\n\n      // Check if content is empty\n      if (!content.trim()) {\n        result.error = 'File is empty';\n        return result;\n      }\n\n      // Try to parse as JSON\n      const parsed: unknown = JSON.parse(content);\n\n      // Basic structure validation for CDS JSON files\n      if (typeof parsed !== 'object' || parsed === null) {\n        result.error = 'File does not contain a valid JSON object';\n        return result;\n      }\n\n      result.hasValidJson = true;\n      result.isValid = true;\n    } catch (error) {\n      result.error = `Invalid JSON content: ${String(error)}`;\n      return result;\n    }\n  } else {\n    // For non-JSON files, existence is sufficient\n    result.isValid = true;\n  }\n\n  return result;\n}\n\n/**\n * Validates that all expected output files exist for a compilation task.\n * @param task The compilation task to validate\n * @param sourceRoot Source root directory for resolving relative paths\n * @returns Task-level validation result\n */\nexport function validateTaskOutputs(\n  task: CompilationTask,\n  sourceRoot: string,\n): ResultTaskValidation {\n  const fileResults: ResultOutputFileValidation[] = [];\n\n  // Resolve the output file path relative to source root\n  const expectedOutput = task.expectedOutputFile;\n  const absolutePath = isAbsolute(expectedOutput)\n    ? 
expectedOutput\n    : join(sourceRoot, expectedOutput);\n\n  const fileResult = validateOutputFile(absolutePath);\n  fileResults.push(fileResult);\n\n  const validFileCount = fileResults.filter(r => r.isValid).length;\n  const expectedFileCount = 1;\n  const isValid = validFileCount === expectedFileCount && expectedFileCount > 0;\n\n  return {\n    isValid,\n    task,\n    fileResults,\n    validFileCount,\n    expectedFileCount,\n  };\n}\n", "import { execFileSync } from 'child_process';\nimport { resolve } from 'path';\n\nimport { cdsExtractorLog } from './logging';\n\n/**\n * Severity levels for diagnostics\n */\nexport enum DiagnosticSeverity {\n  Error = 'error',\n  Warning = 'warning',\n  Note = 'note',\n  Recommendation = 'recommendation',\n}\n\n/**\n * Base function to add a diagnostic to the CodeQL database\n * @param filePath Path to the file related to the diagnostic\n * @param message The diagnostic message\n * @param codeqlExePath Path to the CodeQL executable\n * @param sourceId The source ID for the diagnostic\n * @param sourceName The source name for the diagnostic\n * @param severity The severity level of the diagnostic\n * @param logPrefix Prefix for the log message\n * @returns True if the diagnostic was added, false otherwise\n */\nfunction addDiagnostic(\n  filePath: string,\n  message: string,\n  codeqlExePath: string,\n  sourceId: string,\n  sourceName: string,\n  severity: DiagnosticSeverity,\n  logPrefix: string,\n): boolean {\n  try {\n    execFileSync(codeqlExePath, [\n      'database',\n      'add-diagnostic',\n      '--extractor-name=cds',\n      '--ready-for-status-page',\n      `--source-id=${sourceId}`,\n      `--source-name=${sourceName}`,\n      `--severity=${severity}`,\n      `--markdown-message=${message}`,\n      `--file-path=${resolve(filePath)}`,\n      '--',\n      `${process.env.CODEQL_EXTRACTOR_CDS_WIP_DATABASE ?? 
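// --- Editorial worked example (not in the original source): the CLI invocation that
// addDiagnostic assembles above, shown expanded for a compilation failure
// (argument values are illustrative):
//
//   codeql database add-diagnostic \
//     --extractor-name=cds \
//     --ready-for-status-page \
//     --source-id=cds/compilation-failure \
//     --source-name='Failure to compile one or more SAP CAP CDS files' \
//     --severity=error \
//     --markdown-message='<compiler error text>' \
//     --file-path=/abs/path/to/srv/service.cds \
//     -- "$CODEQL_EXTRACTOR_CDS_WIP_DATABASE"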
''}`,\n    ]);\n    cdsExtractorLog('info', `Added ${severity} diagnostic for ${logPrefix}: ${filePath}`);\n    return true;\n  } catch (err) {\n    cdsExtractorLog(\n      'error',\n      `Failed to add ${severity} diagnostic for ${logPrefix}=${filePath} : ${String(err)}`,\n    );\n    return false;\n  }\n}\n\n/**\n * Add a diagnostic error to the CodeQL database for a failed CDS compilation\n * @param cdsFilePath Path to the CDS file that failed to compile\n * @param errorMessage The error message from the compilation\n * @param codeqlExePath Path to the CodeQL executable\n * @returns True if the diagnostic was added, false otherwise\n */\nexport function addCompilationDiagnostic(\n  cdsFilePath: string,\n  errorMessage: string,\n  codeqlExePath: string,\n): boolean {\n  return addDiagnostic(\n    cdsFilePath,\n    errorMessage,\n    codeqlExePath,\n    'cds/compilation-failure',\n    'Failure to compile one or more SAP CAP CDS files',\n    DiagnosticSeverity.Error,\n    'source file',\n  );\n}\n\n/**\n * Add a diagnostic error to the CodeQL database for dependency graph build failure\n * @param sourceRoot Source root directory to use as file context\n * @param errorMessage The error message from dependency graph build\n * @param codeqlExePath Path to the CodeQL executable\n * @returns True if the diagnostic was added, false otherwise\n */\nexport function addDependencyGraphDiagnostic(\n  sourceRoot: string,\n  errorMessage: string,\n  codeqlExePath: string,\n): boolean {\n  return addDiagnostic(\n    sourceRoot,\n    errorMessage,\n    codeqlExePath,\n    'cds/dependency-graph-failure',\n    'CDS project dependency graph build failure',\n    DiagnosticSeverity.Error,\n    'source root',\n  );\n}\n\n/**\n * Add a diagnostic error to the CodeQL database for dependency installation failure\n * @param sourceRoot Source root directory to use as file context\n * @param errorMessage The error message from dependency installation\n * @param codeqlExePath Path to the CodeQL executable\n * @returns True if the diagnostic was added, false otherwise\n */\nexport function addDependencyInstallationDiagnostic(\n  sourceRoot: string,\n  errorMessage: string,\n  codeqlExePath: string,\n): boolean {\n  return addDiagnostic(\n    sourceRoot,\n    errorMessage,\n    codeqlExePath,\n    'cds/dependency-installation-failure',\n    'CDS dependency installation failure',\n    DiagnosticSeverity.Error,\n    'source root',\n  );\n}\n\n/**\n * Add a diagnostic error to the CodeQL database for environment setup failure\n * @param sourceRoot Source root directory to use as file context\n * @param errorMessage The error message from environment setup\n * @param codeqlExePath Path to the CodeQL executable\n * @returns True if the diagnostic was added, false otherwise\n */\nexport function addEnvironmentSetupDiagnostic(\n  sourceRoot: string,\n  errorMessage: string,\n  codeqlExePath: string,\n): boolean {\n  // Use a representative file from source root or the directory itself\n  const contextFile = sourceRoot;\n  return addDiagnostic(\n    contextFile,\n    errorMessage,\n    codeqlExePath,\n    'cds/environment-setup-failure',\n    'CDS extractor environment setup failure',\n    DiagnosticSeverity.Error,\n    'source root',\n  );\n}\n\n/**\n * Add a diagnostic error to the CodeQL database for a JavaScript extractor failure\n * @param filePath Path to a relevant file for the error context\n * @param errorMessage The error message from the JavaScript extractor\n * @param codeqlExePath Path to the CodeQL executable\n 
* @param sourceRoot Optional source root directory; when provided, a relative `filePath` is resolved against it\n * @returns True if the diagnostic was added, false otherwise\n */\nexport function addJavaScriptExtractorDiagnostic(\n  filePath: string,\n  errorMessage: string,\n  codeqlExePath: string,\n  sourceRoot?: string,\n): boolean {\n  // Callers in cds-extractor.ts pass sourceRoot as a fourth argument. Resolving a\n  // relative filePath against it keeps the diagnostic anchored to a real file under\n  // the scanned source tree (this resolution behavior is an editorial assumption).\n  return addDiagnostic(\n    sourceRoot ? resolve(sourceRoot, filePath) : filePath,\n    errorMessage,\n    codeqlExePath,\n    'cds/js-extractor-failure',\n    'Failure in JavaScript extractor for SAP CAP CDS files',\n    DiagnosticSeverity.Error,\n    'extraction file',\n  );\n}\n\n/**\n * Add a diagnostic warning when no CDS projects are detected\n * @param sourceRoot Source root directory to use as file context\n * @param message The warning message about no CDS projects\n * @param codeqlExePath Path to the CodeQL executable\n * @returns True if the diagnostic was added, false otherwise\n */\nexport function addNoCdsProjectsDiagnostic(\n  sourceRoot: string,\n  message: string,\n  codeqlExePath: string,\n): boolean {\n  return addDiagnostic(\n    sourceRoot,\n    message,\n    codeqlExePath,\n    'cds/no-cds-projects',\n    'No CDS projects detected in source',\n    DiagnosticSeverity.Warning,\n    'source root',\n  );\n}\n", "import { execFileSync } from 'child_process';\nimport { createHash } from 'crypto';\nimport { existsSync, mkdirSync, writeFileSync } from 'fs';\nimport { basename, join, resolve } from 'path';\n\nimport type { CdsDependencyCombination } from './types';\nimport { CdsDependencyGraph, CdsProject } from '../cds/parser/types';\nimport { DiagnosticSeverity } from '../diagnostics';\nimport { cdsExtractorLog } from '../logging';\nimport { resolveCdsVersions } from './versionResolver';\n\nconst cacheSubDirName = '.cds-extractor-cache';\n\n/**\n * Add a warning diagnostic for dependency version fallback\n * @param packageJsonPath Path to the package.json file\n * @param warningMessage The warning message\n * @param codeqlExePath Path to the CodeQL executable\n * @returns True if the diagnostic was added, false otherwise\n */\nfunction addDependencyVersionWarning(\n  packageJsonPath: string,\n  warningMessage: string,\n  codeqlExePath: string,\n): boolean {\n  try {\n    execFileSync(codeqlExePath, [\n      'database',\n      'add-diagnostic',\n      '--extractor-name=cds',\n      '--ready-for-status-page',\n      '--source-id=cds/dependency-version-fallback',\n      '--source-name=Using fallback versions for SAP CAP CDS dependencies',\n      `--severity=${DiagnosticSeverity.Warning}`,\n      `--markdown-message=${warningMessage}`,\n      `--file-path=${resolve(packageJsonPath)}`,\n      '--',\n      `${process.env.CODEQL_EXTRACTOR_CDS_WIP_DATABASE ?? 
''}`,\n    ]);\n    cdsExtractorLog('info', `Added warning diagnostic for dependency fallback: ${packageJsonPath}`);\n    return true;\n  } catch (err) {\n    cdsExtractorLog(\n      'error',\n      `Failed to add warning diagnostic for ${packageJsonPath}: ${String(err)}`,\n    );\n    return false;\n  }\n}\n\n/**\n * Install dependencies for CDS projects using a robust cache strategy with fallback logic\n * @param dependencyGraph The dependency graph of the project\n * @param sourceRoot Source root directory\n * @param codeqlExePath Path to the CodeQL executable (optional)\n * @returns Map of project directories to their corresponding cache directories\n */\nexport function cacheInstallDependencies(\n  dependencyGraph: CdsDependencyGraph,\n  sourceRoot: string,\n  codeqlExePath?: string,\n): Map<string, string> {\n  // Sanity check that we found at least one project\n  if (dependencyGraph.projects.size === 0) {\n    cdsExtractorLog('info', 'No CDS projects found for dependency installation.');\n    cdsExtractorLog(\n      'info',\n      'This is expected if the source contains no CAP/CDS projects and should be handled by the caller.',\n    );\n    return new Map<string, string>();\n  }\n\n  // Extract unique dependency combinations from all projects with version resolution\n  const dependencyCombinations = extractUniqueDependencyCombinations(dependencyGraph.projects);\n\n  if (dependencyCombinations.length === 0) {\n    cdsExtractorLog(\n      'error',\n      'No CDS dependencies found in any project. This means projects were detected but lack proper @sap/cds dependencies.',\n    );\n    cdsExtractorLog(\n      'info',\n      'Will attempt to use system-installed CDS tools if available, but compilation may fail.',\n    );\n    return new Map<string, string>();\n  }\n\n  cdsExtractorLog(\n    'info',\n    `Found ${dependencyCombinations.length} unique CDS dependency combination(s).`,\n  );\n\n  // Log each dependency combination for transparency\n  for (const combination of dependencyCombinations) {\n    const { cdsVersion, cdsDkVersion, hash, resolvedCdsVersion, resolvedCdsDkVersion, isFallback } =\n      combination;\n    const actualCdsVersion = resolvedCdsVersion ?? cdsVersion;\n    const actualCdsDkVersion = resolvedCdsDkVersion ?? cdsDkVersion;\n    const fallbackNote = isFallback ? ' (using fallback versions)' : '';\n\n    cdsExtractorLog(\n      'info',\n      `Dependency combination ${hash.substring(0, 8)}: @sap/cds@${actualCdsVersion}, @sap/cds-dk@${actualCdsDkVersion}${fallbackNote}`,\n    );\n  }\n\n  // Create a cache directory under the source root directory.\n  const cacheRootDir = join(sourceRoot, cacheSubDirName);\n  cdsExtractorLog(\n    'info',\n    `Using cache directory '${cacheRootDir}' (subdirectory '${cacheSubDirName}' of the source root)`,\n  );\n\n  if (!existsSync(cacheRootDir)) {\n    try {\n      mkdirSync(cacheRootDir, { recursive: true });\n      cdsExtractorLog('info', `Created cache directory: ${cacheRootDir}`);\n    } catch (err) {\n      cdsExtractorLog(\n        'warn',\n        `Failed to create cache directory: ${err instanceof Error ? 
err.message : String(err)}`,\n      );\n      cdsExtractorLog('info', 'Skipping dependency installation due to cache directory failure.');\n      return new Map();\n    }\n  } else {\n    cdsExtractorLog('info', `Cache directory already exists: ${cacheRootDir}`);\n  }\n\n  // Map to track which cache directory to use for each project\n  const projectCacheDirMap = new Map();\n  let successfulInstallations = 0;\n\n  // Install each unique dependency combination in its own cache directory\n  for (const combination of dependencyCombinations) {\n    const { cdsVersion, cdsDkVersion, hash } = combination;\n    const { resolvedCdsVersion, resolvedCdsDkVersion } = combination;\n    const cacheDirName = `cds-${hash}`;\n    const cacheDir = join(cacheRootDir, cacheDirName);\n\n    cdsExtractorLog(\n      'info',\n      `Processing dependency combination ${hash.substring(0, 8)} in cache directory: ${cacheDirName}`,\n    );\n\n    // Create the cache directory if it doesn't exist\n    if (!existsSync(cacheDir)) {\n      try {\n        mkdirSync(cacheDir, { recursive: true });\n        cdsExtractorLog('info', `Created cache subdirectory: ${cacheDirName}`);\n      } catch (err) {\n        cdsExtractorLog(\n          'error',\n          `Failed to create cache directory for combination ${hash.substring(0, 8)} (${cacheDirName}): ${\n            err instanceof Error ? err.message : String(err)\n          }`,\n        );\n        continue;\n      }\n\n      // Create a package.json for this dependency combination using resolved versions\n      const actualCdsVersion = resolvedCdsVersion ?? cdsVersion;\n      const actualCdsDkVersion = resolvedCdsDkVersion ?? cdsDkVersion;\n\n      const packageJson = {\n        name: `cds-extractor-cache-${hash}`,\n        version: '1.0.0',\n        private: true,\n        dependencies: {\n          '@sap/cds': actualCdsVersion,\n          '@sap/cds-dk': actualCdsDkVersion,\n        },\n      };\n\n      try {\n        writeFileSync(join(cacheDir, 'package.json'), JSON.stringify(packageJson, null, 2));\n        cdsExtractorLog('info', `Created package.json in cache subdirectory: ${cacheDirName}`);\n      } catch (err) {\n        cdsExtractorLog(\n          'error',\n          `Failed to create package.json in cache directory ${cacheDirName}: ${\n            err instanceof Error ? err.message : String(err)\n          }`,\n        );\n        continue;\n      }\n    }\n\n    // Try to install dependencies in the cache directory\n    // Get the first project package.json path for diagnostic purposes\n    const samplePackageJsonPath = Array.from(dependencyGraph.projects.values()).find(\n      project => project.packageJson,\n    )?.projectDir;\n    const packageJsonPath = samplePackageJsonPath\n      ? join(sourceRoot, samplePackageJsonPath, 'package.json')\n      : undefined;\n\n    const installSuccess = installDependenciesInCache(\n      cacheDir,\n      combination,\n      cacheDirName,\n      packageJsonPath,\n      codeqlExePath,\n    );\n\n    if (!installSuccess) {\n      cdsExtractorLog(\n        'warn',\n        `Skipping failed dependency combination ${hash.substring(0, 8)} (cache directory: ${cacheDirName})`,\n      );\n      continue;\n    }\n\n    successfulInstallations++;\n\n    // Associate projects with this dependency combination\n    for (const [projectDir, project] of Array.from(dependencyGraph.projects.entries())) {\n      if (!project.packageJson) {\n        continue;\n      }\n      const p_cdsVersion = project.packageJson.dependencies?.['@sap/cds'] ?? 
'latest';\n      const p_cdsDkVersion = project.packageJson.devDependencies?.['@sap/cds-dk'] ?? p_cdsVersion;\n\n      // Resolve the project's versions to match against the combination's resolved versions\n      const projectResolvedVersions = resolveCdsVersions(p_cdsVersion, p_cdsDkVersion);\n      const projectActualCdsVersion = projectResolvedVersions.resolvedCdsVersion ?? p_cdsVersion;\n      const projectActualCdsDkVersion =\n        projectResolvedVersions.resolvedCdsDkVersion ?? p_cdsDkVersion;\n\n      // Match based on resolved versions since that's what the hash is based on\n      const combinationActualCdsVersion = combination.resolvedCdsVersion ?? combination.cdsVersion;\n      const combinationActualCdsDkVersion =\n        combination.resolvedCdsDkVersion ?? combination.cdsDkVersion;\n\n      if (\n        projectActualCdsVersion === combinationActualCdsVersion &&\n        projectActualCdsDkVersion === combinationActualCdsDkVersion\n      ) {\n        projectCacheDirMap.set(projectDir, cacheDir);\n      }\n    }\n  }\n\n  // Log final status\n  if (successfulInstallations === 0) {\n    cdsExtractorLog('error', 'Failed to install any dependency combinations.');\n    if (dependencyCombinations.length > 0) {\n      cdsExtractorLog(\n        'error',\n        `All ${dependencyCombinations.length} dependency combination(s) failed to install. This will likely cause compilation failures.`,\n      );\n    }\n  } else if (successfulInstallations < dependencyCombinations.length) {\n    cdsExtractorLog(\n      'warn',\n      `Successfully installed ${successfulInstallations} out of ${dependencyCombinations.length} dependency combinations.`,\n    );\n  } else {\n    cdsExtractorLog('info', 'All dependency combinations installed successfully.');\n  }\n\n  // Log project-to-cache-directory mappings for transparency.\n  if (projectCacheDirMap.size > 0) {\n    cdsExtractorLog('info', `Project to cache directory mappings:`);\n    for (const [projectDir, cacheDir] of Array.from(projectCacheDirMap.entries())) {\n      const cacheDirName = basename(cacheDir);\n      cdsExtractorLog('info', `  ${projectDir} \u2192 ${cacheDirName}`);\n    }\n  } else {\n    cdsExtractorLog(\n      'warn',\n      'No project to cache directory mappings created. Projects may not have compatible dependencies installed.',\n    );\n  }\n\n  return projectCacheDirMap;\n}\n\n/**\n * Extracts unique dependency combinations from the dependency graph.\n * @param projects A map of projects from the dependency graph.\n * @returns An array of unique dependency combinations.\n */\nfunction extractUniqueDependencyCombinations(\n  projects: Map<string, CdsProject>,\n): CdsDependencyCombination[] {\n  const combinations = new Map<string, CdsDependencyCombination>();\n\n  for (const project of Array.from(projects.values())) {\n    if (!project.packageJson) {\n      continue;\n    }\n\n    const cdsVersion = project.packageJson.dependencies?.['@sap/cds'] ?? 'latest';\n    const cdsDkVersion = project.packageJson.devDependencies?.['@sap/cds-dk'] ?? 
cdsVersion;\n\n    // Resolve versions first to ensure we cache based on actual resolved versions\n    cdsExtractorLog(\n      'info',\n      `Resolving available dependency versions for project '${project.projectDir}' with dependencies: [@sap/cds@${cdsVersion}, @sap/cds-dk@${cdsDkVersion}]`,\n    );\n    const resolvedVersions = resolveCdsVersions(cdsVersion, cdsDkVersion);\n    const { resolvedCdsVersion, resolvedCdsDkVersion, ...rest } = resolvedVersions;\n\n    // Log the resolved CDS dependency versions for the project\n    if (resolvedCdsVersion && resolvedCdsDkVersion) {\n      let statusMsg: string;\n      if (resolvedVersions.cdsExactMatch && resolvedVersions.cdsDkExactMatch) {\n        statusMsg = ' (exact match)';\n      } else if (!resolvedVersions.isFallback) {\n        statusMsg = ' (compatible versions)';\n      } else {\n        statusMsg = ' (using fallback versions)';\n      }\n      cdsExtractorLog(\n        'info',\n        `Resolved to: @sap/cds@${resolvedCdsVersion}, @sap/cds-dk@${resolvedCdsDkVersion}${statusMsg}`,\n      );\n    } else {\n      cdsExtractorLog(\n        'error',\n        `Failed to resolve CDS dependencies: @sap/cds@${cdsVersion}, @sap/cds-dk@${cdsDkVersion}`,\n      );\n    }\n\n    // Calculate hash based on resolved versions to ensure proper cache reuse\n    const actualCdsVersion = resolvedCdsVersion ?? cdsVersion;\n    const actualCdsDkVersion = resolvedCdsDkVersion ?? cdsDkVersion;\n    const hash = createHash('sha256')\n      .update(`${actualCdsVersion}|${actualCdsDkVersion}`)\n      .digest('hex');\n\n    if (!combinations.has(hash)) {\n      combinations.set(hash, {\n        cdsVersion,\n        cdsDkVersion,\n        hash,\n        resolvedCdsVersion: resolvedCdsVersion ?? undefined,\n        resolvedCdsDkVersion: resolvedCdsDkVersion ?? 
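// --- Editorial sketch (not in the original source): how a resolved version pair maps
// to a cache directory name. The versions are hypothetical.
import { createHash } from 'crypto';

const exampleHash = createHash('sha256').update('6.1.3|6.1.3').digest('hex');
// Cache directory 'cds-<exampleHash>' under .cds-extractor-cache/ is shared by every
// project whose dependencies resolve to the same '@sap/cds|@sap/cds-dk' pair.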
undefined,\n        ...rest,\n      });\n    }\n  }\n\n  return Array.from(combinations.values());\n}\n\n/**\n * Attempt to install dependencies in a cache directory with fallback logic\n * @param cacheDir Cache directory path\n * @param combination Dependency combination to install\n * @param cacheDirName Name of the cache directory for logging\n * @param packageJsonPath Optional package.json path for diagnostics\n * @param codeqlExePath Optional CodeQL executable path for diagnostics\n * @returns True if installation succeeded, false otherwise\n */\nfunction installDependenciesInCache(\n  cacheDir: string,\n  combination: CdsDependencyCombination,\n  cacheDirName: string,\n  packageJsonPath?: string,\n  codeqlExePath?: string,\n): boolean {\n  const { resolvedCdsVersion, resolvedCdsDkVersion, isFallback, warning } = combination;\n\n  // Check if node_modules directory already exists in the cache dir\n  const nodeModulesExists =\n    existsSync(join(cacheDir, 'node_modules', '@sap', 'cds')) &&\n    existsSync(join(cacheDir, 'node_modules', '@sap', 'cds-dk'));\n\n  if (nodeModulesExists) {\n    cdsExtractorLog(\n      'info',\n      `Using cached dependencies for @sap/cds@${resolvedCdsVersion} and @sap/cds-dk@${resolvedCdsDkVersion} from ${cacheDirName}`,\n    );\n\n    // Add warning diagnostic if using fallback versions\n    if (isFallback && warning && packageJsonPath && codeqlExePath) {\n      addDependencyVersionWarning(packageJsonPath, warning, codeqlExePath);\n    }\n\n    return true;\n  }\n\n  if (!resolvedCdsVersion || !resolvedCdsDkVersion) {\n    cdsExtractorLog('error', 'Cannot install dependencies: no compatible versions found');\n    return false;\n  }\n\n  // Install dependencies in the cache directory\n  cdsExtractorLog(\n    'info',\n    `Installing @sap/cds@${resolvedCdsVersion} and @sap/cds-dk@${resolvedCdsDkVersion} in cache directory: ${cacheDirName}`,\n  );\n\n  if (isFallback && warning) {\n    cdsExtractorLog('warn', warning);\n  }\n\n  try {\n    execFileSync('npm', ['install', '--quiet', '--no-audit', '--no-fund'], {\n      cwd: cacheDir,\n      stdio: 'inherit',\n    });\n\n    // Add warning diagnostic if using fallback versions\n    if (isFallback && warning && packageJsonPath && codeqlExePath) {\n      addDependencyVersionWarning(packageJsonPath, warning, codeqlExePath);\n    }\n\n    return true;\n  } catch (err) {\n    const errorMessage = `Failed to install resolved dependencies in cache directory ${cacheDir}: ${err instanceof Error ? err.message : String(err)}`;\n    cdsExtractorLog('error', errorMessage);\n    return false;\n  }\n}\n", "import { execSync } from 'child_process';\n\nimport type { SemanticVersion } from './types';\nimport { cdsExtractorLog } from '../logging';\n\n/**\n * Cache for storing available versions for npm packages to avoid duplicate\n * `npm view` calls.\n */\nconst availableVersionsCache = new Map();\n\n// Define the set of allowed npm packages for which we cache versions.\ntype CachedPackageName = '@sap/cds' | '@sap/cds-dk';\n\n/**\n * Cache statistics for debugging purposes\n */\nconst cacheStats = {\n  hits: 0,\n  misses: 0,\n  get hitRate() {\n    const total = this.hits + this.misses;\n    return total > 0 ? 
((this.hits / total) * 100).toFixed(1) : '0.0';\n  },\n};\n\n/**\n * Check if @sap/cds and @sap/cds-dk versions are likely compatible.\n * @param cdsVersion The @sap/cds version\n * @param cdsDkVersion The @sap/cds-dk version\n * @returns Object with compatibility information and warnings\n */\nexport function checkVersionCompatibility(\n  cdsVersion: string,\n  cdsDkVersion: string,\n): {\n  isCompatible: boolean;\n  warning?: string;\n} {\n  // If either version is 'latest', assume they are compatible\n  if (cdsVersion === 'latest' || cdsDkVersion === 'latest') {\n    return { isCompatible: true };\n  }\n\n  const parsedCds = parseSemanticVersion(cdsVersion);\n  const parsedCdsDk = parseSemanticVersion(cdsDkVersion);\n\n  if (!parsedCds || !parsedCdsDk) {\n    return {\n      isCompatible: false,\n      warning: 'Unable to parse version numbers for compatibility check',\n    };\n  }\n\n  // Generally, @sap/cds and @sap/cds-dk should have the same major version\n  // and ideally the same minor version for best compatibility\n  const majorVersionsMatch = parsedCds.major === parsedCdsDk.major;\n  const minorVersionsMatch = parsedCds.minor === parsedCdsDk.minor;\n\n  if (!majorVersionsMatch) {\n    return {\n      isCompatible: false,\n      warning: `Major version mismatch: @sap/cds ${cdsVersion} and @sap/cds-dk ${cdsDkVersion} may not be compatible`,\n    };\n  }\n\n  if (!minorVersionsMatch) {\n    return {\n      isCompatible: true,\n      warning: `Minor version difference: @sap/cds ${cdsVersion} and @sap/cds-dk ${cdsDkVersion} - consider aligning versions for best compatibility`,\n    };\n  }\n\n  return { isCompatible: true };\n}\n\n/**\n * Compare two semantic versions\n * @param a First version\n * @param b Second version\n * @returns Negative if a < b, 0 if equal, positive if a > b\n */\nexport function compareVersions(a: SemanticVersion, b: SemanticVersion): number {\n  if (a.major !== b.major) return a.major - b.major;\n  if (a.minor !== b.minor) return a.minor - b.minor;\n  if (a.patch !== b.patch) return a.patch - b.patch;\n\n  // Handle prerelease versions (prerelease < release)\n  if (a.prerelease && !b.prerelease) return -1;\n  if (!a.prerelease && b.prerelease) return 1;\n  if (a.prerelease && b.prerelease) {\n    return a.prerelease.localeCompare(b.prerelease);\n  }\n\n  return 0;\n}\n\n/**\n * Find the best available version from a list of versions for a given requirement\n * @param availableVersions List of available version strings\n * @param requiredVersion Required version string\n * @returns Best matching version or null if no compatible version found\n */\nexport function findBestAvailableVersion(\n  availableVersions: string[],\n  requiredVersion: string,\n): string | null {\n  const parsedVersions = availableVersions\n    .map(v => parseSemanticVersion(v))\n    .filter((v): v is SemanticVersion => v !== null);\n\n  if (parsedVersions.length === 0) {\n    return null;\n  }\n\n  // First, try to find versions that satisfy the range\n  const satisfyingVersions = parsedVersions.filter(v => satisfiesRange(v, requiredVersion));\n\n  if (satisfyingVersions.length > 0) {\n    // Sort in descending order (newest first) and return the best match\n    satisfyingVersions.sort((a, b) => compareVersions(b, a));\n    return satisfyingVersions[0].original;\n  }\n\n  // If no exact match, prefer newer versions over older ones\n  // Sort all versions in descending order and return the newest\n  parsedVersions.sort((a, b) => compareVersions(b, a));\n  return 
parsedVersions[0].original;\n}\n\n/**\n * Get available versions for an npm package with caching to avoid duplicate\n * calls of the `npm view` command.\n * @param packageName The {@link CachedPackageName} for which to get versions\n * @returns Array of available version strings\n */\nexport function getAvailableVersions(packageName: CachedPackageName): string[] {\n  // Check cache first\n  if (availableVersionsCache.has(packageName)) {\n    cacheStats.hits++;\n    return availableVersionsCache.get(packageName)!;\n  }\n\n  // Cache miss - fetch from npm\n  cacheStats.misses++;\n  try {\n    const output = execSync(`npm view ${packageName} versions --json`, {\n      encoding: 'utf8',\n      timeout: 30000, // 30 second timeout\n    });\n\n    const versions: unknown = JSON.parse(output);\n    let versionArray: string[] = [];\n\n    if (Array.isArray(versions)) {\n      versionArray = versions.filter((v): v is string => typeof v === 'string');\n    } else if (typeof versions === 'string') {\n      versionArray = [versions];\n    }\n\n    // Cache the result\n    availableVersionsCache.set(packageName, versionArray);\n\n    return versionArray;\n  } catch (error) {\n    cdsExtractorLog('warn', `Failed to fetch versions for ${packageName}: ${String(error)}`);\n    // Cache empty array to avoid repeated failures\n    availableVersionsCache.set(packageName, []);\n    return [];\n  }\n}\n\n/**\n * Get cache statistics for debugging purposes\n * @returns Object with cache hit/miss statistics\n */\nexport function getCacheStatistics(): {\n  hits: number;\n  misses: number;\n  hitRate: string;\n  cachedPackages: string[];\n} {\n  return {\n    hits: cacheStats.hits,\n    misses: cacheStats.misses,\n    hitRate: cacheStats.hitRate,\n    cachedPackages: Array.from(availableVersionsCache.keys()),\n  };\n}\n\n/**\n * Parse a semantic version string\n * @param version Version string to parse (e.g., \"6.1.3\", \"^6.0.0\", \"~6.1.0\", \"latest\")\n * @returns Parsed semantic version or null if invalid\n */\nexport function parseSemanticVersion(version: string): SemanticVersion | null {\n  if (version === 'latest') {\n    // Return a very high version number for 'latest' to ensure it's preferred\n    return {\n      major: 999,\n      minor: 999,\n      patch: 999,\n      original: version,\n    };\n  }\n\n  // Remove common version prefixes\n  const cleanVersion = version.replace(/^[\\^~>=<]+/, '');\n\n  // Basic semver regex\n  const semverRegex = /^(\\d+)\\.(\\d+)\\.(\\d+)(?:-([a-zA-Z0-9.-]+))?(?:\\+([a-zA-Z0-9.-]+))?$/;\n  const match = cleanVersion.match(semverRegex);\n\n  if (!match) {\n    return null;\n  }\n\n  return {\n    major: parseInt(match[1], 10),\n    minor: parseInt(match[2], 10),\n    patch: parseInt(match[3], 10),\n    prerelease: match[4],\n    build: match[5],\n    original: version,\n  };\n}\n\n/**\n * Check if a resolved version satisfies the originally requested version.\n * @param resolvedVersion The version that was resolved\n * @param requestedVersion The originally requested version\n * @returns true if the resolved version satisfies the requested version range\n */\nfunction isSatisfyingVersion(resolvedVersion: string, requestedVersion: string): boolean {\n  // Exact string match or 'latest' case\n  if (resolvedVersion === requestedVersion || requestedVersion === 'latest') {\n    return true;\n  }\n\n  const parsedResolved = parseSemanticVersion(resolvedVersion);\n  if (!parsedResolved) {\n    return false;\n  }\n\n  return satisfiesRange(parsedResolved, 
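// --- Editorial sketch (not in the original source): the caching contract of
// getAvailableVersions. `npm view <pkg> versions --json` prints a JSON array such as
// ["6.0.0", "6.1.3", ...]; only the first call per package shells out.
const firstLookup = getAvailableVersions('@sap/cds'); // cache miss: runs npm view
const secondLookup = getAvailableVersions('@sap/cds'); // cache hit: no subprocess
// firstLookup === secondLookup (same cached array); a failed lookup caches [] so the
// extractor does not repeatedly invoke npm for a package that cannot be resolved.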
requestedVersion);\n}\n\n/**\n * Resolve the best available version for CDS dependencies\n * @param cdsVersion Required @sap/cds version\n * @param cdsDkVersion Required @sap/cds-dk version\n * @returns Object with resolved versions and compatibility info\n */\nexport function resolveCdsVersions(\n  cdsVersion: string,\n  cdsDkVersion: string,\n): {\n  resolvedCdsVersion: string | null;\n  resolvedCdsDkVersion: string | null;\n  cdsExactMatch: boolean;\n  cdsDkExactMatch: boolean;\n  warning?: string;\n  isFallback?: boolean;\n} {\n  const cdsVersions = getAvailableVersions('@sap/cds');\n  const cdsDkVersions = getAvailableVersions('@sap/cds-dk');\n\n  const resolvedCdsVersion = findBestAvailableVersion(cdsVersions, cdsVersion);\n  const resolvedCdsDkVersion = findBestAvailableVersion(cdsDkVersions, cdsDkVersion);\n\n  // Check if resolved versions are exact matches (string equality or 'latest' case).\n  const cdsExactMatch =\n    resolvedCdsVersion === cdsVersion || (cdsVersion === 'latest' && resolvedCdsVersion !== null);\n  const cdsDkExactMatch =\n    resolvedCdsDkVersion === cdsDkVersion ||\n    (cdsDkVersion === 'latest' && resolvedCdsDkVersion !== null);\n\n  // Check if resolved versions satisfy the requested ranges (including exact matches).\n  const cdsSatisfiesRange = resolvedCdsVersion\n    ? isSatisfyingVersion(resolvedCdsVersion, cdsVersion)\n    : false;\n  const cdsDkSatisfiesRange = resolvedCdsDkVersion\n    ? isSatisfyingVersion(resolvedCdsDkVersion, cdsDkVersion)\n    : false;\n\n  // Only consider it a fallback if we couldn't find a satisfying version.\n  const isFallback = !cdsSatisfiesRange || !cdsDkSatisfiesRange;\n\n  let warning: string | undefined;\n\n  // Check compatibility between resolved versions (only if both were resolved).\n  // Show warnings when:\n  // 1. We're using fallback versions (couldn't find compatible versions), OR\n  // 2. At least one version isn't an exact match (version range was used), OR\n  // 3. 
Resolved versions have actual compatibility issues (e.g., major version mismatch).\n  if (resolvedCdsVersion && resolvedCdsDkVersion) {\n    const compatibility = checkVersionCompatibility(resolvedCdsVersion, resolvedCdsDkVersion);\n\n    const shouldShowWarning =\n      isFallback ||\n      !cdsExactMatch ||\n      !cdsDkExactMatch ||\n      (compatibility.warning && !compatibility.isCompatible);\n\n    if (compatibility.warning && shouldShowWarning) {\n      warning = compatibility.warning;\n    }\n  }\n\n  return {\n    resolvedCdsVersion,\n    resolvedCdsDkVersion,\n    cdsExactMatch,\n    cdsDkExactMatch,\n    warning,\n    isFallback,\n  };\n}\n\n/**\n * Check if version satisfies a version range.\n * @param version Version to check\n * @param range Version range (e.g., \"^6.0.0\", \"~6.1.0\", \">=6.0.0\")\n * @returns true if version satisfies the range\n */\nexport function satisfiesRange(version: SemanticVersion, range: string): boolean {\n  if (range === 'latest') {\n    return true;\n  }\n\n  const rangeVersion = parseSemanticVersion(range);\n  if (!rangeVersion) {\n    return false;\n  }\n\n  if (range.startsWith('^')) {\n    // Caret range: compatible within same major version\n    return version.major === rangeVersion.major && compareVersions(version, rangeVersion) >= 0;\n  } else if (range.startsWith('~')) {\n    // Tilde range: compatible within same minor version\n    return (\n      version.major === rangeVersion.major &&\n      version.minor === rangeVersion.minor &&\n      compareVersions(version, rangeVersion) >= 0\n    );\n  } else if (range.startsWith('>=')) {\n    // Greater than or equal\n    return compareVersions(version, rangeVersion) >= 0;\n  } else if (range.startsWith('>')) {\n    // Greater than\n    return compareVersions(version, rangeVersion) > 0;\n  } else if (range.startsWith('<=')) {\n    // Less than or equal\n    return compareVersions(version, rangeVersion) <= 0;\n  } else if (range.startsWith('<')) {\n    // Less than\n    return compareVersions(version, rangeVersion) < 0;\n  } else {\n    // Exact match\n    return compareVersions(version, rangeVersion) === 0;\n  }\n}\n\n/**\n * Test-only exports - DO NOT USE IN PRODUCTION CODE\n * These are exported only for testing purposes\n */\nexport const __testOnly__ = {\n  availableVersionsCache,\n  cacheStats,\n};\n", "/** Full dependency installation utilities for retry scenarios. */\n\nimport { execFileSync } from 'child_process';\nimport { join } from 'path';\n\nimport type { FullDependencyInstallationResult } from './types';\nimport type { CdsProject } from '../cds/parser';\nimport { cdsExtractorLog } from '../logging';\n\n/**\n * Determines if a {@link CdsProject} requires \"full\" dependency installation.\n *\n * @param project The {@link CdsProject} to check\n * @returns `true` if the project has at least one compilation task that is\n * currently marked as `failed` AND has not yet been retried. 
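// --- Editorial spot checks (not in the original source) for the range semantics
// implemented by satisfiesRange above, assuming parseSemanticVersion succeeds:
//
//   satisfiesRange(parseSemanticVersion('6.2.0')!, '^6.0.0') === true   // same major, >= base
//   satisfiesRange(parseSemanticVersion('7.0.0')!, '^6.0.0') === false  // major differs
//   satisfiesRange(parseSemanticVersion('6.1.5')!, '~6.1.0') === true   // same major.minor, >= base
//   satisfiesRange(parseSemanticVersion('6.2.0')!, '~6.1.0') === false  // minor differs
//   satisfiesRange(parseSemanticVersion('6.1.3')!, '6.1.3')  === true   // exact match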
Otherwise, `false`.\n */\nexport function needsFullDependencyInstallation(project: CdsProject): boolean {\n  // Check if already installed\n  if (project.retryStatus?.fullDependenciesInstalled) {\n    return false;\n  }\n\n  // Check if project has failed tasks that could benefit from full dependencies.\n  //\n  // Currently, we only allow for one retry, because the only significant change we\n  // can make (to justify a retry) is to install and use the full set of declared\n  // dependencies instead of the minimal set of cached (`@sap/cds` and `@sap/cds-dk`)\n  // dependencies.\n  const hasFailedTasks = project.compilationTasks.some(\n    task => task.status === 'failed' && !task.retryInfo?.hasBeenRetried,\n  );\n\n  return hasFailedTasks && project.packageJson !== undefined;\n}\n\n/**\n * Installs full dependencies for a {@link CdsProject} in support of retry behavior\n * for compilation tasks that fail unless the `cds` CLI/compiler has access to the\n * full set of dependencies declared for the project.\n *\n * @param project The CDS project to install dependencies for\n * @param sourceRoot Source root directory\n * @returns Installation result with details\n */\nexport function projectInstallDependencies(\n  project: CdsProject,\n  sourceRoot: string,\n): FullDependencyInstallationResult {\n  const startTime = Date.now();\n  const projectPath = join(sourceRoot, project.projectDir);\n\n  const result: FullDependencyInstallationResult = {\n    success: false,\n    projectDir: projectPath,\n    warnings: [],\n    durationMs: 0,\n    timedOut: false,\n  };\n\n  try {\n    // Check if project has package.json\n    if (!project.packageJson) {\n      result.error = 'No package.json found for project';\n      return result;\n    }\n\n    // Install dependencies using npm in the project's directory\n    cdsExtractorLog(\n      'info',\n      `Installing full dependencies for project ${project.projectDir} in project's node_modules`,\n    );\n\n    try {\n      execFileSync('npm', ['install', '--quiet', '--no-audit', '--no-fund'], {\n        cwd: projectPath,\n        stdio: 'inherit',\n        timeout: 120000, // 2-minute timeout\n      });\n\n      result.success = true;\n      cdsExtractorLog(\n        'info',\n        `Successfully installed full dependencies for project ${project.projectDir}`,\n      );\n    } catch (execError) {\n      if (execError instanceof Error && 'signal' in execError && execError.signal === 'SIGTERM') {\n        result.timedOut = true;\n        result.error = 'Dependency installation timed out';\n      } else {\n        result.error = `npm install failed: ${String(execError)}`;\n      }\n\n      // Still attempt retry compilation even if dependency installation fails (optimistic approach)\n      result.warnings.push(\n        `Dependency installation failed but will still attempt retry compilation: ${result.error}`,\n      );\n      cdsExtractorLog('warn', result.warnings[0]);\n    }\n  } catch (error) {\n    result.error = `Failed to install full dependencies: ${String(error)}`;\n    cdsExtractorLog('error', result.error);\n  } finally {\n    result.durationMs = Date.now() - startTime;\n  }\n\n  return result;\n}\n", "/** Main retry orchestration logic for CDS compilation failures. 
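// --- Editorial sketch (not in the original source): a condensed form of the timeout
// handling used by projectInstallDependencies above. execFileSync kills the child
// with SIGTERM once `timeout` elapses, and the thrown error carries that signal.
import { execFileSync } from 'child_process';

try {
  execFileSync('npm', ['install', '--quiet', '--no-audit', '--no-fund'], {
    cwd: '/path/to/project', // hypothetical project directory
    stdio: 'inherit',
    timeout: 120000, // 2 minutes
  });
} catch (execError) {
  const timedOut =
    execError instanceof Error && 'signal' in execError && execError.signal === 'SIGTERM';
  // `timedOut` distinguishes the 2-minute cap from an ordinary npm failure.
}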
*/\n\nimport { compileCdsToJson } from './compile';\nimport type {\n  CompilationAttempt,\n  CompilationTask,\n  ResultRetryCompilationTask,\n  ResultRetryCompilationOrchestration,\n  ValidatedCdsCommand,\n} from './types';\nimport { identifyTasksRequiringRetry, updateCdsDependencyGraphStatus } from './validator';\nimport { addCompilationDiagnostic } from '../../diagnostics';\nimport { cdsExtractorLog } from '../../logging';\nimport { needsFullDependencyInstallation, projectInstallDependencies } from '../../packageManager';\nimport type { CdsDependencyGraph, CdsProject } from '../parser';\n\n/**\n * Add diagnostics only for tasks with `status: failed` in the {@link CdsDependencyGraph}.\n * @param dependencyGraph The dependency graph to use as the source of truth for task status\n * @param codeqlExePath Path to CodeQL executable used to add a diagnostic notification\n */\nfunction addCompilationDiagnosticsForFailedTasks(\n  dependencyGraph: CdsDependencyGraph,\n  codeqlExePath: string,\n): void {\n  for (const project of dependencyGraph.projects.values()) {\n    for (const task of project.compilationTasks) {\n      // Add diagnostics for tasks that currently have a `status` of 'failed'.\n      if (task.status === 'failed') {\n        // Add a diagnostic if the task:\n        //  - failed initially and was never retried, or...\n        //  - failed initially and was retried without success (or without updating status).\n        const shouldAddDiagnostic = task.retryInfo?.hasBeenRetried ?? !task.retryInfo;\n\n        if (shouldAddDiagnostic) {\n          for (const sourceFile of task.sourceFiles) {\n            addCompilationDiagnostic(\n              sourceFile,\n              task.errorSummary ?? 'Compilation failed',\n              codeqlExePath,\n            );\n          }\n        }\n      }\n    }\n  }\n}\n\n/**\n * Main orchestration function for retrying failed tasks in the {@link CdsDependencyGraph}.\n * @param dependencyGraph The dependency graph containing compilation tasks\n * @param codeqlExePath Path to `codeql` executable to use for adding diagnostic notifications\n * @returns The {@link ResultRetryCompilationOrchestration}\n */\nexport function orchestrateRetryAttempts(\n  dependencyGraph: CdsDependencyGraph,\n  codeqlExePath: string,\n): ResultRetryCompilationOrchestration {\n  const startTime = Date.now();\n  let dependencyInstallationStartTime = 0;\n  let dependencyInstallationEndTime = 0;\n  let retryCompilationStartTime = 0;\n  let retryCompilationEndTime = 0;\n\n  const result: ResultRetryCompilationOrchestration = {\n    success: true,\n    projectsWithRetries: [],\n    totalTasksRequiringRetry: 0,\n    totalSuccessfulRetries: 0,\n    totalFailedRetries: 0,\n    projectsWithSuccessfulDependencyInstallation: [],\n    projectsWithFailedDependencyInstallation: [],\n    retryDurationMs: 0,\n    dependencyInstallationDurationMs: 0,\n    retryCompilationDurationMs: 0,\n  };\n\n  try {\n    // Phase 1: Validate current outputs and identify failed tasks.\n    cdsExtractorLog('info', 'Identifying tasks requiring retry...');\n    const tasksRequiringRetry = identifyTasksRequiringRetry(dependencyGraph);\n\n    if (tasksRequiringRetry.size === 0) {\n      cdsExtractorLog('info', 'No tasks require retry - all compilations successful');\n      return result;\n    }\n\n    // Update retry status tracking.\n    result.totalTasksRequiringRetry = Array.from(tasksRequiringRetry.values()).reduce(\n      (sum, tasks) => sum + tasks.length,\n      0,\n    );\n    
dependencyGraph.retryStatus.totalTasksRequiringRetry = result.totalTasksRequiringRetry;\n\n    // Phase 2: Install full dependencies for projects with failed tasks.\n    dependencyInstallationStartTime = Date.now();\n    for (const [projectDir, failedTasks] of tasksRequiringRetry) {\n      const project = dependencyGraph.projects.get(projectDir);\n      if (!project) {\n        continue;\n      }\n\n      if (needsFullDependencyInstallation(project)) {\n        try {\n          const installResult = projectInstallDependencies(project, dependencyGraph.sourceRootDir);\n\n          // Update project retry status.\n          project.retryStatus ??= {\n            fullDependenciesInstalled: false,\n            tasksRequiringRetry: failedTasks.length,\n            tasksRetried: 0,\n            installationErrors: [],\n          };\n\n          if (installResult.success) {\n            project.retryStatus.fullDependenciesInstalled = true;\n            result.projectsWithSuccessfulDependencyInstallation.push(projectDir);\n            dependencyGraph.retryStatus.projectsWithFullDependencies.add(projectDir);\n          } else {\n            project.retryStatus.installationErrors = [\n              ...(project.retryStatus.installationErrors ?? []),\n              installResult.error ?? 'Unknown installation error',\n            ];\n            result.projectsWithFailedDependencyInstallation.push(projectDir);\n          }\n\n          if (installResult.warnings.length > 0) {\n            for (const warning of installResult.warnings) {\n              dependencyGraph.errors.warnings.push({\n                phase: 'retry_dependency_installation',\n                message: warning,\n                timestamp: new Date(),\n                context: projectDir,\n              });\n            }\n          }\n        } catch (error) {\n          const errorMessage = `Failed to install full dependencies for project ${projectDir}: ${String(error)}`;\n          cdsExtractorLog('error', errorMessage);\n\n          dependencyGraph.errors.critical.push({\n            phase: 'retry_dependency_installation',\n            message: errorMessage,\n            timestamp: new Date(),\n          });\n\n          result.projectsWithFailedDependencyInstallation.push(projectDir);\n        }\n      }\n\n      dependencyGraph.retryStatus.projectsRequiringFullDependencies.add(projectDir);\n    }\n\n    dependencyInstallationEndTime = Date.now();\n    result.dependencyInstallationDurationMs =\n      dependencyInstallationEndTime - dependencyInstallationStartTime;\n\n    // Phase 3: Execute retry compilation attempts.\n    cdsExtractorLog('info', 'Executing retry compilation attempts...');\n    retryCompilationStartTime = Date.now();\n\n    for (const [projectDir, failedTasks] of tasksRequiringRetry) {\n      const project = dependencyGraph.projects.get(projectDir);\n      if (!project) {\n        continue;\n      }\n\n      const retryExecutionResult = retryCompilationTasksForProject(\n        failedTasks,\n        project,\n        dependencyGraph,\n      );\n\n      result.projectsWithRetries.push(projectDir);\n      result.totalSuccessfulRetries += retryExecutionResult.successfulRetries;\n      result.totalFailedRetries += retryExecutionResult.failedRetries;\n\n      // Update project retry status.\n      if (project.retryStatus) {\n        project.retryStatus.tasksRetried = retryExecutionResult.retriedTasks.length;\n      }\n    }\n\n    retryCompilationEndTime = Date.now();\n    result.retryCompilationDurationMs = 
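/*
 * Shape assumed for the return value of `identifyTasksRequiringRetry` (defined
 * in ./validator), based on how it is consumed above: a map from project
 * directory to that project's failed tasks, e.g.
 *
 *   Map { 'srv-app' => [taskA, taskB], 'db-model' => [taskC] }
 *
 * With that hypothetical input, `result.totalTasksRequiringRetry` would be 3
 * and both project directories would flow through Phases 2 and 3 above.
 */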
/**
 * Retry the provided {@link CompilationTask} using the task's configured retry command.
 * @param task The {@link CompilationTask} to be retried
 * @param retryCommand Validated CDS command to use for retry
 * @param projectDir Project directory to use as working directory
 * @param dependencyGraph The {@link CdsDependencyGraph} to be processed and updated
 * if retry succeeds.
 * @returns The result of the {@link CompilationAttempt}.
 */
function retryCompilationTask(
  task: CompilationTask,
  retryCommand: ValidatedCdsCommand,
  projectDir: string,
  dependencyGraph: CdsDependencyGraph,
): CompilationAttempt {
  const startTime = new Date();
  const attemptId = `${task.id}_retry_${startTime.getTime()}`;

  // Use the original command string for consistency with existing compilation logic.
  const cdsCommandString = retryCommand.originalCommand;

  const attempt: CompilationAttempt = {
    id: attemptId,
    cdsCommand: cdsCommandString,
    cacheDir: projectDir,
    timestamp: startTime,
    result: {
      success: false,
      timestamp: startTime,
    },
  };

  try {
    // Use the same compilation logic as the original attempt.
    const primarySourceFile = task.sourceFiles[0];

    const compilationResult = compileCdsToJson(
      primarySourceFile,
      dependencyGraph.sourceRootDir,
      cdsCommandString,
      projectDir,
      // Convert CDS projects to BasicCdsProject format expected by compileCdsToJson
      new Map(
        Array.from(dependencyGraph.projects.entries()).map(([key, value]) => [
          key,
          {
            cdsFiles: value.cdsFiles,
            compilationTargets: value.compilationTargets,
            expectedOutputFile: value.expectedOutputFile,
            projectDir: value.projectDir,
            dependencies: value.dependencies,
            imports: value.imports,
            packageJson: value.packageJson,
          },
        ]),
      ),
      task.projectDir,
    );

    attempt.result = {
      ...compilationResult,
      timestamp: startTime,
    };
  } catch (error) {
    attempt.error = {
      message: String(error),
      stack: error instanceof Error ? error.stack : undefined,
    };
  }

  return attempt;
}

/**
 * Executes retries for the provided array of {@link CompilationTask} instances.
 * @param tasksToRetry Tasks that need to be retried
 * @param project The {@link CdsProject} associated with the compilation tasks to retry
 * @param dependencyGraph The {@link CdsDependencyGraph} to update as tasks are retried
 * @returns The {@link ResultRetryCompilationTask}
 */
function retryCompilationTasksForProject(
  tasksToRetry: CompilationTask[],
  project: CdsProject,
  dependencyGraph: CdsDependencyGraph,
): ResultRetryCompilationTask {
  const startTime = Date.now();

  const result: ResultRetryCompilationTask = {
    projectDir: project.projectDir,
    retriedTasks: [],
    successfulRetries: 0,
    failedRetries: 0,
    fullDependenciesAvailable: Boolean(project.retryStatus?.fullDependenciesInstalled),
    executionDurationMs: 0,
    retryErrors: [],
  };

  cdsExtractorLog(
    'info',
    `Retrying ${tasksToRetry.length} task(s) for project ${project.projectDir} using ${result.fullDependenciesAvailable ? 'full' : 'minimal'} dependencies`,
  );

  for (const task of tasksToRetry) {
    try {
      // Mark task as being retried
      task.retryInfo = {
        hasBeenRetried: true,
        retryReason: 'Output validation failed',
        fullDependenciesInstalled: result.fullDependenciesAvailable,
        retryTimestamp: new Date(),
      };

      // Use the retry command configured for this task
      const retryAttempt = retryCompilationTask(
        task,
        task.retryCommand,
        project.projectDir,
        dependencyGraph,
      );

      task.retryInfo.retryAttempt = retryAttempt;
      task.attempts.push(retryAttempt);
      result.retriedTasks.push(task);

      if (retryAttempt.result.success) {
        task.status = 'success';
        result.successfulRetries++;
        cdsExtractorLog('info', `Retry successful for task ${task.id}`);
      } else {
        task.status = 'failed';
        task.errorSummary = retryAttempt.error?.message ?? 'Retry compilation failed';
        result.failedRetries++;
        result.retryErrors.push(task.errorSummary);
        cdsExtractorLog('warn', `Retry failed for task ${task.id}: ${task.errorSummary}`);
      }
    } catch (error) {
      const errorMessage = `Failed to retry task ${task.id}: ${String(error)}`;
      result.retryErrors.push(errorMessage);
      result.failedRetries++;
      task.status = 'failed';
      task.errorSummary = errorMessage;
      cdsExtractorLog('error', errorMessage);
    }
  }

  result.executionDurationMs = Date.now() - startTime;

  cdsExtractorLog(
    'info',
    `Retry execution completed for project ${project.projectDir}: ${result.successfulRetries} successful, ${result.failedRetries} failed`,
  );

  return result;
}

/**
 * Updates the dependency graph with retry results.
 * @param dependencyGraph The dependency graph to update
 * @param retryResults The retry orchestration results
 */
function updateDependencyGraphWithRetryResults(
  dependencyGraph: CdsDependencyGraph,
  retryResults: ResultRetryCompilationOrchestration,
): void {
  // Status counters are owned by updateCdsDependencyGraphStatus; keep only
  // non-status updates (timing and attempt tracking) here.
  dependencyGraph.retryStatus.totalRetryAttempts =
    retryResults.totalSuccessfulRetries + retryResults.totalFailedRetries;
}
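/*
 * Lifecycle sketch of `task.retryInfo` through a single retry, as driven by
 * `retryCompilationTasksForProject` above (values illustrative):
 *
 *   before:  task.status === 'failed', task.retryInfo typically undefined
 *   during:  task.retryInfo = { hasBeenRetried: true,
 *                               retryReason: 'Output validation failed',
 *                               fullDependenciesInstalled: true | false,
 *                               retryTimestamp: new Date() }
 *   success: task.status === 'success'
 *   failure: task.status === 'failed', task.errorSummary populated, and the
 *            diagnostic phase reports each source file of the task
 */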
// ============================================================================

import { determineCdsCommand, determineVersionAwareCdsCommands } from './command';
import { compileCdsToJson } from './compile';
import { orchestrateRetryAttempts } from './retry';
import {
  CompilationAttempt,
  CompilationConfig,
  CompilationTask,
  ValidatedCdsCommand,
} from './types';
import { updateCdsDependencyGraphStatus } from './validator';
import { cdsExtractorLog, generateStatusReport } from '../../logging';
import { CdsDependencyGraph, CdsProject } from '../parser/types';

/** Attempt compilation with a specific command and configuration. */
function attemptCompilation(
  task: CompilationTask,
  cdsCommand: string,
  cacheDir: string | undefined,
  dependencyGraph: CdsDependencyGraph,
): CompilationAttempt {
  const startTime = new Date();
  const attemptId = `${task.id}_${startTime.getTime()}`;

  const attempt: CompilationAttempt = {
    id: attemptId,
    cdsCommand,
    cacheDir,
    timestamp: startTime,
    result: {
      success: false,
      timestamp: startTime,
    },
  };

  try {
    // For now, we use the first source file for compilation.
    // In a more sophisticated implementation, we might handle project-level compilation differently.
    const primarySourceFile = task.sourceFiles[0];

    const compilationResult = compileCdsToJson(
      primarySourceFile,
      dependencyGraph.sourceRootDir,
      cdsCommand,
      cacheDir,
      // Convert CDS projects to BasicCdsProject format expected by compileCdsToJson
      new Map(
        Array.from(dependencyGraph.projects.entries()).map(([key, value]) => [
          key,
          {
            cdsFiles: value.cdsFiles,
            compilationTargets: value.compilationTargets,
            expectedOutputFile: value.expectedOutputFile,
            projectDir: value.projectDir,
            dependencies: value.dependencies,
            imports: value.imports,
            packageJson: value.packageJson,
            compilationConfig: value.compilationConfig,
          },
        ]),
      ),
      task.projectDir,
    );

    const endTime = new Date();
    attempt.result = {
      ...compilationResult,
      timestamp: endTime,
      durationMs: endTime.getTime() - startTime.getTime(),
      commandUsed: cdsCommand,
      cacheDir,
    };

    if (compilationResult.success && compilationResult.outputPath) {
      dependencyGraph.statusSummary.jsonFilesGenerated++;
    }
  } catch (error) {
    const endTime = new Date();
    attempt.error = {
      message: String(error),
      stack: error instanceof Error ? error.stack : undefined,
    };
    attempt.result.timestamp = endTime;
    attempt.result.durationMs = endTime.getTime() - startTime.getTime();
  }

  task.attempts.push(attempt);
  return attempt;
}

/**
 * Create a compilation task for a project or individual file.
 */
function createCompilationTask(
  type: 'file' | 'project',
  sourceFiles: string[],
  expectedOutputFile: string,
  projectDir: string,
): CompilationTask {
  // Create default commands for tasks - these should be updated later with proper commands
  const defaultPrimaryCommand: ValidatedCdsCommand = {
    executable: 'cds',
    args: [],
    originalCommand: 'cds',
  };

  const defaultRetryCommand: ValidatedCdsCommand = {
    executable: 'npx',
    args: ['cds'],
    originalCommand: 'npx cds',
  };

  return {
    id: `${type}_${projectDir}_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`,
    type,
    status: 'pending',
    sourceFiles,
    expectedOutputFile,
    projectDir,
    attempts: [],
    dependencies: [],
    primaryCommand: defaultPrimaryCommand,
    retryCommand: defaultRetryCommand,
  };
}

function createCompilationConfig(
  cdsCommand: string,
  cacheDir: string | undefined,
): CompilationConfig {
  return {
    cdsCommand,
    cacheDir,
    versionCompatibility: {
      isCompatible: true, // Will be validated during planning
    },
    maxRetryAttempts: 3,
  };
}
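/*
 * Example of the task id format produced by `createCompilationTask` above
 * (timestamp and suffix are illustrative):
 *
 *   'project_srv-app_1736530000000_k3x9q2a1b'
 *    ^type    ^projectDir ^Date.now() ^9-char base-36 random suffix
 */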
/**
 * Execute a single compilation task.
 */
function executeCompilationTask(
  task: CompilationTask,
  project: CdsProject,
  dependencyGraph: CdsDependencyGraph,
  _codeqlExePath: string,
): void {
  task.status = 'in_progress';

  const config = project.enhancedCompilationConfig;
  if (!config) {
    throw new Error(`No compilation configuration found for project ${project.projectDir}`);
  }

  const compilationAttempt = attemptCompilation(
    task,
    config.cdsCommand,
    config.cacheDir,
    dependencyGraph,
  );

  if (compilationAttempt.result.success) {
    task.status = 'success';
    return;
  }

  // Compilation failed - mark task as failed
  const lastError = compilationAttempt.error
    ? new Error(compilationAttempt.error.message)
    : new Error('Compilation failed');

  task.status = 'failed';
  task.errorSummary = lastError.message || 'Compilation failed';

  // Note: Diagnostics are deferred until after the retry phase completes
  // to implement "Silent Success" - only add diagnostics for definitively failed tasks.

  cdsExtractorLog('error', `Compilation failed for task ${task.id}: ${task.errorSummary}`);
}

/**
 * Executes all compilation tasks for the provided {@link CdsDependencyGraph}.
 * Uses the provided `codeqlExePath` to run the CodeQL CLI, as needed, for
 * generating diagnostic warnings and/or errors for problems encountered while
 * running the CodeQL CDS extractor.
 */
function executeCompilationTasks(dependencyGraph: CdsDependencyGraph, codeqlExePath: string): void {
  cdsExtractorLog('info', 'Starting compilation execution for all projects...');

  dependencyGraph.currentPhase = 'compiling';
  const compilationStartTime = new Date();

  // Collect all compilation tasks from all projects.
  const allTasks: Array<{ task: CompilationTask; project: CdsProject }> = [];

  for (const project of dependencyGraph.projects.values()) {
    for (const task of project.compilationTasks) {
      allTasks.push({ task, project });
    }
  }

  // Execute compilation tasks sequentially. There is room for optimization in the future.
  // For now, we keep it simple to ensure consistent debug information collection.
  cdsExtractorLog('info', `Executing ${allTasks.length} compilation task(s)...`);
  for (const { task, project } of allTasks) {
    try {
      executeCompilationTask(task, project, dependencyGraph, codeqlExePath);
    } catch (error) {
      const errorMessage = `Failed to execute compilation task ${task.id}: ${String(error)}`;
      cdsExtractorLog('error', errorMessage);

      dependencyGraph.errors.critical.push({
        phase: 'compiling',
        message: errorMessage,
        timestamp: new Date(),
        stack: error instanceof Error ? error.stack : undefined,
      });

      task.status = 'failed';
      task.errorSummary = errorMessage;
      dependencyGraph.statusSummary.failedCompilations++;
    }
  }

  // Update project statuses
  for (const project of dependencyGraph.projects.values()) {
    const allTasksCompleted = project.compilationTasks.every(
      task => task.status === 'success' || task.status === 'failed',
    );

    if (allTasksCompleted) {
      const hasFailedTasks = project.compilationTasks.some(task => task.status === 'failed');
      project.status = hasFailedTasks ? 'failed' : 'completed';
      project.timestamps.compilationCompleted = new Date();
    }
  }

  const compilationEndTime = new Date();
  dependencyGraph.statusSummary.performance.compilationDurationMs =
    compilationEndTime.getTime() - compilationStartTime.getTime();

  cdsExtractorLog(
    'info',
    `Compilation execution completed. Success: ${dependencyGraph.statusSummary.successfulCompilations}, Failed: ${dependencyGraph.statusSummary.failedCompilations}`,
  );
}

/**
 * Orchestrates the compilation process for CDS files based on a dependency graph.
 *
 * This function coordinates the planning and execution of compilation tasks,
 * tracks the compilation status, and generates a post-compilation report.
 *
 * @param dependencyGraph - The {@link CdsDependencyGraph} representing the CDS projects,
 * project dependencies, expected compilation tasks, and their statuses.
 * @param projectCacheDirMap - A map from project identifiers to their cache directory paths.
 * @param codeqlExePath - The path to the CodeQL executable. Used for generating diagnostic
 * messages as part of the broader CodeQL (JavaScript) extraction process.
 * @throws Will rethrow any errors encountered during compilation, after logging them.
 */
export function orchestrateCompilation(
  dependencyGraph: CdsDependencyGraph,
  projectCacheDirMap: Map<string, string>,
  codeqlExePath: string,
): void {
  try {
    // Phase 1: Initial compilation
    planCompilationTasks(dependencyGraph, projectCacheDirMap);
    executeCompilationTasks(dependencyGraph, codeqlExePath);

    // CENTRALIZED STATUS UPDATE: Establish post-initial-compilation state
    updateCdsDependencyGraphStatus(dependencyGraph, dependencyGraph.sourceRootDir);

    // Phase 2: Retry orchestration
    cdsExtractorLog('info', 'Starting retry orchestration phase...');
    const retryResults = orchestrateRetryAttempts(dependencyGraph, codeqlExePath);

    // CENTRALIZED STATUS UPDATE: Final validation and status synchronization
    updateCdsDependencyGraphStatus(dependencyGraph, dependencyGraph.sourceRootDir);

    // Log retry results
    if (retryResults.totalTasksRequiringRetry > 0) {
      cdsExtractorLog(
        'info',
        `Retry phase completed: ${retryResults.totalTasksRequiringRetry} tasks retried, ${retryResults.totalSuccessfulRetries} successful, ${retryResults.totalFailedRetries} failed`,
      );
    } else {
      cdsExtractorLog('info', 'Retry phase completed: no tasks required retry');
    }

    // Phase 3: Final status update
    const hasFailures =
      dependencyGraph.statusSummary.failedCompilations > 0 ||
      dependencyGraph.errors.critical.length > 0;

    dependencyGraph.statusSummary.overallSuccess = !hasFailures;
    dependencyGraph.currentPhase = hasFailures ? 'failed' : 'completed';

    // Phase 4: Status reporting (now guaranteed to be accurate)
    const statusReport = generateStatusReport(dependencyGraph);
    cdsExtractorLog('info', 'CDS Extractor Status Report : Post-Compilation...\n' + statusReport);
  } catch (error) {
    const errorMessage = `Compilation orchestration failed: ${String(error)}`;
    cdsExtractorLog('error', errorMessage);

    dependencyGraph.errors.critical.push({
      phase: 'compiling',
      message: errorMessage,
      timestamp: new Date(),
      stack: error instanceof Error ? error.stack : undefined,
    });

    dependencyGraph.currentPhase = 'failed';
    dependencyGraph.statusSummary.overallSuccess = false;

    throw error;
  }
}
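/*
 * Minimal usage sketch for `orchestrateCompilation` (paths and cache layout
 * are hypothetical; the real extractor entry-point builds these values from
 * the environment and the parsed dependency graph):
 *
 *   const graph = buildCdsProjectDependencyGraph('/work/source-root');
 *   const cacheDirs = new Map<string, string>([
 *     ['srv-app', '/work/source-root/.cds-extractor-cache/srv-app'],
 *   ]);
 *   orchestrateCompilation(graph, cacheDirs, '/opt/codeql/codeql');
 *   // graph.statusSummary.overallSuccess now reflects the final outcome.
 */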
/** Plan compilation tasks for all projects in the dependency graph. */
function planCompilationTasks(
  dependencyGraph: CdsDependencyGraph,
  projectCacheDirMap: Map<string, string>,
): void {
  cdsExtractorLog('info', 'Planning compilation tasks for all projects...');

  dependencyGraph.currentPhase = 'compilation_planning';

  for (const [projectDir, project] of dependencyGraph.projects.entries()) {
    try {
      const cacheDir = projectCacheDirMap.get(projectDir);

      // Determine version-aware CDS commands for both primary and retry scenarios
      const commands = determineVersionAwareCdsCommands(
        cacheDir,
        dependencyGraph.sourceRootDir,
        projectDir,
        dependencyGraph,
      );

      // Keep backward compatibility - determine command string for compilation config
      const cdsCommand = determineCdsCommand(cacheDir, dependencyGraph.sourceRootDir);

      // Create compilation configuration (always project-level now)
      const compilationConfig = createCompilationConfig(cdsCommand, cacheDir);

      project.enhancedCompilationConfig = compilationConfig;

      // Create compilation task (always project-level now)
      const task = createCompilationTask(
        'project',
        project.cdsFiles,
        project.expectedOutputFile,
        projectDir,
      );

      // Update task with version-aware commands
      task.primaryCommand = commands.primaryCommand;
      task.retryCommand = commands.retryCommand;

      project.compilationTasks = [task];

      project.status = 'compilation_planned';
      project.timestamps.compilationStarted = new Date();

      cdsExtractorLog(
        'info',
        `Planned ${project.compilationTasks.length} compilation task(s) for project ${projectDir}`,
      );
    } catch (error) {
      const errorMessage = `Failed to plan compilation for project ${projectDir}: ${String(error)}`;
      cdsExtractorLog('error', errorMessage);

      dependencyGraph.errors.critical.push({
        phase: 'compilation_planning',
        message: errorMessage,
        timestamp: new Date(),
        stack: error instanceof Error ? error.stack : undefined,
      });

      project.status = 'failed';
    }
  }

  const totalTasks = Array.from(dependencyGraph.projects.values()).reduce(
    (sum, project) => sum + project.compilationTasks.length,
    0,
  );

  dependencyGraph.statusSummary.totalCompilationTasks = totalTasks;

  cdsExtractorLog('info', `Compilation planning completed. Total tasks: ${totalTasks}`);
}
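/*
 * Sketch of a freshly planned task (field values illustrative): planning
 * always yields exactly one project-level task per project, and the default
 * `cds` / `npx cds` commands from `createCompilationTask` are immediately
 * replaced with the version-aware commands:
 *
 *   { type: 'project', status: 'pending',
 *     sourceFiles: [...project.cdsFiles],
 *     primaryCommand: commands.primaryCommand,
 *     retryCommand: commands.retryCommand }
 */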
// ============================================================================

import { relative } from 'path';

/**
 * Helper functions for mapping CDS files to their projects and cache directories.
 */

/**
 * Find the project directory for a CDS file.
 * @param cdsFilePath Path to the CDS file
 * @param sourceRoot Source root directory
 * @param projectMap Map of project directories to project objects
 * @returns The project directory the file belongs to, or undefined if not found
 */
export function findProjectForCdsFile(
  cdsFilePath: string,
  sourceRoot: string,
  projectMap: Map<string, { cdsFiles: string[] }>,
): string | undefined {
  // Get the relative path to the project directory for this CDS file
  const relativeCdsFilePath = relative(sourceRoot, cdsFilePath);

  // If the file is outside the source root, path.relative() will start with '../'.
  // In this case, we should also check against the absolute path.
  const isOutsideSourceRoot = relativeCdsFilePath.startsWith('../');

  // Find the project this file belongs to
  for (const [projectDir, project] of projectMap.entries()) {
    if (
      project.cdsFiles.some(
        cdsFile =>
          cdsFile === relativeCdsFilePath ||
          relativeCdsFilePath.startsWith(projectDir) ||
          (isOutsideSourceRoot && cdsFile === cdsFilePath),
      )
    ) {
      return projectDir;
    }
  }

  return undefined;
}
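/*
 * Illustrative call (hypothetical paths): given sourceRoot '/work/src' and a
 * projectMap entry 'srv-app' whose cdsFiles include 'srv-app/srv/service.cds',
 *
 *   findProjectForCdsFile('/work/src/srv-app/srv/service.cds', '/work/src', projectMap)
 *
 * returns 'srv-app', because the file's source-root-relative path starts with
 * that project directory.
 */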
// ============================================================================

import { basename, dirname, join, resolve, sep } from 'path';

import {
  determineCdsFilesForProjectDir,
  determineCdsFilesToCompile,
  determineCdsProjectsUnderSourceDir,
  extractCdsImports,
  readPackageJsonFile,
} from './functions';
import { BasicCdsProject, CdsDependencyGraph, CdsImport, CdsProject } from './types';
import { modelCdsJsonFile } from '../../constants';
import { cdsExtractorLog } from '../../logging';

/**
 * Builds a basic dependency graph of CDS projects and performs the initial parsing stage of the CDS extractor.
 * This is the internal function that creates basic project structures.
 *
 * @param sourceRootDir - Source root directory
 * @returns Map of project directories to their BasicCdsProject objects with dependency information
 */
function buildBasicCdsProjectDependencyGraph(
  sourceRootDir: string,
): Map<string, BasicCdsProject> {
  // Find all CDS projects under the source directory
  cdsExtractorLog('info', 'Detecting CDS projects...');
  const projectDirs = determineCdsProjectsUnderSourceDir(sourceRootDir);

  if (projectDirs.length === 0) {
    cdsExtractorLog('info', 'No CDS projects found.');
    return new Map();
  }

  cdsExtractorLog('info', `Found ${projectDirs.length} CDS project(s) under source directory.`);

  const projectMap = new Map<string, BasicCdsProject>();

  // First pass: create CdsProject objects for each project directory
  for (const projectDir of projectDirs) {
    const absoluteProjectDir = join(sourceRootDir, projectDir);
    const cdsFiles = determineCdsFilesForProjectDir(sourceRootDir, absoluteProjectDir);

    // Try to load package.json if it exists
    const packageJsonPath = join(absoluteProjectDir, 'package.json');
    const packageJson = readPackageJsonFile(packageJsonPath);

    projectMap.set(projectDir, {
      projectDir,
      cdsFiles,
      compilationTargets: [], // Will be populated in the third pass
      expectedOutputFile: join(projectDir, modelCdsJsonFile),
      packageJson,
      dependencies: [],
      imports: new Map(),
    });
  }

  // Second pass: analyze dependencies between projects
  cdsExtractorLog('info', 'Analyzing dependencies between CDS projects...');
  for (const [projectDir, project] of projectMap.entries()) {
    // Check each CDS file for imports
    for (const relativeFilePath of project.cdsFiles) {
      const absoluteFilePath = join(sourceRootDir, relativeFilePath);

      try {
        const imports = extractCdsImports(absoluteFilePath);
        const enrichedImports: CdsImport[] = [];

        // Process each import
        for (const importInfo of imports) {
          const enrichedImport: CdsImport = { ...importInfo };

          if (importInfo.isRelative) {
            // Resolve the relative import path
            const importedFilePath = resolve(dirname(absoluteFilePath), importInfo.path);
            const normalizedImportedPath = importedFilePath.endsWith('.cds')
              ? importedFilePath
              : `${importedFilePath}.cds`;

            // Store the resolved path relative to source root
            try {
              const relativeToDirPath = dirname(relativeFilePath);
              const resolvedPath = resolve(join(sourceRootDir, relativeToDirPath), importInfo.path);
              const normalizedResolvedPath = resolvedPath.endsWith('.cds')
                ? resolvedPath
                : `${resolvedPath}.cds`;

              // Convert to relative path from source root
              if (normalizedResolvedPath.startsWith(sourceRootDir)) {
                enrichedImport.resolvedPath = normalizedResolvedPath
                  .substring(sourceRootDir.length)
                  .replace(/^[/\\]/, '');
              }
            } catch (error) {
              cdsExtractorLog(
                'warn',
                `Could not resolve import path for ${importInfo.path} in ${relativeFilePath}: ${String(error)}`,
              );
            }

            // Find which project contains this imported file
            for (const [otherProjectDir, otherProject] of projectMap.entries()) {
              if (otherProjectDir === projectDir) continue; // Skip self

              const otherProjectAbsoluteDir = join(sourceRootDir, otherProjectDir);

              // Check if the imported file is in the other project
              const isInOtherProject = otherProject.cdsFiles.some(otherFile => {
                const otherAbsolutePath = join(sourceRootDir, otherFile);
                return (
                  otherAbsolutePath === normalizedImportedPath ||
                  normalizedImportedPath.startsWith(otherProjectAbsoluteDir + sep)
                );
              });

              if (isInOtherProject) {
                // Add dependency if not already present
                project.dependencies ??= [];

                if (!project.dependencies.includes(otherProject)) {
                  project.dependencies.push(otherProject);
                }
              }
            }
          }
          // For module imports, check package.json dependencies
          else if (importInfo.isModule && project.packageJson) {
            const dependencies = {
              ...(project.packageJson.dependencies ?? {}),
              ...(project.packageJson.devDependencies ?? {}),
            };

            // Extract module name from import path (e.g., '@sap/cds/common' -> '@sap/cds')
            const moduleName = importInfo.path.split('/')[0].startsWith('@')
              ? importInfo.path.split('/').slice(0, 2).join('/')
              : importInfo.path.split('/')[0];

            if (dependencies[moduleName]) {
              // This is a valid module dependency; nothing more to do here.
              // In the future, we could track module dependencies separately.
            }
          }

          enrichedImports.push(enrichedImport);
        }

        // Store the enriched imports in the project
        project.imports?.set(relativeFilePath, enrichedImports);
      } catch (error: unknown) {
        cdsExtractorLog(
          'warn',
          `Error processing imports in ${absoluteFilePath}: ${String(error)}`,
        );
      }
    }
  }

  // Third pass: determine CDS files to compile and expected output files for each project
  cdsExtractorLog(
    'info',
    'Determining CDS files to compile and expected output files for each project...',
  );
  for (const [, project] of projectMap.entries()) {
    try {
      const projectPlan = determineCdsFilesToCompile(sourceRootDir, project);

      // Assign the calculated values back to the project
      project.compilationTargets = projectPlan.compilationTargets;
      project.expectedOutputFile = projectPlan.expectedOutputFile;
    } catch (error) {
      cdsExtractorLog(
        'warn',
        `Error determining files to compile for project ${project.projectDir}: ${String(error)}`,
      );
      // Fall back to default project compilation on error
      project.compilationTargets = project.cdsFiles.map(file => basename(file));
      project.expectedOutputFile = join(project.projectDir, modelCdsJsonFile);
    }
  }

  return projectMap;
}

/**
 * Builds a CDS dependency graph with comprehensive tracking and debug information.
 * This is the main function that returns a CdsDependencyGraph instead of a simple Map.
 * The extractor now runs in autobuild mode by default.
 *
 * @param sourceRootDir - Source root directory
 * @returns CDS dependency graph with comprehensive tracking
 */
export function buildCdsProjectDependencyGraph(sourceRootDir: string): CdsDependencyGraph {
  const startTime = new Date();

  // Create the initial dependency graph structure
  const dependencyGraph: CdsDependencyGraph = {
    id: `cds_graph_${Date.now()}`,
    sourceRootDir,
    projects: new Map(),
    debugInfo: {
      extractor: {
        runMode: 'autobuild',
        sourceRootDir,
        startTime,
        environment: {
          nodeVersion: process.version,
          platform: process.platform,
          cwd: process.cwd(),
          argv: process.argv,
        },
      },
      parser: {
        projectsDetected: 0,
        cdsFilesFound: 0,
        dependencyResolutionSuccess: true,
        parsingErrors: [],
        parsingWarnings: [],
      },
      compiler: {
        availableCommands: [],
        selectedCommand: '',
        cacheDirectories: [],
        cacheInitialized: false,
      },
    },
    currentPhase: 'parsing',
    statusSummary: {
      overallSuccess: false,
      totalProjects: 0,
      totalCdsFiles: 0,
      totalCompilationTasks: 0,
      successfulCompilations: 0,
      failedCompilations: 0,
      skippedCompilations: 0,
      jsonFilesGenerated: 0,
      criticalErrors: [],
      warnings: [],
      performance: {
        totalDurationMs: 0,
        parsingDurationMs: 0,
        compilationDurationMs: 0,
        extractionDurationMs: 0,
      },
    },
    config: {
      maxRetryAttempts: 3,
      enableDetailedLogging: false, // Debug modes removed
      generateDebugOutput: false, // Debug modes removed
      compilationTimeoutMs: 30000, // 30 seconds
    },
    errors: {
      critical: [],
      warnings: [],
    },
    retryStatus: {
      totalTasksRequiringRetry: 0,
      totalTasksSuccessfullyRetried: 0,
      totalRetryAttempts: 0,
      projectsRequiringFullDependencies: new Set(),
      projectsWithFullDependencies: new Set(),
    },
  };

  try {
    // Use the existing function to build the basic project map
    const basicProjectMap = buildBasicCdsProjectDependencyGraph(sourceRootDir);

    // Convert basic projects to CDS projects
    for (const [projectDir, basicProject] of basicProjectMap.entries()) {
      const cdsProject: CdsProject = {
        ...basicProject,
        id: `project_${projectDir.replace(/[^a-zA-Z0-9]/g, '_')}_${Date.now()}`,
        enhancedCompilationConfig: undefined, // Will be set during compilation planning
        compilationTasks: [],
        parserDebugInfo: {
          dependenciesResolved: [],
          importErrors: [],
          parseErrors: new Map(),
        },
        status: 'discovered',
        timestamps: {
          discovered: new Date(),
        },
      };

      dependencyGraph.projects.set(projectDir, cdsProject);
    }

    // Update summary statistics
    dependencyGraph.statusSummary.totalProjects = dependencyGraph.projects.size;
    dependencyGraph.statusSummary.totalCdsFiles = Array.from(
      dependencyGraph.projects.values(),
    ).reduce((sum, project) => sum + project.cdsFiles.length, 0);

    dependencyGraph.debugInfo.parser.projectsDetected = dependencyGraph.projects.size;
    dependencyGraph.debugInfo.parser.cdsFilesFound = dependencyGraph.statusSummary.totalCdsFiles;

    // Mark dependency resolution phase as completed
    dependencyGraph.currentPhase = 'dependency_resolution';

    const endTime = new Date();
    dependencyGraph.debugInfo.extractor.endTime = endTime;
    dependencyGraph.debugInfo.extractor.durationMs = endTime.getTime() - startTime.getTime();
    dependencyGraph.statusSummary.performance.parsingDurationMs =
      dependencyGraph.debugInfo.extractor.durationMs;

    cdsExtractorLog(
      'info',
      `CDS dependency graph created with ${dependencyGraph.projects.size} projects and ${dependencyGraph.statusSummary.totalCdsFiles} CDS files`,
    );

    return dependencyGraph;
  } catch (error) {
    const errorMessage = `Failed to build CDS dependency graph: ${String(error)}`;
    cdsExtractorLog('error', errorMessage);

    dependencyGraph.errors.critical.push({
      phase: 'parsing',
      message: errorMessage,
      timestamp: new Date(),
      stack: error instanceof Error ? error.stack : undefined,
    });

    dependencyGraph.currentPhase = 'failed';
    return dependencyGraph;
  }
}
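/*
 * Usage sketch (hypothetical source root). The graph is returned even when
 * parsing fails; callers inspect `currentPhase` and `errors.critical` rather
 * than catching exceptions from this function:
 *
 *   const graph = buildCdsProjectDependencyGraph('/work/source-root');
 *   if (graph.currentPhase === 'failed') {
 *     // handle graph.errors.critical entries
 *   }
 */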
// ============================================================================

import { existsSync, readFileSync, statSync } from 'fs';
import { basename, dirname, join, relative, sep } from 'path';

import { sync } from 'glob';

import { CdsFilesToCompile, CdsImport, PackageJson } from './types';
import { modelCdsJsonFile } from '../../constants';
import { cdsExtractorLog } from '../../logging';

/**
 * Determines the list of CDS files to be parsed for the specified project directory.
 *
 * @param sourceRootDir - The source root directory to search for CDS files. This is
 * used to resolve relative paths in relation to a common (source root) directory for
 * multiple projects.
 * @param projectDir - The full, local filesystem path of the directory that contains
 * the individual `.cds` definition files for some `CAP` project.
 * @returns An array of strings representing the paths, relative to the source root
 * directory, of the `.cds` files to be parsed for a given project.
 */
export function determineCdsFilesForProjectDir(
  sourceRootDir: string,
  projectDir: string,
): string[] {
  if (!sourceRootDir || !projectDir) {
    throw new Error(
      `Unable to determine CDS files for project dir '${projectDir}'; both sourceRootDir and projectDir must be provided.`,
    );
  }

  // Normalize paths by removing trailing slashes for comparison
  const normalizedSourceRoot = sourceRootDir.replace(/[/\\]+$/, '');
  const normalizedProjectDir = projectDir.replace(/[/\\]+$/, '');

  if (
    !normalizedProjectDir.startsWith(normalizedSourceRoot) &&
    normalizedProjectDir !== normalizedSourceRoot
  ) {
    throw new Error(
      'projectDir must be a subdirectory of sourceRootDir or equal to sourceRootDir.',
    );
  }

  try {
    // Use glob to find all .cds files under the project directory, excluding node_modules
    const cdsFiles = sync(join(projectDir, '**/*.cds'), {
      nodir: true,
      ignore: ['**/node_modules/**', '**/*.testproj/**'],
    });

    // Convert absolute paths to paths relative to sourceRootDir
    return cdsFiles.map(file => relative(sourceRootDir, file));
  } catch (error: unknown) {
    cdsExtractorLog('error', `Error finding CDS files in ${projectDir}: ${String(error)}`);
    return [];
  }
}

/**
 * Determines the list of distinct CDS projects under the specified source
 * directory.
 * @param sourceRootDir - The source root directory to search for CDS projects.
 * @returns An array of strings representing the paths, relative to the source
 * root directory, of the detected CDS projects.
 */
export function determineCdsProjectsUnderSourceDir(sourceRootDir: string): string[] {
  if (!sourceRootDir || !existsSync(sourceRootDir)) {
    throw new Error(`Source root directory '${sourceRootDir}' does not exist.`);
  }

  const foundProjects = new Set<string>();

  // Find all potential project directories by looking for package.json files and CDS files
  const packageJsonFiles = sync(join(sourceRootDir, '**/package.json'), {
    nodir: true,
    ignore: ['**/node_modules/**', '**/*.testproj/**'],
  });

  const cdsFiles = sync(join(sourceRootDir, '**/*.cds'), {
    nodir: true,
    ignore: ['**/node_modules/**', '**/*.testproj/**'],
  });

  // Collect all potential project directories
  const candidateDirectories = new Set<string>();

  // Add directories with package.json files
  for (const packageJsonFile of packageJsonFiles) {
    candidateDirectories.add(dirname(packageJsonFile));
  }

  // Add directories with CDS files and try to find their project roots
  for (const cdsFile of cdsFiles) {
    const cdsDir = dirname(cdsFile);
    const projectRoot = findProjectRootFromCdsFile(cdsDir, sourceRootDir);
    if (projectRoot) {
      candidateDirectories.add(projectRoot);
    } else {
      candidateDirectories.add(cdsDir);
    }
  }

  // Filter candidates to only include likely CDS projects
  for (const dir of candidateDirectories) {
    if (isLikelyCdsProject(dir)) {
      const relativePath = relative(sourceRootDir, dir);
      const projectDir = relativePath || '.';

      // Check if this project is already included as a parent or child of an existing project
      let shouldAdd = true;
      const existingProjects = Array.from(foundProjects);

      for (const existingProject of existingProjects) {
        const existingAbsPath = join(sourceRootDir, existingProject);

        // Skip if this directory is a subdirectory of an existing project,
        // but only if the parent is not a monorepo with its own CDS content
        if (dir.startsWith(existingAbsPath + sep)) {
          // Check if parent is a monorepo root with its own CDS content
          const parentPackageJsonPath = join(existingAbsPath, 'package.json');
          const parentPackageJson = readPackageJsonFile(parentPackageJsonPath);
          const isParentMonorepo =
            parentPackageJson?.workspaces &&
            Array.isArray(parentPackageJson.workspaces) &&
            parentPackageJson.workspaces.length > 0;

          // If parent is a monorepo with CDS content, allow both parent and child
          if (
            isParentMonorepo &&
            (hasStandardCdsContent(existingAbsPath) || hasDirectCdsContent(existingAbsPath))
          ) {
            // Both parent and child can coexist as separate CDS projects
            shouldAdd = true;
          } else {
            // Traditional case: exclude subdirectory
            shouldAdd = false;
          }
          break;
        }

        // Remove existing project if it's a subdirectory of the current directory,
        // unless the current directory is a monorepo root and the existing project has its own CDS content
        if (existingAbsPath.startsWith(dir + sep)) {
          const currentPackageJsonPath = join(dir, 'package.json');
          const currentPackageJson = readPackageJsonFile(currentPackageJsonPath);
          const isCurrentMonorepo =
            currentPackageJson?.workspaces &&
            Array.isArray(currentPackageJson.workspaces) &&
            currentPackageJson.workspaces.length > 0;

          // If current is a monorepo and the existing project is a legitimate CDS project, keep both
          if (!(isCurrentMonorepo && isLikelyCdsProject(existingAbsPath))) {
            foundProjects.delete(existingProject);
          }
        }
      }

      if (shouldAdd) {
        foundProjects.add(projectDir);
      }
    }
  }

  return Array.from(foundProjects).sort();
}

/**
 * Parses a CDS file to extract import statements.
 *
 * @param filePath - Path to the CDS file
 * @returns Array of import statements found in the file
 */
export function extractCdsImports(filePath: string): CdsImport[] {
  if (!existsSync(filePath)) {
    throw new Error(`File does not exist: ${filePath}`);
  }

  const content = readFileSync(filePath, 'utf8');
  const imports: CdsImport[] = [];

  // Regular expression to match using statements.
  // This handles: using X from 'path'; and using { X, Y } from 'path';
  // and also using X as Y from 'path';
  const usingRegex =
    /using\s+(?:{[^}]+}|[\w.]+(?:\s+as\s+[\w.]+)?)\s+from\s+['"`]([^'"`]+)['"`]\s*;/g;

  let match;
  while ((match = usingRegex.exec(content)) !== null) {
    const path = match[1];
    imports.push({
      statement: match[0],
      path,
      isRelative: path.startsWith('./') || path.startsWith('../'),
      isModule: !path.startsWith('./') && !path.startsWith('../') && !path.startsWith('/'),
    });
  }

  return imports;
}
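/*
 * Example of what `extractCdsImports` parses (file content illustrative):
 *
 *   using my.bookshop from './db/schema';
 *   using { Currency } from '@sap/cds/common';
 *
 * yields:
 *
 *   [ { statement: "using my.bookshop from './db/schema';",
 *       path: './db/schema', isRelative: true, isModule: false },
 *     { statement: "using { Currency } from '@sap/cds/common';",
 *       path: '@sap/cds/common', isRelative: false, isModule: true } ]
 */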
/**
 * Attempts to find the project root directory starting from a directory containing a CDS file.
 *
 * @param cdsFileDir - Directory containing a CDS file
 * @param sourceRootDir - Source root directory to limit the search
 * @returns The project root directory; `null` when the starting directory is
 * skipped entirely (node_modules or .testproj); otherwise falls back to the
 * original `cdsFileDir` when no better root can be determined.
 */
function findProjectRootFromCdsFile(cdsFileDir: string, sourceRootDir: string): string | null {
  // Skip node_modules and testproj directories entirely
  if (cdsFileDir.includes('node_modules') || cdsFileDir.includes('.testproj')) {
    return null;
  }

  let currentDir = cdsFileDir;

  // Limit the upward search to the sourceRootDir
  while (currentDir.startsWith(sourceRootDir)) {
    // Check if this directory looks like a project root
    if (isLikelyCdsProject(currentDir)) {
      // If this is a standard CAP subdirectory (srv, db, app), check if the parent
      // directory should be the real project root
      const currentDirName = basename(currentDir);
      const isStandardSubdir = ['srv', 'db', 'app'].includes(currentDirName);

      if (isStandardSubdir) {
        const parentDir = dirname(currentDir);

        if (
          parentDir !== currentDir &&
          parentDir.startsWith(sourceRootDir) &&
          !parentDir.includes('node_modules') &&
          !parentDir.includes('.testproj') &&
          isLikelyCdsProject(parentDir)
        ) {
          // The parent is also a CDS project, so it's likely the real project root
          return parentDir;
        }
      }

      // For non-standard subdirectories, also check if the parent might be a better project root
      const parentDir = dirname(currentDir);

      if (
        parentDir !== currentDir &&
        parentDir.startsWith(sourceRootDir) &&
        !parentDir.includes('node_modules') &&
        !parentDir.includes('.testproj')
      ) {
        const hasDbDir =
          existsSync(join(parentDir, 'db')) && statSync(join(parentDir, 'db')).isDirectory();
        const hasSrvDir =
          existsSync(join(parentDir, 'srv')) && statSync(join(parentDir, 'srv')).isDirectory();
        const hasAppDir =
          existsSync(join(parentDir, 'app')) && statSync(join(parentDir, 'app')).isDirectory();

        // Use the same CAP project structure logic as below
        if ((hasDbDir && hasSrvDir) || (hasSrvDir && hasAppDir)) {
          return parentDir;
        }
      }

      return currentDir;
    }

    // Check for typical CAP project structure indicators
    const hasDbDir =
      existsSync(join(currentDir, 'db')) && statSync(join(currentDir, 'db')).isDirectory();
    const hasSrvDir =
      existsSync(join(currentDir, 'srv')) && statSync(join(currentDir, 'srv')).isDirectory();
    const hasAppDir =
      existsSync(join(currentDir, 'app')) && statSync(join(currentDir, 'app')).isDirectory();

    if ((hasDbDir && hasSrvDir) || (hasSrvDir && hasAppDir)) {
      return currentDir;
    }

    // Move up one directory
    const parentDir = dirname(currentDir);
    if (parentDir === currentDir) {
      // We've reached the root of the filesystem
      break;
    }
    currentDir = parentDir;
  }

  // If we couldn't determine a proper project root, return the original directory
  return cdsFileDir;
}
/**
 * Determines if a directory likely contains a CAP project by checking for key
 * indicators like package.json with CAP dependencies or .cds files in standard
 * locations.
 *
 * @param dir - Directory to check
 * @returns true if the directory likely contains a CAP project
 */
export function isLikelyCdsProject(dir: string): boolean {
  try {
    // Skip node_modules and testproj directories entirely
    if (dir.includes('node_modules') || dir.includes('.testproj')) {
      return false;
    }

    // Check for CDS files in standard locations (checking both direct and nested files)
    const hasStandardCdsDirectories = hasStandardCdsContent(dir);
    const hasDirectCdsFiles = hasDirectCdsContent(dir);
    const hasCdsFiles = hasStandardCdsDirectories || hasDirectCdsFiles;

    // Check if package.json exists and has CAP dependencies
    const hasCapDependencies = hasPackageJsonWithCapDeps(dir);

    if (hasCapDependencies) {
      // If there are CAP dependencies but no CDS files, there's nothing for us to do
      if (!hasCdsFiles) {
        return false;
      }

      // Check if this is a monorepo root
      const packageJsonPath = join(dir, 'package.json');
      const packageJson = readPackageJsonFile(packageJsonPath);

      if (
        packageJson?.workspaces &&
        Array.isArray(packageJson.workspaces) &&
        packageJson.workspaces.length > 0
      ) {
        // This is likely a monorepo - only treat as CDS project if it has actual CDS content
        if (!hasCdsFiles) {
          // This is a monorepo root without its own CDS content
          return false;
        }
      }

      return true;
    }

    // If no CAP dependencies, only consider it a CDS project if it has CDS files
    return hasCdsFiles;
  } catch (error: unknown) {
    cdsExtractorLog('error', `Error checking directory ${dir}: ${String(error)}`);
    return false;
  }
}

/**
 * Check if a directory has CDS content in standard CAP directories.
 */
function hasStandardCdsContent(dir: string): boolean {
  const standardLocations = [join(dir, 'db'), join(dir, 'srv'), join(dir, 'app')];

  for (const location of standardLocations) {
    if (existsSync(location) && statSync(location).isDirectory()) {
      // Check for any .cds files at any level under these directories.
      const cdsFiles = sync(join(location, '**/*.cds'), { nodir: true });
      if (cdsFiles.length > 0) {
        return true;
      }
    }
  }

  return false;
}

/**
 * Check if a directory has direct CDS files.
 */
function hasDirectCdsContent(dir: string): boolean {
  const directCdsFiles = sync(join(dir, '*.cds'));
  return directCdsFiles.length > 0;
}

/**
 * Safely parses a package.json file.
 * @param filePath - Path to the package.json file
 * @returns The parsed package.json content or undefined if the file doesn't exist or can't be parsed
 */
export function readPackageJsonFile(filePath: string): PackageJson | undefined {
  if (!existsSync(filePath)) {
    return undefined;
  }

  try {
    const content = readFileSync(filePath, 'utf8');
    const packageJson = JSON.parse(content) as PackageJson;
    return packageJson;
  } catch (error) {
    cdsExtractorLog('warn', `Error parsing package.json at ${filePath}: ${String(error)}`);
    return undefined;
  }
}
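/*
 * Decision summary for `isLikelyCdsProject` above (paraphrase, not code):
 *
 *   path contains node_modules or .testproj       -> false (always skipped)
 *   CAP deps in package.json, but no .cds content -> false (nothing to extract)
 *   CAP deps plus .cds content                    -> true (monorepo roots count
 *                                                    only with their own CDS files)
 *   no CAP deps                                   -> true only if .cds content exists
 */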
/**
 * Determines which CDS files should be compiled for a given project and what output files to expect.
 * This function analyzes the project structure and dependencies to decide
 * whether to use project-level compilation or individual file compilation.
 *
 * For CAP projects (identified by either having @sap/cds dependencies or
 * typical CAP directory structure), it returns a special marker indicating
 * project-level compilation should be used. For other projects, it attempts
 * to identify root files (files that are not imported by others) and returns
 * those for individual compilation.
 *
 * @param sourceRootDir - The source root directory
 * @param project - The project to analyze, containing cdsFiles, imports, and projectDir
 * @returns Object containing files to compile and expected output files
 */
export function determineCdsFilesToCompile(
  sourceRootDir: string,
  project: {
    cdsFiles: string[];
    imports?: Map<string, CdsImport[]>;
    projectDir: string;
  },
): CdsFilesToCompile {
  if (!project.cdsFiles || project.cdsFiles.length === 0) {
    return {
      compilationTargets: [],
      expectedOutputFile: join(project.projectDir, modelCdsJsonFile),
    };
  }

  const absoluteProjectDir = join(sourceRootDir, project.projectDir);

  // Check for standard CAP directories
  const capDirectories = ['db', 'srv', 'app'];
  const existingCapDirs = capDirectories.filter(dir => existsSync(join(absoluteProjectDir, dir)));

  if (existingCapDirs.length > 0) {
    // Use standard CAP directories
    return {
      compilationTargets: existingCapDirs,
      expectedOutputFile: join(project.projectDir, modelCdsJsonFile),
    };
  }

  // Check for root-level CDS files
  const rootCdsFiles = project.cdsFiles
    .filter(file => dirname(join(sourceRootDir, file)) === absoluteProjectDir)
    .map(file => basename(file));

  if (rootCdsFiles.length > 0) {
    // Use root-level files
    return {
      compilationTargets: rootCdsFiles,
      expectedOutputFile: join(project.projectDir, modelCdsJsonFile),
    };
  }

  // Use all CDS files with their relative paths
  const compilationTargets = project.cdsFiles.map(file =>
    relative(absoluteProjectDir, join(sourceRootDir, file)),
  );

  return {
    compilationTargets,
    expectedOutputFile: join(project.projectDir, modelCdsJsonFile),
  };
}
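/*
 * Outcome sketch for `determineCdsFilesToCompile` (hypothetical layouts):
 *
 *   project with db/ and srv/ dirs     -> compilationTargets: ['db', 'srv']
 *   only a root-level schema.cds       -> compilationTargets: ['schema.cds']
 *   only nested files, no CAP dirs     -> compilationTargets: ['lib/a.cds', ...]
 *
 * In every case the expectedOutputFile is `join(projectDir, modelCdsJsonFile)`.
 */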
/**
 * Checks if a directory has a package.json with CAP dependencies.
 * This function is used to determine if a directory has the necessary CAP packages installed,
 * which is one indicator that it might be a CAP project.
 *
 * @param dir - Directory to check for package.json with CAP dependencies
 * @returns true if the directory has a package.json with CAP dependencies
 */
export function hasPackageJsonWithCapDeps(dir: string): boolean {
  try {
    const packageJsonPath = join(dir, 'package.json');
    const packageJson = readPackageJsonFile(packageJsonPath);

    if (packageJson) {
      const dependencies = {
        ...(packageJson.dependencies ?? {}),
        ...(packageJson.devDependencies ?? {}),
      };

      // Check for common CAP dependencies
      return !!(dependencies['@sap/cds'] || dependencies['@sap/cds-dk']);
    }

    return false;
  } catch {
    return false;
  }
}

// === next module ============================================================

import { spawnSync, SpawnSyncReturns } from 'child_process';

import { addJavaScriptExtractorDiagnostic } from './diagnostics';
import { cdsExtractorLog } from './logging';

/**
 * Run the JavaScript extractor autobuild script
 * @param sourceRoot The source root directory
 * @param autobuildScriptPath Path to the autobuild script
 * @param codeqlExePath Path to the CodeQL executable (optional)
 * @returns Success status and any error message
 */
export function runJavaScriptExtractor(
  sourceRoot: string,
  autobuildScriptPath: string,
  codeqlExePath?: string,
): { success: boolean; error?: string } {
  cdsExtractorLog(
    'info',
    `Extracting the .cds.json files by running the 'javascript' extractor autobuild script:
        ${autobuildScriptPath}`,
  );

  /**
   * Invoke the javascript autobuilder to index the .cds.json files only.
   *
   * Environment variables must be passed from this script's process to the
   * process that invokes the autobuild script, otherwise the CDS autobuild.sh
   * script will not be invoked by the autobuild script built into the
   * 'javascript' extractor.
   *
   * IMPORTANT: The JavaScript extractor autobuild script must be invoked with
   * the current working directory set to the project (source) root directory
   * because it assumes it is running from there.
   */
  const result: SpawnSyncReturns<Buffer> = spawnSync(autobuildScriptPath, [], {
    cwd: sourceRoot,
    env: process.env,
    shell: true,
    stdio: 'inherit',
  });

  if (result.error) {
    const errorMessage = `Error running JavaScript extractor: ${result.error.message}`;
    if (codeqlExePath) {
      addJavaScriptExtractorDiagnostic(sourceRoot, errorMessage, codeqlExePath, sourceRoot);
    }
    return {
      success: false,
      error: errorMessage,
    };
  }

  if (result.status !== 0) {
    const errorMessage = `JavaScript extractor failed with exit code ${String(result.status)}`;
    if (codeqlExePath) {
      addJavaScriptExtractorDiagnostic(sourceRoot, errorMessage, codeqlExePath, sourceRoot);
    }
    return {
      success: false,
      error: errorMessage,
    };
  }

  return { success: true };
}
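// Illustrative sketch (hypothetical call site, not part of the module). The
// concrete paths are assumptions; in practice they come from the
// environment-setup module shown below.
function exampleRunExtractor(): void {
  const outcome = runJavaScriptExtractor(
    '/work/source', // must be the source root: the autobuild script assumes its cwd
    '/opt/codeql/javascript/tools/autobuild.sh',
    '/opt/codeql/codeql', // optional; enables diagnostic reporting when the run fails
  );
  if (!outcome.success) {
    console.error(outcome.error ?? 'JavaScript extractor failed');
  }
}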
// === next module ============================================================

import { execFileSync } from 'child_process';
import { existsSync } from 'fs';
import { arch, platform } from 'os';
import { join, resolve } from 'path';

import { dirExists } from './filesystem';
import { cdsExtractorLog } from './logging';

/**
 * Interface for platform information
 */
export interface PlatformInfo {
  platform: string;
  arch: string;
  isWindows: boolean;
  exeExtension: string;
}

/**
 * Interface for environment validation results
 */
export interface EnvironmentSetupResult {
  success: boolean;
  errorMessages: string[];
  codeqlExePath: string;
  jsExtractorRoot: string;
  autobuildScriptPath: string;
  platformInfo: PlatformInfo;
}

/**
 * Get platform information
 * @returns Platform information including OS platform, architecture, and whether it's Windows
 */
export function getPlatformInfo(): PlatformInfo {
  const osPlatform: string = platform();
  const osPlatformArch: string = arch();
  const isWindows = osPlatform === 'win32';
  const exeExtension = isWindows ? '.exe' : '';

  return {
    platform: osPlatform,
    arch: osPlatformArch,
    isWindows,
    exeExtension,
  };
}

/**
 * Get the path to the CodeQL executable.
 * Prioritizes CODEQL_DIST if set and valid. Otherwise, tries to find CodeQL via the system PATH.
 * @returns The resolved path to the CodeQL executable, or an empty string if not found.
 */
export function getCodeQLExePath(): string {
  const platformInfo = getPlatformInfo();
  const codeqlExeName: string = platformInfo.isWindows ? 'codeql.exe' : 'codeql';

  // First, check if CODEQL_DIST is set and valid
  const codeqlDist = process.env.CODEQL_DIST;
  if (codeqlDist) {
    const codeqlPathFromDist = resolve(join(codeqlDist, codeqlExeName));
    if (existsSync(codeqlPathFromDist)) {
      cdsExtractorLog('info', `Using CodeQL executable from CODEQL_DIST: ${codeqlPathFromDist}`);
      return codeqlPathFromDist;
    } else {
      cdsExtractorLog(
        'error',
        `CODEQL_DIST is set to '${codeqlDist}', but CodeQL executable was not found at '${codeqlPathFromDist}'. Please ensure this path is correct. Falling back to PATH-based discovery.`,
      );
      // Fall through to PATH-based discovery
    }
  }

  // CODEQL_DIST is not set or was invalid, so attempt to find CodeQL via the
  // system PATH using 'codeql version --format=json'.
  cdsExtractorLog(
    'info',
    'CODEQL_DIST environment variable not set or invalid. Attempting to find CodeQL executable via system PATH using "codeql version --format=json".',
  );
  try {
    const versionOutput = execFileSync(codeqlExeName, ['version', '--format=json'], {
      encoding: 'utf8',
      timeout: 5000, // 5 seconds timeout
      stdio: 'pipe', // Suppress output to console
    });

    interface CodeQLVersionInfo {
      unpackedLocation?: string;
      cliVersion?: string; // For potential future use or richer logging
    }

    try {
      const versionInfo = JSON.parse(versionOutput) as CodeQLVersionInfo;

      if (
        versionInfo &&
        typeof versionInfo.unpackedLocation === 'string' &&
        versionInfo.unpackedLocation
      ) {
        const resolvedPathFromVersion = resolve(join(versionInfo.unpackedLocation, codeqlExeName));
        if (existsSync(resolvedPathFromVersion)) {
          cdsExtractorLog(
            'info',
            `CodeQL executable found via 'codeql version --format=json' at: ${resolvedPathFromVersion}`,
          );
          return resolvedPathFromVersion;
        }
        cdsExtractorLog(
          'warn',
          `'codeql version --format=json' provided unpackedLocation '${versionInfo.unpackedLocation}', but executable not found at '${resolvedPathFromVersion}'.`,
        );
      } else {
        cdsExtractorLog(
          'warn',
          "Could not determine CodeQL executable path from 'codeql version --format=json' output. 'unpackedLocation' field missing, empty, or invalid.",
        );
      }
    } catch (parseError) {
      cdsExtractorLog(
        'warn',
        `Failed to parse 'codeql version --format=json' output: ${String(parseError)}. Output was: ${versionOutput}`,
      );
    }
  } catch (error) {
    let errorMessage = `Failed to find CodeQL executable via 'codeql version --format=json'. Error: ${String(error)}`;
    if (error && typeof error === 'object' && 'code' in error && error.code === 'ENOENT') {
      errorMessage += `\nThe command '${codeqlExeName}' was not found in your system PATH.`;
    }
    cdsExtractorLog('info', errorMessage);
  }

  cdsExtractorLog(
    'error',
    'Failed to determine CodeQL executable path. Please ensure the CODEQL_DIST environment variable is set and points to a valid CodeQL distribution, or that the CodeQL CLI (codeql) is available in your system PATH and "codeql version --format=json" can provide its location.',
  );
  return ''; // Return empty string if all attempts fail
}
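// Illustrative sketch (hypothetical caller, not part of the module): the
// resolution order that getCodeQLExePath() implements. Paths are assumptions.
function exampleResolveCodeql(): string {
  // 1. CODEQL_DIST, when set and containing codeql[.exe], wins outright.
  process.env.CODEQL_DIST = '/opt/codeql';
  // 2. Otherwise `codeql version --format=json` is consulted and its
  //    `unpackedLocation` field is used, if the executable exists there.
  // 3. Otherwise '' is returned and setupAndValidateEnvironment() (below)
  //    reports the failure as a setup error.
  return getCodeQLExePath(); // e.g. '/opt/codeql/codeql' when step 1 succeeds
}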
/**
 * Get the JavaScript extractor root path.
 * @param codeqlExePath The path to the CodeQL executable. If empty, resolution will be skipped.
 * @returns The JavaScript extractor root path, or an empty string if not found or if codeqlExePath is empty.
 */
export function getJavaScriptExtractorRoot(codeqlExePath: string): string {
  let jsExtractorRoot = process.env.CODEQL_EXTRACTOR_JAVASCRIPT_ROOT ?? '';

  if (jsExtractorRoot) {
    cdsExtractorLog(
      'info',
      `Using JavaScript extractor root from environment variable CODEQL_EXTRACTOR_JAVASCRIPT_ROOT: ${jsExtractorRoot}`,
    );
    return jsExtractorRoot;
  }

  if (!codeqlExePath) {
    cdsExtractorLog(
      'warn',
      'Cannot resolve JavaScript extractor root because the CodeQL executable path was not provided or found.',
    );
    return '';
  }

  try {
    jsExtractorRoot = execFileSync(
      codeqlExePath,
      ['resolve', 'extractor', '--language=javascript'],
      { stdio: 'pipe' }, // Suppress output from the command itself
    )
      .toString()
      .trim();
    if (jsExtractorRoot) {
      cdsExtractorLog('info', `JavaScript extractor root resolved to: ${jsExtractorRoot}`);
    } else {
      cdsExtractorLog(
        'warn',
        `'codeql resolve extractor --language=javascript' using '${codeqlExePath}' returned an empty path.`,
      );
    }
  } catch (error) {
    cdsExtractorLog(
      'error',
      `Error resolving JavaScript extractor root using '${codeqlExePath}': ${String(error)}`,
    );
    jsExtractorRoot = ''; // Ensure it's empty on error
  }
  return jsExtractorRoot;
}

/**
 * Set JavaScript extractor environment variables using CDS extractor variables
 */
export function setupJavaScriptExtractorEnv(): void {
  process.env.CODEQL_EXTRACTOR_JAVASCRIPT_WIP_DATABASE =
    process.env.CODEQL_EXTRACTOR_CDS_WIP_DATABASE;
  process.env.CODEQL_EXTRACTOR_JAVASCRIPT_DIAGNOSTIC_DIR =
    process.env.CODEQL_EXTRACTOR_CDS_DIAGNOSTIC_DIR;
  process.env.CODEQL_EXTRACTOR_JAVASCRIPT_LOG_DIR = process.env.CODEQL_EXTRACTOR_CDS_LOG_DIR;
  process.env.CODEQL_EXTRACTOR_JAVASCRIPT_SCRATCH_DIR =
    process.env.CODEQL_EXTRACTOR_CDS_SCRATCH_DIR;
  process.env.CODEQL_EXTRACTOR_JAVASCRIPT_TRAP_DIR = process.env.CODEQL_EXTRACTOR_CDS_TRAP_DIR;
  process.env.CODEQL_EXTRACTOR_JAVASCRIPT_SOURCE_ARCHIVE_DIR =
    process.env.CODEQL_EXTRACTOR_CDS_SOURCE_ARCHIVE_DIR;
}
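// Illustrative sketch (hypothetical caller, not part of the module): the
// CDS-to-JavaScript variable mirroring performed above. The value is an
// assumption for illustration.
function exampleMirrorEnv(): void {
  process.env.CODEQL_EXTRACTOR_CDS_TRAP_DIR = '/db/trap';
  setupJavaScriptExtractorEnv();
  // The JavaScript extractor now writes TRAP output into the CDS database:
  console.log(process.env.CODEQL_EXTRACTOR_JAVASCRIPT_TRAP_DIR); // '/db/trap'
}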
/**
 * Get the path to the autobuild script
 * @param jsExtractorRoot The JavaScript extractor root path
 * @returns The path to the autobuild script, or an empty string if jsExtractorRoot is empty.
 */
export function getAutobuildScriptPath(jsExtractorRoot: string): string {
  if (!jsExtractorRoot) return '';
  const platformInfo = getPlatformInfo();
  const autobuildScriptName: string = platformInfo.isWindows ? 'autobuild.cmd' : 'autobuild.sh';
  return resolve(join(jsExtractorRoot, 'tools', autobuildScriptName));
}

/**
 * Configure LGTM index filters for CDS files
 */
export function configureLgtmIndexFilters(): void {
  let excludeFilters = '';

  if (process.env.LGTM_INDEX_FILTERS) {
    cdsExtractorLog(
      'info',
      `Found $LGTM_INDEX_FILTERS already set to:
${process.env.LGTM_INDEX_FILTERS}`,
    );
    const allowedExcludePatterns = [join('exclude:**', '*'), join('exclude:**', '*.*')];

    excludeFilters =
      '\n' +
      process.env.LGTM_INDEX_FILTERS.split('\n')
        .filter(
          line =>
            line.startsWith('exclude') &&
            !allowedExcludePatterns.some(pattern => line.includes(pattern)),
        )
        .join('\n');
  }

  // Enable extraction of the .cds.json files only.
  const lgtmIndexFiltersPatterns = [
    join('exclude:**', '*.*'),
    join('include:**', '*.cds.json'),
    join('include:**', '*.cds'),
    join('exclude:**', 'node_modules', '**', '*.*'),
  ].join('\n');

  process.env.LGTM_INDEX_FILTERS = lgtmIndexFiltersPatterns + excludeFilters;
  process.env.LGTM_INDEX_TYPESCRIPT = 'NONE';
  // Configure to copy over the .cds files as well, by pretending they are JSON.
  process.env.LGTM_INDEX_FILETYPES = '.cds:JSON';
}
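// Illustrative sketch (hypothetical caller, not part of the module): the
// filters produced by a call with no pre-existing LGTM_INDEX_FILTERS, on a
// POSIX platform where join() uses '/'.
function exampleIndexFilters(): void {
  delete process.env.LGTM_INDEX_FILTERS;
  configureLgtmIndexFilters();
  // LGTM_INDEX_FILTERS is now:
  //   exclude:**/*.*
  //   include:**/*.cds.json
  //   include:**/*.cds
  //   exclude:**/node_modules/**/*.*
  // so only .cds and .cds.json files are indexed, while LGTM_INDEX_FILETYPES
  // ('.cds:JSON') makes the JavaScript extractor archive the .cds sources too.
  console.log(process.env.LGTM_INDEX_FILTERS);
}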
/**
 * Sets up the environment and validates key components for running the CDS extractor.
 * This includes checking for the CodeQL executable, validating the source root directory,
 * and setting up environment variables for the JavaScript extractor.
 *
 * @param sourceRoot The source root directory.
 *
 * @returns The {@link EnvironmentSetupResult} containing success status, error messages,
 *          CodeQL executable path, JavaScript extractor root, autobuild script path,
 *          and platform information.
 */
export function setupAndValidateEnvironment(sourceRoot: string): EnvironmentSetupResult {
  const errorMessages: string[] = [];
  const platformInfo = getPlatformInfo();

  // Get the CodeQL executable path
  const codeqlExePath = getCodeQLExePath();
  if (!codeqlExePath) {
    errorMessages.push(
      'Failed to find CodeQL executable. Ensure CODEQL_DIST is set and valid, or CodeQL CLI is in PATH.',
    );
  }

  // Validate that the required source root directory exists
  if (!dirExists(sourceRoot)) {
    errorMessages.push(`Project root directory '${sourceRoot}' does not exist.`);
  }

  // Get JavaScript extractor root
  const jsExtractorRoot = getJavaScriptExtractorRoot(codeqlExePath);
  if (!jsExtractorRoot) {
    if (codeqlExePath) {
      // Only add this error if codeqlExePath was found but the JS extractor root wasn't
      errorMessages.push(
        'Failed to determine JavaScript extractor root using the found CodeQL executable.',
      );
    } else {
      // If codeqlExePath is empty, the error from getCodeQLExePath is usually sufficient,
      // but a more specific message helps pinpoint the consequence.
      errorMessages.push(
        'Cannot determine JavaScript extractor root because CodeQL executable was not found.',
      );
    }
  }

  // Set environment variables for the JavaScript extractor only if jsExtractorRoot is valid
  if (jsExtractorRoot) {
    process.env.CODEQL_EXTRACTOR_JAVASCRIPT_ROOT = jsExtractorRoot;
    setupJavaScriptExtractorEnv();
  }

  // Get the autobuild script path. A missing autobuild script path may or may
  // not be an error depending on the run mode; this function just reports what
  // it found.
  const autobuildScriptPath = jsExtractorRoot ? getAutobuildScriptPath(jsExtractorRoot) : '';

  return {
    success: errorMessages.length === 0,
    errorMessages,
    codeqlExePath, // Will be '' if not found
    jsExtractorRoot, // Will be '' if not found
    autobuildScriptPath,
    platformInfo,
  };
}

// === next module ============================================================

import { resolve } from 'path';

const USAGE_MESSAGE = `\tUsage: node