From 4d33494ead8b747035ab693f1c37725f7d8a65f8 Mon Sep 17 00:00:00 2001
From: Bart Veneman
Date: Sun, 12 Oct 2025 23:32:58 +0200
Subject: [PATCH] feat: make HTML parser optional argument

---
 README.md                  | 96 ++++++++++++++++++++------------------
 src/filter-entries.test.ts | 11 +++++
 src/filter-entries.ts      |  7 ++-
 src/index.ts               |  2 +-
 4 files changed, 68 insertions(+), 48 deletions(-)

diff --git a/README.md b/README.md
index f672041..672eec5 100644
--- a/README.md
+++ b/README.md
@@ -19,52 +19,33 @@ npm install @projectwallace/css-code-coverage

 ### Prerequisites

-1. You have collected browser coverage data of your CSS. There are several ways to do this:
-
-   1. in the browser devtools in [Edge](https://learn.microsoft.com/en-us/microsoft-edge/devtools-guide-chromium/coverage/)/[Chrome](https://developer.chrome.com/docs/devtools/coverage/)/chromium
-   1. Via the `coverage.startCSSCoverage()` API that headless browsers like [Playwright](https://playwright.dev/docs/api/class-coverage#coverage-start-css-coverage) or [Puppeteer](https://pptr.dev/api/puppeteer.coverage.startcsscoverage/) provide.
-
-   Either way you end up with one or more JSON files that contain coverage data.
-
-   ```ts
-   // Read a single JSON or a folder full of JSON files with coverage data
-   // Coverage data looks like this:
-   // {
-   //   url: 'https://www.projectwallace.com/style.css',
-   //   text: 'a { color: blue; text-decoration: underline; }', etc.
-   //   ranges: [
-   //     { start: 0, end: 46 }
-   //   ]
-   // }
-   import { parse_coverage } from '@projectwallace/css-code-coverage'
-
-   let files = await fs.glob('./css-coverage/**/*.json')
-   let coverage_data = []
-
-   for (let file of files) {
-   	let json_content = await fs.readFile(file, 'urf-8')
-   	coverage_data.push(...parse_coverage(json_content))
-   }
-   ```
-
-1. You provide a HTML parser that we use to 'scrape' the HTML in case the browser gives us not just plain CSS contents. Depending on where you run this analysis you can use:
-
-   1. Browser:
-      ```ts
-      function parse_html(html) {
-      	return new DOMParser().parseFromString(html, 'text/html')
-      }
-      ```
-   1. Node (using [linkedom](https://github.com/WebReflection/linkedom) in this example):
-
-      ```ts
-      // $ npm install linkedom
-      import { DOMParser } from 'linkedom'
-
-      function parse_html(html: string) {
-      	return new DOMParser().parseFromString(html, 'text/html')
-      }
-      ```
+You have collected browser coverage data for your CSS. There are several ways to do this:
+
+1. In the browser DevTools in [Edge](https://learn.microsoft.com/en-us/microsoft-edge/devtools-guide-chromium/coverage/)/[Chrome](https://developer.chrome.com/docs/devtools/coverage/)/Chromium
+1. Via the `coverage.startCSSCoverage()` API that headless browsers like [Playwright](https://playwright.dev/docs/api/class-coverage#coverage-start-css-coverage) or [Puppeteer](https://pptr.dev/api/puppeteer.coverage.startcsscoverage/) provide.
+
+Either way, you end up with one or more JSON files that contain coverage data.
+
+```ts
+// Read a single JSON file or a folder full of JSON files with coverage data
+// Coverage data looks like this:
+// {
+//   url: 'https://www.projectwallace.com/style.css',
+//   text: 'a { color: blue; text-decoration: underline; }', etc.
+//   ranges: [
+//     { start: 0, end: 46 }
+//   ]
+// }
+import fs from 'node:fs/promises'
+import { parse_coverage } from '@projectwallace/css-code-coverage'
+
+let coverage_data = []
+
+// fs.glob() (Node.js 22+) yields the matching file paths as an async iterator
+for await (let file of fs.glob('./css-coverage/**/*.json')) {
+	let json_content = await fs.readFile(file, 'utf-8')
+	coverage_data.push(...parse_coverage(json_content))
+}
+```

### Bringing it together

@@ -73,3 +54,26 @@ import { calculate_coverage } from '@projectwallace/css-code-coverage'

let report = calculate_coverage(coverage_data, parse_html)
```
+
+See [src/index.ts](https://github.com/projectwallace/css-code-coverage/blob/main/src/index.ts) for the data that's returned.
+
+### Optional: coverage from `<style>` tags
+
+If a coverage entry contains an HTML document instead of plain CSS, pass an HTML parser as the second argument so the CSS inside `<style>` tags can be extracted (for example `new DOMParser().parseFromString(html, 'text/html')` in the browser, or [linkedom](https://github.com/WebReflection/linkedom) in Node). Without a parser, HTML entries are silently skipped.
diff --git a/src/filter-entries.test.ts b/src/filter-entries.test.ts
+			ranges: [{ start: 13, end: 26 }],
+		},
+	]
+	expect(filter_coverage(entries)).toEqual([])
+})
diff --git a/src/filter-entries.ts b/src/filter-entries.ts
index aa6f375..99b543e 100644
--- a/src/filter-entries.ts
+++ b/src/filter-entries.ts
@@ -7,7 +7,7 @@ function is_html(text: string): boolean {
 	return /<\/?(html|body|head|div|span|script|style)/i.test(text)
 }

-export function filter_coverage(coverage: Coverage[], parse_html: Parser): Coverage[] {
+export function filter_coverage(coverage: Coverage[], parse_html?: Parser): Coverage[] {
 	let result = []

 	for (let entry of coverage) {
@@ -22,6 +22,11 @@ export function filter_coverage(coverage: Coverage[], parse_html: Parser): Cover
 	}

 	if (is_html(entry.text)) {
+		if (!parse_html) {
+			// No parser provided, cannot extract CSS from HTML, silently skip this entry
+			continue
+		}
+
 		let { css, ranges } = remap_html(parse_html, entry.text, entry.ranges)
 		result.push({
 			url: entry.url,
diff --git a/src/index.ts b/src/index.ts
index b61c71f..35c6fe4 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -44,7 +44,7 @@ function ratio(fraction: number, total: number) {
  * 4. Calculate used/unused CSS bytes (fastest path, no inspection of the actual CSS needed)
  * 5. Calculate line-coverage, byte-coverage per stylesheet
  */
-export function calculate_coverage(coverage: Coverage[], parse_html: Parser): CoverageResult {
+export function calculate_coverage(coverage: Coverage[], parse_html?: Parser): CoverageResult {
 	let total_files_found = coverage.length

 	if (!is_valid_coverage(coverage)) {
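For reference, a minimal sketch of what the optional parser means at the call site, based on the `calculate_coverage(coverage, parse_html?)` signature and the skip-on-missing-parser behaviour in this patch. The coverage entry and the linkedom-based parser are illustrative assumptions borrowed from the README examples, not part of the patch itself.

```ts
import { calculate_coverage } from '@projectwallace/css-code-coverage'
import { DOMParser } from 'linkedom'

// A single hypothetical coverage entry, shaped like the README example
let coverage_data = [
	{
		url: 'https://www.projectwallace.com/style.css',
		text: 'a { color: blue; text-decoration: underline; }',
		ranges: [{ start: 0, end: 46 }],
	},
]

// With a parser, entries whose text is an HTML document are scraped for inline <style> CSS
let full_report = calculate_coverage(coverage_data, (html: string) =>
	new DOMParser().parseFromString(html, 'text/html'),
)

// Without a parser (the new optional behaviour), HTML entries are silently skipped,
// so the report only covers plain CSS stylesheets
let css_only_report = calculate_coverage(coverage_data)
```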
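The added test in `src/filter-entries.test.ts` lost its opening lines above. As a rough sketch only (assuming a Vitest-style runner and a hypothetical HTML payload, not the author's actual test), a check for the new skip behaviour could look like this:

```ts
import { test, expect } from 'vitest'
import { filter_coverage } from './filter-entries'

test('skips HTML coverage entries when no HTML parser is provided', () => {
	let entries = [
		{
			url: 'https://example.com/',
			// Hypothetical HTML document; the range points at the CSS inside <style>
			text: '<html><style>a {color:red}</style></html>',
			ranges: [{ start: 13, end: 26 }],
		},
	]
	// No parse_html argument, so the HTML entry cannot be scraped and is dropped
	expect(filter_coverage(entries)).toEqual([])
})
```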