diff --git a/docs/mcp.md b/docs/mcp.md index 6609440..4fc8a9d 100644 --- a/docs/mcp.md +++ b/docs/mcp.md @@ -433,6 +433,10 @@ Validates an XML test case for schema correctness (validity score) and best prac **Key schema rules:** TC_001 (missing XML declaration), TC_002 (malformed XML), TC_003 (wrong root element), TC_010/011/012 (missing/invalid id/guid), TC_031 (invalid apiCall guid), TC_034/035 (non-integer testItemId). +**Warning rules:** +- **DATA-001** — `testCase` declares a `` element. CLI standalone execution does not bind CSV column variables; steps using variable references will resolve to null. Use `SetValues` (Test scope) steps instead, or add the test to a test plan. +- **ASSERT-001** — An `AssertValues` step uses the `argument id="values"` (namedValues) format, which is designed for UI element attribute assertions. For Apex/SOQL result or variable comparisons this silently passes as `null=null`. Use separate `expectedValue`, `actualValue`, and `comparisonType` arguments instead. + --- ### `provar.testsuite.validate` @@ -938,7 +942,9 @@ Triggers a Provar Automation test run using the currently loaded properties file | --------- | -------- | -------- | ------------------------------------------------------------------------ | | `flags` | string[] | no | Raw CLI flags to forward (e.g. `["--project-path", "/path/to/project"]`) | -**Output** — `{ requestId, exitCode, stdout, stderr }` +**Output** — `{ requestId, exitCode, stdout, stderr[, output_lines_suppressed] }` + +The `stdout` field is filtered before returning: Java schema-validator lines (`com.networknt.schema.*`) and stale logger-lock `SEVERE` warnings are stripped. If any lines were suppressed, `output_lines_suppressed` contains the count and a note is appended to `stdout`. Use `provar.testrun.rca` to inspect the full raw JUnit output. **Error codes:** `AUTOMATION_TESTRUN_FAILED`, `SF_NOT_FOUND` @@ -1084,7 +1090,9 @@ Analyse a completed test run and return a structured Root Cause Analysis report. 
| `screenshot_dir` | Path to `Artifacts/` directory if it exists, else `null` | | `pre_existing` | `true` if the same test failed in a prior Increment run | -**Root cause categories:** `DRIVER_VERSION_MISMATCH`, `LOCATOR_STALE`, `TIMEOUT`, `ASSERTION_FAILED`, `CREDENTIAL_FAILURE`, `MISSING_CALLABLE`, `METADATA_CACHE`, `PAGE_OBJECT_COMPILE`, `CONNECTION_REFUSED`, `DATA_SETUP`, `LICENSE_INVALID`, `UNKNOWN` +**Root cause categories:** `DRIVER_VERSION_MISMATCH`, `LOCATOR_STALE`, `TIMEOUT`, `ASSERTION_FAILED`, `CREDENTIAL_FAILURE`, `MISSING_CALLABLE`, `METADATA_CACHE`, `PAGE_OBJECT_COMPILE`, `CONNECTION_REFUSED`, `DATA_SETUP`, `LICENSE_INVALID`, `SALESFORCE_VALIDATION`, `SALESFORCE_PICKLIST`, `SALESFORCE_REFERENCE`, `SALESFORCE_ACCESS`, `SALESFORCE_TRIGGER`, `UNKNOWN` + +Salesforce DML error categories (`SALESFORCE_*`) represent test-data failures — they appear in `failures[].root_cause_category` but are **not** included in `infrastructure_issues`. **Error codes:** `RESULTS_NOT_CONFIGURED` diff --git a/src/mcp/tools/automationTools.ts b/src/mcp/tools/automationTools.ts index f0f1f93..61d5d6d 100644 --- a/src/mcp/tools/automationTools.ts +++ b/src/mcp/tools/automationTools.ts @@ -160,6 +160,48 @@ export function registerAutomationConfigLoad(server: McpServer, config: ServerCo ); } +// ── Testrun output filter ───────────────────────────────────────────────────── + +const NOISE_PATTERNS: RegExp[] = [ + /com\.networknt\.schema/, + /SEVERE.*Failed to configure logger.*\.lck/, +]; + +/** + * Strip Java schema-validator debug lines and stale logger-lock SEVERE warnings + * from Provar testrun output. These two patterns account for the bulk of output + * volume and cause MCP responses to be truncated before the pass/fail lines. + * + * Everything else (including real SEVERE failures) passes through unchanged. + * Collapses runs of blank lines to a single blank to keep the output readable. + * Returns the filtered text and the count of suppressed lines. 
+ */ +export function filterTestRunOutput(raw: string): { filtered: string; suppressed: number } { + const lines = raw.split(/\r?\n/); + const kept: string[] = []; + let suppressed = 0; + let lastKeptWasBlank = false; + + for (const rawLine of lines) { + const line = rawLine.endsWith('\r') ? rawLine.slice(0, -1) : rawLine; + if (NOISE_PATTERNS.some((p) => p.test(line))) { + suppressed++; + continue; + } + const isBlank = line.trim() === ''; + if (isBlank && lastKeptWasBlank) continue; // collapse blank runs + kept.push(line); + lastKeptWasBlank = isBlank; + } + + let filtered = kept.join('\n'); + if (suppressed > 0) { + filtered += + `\n[testrun: ${suppressed} lines suppressed (schema validator / logger noise) — use provar.testrun.rca for full results]`; + } + return { filtered, suppressed }; +} + // ── Tool: provar.automation.testrun ─────────────────────────────────────────── export function registerAutomationTestRun(server: McpServer): void { @@ -183,12 +225,19 @@ export function registerAutomationTestRun(server: McpServer): void { try { const result = runSfCommand(['provar', 'automation', 'test', 'run', ...flags], sf_path); - const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr }; + const { filtered, suppressed } = filterTestRunOutput(result.stdout); if (result.exitCode !== 0) { - return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(makeError('AUTOMATION_TESTRUN_FAILED', result.stderr || result.stdout, requestId)) }] }; + const { filtered: filteredErr, suppressed: suppressedErr } = filterTestRunOutput(result.stderr || result.stdout); + const errBody = { + ...makeError('AUTOMATION_TESTRUN_FAILED', filteredErr, requestId), + ...(suppressedErr > 0 ? 
{ output_lines_suppressed: suppressedErr } : {}),
+        };
+        return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(errBody) }] };
       }
+      const response: Record<string, unknown> = { requestId, exitCode: result.exitCode, stdout: filtered, stderr: result.stderr };
+      if (suppressed > 0) response['output_lines_suppressed'] = suppressed;
       return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response };
     } catch (err) {
       return handleSpawnError(err, requestId, 'provar.automation.testrun');
diff --git a/src/mcp/tools/rcaTools.ts b/src/mcp/tools/rcaTools.ts
index bdd6f4e..57f007b 100644
--- a/src/mcp/tools/rcaTools.ts
+++ b/src/mcp/tools/rcaTools.ts
@@ -77,6 +77,36 @@ const RCA_RULES: RcaRule[] = [
     summary: 'Test assertion failed',
     recommendation: 'Verify expected value is correct for current org state',
   },
+  {
+    category: 'SALESFORCE_VALIDATION',
+    pattern: /Required fields are missing:\s*\[/i,
+    summary: 'Salesforce required-field validation failed',
+    recommendation: 'Ensure all required fields have values; check field-level security for the running user',
+  },
+  {
+    category: 'SALESFORCE_PICKLIST',
+    pattern: /bad value for restricted picklist field/i,
+    summary: 'Invalid picklist value used',
+    recommendation: 'Query valid picklist values (run metadata download or Apex describe); check for trailing spaces or case differences',
+  },
+  {
+    category: 'SALESFORCE_REFERENCE',
+    pattern: /INVALID_CROSS_REFERENCE_KEY/i,
+    summary: 'Referenced record ID does not exist or is inaccessible',
+    recommendation: 'Verify the referenced record exists and the running user has access to it',
+  },
+  {
+    category: 'SALESFORCE_ACCESS',
+    pattern: /INSUFFICIENT_ACCESS_ON_CROSS_REFERENCE_ENTITY/i,
+    summary: 'Running user lacks permission on a referenced record',
+    recommendation: 'Grant the running user appropriate object and record-level permissions',
+  },
+  {
+    category: 'SALESFORCE_TRIGGER',
+    pattern:
/FIELD_CUSTOM_VALIDATION_EXCEPTION/i, + summary: 'A Salesforce validation rule or trigger blocked the DML operation', + recommendation: 'Review validation rules and triggers on the target object; ensure test data satisfies all business rules', + }, { category: 'CREDENTIAL_FAILURE', pattern: /InvalidPasswordException|AuthenticationException|INVALID_LOGIN/i, @@ -184,6 +214,29 @@ function numericChildDirs(dir: string): number[] { } } +/** + * Find Provar Increment-mode sibling directories next to resultsBase. + * Provar creates Results, Results(1), Results(2), … as siblings in the same + * parent directory — NOT as numeric children of Results. Returns the numeric + * suffixes of all matching siblings (e.g. [1, 2, 18]). + */ +function incrementSiblingDirs(resultsBase: string): number[] { + const parent = path.dirname(resultsBase); + const base = path.basename(resultsBase); + if (!fs.existsSync(parent)) return []; + try { + const safeName = base.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); + const pattern = new RegExp(`^${safeName}\\((\\d+)\\)$`); + return fs + .readdirSync(parent, { withFileTypes: true }) + .filter((e) => e.isDirectory() && pattern.test(e.name)) + .map((e) => parseInt((pattern.exec(e.name) as RegExpExecArray)[1], 10)) + .filter((n) => n > 0); + } catch { + return []; + } +} + /** * Expand ${env.VAR} placeholders in a string using process.env. */ @@ -293,8 +346,20 @@ function resolveResultsLocation( } // Increment resolution + // Provar's primary Increment pattern: Results, Results(1), Results(2)… as siblings. + // Legacy fallback: purely-numeric children (Results/1/, Results/2/…). + const siblings = incrementSiblingDirs(resultsBase); const numericDirs = numericChildDirs(resultsBase); - if (disposition === 'Increment' || numericDirs.length > 0) { + if (disposition === 'Increment' || siblings.length > 0 || numericDirs.length > 0) { + if (siblings.length > 0) { + const targetIndex = run_index ?? 
Math.max(...siblings); + return { + results_dir: path.join(path.dirname(resultsBase), `${path.basename(resultsBase)}(${targetIndex})`), + run_index: targetIndex, + disposition, + resolution_source, + }; + } if (numericDirs.length > 0) { const targetIndex = run_index ?? Math.max(...numericDirs); return { @@ -304,7 +369,7 @@ function resolveResultsLocation( resolution_source, }; } - // Disposition is Increment but no numeric subdirs yet — fall through to base + // Disposition is Increment but no numbered dirs yet — fall through to base } return { @@ -336,8 +401,10 @@ export function registerTestRunLocate(server: McpServer): void { .describe('Explicit override for the results base directory; if provided, skip auto-detection'), run_index: z .number() + .int() + .positive() .optional() - .describe('Which Increment run to target (default: latest)'), + .describe('Which Increment run to target (default: latest); must be a positive integer'), }, (input) => { const requestId = makeRequestId(); @@ -603,8 +670,10 @@ export function registerTestRunRca(server: McpServer): void { .describe('Explicit override for the results base directory'), run_index: z .number() + .int() + .positive() .optional() - .describe('Which Increment run to target (default: latest)'), + .describe('Which Increment run to target (default: latest); must be a positive integer'), locate_only: z .boolean() .optional() diff --git a/src/mcp/tools/testCaseValidate.ts b/src/mcp/tools/testCaseValidate.ts index 89f02fb..dee9985 100644 --- a/src/mcp/tools/testCaseValidate.ts +++ b/src/mcp/tools/testCaseValidate.ts @@ -250,7 +250,7 @@ export function validateTestCase(xmlContent: string, testName?: string): TestCas severity: 'ERROR', message: `testCase guid "${tcGuid}" is not a valid UUID v4.`, applies_to: 'testCase', - suggestion: 'Generate a proper UUID v4 for the guid attribute.', + suggestion: 'Replace with a valid UUID v4 — e.g. crypto.randomUUID(). 
The 4th segment must begin with 8, 9, a, or b.', }); } // TC_013 (registryId) is intentionally not checked here — registryId is a @@ -270,6 +270,17 @@ export function validateTestCase(xmlContent: string, testName?: string): TestCas return finalize(issues, tcId, tcName, 0, xmlContent, testName); } + // DATA-001: binding is silently ignored in standalone CLI execution + if ('dataTable' in tc && tc['dataTable'] != null) { + issues.push({ + rule_id: 'DATA-001', + severity: 'WARNING', + message: 'testCase declares a but CLI standalone execution does not bind CSV column variables — steps using references will resolve to null.', + applies_to: 'testCase', + suggestion: 'Use SetValues (Test scope) steps to bind data for standalone CLI execution, or add this test case to a test plan.', + }); + } + // Same self-closing guard for → fast-xml-parser yields '' const rawSteps = tc['steps']; const steps: Record = @@ -307,7 +318,7 @@ function validateApiCall(call: Record, issues: ValidationIssue[ severity: 'ERROR', message: `apiCall${label} guid "${callGuid}" is not a valid UUID v4.`, applies_to: 'apiCall', - suggestion: 'Use proper UUID v4 format.', + suggestion: 'Replace with a valid UUID v4 — e.g. crypto.randomUUID(). The 4th segment must begin with 8, 9, a, or b.', }); } if (!apiId) { @@ -345,6 +356,29 @@ function validateApiCall(call: Record, issues: ValidationIssue[ suggestion: 'Use sequential integers for testItemId.', }); } + + // ASSERT-001: AssertValues using UI namedValues format instead of variable format + if (apiId?.includes('AssertValues')) { + const rawArgs = call['arguments'] as Record | undefined; + if (rawArgs) { + const argRaw = rawArgs['argument']; + const argList: Array> = !argRaw + ? [] + : Array.isArray(argRaw) + ? 
(argRaw as Array>) + : [argRaw as Record]; + const hasValuesArg = argList.some((a) => (a['@_id'] as string | undefined) === 'values'); + if (hasValuesArg) { + issues.push({ + rule_id: 'ASSERT-001', + severity: 'WARNING', + message: `AssertValues step "${name ?? '(unnamed)'}" uses namedValues format (argument id="values") — designed for UI element attribute assertions. For Apex/SOQL result or variable comparisons this silently passes as null=null.`, + applies_to: 'apiCall', + suggestion: 'Use separate expectedValue, actualValue, and comparisonType arguments for variable or Apex result comparisons.', + }); + } + } + } } function finalize( diff --git a/test/unit/mcp/automationTools.test.ts b/test/unit/mcp/automationTools.test.ts index 1a3fd70..f527000 100644 --- a/test/unit/mcp/automationTools.test.ts +++ b/test/unit/mcp/automationTools.test.ts @@ -13,6 +13,7 @@ import path from 'node:path'; import sinon from 'sinon'; import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'; import { sfSpawnHelper } from '../../../src/mcp/tools/sfSpawn.js'; +import { filterTestRunOutput } from '../../../src/mcp/tools/automationTools.js'; // ── Minimal mock server ─────────────────────────────────────────────────────── @@ -120,6 +121,27 @@ describe('automationTools', () => { assert.equal(body.error_code, 'SF_NOT_FOUND'); assert.ok((body.message as string).includes('npm install -g @salesforce/cli')); }); + + it('strips schema-validator noise from stdout and sets output_lines_suppressed', () => { + const noisy = [ + 'com.networknt.schema.validator.SchemaLoader - loading schema', + 'INFO Starting test run', + 'Tests: 5 passed, 0 failed', + ].join('\n'); + spawnStub.returns(makeSpawnResult(noisy, '', 0)); + const result = server.call('provar.automation.testrun', { flags: [] }); + const body = parseBody(result); + assert.ok(!(body.stdout as string).includes('networknt'), 'Filtered stdout should not contain schema noise'); + assert.ok((body.stdout as string).includes('Tests: 5 
passed'), 'Real output should remain'); + assert.ok((body.output_lines_suppressed as number) > 0, 'output_lines_suppressed should be positive'); + }); + + it('does not set output_lines_suppressed when stdout has no noise', () => { + spawnStub.returns(makeSpawnResult('Tests: 3 passed, 0 failed', '', 0)); + const result = server.call('provar.automation.testrun', { flags: [] }); + const body = parseBody(result); + assert.equal(body.output_lines_suppressed, undefined, 'output_lines_suppressed should be absent'); + }); }); // ── provar.automation.compile ───────────────────────────────────────────── @@ -555,3 +577,72 @@ describe('automationTools', () => { }); }); }); + +// ── filterTestRunOutput ─────────────────────────────────────────────────────── + +describe('filterTestRunOutput', () => { + it('suppresses com.networknt.schema lines', () => { + const raw = 'INFO Starting\ncom.networknt.schema.JsonSchemaFactory - loaded schema\nINFO Done'; + const { filtered, suppressed } = filterTestRunOutput(raw); + assert.equal(suppressed, 1); + assert.ok(!filtered.includes('networknt'), 'Schema lines should be removed'); + assert.ok(filtered.includes('INFO Starting'), 'Real output should remain'); + }); + + it('suppresses SEVERE logger lock lines', () => { + const raw = 'INFO Starting\nSEVERE Failed to configure logger: java.lck\nINFO Done'; + const { filtered, suppressed } = filterTestRunOutput(raw); + assert.equal(suppressed, 1); + assert.ok(!filtered.includes('Failed to configure logger'), 'Logger lock lines should be removed'); + assert.ok(filtered.includes('INFO Done'), 'Real output should remain'); + }); + + it('keeps SEVERE lines that are real test failures', () => { + const raw = 'SEVERE Test execution failed: AssertionError expected true but got false'; + const { filtered, suppressed } = filterTestRunOutput(raw); + assert.equal(suppressed, 0); + assert.ok(filtered.includes('SEVERE Test execution failed'), 'Real SEVERE lines should be kept'); + }); + + it('counts 
suppressed lines correctly across both patterns', () => { + const lines = [ + 'INFO Tests running', + 'com.networknt.schema.validator.SchemaLoader - loading', + 'com.networknt.schema.format.FormatValidator - checking', + 'SEVERE Failed to configure logger: file.lck', + 'INFO Tests complete', + ]; + const { suppressed } = filterTestRunOutput(lines.join('\n')); + assert.equal(suppressed, 3); + }); + + it('appends suppressed-count note referencing provar.testrun.rca', () => { + const raw = 'com.networknt.schema.SchemaLoader\nINFO Done'; + const { filtered } = filterTestRunOutput(raw); + assert.ok(filtered.includes('lines suppressed'), 'Should append suppressed note'); + assert.ok(filtered.includes('provar.testrun.rca'), 'Should mention rca tool'); + }); + + it('does not append note when nothing was suppressed', () => { + const { filtered, suppressed } = filterTestRunOutput('INFO Starting\nINFO Done'); + assert.equal(suppressed, 0); + assert.ok(!filtered.includes('lines suppressed'), 'Should not append note when nothing suppressed'); + }); + + it('collapses consecutive blank lines to one', () => { + const { filtered } = filterTestRunOutput('line1\n\n\n\nline2'); + assert.ok(!filtered.includes('\n\n\n'), 'Multiple blank lines should be collapsed'); + assert.ok(filtered.includes('line1'), 'Content should be preserved'); + assert.ok(filtered.includes('line2'), 'Content should be preserved'); + }); + + it('handles Windows CRLF line endings without leaving trailing \\r', () => { + const raw = 'INFO Starting\r\ncom.networknt.schema.JsonSchemaFactory - loaded\r\nINFO Done\r\n'; + const { filtered, suppressed } = filterTestRunOutput(raw); + assert.equal(suppressed, 1, 'CRLF noise line should be suppressed'); + assert.ok(!filtered.includes('\r'), 'No trailing \\r should remain in output'); + assert.ok(filtered.includes('INFO Starting'), 'Real output should remain'); + assert.ok(filtered.includes('INFO Done'), 'Real output should remain'); + }); +}); + diff --git 
a/test/unit/mcp/rcaTools.test.ts b/test/unit/mcp/rcaTools.test.ts index a12c2bc..d9ffdba 100644 --- a/test/unit/mcp/rcaTools.test.ts +++ b/test/unit/mcp/rcaTools.test.ts @@ -237,6 +237,47 @@ describe('provar.testrun.report.locate', () => { } }); + // Results(N) sibling detection (Provar Increment mode) + it('with Results(N) sibling dirs detects the highest sibling index', () => { + // Provar Increment mode creates Results, Results(1), Results(2)… as siblings. + const parent = path.join(tmpDir, 'SiblingProject'); + fs.mkdirSync(parent, { recursive: true }); + const resultsBase = path.join(parent, 'Results'); + makeResultsDir(resultsBase); // base Results/ exists + makeResultsDir(path.join(parent, 'Results(1)')); + makeResultsDir(path.join(parent, 'Results(18)'), JUNIT_XML); + + const result = server.call('provar.testrun.report.locate', { + project_path: parent, + results_path: resultsBase, + }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.equal(body['run_index'], 18); + assert.ok((body['results_dir'] as string).endsWith('Results(18)')); + }); + + it('with Results(N) siblings and explicit run_index returns that specific sibling', () => { + const parent = path.join(tmpDir, 'SiblingProject2'); + fs.mkdirSync(parent, { recursive: true }); + const resultsBase = path.join(parent, 'Results'); + makeResultsDir(resultsBase); + makeResultsDir(path.join(parent, 'Results(3)')); + makeResultsDir(path.join(parent, 'Results(5)'), JUNIT_XML); + + const result = server.call('provar.testrun.report.locate', { + project_path: parent, + results_path: resultsBase, + run_index: 3, + }); + + assert.equal(isError(result), false); + const body = parseText(result); + assert.equal(body['run_index'], 3); + assert.ok((body['results_dir'] as string).endsWith('Results(3)')); + }); + // Additional: collects per_test_reports for *.testcase.html files it('collects per_test_reports for *.testcase.html files', () => { const resultsDir = 
makeResultsDir(path.join(tmpDir, 'results'), JUNIT_XML); @@ -505,4 +546,65 @@ describe('provar.testrun.rca', () => { const failures = body['failures'] as Array>; assert.equal((failures[0]['error_message'] as string).length, 500); }); + + // Salesforce API error classification + it('classifies "Required fields are missing" as SALESFORCE_VALIDATION', () => { + const junit = ` + + + + Insert failed. Required fields are missing: [AccountId, Name] + + +`; + const resultsDir = makeResultsDir(path.join(tmpDir, 'sf-validation'), junit); + const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + const failures = body['failures'] as Array>; + assert.equal(failures[0]['root_cause_category'], 'SALESFORCE_VALIDATION'); + }); + + it('classifies "bad value for restricted picklist field" as SALESFORCE_PICKLIST', () => { + const junit = ` + + + + Update failed. bad value for restricted picklist field: Status + + +`; + const resultsDir = makeResultsDir(path.join(tmpDir, 'sf-picklist'), junit); + const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + const failures = body['failures'] as Array>; + assert.equal(failures[0]['root_cause_category'], 'SALESFORCE_PICKLIST'); + }); + + it('classifies FIELD_CUSTOM_VALIDATION_EXCEPTION as SALESFORCE_TRIGGER', () => { + const junit = ` + + + + FIELD_CUSTOM_VALIDATION_EXCEPTION: Close date required when stage is Closed Won + + +`; + const resultsDir = makeResultsDir(path.join(tmpDir, 'sf-trigger'), junit); + const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + const failures = body['failures'] as Array>; + assert.equal(failures[0]['root_cause_category'], 'SALESFORCE_TRIGGER'); + }); + + it('Salesforce error categories are not in infrastructure_issues', () => { + const junit = ` + + + + Required fields are missing: [Name] + + +`; + const resultsDir = 
makeResultsDir(path.join(tmpDir, 'sf-infra-check'), junit); + const body = parseText(server.call('provar.testrun.rca', { project_path: tmpDir, results_path: resultsDir })); + const infra = body['infrastructure_issues'] as string[]; + assert.ok(!infra.some((s) => s.includes('SALESFORCE_')), 'Salesforce categories should not appear in infrastructure_issues'); + }); }); diff --git a/test/unit/mcp/testCaseValidate.test.ts b/test/unit/mcp/testCaseValidate.test.ts index 7096077..3f0952a 100644 --- a/test/unit/mcp/testCaseValidate.test.ts +++ b/test/unit/mcp/testCaseValidate.test.ts @@ -164,6 +164,121 @@ describe('validateTestCase', () => { assert.equal(r.step_count, 0); }); }); + + describe('TC_012 / TC_031 suggestion text', () => { + it('TC_012 suggestion names crypto.randomUUID() and variant byte rule', () => { + const r = validateTestCase( + '' + ); + const issue = r.issues.find((i) => i.rule_id === 'TC_012'); + assert.ok(issue, 'Expected TC_012 issue'); + assert.ok( + issue.suggestion?.includes('crypto.randomUUID()'), + `Suggestion should mention crypto.randomUUID(): ${issue.suggestion}` + ); + assert.ok( + issue.suggestion?.includes('8, 9, a, or b'), + `Suggestion should mention variant byte: ${issue.suggestion}` + ); + }); + + it('TC_031 suggestion names crypto.randomUUID() and variant byte rule', () => { + const r = validateTestCase( + ` + + + + +` + ); + const issue = r.issues.find((i) => i.rule_id === 'TC_031'); + assert.ok(issue, 'Expected TC_031 issue'); + assert.ok( + issue.suggestion?.includes('crypto.randomUUID()'), + `Suggestion should mention crypto.randomUUID(): ${issue.suggestion}` + ); + assert.ok( + issue.suggestion?.includes('8, 9, a, or b'), + `Suggestion should mention variant byte: ${issue.suggestion}` + ); + }); + }); + + describe('DATA-001', () => { + it('warns when testCase has a child element', () => { + const r = validateTestCase( + ` + + mydata.csv + + + +` + ); + assert.ok(r.issues.some((i) => i.rule_id === 'DATA-001'), 'Expected 
DATA-001'); + const issue = r.issues.find((i) => i.rule_id === 'DATA-001')!; + assert.equal(issue.severity, 'WARNING'); + }); + + it('does not fire when no present', () => { + const r = validateTestCase(VALID_TC); + assert.ok(!r.issues.some((i) => i.rule_id === 'DATA-001'), 'DATA-001 should not fire for valid test case'); + }); + }); + + describe('ASSERT-001', () => { + it('warns when AssertValues uses argument id="values"', () => { + const r = validateTestCase( + ` + + + + + + + + +` + ); + assert.ok(r.issues.some((i) => i.rule_id === 'ASSERT-001'), 'Expected ASSERT-001'); + const issue = r.issues.find((i) => i.rule_id === 'ASSERT-001')!; + assert.equal(issue.severity, 'WARNING'); + assert.ok(issue.message.includes('Check values'), `Message should include step name: ${issue.message}`); + }); + + it('does not fire for AssertValues with expectedValue/actualValue arguments', () => { + const r = validateTestCase( + ` + + + + + expected + actual + + + +` + ); + assert.ok(!r.issues.some((i) => i.rule_id === 'ASSERT-001'), 'ASSERT-001 should not fire for non-values arguments'); + }); + + it('does not fire for non-AssertValues apiCall with argument id="values"', () => { + const r = validateTestCase( + ` + + + + + + + + +` + ); + assert.ok(!r.issues.some((i) => i.rule_id === 'ASSERT-001'), 'ASSERT-001 should not fire for SetValues'); + }); + }); }); // ── Handler-level tests (registerTestCaseValidate) ────────────────────────────