Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 10 additions & 2 deletions docs/mcp.md
Original file line number Diff line number Diff line change
Expand Up @@ -433,6 +433,10 @@ Validates an XML test case for schema correctness (validity score) and best prac

**Key schema rules:** TC_001 (missing XML declaration), TC_002 (malformed XML), TC_003 (wrong root element), TC_010/011/012 (missing/invalid id/guid), TC_031 (invalid apiCall guid), TC_034/035 (non-integer testItemId).

**Warning rules:**
- **DATA-001** — `testCase` declares a `<dataTable>` element. CLI standalone execution does not bind CSV column variables; steps using variable references will resolve to null. Use `SetValues` (Test scope) steps instead, or add the test to a test plan.
- **ASSERT-001** — An `AssertValues` step uses the `argument id="values"` (namedValues) format, which is designed for UI element attribute assertions. For Apex/SOQL result or variable comparisons this silently passes as `null=null`. Use separate `expectedValue`, `actualValue`, and `comparisonType` arguments instead.

---

### `provar.testsuite.validate`
Expand Down Expand Up @@ -938,7 +942,9 @@ Triggers a Provar Automation test run using the currently loaded properties file
| --------- | -------- | -------- | ------------------------------------------------------------------------ |
| `flags` | string[] | no | Raw CLI flags to forward (e.g. `["--project-path", "/path/to/project"]`) |

**Output** — `{ requestId, exitCode, stdout, stderr }`
**Output** — `{ requestId, exitCode, stdout, stderr[, output_lines_suppressed] }`

The `stdout` field is filtered before returning: Java schema-validator lines (`com.networknt.schema.*`) and stale logger-lock `SEVERE` warnings are stripped. If any lines were suppressed, `output_lines_suppressed` contains the count and a note is appended to `stdout`. Use `provar.testrun.rca` to inspect the full raw JUnit output.

**Error codes:** `AUTOMATION_TESTRUN_FAILED`, `SF_NOT_FOUND`

Expand Down Expand Up @@ -1084,7 +1090,9 @@ Analyse a completed test run and return a structured Root Cause Analysis report.
| `screenshot_dir` | Path to `Artifacts/` directory if it exists, else `null` |
| `pre_existing` | `true` if the same test failed in a prior Increment run |

**Root cause categories:** `DRIVER_VERSION_MISMATCH`, `LOCATOR_STALE`, `TIMEOUT`, `ASSERTION_FAILED`, `CREDENTIAL_FAILURE`, `MISSING_CALLABLE`, `METADATA_CACHE`, `PAGE_OBJECT_COMPILE`, `CONNECTION_REFUSED`, `DATA_SETUP`, `LICENSE_INVALID`, `UNKNOWN`
**Root cause categories:** `DRIVER_VERSION_MISMATCH`, `LOCATOR_STALE`, `TIMEOUT`, `ASSERTION_FAILED`, `CREDENTIAL_FAILURE`, `MISSING_CALLABLE`, `METADATA_CACHE`, `PAGE_OBJECT_COMPILE`, `CONNECTION_REFUSED`, `DATA_SETUP`, `LICENSE_INVALID`, `SALESFORCE_VALIDATION`, `SALESFORCE_PICKLIST`, `SALESFORCE_REFERENCE`, `SALESFORCE_ACCESS`, `SALESFORCE_TRIGGER`, `UNKNOWN`

Salesforce DML error categories (`SALESFORCE_*`) represent test-data failures — they appear in `failures[].root_cause_category` but are **not** included in `infrastructure_issues`.

**Error codes:** `RESULTS_NOT_CONFIGURED`

Expand Down
53 changes: 51 additions & 2 deletions src/mcp/tools/automationTools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,48 @@ export function registerAutomationConfigLoad(server: McpServer, config: ServerCo
);
}

// ── Testrun output filter ─────────────────────────────────────────────────────

const NOISE_PATTERNS: RegExp[] = [
  /com\.networknt\.schema/,
  /SEVERE.*Failed to configure logger.*\.lck/,
];

/**
 * Filter Provar testrun output before it is returned over MCP.
 *
 * Two noise sources dominate the raw output volume and push the pass/fail
 * summary past the MCP response truncation point: Java schema-validator debug
 * lines (`com.networknt.schema.*`) and stale logger-lock SEVERE warnings.
 * Both are dropped; every other line — including genuine SEVERE failures —
 * is passed through untouched. Consecutive blank lines are collapsed to one
 * so the filtered text stays readable.
 *
 * @param raw - Full stdout (or stderr) text from the Provar CLI run.
 * @returns `filtered` text plus `suppressed`, the number of dropped lines.
 *          When anything was suppressed, a trailing note pointing at
 *          `provar.testrun.rca` is appended to `filtered`.
 */
export function filterTestRunOutput(raw: string): { filtered: string; suppressed: number } {
  let suppressed = 0;
  let previousBlank = false;

  const kept = raw.split(/\r?\n/).reduce<string[]>((acc, original) => {
    // Defensive: split(/\r?\n/) already eats CR before LF, but guard against
    // a stray trailing carriage return anyway.
    const line = original.endsWith('\r') ? original.slice(0, -1) : original;

    if (NOISE_PATTERNS.some((pattern) => pattern.test(line))) {
      suppressed += 1;
      return acc; // dropped — does not affect blank-run tracking
    }

    const blank = line.trim().length === 0;
    if (!(blank && previousBlank)) acc.push(line); // collapse blank runs
    previousBlank = blank;
    return acc;
  }, []);

  const note =
    suppressed > 0
      ? `\n[testrun: ${suppressed} lines suppressed (schema validator / logger noise) — use provar.testrun.rca for full results]`
      : '';
  return { filtered: kept.join('\n') + note, suppressed };
}

// ── Tool: provar.automation.testrun ───────────────────────────────────────────

export function registerAutomationTestRun(server: McpServer): void {
Expand All @@ -183,12 +225,19 @@ export function registerAutomationTestRun(server: McpServer): void {

try {
const result = runSfCommand(['provar', 'automation', 'test', 'run', ...flags], sf_path);
const response = { requestId, exitCode: result.exitCode, stdout: result.stdout, stderr: result.stderr };
const { filtered, suppressed } = filterTestRunOutput(result.stdout);

if (result.exitCode !== 0) {
return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(makeError('AUTOMATION_TESTRUN_FAILED', result.stderr || result.stdout, requestId)) }] };
const { filtered: filteredErr, suppressed: suppressedErr } = filterTestRunOutput(result.stderr || result.stdout);
const errBody = {
...makeError('AUTOMATION_TESTRUN_FAILED', filteredErr, requestId),
...(suppressedErr > 0 ? { output_lines_suppressed: suppressedErr } : {}),
};
return { isError: true as const, content: [{ type: 'text' as const, text: JSON.stringify(errBody) }] };
}

const response: Record<string, unknown> = { requestId, exitCode: result.exitCode, stdout: filtered, stderr: result.stderr };
if (suppressed > 0) response['output_lines_suppressed'] = suppressed;
return { content: [{ type: 'text' as const, text: JSON.stringify(response) }], structuredContent: response };
} catch (err) {
return handleSpawnError(err, requestId, 'provar.automation.testrun');
Expand Down
77 changes: 73 additions & 4 deletions src/mcp/tools/rcaTools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,36 @@ const RCA_RULES: RcaRule[] = [
summary: 'Test assertion failed',
recommendation: 'Verify expected value is correct for current org state',
},
{
category: 'SALESFORCE_VALIDATION',
pattern: /Required fields are missing:\s*\[/i,
summary: 'Salesforce required-field validation failed',
recommendation: 'Ensure all required fields have values; check field-level security for the running user',
},
{
category: 'SALESFORCE_PICKLIST',
pattern: /bad value for restricted picklist field/i,
summary: 'Invalid picklist value used',
recommendation: 'Query valid picklist values (run metadata download or Apex describe); check for trailing spaces or case differences',
},
{
category: 'SALESFORCE_REFERENCE',
pattern: /INVALID_CROSS_REFERENCE_KEY/i,
summary: 'Referenced record ID does not exist or is inaccessible',
recommendation: 'Verify the referenced record exists and the running user has access to it',
},
{
category: 'SALESFORCE_ACCESS',
pattern: /INSUFFICIENT_ACCESS_ON_CROSS_REFERENCE_ENTITY/i,
summary: 'Running user lacks permission on a referenced record',
recommendation: 'Grant the running user appropriate object and record-level permissions',
},
{
category: 'SALESFORCE_TRIGGER',
pattern: /FIELD_CUSTOM_VALIDATION_EXCEPTION/i,
summary: 'A Salesforce validation rule or trigger blocked the DML operation',
recommendation: 'Review validation rules and triggers on the target object; ensure test data satisfies all business rules',
},
{
category: 'CREDENTIAL_FAILURE',
pattern: /InvalidPasswordException|AuthenticationException|INVALID_LOGIN/i,
Expand Down Expand Up @@ -184,6 +214,29 @@ function numericChildDirs(dir: string): number[] {
}
}

/**
 * Locate Provar Increment-mode run directories that sit NEXT TO resultsBase.
 * Provar names them `Results(1)`, `Results(2)`, … in the same parent folder —
 * they are siblings of the base directory, not numeric children inside it.
 *
 * @param resultsBase - Path to the base results directory (e.g. `.../Results`).
 * @returns The positive numeric suffixes of every matching sibling directory
 *          (e.g. `[1, 2, 18]`); empty when the parent is missing or unreadable.
 */
function incrementSiblingDirs(resultsBase: string): number[] {
  const parentDir = path.dirname(resultsBase);
  const baseName = path.basename(resultsBase);
  if (!fs.existsSync(parentDir)) return [];
  try {
    // Escape regex metacharacters so a base name like "Results.v2" matches literally.
    const escaped = baseName.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    const suffixRe = new RegExp(`^${escaped}\\((\\d+)\\)$`);
    const indices: number[] = [];
    for (const entry of fs.readdirSync(parentDir, { withFileTypes: true })) {
      if (!entry.isDirectory()) continue;
      const match = suffixRe.exec(entry.name);
      if (!match) continue;
      const index = parseInt(match[1], 10);
      if (index > 0) indices.push(index); // "(0)" is never a valid Increment run
    }
    return indices;
  } catch {
    // Unreadable parent directory (permissions, race) — treat as "no siblings".
    return [];
  }
}

/**
* Expand ${env.VAR} placeholders in a string using process.env.
*/
Expand Down Expand Up @@ -293,8 +346,20 @@ function resolveResultsLocation(
}

// Increment resolution
// Provar's primary Increment pattern: Results, Results(1), Results(2)… as siblings.
// Legacy fallback: purely-numeric children (Results/1/, Results/2/…).
const siblings = incrementSiblingDirs(resultsBase);
const numericDirs = numericChildDirs(resultsBase);
if (disposition === 'Increment' || numericDirs.length > 0) {
if (disposition === 'Increment' || siblings.length > 0 || numericDirs.length > 0) {
if (siblings.length > 0) {
const targetIndex = run_index ?? Math.max(...siblings);
return {
results_dir: path.join(path.dirname(resultsBase), `${path.basename(resultsBase)}(${targetIndex})`),
run_index: targetIndex,
Comment on lines +354 to +358
Copy link

Copilot AI Apr 15, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

resolveResultsLocation() uses run_index directly when selecting a Results(N) sibling. If a caller passes run_index <= 0 (or a negative number), this will construct a path like Results(0)/Results(-1) and report it as the resolved directory. Consider validating run_index is a positive integer at the tool schema level (e.g. z.number().int().positive()) or returning an error when run_index <= 0.

Copilot uses AI. Check for mistakes.
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed in b8f3c87 — both provar.testrun.report.locate and provar.testrun.rca now use z.number().int().positive().optional() for run_index, so the MCP layer rejects 0 and negative values before they reach resolveResultsLocation.

disposition,
resolution_source,
};
}
if (numericDirs.length > 0) {
const targetIndex = run_index ?? Math.max(...numericDirs);
return {
Expand All @@ -304,7 +369,7 @@ function resolveResultsLocation(
resolution_source,
};
}
// Disposition is Increment but no numeric subdirs yet — fall through to base
// Disposition is Increment but no numbered dirs yet — fall through to base
}

return {
Expand Down Expand Up @@ -336,8 +401,10 @@ export function registerTestRunLocate(server: McpServer): void {
.describe('Explicit override for the results base directory; if provided, skip auto-detection'),
run_index: z
.number()
.int()
.positive()
.optional()
.describe('Which Increment run to target (default: latest)'),
.describe('Which Increment run to target (default: latest); must be a positive integer'),
},
(input) => {
const requestId = makeRequestId();
Expand Down Expand Up @@ -603,8 +670,10 @@ export function registerTestRunRca(server: McpServer): void {
.describe('Explicit override for the results base directory'),
run_index: z
.number()
.int()
.positive()
.optional()
.describe('Which Increment run to target (default: latest)'),
.describe('Which Increment run to target (default: latest); must be a positive integer'),
locate_only: z
.boolean()
.optional()
Expand Down
38 changes: 36 additions & 2 deletions src/mcp/tools/testCaseValidate.ts
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ export function validateTestCase(xmlContent: string, testName?: string): TestCas
severity: 'ERROR',
message: `testCase guid "${tcGuid}" is not a valid UUID v4.`,
applies_to: 'testCase',
suggestion: 'Generate a proper UUID v4 for the guid attribute.',
suggestion: 'Replace with a valid UUID v4 — e.g. crypto.randomUUID(). The 4th segment must begin with 8, 9, a, or b.',
});
}
// TC_013 (registryId) is intentionally not checked here — registryId is a
Expand All @@ -270,6 +270,17 @@ export function validateTestCase(xmlContent: string, testName?: string): TestCas
return finalize(issues, tcId, tcName, 0, xmlContent, testName);
}

// DATA-001: <dataTable> binding is silently ignored in standalone CLI execution
if ('dataTable' in tc && tc['dataTable'] != null) {
issues.push({
rule_id: 'DATA-001',
severity: 'WARNING',
message: 'testCase declares a <dataTable> but CLI standalone execution does not bind CSV column variables — steps using <value class="variable"> references will resolve to null.',
applies_to: 'testCase',
suggestion: 'Use SetValues (Test scope) steps to bind data for standalone CLI execution, or add this test case to a test plan.',
});
}

// Same self-closing guard for <steps/> → fast-xml-parser yields ''
const rawSteps = tc['steps'];
const steps: Record<string, unknown> =
Expand Down Expand Up @@ -307,7 +318,7 @@ function validateApiCall(call: Record<string, unknown>, issues: ValidationIssue[
severity: 'ERROR',
message: `apiCall${label} guid "${callGuid}" is not a valid UUID v4.`,
applies_to: 'apiCall',
suggestion: 'Use proper UUID v4 format.',
suggestion: 'Replace with a valid UUID v4 — e.g. crypto.randomUUID(). The 4th segment must begin with 8, 9, a, or b.',
});
}
if (!apiId) {
Expand Down Expand Up @@ -345,6 +356,29 @@ function validateApiCall(call: Record<string, unknown>, issues: ValidationIssue[
suggestion: 'Use sequential integers for testItemId.',
});
}

// ASSERT-001: AssertValues using UI namedValues format instead of variable format
if (apiId?.includes('AssertValues')) {
const rawArgs = call['arguments'] as Record<string, unknown> | undefined;
if (rawArgs) {
const argRaw = rawArgs['argument'];
const argList: Array<Record<string, unknown>> = !argRaw
? []
: Array.isArray(argRaw)
? (argRaw as Array<Record<string, unknown>>)
: [argRaw as Record<string, unknown>];
const hasValuesArg = argList.some((a) => (a['@_id'] as string | undefined) === 'values');
if (hasValuesArg) {
issues.push({
rule_id: 'ASSERT-001',
severity: 'WARNING',
message: `AssertValues step "${name ?? '(unnamed)'}" uses namedValues format (argument id="values") — designed for UI element attribute assertions. For Apex/SOQL result or variable comparisons this silently passes as null=null.`,
applies_to: 'apiCall',
suggestion: 'Use separate expectedValue, actualValue, and comparisonType arguments for variable or Apex result comparisons.',
});
}
}
}
}

function finalize(
Expand Down
Loading
Loading