switch testing output to test result panel (#22039)
closes #21861 and
related issues

---------

Co-authored-by: Courtney Webster <60238438+cwebster-99@users.noreply.github.com>
eleanorjboyd and cwebster-99 committed Oct 11, 2023
1 parent 055a352 commit 1dd8a4b
Showing 14 changed files with 298 additions and 83 deletions.
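At a high level, this change stops piping failure text into the test run's raw output (via `appendOutput`/`fixLogLines`) and instead attaches it to the affected test item as a `TestMessage`, so VS Code surfaces it in the Test Results panel. Below is a minimal sketch of that pattern using the VS Code testing API; `reportFailure` and its parameters are illustrative, not the extension's actual code.

```typescript
import * as vscode from 'vscode';

// Sketch only: report a failure through the Test Results panel rather than raw run output.
export function reportFailure(
    run: vscode.TestRun,
    item: vscode.TestItem,
    messageText: string,
    traceback: string,
): void {
    const message = new vscode.TestMessage(`${messageText}\r\n${traceback}\r\n`);
    if (item.uri && item.range) {
        // A location lets VS Code show the failure inline in the editor as well.
        message.location = new vscode.Location(item.uri, item.range);
    }
    // Before this change the adapter also called run.appendOutput(fixLogLines(text));
    // now the TestMessage alone carries the failure details.
    run.failed(item, message);
}
```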
35 changes: 35 additions & 0 deletions pythonFiles/tests/pytestadapter/.data/test_logging.py
@@ -0,0 +1,35 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import sys


def test_logging2(caplog):
logger = logging.getLogger(__name__)
caplog.set_level(logging.DEBUG) # Set minimum log level to capture

logger.debug("This is a debug message.")
logger.info("This is an info message.")
logger.warning("This is a warning message.")
logger.error("This is an error message.")
logger.critical("This is a critical message.")

# Printing to stdout and stderr
print("This is a stdout message.")
print("This is a stderr message.", file=sys.stderr)
assert False


def test_logging(caplog):
logger = logging.getLogger(__name__)
caplog.set_level(logging.DEBUG) # Set minimum log level to capture

logger.debug("This is a debug message.")
logger.info("This is an info message.")
logger.warning("This is a warning message.")
logger.error("This is an error message.")
logger.critical("This is a critical message.")

# Printing to stdout and stderr
print("This is a stdout message.")
print("This is a stderr message.", file=sys.stderr)
28 changes: 28 additions & 0 deletions pythonFiles/tests/pytestadapter/expected_execution_test_output.py
@@ -596,3 +596,31 @@
"subtest": None,
}
}


# This is the expected output for the test logging file.
# └── test_logging.py
# └── test_logging2: failure
# └── test_logging: success
test_logging_path = TEST_DATA_PATH / "test_logging.py"

logging_test_expected_execution_output = {
get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): {
"test": get_absolute_test_id(
"test_logging.py::test_logging2", test_logging_path
),
"outcome": "failure",
"message": "ERROR MESSAGE",
"traceback": None,
"subtest": None,
},
get_absolute_test_id("test_logging.py::test_logging", test_logging_path): {
"test": get_absolute_test_id(
"test_logging.py::test_logging", test_logging_path
),
"outcome": "success",
"message": None,
"traceback": None,
"subtest": None,
},
}
1 change: 1 addition & 0 deletions pythonFiles/tests/pytestadapter/helpers.py
@@ -129,6 +129,7 @@ def runner_with_cwd(
"pytest",
"-p",
"vscode_pytest",
"-s",
] + args
listener: socket.socket = create_server()
_, port = listener.getsockname()
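The new `-s` argument is pytest's shorthand for `--capture=no`, so output written by tests (print statements, writes to stderr) passes straight through to the process's stdout/stderr instead of being captured by pytest. Roughly, the helper now assembles a command shaped like the sketch below; the test ID is a placeholder, not the helper's exact value.

```python
import sys

# Illustrative only: the shape of the pytest invocation built by the test helper,
# now including "-s" (alias for --capture=no) so test stdout/stderr is not swallowed.
args = ["test_logging.py::test_logging2"]
process_args = [sys.executable, "-m", "pytest", "-p", "vscode_pytest", "-s"] + args
print(" ".join(process_args))
```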
27 changes: 17 additions & 10 deletions pythonFiles/tests/pytestadapter/test_execution.py
@@ -215,23 +215,30 @@ def test_bad_id_error_execution():
],
expected_execution_test_output.doctest_pytest_expected_execution_output,
),
(
["test_logging.py::test_logging2", "test_logging.py::test_logging"],
expected_execution_test_output.logging_test_expected_execution_output,
),
],
)
def test_pytest_execution(test_ids, expected_const):
"""
Test that pytest execution works as expected where running pytest is always successful
but the actual test results are both successes and failures:
1. uf_execution_expected_output: unittest tests run on multiple files.
2. uf_single_file_expected_output: test run on a single file.
3. uf_single_method_execution_expected_output: test run on a single method in a file.
4. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
5. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
6. dual_level_nested_folder_execution_expected_output: test run on a file with one test file
1. skip_tests_execution_expected_output: test run on a file with skipped tests.
2. error_raised_exception_execution_expected_output: test run on a file that raises an exception.
3. uf_execution_expected_output: unittest tests run on multiple files.
4. uf_single_file_expected_output: test run on a single file.
5. uf_single_method_execution_expected_output: test run on a single method in a file.
6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file
at the top level and one test file in a nested folder.
7. double_nested_folder_expected_execution_output: test run on a double nested folder.
8. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
9. single_parametrize_tests_expected_execution_output: test run on single parametrize test.
10. doctest_pytest_expected_execution_output: test run on doctest file.
9. double_nested_folder_expected_execution_output: test run on a double nested folder.
10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
11. single_parametrize_tests_expected_execution_output: test run on single parametrize test.
12. doctest_pytest_expected_execution_output: test run on doctest file.
13. logging_test_expected_execution_output: test run on a file with logging.
Keyword arguments:
2 changes: 0 additions & 2 deletions pythonFiles/unittestadapter/execution.py
@@ -293,8 +293,6 @@ def post_response(
)
# Clear the buffer as complete JSON object is received
buffer = b""

# Process the JSON data
break
except json.JSONDecodeError:
# JSON decoding error, the complete JSON object is not yet received
2 changes: 0 additions & 2 deletions pythonFiles/vscode_pytest/run_pytest_script.py
@@ -51,8 +51,6 @@
)
# Clear the buffer as complete JSON object is received
buffer = b""

# Process the JSON data
print("Received JSON data in run script")
break
except json.JSONDecodeError:
48 changes: 18 additions & 30 deletions src/client/testing/testController/common/resultResolver.ts
@@ -20,7 +20,7 @@ import { clearAllChildren, createErrorTestItem, getTestCaseNodes } from './testI
import { sendTelemetryEvent } from '../../../telemetry';
import { EventName } from '../../../telemetry/constants';
import { splitLines } from '../../../common/stringUtils';
import { buildErrorNodeOptions, fixLogLines, populateTestTree, splitTestNameWithRegex } from './utils';
import { buildErrorNodeOptions, populateTestTree, splitTestNameWithRegex } from './utils';
import { Deferred } from '../../../common/utils/async';

export class PythonResultResolver implements ITestResultResolver {
@@ -151,15 +151,16 @@ export class PythonResultResolver implements ITestResultResolver {
const tempArr: TestItem[] = getTestCaseNodes(i);
testCases.push(...tempArr);
});
const testItem = rawTestExecData.result[keyTemp];

if (rawTestExecData.result[keyTemp].outcome === 'error') {
const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? '';
if (testItem.outcome === 'error') {
const rawTraceback = testItem.traceback ?? '';
const traceback = splitLines(rawTraceback, {
trim: false,
removeEmptyEntries: true,
}).join('\r\n');
const text = `${rawTestExecData.result[keyTemp].test} failed with error: ${
rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome
const text = `${testItem.test} failed with error: ${
testItem.message ?? testItem.outcome
}\r\n${traceback}\r\n`;
const message = new TestMessage(text);

@@ -170,23 +171,17 @@
if (indiItem.uri && indiItem.range) {
message.location = new Location(indiItem.uri, indiItem.range);
runInstance.errored(indiItem, message);
runInstance.appendOutput(fixLogLines(text));
}
}
});
} else if (
rawTestExecData.result[keyTemp].outcome === 'failure' ||
rawTestExecData.result[keyTemp].outcome === 'passed-unexpected'
) {
const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? '';
} else if (testItem.outcome === 'failure' || testItem.outcome === 'passed-unexpected') {
const rawTraceback = testItem.traceback ?? '';
const traceback = splitLines(rawTraceback, {
trim: false,
removeEmptyEntries: true,
}).join('\r\n');

const text = `${rawTestExecData.result[keyTemp].test} failed: ${
rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome
}\r\n${traceback}\r\n`;
const text = `${testItem.test} failed: ${testItem.message ?? testItem.outcome}\r\n${traceback}\r\n`;
const message = new TestMessage(text);

// note that keyTemp is a runId for unittest library...
@@ -197,14 +192,10 @@
if (indiItem.uri && indiItem.range) {
message.location = new Location(indiItem.uri, indiItem.range);
runInstance.failed(indiItem, message);
runInstance.appendOutput(fixLogLines(text));
}
}
});
} else if (
rawTestExecData.result[keyTemp].outcome === 'success' ||
rawTestExecData.result[keyTemp].outcome === 'expected-failure'
) {
} else if (testItem.outcome === 'success' || testItem.outcome === 'expected-failure') {
const grabTestItem = this.runIdToTestItem.get(keyTemp);
const grabVSid = this.runIdToVSid.get(keyTemp);
if (grabTestItem !== undefined) {
@@ -216,7 +207,7 @@
}
});
}
} else if (rawTestExecData.result[keyTemp].outcome === 'skipped') {
} else if (testItem.outcome === 'skipped') {
const grabTestItem = this.runIdToTestItem.get(keyTemp);
const grabVSid = this.runIdToVSid.get(keyTemp);
if (grabTestItem !== undefined) {
@@ -228,11 +219,11 @@
}
});
}
} else if (rawTestExecData.result[keyTemp].outcome === 'subtest-failure') {
} else if (testItem.outcome === 'subtest-failure') {
// split on [] or () based on how the subtest is setup.
const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp);
const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
const data = rawTestExecData.result[keyTemp];
const data = testItem;
// find the subtest's parent test item
if (parentTestItem) {
const subtestStats = this.subTestStats.get(parentTestCaseId);
@@ -243,20 +234,19 @@
failed: 1,
passed: 0,
});
runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`));
// clear since subtest items don't persist between runs
clearAllChildren(parentTestItem);
}
const subTestItem = this.testController?.createTestItem(subtestId, subtestId);
runInstance.appendOutput(fixLogLines(`${subtestId} Failed\r\n`));
// create a new test item for the subtest
if (subTestItem) {
const traceback = data.traceback ?? '';
const text = `${data.subtest} Failed: ${data.message ?? data.outcome}\r\n${traceback}\r\n`;
runInstance.appendOutput(fixLogLines(text));
const text = `${data.subtest} failed: ${
testItem.message ?? testItem.outcome
}\r\n${traceback}\r\n`;
parentTestItem.children.add(subTestItem);
runInstance.started(subTestItem);
const message = new TestMessage(rawTestExecData?.result[keyTemp].message ?? '');
const message = new TestMessage(text);
if (parentTestItem.uri && parentTestItem.range) {
message.location = new Location(parentTestItem.uri, parentTestItem.range);
}
@@ -267,7 +257,7 @@
} else {
throw new Error('Parent test item not found');
}
} else if (rawTestExecData.result[keyTemp].outcome === 'subtest-success') {
} else if (testItem.outcome === 'subtest-success') {
// split on [] or () based on how the subtest is setup.
const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp);
const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
@@ -279,7 +269,6 @@
subtestStats.passed += 1;
} else {
this.subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 });
runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`));
// clear since subtest items don't persist between runs
clearAllChildren(parentTestItem);
}
@@ -289,7 +278,6 @@
parentTestItem.children.add(subTestItem);
runInstance.started(subTestItem);
runInstance.passed(subTestItem);
runInstance.appendOutput(fixLogLines(`${subtestId} Passed\r\n`));
} else {
throw new Error('Unable to create new child node for subtest');
}
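The subtest branches follow the same idea: instead of appending "[subtests]" headers and per-subtest lines to the run output, each failed subtest gets its own child TestItem and the combined message-plus-traceback text becomes the TestMessage. A simplified sketch of that failure path is shown below; the function name and parameters are illustrative, not the extension's exact code.

```typescript
import * as vscode from 'vscode';

// Sketch only: attach a failed subtest as a child item and report it via TestMessage.
function reportSubtestFailure(
    controller: vscode.TestController,
    run: vscode.TestRun,
    parentItem: vscode.TestItem,
    subtestId: string,
    messageText: string,
    traceback: string,
): void {
    const subTestItem = controller.createTestItem(subtestId, subtestId);
    parentItem.children.add(subTestItem);
    run.started(subTestItem);

    const message = new vscode.TestMessage(`${messageText}\r\n${traceback}\r\n`);
    if (parentItem.uri && parentItem.range) {
        // Subtests are dynamic, so the parent's location stands in for them.
        message.location = new vscode.Location(parentItem.uri, parentItem.range);
    }
    run.failed(subTestItem, message);
}
```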
