
Commit

Merge e5b4735 into 8fcd228
Eric-Arellano committed Aug 23, 2020
2 parents 8fcd228 + e5b4735 commit edf9dc9
Showing 10 changed files with 176 additions and 181 deletions.
146 changes: 67 additions & 79 deletions src/python/pants/backend/python/rules/pytest_runner.py
@@ -5,7 +5,7 @@
import logging
from dataclasses import dataclass
from pathlib import PurePath
from typing import Optional, Tuple
from typing import Optional
from uuid import UUID

from pants.backend.python.rules.coverage import (
@@ -33,7 +33,7 @@
from pants.engine.addresses import Addresses
from pants.engine.fs import AddPrefix, Digest, DigestSubset, MergeDigests, PathGlobs, Snapshot
from pants.engine.internals.uuid import UUIDRequest
from pants.engine.process import FallibleProcessResult, InteractiveProcess
from pants.engine.process import FallibleProcessResult, InteractiveProcess, Process
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import TransitiveTargets
from pants.engine.unions import UnionRule
@@ -61,30 +61,31 @@ def is_conftest(self) -> bool:


@dataclass(frozen=True)
class TestTargetSetup:
test_runner_pex: Pex
args: Tuple[str, ...]
input_digest: Digest
source_roots: Tuple[str, ...]
timeout_seconds: Optional[int]
xml_dir: Optional[str]
junit_family: str
execution_slot_variable: str
class TestSetupRequest:
field_set: PythonTestFieldSet
is_debug: bool


@dataclass(frozen=True)
class TestSetup:
process: Process
results_file_name: Optional[str]

# Prevent this class from being detected by pytest as a test class.
__test__ = False


@rule(level=LogLevel.DEBUG)
async def setup_pytest_for_target(
field_set: PythonTestFieldSet,
request: TestSetupRequest,
pytest: PyTest,
test_subsystem: TestSubsystem,
python_setup: PythonSetup,
coverage_config: CoverageConfig,
coverage_subsystem: CoverageSubsystem,
) -> TestTargetSetup:
test_addresses = Addresses((field_set.address,))
global_options: GlobalOptions,
) -> TestSetup:
test_addresses = Addresses((request.field_set.address,))

transitive_targets = await Get(TransitiveTargets, Addresses, test_addresses)
all_targets = transitive_targets.closure
@@ -154,7 +155,9 @@ async def setup_pytest_for_target(

# Get the file names for the test_target so that we can specify to Pytest precisely which files
# to test, rather than using auto-discovery.
field_set_source_files_request = Get(SourceFiles, SourceFilesRequest([field_set.sources]))
field_set_source_files_request = Get(
SourceFiles, SourceFilesRequest([request.field_set.sources])
)

(
pytest_pex,
@@ -183,56 +186,32 @@
),
)

coverage_args = []
if test_subsystem.use_coverage:
cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (".",)
coverage_args = [
"--cov-report=", # Turn off output.
*itertools.chain.from_iterable(["--cov", cov_path] for cov_path in cov_paths),
]
return TestTargetSetup(
test_runner_pex=test_runner_pex,
args=(*pytest.options.args, *coverage_args, *field_set_source_files.files),
input_digest=input_digest,
source_roots=prepared_sources.source_roots,
timeout_seconds=field_set.timeout.calculate_from_global_options(pytest),
xml_dir=pytest.options.junit_xml_dir,
junit_family=pytest.options.junit_family,
execution_slot_variable=pytest.options.execution_slot_var,
)


# TODO(#10618): Once this is fixed, move `TestTargetSetup` into an `await Get` so that we only set
# up the test if it isn't skipped.
@rule(desc="Run Pytest", level=LogLevel.DEBUG)
async def run_python_test(
field_set: PythonTestFieldSet,
setup: TestTargetSetup,
global_options: GlobalOptions,
test_subsystem: TestSubsystem,
) -> TestResult:
if field_set.is_conftest():
return TestResult.skip(field_set.address)

add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]

output_files = []
# Configure generation of JUnit-compatible test report.
test_results_file = None
if setup.xml_dir:
test_results_file = f"{field_set.address.path_safe_spec}.xml"

results_file_name = None
if pytest.options.junit_xml_dir and not request.is_debug:
results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
add_opts.extend(
(f"--junitxml={test_results_file}", "-o", f"junit_family={setup.junit_family}")
(f"--junitxml={results_file_name}", "-o", f"junit_family={pytest.options.junit_family}")
)
output_files.append(test_results_file)
output_files.append(results_file_name)

# Configure generation of a coverage report.
if test_subsystem.use_coverage:
coverage_args = []
if test_subsystem.use_coverage and not request.is_debug:
output_files.append(".coverage")
cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (".",)
coverage_args = [
"--cov-report=", # Turn off output.
*itertools.chain.from_iterable(["--cov", cov_path] for cov_path in cov_paths),
]

env = {"PYTEST_ADDOPTS": " ".join(add_opts), "PEX_EXTRA_SYS_PATH": ":".join(setup.source_roots)}
extra_env = {
"PYTEST_ADDOPTS": " ".join(add_opts),
"PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
}

if test_subsystem.force:
if test_subsystem.force and not request.is_debug:
# This is a slightly hacky way to force the process to run: since the env var
# value is unique, this input combination will never have been seen before,
# and therefore never cached. The two downsides are:
@@ -241,22 +220,34 @@ async def run_python_test(
# 2. This run will be cached even though it can never be re-used.
# TODO: A more principled way of forcing rules to run?
uuid = await Get(UUID, UUIDRequest())
env["__PANTS_FORCE_TEST_RUN__"] = str(uuid)
extra_env["__PANTS_FORCE_TEST_RUN__"] = str(uuid)

result = await Get(
FallibleProcessResult,
process = await Get(
Process,
PexProcess(
setup.test_runner_pex,
argv=setup.args,
input_digest=setup.input_digest,
output_files=tuple(output_files) if output_files else None,
description=f"Run Pytest for {field_set.address}",
timeout_seconds=setup.timeout_seconds,
extra_env=env,
execution_slot_variable=setup.execution_slot_variable,
test_runner_pex,
argv=(*pytest.options.args, *coverage_args, *field_set_source_files.files),
extra_env=extra_env,
input_digest=input_digest,
output_files=output_files,
timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
execution_slot_variable=pytest.options.execution_slot_var,
description=f"Run Pytest for {request.field_set.address}",
level=LogLevel.DEBUG,
),
)
return TestSetup(process, results_file_name=results_file_name)


@rule(desc="Run Pytest", level=LogLevel.DEBUG)
async def run_python_test(
field_set: PythonTestFieldSet, test_subsystem: TestSubsystem, pytest: PyTest
) -> TestResult:
if field_set.is_conftest():
return TestResult.skip(field_set.address)

setup = await Get(TestSetup, TestSetupRequest(field_set, is_debug=False))
result = await Get(FallibleProcessResult, Process, setup.process)

coverage_data = None
if test_subsystem.use_coverage:
Expand All @@ -269,34 +260,31 @@ async def run_python_test(
logger.warning(f"Failed to generate coverage data for {field_set.address}.")

xml_results_digest = None
if test_results_file:
if setup.results_file_name:
xml_results_snapshot = await Get(
Snapshot, DigestSubset(result.output_digest, PathGlobs([test_results_file]))
Snapshot, DigestSubset(result.output_digest, PathGlobs([setup.results_file_name]))
)
if xml_results_snapshot.files == (test_results_file,):
if xml_results_snapshot.files == (setup.results_file_name,):
xml_results_digest = await Get(
Digest,
AddPrefix(xml_results_snapshot.digest, setup.xml_dir), # type: ignore[arg-type]
Digest, AddPrefix(xml_results_snapshot.digest, pytest.options.junit_xml_dir),
)
else:
logger.warning(f"Failed to generate JUnit XML data for {field_set.address}.")

return TestResult.from_fallible_process_result(
result,
address=field_set.address,
coverage_data=coverage_data,
xml_results=xml_results_digest,
address=field_set.address,
)


@rule(desc="Set up Pytest to run interactively", level=LogLevel.DEBUG)
def debug_python_test(field_set: PythonTestFieldSet, setup: TestTargetSetup) -> TestDebugRequest:
async def debug_python_test(field_set: PythonTestFieldSet) -> TestDebugRequest:
if field_set.is_conftest():
return TestDebugRequest(None)
process = InteractiveProcess(
argv=(setup.test_runner_pex.name, *setup.args), input_digest=setup.input_digest,
)
return TestDebugRequest(process)
setup = await Get(TestSetup, TestSetupRequest(field_set, is_debug=True))
return TestDebugRequest(InteractiveProcess.from_process(setup.process))


def rules():
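
For context on the environment the new setup_pytest_for_target rule assembles, here is a small self-contained sketch (not part of this commit; names and values are illustrative): pytest appends the space-separated flags in PYTEST_ADDOPTS to its command line, the PEX runtime adds the colon-separated entries of PEX_EXTRA_SYS_PATH to sys.path, and the unique throwaway variable is the cache-busting trick described in the --force comment above.

import uuid
from typing import Dict, Iterable


def build_extra_env(
    add_opts: Iterable[str], source_roots: Iterable[str], force: bool = False
) -> Dict[str, str]:
    # PYTEST_ADDOPTS: extra space-separated flags that pytest appends to its CLI args.
    # PEX_EXTRA_SYS_PATH: colon-separated dirs the PEX runtime adds to sys.path.
    env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(source_roots),
    }
    if force:
        # A unique value makes the process inputs unique, so a cached result is never reused.
        env["__PANTS_FORCE_TEST_RUN__"] = str(uuid.uuid4())
    return env


print(build_extra_env(["--color=yes", "--junitxml=results.xml"], ["src/python"], force=True))
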
35 changes: 19 additions & 16 deletions src/python/pants/backend/python/rules/repl.py
@@ -4,6 +4,7 @@
from typing import Tuple

from pants.backend.python.rules.pex import Pex, PexRequest, PexRequirements
from pants.backend.python.rules.pex_environment import PexEnvironment
from pants.backend.python.rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.rules.python_sources import PythonSourceFiles, PythonSourceFilesRequest
from pants.backend.python.subsystems.ipython import IPython
@@ -20,7 +21,7 @@ class PythonRepl(ReplImplementation):


@rule(level=LogLevel.DEBUG)
async def create_python_repl_request(repl: PythonRepl) -> ReplRequest:
async def create_python_repl_request(repl: PythonRepl, pex_env: PexEnvironment) -> ReplRequest:
requirements_request = Get(
Pex,
PexFromTargetsRequest,
@@ -35,20 +36,21 @@ async def create_python_repl_request(repl: PythonRepl) -> ReplRequest:
merged_digest = await Get(
Digest, MergeDigests((requirements_pex.digest, sources.source_files.snapshot.digest))
)

chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots]
return ReplRequest(
digest=merged_digest,
args=(repl.in_chroot(requirements_pex.name),),
env={"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots)},
)
env = {**pex_env.environment_dict, "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots)}

return ReplRequest(digest=merged_digest, args=(repl.in_chroot(requirements_pex.name),), env=env)


class IPythonRepl(ReplImplementation):
name = "ipython"


@rule(level=LogLevel.DEBUG)
async def create_ipython_repl_request(repl: IPythonRepl, ipython: IPython) -> ReplRequest:
async def create_ipython_repl_request(
repl: IPythonRepl, ipython: IPython, pex_env: PexEnvironment
) -> ReplRequest:
# Note that we get an intermediate PexRequest here (instead of going straight to a Pex)
# so that we can get the interpreter constraints for use in ipython_request.
requirements_pex_request = await Get(
@@ -85,18 +87,19 @@ async def create_ipython_repl_request(repl: IPythonRepl, ipython: IPython) -> ReplRequest:
(requirements_pex.digest, sources.source_files.snapshot.digest, ipython_pex.digest)
),
)
chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots]

args: Tuple[str, ...] = (repl.in_chroot(ipython_pex.name),)
if ipython.options.ignore_cwd:
args = args + ("--ignore-cwd",)
return ReplRequest(
digest=merged_digest,
args=args,
env={
"PEX_PATH": repl.in_chroot(requirements_pex_request.output_filename),
"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
},
)

chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots]
env = {
**pex_env.environment_dict,
"PEX_PATH": repl.in_chroot(requirements_pex_request.output_filename),
"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
}

return ReplRequest(digest=merged_digest, args=args, env=env)


def rules():
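
One detail worth noting in the repl changes above, sketched here with hypothetical values rather than real PexEnvironment contents: because pex_env.environment_dict is spread first, any key the rule sets afterwards, such as PEX_EXTRA_SYS_PATH, overrides the base value. Plain Python dict-merge order guarantees that.

# Minimal illustration of the merge order used in create_python_repl_request.
base_env = {"PATH": "/usr/bin", "PEX_EXTRA_SYS_PATH": "ignored"}  # hypothetical base environment
chrooted_source_roots = ["src/python", "tests/python"]  # hypothetical source roots

env = {
    **base_env,  # PexEnvironment-provided values come first...
    "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),  # ...and later keys override them.
}
assert env["PEX_EXTRA_SYS_PATH"] == "src/python:tests/python"
assert env["PATH"] == "/usr/bin"
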
21 changes: 12 additions & 9 deletions src/python/pants/backend/python/rules/run_python_binary.py
@@ -5,6 +5,7 @@

from pants.backend.python.rules.create_python_binary import PythonBinaryFieldSet
from pants.backend.python.rules.pex import Pex, PexRequest
from pants.backend.python.rules.pex_environment import PexEnvironment
from pants.backend.python.rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.rules.python_sources import PythonSourceFiles, PythonSourceFilesRequest
from pants.backend.python.target_types import PythonBinaryDefaults, PythonBinarySources
@@ -26,7 +27,9 @@

@rule(level=LogLevel.DEBUG)
async def create_python_binary_run_request(
field_set: PythonBinaryFieldSet, python_binary_defaults: PythonBinaryDefaults
field_set: PythonBinaryFieldSet,
python_binary_defaults: PythonBinaryDefaults,
pex_env: PexEnvironment,
) -> RunRequest:
entry_point = field_set.entry_point.value
if entry_point is None:
Expand Down Expand Up @@ -87,16 +90,16 @@ def in_chroot(relpath: str) -> str:
return os.path.join("{chroot}", relpath)

chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots]
pex_path = in_chroot(requirements_pex_request.output_filename)
env = {
**pex_env.environment_dict,
"PEX_PATH": in_chroot(requirements_pex_request.output_filename),
"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
}

return RunRequest(
digest=merged_digest,
args=(in_chroot(runner_pex.name), "-m", entry_point),
env={"PEX_PATH": pex_path, "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots)},
digest=merged_digest, args=(in_chroot(runner_pex.name), "-m", entry_point), env=env
)


def rules():
return [
*collect_rules(),
UnionRule(RunFieldSet, PythonBinaryFieldSet),
]
return [*collect_rules(), UnionRule(RunFieldSet, PythonBinaryFieldSet)]
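
For readers unfamiliar with the two PEX runtime variables the run request sets, a brief summary (mine, not text from the commit): PEX_PATH points the runner PEX at additional .pex files whose dependencies should be importable, while PEX_EXTRA_SYS_PATH adds loose source directories to sys.path. A toy builder with placeholder paths:

from typing import Dict, Sequence


def binary_run_env(requirements_pex: str, chrooted_source_roots: Sequence[str]) -> Dict[str, str]:
    # Hypothetical helper mirroring the env built above; the paths are illustrative.
    return {
        # Extra PEX files merged onto the runtime's import path.
        "PEX_PATH": requirements_pex,
        # Colon-separated loose source dirs added to sys.path by the PEX runtime.
        "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
    }


print(binary_run_env("{chroot}/requirements.pex", ["{chroot}/src/python"]))
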
8 changes: 6 additions & 2 deletions src/python/pants/core/goals/test.py
@@ -13,10 +13,10 @@
FieldSetsWithSources,
FieldSetsWithSourcesRequest,
)
from pants.engine import desktop
from pants.engine.addresses import Address
from pants.engine.collection import Collection
from pants.engine.console import Console
from pants.engine.desktop import OpenFiles, OpenFilesRequest
from pants.engine.engine_aware import EngineAwareParameter, EngineAwareReturnType
from pants.engine.fs import Digest, MergeDigests, Workspace
from pants.engine.goal import Goal, GoalSubsystem
@@ -436,7 +436,11 @@ async def run_tests(
coverage_report_files.extend(report_files)

if coverage_report_files and test_subsystem.open_coverage:
desktop.ui_open(console, interactive_runner, coverage_report_files)
open_files = await Get(
OpenFiles, OpenFilesRequest(coverage_report_files, error_if_open_not_found=False)
)
for process in open_files.processes:
interactive_runner.run(process)

return Test(exit_code)

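
The change above swaps the old desktop.ui_open(...) call for a request/product pair: the goal asks the engine for OpenFiles and then runs each returned process interactively. A toy model of that shape, using my own stand-in types rather than the real pants.engine.desktop classes:

from dataclasses import dataclass
from typing import Tuple


@dataclass(frozen=True)
class OpenFilesRequestModel:
    # Stand-in for OpenFilesRequest: which files to open, and whether a missing
    # desktop "open" program should be treated as an error.
    files: Tuple[str, ...]
    error_if_open_not_found: bool = True


@dataclass(frozen=True)
class OpenFilesModel:
    # Stand-in for OpenFiles: one launchable command line per file that can be opened.
    processes: Tuple[Tuple[str, ...], ...]


def run_all(result: OpenFilesModel) -> None:
    # In the real goal each entry is an InteractiveProcess run via InteractiveRunner.
    for argv in result.processes:
        print("would run:", " ".join(argv))


run_all(OpenFilesModel(processes=(("open", "dist/coverage/python/htmlcov/index.html"),)))
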
6 changes: 6 additions & 0 deletions src/python/pants/core/goals/test_test.py
@@ -26,6 +26,7 @@
FieldSetsWithSourcesRequest,
)
from pants.engine.addresses import Address
from pants.engine.desktop import OpenFiles, OpenFilesRequest
from pants.engine.fs import EMPTY_DIGEST, CreateDigest, Digest, FileContent, MergeDigests, Workspace
from pants.engine.process import InteractiveProcess, InteractiveRunner
from pants.engine.target import (
@@ -181,6 +182,11 @@ def mock_coverage_report_generation(
subject_type=CoverageDataCollection,
mock=mock_coverage_report_generation,
),
MockGet(
product_type=OpenFiles,
subject_type=OpenFilesRequest,
mock=lambda _: OpenFiles(()),
),
],
union_membership=union_membership,
)

