Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Revert "Fix run, repl, and test --debug to have hermetic environments (#10668)" #10688

Merged
merged 1 commit on Aug 24, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
146 changes: 79 additions & 67 deletions src/python/pants/backend/python/rules/pytest_runner.py
Expand Up @@ -5,7 +5,7 @@
import logging
from dataclasses import dataclass
from pathlib import PurePath
from typing import Optional
from typing import Optional, Tuple
from uuid import UUID

from pants.backend.python.rules.coverage import (
Expand Down Expand Up @@ -33,7 +33,7 @@
from pants.engine.addresses import Addresses
from pants.engine.fs import AddPrefix, Digest, DigestSubset, MergeDigests, PathGlobs, Snapshot
from pants.engine.internals.uuid import UUIDRequest
from pants.engine.process import FallibleProcessResult, InteractiveProcess, Process
from pants.engine.process import FallibleProcessResult, InteractiveProcess
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import TransitiveTargets
from pants.engine.unions import UnionRule
Expand Down Expand Up @@ -61,31 +61,30 @@ def is_conftest(self) -> bool:


@dataclass(frozen=True)
class TestSetupRequest:
field_set: PythonTestFieldSet
is_debug: bool


@dataclass(frozen=True)
class TestSetup:
process: Process
results_file_name: Optional[str]
class TestTargetSetup:
test_runner_pex: Pex
args: Tuple[str, ...]
input_digest: Digest
source_roots: Tuple[str, ...]
timeout_seconds: Optional[int]
xml_dir: Optional[str]
junit_family: str
execution_slot_variable: str

# Prevent this class from being detected by pytest as a test class.
__test__ = False


@rule(level=LogLevel.DEBUG)
async def setup_pytest_for_target(
request: TestSetupRequest,
field_set: PythonTestFieldSet,
pytest: PyTest,
test_subsystem: TestSubsystem,
python_setup: PythonSetup,
coverage_config: CoverageConfig,
coverage_subsystem: CoverageSubsystem,
global_options: GlobalOptions,
) -> TestSetup:
test_addresses = Addresses((request.field_set.address,))
) -> TestTargetSetup:
test_addresses = Addresses((field_set.address,))

transitive_targets = await Get(TransitiveTargets, Addresses, test_addresses)
all_targets = transitive_targets.closure
Expand Down Expand Up @@ -155,9 +154,7 @@ async def setup_pytest_for_target(

# Get the file names for the test_target so that we can specify to Pytest precisely which files
# to test, rather than using auto-discovery.
field_set_source_files_request = Get(
SourceFiles, SourceFilesRequest([request.field_set.sources])
)
field_set_source_files_request = Get(SourceFiles, SourceFilesRequest([field_set.sources]))

(
pytest_pex,
Expand Down Expand Up @@ -186,32 +183,56 @@ async def setup_pytest_for_target(
),
)

add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
output_files = []

results_file_name = None
if pytest.options.junit_xml_dir and not request.is_debug:
results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
add_opts.extend(
(f"--junitxml={results_file_name}", "-o", f"junit_family={pytest.options.junit_family}")
)
output_files.append(results_file_name)

coverage_args = []
if test_subsystem.use_coverage and not request.is_debug:
output_files.append(".coverage")
if test_subsystem.use_coverage:
cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (".",)
coverage_args = [
"--cov-report=", # Turn off output.
*itertools.chain.from_iterable(["--cov", cov_path] for cov_path in cov_paths),
]
return TestTargetSetup(
test_runner_pex=test_runner_pex,
args=(*pytest.options.args, *coverage_args, *field_set_source_files.files),
input_digest=input_digest,
source_roots=prepared_sources.source_roots,
timeout_seconds=field_set.timeout.calculate_from_global_options(pytest),
xml_dir=pytest.options.junit_xml_dir,
junit_family=pytest.options.junit_family,
execution_slot_variable=pytest.options.execution_slot_var,
)

extra_env = {
"PYTEST_ADDOPTS": " ".join(add_opts),
"PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
}

if test_subsystem.force and not request.is_debug:
# TODO(#10618): Once this is fixed, move `TestTargetSetup` into an `await Get` so that we only set
# up the test if it isn't skipped.
@rule(desc="Run Pytest", level=LogLevel.DEBUG)
async def run_python_test(
field_set: PythonTestFieldSet,
setup: TestTargetSetup,
global_options: GlobalOptions,
test_subsystem: TestSubsystem,
) -> TestResult:
if field_set.is_conftest():
return TestResult.skip(field_set.address)

add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]

output_files = []
# Configure generation of JUnit-compatible test report.
test_results_file = None
if setup.xml_dir:
test_results_file = f"{field_set.address.path_safe_spec}.xml"
add_opts.extend(
(f"--junitxml={test_results_file}", "-o", f"junit_family={setup.junit_family}")
)
output_files.append(test_results_file)

# Configure generation of a coverage report.
if test_subsystem.use_coverage:
output_files.append(".coverage")

env = {"PYTEST_ADDOPTS": " ".join(add_opts), "PEX_EXTRA_SYS_PATH": ":".join(setup.source_roots)}

if test_subsystem.force:
# This is a slightly hacky way to force the process to run: since the env var
# value is unique, this input combination will never have been seen before,
# and therefore never cached. The two downsides are:
Expand All @@ -220,34 +241,22 @@ async def setup_pytest_for_target(
# 2. This run will be cached even though it can never be re-used.
# TODO: A more principled way of forcing rules to run?
uuid = await Get(UUID, UUIDRequest())
extra_env["__PANTS_FORCE_TEST_RUN__"] = str(uuid)
env["__PANTS_FORCE_TEST_RUN__"] = str(uuid)

process = await Get(
Process,
result = await Get(
FallibleProcessResult,
PexProcess(
test_runner_pex,
argv=(*pytest.options.args, *coverage_args, *field_set_source_files.files),
extra_env=extra_env,
input_digest=input_digest,
output_files=output_files,
timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
execution_slot_variable=pytest.options.execution_slot_var,
description=f"Run Pytest for {request.field_set.address}",
setup.test_runner_pex,
argv=setup.args,
input_digest=setup.input_digest,
output_files=tuple(output_files) if output_files else None,
description=f"Run Pytest for {field_set.address}",
timeout_seconds=setup.timeout_seconds,
extra_env=env,
execution_slot_variable=setup.execution_slot_variable,
level=LogLevel.DEBUG,
),
)
return TestSetup(process, results_file_name=results_file_name)


@rule(desc="Run Pytest", level=LogLevel.DEBUG)
async def run_python_test(
field_set: PythonTestFieldSet, test_subsystem: TestSubsystem, pytest: PyTest
) -> TestResult:
if field_set.is_conftest():
return TestResult.skip(field_set.address)

setup = await Get(TestSetup, TestSetupRequest(field_set, is_debug=False))
result = await Get(FallibleProcessResult, Process, setup.process)

coverage_data = None
if test_subsystem.use_coverage:
Expand All @@ -260,31 +269,34 @@ async def run_python_test(
logger.warning(f"Failed to generate coverage data for {field_set.address}.")

xml_results_digest = None
if setup.results_file_name:
if test_results_file:
xml_results_snapshot = await Get(
Snapshot, DigestSubset(result.output_digest, PathGlobs([setup.results_file_name]))
Snapshot, DigestSubset(result.output_digest, PathGlobs([test_results_file]))
)
if xml_results_snapshot.files == (setup.results_file_name,):
if xml_results_snapshot.files == (test_results_file,):
xml_results_digest = await Get(
Digest, AddPrefix(xml_results_snapshot.digest, pytest.options.junit_xml_dir),
Digest,
AddPrefix(xml_results_snapshot.digest, setup.xml_dir), # type: ignore[arg-type]
)
else:
logger.warning(f"Failed to generate JUnit XML data for {field_set.address}.")

return TestResult.from_fallible_process_result(
result,
address=field_set.address,
coverage_data=coverage_data,
xml_results=xml_results_digest,
address=field_set.address,
)


@rule(desc="Set up Pytest to run interactively", level=LogLevel.DEBUG)
async def debug_python_test(field_set: PythonTestFieldSet) -> TestDebugRequest:
def debug_python_test(field_set: PythonTestFieldSet, setup: TestTargetSetup) -> TestDebugRequest:
if field_set.is_conftest():
return TestDebugRequest(None)
setup = await Get(TestSetup, TestSetupRequest(field_set, is_debug=True))
return TestDebugRequest(InteractiveProcess.from_process(setup.process))
process = InteractiveProcess(
argv=(setup.test_runner_pex.name, *setup.args), input_digest=setup.input_digest,
)
return TestDebugRequest(process)


def rules():
Expand Down
35 changes: 16 additions & 19 deletions src/python/pants/backend/python/rules/repl.py
Expand Up @@ -4,7 +4,6 @@
from typing import Tuple

from pants.backend.python.rules.pex import Pex, PexRequest, PexRequirements
from pants.backend.python.rules.pex_environment import PexEnvironment
from pants.backend.python.rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.rules.python_sources import PythonSourceFiles, PythonSourceFilesRequest
from pants.backend.python.subsystems.ipython import IPython
Expand All @@ -21,7 +20,7 @@ class PythonRepl(ReplImplementation):


@rule(level=LogLevel.DEBUG)
async def create_python_repl_request(repl: PythonRepl, pex_env: PexEnvironment) -> ReplRequest:
async def create_python_repl_request(repl: PythonRepl) -> ReplRequest:
requirements_request = Get(
Pex,
PexFromTargetsRequest,
Expand All @@ -36,21 +35,20 @@ async def create_python_repl_request(repl: PythonRepl, pex_env: PexEnvironment)
merged_digest = await Get(
Digest, MergeDigests((requirements_pex.digest, sources.source_files.snapshot.digest))
)

chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots]
env = {**pex_env.environment_dict, "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots)}

return ReplRequest(digest=merged_digest, args=(repl.in_chroot(requirements_pex.name),), env=env)
return ReplRequest(
digest=merged_digest,
args=(repl.in_chroot(requirements_pex.name),),
env={"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots)},
)


class IPythonRepl(ReplImplementation):
name = "ipython"


@rule(level=LogLevel.DEBUG)
async def create_ipython_repl_request(
repl: IPythonRepl, ipython: IPython, pex_env: PexEnvironment
) -> ReplRequest:
async def create_ipython_repl_request(repl: IPythonRepl, ipython: IPython) -> ReplRequest:
# Note that we get an intermediate PexRequest here (instead of going straight to a Pex)
# so that we can get the interpreter constraints for use in ipython_request.
requirements_pex_request = await Get(
Expand Down Expand Up @@ -87,19 +85,18 @@ async def create_ipython_repl_request(
(requirements_pex.digest, sources.source_files.snapshot.digest, ipython_pex.digest)
),
)

chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots]
args: Tuple[str, ...] = (repl.in_chroot(ipython_pex.name),)
if ipython.options.ignore_cwd:
args = args + ("--ignore-cwd",)

chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots]
env = {
**pex_env.environment_dict,
"PEX_PATH": repl.in_chroot(requirements_pex_request.output_filename),
"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
}

return ReplRequest(digest=merged_digest, args=args, env=env)
return ReplRequest(
digest=merged_digest,
args=args,
env={
"PEX_PATH": repl.in_chroot(requirements_pex_request.output_filename),
"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
},
)


def rules():
Expand Down
21 changes: 9 additions & 12 deletions src/python/pants/backend/python/rules/run_python_binary.py
Expand Up @@ -5,7 +5,6 @@

from pants.backend.python.rules.create_python_binary import PythonBinaryFieldSet
from pants.backend.python.rules.pex import Pex, PexRequest
from pants.backend.python.rules.pex_environment import PexEnvironment
from pants.backend.python.rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.rules.python_sources import PythonSourceFiles, PythonSourceFilesRequest
from pants.backend.python.target_types import PythonBinaryDefaults, PythonBinarySources
Expand All @@ -27,9 +26,7 @@

@rule(level=LogLevel.DEBUG)
async def create_python_binary_run_request(
field_set: PythonBinaryFieldSet,
python_binary_defaults: PythonBinaryDefaults,
pex_env: PexEnvironment,
field_set: PythonBinaryFieldSet, python_binary_defaults: PythonBinaryDefaults
) -> RunRequest:
entry_point = field_set.entry_point.value
if entry_point is None:
Expand Down Expand Up @@ -90,16 +87,16 @@ def in_chroot(relpath: str) -> str:
return os.path.join("{chroot}", relpath)

chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots]
env = {
**pex_env.environment_dict,
"PEX_PATH": in_chroot(requirements_pex_request.output_filename),
"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
}

pex_path = in_chroot(requirements_pex_request.output_filename)
return RunRequest(
digest=merged_digest, args=(in_chroot(runner_pex.name), "-m", entry_point), env=env
digest=merged_digest,
args=(in_chroot(runner_pex.name), "-m", entry_point),
env={"PEX_PATH": pex_path, "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots)},
)


def rules():
return [*collect_rules(), UnionRule(RunFieldSet, PythonBinaryFieldSet)]
return [
*collect_rules(),
UnionRule(RunFieldSet, PythonBinaryFieldSet),
]
8 changes: 2 additions & 6 deletions src/python/pants/core/goals/test.py
Expand Up @@ -13,10 +13,10 @@
FieldSetsWithSources,
FieldSetsWithSourcesRequest,
)
from pants.engine import desktop
from pants.engine.addresses import Address
from pants.engine.collection import Collection
from pants.engine.console import Console
from pants.engine.desktop import OpenFiles, OpenFilesRequest
from pants.engine.engine_aware import EngineAwareParameter, EngineAwareReturnType
from pants.engine.fs import Digest, MergeDigests, Workspace
from pants.engine.goal import Goal, GoalSubsystem
Expand Down Expand Up @@ -436,11 +436,7 @@ async def run_tests(
coverage_report_files.extend(report_files)

if coverage_report_files and test_subsystem.open_coverage:
open_files = await Get(
OpenFiles, OpenFilesRequest(coverage_report_files, error_if_open_not_found=False)
)
for process in open_files.processes:
interactive_runner.run(process)
desktop.ui_open(console, interactive_runner, coverage_report_files)

return Test(exit_code)

Expand Down
6 changes: 0 additions & 6 deletions src/python/pants/core/goals/test_test.py
Expand Up @@ -26,7 +26,6 @@
FieldSetsWithSourcesRequest,
)
from pants.engine.addresses import Address
from pants.engine.desktop import OpenFiles, OpenFilesRequest
from pants.engine.fs import EMPTY_DIGEST, CreateDigest, Digest, FileContent, MergeDigests, Workspace
from pants.engine.process import InteractiveProcess, InteractiveRunner
from pants.engine.target import (
Expand Down Expand Up @@ -182,11 +181,6 @@ def mock_coverage_report_generation(
subject_type=CoverageDataCollection,
mock=mock_coverage_report_generation,
),
MockGet(
product_type=OpenFiles,
subject_type=OpenFilesRequest,
mock=lambda _: OpenFiles(()),
),
],
union_membership=union_membership,
)
Expand Down