22 changes: 5 additions & 17 deletions torch/_inductor/runtime/triton_heuristics.py
@@ -185,14 +185,6 @@ def autotune_hints_to_configs(
     return configs
 
 
-def disable_pointwise_autotuning(inductor_meta):
-    # Autotuning can give different benchmarking results from run to run, and
-    # therefore we disable autotuning when use_deterministic flag is on.
-    if inductor_meta.get("are_deterministic_algorithms_enabled"):
-        return True
-    return not inductor_meta.get("autotune_pointwise", True)
-
-
 def _dump_launch_params(args, kwargs, launcher, kernel_name, grid):
     call_args = []
     call_kwargs = {}
@@ -2583,7 +2575,7 @@ def pointwise(
 
     configs = None
     if len(size_hints) == 1:
-        if disable_pointwise_autotuning(inductor_meta) and not (
+        if not inductor_meta.get("autotune_pointwise", True) and not (
             inductor_meta.get("max_autotune")
             or inductor_meta.get("max_autotune_pointwise")
         ):
@@ -2598,7 +2590,8 @@ def pointwise(
         ]
     if len(size_hints) == 2:
         if (
-            disable_pointwise_autotuning(inductor_meta) or tile_hint == TileHint.SQUARE
+            not inductor_meta.get("autotune_pointwise", True)
+            or tile_hint == TileHint.SQUARE
         ) and not (
             inductor_meta.get("max_autotune")
             or inductor_meta.get("max_autotune_pointwise")
@@ -2615,7 +2608,7 @@ def pointwise(
             *hinted_configs,
         ]
     if len(size_hints) == 3:
-        if disable_pointwise_autotuning(inductor_meta):
+        if not inductor_meta.get("autotune_pointwise", True):
             configs = [triton_config_with_settings(size_hints, 16, 16, 16)]
         else:
             configs = [
@@ -2796,8 +2789,6 @@ def outer_config_opt():
         return configs + [outer_config]
     elif reduction_hint == ReductionHint.OUTER_TINY:
         return configs + [tiny_config]
-    if disable_pointwise_autotuning(inductor_meta):
-        return configs + [make_config(32, 128)]
 
     return configs + [
         contiguous_config,
@@ -2908,7 +2899,7 @@ def _do_filter_due_to_inductor_config():
         return (
             inductor_meta.get("deterministic", False)
             or torch._inductor.config.test_configs.force_filter_reduction_configs
-        )
+        ) or inductor_meta.get("are_deterministic_algorithms_enabled")
 
     if not _do_filter_due_to_inductor_config() or len(configs) == 1:
         # no filtering happening if NOT in deterministic mode
@@ -3161,9 +3152,6 @@ def _persistent_reduction_configs(
         if prefix_is_reduction(prefix):
             c.kwargs.pop(f"{prefix.upper()}BLOCK")
 
-    if disable_pointwise_autotuning(inductor_meta):
-        configs = configs[:1]
-
     return configs
 
 
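Net effect, summarized: the removed disable_pointwise_autotuning helper made are_deterministic_algorithms_enabled turn off pointwise autotuning entirely, while after this patch the pointwise paths consult only the autotune_pointwise flag, and determinism instead flows into the reduction-config filter via _do_filter_due_to_inductor_config. Below is a minimal sketch of that shift; the three helper names are hypothetical, written only for illustration, and the sketch omits the torch._inductor.config.test_configs.force_filter_reduction_configs hook that the real filter also checks.

# Hypothetical sketch of the gating logic before and after this change.
# "inductor_meta" is a plain dict of inductor flags, as in the diff above.

def should_skip_pointwise_autotune_before(inductor_meta: dict) -> bool:
    # Old behavior (the removed disable_pointwise_autotuning): deterministic
    # algorithms also forced pointwise autotuning off.
    if inductor_meta.get("are_deterministic_algorithms_enabled"):
        return True
    return not inductor_meta.get("autotune_pointwise", True)

def should_skip_pointwise_autotune_after(inductor_meta: dict) -> bool:
    # New behavior: pointwise paths consult only the autotune_pointwise flag.
    return not inductor_meta.get("autotune_pointwise", True)

def should_filter_reduction_configs_after(inductor_meta: dict) -> bool:
    # Determinism is now handled here instead: reduction configs get filtered
    # when either deterministic flag is set (test hook omitted for brevity).
    return bool(
        inductor_meta.get("deterministic", False)
        or inductor_meta.get("are_deterministic_algorithms_enabled")
    )

# Example: with deterministic algorithms enabled, pointwise kernels now keep
# their full config list, while reduction configs are filtered instead.
meta = {"are_deterministic_algorithms_enabled": True}
assert should_skip_pointwise_autotune_before(meta) is True
assert should_skip_pointwise_autotune_after(meta) is False
assert should_filter_reduction_configs_after(meta) is True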