[AutoTVM] Re-enable ref_input
#8113
Changes from 6 commits
```diff
@@ -32,6 +32,7 @@
 import typing
 from collections import namedtuple
 from random import getrandbits
+import warnings

 import tvm._ffi
 import tvm.ir.transform
```
```diff
@@ -235,13 +236,30 @@ def __init__(
         self.number = number
         self.repeat = repeat
         self.min_repeat_ms = min_repeat_ms
+        self._ref_input = None

         self.enable_cpu_cache_flush = enable_cpu_cache_flush
         self.cooldown_interval = cooldown_interval
         self.module_loader = module_loader

         self.executor = LocalExecutor(timeout=timeout * (self.n_parallel + 1))

+    @property
+    def ref_input(self):
+        """Fixed input for tuning special operators."""
+        return self._ref_input
+
+    @ref_input.setter
+    def ref_input(self, val):
+        warnings.warn(
+            "You are specifying a fixed input for tuning the operator. "
+            "Be sure your input always fits the operator. Some "
+            "operators may conduct layout transformation during tuning, "
+            "which can lead to unexpected behavior.",
+            RuntimeWarning,
+        )
+        self._ref_input = val
+
     def set_task(self, task):
         self.task = task
```

Review comment (on the `ref_input` docstring): could you qualify "special" (explain it is for operators that cannot handle random input)
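For context, here is a minimal sketch of how a user might drive this new property once the change lands. The `LocalRunner` setup, the array shapes, and the dtype are illustrative assumptions, not part of this diff:

```python
import numpy as np
from tvm import autotvm

# Illustrative setup: LocalRunner subclasses RPCRunner, so it inherits
# the ref_input property added in this PR.
runner = autotvm.LocalRunner(number=4, repeat=3)

# Setting ref_input emits the RuntimeWarning defined above; every
# measurement then reuses these fixed arrays instead of random data.
runner.ref_input = [np.random.rand(128, 128).astype("float32") for _ in range(3)]

measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=runner)
```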
```diff
@@ -308,6 +326,7 @@ def run(self, measure_inputs, build_results):
                 self.min_repeat_ms,
                 self.cooldown_interval,
                 remote_kwargs,
+                self.ref_input,
                 self.enable_cpu_cache_flush,
                 module_loader,
             )
```
```diff
@@ -508,6 +527,7 @@ def run_through_rpc(
     min_repeat_ms,
     cooldown_interval,
     remote_kwargs,
+    ref_input,
     enable_cpu_cache_flush=False,
     module_loader=None,
 ):
```
```diff
@@ -539,6 +559,8 @@ def run_through_rpc(
         The cool down interval between two measurements
     remote_kwargs: dict
         Passed to module_loader(). Ultimately, keyword args to request_remote().
+    ref_input: List of np.ndarray
+        The reference input used for tuning. Empty for randomly filled input.
     enable_cpu_cache_flush: bool
         Whether to flush cache on CPU between repeated measurements.
         Flushing cache can make the measured latency of one operator closer to
```
```diff
@@ -573,18 +595,22 @@ def run_through_rpc(
             f_preproc=f_prepare,
         )

-        try:
-            random_fill = remote.get_function("tvm.contrib.random.random_fill")
-        except AttributeError:
-            raise AttributeError(
-                "Please make sure USE_RANDOM is ON in the config.cmake " "on the remote devices"
-            )
-        args = [nd.empty(x[0], x[1], dev) for x in build_result.arg_info]
-        if "scatter" not in measure_input.task.name:
-            # the index tensor of scatter op cannot be randomly initialized
-            for arg in args:
-                random_fill(arg)
-        dev.sync()
+        if ref_input:
+            args = [nd.array(x, device=dev) for x in ref_input]
+        else:
+            try:
+                random_fill = remote.get_function("tvm.contrib.random.random_fill")
+            except AttributeError:
+                raise AttributeError(
+                    "Please make sure USE_RANDOM is ON in the config.cmake "
+                    "on the remote devices"
+                )
+            args = [nd.empty(x[0], x[1], dev) for x in build_result.arg_info]
+            if "scatter" not in measure_input.task.name:
+                # the index tensor of scatter op cannot be randomly initialized
+                for arg in args:
+                    random_fill(arg)
+            dev.sync()

         costs = time_f(*args).results
```

areusch marked this conversation as resolved.
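Outside of the RPC machinery, the two branches above amount to the following. This is a local sketch under assumed conditions: a CPU device, a single 4x4 float32 buffer, and `tvm.get_global_func` standing in for `remote.get_function`; the fallback path still requires TVM built with `USE_RANDOM=ON`:

```python
import numpy as np
import tvm
from tvm import nd

dev = tvm.cpu(0)
ref_input = [np.random.rand(4, 4).astype("float32")]  # illustrative fixed input

if ref_input:
    # New path: fixed inputs are copied to the target device verbatim.
    args = [nd.array(x, device=dev) for x in ref_input]
else:
    # Fallback path: allocate empty buffers and fill them on-device.
    random_fill = tvm.get_global_func("tvm.contrib.random.random_fill")
    args = [nd.empty((4, 4), "float32", dev)]
    for arg in args:
        random_fill(arg)
    dev.sync()
```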
In the test file:
```diff
@@ -26,6 +26,8 @@
 from test_autotvm_common import DummyRunner, bad_matmul, get_sample_task
 from tvm import autotvm
 from tvm.autotvm.measure.measure import MeasureErrorNo, MeasureResult
+from tvm.autotvm import measure
+from inspect import Signature


 def test_task_tuner_without_measurement():
```
```diff
@@ -60,8 +62,24 @@ def test_task_tuner_without_measurement_spawn():
     p.join()


+def test_task_runner_with_ref_input():
+    """test runner ref_input without measurement"""
+    refinp = [np.random.rand(128, 128) for i in range(3)]
+    measure_option = autotvm.measure_option(builder="local", runner="local", ref_input=refinp)
+
+    class DummyExecutor(measure.executor.Executor):
+        def submit(self, func, *args, **kwargs):
+            sig = Signature.from_callable(measure.measure_methods.run_through_rpc)
+            assert sig.bind(*args, **kwargs).arguments["ref_input"] == refinp
+            return measure.local_executor.LocalFutureNoFork(None)
+
+    measure_option["runner"].executor = DummyExecutor()
+    measure_option["runner"].run([None], [None])
+
+
 if __name__ == "__main__":
     logging.basicConfig(level=logging.INFO)

     test_task_tuner_without_measurement()
     test_task_tuner_without_measurement_spawn()
+    test_task_runner_with_ref_input()
```

Review comment (on the assert above): can you set a variable here in the outer …
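For readers unfamiliar with the `inspect` trick in the test above: `Signature.bind` maps positional and keyword arguments onto parameter names, which lets the dummy executor assert that `ref_input` was forwarded without running the real measurement. A self-contained sketch (the function `f` here is made up for illustration):

```python
from inspect import Signature

def f(a, b, ref_input=None):  # stand-in for run_through_rpc
    pass

sig = Signature.from_callable(f)
bound = sig.bind(1, 2, ref_input=[3])
# bound.arguments maps each parameter name to the value it received
assert bound.arguments["ref_input"] == [3]
```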
Review comment: apologies, but on looking at this change in code now, i think it may be a bit too fine-grained to add directly to `measure_option`. it seems like we should either encapsulate the per-tuning-run options in e.g. an `operator_opts` kwarg to `measure_option`, or just keep them in `RPCRunner` for now. it seems like if we continue with this pattern, we'd want to do an `operator_opts` reorganization at some future point anyhow, so i'd rather not start down that path for one option right now. i suggest we stick with the `RPCRunner` property for now, then improve the interface as we add more options. what do you think?

Reply: yes, it feels strange when `measure_option` is shared by multiple tasks. I'm fine with reverting the `measure_option` change; the runner property will be enough for now. maybe someday we can separate general opts and operator-specific opts for the tuner.