diff --git a/apps/extension/python/tvm_ext/__init__.py b/apps/extension/python/tvm_ext/__init__.py
index 0315a8f11b39..be1b42328c1b 100644
--- a/apps/extension/python/tvm_ext/__init__.py
+++ b/apps/extension/python/tvm_ext/__init__.py
@@ -44,7 +44,7 @@ def load_lib():
@tvm.register_object("tvm_ext.IntVector")
class IntVec(tvm.Object):
- """Example for using extension class in c++ """
+ """Example for using extension class in c++"""
@property
def _tvm_handle(self):
diff --git a/python/tvm/_ffi/_ctypes/packed_func.py b/python/tvm/_ffi/_ctypes/packed_func.py
index 6cfa3e5c286a..bf763a194311 100644
--- a/python/tvm/_ffi/_ctypes/packed_func.py
+++ b/python/tvm/_ffi/_ctypes/packed_func.py
@@ -73,7 +73,7 @@ def convert_to_tvm_func(pyfunc):
local_pyfunc = pyfunc
def cfun(args, type_codes, num_args, ret, _):
- """ ctypes function """
+ """ctypes function"""
num_args = num_args.value if isinstance(num_args, ctypes.c_int) else num_args
pyargs = (C_TO_PY_ARG_SWITCH[type_codes[i]](args[i]) for i in range(num_args))
# pylint: disable=broad-except
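The trampoline above backs any Python callable exposed through the FFI. A minimal usage sketch of that path (the global name "demo.add_one" is hypothetical, chosen for illustration):

import tvm

# Register a Python callable as a global PackedFunc; calls cross the FFI
# boundary through a ctypes callback like cfun above.
@tvm.register_func("demo.add_one")  # hypothetical name, not an existing function
def add_one(x):
    return x + 1

f = tvm.get_global_func("demo.add_one")
assert f(41) == 42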
diff --git a/python/tvm/arith/analyzer.py b/python/tvm/arith/analyzer.py
index c3b32b5960eb..5c532c692b1d 100644
--- a/python/tvm/arith/analyzer.py
+++ b/python/tvm/arith/analyzer.py
@@ -22,7 +22,7 @@
@tvm._ffi.register_object("arith.ModularSet")
class ModularSet(Object):
- """Represent range of (coeff * x + base) for x in Z """
+ """Represent range of (coeff * x + base) for x in Z"""
def __init__(self, coeff, base):
self.__init_handle_by_constructor__(_ffi_api.ModularSet, coeff, base)
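As an illustration of what ModularSet encodes, the analyzer can derive (coeff, base) directly from an index expression; a minimal sketch:

import tvm
from tvm import te

# ModularSet(coeff, base) means the expression always equals coeff * n + base.
analyzer = tvm.arith.Analyzer()
x, y = te.var("x"), te.var("y")
m = analyzer.modular_set(x * 4 + y * 6 + 7)
assert m.coeff == 2 and m.base == 1  # gcd(4, 6) = 2, and 7 mod 2 = 1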
diff --git a/python/tvm/auto_scheduler/loop_state.py b/python/tvm/auto_scheduler/loop_state.py
index 7cfe6ccbc2c0..03cc00def6b7 100644
--- a/python/tvm/auto_scheduler/loop_state.py
+++ b/python/tvm/auto_scheduler/loop_state.py
@@ -48,12 +48,12 @@
@tvm._ffi.register_object("auto_scheduler.Iterator")
class Iterator(Object):
- """ A loop iterator structure. """
+ """A loop iterator structure."""
@tvm._ffi.register_object("auto_scheduler.Stage")
class Stage(Object):
- """ A stage in the compute declaration. Similar to tvm.te.schedule.Stage. """
+ """A stage in the compute declaration. Similar to tvm.te.schedule.Stage."""
# Static trans table for compute_at location
# This is used to transform the compute_at location to C++ enum
@@ -62,7 +62,7 @@ class Stage(Object):
@tvm._ffi.register_object("auto_scheduler.State")
class StateObject(Object):
- """ The internal State object """
+ """The internal State object"""
def __eq__(self, other):
return _ffi_api.StateEqual(self, other)
@@ -579,7 +579,7 @@ def rfactor(self, stage, iterator, factor_iter_id):
return self.stages[int(new_stage_id)].op
def copy(self):
- """ Do deep copy of this State. """
+ """Do deep copy of this State."""
state = State(self.state_object, self.compute_dag)
state.stage_id_map = self.stage_id_map.copy()
return state
diff --git a/python/tvm/auto_scheduler/measure.py b/python/tvm/auto_scheduler/measure.py
index ea4a129727c3..8d762602bfd1 100644
--- a/python/tvm/auto_scheduler/measure.py
+++ b/python/tvm/auto_scheduler/measure.py
@@ -84,7 +84,7 @@ class BuildFunc:
@tvm._ffi.register_object("auto_scheduler.MeasureCallback")
class MeasureCallback(Object):
- """ The base class of measurement callback functions. """
+ """The base class of measurement callback functions."""
@tvm._ffi.register_object("auto_scheduler.PythonBasedMeasureCallback")
@@ -244,7 +244,7 @@ def recover_measure_input(inp, rebuild_state=False):
@tvm._ffi.register_object("auto_scheduler.ProgramBuilder")
class ProgramBuilder(Object):
- """ The base class of ProgramBuilders. """
+ """The base class of ProgramBuilders."""
def build(self, measure_inputs, verbose=1):
"""Build programs and return results.
@@ -265,7 +265,7 @@ def build(self, measure_inputs, verbose=1):
@tvm._ffi.register_object("auto_scheduler.ProgramRunner")
class ProgramRunner(Object):
- """ The base class of ProgramRunners. """
+ """The base class of ProgramRunners."""
def run(self, measure_inputs, build_results, verbose=1):
"""Run measurement and return results.
@@ -585,7 +585,7 @@ def __del__(self):
class MeasureErrorNo(object):
- """ Error type for MeasureResult. """
+ """Error type for MeasureResult."""
NO_ERROR = 0 # No error
INSTANTIATION_ERROR = 1 # Errors happen when apply transform steps from init state
diff --git a/python/tvm/auto_scheduler/search_policy.py b/python/tvm/auto_scheduler/search_policy.py
index f0388a886c5f..a88c1305b560 100644
--- a/python/tvm/auto_scheduler/search_policy.py
+++ b/python/tvm/auto_scheduler/search_policy.py
@@ -96,7 +96,7 @@ def __init__(self, meet_condition_func, apply_func, rule_name="CustomSketchRule"
@tvm._ffi.register_object("auto_scheduler.SearchPolicy")
class SearchPolicy(Object):
- """ The base class of search policies. """
+ """The base class of search policies."""
def continue_search_one_round(self, num_measure, measurer):
"""
diff --git a/python/tvm/auto_scheduler/task_scheduler.py b/python/tvm/auto_scheduler/task_scheduler.py
index 5cae556e2747..dd5073331083 100644
--- a/python/tvm/auto_scheduler/task_scheduler.py
+++ b/python/tvm/auto_scheduler/task_scheduler.py
@@ -540,7 +540,7 @@ def _restore_status(self, log_file, num_measures_per_round):
class TaskSchedulerCallback:
- """The base class of task scheduler callback functions. """
+ """The base class of task scheduler callback functions."""
def pre_tune(self, task_scheduler, task_id):
"""The callback before tuning each task.
diff --git a/python/tvm/auto_scheduler/utils.py b/python/tvm/auto_scheduler/utils.py
index 14dc5b8984c3..1c03491c5614 100644
--- a/python/tvm/auto_scheduler/utils.py
+++ b/python/tvm/auto_scheduler/utils.py
@@ -190,7 +190,7 @@ def get_const_tuple(in_tuple):
def list_to_tuple(x):
- """ Convert a list to a tuple recursively. """
+ """Convert a list to a tuple recursively."""
assert isinstance(x, list)
return tuple(list_to_tuple(y) if isinstance(y, list) else y for y in x)
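Behaviour sketch for list_to_tuple:

# Nested lists become nested tuples, element order preserved.
def list_to_tuple(x):
    assert isinstance(x, list)
    return tuple(list_to_tuple(y) if isinstance(y, list) else y for y in x)

assert list_to_tuple([1, [2, [3, 4]]]) == (1, (2, (3, 4)))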
@@ -250,7 +250,7 @@ def kill_child_processes(parent_pid, sig=signal.SIGTERM):
def make_traceback_info():
- """ Get the error message from traceback. """
+ """Get the error message from traceback."""
info = str(traceback.format_exc())
if len(info) > MAX_TRACEBACK_INFO_LEN:
info = (
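A self-contained sketch of the truncation this function performs (512 is an assumed cap for illustration; the module constant may differ):

import traceback

MAX_TRACEBACK_INFO_LEN = 512  # assumed value for this sketch

def make_traceback_info():
    # Keep the head and tail of an over-long traceback, eliding the middle.
    info = str(traceback.format_exc())
    if len(info) > MAX_TRACEBACK_INFO_LEN:
        info = (
            info[: MAX_TRACEBACK_INFO_LEN // 2]
            + "\n...\n"
            + info[-MAX_TRACEBACK_INFO_LEN // 2 :]
        )
    return info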
diff --git a/python/tvm/contrib/cc.py b/python/tvm/contrib/cc.py
index f48ae395fbcd..64cbbd28604c 100644
--- a/python/tvm/contrib/cc.py
+++ b/python/tvm/contrib/cc.py
@@ -92,7 +92,7 @@ def get_target_by_dump_machine(compiler):
"""
def get_target_triple():
- """ Get target triple according to dumpmachine option of compiler."""
+ """Get target triple according to dumpmachine option of compiler."""
if compiler:
cmd = [compiler, "-dumpmachine"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
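A standalone sketch of the same probe, assuming a GCC/Clang-compatible compiler on PATH:

import subprocess

def get_target_triple(compiler="gcc"):
    # Ask the compiler for its target triple, e.g. "x86_64-linux-gnu".
    proc = subprocess.Popen(
        [compiler, "-dumpmachine"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    out, _ = proc.communicate()
    return out.decode("utf-8").strip() if proc.returncode == 0 else None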
diff --git a/python/tvm/contrib/peak.py b/python/tvm/contrib/peak.py
index 195f3dc9d81e..4133aa31a50b 100644
--- a/python/tvm/contrib/peak.py
+++ b/python/tvm/contrib/peak.py
@@ -26,7 +26,7 @@
def _convert_to_remote(func, remote):
- """ convert module function to remote rpc function"""
+ """convert module function to remote rpc function"""
temp = utils.tempdir()
path_dso = temp.relpath("tmp_func.tar")
func.export_library(path_dso)
diff --git a/python/tvm/contrib/tedd.py b/python/tvm/contrib/tedd.py
index 10598e26824e..a65f5e474a3d 100644
--- a/python/tvm/contrib/tedd.py
+++ b/python/tvm/contrib/tedd.py
@@ -147,7 +147,7 @@ def get_itervar_label_color(itervar, iv_type):
def linebrk(s, n):
- """ Break input string s with
for every n charactors."""
+ """Break input string s with
for every n charactors."""
result = ""
j = 0
for i, c in enumerate(s):
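The intent is fixed-width chunking joined by "<br>"; an essentially equivalent sketch:

def linebrk_sketch(s, n):
    # Join n-character chunks of s with "<br>".
    return "<br>".join(s[i : i + n] for i in range(0, len(s), n))

assert linebrk_sketch("abcdefg", 3) == "abc<br>def<br>g"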
diff --git a/python/tvm/driver/tvmc/autotuner.py b/python/tvm/driver/tvmc/autotuner.py
index b92ab86ef621..e5e59b2bbde2 100644
--- a/python/tvm/driver/tvmc/autotuner.py
+++ b/python/tvm/driver/tvmc/autotuner.py
@@ -46,7 +46,7 @@
@register_parser
def add_tune_parser(subparsers):
- """ Include parser for 'tune' subcommand """
+ """Include parser for 'tune' subcommand"""
parser = subparsers.add_parser("tune", help="auto-tune a model")
parser.set_defaults(func=drive_tune)
diff --git a/python/tvm/driver/tvmc/frontends.py b/python/tvm/driver/tvmc/frontends.py
index 89ca1b8fc329..ceee5ccd7266 100644
--- a/python/tvm/driver/tvmc/frontends.py
+++ b/python/tvm/driver/tvmc/frontends.py
@@ -77,7 +77,7 @@ def load(self, path, shape_dict=None, **kwargs):
def import_keras():
- """ Lazy import function for Keras"""
+ """Lazy import function for Keras"""
# Keras writes the message "Using TensorFlow backend." to stderr
# Redirect stderr during the import to disable this
stderr = sys.stderr
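A sketch of the silenced lazy import, assuming TensorFlow's bundled Keras is available:

import os
import sys

def import_keras():
    # Keras prints "Using TensorFlow backend." to stderr; silence it while
    # importing, then restore the original stream.
    stderr = sys.stderr
    sys.stderr = open(os.devnull, "w")
    try:
        import tensorflow as tf  # pylint: disable=import-outside-toplevel
        from tensorflow import keras  # pylint: disable=import-outside-toplevel

        return tf, keras
    finally:
        sys.stderr = stderr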
@@ -93,7 +93,7 @@ def import_keras():
class KerasFrontend(Frontend):
- """ Keras frontend for TVMC """
+ """Keras frontend for TVMC"""
@staticmethod
def name():
@@ -151,7 +151,7 @@ def sequential_to_functional(self, model):
class OnnxFrontend(Frontend):
- """ ONNX frontend for TVMC """
+ """ONNX frontend for TVMC"""
@staticmethod
def name():
@@ -172,7 +172,7 @@ def load(self, path, shape_dict=None, **kwargs):
class TensorflowFrontend(Frontend):
- """ TensorFlow frontend for TVMC """
+ """TensorFlow frontend for TVMC"""
@staticmethod
def name():
@@ -199,7 +199,7 @@ def load(self, path, shape_dict=None, **kwargs):
class TFLiteFrontend(Frontend):
- """ TFLite frontend for TVMC """
+ """TFLite frontend for TVMC"""
@staticmethod
def name():
@@ -237,7 +237,7 @@ def load(self, path, shape_dict=None, **kwargs):
class PyTorchFrontend(Frontend):
- """ PyTorch frontend for TVMC """
+ """PyTorch frontend for TVMC"""
@staticmethod
def name():
diff --git a/python/tvm/driver/tvmc/main.py b/python/tvm/driver/tvmc/main.py
index 1d360d98206e..2574daab02ac 100644
--- a/python/tvm/driver/tvmc/main.py
+++ b/python/tvm/driver/tvmc/main.py
@@ -53,7 +53,7 @@ def _example_parser(main_subparser):
def _main(argv):
- """ TVM command line interface. """
+ """TVM command line interface."""
parser = argparse.ArgumentParser(
prog="tvmc",
diff --git a/python/tvm/relay/analysis/feature.py b/python/tvm/relay/analysis/feature.py
index 99e2cdc785e6..0e264a0eef7d 100644
--- a/python/tvm/relay/analysis/feature.py
+++ b/python/tvm/relay/analysis/feature.py
@@ -20,7 +20,7 @@
class Feature(IntEnum):
- """ The features a program might contain. """
+ """The features a program might contain."""
fVar = 0
fGlobalVar = 1
diff --git a/python/tvm/relay/backend/executor_factory.py b/python/tvm/relay/backend/executor_factory.py
index 4ed76f4b6366..701ca06a87e0 100644
--- a/python/tvm/relay/backend/executor_factory.py
+++ b/python/tvm/relay/backend/executor_factory.py
@@ -31,7 +31,7 @@ class ExecutorFactoryModule:
@abstractmethod
def get_executor_config(self):
- """ Return the internal configuration the executor uses to execute the network """
+ """Return the internal configuration the executor uses to execute the network"""
raise NotImplementedError
@abstractmethod
@@ -41,7 +41,7 @@ def get_params(self):
@abstractmethod
def get_lib(self):
- """ Return the generated library"""
+ """Return the generated library"""
raise NotImplementedError
def __getitem__(self, item):
diff --git a/python/tvm/relay/frontend/caffe.py b/python/tvm/relay/frontend/caffe.py
index caf4f1a14741..d48e5634d986 100644
--- a/python/tvm/relay/frontend/caffe.py
+++ b/python/tvm/relay/frontend/caffe.py
@@ -33,7 +33,7 @@
class OperatorConverter(object):
- """ Operator Converted for converting Caffe ops to Relay ops """
+ """Operator Converted for converting Caffe ops to Relay ops"""
def __init__(self, init_layer_dict, predict_layer, exp_tab):
self.init_layer_dict = init_layer_dict
@@ -66,7 +66,7 @@ def __init__(self, init_layer_dict, predict_layer, exp_tab):
}
def convert_flatten(self, op):
- """ Convert Flatten layer """
+ """Convert Flatten layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
@@ -77,7 +77,7 @@ def convert_flatten(self, op):
return out
def convert_eltwise(self, op):
- """ Convert Eltwise layer """
+ """Convert Eltwise layer"""
inputs = op.bottom
assert len(inputs) == 2, "input tensors length should be 2"
@@ -115,7 +115,7 @@ def convert_eltwise(self, op):
return out
def _parse_conv_params(self, op):
- """ Parse the parameters of Convolution and Deconvolution layer """
+ """Parse the parameters of Convolution and Deconvolution layer"""
nonzone = lambda val, pos, dflt: val[pos] if pos < len(val) else dflt
conv_params = op.convolution_param
@@ -160,7 +160,7 @@ def _parse_conv_params(self, op):
return params
def convert_batch_norm(self, op):
- """ Convert BatchNorm layer """
+ """Convert BatchNorm layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
n, c, h, w = _infer_shape(in_expr)
@@ -215,7 +215,7 @@ def convert_batch_norm(self, op):
return out[0]
def convert_scale(self, op):
- """ Convert Scale layer """
+ """Convert Scale layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
weight_bias_blobs = self.init_layer_dict[op.name].blobs
@@ -243,7 +243,7 @@ def convert_scale(self, op):
return out
def convert_concat(self, op):
- """ Convert Concat layer """
+ """Convert Concat layer"""
inputs = op.bottom
in_expr = (self.exp_tab.get_expr(inputs[i]) for i in range(len(inputs)))
@@ -254,7 +254,7 @@ def convert_concat(self, op):
return out
def convert_reshape(self, op):
- """ Convert Reshape layer """
+ """Convert Reshape layer"""
inputs = op.bottom
input_name = inputs[0]
@@ -294,7 +294,7 @@ def convert_reshape(self, op):
return out
def convert_softmax(self, op):
- """ Convert Softmax layer """
+ """Convert Softmax layer"""
inputs = op.bottom
assert len(inputs) == 1, "input tensors length should be 1"
@@ -309,7 +309,7 @@ def convert_softmax(self, op):
return out
def convert_conv(self, op):
- """ Convert Convolution layer """
+ """Convert Convolution layer"""
params = self._parse_conv_params(op)
weight_bias_blobs = self.init_layer_dict[op.name].blobs
conv_params = op.convolution_param
@@ -339,7 +339,7 @@ def convert_conv(self, op):
return out
def convert_pooling(self, op):
- """ Convert Pooling layer """
+ """Convert Pooling layer"""
inputs = op.bottom
input_name = inputs[0]
@@ -400,7 +400,7 @@ def convert_pooling(self, op):
return out
def convert_lrn(self, op):
- """ Convert LRN layer """
+ """Convert LRN layer"""
inputs = op.bottom
input_name = inputs[0]
@@ -416,7 +416,7 @@ def convert_lrn(self, op):
return out
def convert_innerproduct(self, op):
- """ Convert InnerProduct layer """
+ """Convert InnerProduct layer"""
inputs = op.bottom
weight_bias_blobs = self.init_layer_dict[op.name].blobs
dense_params = op.inner_product_param
@@ -457,7 +457,7 @@ def convert_innerproduct(self, op):
return out
def convert_dropout(self, op):
- """ Convert Dropout layer """
+ """Convert Dropout layer"""
inputs = op.bottom
input_name = inputs[0]
@@ -471,7 +471,7 @@ def convert_dropout(self, op):
return out
def convert_relu(self, op):
- """ Convert ReLU layer """
+ """Convert ReLU layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
negative_slope = op.relu_param.negative_slope
@@ -483,7 +483,7 @@ def convert_relu(self, op):
return out
def convert_prelu(self, op):
- """ Convert PReLU layer """
+ """Convert PReLU layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
@@ -495,7 +495,7 @@ def convert_prelu(self, op):
return out
def convert_deconv(self, op):
- """ Convert Deconvolution layer """
+ """Convert Deconvolution layer"""
params = self._parse_conv_params(op)
weight_bias_blobs = self.init_layer_dict[op.name].blobs
conv_params = op.convolution_param
@@ -527,7 +527,7 @@ def convert_deconv(self, op):
return out
def convert_slice(self, op):
- """ Convert Slice layer """
+ """Convert Slice layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
@@ -545,21 +545,21 @@ def convert_slice(self, op):
return out
def convert_sigmoid(self, op):
- """ Convert Sigmoid layer """
+ """Convert Sigmoid layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.sigmoid(in_expr)
return out
def convert_tanh(self, op):
- """ Convert TanH layer """
+ """Convert TanH layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.tanh(in_expr)
return out
def convert_crop(self, op):
- """ Convert Crop layer """
+ """Convert Crop layer"""
inputs = op.bottom
assert len(inputs) == 2, "Need two inputs of Crop layer"
in_expr_a = self.exp_tab.get_expr(inputs[0])
@@ -615,7 +615,7 @@ def check_unsupported_ops(self):
raise tvm.error.OpNotImplemented(msg.format(ops))
def fuse_op(self, layers):
- """ Fusing the BatchNorm and Scale layer """
+ """Fusing the BatchNorm and Scale layer"""
bn, scale = layers["bn"], layers["scale"]
# bn params
@@ -641,7 +641,7 @@ def fuse_op(self, layers):
return bn
def op_fuse(self):
- """fuse bn and scale """
+ """fuse bn and scale"""
new_layers = []
temp_layers = {}
changed_layers = {}
diff --git a/python/tvm/relay/frontend/tflite_flexbuffer.py b/python/tvm/relay/frontend/tflite_flexbuffer.py
index 734908214dce..4b5d2b9c605c 100644
--- a/python/tvm/relay/frontend/tflite_flexbuffer.py
+++ b/python/tvm/relay/frontend/tflite_flexbuffer.py
@@ -76,7 +76,7 @@ def __init__(self, buffer):
self.buffer = buffer
def indirect_jump(self, offset, byte_width):
- """ Helper function to read the offset value and jump """
+ """Helper function to read the offset value and jump"""
unpack_str = ""
if byte_width == 1:
unpack_str = "= 8.4 or ((arch_version in (8.2, 8.3)) and "+dotprod" in target.mattr)
def is_mmla_available():
- """ Checks whether the hardware has support for ummla/smmla instructions. """
+ """Checks whether the hardware has support for ummla/smmla instructions."""
target = tvm.target.Target.current(allow_none=False)
arch_version = get_arch_version(target.mattr)
return arch_version >= 8.6 or (
@@ -59,7 +59,7 @@ def is_mmla_available():
def is_aarch64_arm():
- """ Checks whether we are compiling for an AArch64 target. """
+ """Checks whether we are compiling for an AArch64 target."""
target = tvm.target.Target.current(allow_none=False)
return "aarch64" in target.attrs.get("mtriple", "")
diff --git a/python/tvm/topi/arm_cpu/bitserial_conv2d.py b/python/tvm/topi/arm_cpu/bitserial_conv2d.py
index 6406861885c3..def9b8345cd8 100644
--- a/python/tvm/topi/arm_cpu/bitserial_conv2d.py
+++ b/python/tvm/topi/arm_cpu/bitserial_conv2d.py
@@ -55,7 +55,7 @@ def bitserial_conv2d_nhwc(
out_dtype,
unipolar,
):
- """ Compute convolution with pack on spatial axes. """
+ """Compute convolution with pack on spatial axes."""
assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
assert pack_dtype == "uint8", "only support packing into uint8 bits"
assert out_dtype == "int16", "only support output type of int16"
diff --git a/python/tvm/topi/arm_cpu/conv2d.py b/python/tvm/topi/arm_cpu/conv2d.py
index 7dbbf9d3d447..b3af36740551 100644
--- a/python/tvm/topi/arm_cpu/conv2d.py
+++ b/python/tvm/topi/arm_cpu/conv2d.py
@@ -381,7 +381,7 @@ def _callback(op):
def _conv2d_arm_cpu_winograd_nnpack(
cfg, data, kernel, strides, padding, dilation, out_dtype, convolution_algorithm
):
- """ TOPI compute callback. Use winograd NNPACK template """
+ """TOPI compute callback. Use winograd NNPACK template"""
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
diff --git a/python/tvm/topi/arm_cpu/conv2d_gemm.py b/python/tvm/topi/arm_cpu/conv2d_gemm.py
index 85c03997a98d..8e416be8daa2 100644
--- a/python/tvm/topi/arm_cpu/conv2d_gemm.py
+++ b/python/tvm/topi/arm_cpu/conv2d_gemm.py
@@ -33,7 +33,7 @@
def configure_knobs(cfg, M, K):
- """ Configure auto-tuning knobs for the interleaved strategy """
+ """Configure auto-tuning knobs for the interleaved strategy"""
x, y = cfg.axis(M // 4), cfg.axis(K // 16)
cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]])
@@ -280,7 +280,7 @@ def compute_conv2d_gemm_without_weight_transform(
def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out):
- """ Schedule the conv2d_gemm interleaved strategy """
+ """Schedule the conv2d_gemm interleaved strategy"""
C = out.op.input_tensors[0]
C_interleaved = C.op.input_tensors[0]
A_interleaved = C_interleaved.op.input_tensors[0]
@@ -372,7 +372,7 @@ def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out):
def schedule_conv2d_gemm_native(cfg, s, out, final_out):
- """ Schedule the conv2d_gemm hybrid strategy """
+ """Schedule the conv2d_gemm hybrid strategy"""
C = out.op.input_tensors[0]
A = C.op.input_tensors[0]
in_type = A.dtype
diff --git a/python/tvm/topi/arm_cpu/conv2d_int8.py b/python/tvm/topi/arm_cpu/conv2d_int8.py
index fc7e4036341a..bf4c03a6e5ed 100644
--- a/python/tvm/topi/arm_cpu/conv2d_int8.py
+++ b/python/tvm/topi/arm_cpu/conv2d_int8.py
@@ -196,7 +196,7 @@ def _callback(op):
def compute_conv2d_NHWC_quantized_interleaved(
cfg, data, kernel, strides, padding, dilation, out_dtype
):
- """ Interface for interleaved compute_conv2d_NHWC_quantized_interleaved"""
+ """Interface for interleaved compute_conv2d_NHWC_quantized_interleaved"""
return _compute_conv2d_NHWC_quantized(
cfg, data, kernel, strides, padding, dilation, out_dtype, True
)
@@ -206,7 +206,7 @@ def compute_conv2d_NHWC_quantized_interleaved(
def compute_conv2d_NHWC_quantized_interleaved_without_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype, kernel_size, output_channels
):
- """ Interface for interleaved compute_conv2d_NHWC_quantized_interleaved_without_transform"""
+ """Interface for interleaved compute_conv2d_NHWC_quantized_interleaved_without_transform"""
return _compute_conv2d_NHWC_quantized_without_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype, kernel_size, output_channels, True
)
@@ -214,7 +214,7 @@ def compute_conv2d_NHWC_quantized_interleaved_without_transform(
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_interleaved.arm_cpu")
def schedule_conv2d_NHWC_quantized_interleaved(cfg, outs):
- """ Interface for interleaved schedule_conv2d_NHWC_quantized_interleaved"""
+ """Interface for interleaved schedule_conv2d_NHWC_quantized_interleaved"""
return _schedule_conv2d_NHWC_quantized(cfg, outs, True)
@@ -222,7 +222,7 @@ def schedule_conv2d_NHWC_quantized_interleaved(cfg, outs):
# The weights are interleaved and transposed
@autotvm.register_topi_compute("conv2d_NHWC_quantized_native.arm_cpu")
def compute_conv2d_NHWC_quantized_native(cfg, data, kernel, strides, padding, dilation, out_dtype):
- """ Interface for native compute_conv2d_NHWC_quantized"""
+ """Interface for native compute_conv2d_NHWC_quantized"""
return _compute_conv2d_NHWC_quantized(
cfg, data, kernel, strides, padding, dilation, out_dtype, False
)
@@ -232,7 +232,7 @@ def compute_conv2d_NHWC_quantized_native(cfg, data, kernel, strides, padding, di
def compute_conv2d_NHWC_quantized_native_without_transform(
cfg, data, kernel, strides, padding, dilation, out_dtype, kernel_size, output_channels
):
- """ Interface for compute_conv2d_NHWC_quantized_native_without_transform"""
+ """Interface for compute_conv2d_NHWC_quantized_native_without_transform"""
return _compute_conv2d_NHWC_quantized_without_transform(
cfg,
data,
@@ -249,5 +249,5 @@ def compute_conv2d_NHWC_quantized_native_without_transform(
@autotvm.register_topi_schedule("conv2d_NHWC_quantized_native.arm_cpu")
def schedule_conv2d_NHWC_quantized_native(cfg, outs):
- """ Interface for native schedule_conv2d_NHWC_quantized"""
+ """Interface for native schedule_conv2d_NHWC_quantized"""
return _schedule_conv2d_NHWC_quantized(cfg, outs, False)
diff --git a/python/tvm/topi/bifrost/dense.py b/python/tvm/topi/bifrost/dense.py
index 9ab8b4ebea62..7e827813ed66 100644
--- a/python/tvm/topi/bifrost/dense.py
+++ b/python/tvm/topi/bifrost/dense.py
@@ -103,7 +103,7 @@ def _callback(op):
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
- """ fuse all the axis and bind to GPU threads """
+ """fuse all the axis and bind to GPU threads"""
axis = axis or s[tensor].op.axis
fused = s[tensor].fuse(*axis)
bx, tx = s[tensor].split(fused, num_thread)
diff --git a/python/tvm/topi/bifrost/depthwise_conv2d.py b/python/tvm/topi/bifrost/depthwise_conv2d.py
index 625c274213ad..801acd676aa6 100644
--- a/python/tvm/topi/bifrost/depthwise_conv2d.py
+++ b/python/tvm/topi/bifrost/depthwise_conv2d.py
@@ -52,7 +52,7 @@ def _schedule(pad_data, kernel, conv):
output = conv
def tile_and_bind3d(tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
- """ tile and bind 3d """
+ """tile and bind 3d"""
y_factor = y_factor or z_factor
x_factor = x_factor or y_factor
zo, zi = s[tensor].split(z, z_factor)
diff --git a/python/tvm/topi/cuda/conv2d_hwnc_tensorcore.py b/python/tvm/topi/cuda/conv2d_hwnc_tensorcore.py
index b3d8397791fe..be9218431c85 100644
--- a/python/tvm/topi/cuda/conv2d_hwnc_tensorcore.py
+++ b/python/tvm/topi/cuda/conv2d_hwnc_tensorcore.py
@@ -65,7 +65,7 @@ def unpack_HWNCnc_to_hwnc(packed_out, out_dtype):
def conv2d_hwnc_tensorcore(data, kernel, strides, padding, dilation, in_dtype, out_dtype="int32"):
- """"Compute conv2d with tensorcore for HWNC layout with int8/int4"""
+ """ "Compute conv2d with tensorcore for HWNC layout with int8/int4"""
assert data.dtype in ("int4", "uint4", "int8", "uint8")
assert kernel.dtype in ("int4", "uint4", "int8", "uint8")
packed_out = hwnc_tensorcore_cuda(data, kernel, strides, padding, dilation, out_dtype)
diff --git a/python/tvm/topi/image/resize.py b/python/tvm/topi/image/resize.py
index f0d564581d95..42d0455665a1 100644
--- a/python/tvm/topi/image/resize.py
+++ b/python/tvm/topi/image/resize.py
@@ -24,7 +24,7 @@
def get_2d_indices(indices, layout="NCHW"):
- """ Get 2d indices """
+ """Get 2d indices"""
(cc, inum, ic) = (0, 0, 0)
if layout == "NHWC":
n, y, x, c = indices
@@ -43,7 +43,7 @@ def get_2d_indices(indices, layout="NCHW"):
def get_2d_pixel(data, layout, boxes, image_height, image_width, n, c, y, x, cc, ib, ic):
- """ Get 2d pixel """
+ """Get 2d pixel"""
if boxes is None:
y = tvm.te.max(tvm.te.min(y, image_height - 1), 0)
x = tvm.te.max(tvm.te.min(x, image_width - 1), 0)
@@ -62,7 +62,7 @@ def get_2d_pixel(data, layout, boxes, image_height, image_width, n, c, y, x, cc,
def get_iny_inx(
y, x, image_height, image_width, target_height, target_width, coordinate_transformation_mode
):
- """ Infer input x,y from output x,y with various coordinate transformation methods """
+ """Infer input x,y from output x,y with various coordinate transformation methods"""
scale_y = te.div(image_height.astype("float"), target_height.astype("float"))
scale_x = te.div(image_width.astype("float"), target_width.astype("float"))
if coordinate_transformation_mode == "half_pixel":
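The transformation modes follow the usual resize conventions; a scalar sketch:

def infer_in_coord(out_coord, scale, mode):
    # Map an output pixel coordinate back into input space.
    if mode == "half_pixel":
        return (out_coord + 0.5) * scale - 0.5
    # "align_corners" uses scale = (in_size - 1) / (out_size - 1);
    # "asymmetric" uses scale = in_size / out_size.
    return out_coord * scale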
diff --git a/python/tvm/topi/intel_graphics/conv2d.py b/python/tvm/topi/intel_graphics/conv2d.py
index bdbde91918dd..c3ce78295561 100644
--- a/python/tvm/topi/intel_graphics/conv2d.py
+++ b/python/tvm/topi/intel_graphics/conv2d.py
@@ -125,7 +125,7 @@ def _create_schedule_template(cfg, dshape, kshape, strides, padding, dilation):
##### SCHEDULE UTILITIES #####
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
- """ tile and bind 3d """
+ """tile and bind 3d"""
y_factor = y_factor or z_factor
x_factor = x_factor or y_factor
zo, zi = s[tensor].split(z, z_factor)
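A self-contained sketch of what tile_and_bind3d does, shown on a toy elementwise op (names and factors are illustrative):

import tvm
from tvm import te

n = 64
A = te.placeholder((n, n, n), name="A")
B = te.compute((n, n, n), lambda z, y, x: A[z, y, x] * 2, name="B")
s = te.create_schedule(B.op)

# Split each spatial axis, then bind outer axes to blocks, inner to threads.
z, y, x = s[B].op.axis
zo, zi = s[B].split(z, factor=2)
yo, yi = s[B].split(y, factor=2)
xo, xi = s[B].split(x, factor=2)
s[B].reorder(zo, yo, xo, zi, yi, xi)
s[B].bind(zo, te.thread_axis("blockIdx.z"))
s[B].bind(yo, te.thread_axis("blockIdx.y"))
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(zi, te.thread_axis("threadIdx.z"))
s[B].bind(yi, te.thread_axis("threadIdx.y"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))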
diff --git a/python/tvm/topi/mali/dense.py b/python/tvm/topi/mali/dense.py
index 53f76219bacd..a8ca66b09cd5 100644
--- a/python/tvm/topi/mali/dense.py
+++ b/python/tvm/topi/mali/dense.py
@@ -103,7 +103,7 @@ def _callback(op):
def fuse_and_bind(s, tensor, axis=None, num_thread=None):
- """ fuse all the axis and bind to GPU threads """
+ """fuse all the axis and bind to GPU threads"""
# TODO(@comaniac): figure out where this function is used.
axis = axis or s[tensor].op.axis
fused = s[tensor].fuse(*axis)
diff --git a/python/tvm/topi/mali/depthwise_conv2d.py b/python/tvm/topi/mali/depthwise_conv2d.py
index 55fcb1de9c4a..b292f694b995 100644
--- a/python/tvm/topi/mali/depthwise_conv2d.py
+++ b/python/tvm/topi/mali/depthwise_conv2d.py
@@ -132,7 +132,7 @@ def _callback(op):
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
- """ tile and bind 3d """
+ """tile and bind 3d"""
y_factor = y_factor or z_factor
x_factor = x_factor or y_factor
zo, zi = s[tensor].split(z, z_factor)
diff --git a/python/tvm/topi/nn/conv2d.py b/python/tvm/topi/nn/conv2d.py
index 80f87f86736c..130eb4b69844 100644
--- a/python/tvm/topi/nn/conv2d.py
+++ b/python/tvm/topi/nn/conv2d.py
@@ -159,7 +159,7 @@ def conv2d_infer_layout(workload, cfg):
def _get_workload(data, kernel, stride, padding, dilation, out_dtype, data_layout="NCHW"):
- """ Get the workload structure. """
+ """Get the workload structure."""
if data_layout == "NCHW":
_, CI, IH, IW = get_const_tuple(data.shape)
elif data_layout == "NHWC":
diff --git a/python/tvm/topi/nn/depthwise_conv2d.py b/python/tvm/topi/nn/depthwise_conv2d.py
index 052ab8b88d1c..a3639b57e7e0 100644
--- a/python/tvm/topi/nn/depthwise_conv2d.py
+++ b/python/tvm/topi/nn/depthwise_conv2d.py
@@ -51,7 +51,7 @@
def _get_workload(data, kernel, stride, padding, dilation, out_dtype):
- """ Get the workload structure. """
+ """Get the workload structure."""
_, in_channel, height, width = [x.value for x in data.shape]
channel, channel_multiplier, kh, kw = [x.value for x in kernel.shape]
out_channel = channel * channel_multiplier
diff --git a/python/tvm/topi/testing/adaptive_pool_python.py b/python/tvm/topi/testing/adaptive_pool_python.py
index dd8fadd71f14..9a61e52a2826 100644
--- a/python/tvm/topi/testing/adaptive_pool_python.py
+++ b/python/tvm/topi/testing/adaptive_pool_python.py
@@ -73,7 +73,7 @@ def _pool3d(in_size, out_size, np_data, np_op):
def adaptive_pool_channel_first(np_data, out_size, pool_op, np_op):
- """ The reference function for adaptive pool, channel first layout """
+ """The reference function for adaptive pool, channel first layout"""
ishape = np_data.shape
n, c = ishape[:2]
oshape = (n, c) + out_size
@@ -87,7 +87,7 @@ def adaptive_pool_channel_first(np_data, out_size, pool_op, np_op):
def adaptive_pool_channel_last(np_data, out_size, pool_op, np_op):
- """ The reference function for adaptive pool, channel last layout """
+ """The reference function for adaptive pool, channel last layout"""
ishape = np_data.shape
n, c = ishape[0], ishape[-1]
oshape = (n,) + out_size + (c,)
@@ -108,7 +108,7 @@ def adaptive_pool_channel_last(np_data, out_size, pool_op, np_op):
def adaptive_pool(np_data, out_size, pool_type, layout):
- """ The reference function for adaptive pool, for 2d and 3d """
+ """The reference function for adaptive pool, for 2d and 3d"""
if isinstance(out_size, int):
out_size = (out_size,)
if len(out_size) == 1:
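For intuition, an NCHW adaptive average pooling reference in NumPy (window bounds use the standard floor/ceil rule; assumes a float input):

import numpy as np

def adaptive_avg_pool2d_nchw(x, out_size):
    # Output cell (i, j) averages rows [floor(i*h/oh), ceil((i+1)*h/oh))
    # and the analogous column range.
    n, c, h, w = x.shape
    oh, ow = out_size
    out = np.empty((n, c, oh, ow), dtype=x.dtype)
    for i in range(oh):
        hs, he = (i * h) // oh, -((-(i + 1) * h) // oh)
        for j in range(ow):
            ws, we = (j * w) // ow, -((-(j + 1) * w) // ow)
            out[:, :, i, j] = x[:, :, hs:he, ws:we].mean(axis=(2, 3))
    return out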
diff --git a/python/tvm/topi/testing/bilinear_resize_python.py b/python/tvm/topi/testing/bilinear_resize_python.py
index 844546e0643f..b1fb8b0b4845 100644
--- a/python/tvm/topi/testing/bilinear_resize_python.py
+++ b/python/tvm/topi/testing/bilinear_resize_python.py
@@ -22,7 +22,7 @@
def bilinear_resize_python(image, out_size, layout, coordinate_transformation_mode="align_corners"):
- """ Bilinear scaling using python"""
+ """Bilinear scaling using python"""
(new_h, new_w) = out_size
(ib, ic) = (1, 1)
diff --git a/python/tvm/topi/testing/trilinear_resize3d_python.py b/python/tvm/topi/testing/trilinear_resize3d_python.py
index de1e2307737f..d603e987d5ef 100644
--- a/python/tvm/topi/testing/trilinear_resize3d_python.py
+++ b/python/tvm/topi/testing/trilinear_resize3d_python.py
@@ -23,7 +23,7 @@
def trilinear_resize3d_python(
data_in, out_size, layout, coordinate_transformation_mode="align_corners"
):
- """ Trilinear 3d scaling using python"""
+ """Trilinear 3d scaling using python"""
(new_d, new_h, new_w) = out_size
if layout == "NDHWC":
diff --git a/python/tvm/topi/testing/upsampling_python.py b/python/tvm/topi/testing/upsampling_python.py
index 7f48aa47b8d1..dd187c4d8cff 100644
--- a/python/tvm/topi/testing/upsampling_python.py
+++ b/python/tvm/topi/testing/upsampling_python.py
@@ -22,7 +22,7 @@
def upsample_nearest(arr, scale):
- """ Populate the array by scale factor"""
+ """Populate the array by scale factor"""
h, w = arr.shape
out_h = int(round(h * scale[0]))
out_w = int(round(w * scale[1]))
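A vectorized NumPy sketch of the same nearest-neighbour fill (rounding details may differ slightly from the reference above):

import numpy as np

def upsample_nearest_sketch(arr, scale):
    # out[y, x] = arr[floor(y * h / out_h), floor(x * w / out_w)]
    h, w = arr.shape
    out_h, out_w = int(round(h * scale[0])), int(round(w * scale[1]))
    ys = np.arange(out_h) * h // out_h
    xs = np.arange(out_w) * w // out_w
    return arr[ys[:, None], xs[None, :]]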
@@ -36,7 +36,7 @@ def upsample_nearest(arr, scale):
def upsampling_python(data, scale, layout="NCHW"):
- """ Python version of scaling using nearest neighbour """
+ """Python version of scaling using nearest neighbour"""
ishape = data.shape
if layout == "NCHW":
@@ -87,7 +87,7 @@ def upsampling_python(data, scale, layout="NCHW"):
def upsample3d_nearest(arr, scale):
- """ Populate the array by scale factor"""
+ """Populate the array by scale factor"""
d, h, w = arr.shape
out_d = int(round(d * scale[0]))
out_h = int(round(h * scale[1]))
@@ -104,7 +104,7 @@ def upsample3d_nearest(arr, scale):
def upsampling3d_python(data, scale, layout="NCDHW"):
- """ Python version of 3D scaling using nearest neighbour """
+ """Python version of 3D scaling using nearest neighbour"""
ishape = data.shape
if layout == "NCDHW":
diff --git a/python/tvm/topi/utils.py b/python/tvm/topi/utils.py
index 2e8528c5e76c..3a056cfb4326 100644
--- a/python/tvm/topi/utils.py
+++ b/python/tvm/topi/utils.py
@@ -495,5 +495,5 @@ def ceil_div(a, b):
def swap(arr, axis):
- """ swap arr[axis] and arr[-1] """
+ """swap arr[axis] and arr[-1]"""
return arr[:axis] + [arr[-1]] + arr[axis + 1 : -1] + [arr[axis]]
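Behaviour check for swap, which exchanges the element at axis with the last element:

def swap(arr, axis):
    # Everything between axis and the last position is left untouched.
    return arr[:axis] + [arr[-1]] + arr[axis + 1 : -1] + [arr[axis]]

assert swap([10, 11, 12, 13], 1) == [10, 13, 12, 11]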
diff --git a/python/tvm/topi/x86/bitserial_conv2d.py b/python/tvm/topi/x86/bitserial_conv2d.py
index 18f305094754..73c9dd56517f 100644
--- a/python/tvm/topi/x86/bitserial_conv2d.py
+++ b/python/tvm/topi/x86/bitserial_conv2d.py
@@ -39,7 +39,7 @@ def bitserial_conv2d_nchw(
out_dtype="int16",
unipolar=True,
):
- """ Compute convolution with pack on spatial axes. """
+ """Compute convolution with pack on spatial axes."""
assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
data_q = bitpack(data, in_bits, pack_axis=1, bit_axis=0, pack_type=pack_dtype)
# Check if kernel is already bitpacked
@@ -181,7 +181,7 @@ def bitserial_conv2d_nhwc(
out_dtype="int16",
unipolar=True,
):
- """ Compute convolution with pack on spatial axes. """
+ """Compute convolution with pack on spatial axes."""
assert data.shape[0].value == 1, "spatial pack convolution only support batch size=1"
data_q = bitpack(data, in_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype)
pack_kernel = len(kernel.shape) == 4
diff --git a/python/tvm/topi/x86/conv3d.py b/python/tvm/topi/x86/conv3d.py
index cb202f5257af..d5b09e640e16 100644
--- a/python/tvm/topi/x86/conv3d.py
+++ b/python/tvm/topi/x86/conv3d.py
@@ -471,7 +471,7 @@ def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, layout):
def _get_conv3d_workload(data, kernel, stride, padding, out_dtype, data_layout="NCHW"):
- """ Get the workload structure. """
+ """Get the workload structure."""
if data_layout == "NCDHW":
_, CI, ID, IH, IW = get_const_tuple(data.shape)
CO, CIG, KD, KH, KW = get_const_tuple(kernel.shape)
diff --git a/tests/python/relay/test_pass_legalize.py b/tests/python/relay/test_pass_legalize.py
index 8a37da33a10f..95069d29fd84 100644
--- a/tests/python/relay/test_pass_legalize.py
+++ b/tests/python/relay/test_pass_legalize.py
@@ -74,7 +74,7 @@ def expected():
def test_legalize_none():
- """Test doing nothing by returning 'None' """
+ """Test doing nothing by returning 'None'"""
def before():
x = relay.var("x", shape=(1, 64, 56, 56))
diff --git a/tests/python/unittest/test_autotvm_flop_calculator.py b/tests/python/unittest/test_autotvm_flop_calculator.py
index e07cdac9cc9c..e28beaf98709 100644
--- a/tests/python/unittest/test_autotvm_flop_calculator.py
+++ b/tests/python/unittest/test_autotvm_flop_calculator.py
@@ -152,7 +152,7 @@ def test_average_pool():
def test_move():
- """No float number operation in simple move. So the estimator should raise an error """
+ """No float number operation in simple move. So the estimator should raise an error"""
N = 1024
A = te.placeholder((N,))
diff --git a/tutorials/dev/low_level_custom_pass.py b/tutorials/dev/low_level_custom_pass.py
index 0bd656dd81dd..8f631075429f 100644
--- a/tutorials/dev/low_level_custom_pass.py
+++ b/tutorials/dev/low_level_custom_pass.py
@@ -86,7 +86,7 @@
def find_width8(op):
- """ Find all the 'tir.For' nodes whose extent can be divided by 8. """
+ """Find all the 'tir.For' nodes whose extent can be divided by 8."""
if isinstance(op, tvm.tir.For):
if isinstance(op.extent, tvm.tir.IntImm):
if op.extent.value % 8 == 0:
@@ -110,7 +110,7 @@ def find_width8(op):
def vectorize8(op):
- """ Split can vectorize the loops found in `find_width8`. """
+ """Split can vectorize the loops found in `find_width8`."""
if op in loops:
extent = op.extent.value
name = op.loop_var.name
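For context, the two callbacks combine into a complete rewrite sketch, condensed from the surrounding tutorial:

import tvm
from tvm import te

loops = []

def find_width8(op):
    # Collect tir.For nodes with a constant extent divisible by 8.
    if isinstance(op, tvm.tir.For) and isinstance(op.extent, tvm.tir.IntImm):
        if op.extent.value % 8 == 0:
            loops.append(op)

def vectorize8(op):
    # Split a collected loop by 8 and vectorize the inner part.
    if op in loops:
        extent = op.extent.value
        name = op.loop_var.name
        lo, li = te.var(name + ".outer"), te.var(name + ".inner")
        body = tvm.tir.stmt_functor.substitute(op.body, {op.loop_var: lo * 8 + li})
        body = tvm.tir.For(li, 0, 8, tvm.tir.ForKind.VECTORIZED, body)
        body = tvm.tir.For(lo, 0, extent // 8, tvm.tir.ForKind.SERIAL, body)
        return body
    return None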
diff --git a/vta/python/vta/top/graphpack.py b/vta/python/vta/top/graphpack.py
index 5ec11677da70..a982b88b75e8 100644
--- a/vta/python/vta/top/graphpack.py
+++ b/vta/python/vta/top/graphpack.py
@@ -210,7 +210,7 @@ def __init__(self, start=-1, end=-1):
super().__init__()
def visit_call(self, call):
- """ Visit the children. """
+ """Visit the children."""
# First visit the children.
args = [self.visit(arg) for arg in call.args]
@@ -265,7 +265,7 @@ def __init__(self):
super().__init__()
def visit_call(self, call):
- """ Visit the children. """
+ """Visit the children."""
# First visit the children.
args = [self.visit(arg) for arg in call.args]
@@ -302,7 +302,7 @@ def __init__(self, bfactor, cfactor, weight_bits):
super().__init__()
def visit_call(self, call):
- """ Visit the children. """
+ """Visit the children."""
# First visit the children.
oshape = _get_tensor_shape(call)
odtype = _get_tensor_type(call)
diff --git a/vta/python/vta/top/op.py b/vta/python/vta/top/op.py
index f243c3fc2c89..6b06d88096bf 100644
--- a/vta/python/vta/top/op.py
+++ b/vta/python/vta/top/op.py
@@ -41,7 +41,7 @@
# add clip vta strategy
def compute_clip_vta(attrs, inputs, output_type):
- """ Clip operator. """
+ """Clip operator."""
x = inputs[0]
a_min = attrs.a_min
a_max = attrs.a_max
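The clip is decomposed into two single-op stages so each maps onto one VTA ALU instruction; a sketch of that compute (tag scoping omitted):

import tvm
from tvm import te

def compute_clip(x, a_min, a_max):
    # min with a_max, then max with a_min, as two elementwise stages.
    const_min = tvm.tir.const(a_min, x.dtype)
    const_max = tvm.tir.const(a_max, x.dtype)
    x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
    x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
    return x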
diff --git a/vta/python/vta/top/vta_conv2d.py b/vta/python/vta/top/vta_conv2d.py
index 0b9cb719189f..5271b407fb8d 100644
--- a/vta/python/vta/top/vta_conv2d.py
+++ b/vta/python/vta/top/vta_conv2d.py
@@ -29,7 +29,7 @@
@autotvm.register_topi_compute("conv2d_packed.vta")
def conv2d_packed(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
- """ Packed conv2d function."""
+ """Packed conv2d function."""
if not is_packed_layout(layout):
raise topi.InvalidShapeError()
assert dilation == (1, 1)
diff --git a/vta/python/vta/top/vta_group_conv2d.py b/vta/python/vta/top/vta_group_conv2d.py
index deb4ea779214..69d2579ad78c 100644
--- a/vta/python/vta/top/vta_group_conv2d.py
+++ b/vta/python/vta/top/vta_group_conv2d.py
@@ -28,7 +28,7 @@
@autotvm.register_topi_compute("group_conv2d_packed.vta")
def group_conv2d_packed(cfg, data, kernel, strides, padding, dilation, group, out_dtype):
- """ Packed group conv2d nchw function."""
+ """Packed group conv2d nchw function."""
assert dilation == (1, 1)
if padding[0]:
diff --git a/vta/python/vta/transform.py b/vta/python/vta/transform.py
index f8b4f2d2c5c3..7c7d02b40fbb 100644
--- a/vta/python/vta/transform.py
+++ b/vta/python/vta/transform.py
@@ -419,7 +419,7 @@ def _get_2d_pattern(buf, elem_width, elem_bytes, dtype, scope, allow_fold):
strides = list(x for x in strides)
def raise_error():
- """Internal function to raise error """
+ """Internal function to raise error"""
raise RuntimeError(
(
"Scope[%s]: cannot detect 2d pattern with elem_block=%d:"