diff --git a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
index 28764009a393..b38c03f644bb 100644
--- a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
+++ b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -6684,6 +6684,35 @@ def Torch_AtenConv3dOp : Torch_Op<"aten.conv3d", [
   }];
 }
 
+def Torch_AtenConv3dPaddingOp : Torch_Op<"aten.conv3d.padding", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::conv3d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$input,
+    AnyTorchTensorType:$weight,
+    AnyTorchOptionalTensorType:$bias,
+    AnyTorchListOfTorchIntType:$stride,
+    Torch_StringType:$padding,
+    AnyTorchListOfTorchIntType:$dilation,
+    Torch_IntType:$groups
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenConv3dPaddingOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 7, 1);
+    }
+    void AtenConv3dPaddingOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 7, 1);
+    }
+  }];
+}
+
 def Torch_AtenConv2dOp : Torch_Op<"aten.conv2d", [
     AllowsTypeRefinement,
     HasValueSemantics,
@@ -6713,6 +6742,35 @@ def Torch_AtenConv2dOp : Torch_Op<"aten.conv2d", [
   }];
 }
 
+def Torch_AtenConv2dPaddingOp : Torch_Op<"aten.conv2d.padding", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::conv2d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$input,
+    AnyTorchTensorType:$weight,
+    AnyTorchOptionalTensorType:$bias,
+    AnyTorchListOfTorchIntType:$stride,
+    Torch_StringType:$padding,
+    AnyTorchListOfTorchIntType:$dilation,
+    Torch_IntType:$groups
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenConv2dPaddingOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 7, 1);
+    }
+    void AtenConv2dPaddingOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 7, 1);
+    }
+  }];
+}
+
 def Torch_AtenConv1dOp : Torch_Op<"aten.conv1d", [
     AllowsTypeRefinement,
     HasValueSemantics,
@@ -6742,6 +6800,35 @@ def Torch_AtenConv1dOp : Torch_Op<"aten.conv1d", [
   }];
 }
 
+def Torch_AtenConv1dPaddingOp : Torch_Op<"aten.conv1d.padding", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::conv1d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$input,
+    AnyTorchTensorType:$weight,
+    AnyTorchOptionalTensorType:$bias,
+    AnyTorchListOfTorchIntType:$stride,
+    Torch_StringType:$padding,
+    AnyTorchListOfTorchIntType:$dilation,
+    Torch_IntType:$groups
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenConv1dPaddingOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 7, 1);
+    }
+    void AtenConv1dPaddingOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 7, 1);
+    }
+  }];
+}
+
 def Torch_AtenConvTranspose1dOp : Torch_Op<"aten.conv_transpose1d", [
     AllowsTypeRefinement,
     HasValueSemantics,
diff --git a/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp b/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
index 1cc02a48f37f..d9c68082a5c7 100644
--- a/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
+++ b/lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
@@ -10024,10 +10024,65 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    %0 = call @__torch__.torch.jit._shape_functions.conv2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
 "    return %0 : !torch.list<int>\n"
 "  }\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.conv2d.padding\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.str, %arg5: !torch.list<int>, %arg6: !torch.int) -> !torch.list<int> {\n"
+"    %0 = call @__torch__._conv_padding(%arg1, %arg5, %arg4) : (!torch.list<int>, !torch.list<int>, !torch.str) -> !torch.list<int>\n"
+"    %1 = call @__torch__.torch.jit._shape_functions.conv2d(%arg0, %arg1, %arg2, %arg3, %0, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
+"    return %1 : !torch.list<int>\n"
+"  }\n"
+"  func.func @__torch__._conv_padding(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.str) -> !torch.list<int> {\n"
+"    %true = torch.constant.bool true\n"
+"    %int-1 = torch.constant.int -1\n"
+"    %str = torch.constant.str \"same\"\n"
+"    %none = torch.constant.none\n"
+"    %str_0 = torch.constant.str \"AssertionError: conv: weight must be at least 3 dimensional.\"\n"
+"    %int2 = torch.constant.int 2\n"
+"    %int0 = torch.constant.int 0\n"
+"    %int1 = torch.constant.int 1\n"
+"    %0 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int\n"
+"    %1 = torch.aten.gt.int %0, %int2 : !torch.int, !torch.int -> !torch.bool\n"
+"    torch.prim.If %1 -> () {\n"
+"      torch.prim.If.yield\n"
+"    } else {\n"
+"      torch.prim.RaiseException %str_0, %none : !torch.str, !torch.none\n"
+"      torch.prim.If.yield\n"
+"    }\n"
+"    %2 = torch.aten.sub.int %0, %int2 : !torch.int, !torch.int -> !torch.int\n"
+"    %3 = torch.prim.ListConstruct %int0 : (!torch.int) -> !torch.list<int>\n"
+"    %4 = torch.aten.mul.left_t %3, %2 : !torch.list<int>, !torch.int -> !torch.list<int>\n"
+"    %5 = torch.aten.eq.str %arg2, %str : !torch.str, !torch.str -> !torch.bool\n"
+"    torch.prim.If %5 -> () {\n"
+"      %6 = torch.aten.sub.int %2, %int1 : !torch.int, !torch.int -> !torch.int\n"
+"      %7 = torch.aten.len.t %arg1 : !torch.list<int> -> !torch.int\n"
+"      %8 = torch.aten.__range_length %6, %int-1, %int-1 : !torch.int, !torch.int, !torch.int -> !torch.int\n"
+"      %9 = torch.prim.ListConstruct %7, %8 : (!torch.int, !torch.int) -> !torch.list<int>\n"
+"      %10 = torch.prim.min.self_int %9 : !torch.list<int> -> !torch.int\n"
+"      torch.prim.Loop %10, %true, init() {\n"
+"      ^bb0(%arg3: !torch.int):\n"
+"        %11 = torch.aten.__getitem__.t %arg1, %arg3 : !torch.list<int>, !torch.int -> !torch.int\n"
+"        %12 = torch.aten.__derive_index %arg3, %6, %int-1 : !torch.int, !torch.int, !torch.int -> !torch.int\n"
+"        %13 = torch.aten.add.int %int2, %12 : !torch.int, !torch.int -> !torch.int\n"
+"        %14 = torch.aten.__getitem__.t %arg0, %13 : !torch.list<int>, !torch.int -> !torch.int\n"
+"        %15 = torch.aten.sub.int %14, %int1 : !torch.int, !torch.int -> !torch.int\n"
+"        %16 = torch.aten.mul.int %11, %15 : !torch.int, !torch.int -> !torch.int\n"
+"        %17 = torch.aten.floordiv.int %16, %int2 : !torch.int, !torch.int -> !torch.int\n"
+"        %18 = torch.aten._set_item.t %4, %12, %17 : !torch.list<int>, !torch.int, !torch.int -> !torch.list<int>\n"
+"        torch.prim.Loop.condition %true, iter()\n"
+"      } : (!torch.int, !torch.bool) -> ()\n"
+"      torch.prim.If.yield\n"
+"    } else {\n"
+"      torch.prim.If.yield\n"
+"    }\n"
+"    return %4 : !torch.list<int>\n"
+"  }\n"
 "  func.func @\"__torch_mlir_shape_fn.aten.conv3d\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.int) -> !torch.list<int> {\n"
 "    %0 = call @__torch__.torch.jit._shape_functions.conv3d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
 "    return %0 : !torch.list<int>\n"
 "  }\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.conv3d.padding\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.str, %arg5: !torch.list<int>, %arg6: !torch.int) -> !torch.list<int> {\n"
+"    %0 = call @__torch__._conv_padding(%arg1, %arg5, %arg4) : (!torch.list<int>, !torch.list<int>, !torch.str) -> !torch.list<int>\n"
+"    %1 = call @__torch__.torch.jit._shape_functions.conv3d(%arg0, %arg1, %arg2, %arg3, %0, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
+"    return %1 : !torch.list<int>\n"
+"  }\n"
 "  func.func @\"__torch_mlir_shape_fn.aten.conv_transpose2d.input\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.int, %arg7: !torch.list<int>) -> !torch.list<int> {\n"
 "    %0 = torch.derefine %arg3 : !torch.list<int> to !torch.optional<list<int>>\n"
 "    %1 = torch.derefine %arg4 : !torch.list<int> to !torch.optional<list<int>>\n"
@@ -10097,6 +10152,14 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    %1 = call @__torch__.torch.jit._shape_functions.conv_forwards(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %false, %0, %int1) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
 "    return %1 : !torch.list<int>\n"
 "  }\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.conv1d.padding\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.str, %arg5: !torch.list<int>, %arg6: !torch.int) -> !torch.list<int> {\n"
+"    %false = torch.constant.bool false\n"
+"    %int1 = torch.constant.int 1\n"
+"    %0 = call @__torch__._conv_padding(%arg1, %arg5, %arg4) : (!torch.list<int>, !torch.list<int>, !torch.str) -> !torch.list<int>\n"
+"    %1 = torch.prim.ListConstruct : () -> !torch.list<int>\n"
+"    %2 = call @__torch__.torch.jit._shape_functions.conv_forwards(%arg0, %arg1, %arg2, %arg3, %0, %arg5, %false, %1, %int1) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
+"    return %2 : !torch.list<int>\n"
+"  }\n"
 "  func.func @\"__torch_mlir_shape_fn.aten.conv_transpose1d\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.int, %arg7: !torch.list<int>) -> !torch.list<int> {\n"
 "    %true = torch.constant.bool true\n"
 "    %0 = call @\"__torch_mlir_shape_fn.aten.convolution\"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg7, %true, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
diff --git a/lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp b/lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
index 2f276b1a296f..2d24d3e2c20d 100644
--- a/lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
+++ b/lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
@@ -5169,6 +5169,82 @@ class DecomposeAtenConv2dOp : public OpRewritePattern<AtenConv2dOp> {
 };
 } // namespace
 
+// Decompose aten.conv(1/2/3)d.padding to aten.convolution
+namespace {
+template <typename ConvPaddingOp>
+class DecomposeAtenConvPaddingOp : public OpRewritePattern<ConvPaddingOp> {
+public:
+  using OpRewritePattern<ConvPaddingOp>::OpRewritePattern;
+  LogicalResult matchAndRewrite(ConvPaddingOp op,
+                                PatternRewriter &rewriter) const override {
+
+    Location loc = op.getLoc();
+
+    Value weight = op.getWeight();
+    std::optional<unsigned> maybeRank = getTensorRank(weight);
+    if (!maybeRank) {
+      return rewriter.notifyMatchFailure(op, "expected weight to have a rank");
+    }
+    unsigned rank = *maybeRank;
+    // first 2 dimensions of weight are out_channels and in_channels / groups
+    if (rank < 3)
+      return rewriter.notifyMatchFailure(
+          op, "ConvPaddingOp weight must be at least 3 dimensional.");
+
+    std::string padding_str;
+    if (!matchPattern(op.getPadding(), m_TorchConstantStr(padding_str)))
+      return rewriter.notifyMatchFailure(op,
+                                         "padding must be a constant string");
+
+    Value zero = rewriter.create<Torch::ConstantIntOp>(
+        loc, rewriter.getI64IntegerAttr(0));
+
+    SmallVector<Value> paddingValues;
+    if (padding_str == "valid") {
+      // valid means no padding
+      for (unsigned iRank = 2; iRank < rank; iRank++) {
+        paddingValues.push_back(zero);
+      }
+    } else {
+
+      SmallVector<Value> dilation;
+      getListConstructElements(op.getDilation(), dilation);
+
+      Value one =
+          rewriter.create<Torch::ConstantIntOp>(loc, rewriter.getI64IntegerAttr(1));
+      Value two =
+          rewriter.create<Torch::ConstantIntOp>(loc, rewriter.getI64IntegerAttr(2));
+      for (unsigned iRank = 2; iRank < rank; iRank++) {
+        Value dim = rewriter.create<Torch::ConstantIntOp>(
+            loc, rewriter.getI64IntegerAttr(iRank));
+        Value kernelSize =
+            rewriter.create<AtenSizeIntOp>(loc, weight, dim);
+        Value kernelSizeMinusOne =
+            rewriter.create<AtenSubIntOp>(loc, kernelSize, one);
+        Value padding = rewriter.create<AtenMulIntOp>(
+            loc, dilation[iRank - 2], kernelSizeMinusOne);
+        padding = rewriter.create<AtenFloordivIntOp>(loc, padding, two);
+        paddingValues.push_back(padding);
+      }
+    }
+
+    Value emptyList = rewriter.create<PrimListConstructOp>(
+        op.getLoc(), Torch::ListType::get(Torch::IntType::get(op.getContext())),
+        SmallVector<Value>());
+    Value cstFalse = rewriter.create<Torch::ConstantBoolOp>(op.getLoc(), false);
+    Value padding = rewriter.create<PrimListConstructOp>(
+        op.getLoc(), Torch::ListType::get(Torch::IntType::get(op.getContext())),
+        paddingValues);
+    rewriter.replaceOpWithNewOp<AtenConvolutionOp>(
+        op, op->getResultTypes(), op.getInput(), op.getWeight(), op.getBias(),
+        op.getStride(), padding, op.getDilation(), cstFalse, emptyList,
+        op.getGroups());
+
+    return success();
+  }
+};
+} // namespace
+
 // Decompose aten.conv3d to aten.convolution
 namespace {
 class DecomposeAtenConv3dOp : public OpRewritePattern<AtenConv3dOp> {
@@ -10941,6 +11017,12 @@ class DecomposeComplexOpsPass
     addPatternIfTargetOpIsIllegal<DecomposeAtenConvTbcOp>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenConv1dOp>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenConv2dOp>(patterns);
+    addPatternIfTargetOpIsIllegal<
+        DecomposeAtenConvPaddingOp<AtenConv1dPaddingOp>>(patterns);
+    addPatternIfTargetOpIsIllegal<
+        DecomposeAtenConvPaddingOp<AtenConv2dPaddingOp>>(patterns);
+    addPatternIfTargetOpIsIllegal<
+        DecomposeAtenConvPaddingOp<AtenConv3dPaddingOp>>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenConv3dOp>(patterns);
     addPatternIfTargetOpIsIllegal<DecomposeAtenConvTranspose1dOp>(
         patterns);
diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index e0011b9a347e..d9551162fca9 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -2055,6 +2055,8 @@
     "Conv2dWithPaddingDilationStrideStaticModule_depthwise",
"Conv2dWithPaddingDilationStrideStaticModule_depthwise_multiplier", "Conv2dWithPaddingModule_basic", + "Conv2dWithValidPaddingModule_basic", + "Conv2dWithSamePaddingModule_basic", "Convolution2DStaticModule_basic", "CosineSimilarityStaticModule_basic", "DetachModule_basic", @@ -2545,6 +2547,8 @@ "Conv2dNoPaddingModule_basic", "Conv2dWithPaddingDilationStrideModule_basic", "Conv2dWithPaddingModule_basic", + "Conv2dWithSamePaddingModule_basic", + "Conv2dWithValidPaddingModule_basic", # failed to legalize operation 'torch.operator' "ElementwisePreluModule_basic", "ElementwisePreluStaticModule_basic", @@ -2872,6 +2876,8 @@ "ContainsIntList_False", "ContainsIntList_True", "Conv1dModule_basic", + "Conv1dWithSamePaddingModule_basic", + "Conv1dWithValidPaddingModule_basic", "Conv2dBiasNoPaddingModule_basic", "Conv2dModule_basic", "Conv2dNoPaddingModule_basic", @@ -2884,7 +2890,11 @@ "Conv2dQInt8PerChannelModule_grouped", "Conv2dWithPaddingDilationStrideModule_basic", "Conv2dWithPaddingModule_basic", + "Conv2dWithSamePaddingModule_basic", + "Conv2dWithValidPaddingModule_basic", "Conv3dModule_basic", + "Conv3dWithSamePaddingModule_basic", + "Conv3dWithValidPaddingModule_basic", "ConvTbcModule_basic", "ConvTranspose2DQInt8_basic", "Conv_Transpose2dModule_basic", @@ -3572,6 +3582,8 @@ "ContainsIntList_True", "Conv1dModule_basic", "Conv1dDepthwiseWithPaddingDilationStrideStaticModule_basic", + "Conv1dWithSamePaddingModule_basic", + "Conv1dWithValidPaddingModule_basic", "Conv2dQInt8Module_basic", "Conv2dQInt8Module_depthwise", "Conv2dQInt8Module_grouped", @@ -3582,6 +3594,8 @@ "Conv2dWithPaddingDilationStrideStaticModule_grouped", "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier", "Conv3dModule_basic", + "Conv3dWithSamePaddingModule_basic", + "Conv3dWithValidPaddingModule_basic", "ConvTbcModule_basic", "ConvTranspose2DQInt8_basic", "Conv_Transpose2dModule_basic", @@ -4193,6 +4207,8 @@ "ContainsIntList_False", "ContainsIntList_True", "Conv1dModule_basic", + "Conv1dWithSamePaddingModule_basic", + "Conv1dWithValidPaddingModule_basic", "Conv1dDepthwiseWithPaddingDilationStrideStaticModule_basic", "Conv2dBiasNoPaddingModule_basic", "Conv2dModule_basic", @@ -4208,7 +4224,11 @@ "Conv2dWithPaddingDilationStrideStaticModule_grouped", "Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier", "Conv2dWithPaddingModule_basic", + "Conv2dWithSamePaddingModule_basic", + "Conv2dWithValidPaddingModule_basic", "Conv3dModule_basic", + "Conv3dWithSamePaddingModule_basic", + "Conv3dWithValidPaddingModule_basic", "ConvTbcModule_basic", "ConvTranspose2DQInt8_basic", "Conv_Transpose2dModule_basic", diff --git a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py index 8dfacca3238b..643651a7b1f8 100644 --- a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py +++ b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py @@ -1839,9 +1839,32 @@ def torchvision〇deform_conv2d〡dtype(input_rank_dtype: Tuple[int, int], weigh def aten〇conv2d〡shape(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: List[int] = (1, 1,), padding: List[int] = (0, 0,), dilation: List[int] = (1, 1,), groups: int = 1) -> List[int]: return upstream_shape_functions.conv2d(input, weight, bias, stride, padding, dilation, groups) +def _conv_padding(weight: List[int], dilation: List[int], padding: str): + rank = len(weight) + # first 
+    # first 2 dimensions of weight correspond to out_channels and in_channels/groups
+    num_unpadded_dims = 2
+    assert rank > num_unpadded_dims, "conv: weight must be at least 3 dimensional."
+    num_kernel_elems = rank - num_unpadded_dims
+    padding_int = [0] * num_kernel_elems
+    if padding == "same":
+        for d, i in zip(
+            dilation, range(num_kernel_elems - 1, -1, -1)
+        ):
+            padding_val = d * (weight[num_unpadded_dims+i] - 1)
+            padding_int[i] = padding_val // 2
+    return padding_int
+
+def aten〇conv2d〇padding〡shape(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: List[int] = (1, 1,), padding: str = "valid", dilation: List[int] = (1, 1,), groups: int = 1) -> List[int]:
+    padding_int = _conv_padding(weight, dilation, padding)
+    return upstream_shape_functions.conv2d(input, weight, bias, stride, padding_int, dilation, groups)
+
 def aten〇conv3d〡shape(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: List[int] = (1, 1, 1,), padding: List[int] = (0, 0, 0,), dilation: List[int] = (1, 1, 1,), groups: int = 1) -> List[int]:
     return upstream_shape_functions.conv3d(input, weight, bias, stride, padding, dilation, groups)
 
+def aten〇conv3d〇padding〡shape(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: List[int] = (1, 1, 1,), padding: str = "valid", dilation: List[int] = (1, 1, 1,), groups: int = 1) -> List[int]:
+    padding_int = _conv_padding(weight, dilation, padding)
+    return upstream_shape_functions.conv3d(input, weight, bias, stride, padding_int, dilation, groups)
+
 def aten〇conv_transpose2d〇input〡shape(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: List[int] = (1, 1,), padding: List[int] = (0, 0,), output_padding: List[int] = (0, 0,), groups: int = 1, dilation: List[int] = (1, 1,)) -> List[int]:
     return upstream_shape_functions.conv_transpose2d_input(input, weight, bias, stride, padding, output_padding, groups, dilation)
 
@@ -1883,6 +1906,10 @@ def aten〇convolution〡shape(input: List[int], weight: List[int], bias: Option
 def aten〇conv1d〡shape(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: List[int] = (1,), padding: List[int] = (0,), dilation: List[int] = (1,), groups: int = 1) -> List[int]:
     return upstream_shape_functions.conv_forwards(input, weight, bias, stride, padding, dilation, transposed=False, output_padding=[], groups=1)
 
+def aten〇conv1d〇padding〡shape(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: List[int] = (1,), padding: str = "valid", dilation: List[int] = (1,), groups: int = 1) -> List[int]:
+    padding_int = _conv_padding(weight, dilation, padding)
+    return upstream_shape_functions.conv_forwards(input, weight, bias, stride, padding_int, dilation, transposed=False, output_padding=[], groups=1)
+
 def aten〇conv_transpose1d〡shape(input: List[int], weight: List[int], bias: Optional[List[int]] = None, stride: List[int] = (1,), padding: List[int] = (0,), output_padding: List[int] = (0,), groups: int = 1, dilation: List[int] = (1,)) -> List[int]:
     return aten〇convolution〡shape(input, weight, bias, stride, padding, dilation, True, output_padding, groups)
 
diff --git a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py
index 31916f7fe896..a3a2383b509a 100644
--- a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py
+++ b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py
@@ -574,12 +574,21 @@ def emit_with_mutating_variants(key, **kwargs):
     emit(
         "aten::conv3d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)"
     )
+    emit(
+        "aten::conv3d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)"
+    )
     emit(
         "aten::conv2d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)"
     )
+    emit(
+        "aten::conv2d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)"
+    )
     emit(
         "aten::conv1d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)"
     )
+    emit(
+        "aten::conv1d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)"
+    )
     emit(
         "aten::conv_transpose1d : (Tensor, Tensor, Tensor?, int[], int[], int[], int, int[]) -> (Tensor)"
     )
diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
index e6332579d575..7a45dd7fc0ce 100644
--- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
+++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
@@ -191,6 +191,54 @@ def Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier(
     module.forward(tu.rand(5, 4, 10, 20))
 
 
+class Conv2dWithSamePaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(2, 10, 3, bias=False, padding="same")
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv2dWithSamePaddingModule())
+def Conv2dWithSamePaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10, 20)
+    module.forward(t)
+
+
+class Conv2dWithValidPaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv2d(2, 10, 3, bias=False, padding="valid")
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv2dWithValidPaddingModule())
+def Conv2dWithValidPaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10, 20)
+    module.forward(t)
+
+
 # ==============================================================================
 
 
@@ -1094,6 +1142,63 @@ def Conv1dDepthwiseWithPaddingDilationStrideStaticModule_basic(module, tu: TestU
     module.forward(inputVec, weight)
 
 
+class Conv1dWithSamePaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        torch.manual_seed(0)
+        self.conv = torch.nn.Conv1d(2, 10, 3, bias=False, padding="same")
+        self.train(False)
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1], torch.float32, True),
+        ]
+    )
+    def forward(self, x):
+        return self.conv(x)
+
+
+@register_test_case(module_factory=lambda: Conv1dWithSamePaddingModule())
+def Conv1dWithSamePaddingModule_basic(module, tu: TestUtils):
+    t = tu.rand(5, 2, 10)
+    module.forward(t)
+
+
+class Conv1dWithValidPaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1], torch.float32, True),
+            ([-1, -1, -1], torch.float32, True),
+            ([-1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.conv1d(
+            inputVec,
+            weight,
+            bias=bias,
+            stride=[1],
+            padding="valid",
+            dilation=[1],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv1dWithValidPaddingModule())
+def Conv1dWithValidPaddingModule_basic(module, tu: TestUtils):
+    inputVec = tu.rand(2, 2, 6)
+    weight = torch.randn(8, 2, 3)
+    bias = torch.randn(8)
+    module.forward(inputVec, weight, bias)
+
+
 class Conv2dModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -1160,6 +1265,72 @@ def Conv3dModule_basic(module, tu: TestUtils):
     module.forward(inputVec, weight, bias)
 
 
+class Conv3dWithSamePaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1, -1], torch.float32, True),
+            ([-1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.conv3d(
+            inputVec,
+            weight,
+            bias=bias,
+            stride=[1, 1, 1],
+            padding="same",
+            dilation=[1, 1, 1],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv3dWithSamePaddingModule())
+def Conv3dWithSamePaddingModule_basic(module, tu: TestUtils):
+    inputVec = tu.rand(2, 2, 6, 6, 6)
+    weight = torch.randn(8, 2, 3, 3, 3)
+    bias = torch.randn(8)
+    module.forward(inputVec, weight, bias)
+
+
+class Conv3dWithValidPaddingModule(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    @export
+    @annotate_args(
+        [
+            None,
+            ([-1, -1, -1, -1, -1], torch.float32, True),
+            ([-1, -1, -1, -1, -1], torch.float32, True),
+            ([-1], torch.float32, True),
+        ]
+    )
+    def forward(self, inputVec, weight, bias):
+        return torch.ops.aten.conv3d(
+            inputVec,
+            weight,
+            bias=bias,
+            stride=[1, 1, 1],
+            padding="valid",
+            dilation=[1, 1, 1],
+            groups=1,
+        )
+
+
+@register_test_case(module_factory=lambda: Conv3dWithValidPaddingModule())
+def Conv3dWithValidPaddingModule_basic(module, tu: TestUtils):
+    inputVec = tu.rand(2, 2, 6, 6, 6)
+    weight = torch.randn(8, 2, 3, 3, 3)
+    bias = torch.randn(8)
+    module.forward(inputVec, weight, bias)
+
+
 class ConvTbcModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
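
The patch above resolves the string padding to an explicit integer list before reaching aten.convolution. A minimal Python sketch of that arithmetic, mirroring _conv_padding from abstract_interp_lib_gen.py (the helper name and the asserts below are illustrative, not part of the patch):

from typing import List

def conv_string_padding(weight_shape: List[int], dilation: List[int],
                        padding: str) -> List[int]:
    # weight_shape = [out_channels, in_channels // groups, *kernel_size]
    num_spatial = len(weight_shape) - 2
    assert num_spatial >= 1, "conv: weight must be at least 3 dimensional."
    pad = [0] * num_spatial
    if padding == "same":
        # Pad each spatial dim by dilation * (kernel_size - 1) // 2 so that a
        # stride-1 convolution preserves the spatial size. The floor division
        # yields symmetric padding, which is exact for odd kernel sizes.
        for d, i in zip(dilation, range(num_spatial - 1, -1, -1)):
            pad[i] = d * (weight_shape[2 + i] - 1) // 2
    return pad  # "valid" leaves every entry at zero

# A 3x3 kernel with dilation 1 needs one element of padding per side:
assert conv_string_padding([10, 2, 3, 3], [1, 1], "same") == [1, 1]
assert conv_string_padding([10, 2, 3, 3], [1, 1], "valid") == [0, 0]

DecomposeAtenConvPaddingOp emits the same per-dimension quantity at the IR level (zero for "valid", dilation * (kernel_size - 1) // 2 for "same"); the two formulations agree whenever dilation is uniform across the spatial dimensions.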