diff --git a/kernels/portable/cpu/op_detach_copy.cpp b/kernels/portable/cpu/op_detach_copy.cpp
index 844f259f6de..a8db4b0804b 100644
--- a/kernels/portable/cpu/op_detach_copy.cpp
+++ b/kernels/portable/cpu/op_detach_copy.cpp
@@ -33,6 +33,9 @@ Tensor& detach_copy_out(RuntimeContext& ctx, const Tensor& self, Tensor& out) {
       out,
       "Failed to resize output tensor.");
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(self, out), InvalidArgument, out);
+
   ET_KERNEL_CHECK(
       ctx, tensors_have_same_shape_and_dtype(self, out), InvalidArgument, out);
diff --git a/kernels/portable/cpu/op_diagonal_copy.cpp b/kernels/portable/cpu/op_diagonal_copy.cpp
index 67b14c3f792..0de86ea0a64 100644
--- a/kernels/portable/cpu/op_diagonal_copy.cpp
+++ b/kernels/portable/cpu/op_diagonal_copy.cpp
@@ -73,6 +73,11 @@ Tensor& diagonal_copy_out(
   ET_KERNEL_CHECK(
       ctx, check_diagonal_copy_args(in, dim1, dim2, out), InvalidArgument, out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
+
+  ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);
+
   if (dim1 < 0) {
     dim1 += nonzero_dim(in);
   }
diff --git a/kernels/portable/cpu/op_div.cpp b/kernels/portable/cpu/op_div.cpp
index 84591cb0ebd..db2079c2ff2 100644
--- a/kernels/portable/cpu/op_div.cpp
+++ b/kernels/portable/cpu/op_div.cpp
@@ -41,6 +41,9 @@ div_out(RuntimeContext& ctx, const Tensor& a, const Tensor& b, Tensor& out) {
       InvalidArgument,
       out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = b.scalar_type();
 
@@ -97,6 +100,9 @@ Tensor& div_out_mode(
       InvalidArgument,
       out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = b.scalar_type();
   ScalarType common_type = get_compute_type(a_type, b_type);
@@ -159,6 +165,9 @@ Tensor& div_scalar_out(
   ScalarType common_type = isFloatingType(a_type) ? a_type : ScalarType::Float;
   ScalarType out_type = out.scalar_type();
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
+
   ET_KERNEL_CHECK(ctx, common_type == out_type, InvalidArgument, out);
 
   ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "div.Scalar_out", CTYPE_A, [&]() {
diff --git a/kernels/portable/cpu/op_embedding.cpp b/kernels/portable/cpu/op_embedding.cpp
index ffa43da7395..1b493435af5 100644
--- a/kernels/portable/cpu/op_embedding.cpp
+++ b/kernels/portable/cpu/op_embedding.cpp
@@ -102,6 +102,15 @@ Tensor& embedding_out(
       out.size(1),
       weight.size(1));
 
+  ET_KERNEL_CHECK(
+      ctx,
+      tensors_have_same_dim_order(weight, indices, out),
+      InvalidArgument,
+      out);
+
+  ET_KERNEL_CHECK(
+      ctx, tensor_is_default_dim_order(weight), InvalidArgument, out);
+
   ScalarType ix_type = indices.scalar_type();
   ET_CHECK_MSG(
       ix_type == ScalarType::Long || ix_type == ScalarType::Int,
diff --git a/kernels/portable/cpu/op_eq.cpp b/kernels/portable/cpu/op_eq.cpp
index 8a4e4656f08..8fef70e564f 100644
--- a/kernels/portable/cpu/op_eq.cpp
+++ b/kernels/portable/cpu/op_eq.cpp
@@ -34,6 +34,9 @@ Tensor& eq_tensor_out(
   ScalarType b_type = b.scalar_type();
   ScalarType out_type = out.scalar_type();
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);
+
   ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "eq.Scalar_out", CTYPE_A, [&]() {
     ET_SWITCH_REAL_TYPES_AND(
         Bool, b_type, ctx, "eq.Scalar_out", CTYPE_B, [&]() {
@@ -80,6 +83,9 @@ Tensor& eq_scalar_out(
   ScalarType b_type = utils::get_scalar_dtype(b);
   ScalarType out_type = out.scalar_type();
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
+
   ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "eq.Scalar_out", CTYPE_A, [&]() {
     ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "eq.Scalar_out", CTYPE_B, [&]() {
       using CTYPE_IN =
diff --git a/kernels/portable/cpu/op_expand_copy.cpp b/kernels/portable/cpu/op_expand_copy.cpp
index 5f0d19adc59..67e9149ff9f 100644
--- a/kernels/portable/cpu/op_expand_copy.cpp
+++ b/kernels/portable/cpu/op_expand_copy.cpp
@@ -85,6 +85,10 @@ Tensor& expand_copy_out(
       InvalidArgument,
       out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(self, out), InvalidArgument, out);
+  ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(self), InvalidArgument, out);
+
   // Holds the result of expand_sizes converted to repeat sizes
   int64_t repeats[kTensorDimensionLimit];
   const auto repeats_size{map_expand_to_repeats(
diff --git a/kernels/portable/cpu/op_fill.cpp b/kernels/portable/cpu/op_fill.cpp
index 60ebd5de5ab..d908c53e0c7 100644
--- a/kernels/portable/cpu/op_fill.cpp
+++ b/kernels/portable/cpu/op_fill.cpp
@@ -31,6 +31,9 @@ Tensor& fill_scalar_out(
   ET_KERNEL_CHECK(ctx, a_type == out_type, InvalidArgument, out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
+
   // Resize for dynamic shape
   ET_KERNEL_CHECK_MSG(
       ctx,
@@ -67,6 +70,9 @@ Tensor& fill_tensor_out(
   // Assert `b` must be a scalar tensor.
   ET_KERNEL_CHECK(ctx, tensor_is_scalar(b), InvalidArgument, out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = b.scalar_type();
   ScalarType out_type = out.scalar_type();
diff --git a/kernels/portable/cpu/op_flip.cpp b/kernels/portable/cpu/op_flip.cpp
index 10c52439d11..c88585f88a5 100644
--- a/kernels/portable/cpu/op_flip.cpp
+++ b/kernels/portable/cpu/op_flip.cpp
@@ -45,6 +45,9 @@ flip_out(RuntimeContext& ctx, const Tensor& in, IntArrayRef dims, Tensor& out) {
   ET_KERNEL_CHECK(
       ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
+
   ET_KERNEL_CHECK(ctx, check_flip_args(in, dims, out), InvalidArgument, out);
 
   bool flip_dim_data[kTensorDimensionLimit];
diff --git a/kernels/portable/cpu/op_floor_divide.cpp b/kernels/portable/cpu/op_floor_divide.cpp
index 0514df0ca25..88c6d5e7e79 100644
--- a/kernels/portable/cpu/op_floor_divide.cpp
+++ b/kernels/portable/cpu/op_floor_divide.cpp
@@ -87,6 +87,9 @@ Tensor& floor_divide_out(
   ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = b.scalar_type();
   ScalarType common_type = promoteTypes(a_type, b_type);
diff --git a/kernels/portable/cpu/op_fmod.cpp b/kernels/portable/cpu/op_fmod.cpp
index 42f83731199..6743eb8cf8a 100644
--- a/kernels/portable/cpu/op_fmod.cpp
+++ b/kernels/portable/cpu/op_fmod.cpp
@@ -85,6 +85,9 @@ Tensor& fmod_Tensor_out(
       InvalidArgument,
       out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = b.scalar_type();
   ScalarType common_type = promoteTypes(a_type, b_type);
@@ -139,6 +142,9 @@ Tensor& fmod_Scalar_out(
       out,
       "Failed to resize output tensor.");
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = utils::get_scalar_dtype(b);
   ScalarType common_type = utils::promote_type_with_scalar(a_type, b);
diff --git a/kernels/portable/cpu/op_full_like.cpp b/kernels/portable/cpu/op_full_like.cpp
index 880e02efe66..0ce8923ccdf 100644
--- a/kernels/portable/cpu/op_full_like.cpp
+++ b/kernels/portable/cpu/op_full_like.cpp
@@ -34,6 +34,11 @@ Tensor& full_like_out(
         "memory_format must be contiguous");
   }
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
+
+  ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);
+
   // Resize for dynamic shape
   ET_KERNEL_CHECK_MSG(
       ctx,
diff --git a/kernels/portable/cpu/op_ge.cpp b/kernels/portable/cpu/op_ge.cpp
index 88e056e7362..d89c45cca45 100644
--- a/kernels/portable/cpu/op_ge.cpp
+++ b/kernels/portable/cpu/op_ge.cpp
@@ -31,6 +31,9 @@ Tensor& ge_tensor_out(
       InvalidArgument,
       out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = b.scalar_type();
   ScalarType out_type = out.scalar_type();
@@ -77,6 +80,9 @@ Tensor& ge_scalar_out(
       out,
       "Failed to resize output tensor.");
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = utils::get_scalar_dtype(b);
   ScalarType common_type = utils::promote_type_with_scalar(a_type, b);
diff --git a/kernels/portable/cpu/op_gelu.cpp b/kernels/portable/cpu/op_gelu.cpp
index 0432c028141..4fadd2aff58 100644
--- a/kernels/portable/cpu/op_gelu.cpp
+++ b/kernels/portable/cpu/op_gelu.cpp
@@ -34,6 +34,9 @@ Tensor& gelu_out(
   ET_KERNEL_CHECK(
       ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
+
   ET_SWITCH_FLOAT_TYPES(in.scalar_type(), ctx, "gelu.out", CTYPE, [&]() {
     if (approximate == "tanh") {
       apply_unary_map_fn(
diff --git a/kernels/portable/cpu/op_glu.cpp b/kernels/portable/cpu/op_glu.cpp
index 5a075ff35ca..4ecdbbc1caf 100644
--- a/kernels/portable/cpu/op_glu.cpp
+++ b/kernels/portable/cpu/op_glu.cpp
@@ -144,6 +144,9 @@ glu_out(RuntimeContext& ctx, const Tensor& self, int64_t dim, Tensor& out) {
   ET_KERNEL_CHECK(
       ctx, resize_glu_out(self, dim, out) == Error::Ok, InvalidArgument, out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(self, out), InvalidArgument, out);
+
   ET_KERNEL_CHECK(ctx, check_glu_args(self, dim, out), InvalidArgument, out);
 
   const size_t non_negative_dim = dim < 0 ? dim + self.dim() : dim;
diff --git a/kernels/portable/cpu/op_gt.cpp b/kernels/portable/cpu/op_gt.cpp
index 56d8657c9b5..4c5df64cb69 100644
--- a/kernels/portable/cpu/op_gt.cpp
+++ b/kernels/portable/cpu/op_gt.cpp
@@ -31,6 +31,9 @@ Tensor& gt_tensor_out(
       InvalidArgument,
       out);
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = b.scalar_type();
   ScalarType out_type = out.scalar_type();
@@ -77,6 +80,9 @@ Tensor& gt_scalar_out(
       out,
       "Failed to resize output tensor.");
 
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
+
   ScalarType a_type = a.scalar_type();
   ScalarType b_type = utils::get_scalar_dtype(b);
   ScalarType common_type = utils::promote_type_with_scalar(a_type, b);
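
Every hunk above applies the same validation pattern at kernel entry: `tensors_have_same_dim_order` rejects mismatched input/output layouts, and (for copy-style ops such as diagonal_copy, expand_copy, and full_like) `tensor_is_default_dim_order` additionally rejects non-default input layouts. Both are wrapped in `ET_KERNEL_CHECK`, which logs an `InvalidArgument` error and returns `out` unchanged rather than aborting. A minimal standalone sketch of that pattern, assuming the usual ExecuTorch portable-kernel includes; `example_out` is a hypothetical op, not part of this patch:

#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
namespace executor {
namespace native {

// Hypothetical kernel used only to illustrate the check pattern this
// diff adds; it is not part of the patch itself.
Tensor& example_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
  // Resize the output to match the input, failing with InvalidArgument
  // (and returning `out` untouched) if the resize is not possible.
  ET_KERNEL_CHECK(
      ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out);

  // The check added throughout this diff: input and output must share
  // a dim order, since the portable loops index both identically.
  ET_KERNEL_CHECK(
      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);

  // Copy-style ops in the diff further require the default dim order.
  ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);

  return out;
}

} // namespace native
} // namespace executor
} // namespace torch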