From b7294d717606615ef9c4fd3be9e1e18284f68962 Mon Sep 17 00:00:00 2001
From: lvyufeng
Date: Thu, 30 Oct 2025 21:17:09 +0800
Subject: [PATCH] fix m,n class on OrangePi

---
 mindtorch/_apis/npu.py             | 10 ++++++++--
 mindtorch/nn/functional.py         |  5 ++++-
 mindtorch/nn/modules/activation.py |  2 +-
 mindtorch/nn/modules/dropout.py    |  2 +-
 4 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/mindtorch/_apis/npu.py b/mindtorch/_apis/npu.py
index 4473ded28..6ce735856 100644
--- a/mindtorch/_apis/npu.py
+++ b/mindtorch/_apis/npu.py
@@ -1887,7 +1887,7 @@ def relu6(input):
 def col2im(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
     if use_pyboost():
         return pyboost.col2im_ext_op(input, output_size, kernel_size, dilation, padding, stride)
-    return legacy.col2im(input, output_size, kernel_size, dilation, padding, stride)
+    return legacy.col2_im(input, mindspore.Tensor(output_size), kernel_size, dilation, padding, stride)
 
 def flash_attention_score(query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode):
     if use_pyboost():
@@ -1992,4 +1992,10 @@ def logaddexp(input, other):
     return y
 
 def reflection_pad_1d(input, padding):
-    return pyboost.reflection_pad_1d_op(input, padding)
\ No newline at end of file
+    return pyboost.reflection_pad_1d_op(input, padding)
+
+def replication_pad_1d(input, padding):
+    return pyboost.replication_pad_1d_op(input, padding)
+
+def hardtanh(input, min_val, max_val):
+    return pyboost.hardtanh_op(input, min_val, max_val)
\ No newline at end of file
diff --git a/mindtorch/nn/functional.py b/mindtorch/nn/functional.py
index 914550ee3..41a235228 100644
--- a/mindtorch/nn/functional.py
+++ b/mindtorch/nn/functional.py
@@ -1897,4 +1897,7 @@ def make_causal_mask(
 )
 
 def rotary_position_embedding(x, cos, sin, mode=0):
-    return ops.rotary_position_embedding(x, cos, sin, mode)
\ No newline at end of file
+    return ops.rotary_position_embedding(x, cos, sin, mode)
+
+def hardtanh(input, min_val=-1.0, max_val=1.0):
+    return execute('hardtanh', input, min_val, max_val)
\ No newline at end of file
diff --git a/mindtorch/nn/modules/activation.py b/mindtorch/nn/modules/activation.py
index 3602e3946..dcdbce877 100644
--- a/mindtorch/nn/modules/activation.py
+++ b/mindtorch/nn/modules/activation.py
@@ -289,7 +289,7 @@ def forward(self, input: Tensor) -> Tensor:
         """
         Runs the forward pass.
         """
-        return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
+        return F.hardtanh(input, self.min_val, self.max_val)
 
     def extra_repr(self) -> str:
         """
diff --git a/mindtorch/nn/modules/dropout.py b/mindtorch/nn/modules/dropout.py
index 8cafc9a1d..57c0facfb 100644
--- a/mindtorch/nn/modules/dropout.py
+++ b/mindtorch/nn/modules/dropout.py
@@ -173,7 +173,7 @@ def forward(self, input: Tensor) -> Tensor:
         """
         Runs the forward pass.
         """
-        return F.dropout2d(input, self.p, self.training, self.inplace)
+        return F.dropout2d(input, self.p, self.training)
 
 class Dropout3d(_DropoutNd):