mindtorch/_apis/npu.py (10 changes: 8 additions & 2 deletions)
@@ -1887,7 +1887,7 @@ def relu6(input):
 def col2im(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
     if use_pyboost():
         return pyboost.col2im_ext_op(input, output_size, kernel_size, dilation, padding, stride)
-    return legacy.col2im(input, output_size, kernel_size, dilation, padding, stride)
+    return legacy.col2_im(input, mindspore.Tensor(output_size), kernel_size, dilation, padding, stride)
 
 def flash_attention_score(query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode):
     if use_pyboost():
@@ -1992,4 +1992,10 @@ def logaddexp(input, other):
     return y
 
 def reflection_pad_1d(input, padding):
-    return pyboost.reflection_pad_1d_op(input, padding)
+    return pyboost.reflection_pad_1d_op(input, padding)
+
+def replication_pad_1d(input, padding):
+    return pyboost.replication_pad_1d_op(input, padding)  # edge padding; op name assumed from the pyboost naming pattern
+
+def hardtanh(input, min_val, max_val):
+    return pyboost.hardtanh_op(input, min_val, max_val)
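The replication_pad_1d wrapper above should dispatch to an edge-padding kernel rather than the reflection one; pyboost.replication_pad_1d_op is an assumed name, inferred from the reflection_pad_1d_op naming pattern rather than confirmed against the generated pyboost bindings. As a reference for what these three new ops should compute, here is a minimal NumPy sketch (illustration only, not the repo's code):

import numpy as np

def reflection_pad_1d_ref(x, padding):
    # Mirror across the edge, excluding the edge element itself:
    # [1, 2, 3] padded by (2, 2) -> [3, 2, 1, 2, 3, 2, 1]
    left, right = padding
    return np.pad(x, (left, right), mode="reflect")

def replication_pad_1d_ref(x, padding):
    # Repeat the edge element:
    # [1, 2, 3] padded by (2, 2) -> [1, 1, 1, 2, 3, 3, 3]
    left, right = padding
    return np.pad(x, (left, right), mode="edge")

def hardtanh_ref(x, min_val=-1.0, max_val=1.0):
    # Element-wise clamp into [min_val, max_val].
    return np.clip(x, min_val, max_val)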
mindtorch/nn/functional.py (5 changes: 4 additions & 1 deletion)
@@ -1897,4 +1897,7 @@ def make_causal_mask(
 )
 
 def rotary_position_embedding(x, cos, sin, mode=0):
-    return ops.rotary_position_embedding(x, cos, sin, mode)
+    return ops.rotary_position_embedding(x, cos, sin, mode)
+
+def hardtanh(input, min_val=-1.0, max_val=1.0):
+    return execute('hardtanh', input, min_val, max_val)
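execute routes an op name to the active backend implementation, such as the hardtanh kernel wrapper added in mindtorch/_apis/npu.py above. A toy sketch of that dispatch pattern, with a hypothetical name-to-callable registry (the real dispatcher also handles device selection and tensor conversion):

# Hypothetical registry illustrating the execute-style dispatch idea.
_OPS = {}

def register(name):
    def deco(fn):
        _OPS[name] = fn
        return fn
    return deco

@register('hardtanh')
def _hardtanh_toy(x, min_val, max_val):
    # Toy scalar backend: clamp x into [min_val, max_val].
    return min(max(x, min_val), max_val)

def execute(name, *args):
    return _OPS[name](*args)

print(execute('hardtanh', 3.5, -1.0, 1.0))  # prints 1.0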
mindtorch/nn/modules/activation.py (2 changes: 1 addition & 1 deletion)
@@ -289,7 +289,7 @@ def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.hardtanh(input, self.min_val, self.max_val, self.inplace)
return F.hardtanh(input, self.min_val, self.max_val)

def extra_repr(self) -> str:
"""
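The inplace argument is no longer forwarded because the functional hardtanh defined above accepts only (input, min_val, max_val). A hypothetical usage sketch, assuming mindtorch mirrors the torch.nn surface (the tensor constructor here is an assumption):

import mindtorch
from mindtorch import nn

m = nn.Hardtanh(min_val=-2.0, max_val=2.0)
x = mindtorch.tensor([-3.0, -1.0, 0.5, 4.0])
y = m(x)  # expected: [-2.0, -1.0, 0.5, 2.0], each element clamped into [-2, 2]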
mindtorch/nn/modules/dropout.py (2 changes: 1 addition & 1 deletion)
@@ -173,7 +173,7 @@ def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.dropout2d(input, self.p, self.training, self.inplace)
return F.dropout2d(input, self.p, self.training)


class Dropout3d(_DropoutNd):
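As with Hardtanh, the inplace flag stays on the module but is not passed down, since the functional signature used here is dropout2d(input, p, training). For reference, a NumPy sketch of channel-wise dropout semantics (illustration only, not the repo's implementation):

import numpy as np

def dropout2d_ref(x, p=0.5, training=True, rng=None):
    # Zero out whole (N, C) feature maps with probability p and rescale
    # survivors by 1 / (1 - p); identity when not training.
    if not training or p == 0.0:
        return x
    if p >= 1.0:
        return np.zeros_like(x)
    rng = rng or np.random.default_rng()
    n, c = x.shape[:2]
    mask = (rng.random((n, c)) >= p).astype(x.dtype) / (1.0 - p)
    return x * mask.reshape(n, c, *([1] * (x.ndim - 2)))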