2 changes: 1 addition & 1 deletion mindnlp/core/__init__.py
@@ -47,7 +47,7 @@
 from .amp import autocast, GradScaler

 from . import profiler, cuda, optim, amp, compiler, jit, version, __future__, overrides, \
-    return_types, linalg, fx, backends, testing, nn, fft
+    return_types, linalg, fx, backends, testing, nn, fft, _jit_internal, utils

 from ._lowrank import svd_lowrank
 from .random import get_rng_state, initial_seed, manual_seed, seed, set_rng_state
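With `_jit_internal` and `utils` added to the submodule import list, both become reachable from the package root, mirroring torch's top-level layout. A quick smoke check (illustrative, not part of the diff):

    from mindnlp import core

    # both submodules should now be importable attributes of the root package
    assert hasattr(core, "_jit_internal")
    assert hasattr(core, "utils")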
3 changes: 3 additions & 0 deletions mindnlp/core/_dynamo/utils.py
@@ -0,0 +1,3 @@
+def is_compile_supported(device_type):
+    return False
+
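The stub always reports that compilation is unsupported, so torch-derived code that probes `_dynamo` before compiling takes its eager fallback. A sketch of such a caller (the `maybe_compile` helper is hypothetical, not part of this PR):

    from mindnlp.core._dynamo.utils import is_compile_supported

    def maybe_compile(fn, device_type="Ascend"):
        # with the stub above, this branch is never taken
        if is_compile_supported(device_type):
            raise NotImplementedError("no compile backend is wired up yet")
        return fn  # eager fallback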
15 changes: 15 additions & 0 deletions mindnlp/core/_jit_internal.py
@@ -1,3 +1,18 @@
+from typing import ( # noqa: UP035, F401 # (Dict, List, Tuple) imported by torch.jit.annotations
+    Any,
+    Callable,
+    Dict,
+    Final,
+    ForwardRef,
+    get_args,
+    get_origin,
+    List,
+    Optional,
+    Tuple,
+    TypeVar,
+    Union,
+)
+
 class FunctionModifiers:
     """
     Used to denote the behavior of a function in TorchScript. See export() and
5 changes: 5 additions & 0 deletions mindnlp/core/_tensor.py
@@ -463,6 +463,11 @@ def clamp_min(self, value):
 Tensor.unsqueeze_ = ops.inplace_unsqueeze
 StubTensor.unsqueeze_ = ops.inplace_unsqueeze

+def pin_memory(self, *args, **kwargs):
+    return self
+
+Tensor.pin_memory = pin_memory
+StubTensor.pin_memory = pin_memory

 def _rebuild_from_type_v2(func, new_type, args, state):
     ret = func(*args)
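`Tensor.pin_memory` becomes an identity shim, so torch-style host-pinning idioms run unmodified even though no memory is actually pinned. Illustrative use (`core.tensor` is assumed to mirror `torch.tensor`):

    from mindnlp import core

    x = core.tensor([1.0, 2.0, 3.0])
    y = x.pin_memory()  # identity shim: returns x itself, nothing is pinned
    assert y is x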
2 changes: 1 addition & 1 deletion mindnlp/core/jit/__init__.py
@@ -9,7 +9,7 @@
     # _overload,
     # _overload_method,
     # export,
-    # Final,
+    Final,
     # Future,
     # ignore,
     # is_scripting,
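Un-commenting `Final` re-exports it from `core.jit`, matching `torch.jit.Final`, which scripted modules use to mark attributes as compile-time constants. An illustrative annotation (the `Block` module is hypothetical):

    from mindnlp.core import nn
    from mindnlp.core.jit import Final

    class Block(nn.Module):
        hidden_size: Final[int]  # marked constant, torch.jit.Final style

        def __init__(self, hidden_size):
            super().__init__()
            self.hidden_size = hidden_size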
4 changes: 2 additions & 2 deletions mindnlp/core/jit/annotations.py
@@ -1,4 +1,4 @@
-from core._jit_internal import ( # type: ignore[attr-defined]
+from .._jit_internal import ( # type: ignore[attr-defined]
     # _Await,
     # _qualified_name,
     # Any,
@@ -15,7 +15,7 @@
     # is_optional,
     # is_tuple,
     # is_union,
-    # List,
+    List,
     # Optional,
     # Tuple,
     # Union,
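The absolute `core._jit_internal` import only resolved when `core` happened to sit on `sys.path`; the relative form works however `mindnlp` is installed. With `List` un-commented it is also re-exported here (illustrative check):

    # resolves regardless of installation layout after this change
    from mindnlp.core.jit.annotations import List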
4 changes: 3 additions & 1 deletion mindnlp/core/linalg/__init__.py
@@ -1,5 +1,5 @@
 from collections import namedtuple
-from mindspore import ops
+from mindspore import ops, mint
 from mindspore.ops._primitive_cache import _get_cache_prim

 from mindnlp import core
@@ -21,3 +21,5 @@ def cholesky_ex(A, *, upper=False, check_errors=False, out=None):
     return linalg_cholesky_ex(out, info)


+def norm(A, ord=None, dim=None, keepdim=False, *, out=None, dtype=None):
+    return mint.norm(A, ord, dim, keepdim, dtype=dtype)
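The new `norm` forwards to `mint.norm` with a torch-like signature; note that `out` is accepted but not forwarded in this version. Illustrative usage, assuming the mint kernel follows `torch.norm` semantics and `core.tensor` mirrors `torch.tensor`:

    from mindnlp import core

    v = core.tensor([3.0, 4.0])
    core.linalg.norm(v)          # default vector 2-norm, expected ~5.0
    core.linalg.norm(v, ord=1)   # 1-norm, expected ~7.0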
7 changes: 6 additions & 1 deletion mindnlp/core/nn/functional.py
@@ -162,6 +162,11 @@ def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, coun

     return ops.avg_pool2d(input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)

+def adaptive_avg_pool1d(input, output_size):
+    if use_pyboost():
+        return mint.nn.functional.adaptive_avg_pool1d(input, output_size)
+    return ops.adaptive_avg_pool1d(input, output_size)
+
 def adaptive_avg_pool2d(input, output_size):
     if use_pyboost():
         return mint.nn.functional.adaptive_avg_pool2d(input, output_size)
@@ -1206,7 +1211,7 @@ def _none_or_dtype(input: Optional[core.Tensor]) -> Optional[int]:
         raise RuntimeError("input to _none_or_dtype() must be None or core.Tensor")

 def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
-    if use_pyboost():
+    if use_pyboost() and not ON_A1:
         return mint.nn.functional.unfold(input, kernel_size, dilation, padding, stride)
     return ops.unfold(input, kernel_size, dilation, padding, stride)

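Both changes follow the file's existing dispatch pattern: use the `mint` pyboost kernel when available, otherwise fall back to `ops`. The added `not ON_A1` guard keeps what is presumably the Ascend 910A-series on the `ops` path for `unfold`, suggesting the mint kernel is unavailable there. Illustrative calls (assuming `core.randn` mirrors `torch.randn`):

    import mindnlp.core.nn.functional as F
    from mindnlp import core

    x = core.randn(2, 8, 16)        # (N, C, L)
    F.adaptive_avg_pool1d(x, 4)     # -> shape (2, 8, 4)

    img = core.randn(1, 3, 32, 32)  # (N, C, H, W)
    F.unfold(img, kernel_size=3)    # -> shape (1, 27, 900)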
2 changes: 1 addition & 1 deletion mindnlp/core/nn/modules/__init__.py
@@ -8,7 +8,7 @@
 from .activation import *
 from .conv import Conv3d, Conv2d, Conv1d, ConvTranspose2d, ConvTranspose1d
 from .padding import ZeroPad2d, ConstantPad2d, ConstantPad1d, ConstantPad3d
-from .batchnorm import BatchNorm2d, BatchNorm1d
+from .batchnorm import BatchNorm2d, BatchNorm1d, SyncBatchNorm
 from .pooling import AdaptiveAvgPool2d, AvgPool1d, MaxPool2d, MaxPool1d, AdaptiveAvgPool1d, AvgPool2d
 from .flatten import Unflatten, Flatten
 from .rnn_cell import RNNCell, GRUCell, LSTMCell
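Re-exporting `SyncBatchNorm` lets torch-style model code import it alongside the other norm layers. Illustrative use (constructor assumed to follow the torch signature; cross-device synchronization depends on the backend):

    from mindnlp.core.nn import SyncBatchNorm

    bn = SyncBatchNorm(64)  # 64 = num_features, per the torch-style signature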