2 changes: 2 additions & 0 deletions mindnlp/__init__.py
@@ -36,6 +36,8 @@
 except:
     disable_multi_thread = None
 # for different ascend devices
+context.set_context(device_target='CPU')
+
 if platform.system().lower() == 'linux':
     SOC = MSContext.get_instance().get_ascend_soc_version()
     if ('910b' not in SOC and '310' not in SOC) or version.parse(mindspore.__version__) < version.parse('2.4.0'):
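A note on the two added lines: pinning `device_target` to CPU at import keeps MindSpore's global default off the NPU, while the Ascend-specific primitives added below opt back in per op via `Primitive.set_device('Ascend')`. A minimal sketch of that split, assuming a stock MindSpore install (names outside this diff are illustrative):

```python
from mindspore import context, ops

# Global default: everything runs on CPU unless an op says otherwise.
context.set_context(device_target='CPU')

# A single primitive can still be pinned to Ascend explicitly,
# which is the pattern mindnlp/core/_prims/ascend.py below relies on.
add_on_ascend = ops.Add().set_device('Ascend')  # requires an Ascend device
```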
18 changes: 0 additions & 18 deletions mindnlp/accelerate/__init__.py
@@ -1,18 +0,0 @@
-import sys
-import accelerate
-from transformers.utils import _LazyModule
-
-_import_structure = {
-    "utils": [
-        'DistributedType',
-
-    ]
-}
-
-sys.modules[__name__] = _LazyModule(
-    'accelerate',
-    accelerate.__file__,
-    _import_structure,
-    module_spec=__spec__,
-    extra_objects={"__version__": accelerate.__version__},
-)
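With the shim deleted, `mindnlp.accelerate` no longer masquerades as the upstream `accelerate` package via `transformers`' `_LazyModule`. A rough sketch of the resulting usage, assuming `accelerate` is installed (illustrative only):

```python
# The deleted proxy made mindnlp.accelerate resolve lazily to this module;
# after this change, import the upstream package directly:
from accelerate.utils import DistributedType

print(DistributedType.NO)  # enum member provided by accelerate
```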
File renamed without changes.
Empty file.
12 changes: 12 additions & 0 deletions mindnlp/core/_bind.py
@@ -6,6 +6,8 @@
 
 DEFAULT_DTYPE, DEFAULT_DEVICE = float32, device_('cpu')
 
+DEVICE_IN_CONTEXT = None
+
 AUTO_CAST_DTYE = {
     'cuda': float16,
     'cpu': bfloat16,
@@ -41,6 +43,16 @@ def get_default_device():
"""get default dtype"""
return DEFAULT_DEVICE

def set_device_in_context(device):
global DEVICE_IN_CONTEXT
DEVICE_IN_CONTEXT = device

def get_device_in_context():
"""get default dtype"""
if DEVICE_IN_CONTEXT is None:
return get_default_device()
return DEVICE_IN_CONTEXT

bits_map = {

}
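A hedged usage sketch of the new helpers: nothing in this diff shows the caller of `set_device_in_context` (presumably a `device`-style context manager elsewhere in `core`), so the setter is invoked directly here:

```python
from mindnlp.core import _bind

# With no context device set, the getter falls back to the default device.
assert _bind.get_device_in_context() == _bind.get_default_device()

# A context manager entering a `with device(...)` block would do this:
_bind.set_device_in_context(_bind.device_('cpu'))
print(_bind.get_device_in_context())

# ...and reset on exit:
_bind.set_device_in_context(None)
```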
159 changes: 159 additions & 0 deletions mindnlp/core/_prims/ascend.py
@@ -0,0 +1,159 @@
+import numbers
+from mindspore import ops
+from mindspore.ops.auto_generate import gen_ops_prim
+from mindspore.ops.auto_generate import pyboost_inner_prim
+from mindspore._c_expression import _empty_instance
+
+from mindnlp import core
+from mindnlp.core._C import default_generator
+
+op_list = list(filter(lambda s: s.endswith("_op"), dir(gen_ops_prim)))
+
+__all__ = []
+
+for op_name in op_list:
+    func_name = op_name.replace('_op', '')
+    __all__.append(func_name)
+    globals()[func_name] = getattr(gen_ops_prim, op_name).__class__().set_device('Ascend')
+
+def empty(*args, **kwargs):
+    return _empty_instance(*args, **kwargs, device='Ascend')
+
+def reduce_any(input, dim, keepdim):
+    if dim is None:
+        dim = ()
+    return pyboost_inner_prim.reduce_any_impl(input, dim, keepdim)
+
+__all__.append('reduce_any')
+
+def reduce_all(input, dim, keepdim):
+    if dim is None:
+        dim = ()
+    return pyboost_inner_prim.reduce_all_impl(input, dim, keepdim)
+
+__all__.append('reduce_all')
+
+broadcast_to_op = ops.Primitive('BroadcastTo').set_device('Ascend')
+def broadcast_to(*args):
+    return broadcast_to_op(*args)
+
+__all__.append('broadcast_to')
+
+cast_op = ops.Cast().set_device('Ascend')
+def cast(*args):
+    return cast_op(*args)
+
+__all__.append('cast')
+
+zeros_op = ops.Zeros().set_device('Ascend')
+def zeros(*args):
+    return zeros_op(*args)
+
+__all__.append('zeros')
+
+def softmax(*args):
+    return pyboost_inner_prim.softmax_impl(*args)
+
+__all__.append('softmax')
+
+def dropout_ext(input, p):
+    seed, offset = default_generator._step(12)  # pylint: disable=protected-access
+    return gen_ops_prim.dropout_ext_op(input, p, seed, offset)
+
+def squeeze(*args):
+    return pyboost_inner_prim.squeeze_impl(*args)
+
+__all__.append('squeeze')
+
+ones_op = ops.Ones().set_device('Ascend')
+def ones(*args):
+    return ones_op(*args)
+
+__all__.append('ones')
+
+def nllloss(*args):
+    return pyboost_inner_prim.nllloss_impl(*args)
+
+__all__.append('nllloss')
+
+def repeat_elements(*args):
+    return ops.repeat_elements(*args)
+
+__all__.append('repeat_elements')
+
+def concat(*args):
+    return pyboost_inner_prim.concat_impl(*args)
+
+__all__.append('concat')
+
+def multinomial_ext(input, num_samples, replacement, generator):
+    seed, offset = generator._step(12)  # pylint: disable=protected-access
+    return gen_ops_prim.multinomial_ext_op(input, num_samples, replacement, seed, offset)
+
+def isclose(*args):
+    return pyboost_inner_prim.isclose_impl(*args)
+
+__all__.append('isclose')
+
+tile_op = ops.Primitive('Tile').set_device('Ascend')
+def tile(*args):
+    return tile_op(*args)
+
+__all__.append('tile')
+
+def pad_v3(input_x, padding, mode='constant', value=None):
+    pad_op = ops.PadV3(mode=mode, paddings_contiguous=True).set_device('CPU')
+    if isinstance(value, (float, int)):
+        value = core.tensor(value, dtype=input_x.dtype)
+    return pad_op(input_x, padding, value)
+
+__all__.append('pad_v3')
+
+def inplace_uniform(input, from_, to_, generator_):
+    seed, offset = generator_._step(12)
+    return gen_ops_prim.inplace_uniform_op(input, from_, to_, seed, offset)
+
+def binary_cross_entropy_with_logits(*args):
+    return pyboost_inner_prim.binary_cross_entropy_with_logits_impl(*args)
+
+__all__.append('binary_cross_entropy_with_logits')
+
+def gather(input_params, input_indices, axis, batch_dims=0):
+    return ops.gather(input_params, input_indices, axis, batch_dims)
+
+__all__.append('gather')
+
+def randint(low, high, shape, dtype, generator):
+    seed, offset = generator._step(12)  # pylint: disable=protected-access
+    return gen_ops_prim.randint_op(low, high, shape, seed, offset, dtype)
+
+def stack_ext(*args):
+    return pyboost_inner_prim.stack_ext_impl(*args)
+
+__all__.append('stack_ext')
+
+def argmax_with_value(*args):
+    return pyboost_inner_prim.argmax_with_value_impl(*args)
+
+__all__.append('argmax_with_value')
+
+right_shift_op = ops.RightShift().set_device('Ascend')
+def right_shift(input, other):
+    if isinstance(other, numbers.Number):
+        other = core.Tensor(other, input.dtype)
+    return right_shift_op(input, other)
+
+tensor_mul = ops.Mul().set_device('Ascend')
+tensor_pow = ops.Pow().set_device('Ascend')
+def ldexp(input, other):
+    out = tensor_mul(input, tensor_pow(2.0, other))
+    return out
+
+__all__.append('ldexp')
+
+def reverse_v2(input, dims):
+    if isinstance(dims, int):
+        dims = (dims,)
+    return pyboost_inner_prim.reverse_v2_impl(input, dims)
+
+__all__.append('reverse_v2')
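For context, a sketch of how this new module is consumed: the loop at the top re-instantiates every `*_op` primitive from `gen_ops_prim` pinned to Ascend, and the hand-written wrappers cover ops that need argument normalization (e.g. `reduce_any` mapping `dim=None` to all axes) or a `(seed, offset)` pair drawn from a generator. Illustrative only; assumes an Ascend machine with these modules importable:

```python
from mindnlp import core
from mindnlp.core._prims import ascend

x = core.tensor([[1.0, 2.0], [3.0, 4.0]])

# Generated wrapper: a pinned Ascend primitive produced by the module-level loop.
y = ascend.cast(x, core.float16)

# Hand-written wrapper: dim=None is normalized to () meaning "all axes".
any_large = ascend.reduce_any(x > 2.0, None, False)

# Stateful ops draw (seed, offset) from the default generator before dispatch.
dropped = ascend.dropout_ext(x, 0.5)
```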
7 changes: 0 additions & 7 deletions mindnlp/core/_prims/ascend/__init__.py

This file was deleted.

87 changes: 0 additions & 87 deletions mindnlp/core/_prims/ascend/aclop.py

This file was deleted.
