diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py
index 8b3e430952e63..829b27a405970 100644
--- a/python/paddle/fluid/layers/ops.py
+++ b/python/paddle/fluid/layers/ops.py
@@ -69,10 +69,10 @@
         import numpy as np
         import paddle
         import paddle.nn.functional as F
-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = F.sigmoid(x)
         print(out.numpy())
         # [0.40131234 0.450166 0.52497919 0.57444252]
@@ -86,10 +86,10 @@
         import numpy as np
         import paddle
         import paddle.nn.functional as F
-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = F.logsigmoid(x)
         print(out.numpy())
         # [-0.91301525 -0.79813887 -0.64439666 -0.55435524]
@@ -102,10 +102,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.exp(x)
         print(out.numpy())
         # [0.67032005 0.81873075 1.10517092 1.34985881]
@@ -118,10 +118,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.tanh(x)
         print(out.numpy())
         # [-0.37994896 -0.19737532 0.09966799 0.29131261]
@@ -134,10 +134,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.atan(x)
         print(out.numpy())
         # [-0.38050638 -0.19739556 0.09966865 0.29145679]
@@ -151,10 +151,10 @@
         import numpy as np
         import paddle
         import paddle.nn.functional as F
-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = F.tanh_shrink(x)
         print(out.numpy())
         # [-0.02005104 -0.00262468 0.00033201 0.00868739]
@@ -167,10 +167,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([0.1, 0.2, 0.3, 0.4])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.sqrt(x)
         print(out.numpy())
         # [0.31622777 0.4472136 0.54772256 0.63245553]
@@ -183,10 +183,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([0.1, 0.2, 0.3, 0.4])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.rsqrt(x)
         print(out.numpy())
         # [3.16227766 2.23606798 1.82574186 1.58113883]
@@ -199,10 +199,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.abs(x)
         print(out.numpy())
         # [0.4 0.2 0.1 0.3]
@@ -215,10 +215,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.ceil(x)
         print(out.numpy())
         # [-0. -0. 1. 1.]
@@ -231,10 +231,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.floor(x)
         print(out.numpy())
         # [-1. -1. 0. 0.]
@@ -247,10 +247,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.cos(x)
         print(out.numpy())
         # [0.92106099 0.98006658 0.99500417 0.95533649]
@@ -263,10 +263,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.acos(x)
         print(out.numpy())
         # [1.98231317 1.77215425 1.47062891 1.26610367]
@@ -279,10 +279,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.sin(x)
         print(out.numpy())
         # [-0.38941834 -0.19866933 0.09983342 0.29552021]
@@ -295,10 +295,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.asin(x)
         print(out.numpy())
         # [-0.41151685 -0.20135792 0.10016742 0.30469265]
@@ -311,10 +311,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.cosh(x)
         print(out.numpy())
         # [1.08107237 1.02006676 1.00500417 1.04533851]
@@ -327,10 +327,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.sinh(x)
         print(out.numpy())
         # [-0.41075233 -0.201336 0.10016675 0.30452029]
@@ -343,10 +343,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.5, -0.2, 0.6, 1.5])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.round(x)
         print(out.numpy())
         # [-1. -0. 1. 2.]
@@ -359,10 +359,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.reciprocal(x)
         print(out.numpy())
         # [-2.5 -5. 10. 3.33333333]
@@ -375,10 +375,10 @@
         import numpy as np
         import paddle

-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = paddle.square(x)
         print(out.numpy())
         # [0.16 0.04 0.01 0.09]
@@ -392,10 +392,10 @@
         import numpy as np
         import paddle
         import paddle.nn.functional as F
-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = F.softplus(x)
         print(out.numpy())
         # [0.51301525 0.59813887 0.74439666 0.85435524]
@@ -409,10 +409,10 @@
         import numpy as np
         import paddle
         import paddle.nn.functional as F
-        paddle.enable_imperative()
+        paddle.disable_static()

         x_data = np.array([-0.4, -0.2, 0.1, 0.3])
-        x = paddle.imperative.to_variable(x_data)
+        x = paddle.to_variable(x_data)
         out = F.softsign(x)
         print(out.numpy())
         # [-0.28571429 -0.16666667 0.09090909 0.23076923]
diff --git a/python/paddle/fluid/tests/unittests/test_addmm_op.py b/python/paddle/fluid/tests/unittests/test_addmm_op.py
index 0bcdc45a2ccd0..6e66c0c0029ac 100644
--- a/python/paddle/fluid/tests/unittests/test_addmm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_addmm_op.py
@@ -240,13 +240,13 @@ def test_api_error(self):
         data_y = np.ones((2, 2)).astype(np.float32)
         data_input = np.ones((2, 2)).astype(np.float32)
-        paddle.enable_imperative()
+        paddle.disable_static()
         def test_error1():
             data_x_wrong = np.ones((2, 3)).astype(np.float32)
-            x = paddle.imperative.to_variable(data_x_wrong)
-            y = paddle.imperative.to_variable(data_y)
-            input = paddle.imperative.to_variable(data_input)
+            x = paddle.to_variable(data_x_wrong)
+            y = paddle.to_variable(data_y)
+            input = paddle.to_variable(data_input)
             out = paddle.tensor.addmm(
                 input=input, x=x, y=y, beta=0.5, alpha=5.0
             )
         self.assertRaises(ValueError, test_error1)
     '''
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index e625e5496c7e3..0875fb4c219a0 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -561,9 +561,9 @@ def tril(x, diagonal=0, name=None):
             # [ 5, 6, 7, 8],
             # [ 9, 10, 11, 12]])

-            paddle.enable_imperative()
+            paddle.disable_static()

-            x = paddle.imperative.to_variable(data)
+            x = paddle.to_variable(data)

             tril1 = paddle.tensor.tril(x)
             # array([[ 1, 0, 0, 0],
@@ -632,10 +632,10 @@ def triu(x, diagonal=0, name=None):
             # [ 5, 6, 7, 8],
             # [ 9, 10, 11, 12]])

-            paddle.enable_imperative()
+            paddle.disable_static()

             # example 1, default diagonal
-            x = paddle.imperative.to_variable(data)
+            x = paddle.to_variable(data)
             triu1 = paddle.tensor.triu(x)
             # array([[ 1, 2, 3, 4],
             # [ 0, 6, 7, 8],
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 8744e02b9f7c5..306e683f8ae37 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -735,10 +735,10 @@ def bmm(x, y, name=None):
             input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]])
             input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])

-            paddle.enable_imperative()
+            paddle.disable_static()

-            x = paddle.imperative.to_variable(input1)
-            y = paddle.imperative.to_variable(input2)
+            x = paddle.to_variable(input1)
+            y = paddle.to_variable(input2)
             out = paddle.bmm(x, y)
             #output size: (2, 2, 2)
             #output value:
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index efd384df4e7c0..bffdf15864f01 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -457,10 +457,10 @@ def stack(x, axis=0, name=None):
             data2 = np.array([[3.0, 4.0]])
             data3 = np.array([[5.0, 6.0]])

-            paddle.enable_imperative()
-            x1 = paddle.imperative.to_variable(data1)
-            x2 = paddle.imperative.to_variable(data2)
-            x3 = paddle.imperative.to_variable(data3)
+            paddle.disable_static()
+            x1 = paddle.to_variable(data1)
+            x2 = paddle.to_variable(data2)
+            x3 = paddle.to_variable(data3)

             out = paddle.stack([x1, x2, x3], axis=0)
             print(out.shape)  # [3, 1, 2]
@@ -637,7 +637,7 @@ def unsqueeze(x, axis, name=None):
             import paddle

-            paddle.enable_imperative()
+            paddle.disable_static()

             x = paddle.rand([5, 10])
             print(x.shape)
             # [5, 10]
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index e21aeb039f41f..60994a5165942 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -883,11 +883,11 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
             data_y = np.ones((2, 2)).astype(np.float32)
             data_input = np.ones((2, 2)).astype(np.float32)

-            paddle.enable_imperative()
+            paddle.disable_static()

-            x = paddle.imperative.to_variable(data_x)
-            y = paddle.imperative.to_variable(data_y)
-            input = paddle.imperative.to_variable(data_input)
+            x = paddle.to_variable(data_x)
+            y = paddle.to_variable(data_y)
+            input = paddle.to_variable(data_input)
             out = paddle.tensor.addmm(
                 input=input, x=x, y=y, beta=0.5, alpha=5.0
             )
@@ -1561,10 +1561,10 @@ def cumsum(x, axis=None, dtype=None, name=None):
         .. code-block:: python

             import paddle
-            from paddle.imperative import to_variable
+            from paddle import to_variable
             import numpy as np

-            paddle.enable_imperative()
+            paddle.disable_static()

             data_np = np.arange(12).reshape(3, 4)
             data = to_variable(data_np)