From 8e02ef72f7e4cafb94a0c118915c139dfd44f095 Mon Sep 17 00:00:00 2001 From: ForFishes <2282912238@qq.com> Date: Mon, 29 Jun 2020 19:10:24 +0800 Subject: [PATCH 1/6] add cholesky, test=develop --- python/paddle/tensor/linalg.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index f0e1c78f11750..d1abae18dcb02 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -648,7 +648,7 @@ def cross(input, other, dim=None): return out -def cholesky(x, upper=False): +def cholesky(x, upper=False, name=None): """ :alias_main: paddle.cholesky :alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky @@ -682,14 +682,17 @@ def cholesky(x, upper=False): with fluid.dygraph.guard(): a = np.random.rand(3, 3) + print(a) + # [[0.10548146 0.44426157 0.85944377] + # [0.84469568 0.72855948 0.44987977] + # [0.34449094 0.89552855 0.79255662]] a_t = np.transpose(a, [1, 0]) - x = np.matmul(a, a_t) + 1e-03 + x = np.matmul(a, a_t) + 1e-03 * np.eye(3) x = fluid.dygraph.to_variable(x) out = paddle.cholesky(x, upper=False) - print(out.numpy()) - # [[1.190523 0. 0. ] - # [0.9906703 0.27676893 0. ] - # [1.25450498 0.05600871 0.06400121]] + # [[0.97372392 0. 0. ] + # [0.82098946 0.87958958 0. 
] + # [1.1454419 0.40882168 0.26574251]] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') From 3c1e7c396e3ce5f8c9200ae854f68d59a4e56a35 Mon Sep 17 00:00:00 2001 From: ForFishes <2282912238@qq.com> Date: Tue, 30 Jun 2020 16:13:15 +0800 Subject: [PATCH 2/6] add inverse, dot, test=develop --- python/paddle/tensor/linalg.py | 4 +++- python/paddle/tensor/math.py | 29 +++++++++++------------------ 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index d1abae18dcb02..9741301326395 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -478,9 +478,11 @@ def dot(x, y, name=None): with fluid.dygraph.guard(): x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32)) + # [0.94936 0.119406 0.696538 0.611788 0.864026 0.858593 0.198292 0.782601 0.370953 0.97247] y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32)) + # [2.94794 1.24812 1.91361 2.56719 2.92583 1.83007 1.23439 2.70771 2.04935 2.66508] z = paddle.dot(x, y) - print(z.numpy()) + # [15.666201] """ op_type = 'dot' diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 7cc19186d0068..40dc42a1867c4 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -1155,7 +1155,7 @@ def logsumexp(x, dim=None, keepdim=False, out=None, name=None): return layers.log(sum_out, name) -def inverse(input, out=None, name=None): +def inverse(x, name=None): """ :alias_main: paddle.inverse :alias: paddle.inverse,paddle.tensor.inverse,paddle.tensor.math.inverse @@ -1165,20 +1165,17 @@ def inverse(input, out=None, name=None): (2-D Tensor) or batches of square matrices. Args: - input (Variable): The input Variable which holds a Tensor. The last two + x (Variable): The input Variable which holds a Tensor. The last two dimensions should be equal. 
When the number of dimensions is greater than 2, it is treated as batches of square matrix. The data type can be float32 and float64. - out (Variable, optional): Optional output which can be any created - Variable that meets the requirements to store the result of operation. - If out is None, a new Varibale will be create to store the result. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: - Variable: A Tensor holds the inverse of input. The shape and data type - is the same as input. + Variable: A Tensor holds the inverse of x. The shape and data type + is the same as x. Examples: .. code-block:: python @@ -1208,25 +1205,21 @@ def inverse(input, out=None, name=None): if in_dygraph_mode(): return core.ops.inverse(input) - def _check_input(input): - check_variable_and_dtype(input, 'input', + def _check_input(x): + check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse') - if len(input.shape) < 2: + if len(x.shape) < 2: raise ValueError( "The input of inverse is expected to be a Tensor whose number " "of dimensions is no less than 2. But reviced: %d, " - "input's shape: %s." % (len(input.shape), input.shape)) - - if out is not None: - check_variable_and_dtype(out, 'out', input.dtype, 'inverse') + "x's shape: %s." 
% (len(x.shape), x.shape)) - _check_input(input) + _check_input(x) helper = LayerHelper('inverse', **locals()) - if out is None: - out = helper.create_variable_for_type_inference(dtype=input.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( - type='inverse', inputs={'Input': [input] }, outputs={'Output': [out]}) + type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]}) return out From 840b0cb0b78b1505cbdbe1fdf3ad2e03360b71e4 Mon Sep 17 00:00:00 2001 From: ForFishes <2282912238@qq.com> Date: Wed, 1 Jul 2020 16:25:21 +0800 Subject: [PATCH 3/6] fix inverse doc, test=develop --- python/paddle/fluid/tests/unittests/test_inverse_op.py | 6 +----- python/paddle/tensor/math.py | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_inverse_op.py b/python/paddle/fluid/tests/unittests/test_inverse_op.py index 13cb2b1f8b116..6f1319639bbe1 100644 --- a/python/paddle/fluid/tests/unittests/test_inverse_op.py +++ b/python/paddle/fluid/tests/unittests/test_inverse_op.py @@ -89,11 +89,7 @@ def setUp(self): def check_static_result(self, place, with_out=False): with fluid.program_guard(fluid.Program(), fluid.Program()): input = fluid.data(name="input", shape=[4, 4], dtype="float64") - if with_out: - out = fluid.data(name="output", shape=[4, 4], dtype="float64") - else: - out = None - result = paddle.inverse(input=input, out=out) + result = paddle.inverse(x=input) input_np = np.random.random([4, 4]).astype("float64") result_np = np.linalg.inv(input_np) diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 40dc42a1867c4..2e09de2a2c44e 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -1203,7 +1203,7 @@ def inverse(x, name=None): print(inv) # [[0.5, 0], [0, 0.5]] """ if in_dygraph_mode(): - return core.ops.inverse(input) + return core.ops.inverse(x) def _check_input(x): check_variable_and_dtype(x, 'x', From 
44d2e639ad88b4637f46181bca24b7c2653e0c7e Mon Sep 17 00:00:00 2001 From: ForFishes <2282912238@qq.com> Date: Wed, 15 Jul 2020 21:20:24 +0800 Subject: [PATCH 4/6] fix the dynamic mode, test=develop --- python/paddle/tensor/linalg.py | 44 ++++++++++++++++------------------ python/paddle/tensor/math.py | 20 ++++------------ 2 files changed, 24 insertions(+), 40 deletions(-) diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 9741301326395..65a9b7526f05e 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -461,12 +461,12 @@ def dot(x, y, name=None): Only support 1-d Tensor(vector). Parameters: - x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64`` - y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype soulde be ``float32``, ``float64``, ``int32``, ``int64`` + x(Variable): 1-D ``Tensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64`` + y(Variable): 1-D ``Tensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: - Variable: the calculated result Tensor/LoDTensor. + Variable: the calculated result Tensor. 
Examples: @@ -476,13 +476,13 @@ def dot(x, y, name=None): import paddle.fluid as fluid import numpy as np - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32)) - # [0.94936 0.119406 0.696538 0.611788 0.864026 0.858593 0.198292 0.782601 0.370953 0.97247] - y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32)) - # [2.94794 1.24812 1.91361 2.56719 2.92583 1.83007 1.23439 2.70771 2.04935 2.66508] - z = paddle.dot(x, y) - # [15.666201] + paddle.enable_imperative() + x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) + y_data = np.random.uniform(1, 3, [10]).astype(np.float32) + x = paddle.imperative.to_variable(x_data) + y = paddle.imperative.to_variable(y_data) + z = paddle.dot(x, y) + print(z.numpy()) """ op_type = 'dot' @@ -682,20 +682,16 @@ def cholesky(x, upper=False, name=None): import paddle.fluid as fluid import numpy as np - with fluid.dygraph.guard(): - a = np.random.rand(3, 3) - print(a) - # [[0.10548146 0.44426157 0.85944377] - # [0.84469568 0.72855948 0.44987977] - # [0.34449094 0.89552855 0.79255662]] - a_t = np.transpose(a, [1, 0]) - x = np.matmul(a, a_t) + 1e-03 * np.eye(3) - x = fluid.dygraph.to_variable(x) - out = paddle.cholesky(x, upper=False) - # [[0.97372392 0. 0. ] - # [0.82098946 0.87958958 0. ] - # [1.1454419 0.40882168 0.26574251]] - + paddle.enable_imperative() + a = np.random.rand(3, 3) + a_t = np.transpose(a, [1, 0]) + x_data = np.matmul(a, a_t) + 1e-03 + x = paddle.imperative.to_variable(x_data) + out = paddle.cholesky(x, upper=False) + print(out.numpy()) + # [[1.190523 0. 0. ] + # [0.9906703 0.27676893 0. 
] + # [1.25450498 0.05600871 0.06400121]] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 2e09de2a2c44e..c77b767c4a478 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -1185,22 +1185,10 @@ def inverse(x, name=None): import paddle.fluid as fluid mat_np = np.array([[2, 0], [0, 2]]).astype("float32") - - # example for static graph - input = fluid.data("input", shape=[2, 2], dtype="float32") - out = paddle.inverse(input) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - results = exe.run(feed={"input": mat_np }, - fetch_list=[out.name]) - print(results[0]) # [[0.5, 0], [0, 0.5]] - - # example for dynamic graph - with fluid.dygraph.guard(): - mat = fluid.dygraph.to_variable(mat_np) - inv = paddle.inverse(mat) - print(inv) # [[0.5, 0], [0, 0.5]] + paddle.enable_imperative() + mat = paddle.imperative.to_variable(mat_np) + inv = paddle.inverse(mat) + print(inv) # [[0.5, 0], [0, 0.5]] """ if in_dygraph_mode(): return core.ops.inverse(x) From f50a742f44f12caf0279a8a37c3fba4832604521 Mon Sep 17 00:00:00 2001 From: ForFishes <2282912238@qq.com> Date: Thu, 16 Jul 2020 13:40:48 +0800 Subject: [PATCH 5/6] fix the doc, test=develop --- python/paddle/tensor/linalg.py | 5 ++--- python/paddle/tensor/math.py | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 65a9b7526f05e..0cce74dc0fc4b 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -453,7 +453,7 @@ def dist(x, y, p=2): def dot(x, y, name=None): """ :alias_main: paddle.dot - :alias: paddle.dot,paddle.tensor.dot,paddle.tensor.linalg.dot + :alias: paddle.dot, paddle.tensor.dot, paddle.tensor.linalg.dot This operator calculates inner product for vectors. @@ -473,7 +473,6 @@ def dot(x, y, name=None): .. 
code-block:: python import paddle - import paddle.fluid as fluid import numpy as np paddle.enable_imperative() @@ -653,7 +652,7 @@ def cross(input, other, dim=None): def cholesky(x, upper=False, name=None): """ :alias_main: paddle.cholesky - :alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky + :alias: paddle.cholesky, paddle.tensor.cholesky, paddle.tensor.linalg.cholesky Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index c77b767c4a478..0d6860c7ee015 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -1157,8 +1157,8 @@ def logsumexp(x, dim=None, keepdim=False, out=None, name=None): def inverse(x, name=None): """ - :alias_main: paddle.inverse - :alias: paddle.inverse,paddle.tensor.inverse,paddle.tensor.math.inverse + :alias_main: paddle.inverse + :alias: paddle.inverse, paddle.tensor.inverse, paddle.tensor.math.inverse Takes the inverse of the square matrix. A square matrix is a matrix with the same number of rows and columns. The input can be a square matrix (2-D Tensor) or batches of square matrices. @@ -1182,7 +1182,6 @@ def inverse(x, name=None): import numpy as np import paddle - import paddle.fluid as fluid mat_np = np.array([[2, 0], [0, 2]]).astype("float32") paddle.enable_imperative() From 397692810dd2850b66edc8f21ddafe92cb75eaf2 Mon Sep 17 00:00:00 2001 From: ForFishes <2282912238@qq.com> Date: Thu, 16 Jul 2020 17:08:21 +0800 Subject: [PATCH 6/6] fix doc, test=develop --- python/paddle/tensor/linalg.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 0cce74dc0fc4b..800ab3ac1d187 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -678,7 +678,6 @@ def cholesky(x, upper=False, name=None): ..
code-block:: python import paddle - import paddle.fluid as fluid import numpy as np paddle.enable_imperative()