fix cholesky doc, test=develop #25250

Closed
wants to merge 7 commits
6 changes: 1 addition & 5 deletions python/paddle/fluid/tests/unittests/test_inverse_op.py
@@ -89,11 +89,7 @@ def setUp(self):
def check_static_result(self, place, with_out=False):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="float64")
if with_out:
out = fluid.data(name="output", shape=[4, 4], dtype="float64")
else:
out = None
result = paddle.inverse(input=input, out=out)
result = paddle.inverse(x=input)

input_np = np.random.random([4, 4]).astype("float64")
result_np = np.linalg.inv(input_np)
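Editor's note: a minimal sketch, not part of this PR, of the equivalent imperative (dygraph) check after the API change. The paddle.enable_imperative and paddle.imperative.to_variable calls are taken from the docstring updates later in this diff; the surrounding test harness is assumed.

    import numpy as np
    import paddle

    paddle.enable_imperative()

    input_np = np.random.random([4, 4]).astype("float64")
    # The out= argument has been dropped; the input is now passed as x.
    result = paddle.inverse(x=paddle.imperative.to_variable(input_np))

    # Paddle's result should agree with NumPy's reference inverse.
    assert np.allclose(result.numpy(), np.linalg.inv(input_np), rtol=1e-05)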
47 changes: 23 additions & 24 deletions python/paddle/tensor/linalg.py
@@ -453,34 +453,35 @@ def dist(x, y, p=2):
def dot(x, y, name=None):
"""
:alias_main: paddle.dot
:alias: paddle.dot,paddle.tensor.dot,paddle.tensor.linalg.dot
:alias: paddle.dot, paddle.tensor.dot, paddle.tensor.linalg.dot

This operator calculates inner product for vectors.

.. note::
Only support 1-d Tensor(vector).

Parameters:
x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype soulde be ``float32``, ``float64``, ``int32``, ``int64``
x(Variable): 1-D ``Tensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
y(Variable): 1-D ``Tensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`

Returns:
Variable: the calculated result Tensor/LoDTensor.
Variable: the calculated result Tensor.

Examples:

.. code-block:: python

import paddle
import paddle.fluid as fluid
import numpy as np

with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32))
y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32))
z = paddle.dot(x, y)
print(z.numpy())
paddle.enable_imperative()
x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
x = paddle.imperative.to_variable(x_data)
y = paddle.imperative.to_variable(y_data)
z = paddle.dot(x, y)
print(z.numpy())

"""
op_type = 'dot'
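An editorial sketch (not part of the diff) cross-checking the rewritten example against NumPy; it assumes the same 1.8-era imperative APIs (paddle.enable_imperative, paddle.imperative.to_variable) that the new docstring uses.

    import numpy as np
    import paddle

    paddle.enable_imperative()

    x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
    y_data = np.random.uniform(1, 3, [10]).astype(np.float32)

    z = paddle.dot(paddle.imperative.to_variable(x_data),
                   paddle.imperative.to_variable(y_data))

    # For 1-D inputs paddle.dot is the plain inner product, so it should
    # match np.dot up to float32 rounding.
    assert np.allclose(z.numpy(), np.dot(x_data, y_data), rtol=1e-05)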
@@ -648,10 +649,10 @@ def cross(input, other, dim=None):
return out


def cholesky(x, upper=False):
def cholesky(x, upper=False, name=None):
"""
:alias_main: paddle.cholesky
:alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky
:alias: paddle.cholesky, paddle.tensor.cholesky, paddle.tensor.linalg.cholesky

Computes the Cholesky decomposition of one symmetric positive-definite
matrix or batches of symmetric positive-definite matrices.
@@ -677,20 +678,18 @@ def cholesky(x, upper=False):
.. code-block:: python

import paddle
import paddle.fluid as fluid
import numpy as np

with fluid.dygraph.guard():
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x = np.matmul(a, a_t) + 1e-03
x = fluid.dygraph.to_variable(x)
out = paddle.cholesky(x, upper=False)
print(out.numpy())
# [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]]

paddle.enable_imperative()
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.imperative.to_variable(x_data)
out = paddle.cholesky(x, upper=False)
print(out.numpy())
# [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]]
"""
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
check_type(upper, 'upper', bool, 'cholesky')
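An editorial sketch (not part of the diff) that verifies the factor returned by the example above: with upper=False the result is lower triangular and should reconstruct the input as L @ L.T. It relies on the same imperative-mode assumptions as the docstring example.

    import numpy as np
    import paddle

    paddle.enable_imperative()

    a = np.random.rand(3, 3)
    # Same symmetric positive-definite construction as in the docstring example.
    x_data = np.matmul(a, np.transpose(a, [1, 0])) + 1e-03
    chol = paddle.cholesky(paddle.imperative.to_variable(x_data), upper=False)

    # The lower-triangular factor L satisfies L @ L.T == x, up to rounding.
    chol_np = chol.numpy()
    assert np.allclose(np.matmul(chol_np, np.transpose(chol_np, [1, 0])),
                       x_data, atol=1e-06)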
54 changes: 17 additions & 37 deletions python/paddle/tensor/math.py
@@ -1064,78 +1064,58 @@ def logsumexp(x, dim=None, keepdim=False, out=None, name=None):
return layers.log(sum_out, name)


def inverse(input, out=None, name=None):
def inverse(x, name=None):
"""
:alias_main: paddle.inverse
:alias: paddle.inverse,paddle.tensor.inverse,paddle.tensor.math.inverse
:alias: paddle.inverse, paddle.tensor.inverse, paddle.tensor.math.inverse

Takes the inverse of the square matrix. A square matrix is a matrix with
the same number of rows and columns. The input can be a square matrix
(2-D Tensor) or batches of square matrices.

Args:
input (Variable): The input Variable which holds a Tensor. The last two
x (Variable): The input Variable which holds a Tensor. The last two
dimensions should be equal. When the number of dimensions is
Contributor:
    "The input Variable which holds a Tensor." --> "The input tensor."
    It is still not clear whether, conceptually, a Variable holds a Tensor...

Member Author:
    ok, fixed it.

greater than 2, it is treated as batches of square matrices. The data
type can be float32 and float64.
out (Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
If out is None, a new Varibale will be create to store the result.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information,
please refer to :ref:`api_guide_Name`

Returns:
Variable: A Tensor holds the inverse of input. The shape and data type
is the same as input.
Variable: A Tensor holds the inverse of x. The shape and data type
is the same as x.

Examples:
.. code-block:: python
Contributor:
    Keep only the dynamic-graph (imperative) version of the example code.
    Enable it with paddle.enable_imperative.

Member Author:
    ok, fixed it.


import numpy as np
import paddle
import paddle.fluid as fluid

mat_np = np.array([[2, 0], [0, 2]]).astype("float32")

# example for static graph
input = fluid.data("input", shape=[2, 2], dtype="float32")
out = paddle.inverse(input)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
results = exe.run(feed={"input": mat_np },
fetch_list=[out.name])
print(results[0]) # [[0.5, 0], [0, 0.5]]

# example for dynamic graph
with fluid.dygraph.guard():
mat = fluid.dygraph.to_variable(mat_np)
inv = paddle.inverse(mat)
print(inv) # [[0.5, 0], [0, 0.5]]
paddle.enable_imperative()
mat = paddle.imperative.to_variable(mat_np)
inv = paddle.inverse(mat)
print(inv) # [[0.5, 0], [0, 0.5]]
"""
if in_dygraph_mode():
return core.ops.inverse(input)
return core.ops.inverse(x)

def _check_input(input):
check_variable_and_dtype(input, 'input',
def _check_input(x):
check_variable_and_dtype(x, 'x',
['float32', 'float64'], 'inverse')
if len(input.shape) < 2:
if len(x.shape) < 2:
raise ValueError(
"The input of inverse is expected to be a Tensor whose number "
"of dimensions is no less than 2. But reviced: %d, "
"input's shape: %s." % (len(input.shape), input.shape))

if out is not None:
check_variable_and_dtype(out, 'out', input.dtype, 'inverse')
"x's shape: %s." % (len(x.shape), x.shape))

_check_input(input)
_check_input(x)

helper = LayerHelper('inverse', **locals())
if out is None:
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='inverse', inputs={'Input': [input] }, outputs={'Output': [out]})
type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})
return out
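A small editorial sketch (not part of the diff) of the batched case mentioned in the docstring: inputs with more than two dimensions are treated as a batch of square matrices. It uses the same imperative-mode APIs as the example above.

    import numpy as np
    import paddle

    paddle.enable_imperative()

    # A batch of two 2x2 matrices; the last two dimensions must be equal.
    mats_np = np.stack([np.array([[2., 0.], [0., 2.]]),
                        np.array([[1., 2.], [3., 4.]])]).astype("float32")

    inv = paddle.inverse(x=paddle.imperative.to_variable(mats_np))

    # np.linalg.inv also operates over the leading batch dimension,
    # so the two results should agree elementwise.
    assert np.allclose(inv.numpy(), np.linalg.inv(mats_np), rtol=1e-04)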

