Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Allow constant inputs to cost functions to be passed as floats #150

Merged
merged 10 commits into from
Apr 27, 2022
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 9 additions & 3 deletions theseus/embodied/collision/collision.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import List, Optional, Tuple, cast
from typing import List, Optional, Tuple, Union, cast

import torch

Expand All @@ -22,7 +22,7 @@ def __init__(
sdf_origin: Variable,
sdf_data: Variable,
sdf_cell_size: Variable,
cost_eps: Variable,
cost_eps: Union[float, Variable, torch.Tensor],
name: Optional[str] = None,
):
if not isinstance(pose, Point2):
Expand All @@ -32,7 +32,13 @@ def __init__(
self.sdf_origin = sdf_origin
self.sdf_data = sdf_data
self.sdf_cell_size = sdf_cell_size
self.cost_eps = cost_eps
if not isinstance(cost_eps, Variable):
if not isinstance(cost_eps, torch.Tensor):
cost_eps = torch.tensor(cost_eps)
luisenp marked this conversation as resolved.
Show resolved Hide resolved
self.cost_eps = Variable(cost_eps)
else:
self.cost_eps = cost_eps
self.cost_eps.data = self.cost_eps.data.view(-1, 1)
self.register_optim_vars(["pose"])
self.register_aux_vars(["sdf_origin", "sdf_data", "sdf_cell_size", "cost_eps"])
self.robot: KinematicsModel = IdentityModel()
Expand Down
14 changes: 11 additions & 3 deletions theseus/embodied/collision/eff_obj_contact.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import List, Optional, Tuple, cast
from typing import List, Optional, Tuple, Union, cast

import torch

Expand All @@ -23,7 +23,7 @@ def __init__(
sdf_origin: Variable,
sdf_data: Variable,
sdf_cell_size: Variable,
eff_radius: Variable,
eff_radius: Union[float, Variable, torch.Tensor],
name: Optional[str] = None,
use_huber_loss: bool = False,
):
Expand All @@ -33,7 +33,15 @@ def __init__(
self.sdf_origin = sdf_origin
self.sdf_data = sdf_data
self.sdf_cell_size = sdf_cell_size
self.eff_radius = eff_radius
if not isinstance(eff_radius, Variable):
if not isinstance(eff_radius, torch.Tensor):
eff_radius = torch.tensor(eff_radius)
self.eff_radius = Variable(eff_radius)
else:
self.eff_radius = eff_radius
if eff_radius.data.squeeze().ndim > 1:
raise ValueError("eff_radius must be a 0-D or 1-D tensor.")
self.eff_radius.data = self.eff_radius.data.view(-1, 1)
self.register_optim_vars(["obj", "eff"])
self.register_aux_vars(
["sdf_origin", "sdf_data", "sdf_cell_size", "eff_radius"]
Expand Down
40 changes: 40 additions & 0 deletions theseus/embodied/collision/tests/test_eff_obj_contact.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import torch

import theseus as th
from theseus.core import Variable
mhmukadam marked this conversation as resolved.
Show resolved Hide resolved
from theseus.geometry.tests.test_se2 import create_random_se2
from theseus.utils import numeric_jacobian

Expand Down Expand Up @@ -149,3 +150,42 @@ def test_eff_obj_interesect_errors():
actual = cost_fn.error()
expected = outputs["error"][sdf_idx, :]
assert torch.allclose(actual, expected)


def test_eff_obj_variable_type():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 10, 100]:
obj = create_random_se2(batch_size, rng)
eff = create_random_se2(batch_size, rng)
origin = th.Variable(torch.randn(batch_size, 2).double())
sdf_data = th.Variable(torch.randn(batch_size, 10, 10).double())
cell_size = th.Variable(torch.rand(batch_size, 1).double())
eff_radius = th.Variable(torch.rand(batch_size, 1).double())
cost_weight = th.ScaleCostWeight(1.0)
cost_function = th.eb.EffectorObjectContactPlanar(
obj, eff, cost_weight, origin, sdf_data, cell_size, eff_radius
)

assert isinstance(cost_function.eff_radius, Variable)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here we can check that cost_function.eff_radius is eff_radius.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can delete the line above that has the isinstance check now.

assert cost_function.eff_radius is eff_radius

eff_radius_t = torch.rand(batch_size, 1).double()

cost_function = th.eb.EffectorObjectContactPlanar(
obj, eff, cost_weight, origin, sdf_data, cell_size, eff_radius_t
)

assert isinstance(cost_function.eff_radius, Variable)
assert np.allclose(cost_function.eff_radius.data, eff_radius_t)
assert len(cost_function.eff_radius.shape) == 2

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here we can add another check that cost_function.eff_radius.data is eff_radius_t.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@luisenp While `assert cost_function.eff_radius.data is eff_radius_t` gives an error, since one is a tensor and the other is a th.Variable. Is it better to use `assert torch.allclose(cost_function.eff_radius.data, eff_radius_t)`?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm a bit confused. Shouldn't both cost_function.eff_radius.data (notice, .data) and eff_radius_t be both torch tensors?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@luisenp When I try to do `assert cost_function.eff_radius.data is eff_radius_t`, I get this error, which is explained above:

>           assert cost_function.eff_radius.data is eff_radius_t
E           assert tensor([[0.4160]], dtype=torch.float64) is tensor([[0.4160]], dtype=torch.float64)
E            +  where tensor([[0.4160]], dtype=torch.float64) = Variable(data=tensor([[0.4160]], dtype=torch.float64), name=Variable__680).data
E            +    where Variable(data=tensor([[0.4160]], dtype=torch.float64), name=Variable__680) = <theseus.embodied.collision.eff_obj_contact.EffectorObjectContactPlanar object at 0x7f813704edd0>.eff_radius

theseus/embodied/collision/tests/test_eff_obj_contact.py:180: AssertionError


Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah, right, it's because of the view(-1, 1) we added. In this case, let's stick to compare that they are close in value as you had initially.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@luisenp Then can you take a look at the PR? I've already committed the changes.

eff_radius_f = torch.rand(1)

cost_function = th.eb.EffectorObjectContactPlanar(
obj, eff, cost_weight, origin, sdf_data, cell_size, eff_radius_f
)

assert isinstance(cost_function.eff_radius, Variable)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here we can first set up some random value for eff_radius_f and then add an additional check that np.isclose(cost_function.eff_radius.data.item(), eff_radius_f).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@luisenp
We're already giving a random value to eff_radius_f = 1.0.
So is this enough: `assert np.isclose(cost_function.eff_radius.data.item(), eff_radius_f)`?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

By random I mean sampling a new value every iteration rather than hardcoding to 1.0.

assert np.allclose(cost_function.eff_radius.data.item(), eff_radius_f)
assert len(cost_function.eff_radius.shape) == 2
25 changes: 21 additions & 4 deletions theseus/embodied/motionmodel/double_integrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ def __init__(
vel1: Vector,
pose2: LieGroup,
vel2: Vector,
dt: Variable,
dt: Union[float, torch.Tensor, Variable],
cost_weight: CostWeight,
name: Optional[str] = None,
):
Expand All @@ -28,16 +28,22 @@ def __init__(
raise ValueError(
"All variables for a DoubleIntegrator must have the same dimension."
)
if not isinstance(dt, Variable):
if not isinstance(dt, torch.Tensor):
dt = torch.tensor(dt)
self.dt = Variable(dt)
else:
self.dt = dt
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

After the ndim check below, we can add dt.data = self.dt.data.view(-1, 1), to make sure we have a batch dimension as well, as discussed above.

if dt.data.squeeze().ndim > 1:
raise ValueError(
"dt data must be a 0-D or 1-D tensor with numel in {1, batch_size}."
)
self.dt.data = self.dt.data.view(-1, 1)
self.pose1 = pose1
self.vel1 = vel1
self.pose2 = pose2
self.vel2 = vel2
self.register_optim_vars(["pose1", "vel1", "pose2", "vel2"])
self.dt = dt
self.register_aux_vars(["dt"])
self.weight = cost_weight

Expand Down Expand Up @@ -96,11 +102,13 @@ class GPCostWeight(CostWeight):
def __init__(
self,
Qc_inv: Union[Variable, torch.Tensor],
dt: Union[Variable, torch.Tensor],
dt: Union[float, Variable, torch.Tensor],
name: Optional[str] = None,
):
super().__init__(name=name)
if not isinstance(dt, Variable):
if not isinstance(dt, torch.Tensor):
dt = torch.tensor(dt)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have added a fix for this class in #157, make sure you merge those changes as well.

dt = Variable(dt)
if dt.data.squeeze().ndim > 1:
raise ValueError("dt must be a 0-D or 1-D tensor.")
Expand Down Expand Up @@ -172,7 +180,7 @@ def __init__(
vel1: Vector,
pose2: LieGroup,
vel2: Vector,
dt: Variable,
dt: Union[float, Variable, torch.Tensor],
cost_weight: GPCostWeight,
name: Optional[str] = None,
):
Expand All @@ -181,6 +189,15 @@ def __init__(
"GPMotionModel only accepts cost weights of type GPCostWeight. "
"For other weight types, consider using DoubleIntegrator instead."
)
if not isinstance(dt, Variable):
if not isinstance(dt, torch.Tensor):
dt = torch.tensor(dt)
self.dt = Variable(dt)
else:
self.dt = dt
if dt.data.squeeze().ndim > 1:
raise ValueError("dt must be a 0-D or 1-D tensor.")
self.dt.data = self.dt.data.view(-1, 1)
super().__init__(pose1, vel1, pose2, vel2, dt, cost_weight, name=name)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Similar comment about maintaining a batch shape here.


def _copy_impl(self, new_name: Optional[str] = None) -> "GPMotionModel":
Expand Down
30 changes: 30 additions & 0 deletions theseus/embodied/motionmodel/tests/test_double_integrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,12 @@

import copy

import numpy as np
import pytest # noqa: F401
import torch

import theseus as th
from theseus.core import Variable
from theseus.core.tests.common import check_another_theseus_function_is_copy
from theseus.utils import numeric_jacobian

Expand Down Expand Up @@ -60,6 +62,34 @@ def test_gp_motion_model_cost_weight_copy():
)


def test_gp_motion_model_variable_type():
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Add similar modifications to the test here, as mentioned above.

for dof in range(1, 10):
for batch_size in [1, 10, 100]:
aux = torch.randn(batch_size, dof, dof).double()
q_inv = aux.transpose(-2, -1).bmm(aux)
dt = torch.rand(1).double()
cost_weight = th.eb.GPCostWeight(q_inv, dt)

assert isinstance(cost_weight.Qc_inv, Variable)
assert isinstance(cost_weight.dt, Variable)
assert torch.allclose(cost_weight.Qc_inv.data, q_inv)
assert torch.allclose(cost_weight.dt.data, dt)

q_inv_v = Variable(q_inv)
dt_v = Variable(dt)
cost_weight = th.eb.GPCostWeight(q_inv_v, dt_v)
assert isinstance(cost_weight.dt, Variable)
assert cost_weight.Qc_inv is q_inv_v
assert cost_weight.dt is dt_v

q_inv_v = Variable(q_inv)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We don't need the second check for q_inv_v.

dt_f = torch.rand(1)
cost_weight = th.eb.GPCostWeight(q_inv_v, dt_f)
assert isinstance(cost_weight.dt, Variable)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, didn't think of this before, but we should also check that cost_weight.dt has a batch dimension. And similar for eff_radius_t in the other cost function. We are also missing the check np.isclose(cost_function.eff_radius.data.item(), eff_radius_f) (and similar for dt).

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@jeffin07 Missing the np.allclose() check for eff_radius_f and also checking that the batch dimension is present.

assert np.allclose(cost_weight.dt.data.item(), dt_f)
assert len(cost_weight.dt.shape) == 2


def test_gp_motion_model_cost_function_error_vector_vars():
for batch_size in [1, 10, 100]:
for dof in range(1, 10):
Expand Down