Commit 53f8c0c
Added example script for Lie labs.
luisenp committed Mar 9, 2023 · 1 parent 332b23b

Showing 1 changed file with 121 additions and 0 deletions: examples/lie_labs.py (new file)
import torch

import theseus.labs.lie as lie
import theseus.labs.lie.functional as lieF

batch_size = 5

# ### Lie Tensor creation functions
g1 = lie.rand(batch_size, lie.SE3, requires_grad=True)
print(f"Created SE3 tensor with shape {g1.shape}")
g2 = g1.clone()

# Can create from a tensor as long as it's consistent with the desired ltype
g3_data = lieF.so3.rand(batch_size)
g3 = lie.from_tensor(g3_data, lie.SO3)

try:
    x = lie.from_tensor(torch.zeros(1, 3, 3), lie.SO3)
except ValueError as e:
    print(f"ERROR: {e}")

g4 = lie.as_lietensor(g3_data, lie.SO3)
g5 = lie.cast(g3_data, lie.SO3) # alias for as_lietensor
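
# A quick check that both creation helpers give back the same type of object:
print(f"as_lietensor returned a {type(g4).__name__}, cast a {type(g5).__name__}.")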

# ### Lie operations
v = torch.randn(batch_size, 6)

# Exponential and logarithmic map
out1 = g1.exp(v) # also lie.exp(v, g1.ltype)
print(f"Exp map returns a {type(out1)}.")
out2 = g1.log() # also lie.log(g1)
print(f"Log map returns a {type(out2)}.")

# Inverse
out1 = g1.inv() # also lie.inv(g1)
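
# Inverting twice should give back the original transform:
torch.testing.assert_close(g1.inv().inv(), g1)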

# Compose
# also lie.compose(g1, g2)
out1 = g1.compose(g2) # type: ignore
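
# Compose and inverse interact as expected, i.e., (g1 . g2)^-1 = g2^-1 . g1^-1
# up to floating point precision:
torch.testing.assert_close(
    g1.compose(g2).inv(), g2.inv().compose(g1.inv())  # type: ignore
)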

# Differentiable Jacobians
jacs, out = g1.jcompose(g2) # type: ignore
print("Jacobians output is a 2-tuple.")
print(" First element is a list of jacobians, one per group argument.")
print(f" For compose this means length {len(jacs)}.")
print(" The second element of the tuple is the result of the operation itself.")
print(f" Which for compose is a {type(out).__name__}.")

# Other options:
# * adj(), hat(), vee(), retract(), local()
# * Jacobians: jlog(), jinv(), jexp()
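
# For instance, hat/vee can also be reached through the functional API used
# for lieF.so3.rand above (assuming lieF.so3 exposes hat/vee, where hat builds
# the skew-symmetric matrix of an so3 tangent vector and vee inverts it):
w = torch.randn(batch_size, 3)
torch.testing.assert_close(lieF.so3.vee(lieF.so3.hat(w)), w)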

# ### Overridden operators
# Local map
z = g2 - g1 # equivalent to g1.local(g2)
print(f"Output of local is a {type(z)}.")
g4 = -g1 # equivalent to g1.inv()
torch.testing.assert_close(g1.inv(), g4)

# Some operators have strict typecheck requirements
try:
    g2 - z
except TypeError as e:
    print(f"ERROR: {e}")

try:
    g3 - g1
except ValueError as e:
    print(f"ERROR: {e}")

# For retract(), which can be written as g + x, the requirement is that x
# is a LieTensor of ltype=lie.tgt, which we require as an extra layer of safety.
# 6 is the DOF of SE3
x = torch.randn(batch_size, 6) # type: ignore
try:
    g1 + x
except TypeError as e:
    print(f"ERROR: {e}")

y = lie.cast(x, lie.tgt)
g2 = g1 + y
torch.testing.assert_close(g2, g1.retract(x))
print("Success: + operator works for LieTensors of type lie.tgt.")


# Note that tangent tensors are torch.Tensors for all practical purposes.
# All torch operations are supported. Here is an arbitrary example
def fun(tt_):
    out = torch.nn.functional.linear(tt_, torch.ones(2, 6), torch.zeros(2))
    return torch.sigmoid(out)


yy1 = fun(y)
yy2 = fun(y._t) # this is a torch.Tensor view of y's data
torch.testing.assert_close(yy1, yy2)
print("Success: a lie.tgt tensor works just like a regular tensor.")

# For convenience, we provide a context manager that drops all ltype checks and
# operates on the raw tensor data. However, keep in mind that this is error-prone.
# Here is one example of how this works.
with lie.as_euclidean():
    gg1 = torch.sin(g1)
# The above is the same as this next call, but the context might be more convenient
# if one is doing similar hacky stuff on several group objects.
gg2 = torch.sin(g1._t)
torch.testing.assert_close(gg1, gg2)
print("Success: We just did some ops that make no sense for SE3 tensors.")

# ### Lie tensors can also be used as leaf tensors for torch optimizers
g1 = lie.rand(1, lie.SE3, requires_grad=True)
g2 = lie.rand(1, lie.SE3)

opt = torch.optim.Adam([g1], lr=0.1)

for i in range(10):
    opt.zero_grad()
    d = g2 - g1  # same as g1.local(g2)
    loss = torch.sum(d**2)
    loss.backward()
    opt.step()
    print(f"Iter {i}. Loss: {loss.item(): .3f}")
