Enable masked jacobians in vectorization.
luisenp committed Dec 13, 2022
1 parent 5077785 commit 2ebe001
Showing 4 changed files with 96 additions and 0 deletions.
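For context, here is a minimal sketch of the idea behind masked jacobians (a hypothetical helper, not the code added in this commit; `jac_fn` is an assumed per-batch jacobian callable returning a (n_active, dof, dof) tensor): when a cost weight is zero for some entries of the batch, those jacobians contribute nothing to the linear system, so they can be skipped and left as zeros.

import torch


def masked_jacobians_sketch(jac_fn, x, mask):
    # x: (batch_size, dof) variable tensor; mask: (batch_size,) bool tensor that is
    # True where the cost weight is nonzero and a jacobian is actually needed.
    batch_size, dof = x.shape
    jac = torch.zeros(batch_size, dof, dof, dtype=x.dtype, device=x.device)
    if mask.any():
        # Only run the (expensive) jacobian computation on the active entries.
        jac[mask] = jac_fn(x[mask])
    return jac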
6 changes: 6 additions & 0 deletions tests/core/common.py
@@ -80,6 +80,9 @@ def weight_jacobians_and_error(self, jacobians, error):
    def _copy_impl(self, new_name=None):
        return MockCostWeight(self.the_data.copy(), name=new_name)

    def is_zero(self):
        raise NotImplementedError


class NullCostWeight(th.CostWeight):
    def __init__(self):
@@ -97,6 +100,9 @@ def weight_jacobians_and_error(self, jacobians, error):
    def _copy_impl(self, new_name=None):
        return NullCostWeight()

    def is_zero(self):
        raise NotImplementedError


class MockCostFunction(th.CostFunction):
    def __init__(
86 changes: 86 additions & 0 deletions tests/core/test_vectorizer.py
@@ -267,3 +267,89 @@ def test_vectorized_retract():

    for v1, v2 in zip(variables, variables_vectorized):
        assert v1.tensor.allclose(v2.tensor)


# Solves a very simple objective of the form sum_i (w_i * (x_i - t_i))**2, where
# each w_i can be zero with some probability. When vectorize=True, our vectorization
# class computes masked batched jacobians, so this function can be used to check
# that the solution is the same with the feature on and off. We also check that
# a backward pass runs without errors when this masking is used.
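# Note that a term with w_i == 0 contributes neither error nor jacobian to the
# objective, so skipping its jacobian computation cannot change the solution;
# this is what makes the masked and unmasked solves directly comparable.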
def _solve_fn_for_masked_jacobians(
    batch_size, dof, num_costs, weight_cls, vectorize, device
):
    rng = torch.Generator()
    rng.manual_seed(batch_size)
    obj = th.Objective()
    variables = [th.Vector(dof=dof, name=f"x{i}") for i in range(num_costs)]
    targets = [
        th.Vector(tensor=torch.randn(batch_size, dof, generator=rng), name=f"t{i}")
        for i in range(num_costs)
    ]
    base_tensor = torch.ones(
        batch_size, dof if weight_cls == th.DiagonalCostWeight else 1, device=device
    )
    # Wrapped into a param to pass to torch optimizer if necessary
    params = [
        torch.nn.Parameter(
            base_tensor.clone() * (torch.rand(1, generator=rng).item() > 0.9)
        )
        for _ in range(num_costs)
    ]
    weights = [weight_cls(params[i]) for i in range(num_costs)]
    for i in range(num_costs):
        obj.add(th.Difference(variables[i], targets[i], weights[i], name=f"cf{i}"))

    input_tensors = {
        f"x{i}": torch.ones(batch_size, dof, device=device) for i in range(num_costs)
    }
    layer = th.TheseusLayer(
        th.LevenbergMarquardt(obj, step_size=0.1, max_iterations=5),
        vectorize=vectorize,
    )
    layer.to(device=device)
    sol, _ = layer.forward(input_tensors)

    # Check that we can backprop through this without errors
    if vectorize:
        optim = torch.optim.Adam(params, lr=1e-4)
        for _ in range(5):  # do a few steps
            optim.zero_grad()
            layer.forward(input_tensors)
            loss = obj.error_squared_norm().sum()
            loss.backward()
            optim.step()

    return sol


@pytest.mark.parametrize("batch_size", [16])
@pytest.mark.parametrize("dof", [1, 4])
@pytest.mark.parametrize("num_costs", [1, 64])
@pytest.mark.parametrize("weight_cls", [th.ScaleCostWeight, th.DiagonalCostWeight])
def test_masked_jacobians(batch_size, dof, num_costs, weight_cls):
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    sol1 = _solve_fn_for_masked_jacobians(
        batch_size, dof, num_costs, weight_cls, True, device
    )
    sol2 = _solve_fn_for_masked_jacobians(
        batch_size, dof, num_costs, weight_cls, False, device
    )

    for i in range(num_costs):
        torch.testing.assert_close(sol1[f"x{i}"], sol2[f"x{i}"])


def test_masked_jacobians_called(monkeypatch):
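    # Patch theseus' masked_jacobians with a spy to confirm that the vectorized
    # solve actually exercises the masked-jacobian code path.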
    called = [False]

    def masked_jacobians_mock(cost_fn, mask):
        called[0] = True
        return cost_fn.jacobians()

    monkeypatch.setattr(
        th.core.cost_function, "masked_jacobians", masked_jacobians_mock
    )

    _solve_fn_for_masked_jacobians(128, 2, 16, th.ScaleCostWeight, True, "cpu")
    assert called[0]
3 changes: 3 additions & 0 deletions tests/optimizer/linearization_test_utils.py
@@ -115,6 +115,9 @@ def weight_jacobians_and_error(self, jacobians, error):
    def _copy_impl(self, new_name=None):
        raise NotImplementedError

    def is_zero(self):
        raise NotImplementedError


def build_test_objective_and_linear_system():
    # This function creates an objective that results in the
1 change: 1 addition & 0 deletions theseus/core/vectorizer.py
@@ -128,6 +128,7 @@ def __init__(self, objective: Objective, empty_cuda_cache: bool = False):
            vectorized_cost_fn.weight = base_cost_fn.weight.copy(
                keep_variable_names=False
            )
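            # Flag the vectorized cost function as supporting masked jacobians,
            # the feature enabled by this commit.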
            vectorized_cost_fn._supports_masking = True
            self._vectorized_cost_fns[schema] = vectorized_cost_fn

# Dict[_CostFunctionSchema, List[str]]