[SymForce] Add dense linearizer
The existing linearizer only knows how to linearize factors into a
sparse hessian (and jacobian).

Since a dense linear solver expects a dense hessian, we need a
linearizer that knows how to linearize factors into a dense hessian.

This commit adds such a dense linearizer.

Topic: dense_linearizer
GitOrigin-RevId: 214c4054ea48aee28d0fb02f86e496775e90ad20
bradley-solliday-skydio authored and aaron-skydio committed Apr 17, 2023
1 parent 61d70f0 commit d792e99
Showing 3 changed files with 552 additions and 0 deletions.
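For orientation before the diff, here is a minimal usage sketch of the new class (illustrative only, not part of the commit; the driver function and the "example" name are hypothetical, and the API is as declared in dense_linearizer.h below):

#include <vector>

#include "symforce/opt/dense_linearizer.h"

// Hypothetical driver: linearize a problem densely, then hand it to a dense solver.
void DenseLinearizeOnce(const std::vector<sym::Factor<double>>& factors,
                        const sym::Values<double>& values) {
  // NOTE: the linearizer stores only a pointer to factors, which must outlive it.
  sym::DenseLinearizer<double> linearizer("example", factors);

  sym::DenseLinearization<double> linearization;
  linearizer.Relinearize(values, linearization);  // first call allocates and initializes

  // linearization.hessian_lower (lower triangle filled) and linearization.rhs are now
  // ready for a dense linear solve; subsequent Relinearize calls reuse the storage.
}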
281 changes: 281 additions & 0 deletions symforce/opt/dense_linearizer.cc
@@ -0,0 +1,281 @@
/* ----------------------------------------------------------------------------
* SymForce - Copyright 2022, Skydio, Inc.
* This source code is under the Apache 2.0 license found in the LICENSE file.
* ---------------------------------------------------------------------------- */

#include "./dense_linearizer.h"

#include <tuple>
#include <unordered_set>

#include <fmt/format.h>
#include <spdlog/spdlog.h>

#include "./internal/linearizer_utils.h"

namespace sym {

template <typename Scalar>
DenseLinearizer<Scalar>::DenseLinearizer(const std::string& name,
const std::vector<Factor<Scalar>>& factors,
const std::vector<Key>& key_order,
const bool include_jacobians)
: name_(name),
factors_{&factors},
state_index_{},
is_initialized_{false},
include_jacobians_{include_jacobians} {
if (key_order.empty()) {
keys_ = ComputeKeysToOptimize(factors);
} else {
keys_ = key_order;
}
}

template <typename Scalar>
bool DenseLinearizer<Scalar>::IsInitialized() const {
return is_initialized_;
}

template <typename Scalar>
const std::vector<Key>& DenseLinearizer<Scalar>::Keys() const {
return keys_;
}

template <typename Scalar>
const std::unordered_map<key_t, index_entry_t>& DenseLinearizer<Scalar>::StateIndex() const {
SYM_ASSERT(IsInitialized());
return state_index_;
}

template <typename Scalar>
using LinearizedDenseFactor = typename DenseLinearizer<Scalar>::LinearizedDenseFactor;

template <typename Scalar, typename Derived>
void CopyJacobianFactorToCombined(const Eigen::DenseBase<Derived>& jacobian,
const std::vector<linearization_offsets_t>& key_offsets,
const int residual_offset,
DenseLinearization<Scalar>& linearization) {
const int residual_dim = jacobian.rows();
for (const linearization_offsets_t& offsets : key_offsets) {
linearization.jacobian.block(residual_offset, offsets.combined_offset, residual_dim,
offsets.tangent_dim) =
jacobian.block(0, offsets.factor_offset, residual_dim, offsets.tangent_dim);
}
}

template <typename Scalar>
void CopyRhsFactorToCombined(const LinearizedDenseFactor<Scalar>& linearized_dense_factor,
const std::vector<linearization_offsets_t>& key_offsets,
DenseLinearization<Scalar>& linearization) {
for (const linearization_offsets_t& offsets : key_offsets) {
linearization.rhs.segment(offsets.combined_offset, offsets.tangent_dim) +=
linearized_dense_factor.rhs.segment(offsets.factor_offset, offsets.tangent_dim);
}
}

template <typename Scalar>
void CopyHessianFactorToCombined(const LinearizedDenseFactor<Scalar>& factor_linearization,
const std::vector<linearization_offsets_t>& key_offsets,
DenseLinearization<Scalar>& linearization) {
// NOTE(brad): Assumes no repeats in key_offsets (and thus no repeats in a factor's
// OptimizedKeys()). If there is a repeat, then the strict upper triangle of a diagonal
// block will be copied. Not the biggest deal, but it's a waste.

// Loop over key blocks of lower triangle of factor linearization hessian
for (int col_index = 0; col_index < static_cast<int>(key_offsets.size()); col_index++) {
const linearization_offsets_t& col_block = key_offsets[col_index];

// Diagonal block is triangular
linearization.hessian_lower
.block(col_block.combined_offset, col_block.combined_offset, col_block.tangent_dim,
col_block.tangent_dim)
.template triangularView<Eigen::Lower>() +=
factor_linearization.hessian.block(col_block.factor_offset, col_block.factor_offset,
col_block.tangent_dim, col_block.tangent_dim);

// strict lower triangle
for (int row_index = col_index + 1; row_index < static_cast<int>(key_offsets.size());
row_index++) {
const linearization_offsets_t& row_block = key_offsets[row_index];

// If keys have reverse order in combined hessian, the combined block will be in the
// upper triangle, in which case we must transpose to get the lower triangle version.
if (row_block.combined_offset < col_block.combined_offset) {
linearization.hessian_lower.block(col_block.combined_offset, row_block.combined_offset,
col_block.tangent_dim, row_block.tangent_dim) +=
factor_linearization.hessian
.block(row_block.factor_offset, col_block.factor_offset, row_block.tangent_dim,
col_block.tangent_dim)
.transpose();
} else {
linearization.hessian_lower.block(row_block.combined_offset, col_block.combined_offset,
row_block.tangent_dim, col_block.tangent_dim) +=
factor_linearization.hessian.block(row_block.factor_offset, col_block.factor_offset,
row_block.tangent_dim, col_block.tangent_dim);
}
}
}
}

/**
* Linearizes this->factors_ at values into linearization. It differs from subsequent runs
* because it additionally:
* - Allocates space for the dense linearized factors
* - Figures out which parts of the problem linearization correspond to which parts of each
* factor linearization
* - Allocates memory for each factor linearization of each shape
* - Performs sanity checks that the outputs of each factor are consistent
* - Computes indices into values for the arguments of each factor
*/
template <typename Scalar>
void DenseLinearizer<Scalar>::InitialLinearization(const Values<Scalar>& values,
DenseLinearization<Scalar>& linearization) {
// Compute state vector index
int32_t offset = 0;
for (const Key& key : keys_) {
auto entry = values.IndexEntryAt(key);
entry.offset = offset;
state_index_[key.GetLcmType()] = entry;

offset += entry.tangent_dim;
}

// N is sum of tangent dim of all keys to optimize. Equal to num cols of jacobian
const int32_t N = offset;

// Allocate final storage for the combined RHS since it is dense and has a known size before
// linearizing factors. Allocate temporary storage for the residual because the combined residual
// dimension is not known yet.
linearization.rhs.resize(N);
linearization.rhs.setZero();
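// Only the lower triangle of hessian_lower is written or read by this class, so it is enough
// to zero just that triangle; the strict upper triangle is left uninitialized.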
linearization.hessian_lower.resize(N, N);
linearization.hessian_lower.template triangularView<Eigen::Lower>().setZero();

// NOTE(brad): Currently we assume all factors are dense factors. The reason for this is that it
// is simpler to only copy dense factors into a dense linearization, and we want to get that
// version working first before worrying about sparse factors.
linearized_dense_factors_.reserve(factors_->size());
typename internal::LinearizedDenseFactorPool<Scalar>::SizeTracker size_tracker;

LinearizedDenseFactor factor_linearization{};

std::vector<Scalar> combined_residual;
std::vector<decltype(factor_linearization.jacobian)> factor_jacobians;

// Track these to make sure that all combined keys are touched by at least one factor.
std::unordered_set<Key> keys_touched_by_factors;

// Linearize each factor once, recording the bookkeeping needed for cheap relinearization and
// accumulating its contribution into the combined problem.
for (const auto& factor : *factors_) {
for (const auto& key : factor.OptimizedKeys()) {
keys_touched_by_factors.insert(key);
}

// First, we evaluate into a temporary linearized factor
factor_indices_.push_back(values.CreateIndex(factor.AllKeys()).entries);
factor.Linearize(values, factor_linearization, &factor_indices_.back());

factor_keyoffsets_.emplace_back();
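// Immediately-invoked lambda: fills factor_keyoffsets_.back() with this factor's key offsets
// and returns the factor's total tangent dimension.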
const int32_t tangent_dim = [&] {
int32_t factor_tangent_dim;
std::tie(factor_keyoffsets_.back(), factor_tangent_dim) =
internal::FactorOffsets<linearization_offsets_t>(values, factor.OptimizedKeys(),
state_index_);
return factor_tangent_dim;
}();
const std::vector<linearization_offsets_t>& key_offsets = factor_keyoffsets_.back();

if (key_offsets.empty()) {
std::vector<key_t> input_keys;
for (const Key& key : factor.OptimizedKeys()) {
input_keys.push_back(key.GetLcmType());
}

spdlog::warn(
"LM<{}>: Optimizing a factor that touches no optimized keys! Optimized input keys for "
"the factor are: {}",
name_, input_keys);
}

internal::AssertConsistentShapes(tangent_dim, factor_linearization, include_jacobians_);

// Make sure a temporary of the right dimension is kept for relinearizations
linearized_dense_factors_.AppendFactorSize(factor_linearization.residual.rows(),
factor_linearization.rhs.rows(), size_tracker);

// Add contributions of residual and jacobian to temporary vectors because we don't yet know
// their total size.
combined_residual.insert(
combined_residual.cend(), factor_linearization.residual.data(),
factor_linearization.residual.data() + factor_linearization.residual.size());
if (include_jacobians_) {
factor_jacobians.emplace_back(std::move(factor_linearization.jacobian));
}

// Add contribution of rhs and hessian (can be done directly because we know the whole size)
CopyRhsFactorToCombined(factor_linearization, key_offsets, linearization /* mut */);
CopyHessianFactorToCombined(factor_linearization, key_offsets, linearization /* mut */);
}
// Now that we know the full problem size, we can construct the residual and jacobian
linearization.residual =
Eigen::Map<VectorX<Scalar>>(combined_residual.data(), combined_residual.size());
if (include_jacobians_) {
linearization.jacobian.resize(combined_residual.size(), N);
linearization.jacobian.setZero();

int row_offset = 0;
for (int i = 0; i < static_cast<int>(factor_jacobians.size()); i++) {
const auto& jacobian = factor_jacobians[i];
CopyJacobianFactorToCombined(jacobian, factor_keyoffsets_[i], row_offset, linearization);
row_offset += jacobian.rows();
}
}

if (keys_.size() != keys_touched_by_factors.size()) {
for (const auto& key : keys_) {
if (keys_touched_by_factors.count(key) == 0) {
throw std::runtime_error(
fmt::format("Key {} is in the state vector but is not optimized by any factor.", key));
}
}
}

is_initialized_ = true;
linearization.SetInitialized();
}

template <typename Scalar>
void DenseLinearizer<Scalar>::Relinearize(const Values<Scalar>& values,
DenseLinearization<Scalar>& linearization) {
if (is_initialized_) {
// Set rhs & hessian_lower to 0 as they will be built additively
linearization.rhs.setZero();
linearization.hessian_lower.template triangularView<Eigen::Lower>().setZero();
// The parts of linearization.jacobian that aren't being set are assumed to have already
// been set to 0 by InitialLinearization and to have not been mutated since.

int residual_offset = 0;
for (int i = 0; i < static_cast<int>(factors_->size()); i++) {
(*factors_)[i].Linearize(values, linearized_dense_factors_.at(i), &factor_indices_[i]);

const LinearizedDenseFactor& factor_linearization = linearized_dense_factors_.at(i);
const std::vector<linearization_offsets_t>& key_offsets = factor_keyoffsets_[i];
const int residual_dim = factor_linearization.residual.size();

// Copy factor_linearization values into linearization
linearization.residual.segment(residual_offset, residual_dim) = factor_linearization.residual;
if (include_jacobians_) {
CopyJacobianFactorToCombined(factor_linearization.jacobian, key_offsets, residual_offset,
linearization /* mut */);
}
CopyRhsFactorToCombined(factor_linearization, key_offsets, linearization /* mut */);
CopyHessianFactorToCombined(factor_linearization, key_offsets, linearization /* mut */);

residual_offset += residual_dim;
}

} else {
InitialLinearization(values, linearization /* mut */);
}
}

} // namespace sym

template class sym::DenseLinearizer<double>;
template class sym::DenseLinearizer<float>;
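An aside on the Eigen idiom used throughout this file (not part of the commit): reads and writes through triangularView<Eigen::Lower>() touch only the lower triangle, which is why the code zeroes and accumulates just that triangle of hessian_lower. A self-contained sketch:

#include <iostream>

#include <Eigen/Dense>

int main() {
  Eigen::Matrix3d H;
  H.setConstant(9.0);                          // fill with sentinel values
  H.triangularView<Eigen::Lower>().setZero();  // upper triangle keeps its 9s

  Eigen::Matrix3d contribution;
  contribution << 4, 1, 0,
                  1, 5, 2,
                  0, 2, 6;
  H.triangularView<Eigen::Lower>() += contribution;  // accumulates the lower triangle only

  std::cout << H << "\n";  // strictly-upper entries are still 9
  return 0;
}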
112 changes: 112 additions & 0 deletions symforce/opt/dense_linearizer.h
@@ -0,0 +1,112 @@
/* ----------------------------------------------------------------------------
* SymForce - Copyright 2022, Skydio, Inc.
* This source code is under the Apache 2.0 license found in the LICENSE file.
* ---------------------------------------------------------------------------- */

#pragma once

#include <string>
#include <unordered_map>
#include <vector>

#include <lcmtypes/sym/linearization_offsets_t.hpp>

#include "./factor.h"
#include "./internal/linearized_dense_factor_pool.h"
#include "./linearization.h"

namespace sym {

template <typename Scalar>
class DenseLinearizer {
public:
using LinearizedDenseFactor = typename Factor<Scalar>::LinearizedDenseFactor;

/**
* Construct a Linearizer from factors and optional keys
*
* Args:
* name: name of linearizer used for debug information
* factors: Only stores a pointer, MUST be in scope for the lifetime of this object!
* key_order: If provided, acts as an ordered set of keys that form the state vector
* to optimize. Can equal the set of all factor keys or a subset of all
* factor keys. If not provided, it is computed from all keys for all
* factors using a default ordering.
* include_jacobians: If true, Relinearize allocates and fills out the jacobian; if false,
* the jacobian is not computed.
*/
DenseLinearizer(const std::string& name, const std::vector<Factor<Scalar>>& factors,
const std::vector<Key>& key_order = {}, bool include_jacobians = false);

/**
* Returns whether Relinearize has already been called once. This matters because many
* quantities are computed only during the first linearization and then cached for subsequent
* use. Also, if Relinearize has already been called, the matrices in the linearization are
* expected to already be allocated to the right size.
*/
bool IsInitialized() const;

/**
* The keys to optimize, in the order they appear in the state vector (i.e., in rhs).
*/
const std::vector<Key>& Keys() const;

/**
* A map from all optimized keys in the problem to an index entry for the corresponding optimized
* values (where the offset is into the problem state vector, i.e., the rhs of a linearization).
*
* Only read if IsInitialized() returns true (i.e., after the first call to Relinearize).
*/
const std::unordered_map<key_t, index_entry_t>& StateIndex() const;

/**
* Update linearization at a new evaluation point.
* This is more efficient than reconstructing this object repeatedly. On the first call, it will
* allocate memory and perform analysis needed for efficient repeated relinearization.
*
* On the first call to Relinearize, the matrices in linearization will be allocated and sized
* correctly. On subsequent calls, the matrices of linearization are expected to already be
* allocated and sized correctly.
*
* TODO(aaron): This should be const except that it can initialize the object
*/
void Relinearize(const Values<Scalar>& values, DenseLinearization<Scalar>& linearization);

private:
// The name of this linearizer to be used for printing debug information.
std::string name_;
const std::vector<Factor<Scalar>>* factors_;
std::vector<Key> keys_;
std::unordered_map<key_t, index_entry_t> state_index_;
internal::LinearizedDenseFactorPool<Scalar> linearized_dense_factors_;
bool is_initialized_;
bool include_jacobians_;

// The index for each factor in the values. Cached the first time we linearize, to avoid repeated
// unordered_map lookups
std::vector<std::vector<index_entry_t>> factor_indices_;

// One std::vector<linearization_offsets_t> per factor in factors_ (same order).
// The nth linearization_offsets_t of a vector holds the offsets, into the factor and state
// vectors, of the corresponding factor's nth optimized key (optimized, NOT merely linearized:
// a factor key may be linearized without being optimized).

/**
* Evaluates the linearizations of the factors at values into linearization, caching all values
* needed for relinearization along the way. Specifically:
* - Calculates state_index_
* - Allocates all factor linearizations in linearized_dense_factors_
* - Calculates factor_indices_
* - Calculates factor_keyoffsets_
* Sets is_initialized_ to true.
*
* NOTE(brad): Things might break if you call it twice.
*/
void InitialLinearization(const Values<Scalar>& values,
DenseLinearization<Scalar>& linearization);
};

} // namespace sym

extern template class sym::DenseLinearizer<double>;
extern template class sym::DenseLinearizer<float>;
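The extern template declarations above pair with the explicit instantiations at the end of dense_linearizer.cc: translation units that include the header skip implicit instantiation, and each specialization is compiled exactly once. A generic sketch of the pattern (hypothetical Widget, not from this commit):

// widget.h
template <typename Scalar>
class Widget {
 public:
  Scalar Twice(Scalar x) const { return x + x; }
};
extern template class Widget<double>;  // declared here, defined in widget.cc

// widget.cc
#include "widget.h"
template class Widget<double>;  // the single explicit instantiation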
