Permalink
Cannot retrieve contributors at this time
Name already in use
A tag already exists with the provided branch name. Many Git commands accept both tag and branch names, so creating this branch may cause unexpected behavior. Are you sure you want to create this branch?
mrcal/mrcal.c
Go to fileThis commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
5589 lines (4925 sloc)
239 KB
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Copyright (c) 2017-2023 California Institute of Technology ("Caltech"). U.S. | |
// Government sponsorship acknowledged. All rights reserved. | |
// | |
// Licensed under the Apache License, Version 2.0 (the "License"); | |
// You may obtain a copy of the License at | |
// | |
// http://www.apache.org/licenses/LICENSE-2.0 | |
#define _GNU_SOURCE | |
#include <stdio.h> | |
#include <stdlib.h> | |
#include <inttypes.h> | |
#include <dogleg.h> | |
#include <assert.h> | |
#include <stdbool.h> | |
#include <math.h> | |
#include <string.h> | |
#include "mrcal.h" | |
#include "minimath/minimath.h" | |
#include "cahvore.h" | |
#include "util.h" | |
// These are parameter variable scales. They have the units of the parameters | |
// themselves, so the optimizer sees x/SCALE_X for each parameter. I.e. as far | |
// as the optimizer is concerned, the scale of each variable is 1. This doesn't | |
// need to be precise; just need to get all the variables to be within the same | |
// order of magnitute. This is important because the dogleg solve treats the | |
// trust region as a ball in state space, and this ball is isotropic, and has a | |
// radius that applies in every direction | |
// | |
// Can be visualized like this: | |
// | |
// b0,x0,J0 = mrcal.optimizer_callback(**optimization_inputs)[:3] | |
// J0 = J0.toarray() | |
// ss = np.sum(np.abs(J0), axis=-2) | |
// gp.plot(ss, _set=mrcal.plotoptions_state_boundaries(**optimization_inputs)) | |
// | |
// This visualizes the overall effect of each variable. If the scales aren't | |
// tuned properly, some variables will have orders of magnitude stronger | |
// response than others, and the optimization problem won't converge well. | |
// | |
// The scipy.optimize.least_squares() function claims to be able to estimate | |
// these automatically, without requiring these hard-coded values from the user. | |
// See the description of the "x_scale" argument: | |
// | |
// https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html | |
// | |
// Supposedly this paper describes the method: | |
// | |
// J. J. More, "The Levenberg-Marquardt Algorithm: Implementation and Theory," | |
// Numerical Analysis, ed. G. A. Watson, Lecture Notes in Mathematics 630, | |
// Springer Verlag, pp. 105-116, 1977. | |
// | |
// Please somebody look at this | |
#define SCALE_INTRINSICS_FOCAL_LENGTH 500.0 | |
#define SCALE_INTRINSICS_CENTER_PIXEL 20.0 | |
#define SCALE_ROTATION_CAMERA (0.1 * M_PI/180.0) | |
#define SCALE_TRANSLATION_CAMERA 1.0 | |
#define SCALE_ROTATION_FRAME (15.0 * M_PI/180.0) | |
#define SCALE_TRANSLATION_FRAME 1.0 | |
#define SCALE_POSITION_POINT SCALE_TRANSLATION_FRAME | |
#define SCALE_CALOBJECT_WARP 0.01 | |
#define SCALE_DISTORTION 1.0 | |
#define MSG_IF_VERBOSE(...) do { if(verbose) MSG( __VA_ARGS__ ); } while(0) | |
#define CHECK_CONFIG_NPARAM_NOCONFIG(s,n) \ | |
static_assert(n > 0, "no-config implies known-at-compile-time param count"); | |
#define CHECK_CONFIG_NPARAM_WITHCONFIG(s,n) \ | |
static_assert(n <= 0, "Models with a configuration define their parameter counts in LENSMODEL_XXX__lensmodel_num_params(); their compile-time-defined counts are ignored"); | |
MRCAL_LENSMODEL_NOCONFIG_LIST( CHECK_CONFIG_NPARAM_NOCONFIG) | |
MRCAL_LENSMODEL_WITHCONFIG_LIST(CHECK_CONFIG_NPARAM_WITHCONFIG) | |
// Returns a static string, using "..." as a placeholder for any configuration | |
// values | |
#define LENSMODEL_PRINT_CFG_ELEMENT_TEMPLATE(name, type, pybuildvaluecode, PRIcode,SCNcode, bitfield, cookie) \ | |
"_" #name "=..." | |
#define LENSMODEL_PRINT_CFG_ELEMENT_FMT(name, type, pybuildvaluecode, PRIcode,SCNcode, bitfield, cookie) \ | |
"_" #name "=%" PRIcode | |
#define LENSMODEL_PRINT_CFG_ELEMENT_VAR(name, type, pybuildvaluecode, PRIcode,SCNcode, bitfield, cookie) \ | |
,config->name | |
#define LENSMODEL_SCAN_CFG_ELEMENT_FMT(name, type, pybuildvaluecode, PRIcode,SCNcode, bitfield, cookie) \ | |
"_" #name "=%" SCNcode | |
#define LENSMODEL_SCAN_CFG_ELEMENT_VAR(name, type, pybuildvaluecode, PRIcode,SCNcode, bitfield, cookie) \ | |
,&config->name | |
#define LENSMODEL_SCAN_CFG_ELEMENT_PLUS1(name, type, pybuildvaluecode, PRIcode,SCNcode, bitfield, cookie) \ | |
+1 | |
// Returns a static string with the lens-model name. Configuration values are
// NOT filled in: each config element appears as a "_name=..." placeholder
const char* mrcal_lensmodel_name_unconfigured( const mrcal_lensmodel_t* lensmodel )
{
    switch(lensmodel->type)
    {
        // X-macro cases: each expands to "case MRCAL_xxx: return "xxx...";"
#define CASE_STRING_NOCONFIG(s,n) case MRCAL_##s: ;     \
        return #s;
#define _CASE_STRING_WITHCONFIG(s,n,s_CONFIG_LIST) case MRCAL_##s: ; \
        return #s s_CONFIG_LIST(LENSMODEL_PRINT_CFG_ELEMENT_TEMPLATE, );
#define CASE_STRING_WITHCONFIG(s,n) _CASE_STRING_WITHCONFIG(s,n,MRCAL_ ## s ## _CONFIG_LIST)

        MRCAL_LENSMODEL_NOCONFIG_LIST(   CASE_STRING_NOCONFIG )
        MRCAL_LENSMODEL_WITHCONFIG_LIST( CASE_STRING_WITHCONFIG )

    default:
        assert(0);

#undef CASE_STRING_NOCONFIG
#undef CASE_STRING_WITHCONFIG
    }

    // Unreachable: assert(0) above fires on any unknown type
    return NULL;
}
// Write the model name WITH the full config into the given buffer. Identical to
// mrcal_lensmodel_name_unconfigured() for configuration-free models.
// Returns what snprintf() returns: the number of characters that WOULD have
// been written, ignoring any truncation by the buffer size
static int LENSMODEL_CAHVORE__snprintf_model
    (char* out, int size,
     const mrcal_LENSMODEL_CAHVORE__config_t* config)
{
    // The X-macros expand to a single format string with one "_name=%PRI..."
    // chunk per config element, followed by the matching config->name varargs
    return
        snprintf( out, size, "LENSMODEL_CAHVORE"
                  MRCAL_LENSMODEL_CAHVORE_CONFIG_LIST(LENSMODEL_PRINT_CFG_ELEMENT_FMT, )
                  MRCAL_LENSMODEL_CAHVORE_CONFIG_LIST(LENSMODEL_PRINT_CFG_ELEMENT_VAR, ));
}
// Write the LENSMODEL_SPLINED_STEREOGRAPHIC model name WITH the full config
// into the given buffer. Returns what snprintf() returns: the number of
// characters that WOULD have been written, ignoring buffer-size truncation
static int LENSMODEL_SPLINED_STEREOGRAPHIC__snprintf_model
    (char* out, int size,
     const mrcal_LENSMODEL_SPLINED_STEREOGRAPHIC__config_t* config)
{
    // X-macro expansion: one "_name=%PRI..." format chunk per config element,
    // followed by the matching config->name varargs
    return
        snprintf( out, size, "LENSMODEL_SPLINED_STEREOGRAPHIC"
                  MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC_CONFIG_LIST(LENSMODEL_PRINT_CFG_ELEMENT_FMT, )
                  MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC_CONFIG_LIST(LENSMODEL_PRINT_CFG_ELEMENT_VAR, ));
}
bool mrcal_lensmodel_name( char* out, int size, const mrcal_lensmodel_t* lensmodel ) | |
{ | |
switch(lensmodel->type) | |
{ | |
#define CASE_STRING_NOCONFIG(s,n) case MRCAL_##s: \ | |
return size > snprintf(out,size, #s); | |
#define CASE_STRING_WITHCONFIG(s,n) case MRCAL_##s: \ | |
return size > s##__snprintf_model(out, size, &lensmodel->s##__config); | |
MRCAL_LENSMODEL_NOCONFIG_LIST( CASE_STRING_NOCONFIG ) | |
MRCAL_LENSMODEL_WITHCONFIG_LIST( CASE_STRING_WITHCONFIG ) | |
default: | |
assert(0); | |
#undef CASE_STRING_NOCONFIG | |
#undef CASE_STRING_WITHCONFIG | |
} | |
return NULL; | |
} | |
// Parse the CAHVORE configuration suffix ("_name1=val1_name2=val2...") into
// the config structure. Returns true only if EVERY config element was parsed
// and the entire string was consumed
static bool LENSMODEL_CAHVORE__scan_model_config( mrcal_LENSMODEL_CAHVORE__config_t* config, const char* config_str)
{
    int pos;
    // Count the config elements by expanding "+1" once per element
    int Nelements = 0 MRCAL_LENSMODEL_CAHVORE_CONFIG_LIST(LENSMODEL_SCAN_CFG_ELEMENT_PLUS1, );

    // "%n" stores the number of characters consumed so far: a successful
    // parse requires the next character to be the terminating NUL
    return
        Nelements ==
        sscanf( config_str,
                MRCAL_LENSMODEL_CAHVORE_CONFIG_LIST(LENSMODEL_SCAN_CFG_ELEMENT_FMT, )"%n"
                MRCAL_LENSMODEL_CAHVORE_CONFIG_LIST(LENSMODEL_SCAN_CFG_ELEMENT_VAR, ),
                &pos) &&
        config_str[pos] == '\0';
}
// Parse the splined-stereographic configuration suffix
// ("_name1=val1_name2=val2...") into the config structure. Returns true only
// if EVERY config element was parsed and the entire string was consumed
static bool LENSMODEL_SPLINED_STEREOGRAPHIC__scan_model_config( mrcal_LENSMODEL_SPLINED_STEREOGRAPHIC__config_t* config, const char* config_str)
{
    int pos;
    // Count the config elements by expanding "+1" once per element
    int Nelements = 0 MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC_CONFIG_LIST(LENSMODEL_SCAN_CFG_ELEMENT_PLUS1, );

    // "%n" stores the number of characters consumed so far: a successful
    // parse requires the next character to be the terminating NUL
    return
        Nelements ==
        sscanf( config_str,
                MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC_CONFIG_LIST(LENSMODEL_SCAN_CFG_ELEMENT_FMT, )"%n"
                MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC_CONFIG_LIST(LENSMODEL_SCAN_CFG_ELEMENT_VAR, ),
                &pos) &&
        config_str[pos] == '\0';
}
// Returns a NULL-terminated array of static strings naming every supported
// lens model. Configured models show "_name=..." placeholders for their
// config values
const char* const* mrcal_supported_lensmodel_names( void )
{
#define NAMESTRING_NOCONFIG(s,n) #s,
#define _NAMESTRING_WITHCONFIG(s,n,s_CONFIG_LIST) #s s_CONFIG_LIST(LENSMODEL_PRINT_CFG_ELEMENT_TEMPLATE, ),
#define NAMESTRING_WITHCONFIG(s,n) _NAMESTRING_WITHCONFIG(s,n,MRCAL_ ## s ## _CONFIG_LIST)

    // Built once at compile time; NULL-terminated so callers can iterate
    // without a separate count
    static const char* names[] = {
        MRCAL_LENSMODEL_NOCONFIG_LIST(  NAMESTRING_NOCONFIG)
        MRCAL_LENSMODEL_WITHCONFIG_LIST(NAMESTRING_WITHCONFIG)
        NULL };
    return names;
}
#undef LENSMODEL_PRINT_CFG_ELEMENT_TEMPLATE | |
#undef LENSMODEL_PRINT_CFG_ELEMENT_FMT | |
#undef LENSMODEL_PRINT_CFG_ELEMENT_VAR | |
#undef LENSMODEL_SCAN_CFG_ELEMENT_FMT | |
#undef LENSMODEL_SCAN_CFG_ELEMENT_VAR | |
#undef LENSMODEL_SCAN_CFG_ELEMENT_PLUS1 | |
// parses the model name AND the configuration into a mrcal_lensmodel_t structure.
// Strings with valid model names but missing or unparseable configuration
// return {.type = MRCAL_LENSMODEL_INVALID_BADCONFIG}. Unknown model names return
// {.type = MRCAL_LENSMODEL_INVALID}
//
// Returns true on success; on failure, *lensmodel is set to one of the
// INVALID types described above
bool mrcal_lensmodel_from_name( // output
                                mrcal_lensmodel_t* lensmodel,

                                // input
                                const char* name )
{
#define CHECK_AND_RETURN_NOCONFIG(s,n)                                  \
    if( 0 == strcmp( name, #s) )                                        \
    {                                                                   \
        *lensmodel = (mrcal_lensmodel_t){.type = MRCAL_##s};            \
        return true;                                                    \
    }

#define CHECK_AND_RETURN_WITHCONFIG(s,n)                                \
    /* Configured model. I need to extract the config from the string. */ \
    /* The string format is NAME_cfg1=var1_cfg2=var2... */              \
    if( 0 == strcmp( name, #s) )                                        \
    {                                                                   \
        *lensmodel = (mrcal_lensmodel_t){.type = MRCAL_LENSMODEL_INVALID_BADCONFIG}; \
        return false;                                                   \
    }                                                                   \
    if( 0 == strncmp( name, #s"_", strlen(#s)+1) )                      \
    {                                                                   \
        /* found name. Now extract the config */                        \
        *lensmodel = (mrcal_lensmodel_t){.type = MRCAL_##s};            \
        mrcal_##s##__config_t* config = &lensmodel->s##__config;        \
                                                                        \
        const char* config_str = &name[strlen(#s)];                     \
                                                                        \
        if(s##__scan_model_config(config, config_str))                  \
            return true;                                                \
        else                                                            \
        {                                                               \
            *lensmodel = (mrcal_lensmodel_t){.type = MRCAL_LENSMODEL_INVALID_BADCONFIG}; \
            return false;                                               \
        }                                                               \
    }

    MRCAL_LENSMODEL_NOCONFIG_LIST(   CHECK_AND_RETURN_NOCONFIG );
    MRCAL_LENSMODEL_WITHCONFIG_LIST( CHECK_AND_RETURN_WITHCONFIG );

    // No known model name matched
    *lensmodel = (mrcal_lensmodel_t){.type = MRCAL_LENSMODEL_INVALID};
    return false;

#undef CHECK_AND_RETURN_NOCONFIG
#undef CHECK_AND_RETURN_WITHCONFIG
}
// parses the model name only. The configuration is ignored. Even if it's
// missing or unparseable. Unknown model names return MRCAL_LENSMODEL_INVALID
mrcal_lensmodel_type_t mrcal_lensmodel_type_from_name( const char* name )
{
#define CHECK_AND_RETURN_NOCONFIG(s,n)                  \
    if( 0 == strcmp( name, #s) ) return MRCAL_##s;

#define CHECK_AND_RETURN_WITHCONFIG(s,n)                                \
    /* Configured model. If the name is followed by _ or nothing, I */  \
    /* accept this model */                                             \
    if( 0 == strcmp( name, #s) ) return MRCAL_##s;                      \
    if( 0 == strncmp( name, #s"_", strlen(#s)+1) ) return MRCAL_##s;

    MRCAL_LENSMODEL_NOCONFIG_LIST(   CHECK_AND_RETURN_NOCONFIG );
    MRCAL_LENSMODEL_WITHCONFIG_LIST( CHECK_AND_RETURN_WITHCONFIG );

    // Nothing matched
    return MRCAL_LENSMODEL_INVALID;

#undef CHECK_AND_RETURN_NOCONFIG
#undef CHECK_AND_RETURN_WITHCONFIG
}
// Returns the static metadata flags describing the given lens model:
// whether it has an fx,fy,cx,cy core, whether it can represent points behind
// the camera, whether gradients are implemented, and whether the model is
// noncentral. Unknown model types abort via assert(0)
mrcal_lensmodel_metadata_t mrcal_lensmodel_metadata( const mrcal_lensmodel_t* lensmodel )
{
    switch(lensmodel->type)
    {
    // These models can represent observations behind the camera
    case MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC:
    case MRCAL_LENSMODEL_STEREOGRAPHIC:
    case MRCAL_LENSMODEL_LONLAT:
    case MRCAL_LENSMODEL_LATLON:
        return (mrcal_lensmodel_metadata_t) { .has_core                  = true,
                                              .can_project_behind_camera = true,
                                              .has_gradients             = true,
                                              .noncentral                = false};

    // Central models that project only in front of the camera
    case MRCAL_LENSMODEL_PINHOLE:
    case MRCAL_LENSMODEL_OPENCV4:
    case MRCAL_LENSMODEL_OPENCV5:
    case MRCAL_LENSMODEL_OPENCV8:
    case MRCAL_LENSMODEL_OPENCV12:
    case MRCAL_LENSMODEL_CAHVOR:
        return (mrcal_lensmodel_metadata_t) { .has_core                  = true,
                                              .can_project_behind_camera = false,
                                              .has_gradients             = true,
                                              .noncentral                = false };

    // The only noncentral model handled here
    case MRCAL_LENSMODEL_CAHVORE:
        return (mrcal_lensmodel_metadata_t) { .has_core                  = true,
                                              .can_project_behind_camera = false,
                                              .has_gradients             = true,
                                              .noncentral                = true };

    default: ;
    }

    // Unknown model type: hard failure
    MSG("Unknown lens model %d. Barfing out", lensmodel->type);
    assert(0);
}
static | |
bool modelHasCore_fxfycxcy( const mrcal_lensmodel_t* lensmodel ) | |
{ | |
mrcal_lensmodel_metadata_t meta = mrcal_lensmodel_metadata(lensmodel); | |
return meta.has_core; | |
} | |
static | |
bool model_supports_projection_behind_camera( const mrcal_lensmodel_t* lensmodel ) | |
{ | |
mrcal_lensmodel_metadata_t meta = mrcal_lensmodel_metadata(lensmodel); | |
return meta.can_project_behind_camera; | |
} | |
// Parameter count for CAHVORE. The config is unused: the count is fixed
static int LENSMODEL_CAHVORE__lensmodel_num_params(const mrcal_LENSMODEL_CAHVORE__config_t* config)
{
    /* CAHVORE is CAHVOR + E */
    const int Ncore       = 4; // fx,fy,cx,cy
    const int Ndistortion = 5; // the CAHVOR distortion parameters
    const int NE          = 3; // the E parameters
    return Ncore + Ndistortion + NE;
}
static int LENSMODEL_SPLINED_STEREOGRAPHIC__lensmodel_num_params(const mrcal_LENSMODEL_SPLINED_STEREOGRAPHIC__config_t* config) | |
{ | |
return | |
// I have two surfaces: one for x and another for y | |
(int)config->Nx * (int)config->Ny * 2 + | |
// and I have a core | |
4; | |
} | |
// Reports the total number of lens parameters (core + distortion) for the
// given model. Unknown model types return -1
int mrcal_lensmodel_num_params(const mrcal_lensmodel_t* lensmodel)
{
    switch(lensmodel->type)
    {
        // Config-free models have a compile-time count n; configured models
        // compute their count from the config
#define CASE_NUM_NOCONFIG(s,n)                  \
        case MRCAL_##s: return n;

#define CASE_NUM_WITHCONFIG(s,n)                \
        case MRCAL_##s: return s##__lensmodel_num_params(&lensmodel->s##__config);

        MRCAL_LENSMODEL_NOCONFIG_LIST(   CASE_NUM_NOCONFIG )
        MRCAL_LENSMODEL_WITHCONFIG_LIST( CASE_NUM_WITHCONFIG )

    default: ;
    }

    // Unknown model type
    return -1;

#undef CASE_NUM_NOCONFIG
#undef CASE_NUM_WITHCONFIG
}
static | |
int get_num_distortions_optimization_params(mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
if( !problem_selections.do_optimize_intrinsics_distortions ) | |
return 0; | |
int N = mrcal_lensmodel_num_params(lensmodel); | |
if(modelHasCore_fxfycxcy(lensmodel)) | |
N -= 4; // ignoring fx,fy,cx,cy | |
return N; | |
} | |
int mrcal_num_intrinsics_optimization_params(mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
int N = get_num_distortions_optimization_params(problem_selections, lensmodel); | |
if( problem_selections.do_optimize_intrinsics_core && | |
modelHasCore_fxfycxcy(lensmodel) ) | |
N += 4; // fx,fy,cx,cy | |
return N; | |
} | |
int mrcal_num_states(int Ncameras_intrinsics, int Ncameras_extrinsics, | |
int Nframes, | |
int Npoints, int Npoints_fixed, int Nobservations_board, | |
mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
return | |
mrcal_num_states_intrinsics(Ncameras_intrinsics, | |
problem_selections, | |
lensmodel) + | |
mrcal_num_states_extrinsics(Ncameras_extrinsics, | |
problem_selections) + | |
mrcal_num_states_frames(Nframes, | |
problem_selections) + | |
mrcal_num_states_points(Npoints, Npoints_fixed, | |
problem_selections) + | |
mrcal_num_states_calobject_warp( problem_selections, | |
Nobservations_board); | |
} | |
static int num_regularization_terms_percamera(mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
if(!problem_selections.do_apply_regularization) | |
return 0; | |
// distortions | |
int N = get_num_distortions_optimization_params(problem_selections, lensmodel); | |
// optical center | |
if(problem_selections.do_optimize_intrinsics_core) | |
N += 2; | |
return N; | |
} | |
// Returns the index of the first measurement of the given board observation,
// or -1 if there are no board observations. Board measurements come first in
// the measurement vector, so this is just the measurement count of the
// preceding i_observation_board board observations
int mrcal_measurement_index_boards(int i_observation_board,
                                   int Nobservations_board,
                                   int Nobservations_point,
                                   int calibration_object_width_n,
                                   int calibration_object_height_n)
{
    if(Nobservations_board <= 0)
        return -1;

    // *2 because I have separate x and y measurements
    return
        0 +
        mrcal_num_measurements_boards(i_observation_board,
                                      calibration_object_width_n,
                                      calibration_object_height_n);
}
// Number of measurements contributed by the board observations: each board
// observation is a width_n x height_n grid of points, and each point
// produces separate x and y measurements. Returns 0 if there are no board
// observations
int mrcal_num_measurements_boards(int Nobservations_board,
                                  int calibration_object_width_n,
                                  int calibration_object_height_n)
{
    if(Nobservations_board <= 0)
        return 0;

    // *2 because I have separate x and y measurements
    return
        Nobservations_board *
        calibration_object_width_n*calibration_object_height_n *
        2;
    // Note: the original implementation had an unreachable second "return
    // mrcal_measurement_index_boards(...)" statement here (dead code after
    // the return above); it has been removed
}
// Returns the index of the first measurement of the given point observation,
// or -1 if there are no point observations. All board measurements come
// first; each point observation then contributes 3 measurements: x,y and a
// range normalization
int mrcal_measurement_index_points(int i_observation_point,
                                   int Nobservations_board,
                                   int Nobservations_point,
                                   int calibration_object_width_n,
                                   int calibration_object_height_n)
{
    if(Nobservations_point <= 0)
        return -1;

    // 3: x,y measurements, range normalization
    const int Nmeasurements_boards =
        mrcal_num_measurements_boards(Nobservations_board,
                                      calibration_object_width_n,
                                      calibration_object_height_n);
    return Nmeasurements_boards + 3*i_observation_point;
}
// Number of measurements contributed by the point observations. Each point
// observation produces an x measurement, a y measurement and a range
// normalization
int mrcal_num_measurements_points(int Nobservations_point)
{
    const int Nmeasurements_per_point = 3; // x, y, range normalization
    return Nmeasurements_per_point * Nobservations_point;
}
int mrcal_measurement_index_regularization(int calibration_object_width_n, | |
int calibration_object_height_n, | |
int Ncameras_intrinsics, int Ncameras_extrinsics, | |
int Nframes, | |
int Npoints, int Npoints_fixed, int Nobservations_board, int Nobservations_point, | |
mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
if(mrcal_num_measurements_regularization(Ncameras_intrinsics, Ncameras_extrinsics, | |
Nframes, | |
Npoints, Npoints_fixed, Nobservations_board, | |
problem_selections, | |
lensmodel) <= 0) | |
return -1; | |
return | |
mrcal_num_measurements_boards(Nobservations_board, | |
calibration_object_width_n, | |
calibration_object_height_n) + | |
mrcal_num_measurements_points(Nobservations_point); | |
} | |
int mrcal_num_measurements_regularization(int Ncameras_intrinsics, int Ncameras_extrinsics, | |
int Nframes, | |
int Npoints, int Npoints_fixed, int Nobservations_board, | |
mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
return | |
Ncameras_intrinsics * | |
num_regularization_terms_percamera(problem_selections, lensmodel); | |
} | |
int mrcal_num_measurements(int Nobservations_board, | |
int Nobservations_point, | |
int calibration_object_width_n, | |
int calibration_object_height_n, | |
int Ncameras_intrinsics, int Ncameras_extrinsics, | |
int Nframes, | |
int Npoints, int Npoints_fixed, | |
mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
return | |
mrcal_num_measurements_boards( Nobservations_board, | |
calibration_object_width_n, | |
calibration_object_height_n) + | |
mrcal_num_measurements_points(Nobservations_point) + | |
mrcal_num_measurements_regularization(Ncameras_intrinsics, Ncameras_extrinsics, | |
Nframes, | |
Npoints, Npoints_fixed, Nobservations_board, | |
problem_selections, | |
lensmodel); | |
} | |
// Computes the exact number of non-zero entries in the optimization jacobian.
// This must agree exactly with the jacobian the optimizer callback fills in
int _mrcal_num_j_nonzero(int Nobservations_board,
                         int Nobservations_point,
                         int calibration_object_width_n,
                         int calibration_object_height_n,
                         int Ncameras_intrinsics, int Ncameras_extrinsics,
                         int Nframes,
                         int Npoints, int Npoints_fixed,
                         const mrcal_observation_board_t* observations_board,
                         const mrcal_observation_point_t* observations_point,
                         mrcal_problem_selections_t problem_selections,
                         const mrcal_lensmodel_t* lensmodel)
{
    // each observation depends on all the parameters for THAT frame and for
    // THAT camera. Camera0 doesn't have extrinsics, so I need to loop through
    // all my observations

    // Each projected point has an x and y measurement, and each one depends on
    // some number of the intrinsic parameters. Parametric models are simple:
    // each one depends on ALL of the intrinsics. Splined models are sparse,
    // however, and there's only a partial dependence
    int Nintrinsics_per_measurement;
    if(lensmodel->type == MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC)
    {
        // Each splined measurement touches only a local (order+1)^2 patch of
        // control points, plus the core if that's being optimized
        int run_len =
            lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config.order + 1;
        Nintrinsics_per_measurement =
            (problem_selections.do_optimize_intrinsics_core        ? 4 : 0)  +
            (problem_selections.do_optimize_intrinsics_distortions ? (run_len*run_len) : 0);
    }
    else
        Nintrinsics_per_measurement =
            mrcal_num_intrinsics_optimization_params(problem_selections, lensmodel);

    // x depends on fx,cx but NOT on fy, cy. And similarly for y.
    if( problem_selections.do_optimize_intrinsics_core &&
        modelHasCore_fxfycxcy(lensmodel) )
        Nintrinsics_per_measurement -= 2;

    // Per-point non-zero count for the board observations: frame pose,
    // camera pose, calibration-object warp, intrinsics
    int N = Nobservations_board * ( (problem_selections.do_optimize_frames         ? 6 : 0) +
                                    (problem_selections.do_optimize_extrinsics     ? 6 : 0) +
                                    (problem_selections.do_optimize_calobject_warp ? MRCAL_NSTATE_CALOBJECT_WARP : 0) +
                                    Nintrinsics_per_measurement );

    // initial estimate counts extrinsics for the reference camera, which need
    // to be subtracted off
    if(problem_selections.do_optimize_extrinsics)
        for(int i=0; i<Nobservations_board; i++)
            if(observations_board[i].icam.extrinsics < 0)
                N -= 6;

    // *2 because I have separate x and y measurements
    N *= 2*calibration_object_width_n*calibration_object_height_n;

    // Now the point observations
    for(int i=0; i<Nobservations_point; i++)
    {
        // x,y pixel measurements: intrinsics, point position (if the point
        // is being optimized), camera pose (if not the reference camera)
        N += 2*Nintrinsics_per_measurement;
        if( problem_selections.do_optimize_frames &&
            observations_point[i].i_point < Npoints-Npoints_fixed )
            N += 2*3;
        if( problem_selections.do_optimize_extrinsics &&
            observations_point[i].icam.extrinsics >= 0 )
            N += 2*6;

        // range normalization measurement: same dependencies, one row
        if(problem_selections.do_optimize_frames &&
           observations_point[i].i_point < Npoints-Npoints_fixed )
            N += 3;
        if( problem_selections.do_optimize_extrinsics &&
            observations_point[i].icam.extrinsics >= 0 )
            N += 6;
    }

    if(lensmodel->type == MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC)
    {
        if(problem_selections.do_apply_regularization)
        {
            // Each regularization term depends on
            // - two values for distortions
            // - one value for the center pixel
            N +=
                Ncameras_intrinsics *
                2 *
                num_regularization_terms_percamera(problem_selections,
                                                   lensmodel);

            // I multiplied by 2, so I double-counted the center pixel
            // contributions. Subtract those off
            if(problem_selections.do_optimize_intrinsics_core)
                N -= Ncameras_intrinsics*2;
        }
    }
    else
        // Parametric models: one non-zero entry per regularization term
        N +=
            Ncameras_intrinsics *
            num_regularization_terms_percamera(problem_selections,
                                               lensmodel);

    return N;
}
// Used in the spline-based projection function.
//
// See bsplines.py for the derivation of the spline expressions and for
// justification of the 2D scheme
//
// Here we sample two interpolated surfaces at once: one each for the x and y
// focal-length scales
//
// Evaluates a bicubic B-spline surface and its x,y gradients at (x,y), for
// both interleaved surfaces simultaneously. NOTE: this uses nested functions,
// a GNU C extension (GCC/clang with -fblocks off)
static
void sample_bspline_surface_cubic(double* out,
                                  double* dout_dx,
                                  double* dout_dy,
                                  double* ABCDx_ABCDy,

                                  double x, double y,
                                  // control points
                                  const double* c,

                                  int stridey

                                  // stridex is 2: the control points from the
                                  // two surfaces are next to each other. Better
                                  // cache locality maybe
                                  )
{
    // ABCDx_ABCDy is an 8-element output: the basis coefficients along x and
    // along y, returned so the caller can build the intrinsics gradient
    double* ABCDx = &ABCDx_ABCDy[0];
    double* ABCDy = &ABCDx_ABCDy[4];

    // The sampling function assumes evenly spaced knots.
    // a,b,c,d are sequential control points
    // x is in [0,1] between b and c. Function looks like this:
    //   double A = fA(x);
    //   double B = fB(x);
    //   double C = fC(x);
    //   double D = fD(x);
    //   return A*a + B*b + C*c + D*d;
    // I need to sample many such 1D segments, so I compute A,B,C,D separately,
    // and apply them together
    void get_sample_coeffs(double* ABCD, double* ABCDgrad, double x)
    {
        // Uniform cubic B-spline basis functions, and their derivatives
        double x2 = x*x;
        double x3 = x2*x;
        ABCD[0] = (-x3 + 3*x2 - 3*x + 1)/6;
        ABCD[1] = (3 * x3/2 - 3*x2 + 2)/3;
        ABCD[2] = (-3 * x3 + 3*x2 + 3*x + 1)/6;
        ABCD[3] = x3 / 6;

        ABCDgrad[0] = -x2/2 + x - 1./2.;
        ABCDgrad[1] = 3*x2/2 - 2*x;
        ABCDgrad[2] = -3*x2/2 + x + 1./2.;
        ABCDgrad[3] = x2 / 2;
    }

    // 4 samples along one dimension, and then one sample along the other
    // dimension, using the 4 samples as the control points. Order doesn't
    // matter. See bsplines.py
    //
    // I do this twice: one for each focal length surface
    double ABCDgradx[4];
    double ABCDgrady[4];
    get_sample_coeffs(ABCDx, ABCDgradx, x);
    get_sample_coeffs(ABCDy, ABCDgrady, y);

    // Tensor-product evaluation: combine the 4x4 patch of control points
    // using the x coefficients along each row, then the y coefficients across
    // the row results. k indexes the two interleaved surfaces
    void interp(double* out, const double* ABCDx, const double* ABCDy)
    {
        double cinterp[4][2];
        const int stridex = 2;
        for(int iy=0; iy<4; iy++)
            for(int k=0;k<2;k++)
                cinterp[iy][k] =
                    ABCDx[0] * c[iy*stridey + 0*stridex + k] +
                    ABCDx[1] * c[iy*stridey + 1*stridex + k] +
                    ABCDx[2] * c[iy*stridey + 2*stridex + k] +
                    ABCDx[3] * c[iy*stridey + 3*stridex + k];
        for(int k=0;k<2;k++)
            out[k] =
                ABCDy[0] * cinterp[0][k] +
                ABCDy[1] * cinterp[1][k] +
                ABCDy[2] * cinterp[2][k] +
                ABCDy[3] * cinterp[3][k];
    }

    // the intrinsics gradient is flatten(ABCDx[0..3] * ABCDy[0..3]) for both x
    // and y. By returning ABCD[xy] and not the cartesian products, I make
    // smaller temporary data arrays
    interp(out,     ABCDx,     ABCDy);
    interp(dout_dx, ABCDgradx, ABCDy);
    interp(dout_dy, ABCDx,     ABCDgrady);
}
// Quadratic counterpart of sample_bspline_surface_cubic(): evaluates a
// biquadratic B-spline surface and its x,y gradients at (x,y) over a 3x3
// control-point patch, for both interleaved surfaces simultaneously. Uses
// GNU C nested functions like the cubic version
static
void sample_bspline_surface_quadratic(double* out,
                                      double* dout_dx,
                                      double* dout_dy,
                                      double* ABCx_ABCy,

                                      double x, double y,
                                      // control points
                                      const double* c,

                                      int stridey

                                      // stridex is 2: the control points from the
                                      // two surfaces are next to each other. Better
                                      // cache locality maybe
                                      )
{
    // ABCx_ABCy is a 6-element output: the basis coefficients along x and
    // along y, returned so the caller can build the intrinsics gradient
    double* ABCx = &ABCx_ABCy[0];
    double* ABCy = &ABCx_ABCy[3];

    // The sampling function assumes evenly spaced knots.
    // a,b,c are sequential control points
    // x is in [-1/2,1/2] around b. Function looks like this:
    //   double A = fA(x);
    //   double B = fB(x);
    //   double C = fC(x);
    //   return A*a + B*b + C*c;
    // I need to sample many such 1D segments, so I compute A,B,C separately,
    // and apply them together
    void get_sample_coeffs(double* ABC, double* ABCgrad, double x)
    {
        // Uniform quadratic B-spline basis functions, and their derivatives
        double x2 = x*x;
        ABC[0] = (4*x2 - 4*x + 1)/8;
        ABC[1] = (3 - 4*x2)/4;
        ABC[2] = (4*x2 + 4*x + 1)/8;

        ABCgrad[0] = x - 1./2.;
        ABCgrad[1] = -2.*x;
        ABCgrad[2] = x + 1./2.;
    }

    // 3 samples along one dimension, and then one sample along the other
    // dimension, using the 3 samples as the control points. Order doesn't
    // matter. See bsplines.py
    //
    // I do this twice: one for each focal length surface
    double ABCgradx[3];
    double ABCgrady[3];
    get_sample_coeffs(ABCx, ABCgradx, x);
    get_sample_coeffs(ABCy, ABCgrady, y);

    // Tensor-product evaluation over the 3x3 patch; k indexes the two
    // interleaved surfaces
    void interp(double* out, const double* ABCx, const double* ABCy)
    {
        double cinterp[3][2];
        const int stridex = 2;
        for(int iy=0; iy<3; iy++)
            for(int k=0;k<2;k++)
                cinterp[iy][k] =
                    ABCx[0] * c[iy*stridey + 0*stridex + k] +
                    ABCx[1] * c[iy*stridey + 1*stridex + k] +
                    ABCx[2] * c[iy*stridey + 2*stridex + k];
        for(int k=0;k<2;k++)
            out[k] =
                ABCy[0] * cinterp[0][k] +
                ABCy[1] * cinterp[1][k] +
                ABCy[2] * cinterp[2][k];
    }

    // the intrinsics gradient is flatten(ABCx[0..2] * ABCy[0..2]) for both x
    // and y. By returning ABC[xy] and not the cartesian products, I make
    // smaller temporary data arrays
    interp(out,     ABCx,     ABCy);
    interp(dout_dx, ABCgradx, ABCy);
    interp(dout_dy, ABCx,     ABCgrady);
}
// 3x3 gradient blocks of the composed ("joint", j) pose with respect to the
// camera (c) and frame (f) poses. Naming: _d_A_B presumably means dA/dB, with
// r=rotation and t=translation — TODO confirm against the code that fills
// these in. The blocks that are always trivial (identity or 0) are omitted,
// as noted below
typedef struct
{
    double  _d_rj_rf[3*3];
    double  _d_rj_rc[3*3];
    double  _d_tj_tf[3*3];
    double  _d_tj_rc[3*3];

    // _d_tj_tc is always identity
    // _d_tj_rf is always 0
    // _d_rj_tf is always 0
    // _d_rj_tc is always 0
} geometric_gradients_t;
static | |
void project_cahvor( // outputs | |
mrcal_point2_t* restrict q, | |
mrcal_point2_t* restrict dq_dfxy, | |
double* restrict dq_dintrinsics_nocore, | |
mrcal_point3_t* restrict dq_drcamera, | |
mrcal_point3_t* restrict dq_dtcamera, | |
mrcal_point3_t* restrict dq_drframe, | |
mrcal_point3_t* restrict dq_dtframe, | |
// inputs | |
const mrcal_point3_t* restrict p, | |
const mrcal_point3_t* restrict dp_drc, | |
const mrcal_point3_t* restrict dp_dtc, | |
const mrcal_point3_t* restrict dp_drf, | |
const mrcal_point3_t* restrict dp_dtf, | |
const double* restrict intrinsics, | |
bool camera_at_identity, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
int NdistortionParams = mrcal_lensmodel_num_params(lensmodel) - 4; | |
// I perturb p, and then apply the focal length, center pixel stuff | |
// normally | |
mrcal_point3_t p_distorted; | |
bool need_any_extrinsics_gradients = | |
( dq_drcamera != NULL ) || | |
( dq_dtcamera != NULL ) || | |
( dq_drframe != NULL ) || | |
( dq_dtframe != NULL ); | |
// distortion parameter layout: | |
// alpha | |
// beta | |
// r0 | |
// r1 | |
// r2 | |
double alpha = intrinsics[4 + 0]; | |
double beta = intrinsics[4 + 1]; | |
double r0 = intrinsics[4 + 2]; | |
double r1 = intrinsics[4 + 3]; | |
double r2 = intrinsics[4 + 4]; | |
double s_al, c_al, s_be, c_be; | |
sincos(alpha, &s_al, &c_al); | |
sincos(beta, &s_be, &c_be); | |
// I parametrize the optical axis such that | |
// - o(alpha=0, beta=0) = (0,0,1) i.e. the optical axis is at the center | |
// if both parameters are 0 | |
// - The gradients are cartesian. I.e. do/dalpha and do/dbeta are both | |
// NOT 0 at (alpha=0,beta=0). This would happen at the poles (gimbal | |
// lock), and that would make my solver unhappy | |
double o [] = { s_al*c_be, s_be, c_al*c_be }; | |
double do_dalpha[] = { c_al*c_be, 0, -s_al*c_be }; | |
double do_dbeta[] = { -s_al*s_be, c_be, -c_al*s_be }; | |
double norm2p = norm2_vec(3, p->xyz); | |
double omega = dot_vec(3, p->xyz, o); | |
double domega_dalpha = dot_vec(3, p->xyz, do_dalpha); | |
double domega_dbeta = dot_vec(3, p->xyz, do_dbeta); | |
double omega_recip = 1.0 / omega; | |
double tau = norm2p * omega_recip*omega_recip - 1.0; | |
double s__dtau_dalphabeta__domega_dalphabeta = -2.0*norm2p * omega_recip*omega_recip*omega_recip; | |
double dmu_dtau = r1 + 2.0*tau*r2; | |
double dmu_dxyz[3]; | |
for(int i=0; i<3; i++) | |
dmu_dxyz[i] = dmu_dtau * | |
(2.0 * p->xyz[i] * omega_recip*omega_recip + s__dtau_dalphabeta__domega_dalphabeta * o[i]); | |
double mu = r0 + tau*r1 + tau*tau*r2; | |
double s__dmu_dalphabeta__domega_dalphabeta = dmu_dtau * s__dtau_dalphabeta__domega_dalphabeta; | |
double dpdistorted_dpcam[3*3] = {}; | |
double dpdistorted_ddistortion[3*NdistortionParams]; | |
for(int i=0; i<3; i++) | |
{ | |
double dmu_ddist[5] = { s__dmu_dalphabeta__domega_dalphabeta * domega_dalpha, | |
s__dmu_dalphabeta__domega_dalphabeta * domega_dbeta, | |
1.0, | |
tau, | |
tau * tau }; | |
dpdistorted_ddistortion[i*NdistortionParams + 0] = p->xyz[i] * dmu_ddist[0]; | |
dpdistorted_ddistortion[i*NdistortionParams + 1] = p->xyz[i] * dmu_ddist[1]; | |
dpdistorted_ddistortion[i*NdistortionParams + 2] = p->xyz[i] * dmu_ddist[2]; | |
dpdistorted_ddistortion[i*NdistortionParams + 3] = p->xyz[i] * dmu_ddist[3]; | |
dpdistorted_ddistortion[i*NdistortionParams + 4] = p->xyz[i] * dmu_ddist[4]; | |
dpdistorted_ddistortion[i*NdistortionParams + 0] -= dmu_ddist[0] * omega*o[i]; | |
dpdistorted_ddistortion[i*NdistortionParams + 1] -= dmu_ddist[1] * omega*o[i]; | |
dpdistorted_ddistortion[i*NdistortionParams + 2] -= dmu_ddist[2] * omega*o[i]; | |
dpdistorted_ddistortion[i*NdistortionParams + 3] -= dmu_ddist[3] * omega*o[i]; | |
dpdistorted_ddistortion[i*NdistortionParams + 4] -= dmu_ddist[4] * omega*o[i]; | |
dpdistorted_ddistortion[i*NdistortionParams + 0] -= mu * domega_dalpha*o[i]; | |
dpdistorted_ddistortion[i*NdistortionParams + 1] -= mu * domega_dbeta *o[i]; | |
dpdistorted_ddistortion[i*NdistortionParams + 0] -= mu * omega * do_dalpha[i]; | |
dpdistorted_ddistortion[i*NdistortionParams + 1] -= mu * omega * do_dbeta [i]; | |
dpdistorted_dpcam[3*i + i] = mu+1.0; | |
for(int j=0; j<3; j++) | |
{ | |
dpdistorted_dpcam[3*i + j] += (p->xyz[i] - omega*o[i]) * dmu_dxyz[j]; | |
dpdistorted_dpcam[3*i + j] -= mu*o[i]*o[j]; | |
} | |
p_distorted.xyz[i] = p->xyz[i] + mu * (p->xyz[i] - omega*o[i]); | |
} | |
// q = fxy pxy/pz + cxy | |
// dqx/dp = d( fx px/pz + cx ) = fx/pz^2 (pz [1 0 0] - px [0 0 1]) | |
// dqy/dp = d( fy py/pz + cy ) = fy/pz^2 (pz [0 1 0] - py [0 0 1]) | |
const double fx = intrinsics[0]; | |
const double fy = intrinsics[1]; | |
const double cx = intrinsics[2]; | |
const double cy = intrinsics[3]; | |
double pz_recip = 1. / p_distorted.z; | |
q->x = p_distorted.x*pz_recip * fx + cx; | |
q->y = p_distorted.y*pz_recip * fy + cy; | |
if(need_any_extrinsics_gradients) | |
{ | |
double dq_dp[2][3] = | |
{ { fx * pz_recip, 0, -fx*p_distorted.x*pz_recip*pz_recip}, | |
{ 0, fy * pz_recip, -fy*p_distorted.y*pz_recip*pz_recip} }; | |
// This is for the DISTORTED p. | |
// dq/deee = dq/dpdistorted dpdistorted/dpundistorted dpundistorted/deee | |
double dq_dpundistorted[6]; | |
mul_genN3_gen33_vout(2, (double*)dq_dp, dpdistorted_dpcam, dq_dpundistorted); | |
// dq/deee = dq/dp dp/deee | |
if(camera_at_identity) | |
{ | |
if( dq_drcamera != NULL ) memset(dq_drcamera, 0, 6*sizeof(double)); | |
if( dq_dtcamera != NULL ) memset(dq_dtcamera, 0, 6*sizeof(double)); | |
if( dq_drframe != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_drf, (double*)dq_drframe); | |
if( dq_dtframe != NULL ) memcpy(dq_dtframe, dq_dpundistorted, 6*sizeof(double)); | |
} | |
else | |
{ | |
if( dq_drcamera != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_drc, (double*)dq_drcamera); | |
if( dq_dtcamera != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_dtc, (double*)dq_dtcamera); | |
if( dq_drframe != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_drf, (double*)dq_drframe ); | |
if( dq_dtframe != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_dtf, (double*)dq_dtframe ); | |
} | |
} | |
if( dq_dintrinsics_nocore != NULL ) | |
{ | |
for(int i=0; i<NdistortionParams; i++) | |
{ | |
const double dx = dpdistorted_ddistortion[i + 0*NdistortionParams]; | |
const double dy = dpdistorted_ddistortion[i + 1*NdistortionParams]; | |
const double dz = dpdistorted_ddistortion[i + 2*NdistortionParams]; | |
dq_dintrinsics_nocore[0*NdistortionParams + i] = fx * pz_recip * (dx - p_distorted.x*pz_recip*dz); | |
dq_dintrinsics_nocore[1*NdistortionParams + i] = fy * pz_recip * (dy - p_distorted.y*pz_recip*dz); | |
} | |
} | |
if( dq_dfxy ) | |
{ | |
// I have the projection, and I now need to propagate the gradients | |
// xy = fxy * distort(xy)/distort(z) + cxy | |
dq_dfxy->x = (q->x - cx)/fx; // dqx/dfx | |
dq_dfxy->y = (q->y - cy)/fy; // dqy/dfy | |
} | |
} | |
// Project the camera-coordinate point p to pixel coordinates q using the
// CAHVORE lens model. The distortion itself (the distorted point and its
// gradients) is computed by project_cahvore_internals(); this function applies
// the pinhole core (fx,fy,cx,cy) on top and chains the gradients through.
//
// Returns false if project_cahvore_internals() fails; the outputs are valid
// only on a true return.
//
// All the dq_* gradient outputs may be NULL if the caller doesn't need them:
// - dq_dfxy: dq/d(fx,fy)
// - dq_dintrinsics_nocore: dq/d(CAHVORE parameters past the core); a dense
//   (2,NdistortionParams) array
// - dq_drcamera, dq_dtcamera, dq_drframe, dq_dtframe: dq/d(pose); (2,3) each
//
// dp_drc, dp_dtc, dp_drf, dp_dtf are dp/d(pose), supplied by the caller. If
// camera_at_identity, dp_drc, dp_dtc, dp_dtf are not referenced at all
static
bool project_cahvore( // outputs
                      mrcal_point2_t* restrict q,
                      mrcal_point2_t* restrict dq_dfxy,
                      double* restrict dq_dintrinsics_nocore,
                      mrcal_point3_t* restrict dq_drcamera,
                      mrcal_point3_t* restrict dq_dtcamera,
                      mrcal_point3_t* restrict dq_drframe,
                      mrcal_point3_t* restrict dq_dtframe,

                      // inputs
                      const mrcal_point3_t* restrict p,
                      const mrcal_point3_t* restrict dp_drc,
                      const mrcal_point3_t* restrict dp_dtc,
                      const mrcal_point3_t* restrict dp_drf,
                      const mrcal_point3_t* restrict dp_dtf,
                      const double* restrict intrinsics,
                      bool camera_at_identity,
                      const mrcal_lensmodel_t* lensmodel)
{
    int NdistortionParams = mrcal_lensmodel_num_params(lensmodel) - 4; // This is 8: alpha,beta,R,E

    mrcal_point3_t p_distorted;
    double dpdistorted_ddistortion[8*3]; // 8 intrinsics parameters, nvec(p)=3
    double dpdistorted_dpcam[3*3];

    // All the pose gradients flow through dpdistorted_dpcam, so I compute it
    // if ANY of them was requested
    bool need_any_extrinsics_gradients =
        ( dq_drcamera != NULL ) ||
        ( dq_dtcamera != NULL ) ||
        ( dq_drframe  != NULL ) ||
        ( dq_dtframe  != NULL );

    // Compute the distorted point and, where requested, its gradients in
    // respect to the distortion parameters and to the input point
    if(!project_cahvore_internals( // outputs
                                   &p_distorted,
                                   dq_dintrinsics_nocore ? dpdistorted_ddistortion : NULL,
                                   need_any_extrinsics_gradients ? dpdistorted_dpcam : NULL,

                                   // inputs
                                   p,
                                   &intrinsics[4],
                                   lensmodel->LENSMODEL_CAHVORE__config.linearity))
        return false;

    ////////////// exactly like in project_cahvor() above. Consolidate.

    // q = fxy pxy/pz + cxy
    // dqx/dp = d( fx px/pz + cx ) = fx/pz^2 (pz [1 0 0] - px [0 0 1])
    // dqy/dp = d( fy py/pz + cy ) = fy/pz^2 (pz [0 1 0] - py [0 0 1])
    const double fx = intrinsics[0];
    const double fy = intrinsics[1];
    const double cx = intrinsics[2];
    const double cy = intrinsics[3];

    double pz_recip = 1. / p_distorted.z;
    q->x = p_distorted.x*pz_recip * fx + cx;
    q->y = p_distorted.y*pz_recip * fy + cy;

    if(need_any_extrinsics_gradients)
    {
        double dq_dp[2][3] =
            { { fx * pz_recip, 0, -fx*p_distorted.x*pz_recip*pz_recip},
              { 0, fy * pz_recip, -fy*p_distorted.y*pz_recip*pz_recip} };

        // This is for the DISTORTED p.
        // dq/deee = dq/dpdistorted dpdistorted/dpundistorted dpundistorted/deee
        double dq_dpundistorted[6];
        mul_genN3_gen33_vout(2, (double*)dq_dp, dpdistorted_dpcam, dq_dpundistorted);

        // dq/deee = dq/dp dp/deee
        if(camera_at_identity)
        {
            // Camera at the reference: the camera-pose gradients are 0, and
            // dp/dtframe is the identity, so dq/dtframe = dq/dpundistorted
            if( dq_drcamera != NULL ) memset(dq_drcamera, 0, 6*sizeof(double));
            if( dq_dtcamera != NULL ) memset(dq_dtcamera, 0, 6*sizeof(double));
            if( dq_drframe  != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_drf, (double*)dq_drframe);
            if( dq_dtframe  != NULL ) memcpy(dq_dtframe, dq_dpundistorted, 6*sizeof(double));
        }
        else
        {
            if( dq_drcamera != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_drc, (double*)dq_drcamera);
            if( dq_dtcamera != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_dtc, (double*)dq_dtcamera);
            if( dq_drframe  != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_drf, (double*)dq_drframe );
            if( dq_dtframe  != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dpundistorted, (double*)dp_dtf, (double*)dq_dtframe );
        }
    }

    if( dq_dintrinsics_nocore != NULL )
    {
        // Chain rule through the perspective divide:
        //   dq/ddistortion = dq/dpdistorted * dpdistorted/ddistortion
        for(int i=0; i<NdistortionParams; i++)
        {
            const double dx = dpdistorted_ddistortion[i + 0*NdistortionParams];
            const double dy = dpdistorted_ddistortion[i + 1*NdistortionParams];
            const double dz = dpdistorted_ddistortion[i + 2*NdistortionParams];
            dq_dintrinsics_nocore[0*NdistortionParams + i] = fx * pz_recip * (dx - p_distorted.x*pz_recip*dz);
            dq_dintrinsics_nocore[1*NdistortionParams + i] = fy * pz_recip * (dy - p_distorted.y*pz_recip*dz);
        }
    }

    if( dq_dfxy )
    {
        // I have the projection, and I now need to propagate the gradients
        // xy = fxy * distort(xy)/distort(z) + cxy
        dq_dfxy->x = (q->x - cx)/fx; // dqx/dfx
        dq_dfxy->y = (q->y - cy)/fy; // dqy/dfy
    }
    return true;
}
// These are all internals for project(). It was getting unwieldy otherwise | |
static | |
void _project_point_parametric( // outputs | |
mrcal_point2_t* q, | |
mrcal_point2_t* dq_dfxy, double* dq_dintrinsics_nocore, | |
mrcal_point3_t* restrict dq_drcamera, | |
mrcal_point3_t* restrict dq_dtcamera, | |
mrcal_point3_t* restrict dq_drframe, | |
mrcal_point3_t* restrict dq_dtframe, | |
// inputs | |
const mrcal_point3_t* p, | |
const mrcal_point3_t* dp_drc, | |
const mrcal_point3_t* dp_dtc, | |
const mrcal_point3_t* dp_drf, | |
const mrcal_point3_t* dp_dtf, | |
const double* restrict intrinsics, | |
bool camera_at_identity, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
// u = distort(p, distortions) | |
// q = uxy/uz * fxy + cxy | |
if(!( lensmodel->type == MRCAL_LENSMODEL_PINHOLE || | |
lensmodel->type == MRCAL_LENSMODEL_STEREOGRAPHIC || | |
lensmodel->type == MRCAL_LENSMODEL_LONLAT || | |
lensmodel->type == MRCAL_LENSMODEL_LATLON || | |
MRCAL_LENSMODEL_IS_OPENCV(lensmodel->type) )) | |
{ | |
MSG("Unhandled lens model: %d (%s)", | |
lensmodel->type, mrcal_lensmodel_name_unconfigured(lensmodel)); | |
assert(0); | |
} | |
mrcal_point3_t dq_dp[2]; | |
if( lensmodel->type == MRCAL_LENSMODEL_PINHOLE ) | |
mrcal_project_pinhole(q, dq_dp, | |
p, 1, intrinsics); | |
else if(lensmodel->type == MRCAL_LENSMODEL_STEREOGRAPHIC) | |
mrcal_project_stereographic(q, dq_dp, | |
p, 1, intrinsics); | |
else if(lensmodel->type == MRCAL_LENSMODEL_LONLAT) | |
mrcal_project_lonlat(q, dq_dp, | |
p, 1, intrinsics); | |
else if(lensmodel->type == MRCAL_LENSMODEL_LATLON) | |
mrcal_project_latlon(q, dq_dp, | |
p, 1, intrinsics); | |
else | |
{ | |
int Nintrinsics = mrcal_lensmodel_num_params(lensmodel); | |
_mrcal_project_internal_opencv( q, dq_dp, | |
dq_dintrinsics_nocore, | |
p, 1, intrinsics, Nintrinsics); | |
} | |
// dq/deee = dq/dp dp/deee | |
if(camera_at_identity) | |
{ | |
if( dq_drcamera != NULL ) memset(dq_drcamera, 0, 6*sizeof(double)); | |
if( dq_dtcamera != NULL ) memset(dq_dtcamera, 0, 6*sizeof(double)); | |
if( dq_drframe != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dp, (double*)dp_drf, (double*)dq_drframe); | |
if( dq_dtframe != NULL ) memcpy(dq_dtframe, (double*)dq_dp, 6*sizeof(double)); | |
} | |
else | |
{ | |
if( dq_drcamera != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dp, (double*)dp_drc, (double*)dq_drcamera); | |
if( dq_dtcamera != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dp, (double*)dp_dtc, (double*)dq_dtcamera); | |
if( dq_drframe != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dp, (double*)dp_drf, (double*)dq_drframe ); | |
if( dq_dtframe != NULL ) mul_genN3_gen33_vout(2, (double*)dq_dp, (double*)dp_dtf, (double*)dq_dtframe ); | |
} | |
// I have the projection, and I now need to propagate the gradients | |
if( dq_dfxy ) | |
{ | |
const double fx = intrinsics[0]; | |
const double fy = intrinsics[1]; | |
const double cx = intrinsics[2]; | |
const double cy = intrinsics[3]; | |
// I have the projection, and I now need to propagate the gradients | |
// xy = fxy * distort(xy)/distort(z) + cxy | |
dq_dfxy->x = (q->x - cx)/fx; // dqx/dfx | |
dq_dfxy->y = (q->y - cy)/fy; // dqy/dfy | |
} | |
} | |
// Compute a pinhole projection using a constant fxy, cxy
//
// Projects each of the N camera-coordinate points v[i] to pixel coordinates:
//
//   q[i] = fxy * v[i].xy / v[i].z + cxy
//
// If dq_dv is non-NULL, each point also gets a block of 2 mrcal_point3_t
// gradient rows: dqx/dv and dqy/dv
void mrcal_project_pinhole( // output
                            mrcal_point2_t* q,
                            mrcal_point3_t* dq_dv, // May be NULL. Each point
                                                   // gets a block of 2 mrcal_point3_t
                                                   // objects

                            // input
                            const mrcal_point3_t* v,
                            int N,
                            const double* fxycxy)
{
    const double fx = fxycxy[0];
    const double fy = fxycxy[1];
    const double cx = fxycxy[2];
    const double cy = fxycxy[3];

    // q = fxy pxy/pz + cxy
    // dqx/dp = d( fx px/pz + cx ) = fx/pz^2 (pz [1 0 0] - px [0 0 1])
    // dqy/dp = d( fy py/pz + cy ) = fy/pz^2 (pz [0 1 0] - py [0 0 1])
    for(int i=0; i<N; i++)
    {
        double pz_recip = 1. / v[i].z;

        // Bug fix: write to q[i], not q[0]. The previous code wrote every
        // result through q->x/q->y, so with N>1 only the last point's
        // projection survived
        q[i].x = v[i].x*pz_recip * fx + cx;
        q[i].y = v[i].y*pz_recip * fy + cy;

        if(dq_dv)
        {
            dq_dv[2*i + 0].x = fx * pz_recip;
            dq_dv[2*i + 0].y = 0;
            dq_dv[2*i + 0].z = -fx*v[i].x*pz_recip*pz_recip;

            dq_dv[2*i + 1].x = 0;
            dq_dv[2*i + 1].y = fy * pz_recip;
            dq_dv[2*i + 1].z = -fy*v[i].y*pz_recip*pz_recip;
        }
    }
}
// Compute a pinhole unprojection using a constant fxy, cxy
//
// For each of the N pixel coordinates q[i] reports the observation vector
//
//   v[i] = ( (q[i] - cxy)/fxy, 1 )
//
// If dv_dq is non-NULL, each point also gets a block of 3 mrcal_point2_t
// gradient rows: dvx/dq, dvy/dq, dvz/dq (the last is identically 0)
void mrcal_unproject_pinhole( // output
                              mrcal_point3_t* v,
                              mrcal_point2_t* dv_dq, // May be NULL. Each point
                                                     // gets a block of 3
                                                     // mrcal_point2_t objects

                              // input
                              const mrcal_point2_t* q,
                              int N,
                              const double* fxycxy)
{
    const double fx = fxycxy[0];
    const double fy = fxycxy[1];
    const double cx = fxycxy[2];
    const double cy = fxycxy[3];

    // Hoist the divisions out of the loop, and use the reciprocals in BOTH the
    // value and the gradient computations. Previously the values divided by
    // fx,fy while the gradients used the reciprocals; now they agree
    double fx_recip = 1./fx;
    double fy_recip = 1./fy;
    for(int i=0; i<N; i++)
    {
        v[i].x = (q[i].x - cx) * fx_recip;
        v[i].y = (q[i].y - cy) * fy_recip;
        v[i].z = 1.0;

        if(dv_dq)
        {
            dv_dq[3*i + 0] = (mrcal_point2_t){.x = fx_recip};
            dv_dq[3*i + 1] = (mrcal_point2_t){.y = fy_recip};
            // dvz/dq = 0; {0} instead of the non-C11 empty initializer {}
            dv_dq[3*i + 2] = (mrcal_point2_t){0};
        }
    }
}
// Compute a stereographic projection using a constant fxy, cxy. This is the
// same as the pinhole projection for long lenses, but supports views behind the
// camera
void mrcal_project_stereographic( // output
                                  mrcal_point2_t* q,
                                  mrcal_point3_t* dq_dv, // May be NULL. Each point
                                                         // gets a block of 2 mrcal_point3_t
                                                         // objects

                                  // input
                                  const mrcal_point3_t* v,
                                  int N,
                                  const double* fxycxy)
{
    const double fx = fxycxy[0];
    const double fy = fxycxy[1];
    const double cx = fxycxy[2];
    const double cy = fxycxy[3];

    // Stereographic projection (from https://en.wikipedia.org/wiki/Fisheye_lens):
    //
    //   u = xy_unit * tan(th/2) * 2
    //
    // where th is the angle between the observation and the projection center.
    // With mag_xy = mag(v_xy) and mag_xyz = mag(v):
    //
    //   sin(th) = mag_xy/mag_xyz
    //   cos(th) = z/mag_xyz
    //   tan(th/2) = sin(th)/(1 + cos(th)) = mag_xy / (mag_xyz + z)
    //
    //   u = xy/mag_xy * mag_xy/(mag_xyz + z) * 2
    //     = xy / (mag_xyz + z) * 2
    //
    // I compute the normalized (focal-length = 1) projection, and apply the
    // x,y focal-length scalings at the end
    for(int i=0; i<N; i++)
    {
        const double x = v[i].x;
        const double y = v[i].y;
        const double z = v[i].z;

        const double mag_xyz = sqrt( x*x + y*y + z*z );
        const double scale   = 2.0 / (mag_xyz + z);

        if(dq_dv != NULL)
        {
            // dqx/dv = fx ( scale dx + x dscale )
            //        = fx ( [scale 0 0] - x scale^2/2 ( [x y z]/mag_xyz + [0 0 1]) )
            // With A = -scale^2/2 and B = A/mag_xyz:
            //   dqx/dv = fx ( [scale 0 0] + B x [x y z] + x A [0 0 1] )
            const double A = -scale*scale / 2.;
            const double B = A / mag_xyz;

            dq_dv[2*i + 0] = (mrcal_point3_t){.x = fx * (x * (B*x) + scale),
                                              .y = fx * (x * (B*y)),
                                              .z = fx * (x * (B*z + A))};
            dq_dv[2*i + 1] = (mrcal_point3_t){.x = fy * (y * (B*x)),
                                              .y = fy * (y * (B*y) + scale),
                                              .z = fy * (y * (B*z + A))};
        }

        q[i] = (mrcal_point2_t){.x = x * scale * fx + cx,
                                .y = y * scale * fy + cy};
    }
}
// Compute a stereographic unprojection using a constant fxy, cxy
void mrcal_unproject_stereographic( // output
                                    mrcal_point3_t* v,
                                    mrcal_point2_t* dv_dq, // May be NULL. Each point
                                                           // gets a block of 3
                                                           // mrcal_point2_t objects

                                    // input
                                    const mrcal_point2_t* q,
                                    int N,
                                    const double* fxycxy)
{
    const double fx = fxycxy[0];
    const double fy = fxycxy[1];
    const double cx = fxycxy[2];
    const double cy = fxycxy[3];

    // Inverse of the stereographic projection
    // (from https://en.wikipedia.org/wiki/Fisheye_lens):
    //
    //   q = u f + c   where   u = xy/(mag_xyz + z) * 2,  mag(u) = 2 tan(th/2)
    //
    // So u = (q-c)/f, and th and the azimuth az (direction of u) fully define
    // the observation vector:
    //
    //       [ sin(th) cos(az) ]   [ cos(az)   ]
    //   v = [ sin(th) sin(az) ] ~ [ sin(az)   ]
    //       [ cos(th)         ]   [ 1/tan(th) ]
    //
    //   tan(th) = mag(u) / (1 - (mag(u)/2)^2)
    //
    // That form is singular at u = 0 (the imager center), but v has arbitrary
    // scale, so I can multiply through by mag(u). Using cos(az) = u.x/mag(u):
    //
    //       [ u.x              ]
    //   v = [ u.y              ]
    //       [ 1 - 1/4 mag(u)^2 ]
    //
    // A numpysane round-trip check confirms unproj(proj(v))/v is a constant
    const double fx_recip = 1./fx;
    const double fy_recip = 1./fy;
    for(int i=0; i<N; i++)
    {
        const double ux = (q[i].x - cx) * fx_recip;
        const double uy = (q[i].y - cy) * fy_recip;
        const double norm2u = ux*ux + uy*uy;

        if(dv_dq != NULL)
        {
            dv_dq[3*i + 0] = (mrcal_point2_t){.x = fx_recip};
            dv_dq[3*i + 1] = (mrcal_point2_t){.y = fy_recip};
            dv_dq[3*i + 2] = (mrcal_point2_t){.x = -ux/2.0*fx_recip,
                                              .y = -uy/2.0*fy_recip};
        }

        v[i] = (mrcal_point3_t){ .x = ux,
                                 .y = uy,
                                 .z = 1. - 1./4. * norm2u };
    }
}
// Compute an equirectangular (lon/lat) projection using a constant fxy, cxy
void mrcal_project_lonlat( // output
                           mrcal_point2_t* q,
                           mrcal_point3_t* dq_dv, // May be NULL. Each point
                                                  // gets a block of 2 mrcal_point3_t
                                                  // objects

                           // input
                           const mrcal_point3_t* v,
                           int N,
                           const double* fxycxy)
{
    const double fx = fxycxy[0];
    const double fy = fxycxy[1];
    const double cx = fxycxy[2];
    const double cy = fxycxy[3];

    // equirectangular projection: q = (lon, lat) with
    //
    //   lon = arctan2(vx, vz)
    //   lat = arcsin(vy / mag(v))
    //
    // Gradients:
    //   dlon/dv = [vz 0 -vx] / (vx^2 + vz^2)
    //   dlat/dv = 1/sqrt(vx^2 + vz^2) ( [0 1 0] - v vy/norm2(v) )
    for(int i=0; i<N; i++)
    {
        const double x = v[i].x;
        const double y = v[i].y;
        const double z = v[i].z;

        const double norm2_xyz_recip = 1. / (x*x + y*y + z*z);
        const double mag_xyz_recip   = sqrt(norm2_xyz_recip);
        const double norm2_xz_recip  = 1. / (x*x + z*z);
        const double mag_xz_recip    = sqrt(norm2_xz_recip);

        if(dq_dv != NULL)
        {
            dq_dv[2*i + 0] = (mrcal_point3_t){.x =  fx*norm2_xz_recip * z,
                                              .z = -fx*norm2_xz_recip * x };
            dq_dv[2*i + 1] = (mrcal_point3_t){.x = -fy*mag_xz_recip * (y*x * norm2_xyz_recip),
                                              .y = -fy*mag_xz_recip * (y*y * norm2_xyz_recip - 1.),
                                              .z = -fy*mag_xz_recip * (y*z * norm2_xyz_recip) };
        }

        q[i] = (mrcal_point2_t){.x = atan2(x, z)               * fx + cx,
                                .y = asin( y * mag_xyz_recip ) * fy + cy};
    }
}
// Compute an equirectangular (lon/lat) unprojection using a constant fxy, cxy
void mrcal_unproject_lonlat( // output
                             mrcal_point3_t* v,
                             mrcal_point2_t* dv_dq, // May be NULL. Each point
                                                    // gets a block of 3 mrcal_point2_t
                                                    // objects

                             // input
                             const mrcal_point2_t* q,
                             int N,
                             const double* fxycxy)
{
    const double fx = fxycxy[0];
    const double fy = fxycxy[1];
    const double cx = fxycxy[2];
    const double cy = fxycxy[3];

    // Inverting the equirectangular projection q = (lon, lat):
    //
    //   lon = arctan2(vx, vz)
    //   lat = arcsin(vy / mag(v))
    //
    // For a normalized v this gives
    //
    //   vx = cos(lat) sin(lon)
    //   vy = sin(lat)
    //   vz = cos(lat) cos(lon)
    //
    // An unnormalized (sin(lon), tan(lat), cos(lon)) form would also work, but
    // sincos ~ the cost of one sin or cos while tan is more costly, so I use
    // the normalized form. Gradients:
    //
    //   dv/dlon = [ cos(lat) cos(lon)   0         -cos(lat) sin(lon) ]
    //   dv/dlat = [ -sin(lat) sin(lon)  cos(lat)  -sin(lat) cos(lon) ]
    const double fx_recip = 1./fx;
    const double fy_recip = 1./fy;
    for(int i=0; i<N; i++)
    {
        const double lon = (q[i].x - cx) * fx_recip;
        const double lat = (q[i].y - cy) * fy_recip;

        double slon,clon,slat,clat;
        sincos(lat, &slat, &clat);
        sincos(lon, &slon, &clon);

        if(dv_dq != NULL)
        {
            dv_dq[3*i + 0] = (mrcal_point2_t){.x =  fx_recip * clat * clon,
                                              .y = -fy_recip * slat * slon};
            dv_dq[3*i + 1] = (mrcal_point2_t){.y =  fy_recip * clat };
            dv_dq[3*i + 2] = (mrcal_point2_t){.x = -fx_recip * clat * slon,
                                              .y = -fy_recip * slat * clon };
        }

        v[i] = (mrcal_point3_t){.x = clat * slon,
                                .y = slat,
                                .z = clat * clon};
    }
}
// Compute a transverse equirectangular (lat/lon) projection using a constant
// fxy, cxy
void mrcal_project_latlon( // output
                           mrcal_point2_t* q,
                           mrcal_point3_t* dq_dv, // May be NULL. Each point
                                                  // gets a block of 2 mrcal_point3_t
                                                  // objects

                           // input
                           const mrcal_point3_t* v,
                           int N,
                           const double* fxycxy)
{
    const double fx = fxycxy[0];
    const double fy = fxycxy[1];
    const double cx = fxycxy[2];
    const double cy = fxycxy[3];

    // copy of mrcal_project_lonlat(), with swapped x/y:
    //   qx = asin( vx/mag(v) ) fx + cx
    //   qy = atan2(vy, vz)     fy + cy
    for(int i=0; i<N; i++)
    {
        const double x = v[i].x;
        const double y = v[i].y;
        const double z = v[i].z;

        const double norm2_xyz_recip = 1. / (x*x + y*y + z*z);
        const double mag_xyz_recip   = sqrt(norm2_xyz_recip);
        const double norm2_yz_recip  = 1. / (y*y + z*z);
        const double mag_yz_recip    = sqrt(norm2_yz_recip);

        if(dq_dv != NULL)
        {
            dq_dv[2*i + 0] = (mrcal_point3_t){.x = -fx*mag_yz_recip * (x*x * norm2_xyz_recip - 1.),
                                              .y = -fx*mag_yz_recip * (x*y * norm2_xyz_recip),
                                              .z = -fx*mag_yz_recip * (x*z * norm2_xyz_recip) };
            dq_dv[2*i + 1] = (mrcal_point3_t){.y =  fy*norm2_yz_recip * z,
                                              .z = -fy*norm2_yz_recip * y };
        }

        q[i] = (mrcal_point2_t){.x = asin( x * mag_xyz_recip ) * fx + cx,
                                .y = atan2(y, z)               * fy + cy};
    }
}
// Compute a transverse equirectangular (lat/lon) unprojection using a constant
// fxy, cxy
void mrcal_unproject_latlon( // output
                             mrcal_point3_t* v,
                             mrcal_point2_t* dv_dq, // May be NULL. Each point
                                                    // gets a block of 3 mrcal_point2_t
                                                    // objects

                             // input
                             const mrcal_point2_t* q,
                             int N,
                             const double* fxycxy)
{
    const double fx = fxycxy[0];
    const double fy = fxycxy[1];
    const double cx = fxycxy[2];
    const double cy = fxycxy[3];

    // copy of mrcal_unproject_lonlat(), with swapped x/y: lat comes from qx,
    // lon from qy, and the (normalized) output is
    //   v = ( sin(lat), cos(lat) sin(lon), cos(lat) cos(lon) )
    const double fx_recip = 1./fx;
    const double fy_recip = 1./fy;
    for(int i=0; i<N; i++)
    {
        const double lat = (q[i].x - cx) * fx_recip;
        const double lon = (q[i].y - cy) * fy_recip;

        double slon,clon,slat,clat;
        sincos(lat, &slat, &clat);
        sincos(lon, &slon, &clon);

        if(dv_dq != NULL)
        {
            dv_dq[3*i + 0] = (mrcal_point2_t){.x =  fx_recip * clat };
            dv_dq[3*i + 1] = (mrcal_point2_t){.x = -fx_recip * slat * slon,
                                              .y =  fy_recip * clat * clon };
            dv_dq[3*i + 2] = (mrcal_point2_t){.x = -fx_recip * slat * clon,
                                              .y = -fy_recip * clat * slon };
        }

        v[i] = (mrcal_point3_t){.x = slat,
                                .y = clat * slon,
                                .z = clat * clon};
    }
}
static void _mrcal_precompute_lensmodel_data_MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC | |
( // output | |
mrcal_LENSMODEL_SPLINED_STEREOGRAPHIC__precomputed_t* precomputed, | |
//input | |
const mrcal_LENSMODEL_SPLINED_STEREOGRAPHIC__config_t* config ) | |
{ | |
// I have N control points describing a given field-of-view. I | |
// want to space out the control points evenly. I'm using | |
// B-splines, so I need extra control points out past my edge. | |
// With cubic splines I need a whole extra interval past the | |
// edge. With quadratic splines I need half an interval (see | |
// stuff in analyses/splines/). | |
// | |
// (u_width_needed + Nknots_margin*u_interval_size)/(Nknots - 1) = u_interval_size | |
// ---> u_width_needed/(Nknots-1) = u_interval_size * (1 - Nknots_margin/(Nknots - 1)) | |
// ---> u_width_needed = u_interval_size * (Nknots - 1 - Nknots_margin) | |
// ---> u_interval_size = u_width_needed / (Nknots - 1 - Nknots_margin) | |
int Nknots_margin; | |
if(config->order == 2) | |
{ | |
Nknots_margin = 1; | |
if(config->Nx < 3 || config->Ny < 3) | |
{ | |
MSG("Quadratic splines: absolute minimum Nx, Ny is 3. Got Nx=%d Ny=%d. Barfing out", | |
config->Nx, config->Ny); | |
assert(0); | |
} | |
} | |
else if(config->order == 3) | |
{ | |
Nknots_margin = 2; | |
if(config->Nx < 4 || config->Ny < 4) | |
{ | |
MSG("Cubic splines: absolute minimum Nx, Ny is 4. Got Nx=%d Ny=%d. Barfing out", | |
config->Nx, config->Ny); | |
assert(0); | |
} | |
} | |
else | |
{ | |
MSG("I only support spline order 2 and 3"); | |
assert(0); | |
} | |
double th_edge_x = (double)config->fov_x_deg/2. * M_PI / 180.; | |
double u_edge_x = tan(th_edge_x / 2.) * 2; | |
precomputed->segments_per_u = (config->Nx - 1 - Nknots_margin) / (u_edge_x*2.); | |
} | |
// NOT A PART OF THE EXTERNAL API. This is exported for the mrcal python wrapper
// only
//
// Fills the precomputed-data structure used by the projection routines, and
// sets precomputed->ready. Currently only MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC
// carries any precomputed data; every other model only gets the ready flag
void _mrcal_precompute_lensmodel_data(mrcal_projection_precomputed_t* precomputed,
                                      const mrcal_lensmodel_t* lensmodel)
{
    // currently only this model has anything
    if(lensmodel->type == MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC)
        _mrcal_precompute_lensmodel_data_MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC
            ( &precomputed->LENSMODEL_SPLINED_STEREOGRAPHIC__precomputed,
              &lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config );
    precomputed->ready = true;
}
bool mrcal_knots_for_splined_models( // buffers must hold at least | |
// config->Nx and config->Ny values | |
// respectively | |
double* ux, double* uy, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
if(lensmodel->type != MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC) | |
{ | |
MSG("This function works only with the MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC model. '%s' passed in", | |
mrcal_lensmodel_name_unconfigured(lensmodel)); | |
return false; | |
} | |
mrcal_projection_precomputed_t precomputed_all; | |
_mrcal_precompute_lensmodel_data(&precomputed_all, lensmodel); | |
const mrcal_LENSMODEL_SPLINED_STEREOGRAPHIC__config_t* config = | |
&lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config; | |
const mrcal_LENSMODEL_SPLINED_STEREOGRAPHIC__precomputed_t* precomputed = | |
&precomputed_all.LENSMODEL_SPLINED_STEREOGRAPHIC__precomputed; | |
// The logic I'm reversing is | |
// double ix = u.x*segments_per_u + (double)(Nx-1)/2.; | |
for(int i=0; i<config->Nx; i++) | |
ux[i] = | |
((double)i - (double)(config->Nx-1)/2.) / | |
precomputed->segments_per_u; | |
for(int i=0; i<config->Ny; i++) | |
uy[i] = | |
((double)i - (double)(config->Ny-1)/2.) / | |
precomputed->segments_per_u; | |
return true; | |
} | |
static int get_Ngradients(const mrcal_lensmodel_t* lensmodel, | |
int Nintrinsics) | |
{ | |
int N = 0; | |
bool has_core = modelHasCore_fxfycxcy(lensmodel); | |
bool has_dense_intrinsics_grad = (lensmodel->type != MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC); | |
bool has_sparse_intrinsics_grad = (lensmodel->type == MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC); | |
int runlen = (lensmodel->type == MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC) ? | |
(lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config.order + 1) : | |
0; | |
if(has_core) | |
// qx(fx) and qy(fy) | |
N += 2; | |
if(has_dense_intrinsics_grad) | |
// each of (qx,qy) depends on all the non-core intrinsics | |
N += 2 * (Nintrinsics-4); | |
if(has_sparse_intrinsics_grad) | |
{ | |
// spline coefficients | |
N += 2*runlen; | |
} | |
return N; | |
} | |
static | |
void _project_point_splined( // outputs | |
mrcal_point2_t* q, | |
mrcal_point2_t* dq_dfxy, | |
double* grad_ABCDx_ABCDy, | |
int* ivar0, | |
// Gradient outputs. May be NULL | |
mrcal_point3_t* restrict dq_drcamera, | |
mrcal_point3_t* restrict dq_dtcamera, | |
mrcal_point3_t* restrict dq_drframe, | |
mrcal_point3_t* restrict dq_dtframe, | |
// inputs | |
const mrcal_point3_t* restrict p, | |
const mrcal_point3_t* restrict dp_drc, | |
const mrcal_point3_t* restrict dp_dtc, | |
const mrcal_point3_t* restrict dp_drf, | |
const mrcal_point3_t* restrict dp_dtf, | |
const double* restrict intrinsics, | |
bool camera_at_identity, | |
int spline_order, | |
uint16_t Nx, uint16_t Ny, | |
double segments_per_u) | |
{ | |
// projections out-of-bounds will yield SOME value (function remains | |
// continuous as we move out-of-bounds), but it wont be particularly | |
// meaningful | |
// stereographic projection: | |
// (from https://en.wikipedia.org/wiki/Fisheye_lens) | |
// u = xy_unit * tan(th/2) * 2 | |
// | |
// I compute the normalized (focal-length = 1) projection, and | |
// use that to look-up deltau | |
// th is the angle between the observation and the projection | |
// center | |
// | |
// sin(th) = mag_pxy/mag_p | |
// cos(th) = z/mag_p | |
// tan(th/2) = sin(th) / (1 + cos(th)) | |
// tan(th/2) = mag_pxy/mag_p / (1 + z/mag_p) = | |
// = mag_pxy / (mag_p + z) | |
// u = xy_unit * tan(th/2) * 2 = | |
// = xy/mag_pxy * mag_pxy/(mag_p + z) * 2 = | |
// = xy / (mag_p + z) * 2 | |
// | |
// The stereographic projection is used to query the spline surface, and it | |
// is also the projection baseline. I do | |
// | |
// q = (u + deltau(u)) * f + c | |
// | |
// If the spline surface is at 0 (deltau == 0) then this is a pure | |
// stereographic projection | |
double mag_p = sqrt( p->x*p->x + | |
p->y*p->y + | |
p->z*p->z ); | |
double scale = 2.0 / (mag_p + p->z); | |
mrcal_point2_t u = {.x = p->x * scale, | |
.y = p->y * scale}; | |
// du/dp = d/dp ( xy * scale ) | |
// = pxy dscale/dp + [I; 0] scale | |
// dscale/dp = d (2.0 / (mag_p + p->z))/dp = | |
// = -2/()^2 ( [0,0,1] + dmag/dp) | |
// = -2/()^2 ( [0,0,1] + 2pt/2mag) | |
// = -2 scale^2/4 ( [0,0,1] + pt/mag) | |
// = -scale^2/2 * ( [0,0,1] + pt/mag ) | |
// = A*[0,0,1] + B*pt | |
double A = -scale*scale / 2.; | |
double B = A / mag_p; | |
double du_dp[2][3] = { { p->x * (B * p->x) + scale, | |
p->x * (B * p->y), | |
p->x * (B * p->z + A) }, | |
{ p->y * (B * p->x), | |
p->y * (B * p->y) + scale, | |
p->y * (B * p->z + A) } }; | |
double ix = u.x*segments_per_u + (double)(Nx-1)/2.; | |
double iy = u.y*segments_per_u + (double)(Ny-1)/2.; | |
mrcal_point2_t deltau; | |
double ddeltau_dux[2]; | |
double ddeltau_duy[2]; | |
const double fx = intrinsics[0]; | |
const double fy = intrinsics[1]; | |
const double cx = intrinsics[2]; | |
const double cy = intrinsics[3]; | |
if( spline_order == 3 ) | |
{ | |
int ix0 = (int)ix; | |
int iy0 = (int)iy; | |
// If out-of-bounds, clamp to the nearest valid spline segment. The | |
// projection will fly off to infinity quickly (we're extrapolating a | |
// polynomial), but at least it'll stay continuous | |
if( ix0 < 1) ix0 = 1; | |
else if(ix0 > Nx-3) ix0 = Nx-3; | |
if( iy0 < 1) iy0 = 1; | |
else if(iy0 > Ny-3) iy0 = Ny-3; | |
*ivar0 = | |
4 + // skip the core | |
2*( (iy0-1)*Nx + | |
(ix0-1) ); | |
sample_bspline_surface_cubic(deltau.xy, | |
ddeltau_dux, ddeltau_duy, | |
grad_ABCDx_ABCDy, | |
ix - ix0, iy - iy0, | |
// control points | |
&intrinsics[*ivar0], | |
2*Nx); | |
} | |
else if( spline_order == 2 ) | |
{ | |
int ix0 = (int)(ix + 0.5); | |
int iy0 = (int)(iy + 0.5); | |
// If out-of-bounds, clamp to the nearest valid spline segment. The | |
// projection will fly off to infinity quickly (we're extrapolating a | |
// polynomial), but at least it'll stay continuous | |
if( ix0 < 1) ix0 = 1; | |
else if(ix0 > Nx-2) ix0 = Nx-2; | |
if( iy0 < 1) iy0 = 1; | |
else if(iy0 > Ny-2) iy0 = Ny-2; | |
*ivar0 = | |
4 + // skip the core | |
2*( (iy0-1)*Nx + | |
(ix0-1) ); | |
sample_bspline_surface_quadratic(deltau.xy, | |
ddeltau_dux, ddeltau_duy, | |
grad_ABCDx_ABCDy, | |
ix - ix0, iy - iy0, | |
// control points | |
&intrinsics[*ivar0], | |
2*Nx); | |
} | |
else | |
{ | |
MSG("I only support spline order==2 or 3. Somehow got %d. This is a bug. Barfing", | |
spline_order); | |
assert(0); | |
} | |
// u = stereographic(p) | |
// q = (u + deltau(u)) * f + c | |
// | |
// Extrinsics: | |
// dqx/deee = fx (dux/deee + ddeltaux/du du/deee) | |
// = fx ( dux/deee (1 + ddeltaux/dux) + ddeltaux/duy duy/deee) | |
// dqy/deee = fy ( duy/deee (1 + ddeltauy/duy) + ddeltauy/dux dux/deee) | |
q->x = (u.x + deltau.x) * fx + cx; | |
q->y = (u.y + deltau.y) * fy + cy; | |
if( dq_dfxy ) | |
{ | |
// I have the projection, and I now need to propagate the gradients | |
// xy = fxy * distort(xy)/distort(z) + cxy | |
dq_dfxy->x = u.x + deltau.x; | |
dq_dfxy->y = u.y + deltau.y; | |
} | |
// convert ddeltau_dixy to ddeltau_duxy | |
for(int i=0; i<2; i++) | |
{ | |
ddeltau_dux[i] *= segments_per_u; | |
ddeltau_duy[i] *= segments_per_u; | |
} | |
void propagate_extrinsics( mrcal_point3_t* dq_deee, | |
const mrcal_point3_t* dp_deee) | |
{ | |
mrcal_point3_t du_deee[2]; | |
mul_genN3_gen33_vout(2, (double*)du_dp, (double*)dp_deee, (double*)du_deee); | |
for(int i=0; i<3; i++) | |
{ | |
dq_deee[0].xyz[i] = | |
fx * | |
( du_deee[0].xyz[i] * (1. + ddeltau_dux[0]) + | |
ddeltau_duy[0] * du_deee[1].xyz[i]); | |
dq_deee[1].xyz[i] = | |
fy * | |
( du_deee[1].xyz[i] * (1. + ddeltau_duy[1]) + | |
ddeltau_dux[1] * du_deee[0].xyz[i]); | |
} | |
} | |
void propagate_extrinsics_cam0( mrcal_point3_t* dq_deee) | |
{ | |
for(int i=0; i<3; i++) | |
{ | |
dq_deee[0].xyz[i] = | |
fx * | |
( du_dp[0][i] * (1. + ddeltau_dux[0]) + | |
ddeltau_duy[0] * du_dp[1][i]); | |
dq_deee[1].xyz[i] = | |
fy * | |
( du_dp[1][i] * (1. + ddeltau_duy[1]) + | |
ddeltau_dux[1] * du_dp[0][i]); | |
} | |
} | |
if(camera_at_identity) | |
{ | |
if( dq_drcamera != NULL ) memset(dq_drcamera->xyz, 0, 6*sizeof(double)); | |
if( dq_dtcamera != NULL ) memset(dq_dtcamera->xyz, 0, 6*sizeof(double)); | |
if( dq_drframe != NULL ) propagate_extrinsics( dq_drframe, dp_drf ); | |
if( dq_dtframe != NULL ) propagate_extrinsics_cam0( dq_dtframe ); | |
} | |
else | |
{ | |
if( dq_drcamera != NULL ) propagate_extrinsics( dq_drcamera, dp_drc ); | |
if( dq_dtcamera != NULL ) propagate_extrinsics( dq_dtcamera, dp_dtc ); | |
if( dq_drframe != NULL ) propagate_extrinsics( dq_drframe, dp_drf ); | |
if( dq_dtframe != NULL ) propagate_extrinsics( dq_dtframe, dp_dtf ); | |
} | |
} | |
// Metadata describing the sparse representation of the intrinsics gradients
// returned by project() for splined lens models. The dense gradient matrix is
// mostly zeros; only a small square "run" of spline control points affects any
// given projection, so project() stores just those values in a caller-supplied
// pool, and this struct says how to interpret them
typedef struct
{
    // Points into the caller-supplied dq_dintrinsics_pool_double buffer where
    // the sparse gradient values (ABCDx, ABCDy spline weights) live
    double* pool;
    // Side length of one square gradient run: spline order + 1
    uint16_t run_side_length;
    // Stride, in state variables, between successive rows of spline control
    // points: 2*Nx (2 values, x and y, per control point)
    uint16_t ivar_stridey;
} gradient_sparse_meta_t;
// Projects 3D point(s), and reports the projection, and all the gradients. This
// is the main internal callback in the optimizer. This operates in one of two modes:
//
// if(calibration_object_width_n == 0) then we're projecting ONE point. In world
// coords this point is at frame_rt->t. frame_rt->r is not referenced. q and the
// gradients reference 2 values (x,y in the imager)
//
// if(calibration_object_width_n > 0) then we're projecting a whole calibration
// object. The pose of this object is given in frame_rt. We project ALL
// calibration_object_width_n*calibration_object_height_n points. q and the
// gradients reference ALL of these points
static
void project( // out
              mrcal_point2_t* restrict q,

              // The intrinsics gradients. These are split among several arrays.
              // High-parameter-count lens models can return their gradients
              // sparsely. All the actual gradient values live in
              // dq_dintrinsics_pool_double, a buffer supplied by the caller. If
              // dq_dintrinsics_pool_double is not NULL, the rest of the
              // variables are assumed non-NULL, and we compute all the
              // intrinsics gradients. Conversely, if dq_dintrinsics_pool_double
              // is NULL, no intrinsics gradients are computed
              double*  restrict dq_dintrinsics_pool_double,
              int*     restrict dq_dintrinsics_pool_int,
              double** restrict dq_dfxy,
              double** restrict dq_dintrinsics_nocore,
              gradient_sparse_meta_t* gradient_sparse_meta,

              mrcal_point3_t* restrict dq_drcamera,
              mrcal_point3_t* restrict dq_dtcamera,
              mrcal_point3_t* restrict dq_drframe,
              mrcal_point3_t* restrict dq_dtframe,
              mrcal_calobject_warp_t* restrict dq_dcalobject_warp,

              // in

              // everything; includes the core, if there is one
              const double* restrict intrinsics,
              const mrcal_pose_t* restrict camera_rt,
              const mrcal_pose_t* restrict frame_rt,
              const mrcal_calobject_warp_t* restrict calobject_warp,

              bool camera_at_identity, // if true, camera_rt is unused
              const mrcal_lensmodel_t* lensmodel,
              const mrcal_projection_precomputed_t* precomputed,

              double calibration_object_spacing,
              int calibration_object_width_n,
              int calibration_object_height_n)
{
    assert(precomputed->ready);

    // Parametric and non-parametric models do different things:
    //
    // parametric models:
    //   u = distort(p, distortions)
    //   q = uxy/uz * fxy + cxy
    //
    //   extrinsic gradients:
    //       dqx/deee = d( ux/uz * fx + cx)/deee =
    //                = fx d(ux/uz)/deee =
    //                = fx/uz^2 ( uz dux/deee - duz/deee ux )
    //
    // nonparametric (splined) models
    //   u = stereographic(p)
    //   q = (u + deltau(u)) * f + c
    //
    //   Extrinsics:
    //     dqx/deee = fx (dux/deee + ddeltaux/du du/deee)
    //              = fx ( dux/deee (1 + ddeltaux/dux) + ddeltaux/duy duy/deee)
    //     dqy/deee = fy ( duy/deee (1 + ddeltauy/duy) + ddeltauy/dux dux/deee)
    //
    //   Intrinsics:
    //     dq/diii = f ddeltau/diii
    //
    // So the two kinds of models have completely different expressions for
    // their gradients, and I implement them separately

    // Number of points we project: the whole chessboard grid, or one discrete
    // point
    const int Npoints =
        calibration_object_width_n ?
        calibration_object_width_n*calibration_object_height_n : 1;

    const int Nintrinsics = mrcal_lensmodel_num_params(lensmodel);

    // I need to compose two transformations
    //
    // (object in frame coords) -> [frame transform] -> (object in the reference frame) ->
    // -> [camera rt] -> (camera frame)
    //
    // Note that here the frame transform transforms TO the reference frame and
    // the camera transform transforms FROM the reference frame. This is how my
    // data is expected to be set up
    //
    // [Rc tc] [Rf tf] = [Rc*Rf  Rc*tf + tc]
    // [0  1 ] [0  1 ]   [0      1         ]
    //
    // This transformation (and its gradients) is handled by mrcal_compose_rt()
    // I refer to the camera*frame transform as the "joint" transform, or the
    // letter j

    // Gradients of the composed "joint" transform with respect to the camera
    // and frame transforms. Filled in by mrcal_compose_rt() below
    geometric_gradients_t gg;

    double _joint_rt[6];
    double* joint_rt;

    // The caller has an odd-looking array reference [-3]. This is intended, but
    // the compiler throws a warning. I silence it here. gcc-10 produces a very
    // strange-looking warning that was reported to the maintainers:
    // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97261
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
    mrcal_pose_t frame_rt_validr = {.t = frame_rt->t};
#pragma GCC diagnostic pop
    // For discrete points (calibration_object_width_n==0) frame_rt->r is not
    // referenced, so the rotation stays at 0 in that case
    if(calibration_object_width_n) frame_rt_validr.r = frame_rt->r;

    if(!camera_at_identity)
    {
        // make sure I can pass mrcal_pose_t.r as an rt[] transformation
        static_assert( offsetof(mrcal_pose_t, r) == 0,                 "mrcal_pose_t has expected structure");
        static_assert( offsetof(mrcal_pose_t, t) == 3*sizeof(double),  "mrcal_pose_t has expected structure");
        mrcal_compose_rt( _joint_rt,
                          gg._d_rj_rc, gg._d_rj_rf,
                          gg._d_tj_rc, gg._d_tj_tf,
                          camera_rt     ->r.xyz,
                          frame_rt_validr.r.xyz);
        joint_rt = _joint_rt;
    }
    else
    {
        // We're looking at the reference frame, so this camera transform is
        // fixed at the identity. We don't need to compose anything, nor
        // propagate gradients for the camera extrinsics, since those don't
        // exist in the parameter vector

        // Here the "joint" transform is just the "frame" transform
        joint_rt = frame_rt_validr.r.xyz;
    }

    // Not using OpenCV distortions, the distortion and projection are not
    // coupled
    double Rj[3*3];
    double d_Rj_rj[9*3];
    mrcal_R_from_r(Rj, d_Rj_rj, joint_rt);

    mrcal_point2_t* p_dq_dfxy                  = NULL;
    double*         p_dq_dintrinsics_nocore    = NULL;
    bool has_core                 = modelHasCore_fxfycxcy(lensmodel);
    // splined models report their intrinsics gradients sparsely; everything
    // else reports them densely
    bool has_dense_intrinsics_grad  = (lensmodel->type != MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC);
    bool has_sparse_intrinsics_grad = (lensmodel->type == MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC);

    // Length of one side of a sparse gradient run: spline order + 1
    int runlen = (lensmodel->type == MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC) ?
        (lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config.order + 1) :
        0;

    if(dq_dintrinsics_pool_double != NULL)
    {
        // Carve up the caller-supplied gradient pool into the fxy, dense-nocore
        // and sparse sections, in that order

        // nothing by default
        *dq_dfxy                   = NULL;
        *dq_dintrinsics_nocore     = NULL;
        gradient_sparse_meta->pool = NULL;
        int ivar_pool = 0;

        if(has_core)
        {
            // Each point produces 2 measurements. Each one depends on ONE fxy
            // element. So Npoints*2 of these
            *dq_dfxy  = &dq_dintrinsics_pool_double[ivar_pool];
            p_dq_dfxy = (mrcal_point2_t*)*dq_dfxy;
            ivar_pool += Npoints*2;
        }
        if(has_dense_intrinsics_grad)
        {
            *dq_dintrinsics_nocore = p_dq_dintrinsics_nocore = &dq_dintrinsics_pool_double[ivar_pool];
            ivar_pool += Npoints*2 * (Nintrinsics-4);
        }
        if(has_sparse_intrinsics_grad)
        {
            if(lensmodel->type != MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC)
            {
                MSG("Unhandled lens model: %d (%s)",
                    lensmodel->type,
                    mrcal_lensmodel_name_unconfigured(lensmodel));
                assert(0);
            }
            const mrcal_LENSMODEL_SPLINED_STEREOGRAPHIC__config_t* config =
                &lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config;
            *gradient_sparse_meta =
                (gradient_sparse_meta_t)
                {
                    .run_side_length = config->order+1,
                    .ivar_stridey    = 2*config->Nx,
                    .pool            = &dq_dintrinsics_pool_double[ivar_pool]
                };
            ivar_pool += Npoints*2 * runlen;
        }
    }

    // These are produced by propagate_extrinsics() and consumed by
    // project_point()
    mrcal_point3_t _dp_drc[3];
    mrcal_point3_t _dp_dtc[3];
    mrcal_point3_t _dp_drf[3];
    mrcal_point3_t _dp_dtf[3];
    mrcal_point3_t* dp_drc;
    mrcal_point3_t* dp_dtc;
    mrcal_point3_t* dp_drf;
    mrcal_point3_t* dp_dtf;

    // GNU C nested function (gcc extension). Transforms pt_ref through the
    // joint transform (Rj, _tj) to camera coordinates, and computes the
    // gradients of the transformed point with respect to the camera and frame
    // rt parameters, storing them in the _dp_* arrays above
    mrcal_point3_t propagate_extrinsics( const mrcal_point3_t* pt_ref,
                                         const geometric_gradients_t* gg,
                                         const double* Rj, const double* d_Rj_rj,
                                         const double* _tj )
    {
        // Rj * pt + tj -> pt
        mrcal_point3_t p;
        mul_vec3_gen33t_vout(pt_ref->xyz, Rj, p.xyz);
        add_vec(3, p.xyz, _tj);

        // Generic case: both a rotation and a translation gradient contribute
        void propagate_extrinsics_one(mrcal_point3_t* dp_dparam,
                                      const double* drj_dparam,
                                      const double* dtj_dparam,
                                      const double* d_Rj_rj)
        {
            // dRj[row0]/drj is 3x3 matrix at &d_Rj_rj[0]
            // dRj[row0]/drc = dRj[row0]/drj * drj_drc
            for(int i=0; i<3; i++)
            {
                mul_vec3_gen33_vout( pt_ref->xyz, &d_Rj_rj[9*i], dp_dparam[i].xyz );
                mul_vec3_gen33     ( dp_dparam[i].xyz, drj_dparam);
                add_vec(3, dp_dparam[i].xyz, &dtj_dparam[3*i] );
            }
        }
        // Special case: the rotation gradient is 0, so only the translation
        // gradient dtj_dparam (a 3x3 = 9-double block) survives
        void propagate_extrinsics_one_rzero(mrcal_point3_t* dp_dparam,
                                            const double* dtj_dparam,
                                            const double* d_Rj_rj)
        {
            // dRj[row0]/drj is 3x3 matrix at &d_Rj_rj[0]
            // dRj[row0]/drc = dRj[row0]/drj * drj_drc
            memcpy(dp_dparam->xyz, dtj_dparam, 9*sizeof(double));
        }
        // Special case: the translation gradient is 0
        void propagate_extrinsics_one_tzero(mrcal_point3_t* dp_dparam,
                                            const double* drj_dparam,
                                            const double* d_Rj_rj)
        {
            // dRj[row0]/drj is 3x3 matrix at &d_Rj_rj[0]
            // dRj[row0]/drc = dRj[row0]/drj * drj_drc
            for(int i=0; i<3; i++)
            {
                mul_vec3_gen33_vout( pt_ref->xyz, &d_Rj_rj[9*i], dp_dparam[i].xyz );
                mul_vec3_gen33     ( dp_dparam[i].xyz, drj_dparam);
            }
        }
        // Special case: rotation gradient is 0, translation gradient is the
        // identity, so dp/dparam = I
        void propagate_extrinsics_one_rzero_tidentity(mrcal_point3_t* dp_dparam,
                                                      const double* d_Rj_rj)
        {
            dp_dparam[0] = (mrcal_point3_t){.x = 1.0};
            dp_dparam[1] = (mrcal_point3_t){.y = 1.0};
            dp_dparam[2] = (mrcal_point3_t){.z = 1.0};
        }
        // Special case for the camera-at-reference path: dp/drf =
        // reshape(dRf_drf p_ref)
        void propagate_extrinsics_one_cam0(mrcal_point3_t* dp_rf,
                                           const double* _d_Rf_rf)
        {
            // dRj[row0]/drj is 3x3 matrix at &_d_Rf_rf[0]
            // dRj[row0]/drc = dRj[row0]/drj * drj_drc
            for(int i=0; i<3; i++)
                mul_vec3_gen33_vout( pt_ref->xyz, &_d_Rf_rf[9*i], dp_rf[i].xyz );
        }
        if(gg != NULL)
        {
            propagate_extrinsics_one(                _dp_drc, gg->_d_rj_rc, gg->_d_tj_rc, d_Rj_rj);
            propagate_extrinsics_one_rzero_tidentity(_dp_dtc,                             d_Rj_rj);
            propagate_extrinsics_one_tzero(          _dp_drf, gg->_d_rj_rf,               d_Rj_rj);
            propagate_extrinsics_one_rzero(          _dp_dtf, gg->_d_tj_tf,               d_Rj_rj);
            dp_drc = _dp_drc;
            dp_dtc = _dp_dtc;
            dp_drf = _dp_drf;
            dp_dtf = _dp_dtf;
        }
        else
        {
            // camera is at the reference. The "joint" coord system is the "frame"
            // coord system
            //
            //   p_cam = Rf p_ref + tf
            //
            // dp/drc = 0
            // dp/dtc = 0
            // dp/drf = reshape(dRf_drf p_ref)
            // dp/dtf = I
            propagate_extrinsics_one_cam0(_dp_drf, d_Rj_rj);
            dp_drc = NULL;
            dp_dtc = NULL;
            dp_drf = _dp_drf;
            dp_dtf = NULL; // this is I. The user of this MUST know to interpret
                           // it that way
        }
        return p;
    }

    // GNU C nested function. Projects one camera-coordinate point p with the
    // given lens model, dispatching to the model-specific projection routine,
    // and finishes the calobject_warp gradient if requested
    void project_point( // outputs
                        mrcal_point2_t* q,
                        mrcal_point2_t* p_dq_dfxy,
                        double* p_dq_dintrinsics_nocore,
                        double* gradient_sparse_meta_pool,
                        int runlen,
                        mrcal_point3_t* restrict dq_drcamera,
                        mrcal_point3_t* restrict dq_dtcamera,
                        mrcal_point3_t* restrict dq_drframe,
                        mrcal_point3_t* restrict dq_dtframe,
                        mrcal_calobject_warp_t* restrict dq_dcalobject_warp,
                        // inputs
                        const mrcal_point3_t* p,
                        const double* restrict intrinsics,
                        const mrcal_lensmodel_t* lensmodel,
                        const mrcal_calobject_warp_t* dpt_refz_dwarp,
                        // if NULL then the camera is at the reference
                        bool camera_at_identity,
                        const double* Rj)
    {
        if(lensmodel->type == MRCAL_LENSMODEL_SPLINED_STEREOGRAPHIC)
        {
            // only need 3+3 for quadratic splines
            double grad_ABCDx_ABCDy[4+4];
            int ivar0;

            _project_point_splined( // outputs
                                    q, p_dq_dfxy,
                                    grad_ABCDx_ABCDy,
                                    &ivar0,
                                    dq_drcamera,dq_dtcamera,dq_drframe,dq_dtframe,
                                    // inputs
                                    p,
                                    dp_drc, dp_dtc, dp_drf, dp_dtf,
                                    intrinsics,
                                    camera_at_identity,
                                    lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config.order,
                                    lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config.Nx,
                                    lensmodel->LENSMODEL_SPLINED_STEREOGRAPHIC__config.Ny,
                                    precomputed->LENSMODEL_SPLINED_STEREOGRAPHIC__precomputed.segments_per_u);
            // WARNING: if I could assume that dq_dintrinsics_pool_double!=NULL then I wouldn't need to copy the context
            if(dq_dintrinsics_pool_int != NULL)
            {
                // record where this point's sparse gradient run starts, and
                // stash the spline weights into the pool
                *(dq_dintrinsics_pool_int++) = ivar0;
                memcpy(gradient_sparse_meta_pool,
                       grad_ABCDx_ABCDy,
                       sizeof(double)*runlen*2);
            }
        }
        else if(lensmodel->type == MRCAL_LENSMODEL_CAHVOR)
        {
            project_cahvor( // outputs
                            q,p_dq_dfxy,
                            p_dq_dintrinsics_nocore,
                            dq_drcamera,dq_dtcamera,dq_drframe,dq_dtframe,
                            // inputs
                            p,
                            dp_drc, dp_dtc, dp_drf, dp_dtf,
                            intrinsics,
                            camera_at_identity,
                            lensmodel);
        }
        else if(lensmodel->type == MRCAL_LENSMODEL_CAHVORE)
        {
            // project_cahvore() can fail; report zeros rather than garbage in
            // that case
            if(!project_cahvore( // outputs
                                 q,p_dq_dfxy,
                                 p_dq_dintrinsics_nocore,
                                 dq_drcamera,dq_dtcamera,dq_drframe,dq_dtframe,
                                 // inputs
                                 p,
                                 dp_drc, dp_dtc, dp_drf, dp_dtf,
                                 intrinsics,
                                 camera_at_identity,
                                 lensmodel))
            {
                MSG("CAHVORE PROJECTION OF (%f,%f,%f) FAILED. I don't know what to do. Setting result and all gradients to 0",
                    p->x, p->y, p->z);
                memset(q, 0, sizeof(*q));
                if(p_dq_dfxy)               memset(p_dq_dfxy,               0, sizeof(*p_dq_dfxy));
                if(p_dq_dintrinsics_nocore) memset(p_dq_dintrinsics_nocore, 0, sizeof(*p_dq_dintrinsics_nocore) * 2 * (Nintrinsics-4));
                if(dq_drcamera)             memset(dq_drcamera,             0, sizeof(*dq_drcamera));
                if(dq_dtcamera)             memset(dq_dtcamera,             0, sizeof(*dq_dtcamera));
                if(dq_drframe)              memset(dq_drframe,              0, sizeof(*dq_drframe));
                if(dq_dtframe)              memset(dq_dtframe,              0, sizeof(*dq_dtframe));
            }
        }
        else
        {
            _project_point_parametric( // outputs
                                       q,p_dq_dfxy,
                                       p_dq_dintrinsics_nocore,
                                       dq_drcamera,dq_dtcamera,dq_drframe,dq_dtframe,
                                       // inputs
                                       p,
                                       dp_drc, dp_dtc, dp_drf, dp_dtf,
                                       intrinsics,
                                       camera_at_identity,
                                       lensmodel);
        }

        if( dq_dcalobject_warp != NULL && dpt_refz_dwarp != NULL )
        {
            // p = proj(Rc Rf warp(x) + Rc tf + tc);
            // dp/dw = dp/dRcRf(warp(x)) dR(warp(x))/dwarp(x) dwarp/dw =
            //       = dp/dtc RcRf dwarp/dw
            // dp/dtc is dq_dtcamera
            // R is rodrigues(rj)
            // dwarp/dw = [0 0 0 ...]
            //            [0 0 0 ...]
            //            [a b c ...]
            // Let R = [r0 r1 r2]
            // dp/dw = dp/dt [a r2   b r2] =
            //       = [a dp/dt r2   b dp/dt r2 ...]
            mrcal_point3_t* p_dq_dt;
            if(!camera_at_identity) p_dq_dt = dq_dtcamera;
            else                    p_dq_dt = dq_dtframe;
            // d[] = dq/dt * r2, for the two rows of q
            double d[] =
                { p_dq_dt[0].xyz[0] * Rj[0*3 + 2] +
                  p_dq_dt[0].xyz[1] * Rj[1*3 + 2] +
                  p_dq_dt[0].xyz[2] * Rj[2*3 + 2],
                  p_dq_dt[1].xyz[0] * Rj[0*3 + 2] +
                  p_dq_dt[1].xyz[1] * Rj[1*3 + 2] +
                  p_dq_dt[1].xyz[2] * Rj[2*3 + 2]};
            for(int i=0; i<MRCAL_NSTATE_CALOBJECT_WARP; i++)
            {
                dq_dcalobject_warp[0].values[i] = d[0]*dpt_refz_dwarp->values[i];
                dq_dcalobject_warp[1].values[i] = d[1]*dpt_refz_dwarp->values[i];
            }
        }
    }

    if( calibration_object_width_n == 0 )
    { // projecting discrete points
        // The discrete point sits at the origin of the "frame" coord system:
        // its world position is frame_rt->t, which is the translation of the
        // joint transform here
        mrcal_point3_t p =
            propagate_extrinsics( &(mrcal_point3_t){},
                                  camera_at_identity ? NULL : &gg,
                                  Rj, d_Rj_rj, &joint_rt[3]);
        project_point(  q,
                        p_dq_dfxy, p_dq_dintrinsics_nocore,
                        gradient_sparse_meta ? gradient_sparse_meta->pool : NULL,
                        runlen,
                        dq_drcamera, dq_dtcamera, dq_drframe, dq_dtframe, NULL,

                        &p,
                        intrinsics, lensmodel,
                        NULL,
                        camera_at_identity, Rj);
    }
    else
    { // projecting a chessboard
        int i_pt = 0;
        // The calibration object has a simple grid geometry
        for(int y = 0; y<calibration_object_height_n; y++)
            for(int x = 0; x<calibration_object_width_n; x++)
            {
                mrcal_point3_t pt_ref = {.x = (double)x * calibration_object_spacing,
                                         .y = (double)y * calibration_object_spacing};
                mrcal_calobject_warp_t dpt_refz_dwarp = {};
                if(calobject_warp != NULL)
                {
                    // Add a board warp here. I have two parameters, and they describe
                    // additive flex along the x axis and along the y axis, in that
                    // order. In each direction the flex is a parabola, with the
                    // parameter k describing the max deflection at the center. If the
                    // ends are at +- 1 I have d = k*(1 - x^2). If the ends are at
                    // (0,N-1) the equivalent expression is: d = k*( 1 - 4*x^2/(N-1)^2 +
                    // 4*x/(N-1) - 1 ) = d = 4*k*(x/(N-1) - x^2/(N-1)^2) = d =
                    // 4.*k*x*r(1. - x*r)
                    double xr = (double)x / (double)(calibration_object_width_n -1);
                    double yr = (double)y / (double)(calibration_object_height_n-1);
                    double dx = 4. * xr * (1. - xr);
                    double dy = 4. * yr * (1. - yr);
                    pt_ref.z += calobject_warp->x2 * dx;
                    pt_ref.z += calobject_warp->y2 * dy;
                    dpt_refz_dwarp.x2 = dx;
                    dpt_refz_dwarp.y2 = dy;
                }

                mrcal_point3_t p =
                    propagate_extrinsics( &pt_ref,
                                          camera_at_identity ? NULL : &gg,
                                          Rj, d_Rj_rj, &joint_rt[3]);

                // each point produces 2 measurement rows, hence the i_pt*2
                // indexing into the per-point gradient outputs
                mrcal_point3_t* dq_drcamera_here = dq_drcamera ? &dq_drcamera [i_pt*2] : NULL;
                mrcal_point3_t* dq_dtcamera_here = dq_dtcamera ? &dq_dtcamera [i_pt*2] : NULL;
                mrcal_point3_t* dq_drframe_here  = dq_drframe  ? &dq_drframe  [i_pt*2] : NULL;
                mrcal_point3_t* dq_dtframe_here  = dq_dtframe  ? &dq_dtframe  [i_pt*2] : NULL;
                mrcal_calobject_warp_t* dq_dcalobject_warp_here = dq_dcalobject_warp ? &dq_dcalobject_warp [i_pt*2] : NULL;

                mrcal_point3_t dq_dtcamera_here_dummy[2];
                mrcal_point3_t dq_dtframe_here_dummy [2];
                if(dq_dcalobject_warp)
                {
                    // I need all translation gradients to be available to
                    // compute the calobject_warp gradients (see the end of the
                    // project_point() function above). So I compute those even
                    // if the caller didn't ask for them
                    if(!dq_dtcamera_here) dq_dtcamera_here = dq_dtcamera_here_dummy;
                    if(!dq_dtframe_here)  dq_dtframe_here  = dq_dtframe_here_dummy;
                }

                project_point(&q[i_pt],
                              p_dq_dfxy ? &p_dq_dfxy[i_pt] : NULL,
                              p_dq_dintrinsics_nocore ? &p_dq_dintrinsics_nocore[2*(Nintrinsics-4)*i_pt] : NULL,
                              gradient_sparse_meta ? &gradient_sparse_meta->pool[i_pt*runlen*2] : NULL,
                              runlen,
                              dq_drcamera_here, dq_dtcamera_here, dq_drframe_here, dq_dtframe_here, dq_dcalobject_warp_here,

                              &p,
                              intrinsics, lensmodel,
                              &dpt_refz_dwarp,
                              camera_at_identity, Rj);
                i_pt++;
            }
    }
}
// NOT A PART OF THE EXTERNAL API. This is exported for the mrcal python wrapper | |
// only | |
bool _mrcal_project_internal( // out | |
mrcal_point2_t* q, | |
// Stored as a row-first array of shape (N,2,3). Each | |
// row lives in a mrcal_point3_t | |
mrcal_point3_t* dq_dp, | |
// core, distortions concatenated. Stored as a row-first | |
// array of shape (N,2,Nintrinsics). This is a DENSE array. | |
// High-parameter-count lens models have very sparse | |
// gradients here, and the internal project() function | |
// returns those sparsely. For now THIS function densifies | |
// all of these | |
double* dq_dintrinsics, | |
// in | |
const mrcal_point3_t* p, | |
int N, | |
const mrcal_lensmodel_t* lensmodel, | |
// core, distortions concatenated | |
const double* intrinsics, | |
int Nintrinsics, | |
const mrcal_projection_precomputed_t* precomputed) | |
{ | |
if( dq_dintrinsics == NULL ) | |
{ | |
for(int i=0; i<N; i++) | |
{ | |
mrcal_pose_t frame = {.r = {}, | |
.t = p[i]}; | |
// simple non-intrinsics-gradient path | |
project( &q[i], | |
NULL, NULL, NULL, NULL, NULL, | |
NULL, NULL, NULL, dq_dp, NULL, | |
// in | |
intrinsics, NULL, &frame, NULL, true, | |
lensmodel, precomputed, | |
0.0, 0,0); | |
} | |
return true; | |
} | |
// Some models have sparse gradients, but I'm returning a dense array here. | |
// So I init everything to 0 | |
memset(dq_dintrinsics, 0, N*2*Nintrinsics*sizeof(double)); | |
int Ngradients = get_Ngradients(lensmodel, Nintrinsics); | |
for(int i=0; i<N; i++) | |
{ | |
mrcal_pose_t frame = {.r = {}, | |
.t = p[i]}; | |
// simple non-intrinsics-gradient path | |
double dq_dintrinsics_pool_double[Ngradients]; | |
int dq_dintrinsics_pool_int [1]; | |
double* dq_dfxy = NULL; | |
double* dq_dintrinsics_nocore = NULL; | |
gradient_sparse_meta_t gradient_sparse_meta = {}; // init to pacify compiler warning | |
project( &q[i], | |
dq_dintrinsics_pool_double, | |
dq_dintrinsics_pool_int, | |
&dq_dfxy, &dq_dintrinsics_nocore, &gradient_sparse_meta, | |
NULL, NULL, NULL, dq_dp, NULL, | |
// in | |
intrinsics, NULL, &frame, NULL, true, | |
lensmodel, precomputed, | |
0.0, 0,0); | |
int Ncore = 0; | |
if(dq_dfxy != NULL) | |
{ | |
Ncore = 4; | |
// fxy. off-diagonal elements are 0 | |
dq_dintrinsics[0*Nintrinsics + 0] = dq_dfxy[0]; | |
dq_dintrinsics[0*Nintrinsics + 1] = 0.0; | |
dq_dintrinsics[1*Nintrinsics + 0] = 0.0; | |
dq_dintrinsics[1*Nintrinsics + 1] = dq_dfxy[1]; | |
// cxy. Identity | |
dq_dintrinsics[0*Nintrinsics + 2] = 1.0; | |
dq_dintrinsics[0*Nintrinsics + 3] = 0.0; | |
dq_dintrinsics[1*Nintrinsics + 2] = 0.0; | |
dq_dintrinsics[1*Nintrinsics + 3] = 1.0; | |
} | |
if( dq_dintrinsics_nocore != NULL ) | |
{ | |
for(int i_xy=0; i_xy<2; i_xy++) | |
memcpy(&dq_dintrinsics[i_xy*Nintrinsics + Ncore], | |
&dq_dintrinsics_nocore[i_xy*(Nintrinsics-Ncore)], | |
(Nintrinsics-Ncore)*sizeof(double)); | |
} | |
if(gradient_sparse_meta.pool != NULL) | |
{ | |
// u = stereographic(p) | |
// q = (u + deltau(u)) * f + c | |
// | |
// Intrinsics: | |
// dq/diii = f ddeltau/diii | |
// | |
// ddeltau/diii = flatten(ABCDx[0..3] * ABCDy[0..3]) | |
const int ivar0 = dq_dintrinsics_pool_int[0]; | |
const int len = gradient_sparse_meta.run_side_length; | |
const double* ABCDx = &gradient_sparse_meta.pool[0]; | |
const double* ABCDy = &gradient_sparse_meta.pool[len]; | |
const int ivar_stridey = gradient_sparse_meta.ivar_stridey; | |
const double* fxy = &intrinsics[0]; | |
for(int i_xy=0; i_xy<2; i_xy++) | |
for(int iy=0; iy<len; iy++) | |
for(int ix=0; ix<len; ix++) | |
{ | |
int ivar = ivar0 + ivar_stridey*iy + ix*2 + i_xy; | |
dq_dintrinsics[ivar + i_xy*Nintrinsics] = | |
ABCDx[ix]*ABCDy[iy]*fxy[i_xy]; | |
} | |
} | |
// advance | |
dq_dintrinsics = &dq_dintrinsics[2*Nintrinsics]; | |
if(dq_dp != NULL) | |
dq_dp = &dq_dp[2]; | |
} | |
return true; | |
} | |
// External interface to the internal project() function. The internal function | |
// is more general (supports geometric transformations prior to projection, and | |
// supports chessboards). dq_dintrinsics and/or dq_dp are allowed to be NULL if | |
// we're not interested in gradients. | |
// | |
// Projecting out-of-bounds points (beyond the field of view) returns undefined | |
// values. Generally things remain continuous even as we move off the imager | |
// domain. Pinhole-like projections will work normally if projecting a point | |
// behind the camera. Splined projections clamp to the nearest spline segment: | |
// the projection will fly off to infinity quickly since we're extrapolating a | |
// polynomial, but the function will remain continuous. | |
bool mrcal_project( // out | |
mrcal_point2_t* q, | |
// Stored as a row-first array of shape (N,2,3). Each row | |
// lives in a mrcal_point3_t. May be NULL | |
mrcal_point3_t* dq_dp, | |
// core, distortions concatenated. Stored as a row-first | |
// array of shape (N,2,Nintrinsics). This is a DENSE array. | |
// High-parameter-count lens models have very sparse | |
// gradients here, and the internal project() function | |
// returns those sparsely. For now THIS function densifies | |
// all of these. May be NULL | |
double* dq_dintrinsics, | |
// in | |
const mrcal_point3_t* p, | |
int N, | |
const mrcal_lensmodel_t* lensmodel, | |
// core, distortions concatenated | |
const double* intrinsics) | |
{ | |
// The outer logic (outside the loop-over-N-points) is duplicated in | |
// mrcal_project() and in the python wrapper definition in _project() and | |
// _project_withgrad() in mrcal-genpywrap.py. Please keep them in sync | |
if(dq_dintrinsics != NULL || dq_dp != NULL) | |
{ | |
mrcal_lensmodel_metadata_t meta = mrcal_lensmodel_metadata(lensmodel); | |
if(!meta.has_gradients) | |
{ | |
MSG("mrcal_project(lensmodel='%s') cannot return gradients; this is not yet implemented", | |
mrcal_lensmodel_name_unconfigured(lensmodel)); | |
return false; | |
} | |
} | |
int Nintrinsics = mrcal_lensmodel_num_params(lensmodel); | |
// Special-case for opencv/pinhole and projection-only. cvProjectPoints2 and | |
// project() have a lot of overhead apparently, and calling either in a loop | |
// is very slow. I can call it once, and use its fast internal loop, | |
// however. This special case does the same thing, but much faster. | |
if(dq_dintrinsics == NULL && dq_dp == NULL && | |
(MRCAL_LENSMODEL_IS_OPENCV(lensmodel->type) || | |
lensmodel->type == MRCAL_LENSMODEL_PINHOLE)) | |
{ | |
_mrcal_project_internal_opencv( q, NULL,NULL, | |
p, N, intrinsics, Nintrinsics); | |
return true; | |
} | |
mrcal_projection_precomputed_t precomputed; | |
_mrcal_precompute_lensmodel_data(&precomputed, lensmodel); | |
return | |
_mrcal_project_internal(q, dq_dp, dq_dintrinsics, | |
p, N, lensmodel, intrinsics, | |
Nintrinsics, &precomputed); | |
} | |
// Maps a set of distorted 2D imager points q to a 3D vector in camera | |
// coordinates that produced these pixel observations. The 3D vector is defined | |
// up-to-length. The returned vectors v are not normalized, and may have any | |
// length. | |
// | |
// This is the "reverse" direction, so an iterative nonlinear optimization is | |
// performed internally to compute this result. This is much slower than | |
// mrcal_project. For OpenCV distortions specifically, OpenCV has | |
// cvUndistortPoints() (and cv2.undistortPoints()), but these are inaccurate: | |
// https://github.com/opencv/opencv/issues/8811 | |
bool mrcal_unproject( // out | |
mrcal_point3_t* out, | |
// in | |
const mrcal_point2_t* q, | |
int N, | |
const mrcal_lensmodel_t* lensmodel, | |
// core, distortions concatenated | |
const double* intrinsics) | |
{ | |
mrcal_lensmodel_metadata_t meta = mrcal_lensmodel_metadata(lensmodel); | |
if(!meta.has_gradients) | |
{ | |
MSG("mrcal_unproject(lensmodel='%s') is not yet implemented: we need gradients", | |
mrcal_lensmodel_name_unconfigured(lensmodel)); | |
return false; | |
} | |
mrcal_projection_precomputed_t precomputed; | |
_mrcal_precompute_lensmodel_data(&precomputed, lensmodel); | |
return _mrcal_unproject_internal(out, q, N, lensmodel, intrinsics, &precomputed); | |
} | |
// NOT A PART OF THE EXTERNAL API. This is exported for the mrcal python wrapper
// only
//
// Unprojects N pixel points q to camera-coordinate observation vectors (not
// normalized). Closed-form models are handled directly; everything else is
// solved iteratively by optimizing in the 2D stereographic domain. Points that
// fail to converge are reported as nan
bool _mrcal_unproject_internal( // out
                               mrcal_point3_t* out,

                               // in
                               const mrcal_point2_t* q,
                               int N,
                               const mrcal_lensmodel_t* lensmodel,
                               // core, distortions concatenated
                               const double* intrinsics,
                               const mrcal_projection_precomputed_t* precomputed)
{
    // easy special-cases: these models have closed-form unprojections
    if( lensmodel->type == MRCAL_LENSMODEL_PINHOLE )
    {
        mrcal_unproject_pinhole(out, NULL, q, N, intrinsics);
        return true;
    }
    if( lensmodel->type == MRCAL_LENSMODEL_STEREOGRAPHIC )
    {
        mrcal_unproject_stereographic(out, NULL, q, N, intrinsics);
        return true;
    }
    if( lensmodel->type == MRCAL_LENSMODEL_LONLAT )
    {
        mrcal_unproject_lonlat(out, NULL, q, N, intrinsics);
        return true;
    }
    if( lensmodel->type == MRCAL_LENSMODEL_LATLON )
    {
        mrcal_unproject_latlon(out, NULL, q, N, intrinsics);
        return true;
    }

    if( lensmodel->type == MRCAL_LENSMODEL_CAHVORE )
    {
        // CAHVORE with any nonzero E term is a non-central projection, which
        // this central-only solver cannot invert. The E terms are the last 3
        // intrinsics
        const int Nintrinsics = mrcal_lensmodel_num_params(lensmodel);
        for(int i = Nintrinsics-3; i<Nintrinsics; i++)
            if(intrinsics[i] != 0.)
            {
                MSG("unproject() currently only works with a central projection. So I cannot unproject(CAHVORE,E!=0). Please set E=0 to centralize this model");
                return false;
            }
    }

    // I optimize in the space of the stereographic projection. This is a 2D
    // space with a direct mapping to/from observation vectors with a single
    // singularity directly behind the camera. The allows me to run an
    // unconstrained optimization here
    for(int i=0; i<N; i++)
    {
        // GNU C nested function: the residual/Jacobian callback for the dogleg
        // solver. The optimization state u is a 2D stereographic projection;
        // the residual is (project(unproject_stereographic(u)) - q[i])
        void cb(const double*   u,
                double*         x,
                double*         J,
                void*           cookie __attribute__((unused)))
        {
            // u is the constant-fxy-cxy 2D stereographic
            // projection of the hypothesis v. I unproject it stereographically,
            // and project it using the actual model
            mrcal_point2_t dv_du[3];
            mrcal_pose_t frame = {};
            mrcal_unproject_stereographic( &frame.t, dv_du,
                                           (mrcal_point2_t*)u, 1,
                                           intrinsics );

            mrcal_point3_t dq_dtframe[2];
            mrcal_point2_t q_hypothesis;
            project( &q_hypothesis,
                     NULL,NULL,NULL,NULL,NULL,
                     NULL, NULL, NULL, dq_dtframe,
                     NULL,

                     // in
                     intrinsics,
                     NULL,
                     &frame,
                     NULL,
                     true,
                     lensmodel, precomputed,
                     0.0, 0,0);
            x[0] = q_hypothesis.x - q[i].x;
            x[1] = q_hypothesis.y - q[i].y;
            // Jacobian by the chain rule: dq/du = dq/dv dv/du
            J[0*2 + 0] =
                dq_dtframe[0].x*dv_du[0].x +
                dq_dtframe[0].y*dv_du[1].x +
                dq_dtframe[0].z*dv_du[2].x;
            J[0*2 + 1] =
                dq_dtframe[0].x*dv_du[0].y +
                dq_dtframe[0].y*dv_du[1].y +
                dq_dtframe[0].z*dv_du[2].y;
            J[1*2 + 0] =
                dq_dtframe[1].x*dv_du[0].x +
                dq_dtframe[1].y*dv_du[1].x +
                dq_dtframe[1].z*dv_du[2].x;
            J[1*2 + 1] =
                dq_dtframe[1].x*dv_du[0].y +
                dq_dtframe[1].y*dv_du[1].y +
                dq_dtframe[1].z*dv_du[2].y;
        }

        const double fx = intrinsics[0];
        const double fy = intrinsics[1];
        const double cx = intrinsics[2];
        const double cy = intrinsics[3];

        // MSG("init. q=(%g,%g)", q[i].x, q[i].y);

        // initial estimate: pinhole projection, re-expressed stereographically.
        // out->xyz[0..1] doubles as the 2D optimization state
        mrcal_project_stereographic( (mrcal_point2_t*)out->xyz, NULL,
                                     &(mrcal_point3_t){.x = (q[i].x-cx)/fx,
                                                       .y = (q[i].y-cy)/fy,
                                                       .z = 1.},
                                     1,
                                     intrinsics );

        // MSG("init. out->xyz[]=(%g,%g)", out->x, out->y);

        dogleg_parameters2_t dogleg_parameters;
        dogleg_getDefaultParameters(&dogleg_parameters);
        dogleg_parameters.dogleg_debug = 0;
        double norm2x =
            dogleg_optimize_dense2(out->xyz, 2, 2, cb, NULL,
                                   &dogleg_parameters,
                                   NULL);
        //This needs to be precise; if it isn't, I barf. Shouldn't happen
        //very often

        static bool already_complained = false;
        // MSG("norm2x = %g", norm2x);
        if(norm2x/2.0 > 1e-4)
        {
            // Didn't converge tightly enough: report nan for this point
            if(!already_complained)
            {
                // MSG("WARNING: I wasn't able to precisely compute some points. norm2x=%f. Returning nan for those. Will complain just once",
                //     norm2x);
                already_complained = true;
            }
            double nan = strtod("NAN", NULL);
            out->xyz[0] = nan;
            out->xyz[1] = nan;
        }
        else
        {
            // out[0,1] is the stereographic representation of the observation
            // vector using idealized fx,fy,cx,cy. This is already the right
            // thing if we're reporting in 2d. Otherwise I need to unproject

            // This is the normal no-error path
            mrcal_unproject_stereographic((mrcal_point3_t*)out, NULL,
                                          (mrcal_point2_t*)out, 1,
                                          intrinsics);
            // If the model cannot represent points behind the camera, flip a
            // behind-the-camera result to the equivalent forward direction
            if(!model_supports_projection_behind_camera(lensmodel) && out->xyz[2] < 0.0)
            {
                out->xyz[0] *= -1.0;
                out->xyz[1] *= -1.0;
                out->xyz[2] *= -1.0;
            }
        }

        // Advance to the next point. Error or not
        out++;
    }
    return true;
}
// The following functions define/use the layout of the state vector. In general | |
// I do: | |
// | |
// intrinsics_cam0 | |
// intrinsics_cam1 | |
// intrinsics_cam2 | |
// ... | |
// extrinsics_cam1 | |
// extrinsics_cam2 | |
// extrinsics_cam3 | |
// ... | |
// frame0 | |
// frame1 | |
// frame2 | |
// .... | |
// calobject_warp0 | |
// calobject_warp1 | |
// ... | |
// From real values to unit-scale values. Optimizer sees unit-scale values | |
static int pack_solver_state_intrinsics( // out | |
double* b, // subset based on problem_selections | |
// in | |
const double* intrinsics, // ALL variables. Not a subset | |
const mrcal_lensmodel_t* lensmodel, | |
mrcal_problem_selections_t problem_selections, | |
int Ncameras_intrinsics ) | |
{ | |
int i_state = 0; | |
const int Nintrinsics = mrcal_lensmodel_num_params(lensmodel); | |
const int Ncore = modelHasCore_fxfycxcy(lensmodel) ? 4 : 0; | |
const int Ndistortions = Nintrinsics - Ncore; | |
for(int icam_intrinsics=0; icam_intrinsics < Ncameras_intrinsics; icam_intrinsics++) | |
{ | |
if( problem_selections.do_optimize_intrinsics_core && Ncore ) | |
{ | |
const mrcal_intrinsics_core_t* intrinsics_core = (const mrcal_intrinsics_core_t*)intrinsics; | |
b[i_state++] = intrinsics_core->focal_xy [0] / SCALE_INTRINSICS_FOCAL_LENGTH; | |
b[i_state++] = intrinsics_core->focal_xy [1] / SCALE_INTRINSICS_FOCAL_LENGTH; | |
b[i_state++] = intrinsics_core->center_xy[0] / SCALE_INTRINSICS_CENTER_PIXEL; | |
b[i_state++] = intrinsics_core->center_xy[1] / SCALE_INTRINSICS_CENTER_PIXEL; | |
} | |
if( problem_selections.do_optimize_intrinsics_distortions ) | |
for(int i = 0; i<Ndistortions; i++) | |
b[i_state++] = intrinsics[Ncore + i] / SCALE_DISTORTION; | |
intrinsics = &intrinsics[Nintrinsics]; | |
} | |
return i_state; | |
} | |
// It is ugly to have this as separate from pack_solver_state_intrinsics(), but | |
// I am tired. THIS function uses only intrinsic arrays that respect the skipped | |
// optimization variables in problem_selections. And this function works inline | |
static int pack_solver_state_intrinsics_subset_to_subset( // out,in | |
double* b, // subset based on problem_selections | |
// in | |
const mrcal_lensmodel_t* lensmodel, | |
mrcal_problem_selections_t problem_selections, | |
int Ncameras_intrinsics ) | |
{ | |
if( !problem_selections.do_optimize_intrinsics_core && | |
!problem_selections.do_optimize_intrinsics_distortions ) | |
return 0; | |
int i_state = 0; | |
const int Nintrinsics = mrcal_lensmodel_num_params(lensmodel); | |
const int Ncore = modelHasCore_fxfycxcy(lensmodel) ? 4 : 0; | |
const int Ndistortions = Nintrinsics - Ncore; | |
for(int icam_intrinsics=0; icam_intrinsics < Ncameras_intrinsics; icam_intrinsics++) | |
{ | |
if( problem_selections.do_optimize_intrinsics_core && Ncore ) | |
{ | |
b[i_state++] /= SCALE_INTRINSICS_FOCAL_LENGTH; | |
b[i_state++] /= SCALE_INTRINSICS_FOCAL_LENGTH; | |
b[i_state++] /= SCALE_INTRINSICS_CENTER_PIXEL; | |
b[i_state++] /= SCALE_INTRINSICS_CENTER_PIXEL; | |
} | |
if( problem_selections.do_optimize_intrinsics_distortions ) | |
for(int i = 0; i<Ndistortions; i++) | |
b[i_state++] /= SCALE_DISTORTION; | |
} | |
return i_state; | |
} | |
static void pack_solver_state( // out | |
double* b, | |
// in | |
const mrcal_lensmodel_t* lensmodel, | |
const double* intrinsics, // Ncameras_intrinsics of these | |
const mrcal_pose_t* extrinsics_fromref, // Ncameras_extrinsics of these | |
const mrcal_pose_t* frames_toref, // Nframes of these | |
const mrcal_point3_t* points, // Npoints of these | |
const mrcal_calobject_warp_t* calobject_warp, // 1 of these | |
mrcal_problem_selections_t problem_selections, | |
int Ncameras_intrinsics, int Ncameras_extrinsics, int Nframes, | |
int Npoints_variable, | |
int Nstate_ref) | |
{ | |
int i_state = 0; | |
i_state += pack_solver_state_intrinsics( b, intrinsics, | |
lensmodel, problem_selections, | |
Ncameras_intrinsics ); | |
if( problem_selections.do_optimize_extrinsics ) | |
for(int icam_extrinsics=0; icam_extrinsics < Ncameras_extrinsics; icam_extrinsics++) | |
{ | |
b[i_state++] = extrinsics_fromref[icam_extrinsics].r.xyz[0] / SCALE_ROTATION_CAMERA; | |
b[i_state++] = extrinsics_fromref[icam_extrinsics].r.xyz[1] / SCALE_ROTATION_CAMERA; | |
b[i_state++] = extrinsics_fromref[icam_extrinsics].r.xyz[2] / SCALE_ROTATION_CAMERA; | |
b[i_state++] = extrinsics_fromref[icam_extrinsics].t.xyz[0] / SCALE_TRANSLATION_CAMERA; | |
b[i_state++] = extrinsics_fromref[icam_extrinsics].t.xyz[1] / SCALE_TRANSLATION_CAMERA; | |
b[i_state++] = extrinsics_fromref[icam_extrinsics].t.xyz[2] / SCALE_TRANSLATION_CAMERA; | |
} | |
if( problem_selections.do_optimize_frames ) | |
{ | |
for(int iframe = 0; iframe < Nframes; iframe++) | |
{ | |
b[i_state++] = frames_toref[iframe].r.xyz[0] / SCALE_ROTATION_FRAME; | |
b[i_state++] = frames_toref[iframe].r.xyz[1] / SCALE_ROTATION_FRAME; | |
b[i_state++] = frames_toref[iframe].r.xyz[2] / SCALE_ROTATION_FRAME; | |
b[i_state++] = frames_toref[iframe].t.xyz[0] / SCALE_TRANSLATION_FRAME; | |
b[i_state++] = frames_toref[iframe].t.xyz[1] / SCALE_TRANSLATION_FRAME; | |
b[i_state++] = frames_toref[iframe].t.xyz[2] / SCALE_TRANSLATION_FRAME; | |
} | |
for(int i_point = 0; i_point < Npoints_variable; i_point++) | |
{ | |
b[i_state++] = points[i_point].xyz[0] / SCALE_POSITION_POINT; | |
b[i_state++] = points[i_point].xyz[1] / SCALE_POSITION_POINT; | |
b[i_state++] = points[i_point].xyz[2] / SCALE_POSITION_POINT; | |
} | |
} | |
if( problem_selections.do_optimize_calobject_warp ) | |
{ | |
b[i_state++] = calobject_warp->x2 / SCALE_CALOBJECT_WARP; | |
b[i_state++] = calobject_warp->y2 / SCALE_CALOBJECT_WARP; | |
} | |
assert(i_state == Nstate_ref); | |
} | |
// Same as above, but packs/unpacks a vector instead of structures | |
void mrcal_pack_solver_state_vector( // out, in | |
double* b, // FULL state on input, unitless | |
// state on output | |
// in | |
int Ncameras_intrinsics, int Ncameras_extrinsics, | |
int Nframes, | |
int Npoints, int Npoints_fixed, | |
mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
int Npoints_variable = Npoints - Npoints_fixed; | |
int i_state = 0; | |
i_state += pack_solver_state_intrinsics_subset_to_subset( b, | |
lensmodel, problem_selections, | |
Ncameras_intrinsics ); | |
static_assert( offsetof(mrcal_pose_t, r) == 0, "mrcal_pose_t has expected structure"); | |
static_assert( offsetof(mrcal_pose_t, t) == 3*sizeof(double), "mrcal_pose_t has expected structure"); | |
if( problem_selections.do_optimize_extrinsics ) | |
for(int icam_extrinsics=0; icam_extrinsics < Ncameras_extrinsics; icam_extrinsics++) | |
{ | |
mrcal_pose_t* extrinsics_fromref = (mrcal_pose_t*)(&b[i_state]); | |
b[i_state++] = extrinsics_fromref->r.xyz[0] / SCALE_ROTATION_CAMERA; | |
b[i_state++] = extrinsics_fromref->r.xyz[1] / SCALE_ROTATION_CAMERA; | |
b[i_state++] = extrinsics_fromref->r.xyz[2] / SCALE_ROTATION_CAMERA; | |
b[i_state++] = extrinsics_fromref->t.xyz[0] / SCALE_TRANSLATION_CAMERA; | |
b[i_state++] = extrinsics_fromref->t.xyz[1] / SCALE_TRANSLATION_CAMERA; | |
b[i_state++] = extrinsics_fromref->t.xyz[2] / SCALE_TRANSLATION_CAMERA; | |
} | |
if( problem_selections.do_optimize_frames ) | |
{ | |
for(int iframe = 0; iframe < Nframes; iframe++) | |
{ | |
mrcal_pose_t* frames_toref = (mrcal_pose_t*)(&b[i_state]); | |
b[i_state++] = frames_toref->r.xyz[0] / SCALE_ROTATION_FRAME; | |
b[i_state++] = frames_toref->r.xyz[1] / SCALE_ROTATION_FRAME; | |
b[i_state++] = frames_toref->r.xyz[2] / SCALE_ROTATION_FRAME; | |
b[i_state++] = frames_toref->t.xyz[0] / SCALE_TRANSLATION_FRAME; | |
b[i_state++] = frames_toref->t.xyz[1] / SCALE_TRANSLATION_FRAME; | |
b[i_state++] = frames_toref->t.xyz[2] / SCALE_TRANSLATION_FRAME; | |
} | |
for(int i_point = 0; i_point < Npoints_variable; i_point++) | |
{ | |
mrcal_point3_t* points = (mrcal_point3_t*)(&b[i_state]); | |
b[i_state++] = points->xyz[0] / SCALE_POSITION_POINT; | |
b[i_state++] = points->xyz[1] / SCALE_POSITION_POINT; | |
b[i_state++] = points->xyz[2] / SCALE_POSITION_POINT; | |
} | |
} | |
if( problem_selections.do_optimize_calobject_warp ) | |
{ | |
mrcal_calobject_warp_t* calobject_warp = (mrcal_calobject_warp_t*)(&b[i_state]); | |
b[i_state++] = calobject_warp->x2 / SCALE_CALOBJECT_WARP; | |
b[i_state++] = calobject_warp->y2 / SCALE_CALOBJECT_WARP; | |
} | |
} | |
static int unpack_solver_state_intrinsics( // out | |
// Ncameras_intrinsics of these | |
double* intrinsics, // ALL variables. Not a subset. | |
// I don't touch the elemnents I'm | |
// not optimizing | |
// in | |
const double* b, // subset based on problem_selections | |
const mrcal_lensmodel_t* lensmodel, | |
mrcal_problem_selections_t problem_selections, | |
int intrinsics_stride, | |
int Ncameras_intrinsics ) | |
{ | |
if( !problem_selections.do_optimize_intrinsics_core && | |
!problem_selections.do_optimize_intrinsics_distortions ) | |
return 0; | |
const int Nintrinsics = mrcal_lensmodel_num_params(lensmodel); | |
const int Ncore = modelHasCore_fxfycxcy(lensmodel) ? 4 : 0; | |
int i_state = 0; | |
for(int icam_intrinsics=0; icam_intrinsics < Ncameras_intrinsics; icam_intrinsics++) | |
{ | |
if( problem_selections.do_optimize_intrinsics_core && Ncore ) | |
{ | |
intrinsics[icam_intrinsics*intrinsics_stride + 0] = b[i_state++] * SCALE_INTRINSICS_FOCAL_LENGTH; | |
intrinsics[icam_intrinsics*intrinsics_stride + 1] = b[i_state++] * SCALE_INTRINSICS_FOCAL_LENGTH; | |
intrinsics[icam_intrinsics*intrinsics_stride + 2] = b[i_state++] * SCALE_INTRINSICS_CENTER_PIXEL; | |
intrinsics[icam_intrinsics*intrinsics_stride + 3] = b[i_state++] * SCALE_INTRINSICS_CENTER_PIXEL; | |
} | |
if( problem_selections.do_optimize_intrinsics_distortions ) | |
{ | |
for(int i = 0; i<Nintrinsics-Ncore; i++) | |
intrinsics[icam_intrinsics*intrinsics_stride + Ncore + i] = b[i_state++] * SCALE_DISTORTION; | |
} | |
} | |
return i_state; | |
} | |
// It is ugly to have this as separate from unpack_solver_state_intrinsics(), | |
// but I am tired. THIS function uses only intrinsic arrays that respect the | |
// skipped optimization variables in problem_selections. And this function works | |
// inline | |
static int unpack_solver_state_intrinsics_subset_to_subset( // in,out | |
double* b, // subset based on problem_selections | |
// in | |
const mrcal_lensmodel_t* lensmodel, | |
mrcal_problem_selections_t problem_selections, | |
int Ncameras_intrinsics ) | |
{ | |
if( !problem_selections.do_optimize_intrinsics_core && | |
!problem_selections.do_optimize_intrinsics_distortions ) | |
return 0; | |
int i_state = 0; | |
const int Nintrinsics = mrcal_lensmodel_num_params(lensmodel); | |
const int Ncore = modelHasCore_fxfycxcy(lensmodel) ? 4 : 0; | |
const int Ndistortions = Nintrinsics - Ncore; | |
for(int icam_intrinsics=0; icam_intrinsics < Ncameras_intrinsics; icam_intrinsics++) | |
{ | |
if( problem_selections.do_optimize_intrinsics_core && Ncore ) | |
{ | |
b[i_state++] *= SCALE_INTRINSICS_FOCAL_LENGTH; | |
b[i_state++] *= SCALE_INTRINSICS_FOCAL_LENGTH; | |
b[i_state++] *= SCALE_INTRINSICS_CENTER_PIXEL; | |
b[i_state++] *= SCALE_INTRINSICS_CENTER_PIXEL; | |
} | |
if( problem_selections.do_optimize_intrinsics_distortions ) | |
for(int i = 0; i<Ndistortions; i++) | |
b[i_state++] *= SCALE_DISTORTION; | |
} | |
return i_state; | |
} | |
static int unpack_solver_state_extrinsics_one(// out
                                              mrcal_pose_t* extrinsic,
                                              // in
                                              const double* b)
{
    // One camera pose: 3 rotation values followed by 3 translation values.
    // Scales each component back to real units. Always consumes 6 state
    // variables
    for(int i=0; i<3; i++)
    {
        extrinsic->r.xyz[i] = b[i  ] * SCALE_ROTATION_CAMERA;
        extrinsic->t.xyz[i] = b[i+3] * SCALE_TRANSLATION_CAMERA;
    }
    return 6;
}
static int unpack_solver_state_framert_one(// out
                                           mrcal_pose_t* frame,
                                           // in
                                           const double* b)
{
    // One frame pose: 3 rotation values followed by 3 translation values.
    // Scales each component back to real units. Always consumes 6 state
    // variables
    for(int i=0; i<3; i++)
    {
        frame->r.xyz[i] = b[i  ] * SCALE_ROTATION_FRAME;
        frame->t.xyz[i] = b[i+3] * SCALE_TRANSLATION_FRAME;
    }
    return 6;
}
static int unpack_solver_state_point_one(// out
                                         mrcal_point3_t* point,
                                         // in
                                         const double* b)
{
    // One 3D point: all 3 components share the position scale. Always
    // consumes 3 state variables
    for(int i=0; i<3; i++)
        point->xyz[i] = b[i] * SCALE_POSITION_POINT;
    return 3;
}
static int unpack_solver_state_calobject_warp(// out
                                              mrcal_calobject_warp_t* calobject_warp,
                                              // in
                                              const double* b)
{
    // The calibration-object warp: 2 values, both using the same scale.
    // Always consumes 2 state variables
    calobject_warp->x2 = b[0] * SCALE_CALOBJECT_WARP;
    calobject_warp->y2 = b[1] * SCALE_CALOBJECT_WARP;
    return 2;
}
// From unit-scale values to real values. Optimizer sees unit-scale values | |
static void unpack_solver_state( // out | |
// ALL intrinsics; whether we're optimizing | |
// them or not. Don't touch the parts of this | |
// array we're not optimizing | |
double* intrinsics_all, // Ncameras_intrinsics of these | |
mrcal_pose_t* extrinsics_fromref, // Ncameras_extrinsics of these | |
mrcal_pose_t* frames_toref, // Nframes of these | |
mrcal_point3_t* points, // Npoints of these | |
mrcal_calobject_warp_t* calobject_warp, // 1 of these | |
// in | |
const double* b, | |
const mrcal_lensmodel_t* lensmodel, | |
mrcal_problem_selections_t problem_selections, | |
int Ncameras_intrinsics, int Ncameras_extrinsics, int Nframes, int Npoints_variable, | |
int Nstate_ref) | |
{ | |
int i_state = unpack_solver_state_intrinsics(intrinsics_all, | |
b, lensmodel, problem_selections, | |
mrcal_lensmodel_num_params(lensmodel), | |
Ncameras_intrinsics); | |
if( problem_selections.do_optimize_extrinsics ) | |
for(int icam_extrinsics=0; icam_extrinsics < Ncameras_extrinsics; icam_extrinsics++) | |
i_state += unpack_solver_state_extrinsics_one( &extrinsics_fromref[icam_extrinsics], &b[i_state] ); | |
if( problem_selections.do_optimize_frames ) | |
{ | |
for(int iframe = 0; iframe < Nframes; iframe++) | |
i_state += unpack_solver_state_framert_one( &frames_toref[iframe], &b[i_state] ); | |
for(int i_point = 0; i_point < Npoints_variable; i_point++) | |
i_state += unpack_solver_state_point_one( &points[i_point], &b[i_state] ); | |
} | |
if( problem_selections.do_optimize_calobject_warp ) | |
i_state += unpack_solver_state_calobject_warp(calobject_warp, &b[i_state]); | |
assert(i_state == Nstate_ref); | |
} | |
// Same as above, but packs/unpacks a vector instead of structures | |
void mrcal_unpack_solver_state_vector( // out, in | |
double* b, // unitless state on input, | |
// scaled, meaningful state on | |
// output | |
// in | |
int Ncameras_intrinsics, int Ncameras_extrinsics, | |
int Nframes, | |
int Npoints, int Npoints_fixed, | |
mrcal_problem_selections_t problem_selections, | |
const mrcal_lensmodel_t* lensmodel) | |
{ | |
int Npoints_variable = Npoints - Npoints_fixed; | |
int i_state = | |
unpack_solver_state_intrinsics_subset_to_subset(b, | |
lensmodel, problem_selections, | |
Ncameras_intrinsics); | |
if( problem_selections.do_optimize_extrinsics ) | |
{ | |
static_assert( offsetof(mrcal_pose_t, r) == 0, | |
"mrcal_pose_t has expected structure"); | |
static_assert( offsetof(mrcal_pose_t, t) == 3*sizeof(double), | |
"mrcal_pose_t has expected structure"); | |
mrcal_pose_t* extrinsics_fromref = (mrcal_pose_t*)(&b[i_state]); | |
for(int icam_extrinsics=0; icam_extrinsics < Ncameras_extrinsics; icam_extrinsics++) | |
i_state += unpack_solver_state_extrinsics_one( &extrinsics_fromref[icam_extrinsics], &b[i_state] ); | |
} | |
if( problem_selections.do_optimize_frames ) | |
{ | |
mrcal_pose_t* frames_toref = (mrcal_pose_t*)(&b[i_state]); | |
for(int iframe = 0; iframe < Nframes; iframe++) | |
i_state += unpack_solver_state_framert_one( &frames_toref[iframe], &b[i_state] ); | |
mrcal_point3_t* points = (mrcal_point3_t*)(&b[i_state]); | |
for(int i_point = 0; i_point < Npoints_variable; i_point++) | |
i_state += unpack_solver_state_point_one( &points[i_point], &b[i_state] ); | |
} | |
if( problem_selections.do_optimize_calobject_warp ) | |
{ | |
mrcal_calobject_warp_t* calobject_warp = (mrcal_calobject_warp_t*)(&b[i_state]); | |
i_state += unpack_solver_state_calobject_warp(calobject_warp, &b[i_state]); | |
} | |
} | |