diff --git a/NEWS.md b/NEWS.md
index 42f86622e..1ffb9fc56 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -19,6 +19,7 @@ The list below highlights major breaking changes, and please note that significa
 - Deprecating use of `ensureAllInitialized!`, use `initAll!` instead.
 - New helper function `randToPoints(::SamplableBelief, N=1)::Vector{P}` to help with `getSample` for cases with new `ManifoldKernelDensity` beliefs for manifolds containing points of type `P`.
 - Upstream `calcHelix_T` canonical generator utility from RoME.jl.
+- Deserialization of factors with DFG now requires a new API, and changes the `solverData` and CCW type stored in the factor.
 
 # Major changes in v0.24
 
diff --git a/Project.toml b/Project.toml
index fbb13ddc0..9b8f83692 100644
--- a/Project.toml
+++ b/Project.toml
@@ -47,7 +47,7 @@ ApproxManifoldProducts = "0.4.11"
 BSON = "0.2, 0.3"
 Combinatorics = "1.0"
 DataStructures = "0.16, 0.17, 0.18"
-DistributedFactorGraphs = "0.15"
+DistributedFactorGraphs = "0.15.1"
 Distributions = "0.24, 0.25"
 DocStringExtensions = "0.8"
 FileIO = "1"
diff --git a/src/ApproxConv.jl b/src/ApproxConv.jl
index 9ae95e758..d008a1c99 100644
--- a/src/ApproxConv.jl
+++ b/src/ApproxConv.jl
@@ -399,9 +399,7 @@ function evalPotentialSpecific( Xi::AbstractVector{<:DFGVariable},
   mani = getManifold(getVariableType(Xi[sfidx]))
 
   # perform the numeric solutions on the indicated elements
-  # error("ccwl.xDim=$(ccwl.xDim)")
   # FIXME consider repeat solve as workaround for inflation off-zero
-  # @info "EVALSPEC END" ccwl.varidx string(ccwl.params) string(allelements)
   computeAcrossHypothesis!( ccwl, allelements, activehypo, certainidx, sfidx, maxlen, mani,
                             spreadNH=spreadNH, inflateCycles=inflateCycles, skipSolve=skipSolve,
diff --git a/src/CalcFactor.jl b/src/CalcFactor.jl
index 25c698501..2137fd907 100644
--- a/src/CalcFactor.jl
+++ b/src/CalcFactor.jl
@@ -114,11 +114,12 @@ function calcFactorResidualTemporary( fct::AbstractRelative,
                                       measurement::Tuple,
                                       varTypes::Tuple,
                                       pts::Tuple;
-                                      tfg::AbstractDFG = initfg() )
+                                      tfg::AbstractDFG = initfg(),
+                                      _blockRecursion::Bool=false )
   #
   # build a new temporary graph
-  _, _dfgfct = _buildGraphByFactorAndTypes!(fct, varTypes, pts, dfg=tfg)
+  _, _dfgfct = _buildGraphByFactorAndTypes!(fct, varTypes, pts, dfg=tfg, _blockRecursion=_blockRecursion)
 
   # get a fresh measurement if needed
   _measurement = if length(measurement) != 0
diff --git a/src/Deprecated.jl b/src/Deprecated.jl
index 5b6ee2f33..7e830c50c 100644
--- a/src/Deprecated.jl
+++ b/src/Deprecated.jl
@@ -25,6 +25,8 @@ end
 ## Deprecate code below before v0.27
 ##==============================================================================
 
+@deprecate calcPerturbationFromVariable( fgc::FactorGradientsCached!, fromVar::Int, infoPerCoord::AbstractVector; tol::Real=0.02*fgc._h ) calcPerturbationFromVariable(fgc, [fromVar => infoPerCoord;], tol=tol )
+
 @deprecate findRelatedFromPotential(w...;kw...) (calcProposalBelief(w...;kw...),)
 
 # function generateNullhypoEntropy( val::AbstractMatrix{<:Real},
diff --git a/src/DispatchPackedConversions.jl b/src/DispatchPackedConversions.jl
index 84ba0f12f..faf9f95a2 100644
--- a/src/DispatchPackedConversions.jl
+++ b/src/DispatchPackedConversions.jl
@@ -63,14 +63,17 @@ end
 
 After deserializing a factor using decodePackedType, use this to completely rebuild the factor's CCW and user data.
 """
-function rebuildFactorMetadata!(dfg::AbstractDFG{SolverParams}, factor::DFGFactor)
+function rebuildFactorMetadata!(dfg::AbstractDFG{SolverParams},
+                                factor::DFGFactor,
+                                neighbors = map(vId->getVariable(dfg, vId), getNeighbors(dfg, factor)) )
+  #
   # Set up the neighbor data
-  neighbors = map(vId->getVariable(dfg, vId), getNeighbors(dfg, factor))
-  neighborUserData = map(v->getVariableType(v), neighbors)
 
   # Rebuilding the CCW
   fsd = getSolverData(factor)
-  ccw_new = getDefaultFactorData( dfg, neighbors, getFactorType(factor),
+  fnd_new = getDefaultFactorData( dfg,
+                                  neighbors,
+                                  getFactorType(factor),
                                   multihypo=fsd.multihypo,
                                   nullhypo=fsd.nullhypo,
                                   # special inflation override
@@ -79,7 +82,29 @@ function rebuildFactorMetadata!(dfg::AbstractDFG{SolverParams}, factor::DFGFacto
                                   potentialused=fsd.potentialused,
                                   edgeIDs=fsd.edgeIDs,
                                   solveInProgress=fsd.solveInProgress)
-  setSolverData!(factor, ccw_new)
+  #
+
+  factor_ = if typeof(fnd_new) != typeof(getSolverData(factor))
+    # must change the type of factor solver data FND{CCW{...}}
+    # create a new factor
+    factor__ = DFGFactor(getLabel(factor),
+                         getTimestamp(factor),
+                         factor.nstime,
+                         getTags(factor),
+                         fnd_new,
+                         getSolvable(factor),
+                         Tuple(getVariableOrder(factor)))
+    #
+
+    # replace old factor in dfg with a new one
+    deleteFactor!(dfg, factor)
+    addFactor!(dfg, factor__)
+
+    factor__
+  else
+    setSolverData!(factor, fnd_new)
+    factor
+  end
 
   #... Copying neighbor data into the factor?
   # JT TODO it looks like this is already updated in getDefaultFactorData -> prepgenericconvolution
@@ -88,7 +113,7 @@ function rebuildFactorMetadata!(dfg::AbstractDFG{SolverParams}, factor::DFGFacto
   #   ccw_new.fnc.cpt[i].factormetadata.variableuserdata = deepcopy(neighborUserData)
   # end
 
-  return factor
+  return factor_
 end
 
diff --git a/src/FactorGraph.jl b/src/FactorGraph.jl
index 7f30d83b9..cdca62132 100644
--- a/src/FactorGraph.jl
+++ b/src/FactorGraph.jl
@@ -605,7 +605,7 @@ function calcZDim(cf::CalcFactor{T}) where {T <: AbstractFactor}
     M = getManifold(cf.factor)
     return manifold_dimension(M)
   catch
-    @warn "no method getManifold(::$T), calcZDim will attempt legacy length(sample) method instead"
+    @warn "no method getManifold(::$(string(T))), calcZDim will attempt legacy length(sample) method instead"
   end
 end
 
@@ -620,34 +620,51 @@ calcZDim(cf::CalcFactor{<:GenericMarginal}) = 0
 
 calcZDim(cf::CalcFactor{<:ManifoldPrior}) = manifold_dimension(cf.factor.M)
 
+# return a BitVector masking the fractional portion, assuming converted 0's on 100% confident variables
+_getFractionalVars(varList::Union{<:Tuple, <:AbstractVector}, mh::Nothing) = zeros(length(varList)) .== 1
+_getFractionalVars(varList::Union{<:Tuple, <:AbstractVector}, mh::Categorical) = 0 .< mh.p
+
+function _selectHypoVariables(allVars::Union{<:Tuple, <:AbstractVector},
+                              mh::Categorical,
+                              sel::Integer = rand(mh) )
+  #
+  mask = mh.p .≈ 0.0
+  mask[sel] = true
+  (1:length(allVars))[mask]
+end
+
+_selectHypoVariables(allVars::Union{<:Tuple, <:AbstractVector},mh::Nothing,sel::Integer=0 ) = collect(1:length(allVars))
+
 function prepgenericconvolution(Xi::Vector{<:DFGVariable},
                                 usrfnc::T;
                                 multihypo::Union{Nothing, Distributions.Categorical}=nothing,
                                 nullhypo::Real=0.0,
                                 threadmodel=MultiThreaded,
-                                inflation::Real=0.0 ) where {T <: FunctorInferenceType}
+                                inflation::Real=0.0,
+                                _blockRecursion::Bool=false ) where {T <: AbstractFactor}
   #
   pttypes = getVariableType.(Xi) .|> getPointType
   PointType = 0 < length(pttypes) ? pttypes[1] : Vector{Float64}
   # FIXME stop using Any, see #1321
-  varParams = Vector{Vector{Any}}()
-  maxlen, sfidx, mani = prepareparamsarray!(varParams, Xi, nothing, 0) # Nothing for init.
+  varParamsAll = Vector{Vector{Any}}()
+  maxlen, sfidx, mani = prepareparamsarray!(varParamsAll, Xi, nothing, 0) # Nothing for init.
   # standard factor metadata
   sflbl = 0==length(Xi) ? :null : getLabel(Xi[end])
-  fmd = FactorMetadata(Xi, getLabel.(Xi), varParams, sflbl, nothing)
+  fmd = FactorMetadata(Xi, getLabel.(Xi), varParamsAll, sflbl, nothing)
   # create a temporary CalcFactor object for extracting the first sample
   # TODO, deprecate this: guess measurement points type
   # MeasType = Vector{Float64} # FIXME use `usrfnc` to get this information instead
-  _cf = CalcFactor( usrfnc, fmd, 0, 1, nothing, varParams) # (Vector{MeasType}(),)
+  _cf = CalcFactor( usrfnc, fmd, 0, 1, nothing, varParamsAll) # (Vector{MeasType}(),)
   # get a measurement sample
   meas_single = sampleFactor(_cf, 1)
+  # get the measurement dimension
   zdim = calcZDim(_cf)
-  # zdim = T != GenericMarginal ? size(getSample(usrfnc, 2)[1],1) : 0
+  # some hypo resolution
   certainhypo = multihypo !== nothing ? collect(1:length(multihypo.p))[multihypo.p .== 0.0] : collect(1:length(Xi))
 
   # sort out partialDims here
@@ -662,19 +679,32 @@ function prepgenericconvolution(Xi::Vector{<:DFGVariable},
   varTypes::Vector{DataType} = typeof.(getVariableType.(Xi))
   gradients = nothing
   # prepare new cached gradient lambdas (attempt)
-  # try
-  #   measurement = tuple(((x->x[1]).(meas_single))...)
-  #   pts = tuple(((x->x[1]).(varParams))...)
-  #   gradients = FactorGradientsCached!(usrfnc, varTypes, measurement, pts);
-  # catch e
-  #   @warn "Unable to create measurements and gradients for $usrfnc during prep of CCW, falling back on no-partial information assumption."
-  # end
+  try
+    # this try block definitely fails on deserialization, due to empty DFGVariable[] vector here:
+    # https://github.com/JuliaRobotics/IncrementalInference.jl/blob/db7ff84225cc848c325e57b5fb9d0d85cb6c79b8/src/DispatchPackedConversions.jl#L46
+    # also https://github.com/JuliaRobotics/DistributedFactorGraphs.jl/issues/590#issuecomment-891450762
+    if (!_blockRecursion) && usrfnc isa AbstractRelative
+      # take first value from each measurement-tuple-element
+      measurement_ = map(x->x[1], meas_single)
+      # compensate if no info available during deserialization
+      # take the first value from each variable param
+      pts_ = map(x->x[1], varParamsAll)
+      # FIXME, only using first meas and params values at this time...
+      # NOTE, must block recursion here, since FGC uses this function to calculate numerical gradients on a temp fg.
+      # assume for now fractional-var in multihypo have same varType
+      hypoidxs = _selectHypoVariables(pts_, multihypo)
+      gradients = FactorGradientsCached!(usrfnc, tuple(varTypes[hypoidxs]...), measurement_, tuple(pts_[hypoidxs]...), _blockRecursion=true);
+    end
+  catch e
+    @warn "Unable to create measurements and gradients for $usrfnc during prep of CCW, falling back on no-partial information assumption. Enable @debug printing to see the error."
+    @debug(e)
+  end
 
   ccw = CommonConvWrapper( usrfnc,
                            PointType[],
                            zdim,
-                           varParams,
+                           varParamsAll,
                            fmd,
                            specialzDim = hasfield(T, :zDim),
                            partial = ispartl,
@@ -707,7 +737,8 @@ function getDefaultFactorData(dfg::AbstractDFG,
                              potentialused::Bool = false,
                              edgeIDs = Int[],
                              solveInProgress = 0,
-                             inflation::Real=getSolverParams(dfg).inflation ) where T <: FunctorInferenceType
+                             inflation::Real=getSolverParams(dfg).inflation,
+                             _blockRecursion::Bool=false ) where T <: FunctorInferenceType
   #
   # prepare multihypo particulars
@@ -715,7 +746,7 @@ function getDefaultFactorData(dfg::AbstractDFG,
   mhcat, nh = parseusermultihypo(multihypo, nullhypo)
 
   # allocate temporary state for convolutional operations (not stored)
-  ccw = prepgenericconvolution(Xi, usrfnc, multihypo=mhcat, nullhypo=nh, threadmodel=threadmodel, inflation=inflation)
+  ccw = prepgenericconvolution(Xi, usrfnc, multihypo=mhcat, nullhypo=nh, threadmodel=threadmodel, inflation=inflation, _blockRecursion=_blockRecursion)
 
   # and the factor data itself
   return FunctionNodeData{typeof(ccw)}(eliminated, potentialused, edgeIDs, ccw, multihypo, ccw.certainhypo, nullhypo, solveInProgress, inflation)
@@ -1177,7 +1208,7 @@ Experimental
 - `inflation`, to better disperse kernels before convolution solve, see IIF #1051.
 """
 function DFG.addFactor!(dfg::AbstractDFG,
-                        Xi::Vector{<:DFGVariable},
+                        Xi::AbstractVector{<:DFGVariable},
                         usrfnc::AbstractFactor;
                         multihypo::Vector{Float64}=Float64[],
                         nullhypo::Float64=0.0,
@@ -1188,7 +1219,8 @@ function DFG.addFactor!(dfg::AbstractDFG,
                         threadmodel=SingleThreaded,
                         suppressChecks::Bool=false,
                         inflation::Real=getSolverParams(dfg).inflation,
-                        namestring::Symbol = assembleFactorName(dfg, Xi) )
+                        namestring::Symbol = assembleFactorName(dfg, Xi),
+                        _blockRecursion::Bool=false )
   #
   # depcrecation
 
@@ -1199,7 +1231,8 @@ function DFG.addFactor!(dfg::AbstractDFG,
                                   multihypo=multihypo,
                                   nullhypo=nullhypo,
                                   threadmodel=threadmodel,
-                                  inflation=inflation)
+                                  inflation=inflation,
+                                  _blockRecursion=_blockRecursion)
 
  newFactor = DFGFactor(Symbol(namestring),
                        varOrderLabels,
                        solverData;
@@ -1208,7 +1241,7 @@ function DFG.addFactor!(dfg::AbstractDFG,
                        timestamp=timestamp)
  #
- success = DFG.addFactor!(dfg, newFactor)
+ success = addFactor!(dfg, newFactor)
 
  # TODO: change this operation to update a conditioning variable
  graphinit && doautoinit!(dfg, Xi, singles=false)
@@ -1216,8 +1249,15 @@ function DFG.addFactor!(dfg::AbstractDFG,
  return newFactor
 end
 
+function _checkFactorAdd(usrfnc, xisyms)
+  if length(xisyms) == 1 && !(usrfnc isa AbstractPrior) && !(usrfnc isa Mixture)
+    @warn("Listing only one variable $xisyms for non-unary factor type $(typeof(usrfnc))")
+  end
+  nothing
+end
+
 function DFG.addFactor!(dfg::AbstractDFG,
-                        xisyms::Vector{Symbol},
+                        xisyms::AbstractVector{Symbol},
                         usrfnc::AbstractFactor;
                         suppressChecks::Bool=false,
                         kw... )
@@ -1233,12 +1273,12 @@ function DFG.addFactor!(dfg::AbstractDFG,
   # depcrecation
 
   # basic sanity check for unary vs n-ary
-  if !suppressChecks && length(xisyms) == 1 && !(usrfnc isa AbstractPrior) && !(usrfnc isa Mixture)
-    @warn("Listing only one variable $xisyms for non-unary factor type $(typeof(usrfnc))")
+  if !suppressChecks
+    _checkFactorAdd(usrfnc, xisyms)
   end
 
-  variables = getVariable.(dfg, xisyms)
-  # verts = map(vid -> DFG.getVariable(dfg, vid), xisyms)
+  # variables = getVariable.(dfg, xisyms)
+  variables = map(vid -> getVariable(dfg, vid), xisyms)
   addFactor!(dfg, variables, usrfnc; suppressChecks=suppressChecks, kw... ) # multihypo=multihypo, nullhypo=nullhypo, solvable=solvable, tags=tags, graphinit=graphinit, threadmodel=threadmodel, timestamp=timestamp, inflation=inflation )
 end
 
diff --git a/src/Factors/GenericFunctions.jl b/src/Factors/GenericFunctions.jl
index 8076258ba..2325645f7 100644
--- a/src/Factors/GenericFunctions.jl
+++ b/src/Factors/GenericFunctions.jl
@@ -71,7 +71,7 @@ export ManifoldFactor
 # For now, `Z` is on the tangent space in coordinates at the point used in the factor.
 # For groups just the lie algebra
 # As transition it will be easier this way, we can reevaluate
-struct ManifoldFactor{M <: AbstractManifold, T <: SamplableBelief} <: AbstractManifoldMinimize#AbstractFactor
+struct ManifoldFactor{M <: AbstractManifold, T <: SamplableBelief} <: AbstractManifoldMinimize #AbstractFactor
   M::M
   Z::T
 end
diff --git a/src/NumericalCalculations.jl b/src/NumericalCalculations.jl
index a5db42146..579bc8174 100644
--- a/src/NumericalCalculations.jl
+++ b/src/NumericalCalculations.jl
@@ -199,6 +199,7 @@ function _buildCalcFactorLambdaSample(ccwl::CommonConvWrapper,
     # slack is used to shift the residual away from the natural "zero" tension position of a factor,
     # this is useful when calculating factor gradients at a variety of param locations resulting in "non-zero slack" of the residual.
     # see `IIF.calcFactorResidualTemporary`
+    # NOTE this minus operation assumes _slack is either coordinate or tangent vector element (not a manifold or group element)
     () -> cf( (_getindextuple(measurement_, smpid))..., (getindex.(varParams, smpid))... ) .- _slack
   end
 
diff --git a/src/SolverUtilities.jl b/src/SolverUtilities.jl
index 77bf60cb7..7801e3a1a 100644
--- a/src/SolverUtilities.jl
+++ b/src/SolverUtilities.jl
@@ -165,12 +165,13 @@ function _buildGraphByFactorAndTypes!(fct::AbstractFactor,
                                       _allVars::AbstractVector{Symbol} = sortDFG(ls(dfg, destPattern)),
                                       currLabel::Symbol = 0 < length(_allVars) ? _allVars[end] : Symbol(destPrefix, 0),
                                       currNumber::Integer = reverse(match(r"\d+", reverse(string(currLabel))).match) |> x->parse(Int,x),
-                                      graphinit::Bool = false )
+                                      graphinit::Bool = false,
+                                      _blockRecursion::Bool=false )
  #
  # TODO generalize beyond binary
  len = length(varTypes)
-  vars = [Symbol(destPrefix, s_) for s_ in (currNumber .+ (1:len))]
+  vars = Symbol[Symbol(destPrefix, s_) for s_ in (currNumber .+ (1:len))]
  for (s_, vTyp) in enumerate(varTypes)
    # add the necessary variables
    exists(dfg, vars[s_]) ? nothing : addVariable!(dfg, vars[s_], vTyp)
@@ -178,7 +179,7 @@ function _buildGraphByFactorAndTypes!(fct::AbstractFactor,
    ((0 < length(pts)) && (pts[s_] isa Nothing)) ? nothing : initManual!(dfg, vars[s_], [pts[s_],], solveKey, bw=ones(getDimension(vTyp)))
  end
  # if newFactor then add the factor on vars, else assume only one existing factor between vars
-  _dfgfct = newFactor ? addFactor!(dfg, vars, fct, graphinit=graphinit) : getFactor(dfg, intersect((ls.(dfg, vars))...)[1] )
+  _dfgfct = newFactor ? addFactor!(dfg, vars, fct, graphinit=graphinit, _blockRecursion=_blockRecursion) : getFactor(dfg, intersect((ls.(dfg, vars))...)[1] )
 
  return dfg, _dfgfct
 end
 
diff --git a/src/services/FactorGradients.jl b/src/services/FactorGradients.jl
index dfd1c8133..f3e71cc3c 100644
--- a/src/services/FactorGradients.jl
+++ b/src/services/FactorGradients.jl
@@ -11,8 +11,9 @@ function _prepFactorGradientLambdas(fct::Union{<:AbstractRelativeMinimize,<:Abst
                                     varTypes::Tuple,
                                     pts::Tuple;
                                     tfg::AbstractDFG = initfg(),
+                                    _blockRecursion::Bool=true,
                                     # gradients relative to coords requires
-                                    slack_resid = calcFactorResidualTemporary(fct, measurement, varTypes, pts, tfg=tfg),
+                                    slack_resid = calcFactorResidualTemporary(fct, measurement, varTypes, pts, tfg=tfg, _blockRecursion=_blockRecursion),
                                     # numerical diff perturbation size
                                     h::Real=1e-4 )
   #
@@ -76,14 +77,15 @@ function FactorGradientsCached!(fct::Union{<:AbstractRelativeMinimize, <:Abstrac
                                 varTypes::Tuple,
                                 meas_single::Tuple,
                                 pts::Tuple;
-                                h::Real=1e-4 )
+                                h::Real=1e-4,
+                                _blockRecursion::Bool=true )
   #
   # working memory location for computations
   tfg = initfg()
 
   # permanent location for points and later reference
   # generate the necessary lambdas
-  J__, λ_fncs, λ_sizes, slack_resid = _prepFactorGradientLambdas(fct, meas_single, varTypes, pts; tfg=tfg, h=1e-4);
+  J__, λ_fncs, λ_sizes, slack_resid = _prepFactorGradientLambdas(fct, meas_single, varTypes, pts; tfg=tfg, _blockRecursion=_blockRecursion, h=1e-4);
 
   # get the one factor in tfg
   fctsyms = lsf(tfg)
@@ -106,6 +108,12 @@ end
 
 getCoordSizes(fgc::FactorGradientsCached!) = fgc._coord_sizes
 
+_setFGCSlack!(fgc::FactorGradientsCached!{F}, slack) where F = _setPointsMani!(fgc.slack_residual, slack)
+
+function _setFGCSlack!(fgc::FactorGradientsCached!{F,S}, slack::Number) where {F,S<:Number}
+  fgc.slack_residual = slack
+end
+
 function (fgc::FactorGradientsCached!)(meas_pts...)
   # separate the measurements (forst) from the variable points (rest)
   lenm = length(fgc.measurement)
@@ -123,7 +131,8 @@ function (fgc::FactorGradientsCached!)(meas_pts...)
   varTypes = tuple(getVariableType.(getVariable.(fgc._tfg, getVariableOrder(fgc.dfgfct)))...)
   new_slack = calcFactorResidualTemporary(fct, measurement, varTypes, pts; tfg=fgc._tfg)
   # TODO make sure slack_residual is properly wired up with all the lambda functions as expected
-  _setPointsMani!(fgc.slack_residual, new_slack)
+  _setFGCSlack!(fgc, new_slack)
+  # _setPointsMani!(fgc.slack_residual, new_slack)
 
   # set new points in preparation for new gradient calculation
   for (s,pt) in enumerate(meas_pts[(lenm+1):end])
@@ -174,8 +183,8 @@ end
 """
     $SIGNATURES
 
-Return a tuple of infoPerCoord vectors that result from input `fromVar::Int`'s `infoPerCoord`.
-For example, a binary `LinearRelative` factor has a one to one influence from the input to the one other variable.
+Return a tuple of infoPerCoord vectors that result from the input variables, given as a vector of `::Pair`s, i.e. `fromVar::Int => infoPerCoord`.
+For example, a binary `LinearRelative` factor has a one-to-one influence from the input to the one other variable.
 
 Notes
 
@@ -197,7 +206,7 @@ fgc = FactorGradientsCached!(fct, varTypes, measurement, pts);
 
 fgc(measurement..., pts...)
 
 # check the perturbation influence through gradients on factor
-ret = calcPerturbationFromVariable(fgc, 1, [1;1])
+ret = calcPerturbationFromVariable(fgc, [1=>[1;1]])
 @assert isapprox(ret[2], [1;1])
 ```
 
@@ -210,20 +219,23 @@ Related
 
 [`FactorGradientsCached!`](@ref), [`checkGradientsToleranceMask`](@ref)
 """
 function calcPerturbationFromVariable(fgc::FactorGradientsCached!,
-                                      fromVar::Int,
-                                      infoPerCoord::AbstractVector;
+                                      from_var_ipc::AbstractVector{<:Pair};
                                       tol::Real=0.02*fgc._h )
   #
   blkszs = getCoordSizes(fgc)
-  @assert blkszs[fromVar] == length(infoPerCoord) "Expecting incoming length(infoPerCoord) to equal the block size for variable $fromVar, as per factor used to construct the FactorGradientsCached!: $(getFactorType(fgc.dfgfct))"
 
   # assume projection through pp-factor from first to second variable
   # ipc values from first variable belief, and zero for second
   ipcAll = zeros(sum(blkszs))
-
-  # nextVar = minimum([fromVar+1; length(blkszs)])
-  curr_b = sum(blkszs[1:(fromVar-1)]) + 1
-  curr_e = sum(blkszs[1:fromVar])
-  ipcAll[curr_b:curr_e] .= infoPerCoord
+
+  # set any incoming infoPerCoord values
+  for (fromVar, infoPC) in from_var_ipc
+    # check on sizes with print warning
+    (blkszs[fromVar] == length(infoPC)) ? nothing : @warn("Expecting incoming length(infoPerCoord) to equal the block size for variable $fromVar, as per factor used to construct the FactorGradientsCached!: $(getFactorType(fgc.dfgfct))")
+    # get range of interest
+    curr_b = sum(blkszs[1:(fromVar-1)]) + 1
+    curr_e = sum(blkszs[1:fromVar])
+    ipcAll[curr_b:curr_e] .= infoPC
+  end
 
   # clamp gradients below numerical solver resolution
   mask = checkGradientsToleranceMask(fgc, tol=tol)
@@ -251,4 +263,5 @@ function calcPerturbationFromVariable(fgc::FactorGradientsCached!,
 end
 
+
 #
\ No newline at end of file
diff --git a/test/testSaveLoadDFG.jl b/test/testSaveLoadDFG.jl
index a83c13360..6a2aeb740 100644
--- a/test/testSaveLoadDFG.jl
+++ b/test/testSaveLoadDFG.jl
@@ -2,73 +2,80 @@ using IncrementalInference
 using Test
 
+##
+
 @testset "saving to and loading from FileDFG" begin
+##
+
+fg = generateCanonicalFG_Kaess()
+addVariable!(fg, :x4, ContinuousScalar)
+addFactor!(fg, [:x2;:x3;:x4], LinearRelative(Normal()), multihypo=[1.0;0.6;0.4])
 
-  fg = generateCanonicalFG_Kaess()
-  addVariable!(fg, :x4, ContinuousScalar)
-  addFactor!(fg, [:x2;:x3;:x4], LinearRelative(Normal()), multihypo=[1.0;0.6;0.4])
+saveFolder = "/tmp/dfg_test"
+saveDFG(fg, saveFolder)
+# VERSION above 1.0.x hack required since Julia 1.0 does not seem to have function `splitpath`
 
-  saveFolder = "/tmp/dfg_test"
-  saveDFG(fg, saveFolder)
-  # VERSION above 1.0.x hack required since Julia 1.0 does not seem to havfunction `splitpath`
-  if v"1.1" <= VERSION
-    retDFG = initfg()
-    retDFG = loadDFG!(retDFG, saveFolder)
-    Base.rm(saveFolder*".tar.gz")
+retDFG = initfg()
+retDFG = loadDFG!(retDFG, saveFolder)
+Base.rm(saveFolder*".tar.gz")
 
-    @test symdiff(ls(fg), ls(retDFG)) == []
-    @test symdiff(lsf(fg), lsf(retDFG)) == []
+@test symdiff(ls(fg), ls(retDFG)) == []
+@test symdiff(lsf(fg), lsf(retDFG)) == []
 
-    @show getFactor(fg, :x2x3x4f1).solverData.multihypo
-    @show getFactor(retDFG, :x2x3x4f1).solverData.multihypo
+@show getFactor(fg, :x2x3x4f1).solverData.multihypo
+@show getFactor(retDFG, :x2x3x4f1).solverData.multihypo
 
-    # check for match
-    @test getFactor(fg, :x2x3x4f1).solverData.multihypo - getFactor(retDFG, :x2x3x4f1).solverData.multihypo |> norm < 1e-10
-    @test getFactor(fg, :x2x3x4f1).solverData.certainhypo - getFactor(retDFG, :x2x3x4f1).solverData.certainhypo |> norm < 1e-10
-  end
+# check for match
+@test getFactor(fg, :x2x3x4f1).solverData.multihypo - getFactor(retDFG, :x2x3x4f1).solverData.multihypo |> norm < 1e-10
+@test getFactor(fg, :x2x3x4f1).solverData.certainhypo - getFactor(retDFG, :x2x3x4f1).solverData.certainhypo |> norm < 1e-10
 
+##
 end
 
 @testset "saving to and loading from FileDFG with nullhypo, eliminated, solveInProgress" begin
 
-  fg = generateCanonicalFG_Kaess()
-  addVariable!(fg, :x4, ContinuousScalar)
-  addFactor!(fg, [:x2;:x3;:x4], LinearRelative(Normal()), multihypo=[1.0;0.6;0.4])
-  addFactor!(fg, [:x1;], Prior(Normal(10,1)), nullhypo=0.5)
+##
+
+fg = generateCanonicalFG_Kaess()
+addVariable!(fg, :x4, ContinuousScalar)
+addFactor!(fg, [:x2;:x3;:x4], LinearRelative(Normal()), multihypo=[1.0;0.6;0.4])
+addFactor!(fg, [:x1;], Prior(Normal(10,1)), nullhypo=0.5)
+
+solveTree!(fg)
 
-  solveTree!(fg)
+#manually change a few fields to test if they are preserved
+fa = getFactor(fg, :x2x3x4f1)
+fa.solverData.eliminated = true
+fa.solverData.solveInProgress = 1
+fa.solverData.nullhypo = 0.5
 
-  #manually change a few fields to test if they are preserved
-  fa = getFactor(fg, :x2x3x4f1)
-  fa.solverData.eliminated = true
-  fa.solverData.solveInProgress = 1
-  fa.solverData.nullhypo = 0.5
+saveFolder = "/tmp/dfg_test"
+saveDFG(fg, saveFolder)
 
-  saveFolder = "/tmp/dfg_test"
-  saveDFG(fg, saveFolder)
+retDFG = initfg()
+loadDFG!(retDFG, saveFolder)
+Base.rm(saveFolder*".tar.gz")
 
-  retDFG = initfg()
-  loadDFG!(retDFG, saveFolder)
-  Base.rm(saveFolder*".tar.gz")
+@test issetequal(ls(fg), ls(retDFG))
+@test issetequal(lsf(fg), lsf(retDFG))
 
-  @test issetequal(ls(fg), ls(retDFG))
-  @test issetequal(lsf(fg), lsf(retDFG))
+@show getFactor(fg, :x2x3x4f1).solverData.multihypo
+@show getFactor(retDFG, :x2x3x4f1).solverData.multihypo
 
-  @show getFactor(fg, :x2x3x4f1).solverData.multihypo
-  @show getFactor(retDFG, :x2x3x4f1).solverData.multihypo
+# check for match
+@test isapprox(getFactor(fg, :x2x3x4f1).solverData.multihypo, getFactor(retDFG, :x2x3x4f1).solverData.multihypo)
+@test isapprox(getFactor(fg, :x2x3x4f1).solverData.certainhypo, getFactor(retDFG, :x2x3x4f1).solverData.certainhypo)
 
-  # check for match
-  @test isapprox(getFactor(fg, :x2x3x4f1).solverData.multihypo, getFactor(retDFG, :x2x3x4f1).solverData.multihypo)
-  @test isapprox(getFactor(fg, :x2x3x4f1).solverData.certainhypo, getFactor(retDFG, :x2x3x4f1).solverData.certainhypo)
+fb = getFactor(retDFG, :x2x3x4f1)
+@test fa == fb
 
-  fb = getFactor(retDFG, :x2x3x4f1)
-  @test fa == fb
+fa = getFactor(fg, :x1f2)
+fb = getFactor(retDFG, :x1f2)
 
-  fa = getFactor(fg, :x1f2)
-  fb = getFactor(retDFG, :x1f2)
+@test fa == fb
 
-  @test fa == fb
+##
 end
diff --git a/test/testSphereMani.jl b/test/testSphereMani.jl
index 84caf22e2..5f9a602fc 100644
--- a/test/testSphereMani.jl
+++ b/test/testSphereMani.jl
@@ -4,7 +4,10 @@ using Manifolds
 using StaticArrays
 using Test
 
+##
+
 @testset "Test Sphere(2) prior" begin
+##
 
 Base.convert(::Type{<:Tuple}, M::Sphere{2, ℝ}) = (:Euclid, :Euclid)
 Base.convert(::Type{<:Tuple}, ::IIF.InstanceType{Sphere{2, ℝ}}) = (:Euclid, :Euclid)
@@ -39,7 +42,8 @@ f = addFactor!(fg, [:x0, :x1], mf)
 ##
 # Debugging Sphere error
 smtasks = Task[]
-solveTree!(fg; smtasks, verbose=true, recordcliqs=ls(fg))
+solveTree!(fg; smtasks) #, verbose=true, recordcliqs=ls(fg))
 # hists = fetchCliqHistoryAll!(smtasks);
 
+##
 end
\ No newline at end of file
diff --git a/test/testpartialconstraint.jl b/test/testpartialconstraint.jl
index 20157ef78..e98b793a4 100644
--- a/test/testpartialconstraint.jl
+++ b/test/testpartialconstraint.jl
@@ -2,10 +2,11 @@ using Test
 using IncrementalInference
 # going to introduce two new constraint mutable structs
-import IncrementalInference: getSample
 using Statistics
 using TensorCast
+using Manifolds
+import IncrementalInference: getManifold, getSample
 
 ##
 
@@ -27,6 +28,8 @@ mutable struct DevelopPartialPairwise <: AbstractRelativeMinimize
   partial::Tuple
   DevelopPartialPairwise(x::Distribution) = new(x, (2,))
 end
+getManifold(::IIF.InstanceType{DevelopPartialPairwise}) = TranslationGroup(1)
+
 getSample(cf::CalcFactor{<:DevelopPartialPairwise}, N::Int=1) = ([rand(cf.factor.x, 1)[:] for _ in 1:N], )
 
 function (dp::CalcFactor{<:DevelopPartialPairwise})(meas,
@@ -93,7 +96,7 @@ pts_ = approxConv(fg, getLabel(f2), :x1, N=N)
 end
 
-@testset "check that partials are received through convolutions" begin
+@testset "check that partials are received through convolutions of prior" begin
 
 ##
 
 # check that a partial belief is obtained
@@ -105,13 +108,6 @@ X1_ = approxConvBelief(fg, :x1f2, :x1)
 
 end
 
-@testset "test partial info per coord through relative convolution (conditional)" begin
-
-@test_broken false
-
-end
-
-
 @testset "test solving of factor graph" begin
 
 ##
@@ -128,30 +124,52 @@ end
 
 # plotKDE(getBelief(fg, :x1),levels=3)
 
-
 ## partial relative gradient and graph
 
+v2 = addVariable!(fg,:x2,ContinuousEuclid{2},N=N)
+
 dpp = DevelopPartialPairwise(Normal(10.0, 1.0))
+f3 = addFactor!(fg,[:x1;:x2],dpp)
+
+dp2 = DevelopPartial( Normal(-20.0, 1.0), (1,) )
+f4 = addFactor!(fg,[:x2;], dp2, graphinit=false)
 
-# measurement = ([10.0;], )
-# pts = ([0;0.0], [0;10.0])
-# gradients = FactorGradientsCached!(dpp, (ContinuousEuclid{2}, ContinuousEuclid{2}), measurement, pts);
+doautoinit!(fg, :x2)
 
 ##
 
+@testset "test partial info per coord through relative convolution (conditional)" begin
+##
 
-v2 = addVariable!(fg,:x2,ContinuousEuclid{2},N=N)
+one_meas = ([10.0;], )
+pts = ([0;0.0], [0;10.0])
+gradients = FactorGradientsCached!(dpp, (ContinuousEuclid{2}, ContinuousEuclid{2}), one_meas, pts);
 
 ##
 
-f3 = addFactor!(fg,[:x1;:x2],dpp)
+# check that the gradients can be calculated
+J = gradients(one_meas..., pts...)
+@test size(J) == (4,4)
+@test norm(J - [0 0 0 0; 0 0 0 1; 0 0 0 0; 0 1 0 0] ) < 1e-4
 
-dp2 = DevelopPartial( Normal(-20.0, 1.0), (1,) )
-f4 = addFactor!(fg,[:x2;], dp2, graphinit=false)
-doautoinit!(fg, :x2)
+## check perturbation logic
+
+prtb = calcPerturbationFromVariable(gradients, [1=>[1;1]])
+
+# self variation is taken as 0 at this time
+@test isapprox( prtb[1], [0;0] )
+# variable 1 influences 2 only through partial dimension 2 (as per DevelopPartialPairwise)
+@test isapprox( prtb[2], [0;1] )
 
-# drawGraph(fg, show=true)
+## test evaluation through the convolution operation within a factor graph
+# add relative IPC calculation inside evalFactor
+bel = approxConvBelief(fg, getLabel(f3), :x2)
+@test_broken isPartial(bel)
+
+##
+end
 
 ##
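
Editor's note (illustrative addition, not part of the patch above): the hunks in src/Deprecated.jl and src/services/FactorGradients.jl migrate `calcPerturbationFromVariable` from positional `(fgc, fromVar, infoPerCoord)` arguments to a vector of `fromVar => infoPerCoord` Pairs, with the old form kept alive through `@deprecate`. A minimal Julia sketch of the new call path follows; the factor, measurement, and point values are illustrative only, mirroring the updated docstring example and test/testpartialconstraint.jl.

using IncrementalInference, Distributions

# a binary relative factor between two ContinuousEuclid{2} variables (illustrative values)
fct = LinearRelative(MvNormal([10.0;0.0], [1.0 0.0; 0.0 1.0]))
measurement = ([10.0;0.0],)
varTypes = (ContinuousEuclid{2}, ContinuousEuclid{2})
pts = ([0.0;0.0], [10.0;0.0])

# build the cached-gradients object; gradient preparation internally blocks recursion (_blockRecursion)
fgc = FactorGradientsCached!(fct, varTypes, measurement, pts)
J = fgc(measurement..., pts...)   # update and return the stacked Jacobian at these values

# old, now deprecated: calcPerturbationFromVariable(fgc, 1, [1.0;1.0])
# new API: pass perturbations as Pairs of variable index => infoPerCoord
ret = calcPerturbationFromVariable(fgc, [1 => [1.0;1.0]])
# for a one-to-one LinearRelative the influence on the second variable is expected to be close to [1;1]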